Diffstat (limited to 'js/src/jit')
-rw-r--r--js/src/jit/AliasAnalysis.cpp283
-rw-r--r--js/src/jit/AliasAnalysis.h31
-rw-r--r--js/src/jit/AliasAnalysisShared.cpp188
-rw-r--r--js/src/jit/AliasAnalysisShared.h81
-rw-r--r--js/src/jit/AlignmentMaskAnalysis.cpp94
-rw-r--r--js/src/jit/AlignmentMaskAnalysis.h32
-rw-r--r--js/src/jit/AtomicOp.h73
-rw-r--r--js/src/jit/AtomicOperations.h353
-rw-r--r--js/src/jit/BacktrackingAllocator.cpp3124
-rw-r--r--js/src/jit/BacktrackingAllocator.h816
-rw-r--r--js/src/jit/Bailouts.cpp314
-rw-r--r--js/src/jit/Bailouts.h219
-rw-r--r--js/src/jit/BaselineBailouts.cpp1999
-rw-r--r--js/src/jit/BaselineCacheIR.cpp1283
-rw-r--r--js/src/jit/BaselineCacheIR.h67
-rw-r--r--js/src/jit/BaselineCompiler.cpp4527
-rw-r--r--js/src/jit/BaselineCompiler.h357
-rw-r--r--js/src/jit/BaselineDebugModeOSR.cpp1184
-rw-r--r--js/src/jit/BaselineDebugModeOSR.h146
-rw-r--r--js/src/jit/BaselineFrame-inl.h107
-rw-r--r--js/src/jit/BaselineFrame.cpp157
-rw-r--r--js/src/jit/BaselineFrame.h458
-rw-r--r--js/src/jit/BaselineFrameInfo-inl.h41
-rw-r--r--js/src/jit/BaselineFrameInfo.cpp196
-rw-r--r--js/src/jit/BaselineFrameInfo.h315
-rw-r--r--js/src/jit/BaselineIC.cpp8719
-rw-r--r--js/src/jit/BaselineIC.h3384
-rw-r--r--js/src/jit/BaselineICList.h123
-rw-r--r--js/src/jit/BaselineInspector.cpp924
-rw-r--r--js/src/jit/BaselineInspector.h148
-rw-r--r--js/src/jit/BaselineJIT.cpp1251
-rw-r--r--js/src/jit/BaselineJIT.h635
-rw-r--r--js/src/jit/BitSet.cpp115
-rw-r--r--js/src/jit/BitSet.h182
-rw-r--r--js/src/jit/BytecodeAnalysis.cpp227
-rw-r--r--js/src/jit/BytecodeAnalysis.h78
-rw-r--r--js/src/jit/C1Spewer.cpp194
-rw-r--r--js/src/jit/C1Spewer.h51
-rw-r--r--js/src/jit/CacheIR.cpp473
-rw-r--r--js/src/jit/CacheIR.h453
-rw-r--r--js/src/jit/CodeGenerator.cpp12098
-rw-r--r--js/src/jit/CodeGenerator.h593
-rw-r--r--js/src/jit/CompactBuffer.h206
-rw-r--r--js/src/jit/CompileInfo-inl.h90
-rw-r--r--js/src/jit/CompileInfo.h560
-rw-r--r--js/src/jit/CompileWrappers.cpp310
-rw-r--r--js/src/jit/CompileWrappers.h158
-rw-r--r--js/src/jit/Disassembler.cpp64
-rw-r--r--js/src/jit/Disassembler.h278
-rw-r--r--js/src/jit/EagerSimdUnbox.cpp128
-rw-r--r--js/src/jit/EagerSimdUnbox.h25
-rw-r--r--js/src/jit/EdgeCaseAnalysis.cpp47
-rw-r--r--js/src/jit/EdgeCaseAnalysis.h31
-rw-r--r--js/src/jit/EffectiveAddressAnalysis.cpp277
-rw-r--r--js/src/jit/EffectiveAddressAnalysis.h39
-rw-r--r--js/src/jit/ExecutableAllocator.cpp390
-rw-r--r--js/src/jit/ExecutableAllocator.h330
-rw-r--r--js/src/jit/FixedList.h106
-rw-r--r--js/src/jit/FlowAliasAnalysis.cpp949
-rw-r--r--js/src/jit/FlowAliasAnalysis.h71
-rw-r--r--js/src/jit/FoldLinearArithConstants.cpp104
-rw-r--r--js/src/jit/FoldLinearArithConstants.h22
-rw-r--r--js/src/jit/ICStubSpace.h82
-rw-r--r--js/src/jit/InlinableNatives.h166
-rw-r--r--js/src/jit/InlineList.h671
-rw-r--r--js/src/jit/InstructionReordering.cpp190
-rw-r--r--js/src/jit/InstructionReordering.h21
-rw-r--r--js/src/jit/Ion.cpp3560
-rw-r--r--js/src/jit/Ion.h221
-rw-r--r--js/src/jit/IonAnalysis.cpp4760
-rw-r--r--js/src/jit/IonAnalysis.h218
-rw-r--r--js/src/jit/IonBuilder.cpp14696
-rw-r--r--js/src/jit/IonBuilder.h1533
-rw-r--r--js/src/jit/IonCaches.cpp5072
-rw-r--r--js/src/jit/IonCaches.h848
-rw-r--r--js/src/jit/IonCode.h825
-rw-r--r--js/src/jit/IonInstrumentation.h33
-rw-r--r--js/src/jit/IonOptimizationLevels.cpp178
-rw-r--r--js/src/jit/IonOptimizationLevels.h302
-rw-r--r--js/src/jit/IonTypes.h875
-rw-r--r--js/src/jit/JSONSpewer.cpp410
-rw-r--r--js/src/jit/JSONSpewer.h72
-rw-r--r--js/src/jit/JitAllocPolicy.h210
-rw-r--r--js/src/jit/JitCommon.h52
-rw-r--r--js/src/jit/JitCompartment.h667
-rw-r--r--js/src/jit/JitFrameIterator-inl.h51
-rw-r--r--js/src/jit/JitFrameIterator.h864
-rw-r--r--js/src/jit/JitFrames-inl.h73
-rw-r--r--js/src/jit/JitFrames.cpp3158
-rw-r--r--js/src/jit/JitFrames.h1044
-rw-r--r--js/src/jit/JitOptions.cpp298
-rw-r--r--js/src/jit/JitOptions.h110
-rw-r--r--js/src/jit/JitSpewer.cpp679
-rw-r--r--js/src/jit/JitSpewer.h293
-rw-r--r--js/src/jit/JitcodeMap.cpp1662
-rw-r--r--js/src/jit/JitcodeMap.h1493
-rw-r--r--js/src/jit/LICM.cpp272
-rw-r--r--js/src/jit/LICM.h25
-rw-r--r--js/src/jit/LIR.cpp621
-rw-r--r--js/src/jit/LIR.h2025
-rw-r--r--js/src/jit/LOpcodes.h32
-rw-r--r--js/src/jit/Label.h117
-rw-r--r--js/src/jit/Linker.cpp64
-rw-r--r--js/src/jit/Linker.h46
-rw-r--r--js/src/jit/LoopUnroller.cpp408
-rw-r--r--js/src/jit/LoopUnroller.h21
-rw-r--r--js/src/jit/Lowering.cpp4930
-rw-r--r--js/src/jit/Lowering.h338
-rw-r--r--js/src/jit/MCallOptimize.cpp4099
-rw-r--r--js/src/jit/MIR.cpp6642
-rw-r--r--js/src/jit/MIR.h14267
-rw-r--r--js/src/jit/MIRGenerator.h229
-rw-r--r--js/src/jit/MIRGraph.cpp1750
-rw-r--r--js/src/jit/MIRGraph.h1060
-rw-r--r--js/src/jit/MOpcodes.h349
-rw-r--r--js/src/jit/MacroAssembler-inl.h819
-rw-r--r--js/src/jit/MacroAssembler.cpp2980
-rw-r--r--js/src/jit/MacroAssembler.h2233
-rw-r--r--js/src/jit/MoveEmitter.h26
-rw-r--r--js/src/jit/MoveResolver.cpp321
-rw-r--r--js/src/jit/MoveResolver.h333
-rw-r--r--js/src/jit/OptimizationTracking.cpp1305
-rw-r--r--js/src/jit/OptimizationTracking.h575
-rw-r--r--js/src/jit/PcScriptCache.h81
-rw-r--r--js/src/jit/PerfSpewer.cpp340
-rw-r--r--js/src/jit/PerfSpewer.h95
-rw-r--r--js/src/jit/ProcessExecutableMemory.cpp656
-rw-r--r--js/src/jit/ProcessExecutableMemory.h48
-rw-r--r--js/src/jit/RangeAnalysis.cpp3634
-rw-r--r--js/src/jit/RangeAnalysis.h711
-rw-r--r--js/src/jit/Recover.cpp1694
-rw-r--r--js/src/jit/Recover.h692
-rw-r--r--js/src/jit/RegisterAllocator.cpp614
-rw-r--r--js/src/jit/RegisterAllocator.h375
-rw-r--r--js/src/jit/RegisterSets.h1333
-rw-r--r--js/src/jit/Registers.h250
-rw-r--r--js/src/jit/RematerializedFrame.cpp222
-rw-r--r--js/src/jit/RematerializedFrame.h275
-rw-r--r--js/src/jit/Safepoints.cpp562
-rw-r--r--js/src/jit/Safepoints.h131
-rw-r--r--js/src/jit/ScalarReplacement.cpp1350
-rw-r--r--js/src/jit/ScalarReplacement.h25
-rw-r--r--js/src/jit/SharedIC.cpp4306
-rw-r--r--js/src/jit/SharedIC.h3120
-rw-r--r--js/src/jit/SharedICHelpers.h32
-rw-r--r--js/src/jit/SharedICList.h55
-rw-r--r--js/src/jit/SharedICRegisters.h34
-rw-r--r--js/src/jit/Sink.cpp232
-rw-r--r--js/src/jit/Sink.h25
-rw-r--r--js/src/jit/Snapshots.cpp731
-rw-r--r--js/src/jit/Snapshots.h579
-rw-r--r--js/src/jit/StackSlotAllocator.h110
-rw-r--r--js/src/jit/StupidAllocator.cpp434
-rw-r--r--js/src/jit/StupidAllocator.h90
-rw-r--r--js/src/jit/TypePolicy.cpp1330
-rw-r--r--js/src/jit/TypePolicy.h536
-rw-r--r--js/src/jit/TypedObjectPrediction.cpp308
-rw-r--r--js/src/jit/TypedObjectPrediction.h201
-rw-r--r--js/src/jit/VMFunctions.cpp1361
-rw-r--r--js/src/jit/VMFunctions.h808
-rw-r--r--js/src/jit/ValueNumbering.cpp1306
-rw-r--r--js/src/jit/ValueNumbering.h127
-rw-r--r--js/src/jit/WasmBCE.cpp94
-rw-r--r--js/src/jit/WasmBCE.h33
-rw-r--r--js/src/jit/arm/Architecture-arm.cpp444
-rw-r--r--js/src/jit/arm/Architecture-arm.h673
-rw-r--r--js/src/jit/arm/Assembler-arm.cpp3442
-rw-r--r--js/src/jit/arm/Assembler-arm.h2429
-rw-r--r--js/src/jit/arm/AtomicOperations-arm.h247
-rw-r--r--js/src/jit/arm/Bailouts-arm.cpp119
-rw-r--r--js/src/jit/arm/BaselineCompiler-arm.cpp15
-rw-r--r--js/src/jit/arm/BaselineCompiler-arm.h26
-rw-r--r--js/src/jit/arm/BaselineIC-arm.cpp74
-rw-r--r--js/src/jit/arm/CodeGenerator-arm.cpp3720
-rw-r--r--js/src/jit/arm/CodeGenerator-arm.h336
-rw-r--r--js/src/jit/arm/DoubleEntryTable.tbl257
-rw-r--r--js/src/jit/arm/LIR-arm.h710
-rw-r--r--js/src/jit/arm/LOpcodes-arm.h32
-rw-r--r--js/src/jit/arm/Lowering-arm.cpp1031
-rw-r--r--js/src/jit/arm/Lowering-arm.h132
-rw-r--r--js/src/jit/arm/MacroAssembler-arm-inl.h2143
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.cpp5559
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.h1554
-rw-r--r--js/src/jit/arm/MoveEmitter-arm.cpp427
-rw-r--r--js/src/jit/arm/MoveEmitter-arm.h66
-rw-r--r--js/src/jit/arm/SharedIC-arm.cpp217
-rw-r--r--js/src/jit/arm/SharedICHelpers-arm.h384
-rw-r--r--js/src/jit/arm/SharedICRegisters-arm.h54
-rw-r--r--js/src/jit/arm/Simulator-arm.cpp4941
-rw-r--r--js/src/jit/arm/Simulator-arm.h519
-rw-r--r--js/src/jit/arm/Trampoline-arm.cpp1442
-rw-r--r--js/src/jit/arm/disasm/Constants-arm.cpp144
-rw-r--r--js/src/jit/arm/disasm/Constants-arm.h745
-rw-r--r--js/src/jit/arm/disasm/Disasm-arm.cpp2173
-rw-r--r--js/src/jit/arm/disasm/Disasm-arm.h143
-rw-r--r--js/src/jit/arm/gen-double-encoder-table.py32
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S27
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S28
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/assembly.h70
-rw-r--r--js/src/jit/arm64/Architecture-arm64.cpp75
-rw-r--r--js/src/jit/arm64/Architecture-arm64.h462
-rw-r--r--js/src/jit/arm64/Assembler-arm64.cpp670
-rw-r--r--js/src/jit/arm64/Assembler-arm64.h557
-rw-r--r--js/src/jit/arm64/AtomicOperations-arm64.h156
-rw-r--r--js/src/jit/arm64/Bailouts-arm64.cpp67
-rw-r--r--js/src/jit/arm64/BaselineCompiler-arm64.h28
-rw-r--r--js/src/jit/arm64/BaselineIC-arm64.cpp75
-rw-r--r--js/src/jit/arm64/CodeGenerator-arm64.cpp783
-rw-r--r--js/src/jit/arm64/CodeGenerator-arm64.h262
-rw-r--r--js/src/jit/arm64/LIR-arm64.h395
-rw-r--r--js/src/jit/arm64/LOpcodes-arm64.h20
-rw-r--r--js/src/jit/arm64/Lowering-arm64.cpp369
-rw-r--r--js/src/jit/arm64/Lowering-arm64.h132
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64-inl.h1793
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.cpp838
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.h2338
-rw-r--r--js/src/jit/arm64/MoveEmitter-arm64.cpp300
-rw-r--r--js/src/jit/arm64/MoveEmitter-arm64.h86
-rw-r--r--js/src/jit/arm64/SharedIC-arm64.cpp219
-rw-r--r--js/src/jit/arm64/SharedICHelpers-arm64.h337
-rw-r--r--js/src/jit/arm64/SharedICRegisters-arm64.h58
-rw-r--r--js/src/jit/arm64/Trampoline-arm64.cpp1229
-rw-r--r--js/src/jit/arm64/vixl/.clang-format4
-rw-r--r--js/src/jit/arm64/vixl/Assembler-vixl.cpp5088
-rw-r--r--js/src/jit/arm64/vixl/Assembler-vixl.h4257
-rw-r--r--js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h179
-rw-r--r--js/src/jit/arm64/vixl/Constants-vixl.h2148
-rw-r--r--js/src/jit/arm64/vixl/Cpu-vixl.cpp170
-rw-r--r--js/src/jit/arm64/vixl/Cpu-vixl.h83
-rw-r--r--js/src/jit/arm64/vixl/Debugger-vixl.cpp1535
-rw-r--r--js/src/jit/arm64/vixl/Debugger-vixl.h117
-rw-r--r--js/src/jit/arm64/vixl/Decoder-vixl.cpp874
-rw-r--r--js/src/jit/arm64/vixl/Decoder-vixl.h274
-rw-r--r--js/src/jit/arm64/vixl/Disasm-vixl.cpp3488
-rw-r--r--js/src/jit/arm64/vixl/Disasm-vixl.h177
-rw-r--r--js/src/jit/arm64/vixl/Globals-vixl.h122
-rw-r--r--js/src/jit/arm64/vixl/Instructions-vixl.cpp670
-rw-r--r--js/src/jit/arm64/vixl/Instructions-vixl.h830
-rw-r--r--js/src/jit/arm64/vixl/Instrument-vixl.cpp844
-rw-r--r--js/src/jit/arm64/vixl/Instrument-vixl.h110
-rw-r--r--js/src/jit/arm64/vixl/Logic-vixl.cpp4878
-rw-r--r--js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp2007
-rw-r--r--js/src/jit/arm64/vixl/MacroAssembler-vixl.h2494
-rw-r--r--js/src/jit/arm64/vixl/MozAssembler-vixl.cpp712
-rw-r--r--js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h216
-rw-r--r--js/src/jit/arm64/vixl/MozInstructions-vixl.cpp195
-rw-r--r--js/src/jit/arm64/vixl/MozSimulator-vixl.cpp708
-rw-r--r--js/src/jit/arm64/vixl/Platform-vixl.h39
-rw-r--r--js/src/jit/arm64/vixl/Simulator-Constants-vixl.h141
-rw-r--r--js/src/jit/arm64/vixl/Simulator-vixl.cpp3949
-rw-r--r--js/src/jit/arm64/vixl/Simulator-vixl.h2677
-rw-r--r--js/src/jit/arm64/vixl/Utils-vixl.cpp145
-rw-r--r--js/src/jit/arm64/vixl/Utils-vixl.h286
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.cpp77
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.h338
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.cpp1746
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.h1522
-rw-r--r--js/src/jit/mips-shared/AtomicOperations-mips-shared.h241
-rw-r--r--js/src/jit/mips-shared/Bailouts-mips-shared.cpp24
-rw-r--r--js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp16
-rw-r--r--js/src/jit/mips-shared/BaselineCompiler-mips-shared.h24
-rw-r--r--js/src/jit/mips-shared/BaselineIC-mips-shared.cpp39
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp2931
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.h301
-rw-r--r--js/src/jit/mips-shared/LIR-mips-shared.h408
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.cpp753
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.h108
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h1030
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp1728
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.h262
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp223
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.h76
-rw-r--r--js/src/jit/mips-shared/SharedICHelpers-mips-shared.h382
-rw-r--r--js/src/jit/mips32/Architecture-mips32.cpp102
-rw-r--r--js/src/jit/mips32/Architecture-mips32.h287
-rw-r--r--js/src/jit/mips32/Assembler-mips32.cpp545
-rw-r--r--js/src/jit/mips32/Assembler-mips32.h227
-rw-r--r--js/src/jit/mips32/Bailouts-mips32.cpp48
-rw-r--r--js/src/jit/mips32/Bailouts-mips32.h77
-rw-r--r--js/src/jit/mips32/BaselineCompiler-mips32.cpp16
-rw-r--r--js/src/jit/mips32/BaselineCompiler-mips32.h26
-rw-r--r--js/src/jit/mips32/BaselineIC-mips32.cpp45
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.cpp832
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.h96
-rw-r--r--js/src/jit/mips32/LIR-mips32.h169
-rw-r--r--js/src/jit/mips32/LOpcodes-mips32.h25
-rw-r--r--js/src/jit/mips32/Lowering-mips32.cpp258
-rw-r--r--js/src/jit/mips32/Lowering-mips32.h57
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32-inl.h1077
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.cpp2365
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.h1021
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.cpp156
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.h34
-rw-r--r--js/src/jit/mips32/SharedIC-mips32.cpp177
-rw-r--r--js/src/jit/mips32/SharedICRegisters-mips32.h44
-rw-r--r--js/src/jit/mips32/Simulator-mips32.cpp3519
-rw-r--r--js/src/jit/mips32/Simulator-mips32.h424
-rw-r--r--js/src/jit/mips32/Trampoline-mips32.cpp1418
-rw-r--r--js/src/jit/mips64/Architecture-mips64.cpp93
-rw-r--r--js/src/jit/mips64/Architecture-mips64.h209
-rw-r--r--js/src/jit/mips64/Assembler-mips64.cpp529
-rw-r--r--js/src/jit/mips64/Assembler-mips64.h236
-rw-r--r--js/src/jit/mips64/Bailouts-mips64.cpp28
-rw-r--r--js/src/jit/mips64/Bailouts-mips64.h44
-rw-r--r--js/src/jit/mips64/BaselineCompiler-mips64.cpp16
-rw-r--r--js/src/jit/mips64/BaselineCompiler-mips64.h26
-rw-r--r--js/src/jit/mips64/BaselineIC-mips64.cpp47
-rw-r--r--js/src/jit/mips64/CodeGenerator-mips64.cpp774
-rw-r--r--js/src/jit/mips64/CodeGenerator-mips64.h102
-rw-r--r--js/src/jit/mips64/LIR-mips64.h140
-rw-r--r--js/src/jit/mips64/LOpcodes-mips64.h24
-rw-r--r--js/src/jit/mips64/Lowering-mips64.cpp184
-rw-r--r--js/src/jit/mips64/Lowering-mips64.h57
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64-inl.h774
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.cpp2485
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.h1041
-rw-r--r--js/src/jit/mips64/MoveEmitter-mips64.cpp155
-rw-r--r--js/src/jit/mips64/MoveEmitter-mips64.h34
-rw-r--r--js/src/jit/mips64/SharedIC-mips64.cpp191
-rw-r--r--js/src/jit/mips64/SharedICRegisters-mips64.h47
-rw-r--r--js/src/jit/mips64/Simulator-mips64.cpp3874
-rw-r--r--js/src/jit/mips64/Simulator-mips64.h440
-rw-r--r--js/src/jit/mips64/Trampoline-mips64.cpp1363
-rw-r--r--js/src/jit/none/Architecture-none.h157
-rw-r--r--js/src/jit/none/AtomicOperations-none.h134
-rw-r--r--js/src/jit/none/AtomicOperations-ppc.h242
-rw-r--r--js/src/jit/none/AtomicOperations-sparc.h251
-rw-r--r--js/src/jit/none/BaselineCompiler-none.h30
-rw-r--r--js/src/jit/none/CodeGenerator-none.h62
-rw-r--r--js/src/jit/none/LIR-none.h111
-rw-r--r--js/src/jit/none/LOpcodes-none.h14
-rw-r--r--js/src/jit/none/Lowering-none.h118
-rw-r--r--js/src/jit/none/MacroAssembler-none.h464
-rw-r--r--js/src/jit/none/MoveEmitter-none.h30
-rw-r--r--js/src/jit/none/SharedICHelpers-none.h42
-rw-r--r--js/src/jit/none/SharedICRegisters-none.h35
-rw-r--r--js/src/jit/none/Trampoline-none.cpp49
-rw-r--r--js/src/jit/shared/Assembler-shared.h991
-rw-r--r--js/src/jit/shared/BaselineCompiler-shared.cpp146
-rw-r--r--js/src/jit/shared/BaselineCompiler-shared.h172
-rw-r--r--js/src/jit/shared/CodeGenerator-shared-inl.h437
-rw-r--r--js/src/jit/shared/CodeGenerator-shared.cpp1865
-rw-r--r--js/src/jit/shared/CodeGenerator-shared.h850
-rw-r--r--js/src/jit/shared/IonAssemblerBuffer.h417
-rw-r--r--js/src/jit/shared/IonAssemblerBufferWithConstantPools.h1145
-rw-r--r--js/src/jit/shared/LIR-shared.h8904
-rw-r--r--js/src/jit/shared/LOpcodes-shared.h441
-rw-r--r--js/src/jit/shared/Lowering-shared-inl.h858
-rw-r--r--js/src/jit/shared/Lowering-shared.cpp306
-rw-r--r--js/src/jit/shared/Lowering-shared.h296
-rw-r--r--js/src/jit/x64/Assembler-x64.cpp303
-rw-r--r--js/src/jit/x64/Assembler-x64.h1040
-rw-r--r--js/src/jit/x64/Bailouts-x64.cpp75
-rw-r--r--js/src/jit/x64/BaseAssembler-x64.h929
-rw-r--r--js/src/jit/x64/BaselineCompiler-x64.cpp15
-rw-r--r--js/src/jit/x64/BaselineCompiler-x64.h26
-rw-r--r--js/src/jit/x64/BaselineIC-x64.cpp46
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.cpp880
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.h89
-rw-r--r--js/src/jit/x64/LIR-x64.h183
-rw-r--r--js/src/jit/x64/LOpcodes-x64.h23
-rw-r--r--js/src/jit/x64/Lowering-x64.cpp495
-rw-r--r--js/src/jit/x64/Lowering-x64.h80
-rw-r--r--js/src/jit/x64/MacroAssembler-x64-inl.h897
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.cpp859
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.h966
-rw-r--r--js/src/jit/x64/SharedIC-x64.cpp234
-rw-r--r--js/src/jit/x64/SharedICHelpers-x64.h352
-rw-r--r--js/src/jit/x64/SharedICRegisters-x64.h35
-rw-r--r--js/src/jit/x64/Trampoline-x64.cpp1303
-rw-r--r--js/src/jit/x86-shared/Architecture-x86-shared.cpp97
-rw-r--r--js/src/jit/x86-shared/Architecture-x86-shared.h463
-rw-r--r--js/src/jit/x86-shared/Assembler-x86-shared.cpp350
-rw-r--r--js/src/jit/x86-shared/Assembler-x86-shared.h3652
-rw-r--r--js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp25
-rw-r--r--js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h205
-rw-r--r--js/src/jit/x86-shared/AtomicOperations-x86-shared.h602
-rw-r--r--js/src/jit/x86-shared/BaseAssembler-x86-shared.h5393
-rw-r--r--js/src/jit/x86-shared/BaselineCompiler-x86-shared.cpp15
-rw-r--r--js/src/jit/x86-shared/BaselineCompiler-x86-shared.h24
-rw-r--r--js/src/jit/x86-shared/BaselineIC-x86-shared.cpp44
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp4727
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.h357
-rw-r--r--js/src/jit/x86-shared/Constants-x86-shared.h228
-rw-r--r--js/src/jit/x86-shared/Disassembler-x86-shared.cpp568
-rw-r--r--js/src/jit/x86-shared/Encoding-x86-shared.h413
-rw-r--r--js/src/jit/x86-shared/LIR-x86-shared.h421
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.cpp1019
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.h81
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h1284
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp855
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.h1411
-rw-r--r--js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp581
-rw-r--r--js/src/jit/x86-shared/MoveEmitter-x86-shared.h74
-rw-r--r--js/src/jit/x86-shared/Patching-x86-shared.h124
-rw-r--r--js/src/jit/x86/Assembler-x86.cpp106
-rw-r--r--js/src/jit/x86/Assembler-x86.h991
-rw-r--r--js/src/jit/x86/Bailouts-x86.cpp115
-rw-r--r--js/src/jit/x86/BaseAssembler-x86.h203
-rw-r--r--js/src/jit/x86/BaselineCompiler-x86.cpp15
-rw-r--r--js/src/jit/x86/BaselineCompiler-x86.h26
-rw-r--r--js/src/jit/x86/BaselineIC-x86.cpp48
-rw-r--r--js/src/jit/x86/CodeGenerator-x86.cpp1298
-rw-r--r--js/src/jit/x86/CodeGenerator-x86.h98
-rw-r--r--js/src/jit/x86/LIR-x86.h207
-rw-r--r--js/src/jit/x86/LOpcodes-x86.h24
-rw-r--r--js/src/jit/x86/Lowering-x86.cpp658
-rw-r--r--js/src/jit/x86/Lowering-x86.h96
-rw-r--r--js/src/jit/x86/MacroAssembler-x86-inl.h1075
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.cpp1028
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.h870
-rw-r--r--js/src/jit/x86/SharedIC-x86.cpp242
-rw-r--r--js/src/jit/x86/SharedICHelpers-x86.h353
-rw-r--r--js/src/jit/x86/SharedICRegisters-x86.h38
-rw-r--r--js/src/jit/x86/Trampoline-x86.cpp1336
415 files changed, 352937 insertions, 0 deletions
diff --git a/js/src/jit/AliasAnalysis.cpp b/js/src/jit/AliasAnalysis.cpp
new file mode 100644
index 000000000..ad26a890e
--- /dev/null
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -0,0 +1,283 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AliasAnalysis.h"
+
+#include <stdio.h>
+
+#include "jit/AliasAnalysisShared.h"
+#include "jit/Ion.h"
+#include "jit/IonBuilder.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+#include "vm/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Array;
+
+namespace js {
+namespace jit {
+
+class LoopAliasInfo : public TempObject
+{
+ private:
+ LoopAliasInfo* outer_;
+ MBasicBlock* loopHeader_;
+ MInstructionVector invariantLoads_;
+
+ public:
+ LoopAliasInfo(TempAllocator& alloc, LoopAliasInfo* outer, MBasicBlock* loopHeader)
+ : outer_(outer), loopHeader_(loopHeader), invariantLoads_(alloc)
+ { }
+
+ MBasicBlock* loopHeader() const {
+ return loopHeader_;
+ }
+ LoopAliasInfo* outer() const {
+ return outer_;
+ }
+ bool addInvariantLoad(MInstruction* ins) {
+ return invariantLoads_.append(ins);
+ }
+ const MInstructionVector& invariantLoads() const {
+ return invariantLoads_;
+ }
+ MInstruction* firstInstruction() const {
+ return *loopHeader_->begin();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+AliasAnalysis::AliasAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : AliasAnalysisShared(mir, graph),
+ loop_(nullptr)
+{
+}
+
+// Whether there might be a path from src to dest, excluding loop backedges. This is
+// approximate and really ought to depend on precomputed reachability information.
+static inline bool
+BlockMightReach(MBasicBlock* src, MBasicBlock* dest)
+{
+ while (src->id() <= dest->id()) {
+ if (src == dest)
+ return true;
+ switch (src->numSuccessors()) {
+ case 0:
+ return false;
+ case 1: {
+ MBasicBlock* successor = src->getSuccessor(0);
+ if (successor->id() <= src->id())
+ return true; // Don't iloop.
+ src = successor;
+ break;
+ }
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+IonSpewDependency(MInstruction* load, MInstruction* store, const char* verb, const char* reason)
+{
+ if (!JitSpewEnabled(JitSpew_Alias))
+ return;
+
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Load ");
+ load->printName(out);
+ out.printf(" %s on store ", verb);
+ store->printName(out);
+ out.printf(" (%s)\n", reason);
+}
+
+static void
+IonSpewAliasInfo(const char* pre, MInstruction* ins, const char* post)
+{
+ if (!JitSpewEnabled(JitSpew_Alias))
+ return;
+
+ Fprinter& out = JitSpewPrinter();
+ out.printf("%s ", pre);
+ ins->printName(out);
+ out.printf(" %s\n", post);
+}
+
+// This pass annotates every load instruction with the last store instruction
+// on which it depends. The algorithm is optimistic in that it ignores explicit
+// dependencies and only considers loads and stores.
+//
+// Loads inside loops only have an implicit dependency on a store before the
+// loop header if no instruction inside the loop body aliases it. To calculate
+// this efficiently, we maintain a list of maybe-invariant loads and the combined
+// alias set for all stores inside the loop. When we see the loop's backedge, this
+// information is used to mark every load we wrongly assumed to be loop invariant as
+// having an implicit dependency on the last instruction of the loop header, so that
+// it's never moved before the loop header.
+//
+// The algorithm depends on the invariant that both control instructions and effectful
+// instructions (stores) are never hoisted.
+bool
+AliasAnalysis::analyze()
+{
+ Vector<MInstructionVector, AliasSet::NumCategories, JitAllocPolicy> stores(alloc());
+
+ // Initialize to the first instruction.
+ MInstruction* firstIns = *graph_.entryBlock()->begin();
+ for (unsigned i = 0; i < AliasSet::NumCategories; i++) {
+ MInstructionVector defs(alloc());
+ if (!defs.append(firstIns))
+ return false;
+ if (!stores.append(Move(defs)))
+ return false;
+ }
+
+ // Type analysis may have inserted new instructions. Since this pass depends
+ // on the instruction number ordering, all instructions are renumbered.
+ uint32_t newId = 0;
+
+ for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+ if (mir->shouldCancel("Alias Analysis (main loop)"))
+ return false;
+
+ if (block->isLoopHeader()) {
+ JitSpew(JitSpew_Alias, "Processing loop header %d", block->id());
+ loop_ = new(alloc()) LoopAliasInfo(alloc(), loop_, *block);
+ }
+
+ for (MPhiIterator def(block->phisBegin()), end(block->phisEnd()); def != end; ++def)
+ def->setId(newId++);
+
+ for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
+ def != end;
+ ++def)
+ {
+ def->setId(newId++);
+
+ AliasSet set = def->getAliasSet();
+ if (set.isNone())
+ continue;
+
+ // For the purposes of alias analysis, all recoverable operations
+ // are treated as effect free as the memory represented by these
+ // operations cannot be aliased by others.
+ if (def->canRecoverOnBailout())
+ continue;
+
+ if (set.isStore()) {
+ for (AliasSetIterator iter(set); iter; iter++) {
+ if (!stores[*iter].append(*def))
+ return false;
+ }
+
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Processing store ");
+ def->printName(out);
+ out.printf(" (flags %x)\n", set.flags());
+ }
+ } else {
+ // Find the most recent store on which this instruction depends.
+ MInstruction* lastStore = firstIns;
+
+ for (AliasSetIterator iter(set); iter; iter++) {
+ MInstructionVector& aliasedStores = stores[*iter];
+ for (int i = aliasedStores.length() - 1; i >= 0; i--) {
+ MInstruction* store = aliasedStores[i];
+ if (genericMightAlias(*def, store) != MDefinition::AliasType::NoAlias &&
+ def->mightAlias(store) != MDefinition::AliasType::NoAlias &&
+ BlockMightReach(store->block(), *block))
+ {
+ if (lastStore->id() < store->id())
+ lastStore = store;
+ break;
+ }
+ }
+ }
+
+ def->setDependency(lastStore);
+ IonSpewDependency(*def, lastStore, "depends", "");
+
+ // If the last store was before the current loop, we assume this load
+ // is loop invariant. If a later instruction writes to the same location,
+ // we will fix this at the end of the loop.
+ if (loop_ && lastStore->id() < loop_->firstInstruction()->id()) {
+ if (!loop_->addInvariantLoad(*def))
+ return false;
+ }
+ }
+ }
+
+ // Renumber the last instruction, as the analysis depends on this and the order.
+ block->lastIns()->setId(newId++);
+
+ if (block->isLoopBackedge()) {
+ MOZ_ASSERT(loop_->loopHeader() == block->loopHeaderOfBackedge());
+ JitSpew(JitSpew_Alias, "Processing loop backedge %d (header %d)", block->id(),
+ loop_->loopHeader()->id());
+ LoopAliasInfo* outerLoop = loop_->outer();
+ MInstruction* firstLoopIns = *loop_->loopHeader()->begin();
+
+ const MInstructionVector& invariant = loop_->invariantLoads();
+
+ for (unsigned i = 0; i < invariant.length(); i++) {
+ MInstruction* ins = invariant[i];
+ AliasSet set = ins->getAliasSet();
+ MOZ_ASSERT(set.isLoad());
+
+ bool hasAlias = false;
+ for (AliasSetIterator iter(set); iter; iter++) {
+ MInstructionVector& aliasedStores = stores[*iter];
+ for (int i = aliasedStores.length() - 1;; i--) {
+ MInstruction* store = aliasedStores[i];
+ if (store->id() < firstLoopIns->id())
+ break;
+ if (genericMightAlias(ins, store) != MDefinition::AliasType::NoAlias &&
+ ins->mightAlias(store) != MDefinition::AliasType::NoAlias)
+ {
+ hasAlias = true;
+ IonSpewDependency(ins, store, "aliases", "store in loop body");
+ break;
+ }
+ }
+ if (hasAlias)
+ break;
+ }
+
+ if (hasAlias) {
+ // This instruction depends on stores inside the loop body. Mark it as having a
+ // dependency on the last instruction of the loop header. The last instruction is a
+ // control instruction and these are never hoisted.
+ MControlInstruction* controlIns = loop_->loopHeader()->lastIns();
+ IonSpewDependency(ins, controlIns, "depends", "due to stores in loop body");
+ ins->setDependency(controlIns);
+ } else {
+ IonSpewAliasInfo("Load", ins, "does not depend on any stores in this loop");
+
+ if (outerLoop && ins->dependency()->id() < outerLoop->firstInstruction()->id()) {
+ IonSpewAliasInfo("Load", ins, "may be invariant in outer loop");
+ if (!outerLoop->addInvariantLoad(ins))
+ return false;
+ }
+ }
+ }
+ loop_ = loop_->outer();
+ }
+ }
+
+ spewDependencyList();
+
+ MOZ_ASSERT(loop_ == nullptr);
+ return true;
+}
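The pass above keeps one vector of stores per alias-set category and annotates each load with the most recent store it might depend on, refining that choice with mightAlias() and reachability checks. A minimal standalone sketch of just that bookkeeping, where the Category enum, Ins struct and ids are invented for illustration and are not the MIR types used by the real pass:

#include <cstdint>
#include <cstdio>
#include <vector>

enum Category { ObjectFields, Elements, NumCategories };

struct Ins {
    uint32_t id;            // program order, like MDefinition::id()
    uint32_t dependency;    // for loads: id of the last aliasing store
};

int main() {
    // One store list per category, seeded with a sentinel "first instruction" (id 0).
    std::vector<std::vector<uint32_t>> stores(NumCategories, {0});

    auto recordStore = [&](uint32_t id, Category c) { stores[c].push_back(id); };
    auto annotateLoad = [&](Ins& load, Category c) {
        // The most recent store in the load's category becomes its dependency;
        // the real pass additionally filters with mightAlias() and BlockMightReach().
        load.dependency = stores[c].back();
    };

    recordStore(1, ObjectFields);   // e.g. a store to a slot
    recordStore(2, Elements);       // e.g. a store to an element
    Ins load{3, 0};
    annotateLoad(load, ObjectFields);
    std::printf("load %u depends on store %u\n", load.id, load.dependency);  // -> store 1
    return 0;
}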
diff --git a/js/src/jit/AliasAnalysis.h b/js/src/jit/AliasAnalysis.h
new file mode 100644
index 000000000..9d9dabc17
--- /dev/null
+++ b/js/src/jit/AliasAnalysis.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AliasAnalysis_h
+#define jit_AliasAnalysis_h
+
+#include "jit/AliasAnalysisShared.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class LoopAliasInfo;
+
+class AliasAnalysis : public AliasAnalysisShared
+{
+ LoopAliasInfo* loop_;
+
+ public:
+ AliasAnalysis(MIRGenerator* mir, MIRGraph& graph);
+ MOZ_MUST_USE bool analyze() override;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AliasAnalysis_h */
diff --git a/js/src/jit/AliasAnalysisShared.cpp b/js/src/jit/AliasAnalysisShared.cpp
new file mode 100644
index 000000000..ae28327ca
--- /dev/null
+++ b/js/src/jit/AliasAnalysisShared.cpp
@@ -0,0 +1,188 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AliasAnalysisShared.h"
+
+#include "jit/MIR.h"
+
+namespace js {
+namespace jit {
+
+void
+AliasAnalysisShared::spewDependencyList()
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_AliasSummaries)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_AliasSummaries);
+ print.printf("Dependency list for other passes:\n");
+
+ for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
+ def != end;
+ ++def)
+ {
+ if (!def->dependency())
+ continue;
+ if (!def->getAliasSet().isLoad())
+ continue;
+
+ JitSpewHeader(JitSpew_AliasSummaries);
+ print.printf(" ");
+ MDefinition::PrintOpcodeName(print, def->op());
+ print.printf("%d marked depending on ", def->id());
+ MDefinition::PrintOpcodeName(print, def->dependency()->op());
+ print.printf("%d\n", def->dependency()->id());
+ }
+ }
+ }
+#endif
+}
+
+// Unwrap any slot or element to its corresponding object.
+static inline const MDefinition*
+MaybeUnwrap(const MDefinition* object)
+{
+
+ while (object->isSlots() || object->isElements() || object->isConvertElementsToDoubles()) {
+ MOZ_ASSERT(object->numOperands() == 1);
+ object = object->getOperand(0);
+ }
+
+ if (object->isTypedArrayElements())
+ return nullptr;
+ if (object->isTypedObjectElements())
+ return nullptr;
+ if (object->isConstantElements())
+ return nullptr;
+
+ return object;
+}
+
+// Get the object of any load/store. Returns nullptr if not tied to
+// an object.
+static inline const MDefinition*
+GetObject(const MDefinition* ins)
+{
+ if (!ins->getAliasSet().isStore() && !ins->getAliasSet().isLoad())
+ return nullptr;
+
+    // Note: only return the object if that object owns the property,
+    // i.e. the property isn't on the prototype chain.
+ const MDefinition* object = nullptr;
+ switch (ins->op()) {
+ case MDefinition::Op_InitializedLength:
+ case MDefinition::Op_LoadElement:
+ case MDefinition::Op_LoadUnboxedScalar:
+ case MDefinition::Op_LoadUnboxedObjectOrNull:
+ case MDefinition::Op_LoadUnboxedString:
+ case MDefinition::Op_StoreElement:
+ case MDefinition::Op_StoreUnboxedObjectOrNull:
+ case MDefinition::Op_StoreUnboxedString:
+ case MDefinition::Op_StoreUnboxedScalar:
+ case MDefinition::Op_SetInitializedLength:
+ case MDefinition::Op_ArrayLength:
+ case MDefinition::Op_SetArrayLength:
+ case MDefinition::Op_StoreElementHole:
+ case MDefinition::Op_FallibleStoreElement:
+ case MDefinition::Op_TypedObjectDescr:
+ case MDefinition::Op_Slots:
+ case MDefinition::Op_Elements:
+ case MDefinition::Op_MaybeCopyElementsForWrite:
+ case MDefinition::Op_MaybeToDoubleElement:
+ case MDefinition::Op_UnboxedArrayLength:
+ case MDefinition::Op_UnboxedArrayInitializedLength:
+ case MDefinition::Op_IncrementUnboxedArrayInitializedLength:
+ case MDefinition::Op_SetUnboxedArrayInitializedLength:
+ case MDefinition::Op_TypedArrayLength:
+ case MDefinition::Op_SetTypedObjectOffset:
+ case MDefinition::Op_SetDisjointTypedElements:
+ case MDefinition::Op_ArrayPopShift:
+ case MDefinition::Op_ArrayPush:
+ case MDefinition::Op_ArraySlice:
+ case MDefinition::Op_LoadTypedArrayElementHole:
+ case MDefinition::Op_StoreTypedArrayElementHole:
+ case MDefinition::Op_LoadFixedSlot:
+ case MDefinition::Op_LoadFixedSlotAndUnbox:
+ case MDefinition::Op_StoreFixedSlot:
+ case MDefinition::Op_GetPropertyPolymorphic:
+ case MDefinition::Op_SetPropertyPolymorphic:
+ case MDefinition::Op_GuardShape:
+ case MDefinition::Op_GuardReceiverPolymorphic:
+ case MDefinition::Op_GuardObjectGroup:
+ case MDefinition::Op_GuardObjectIdentity:
+ case MDefinition::Op_GuardClass:
+ case MDefinition::Op_GuardUnboxedExpando:
+ case MDefinition::Op_LoadUnboxedExpando:
+ case MDefinition::Op_LoadSlot:
+ case MDefinition::Op_StoreSlot:
+ case MDefinition::Op_InArray:
+ case MDefinition::Op_LoadElementHole:
+ case MDefinition::Op_TypedArrayElements:
+ case MDefinition::Op_TypedObjectElements:
+ object = ins->getOperand(0);
+ break;
+ case MDefinition::Op_GetPropertyCache:
+ case MDefinition::Op_LoadTypedArrayElementStatic:
+ case MDefinition::Op_StoreTypedArrayElementStatic:
+ case MDefinition::Op_GetDOMProperty:
+ case MDefinition::Op_GetDOMMember:
+ case MDefinition::Op_Call:
+ case MDefinition::Op_Compare:
+ case MDefinition::Op_GetArgumentsObjectArg:
+ case MDefinition::Op_SetArgumentsObjectArg:
+ case MDefinition::Op_GetFrameArgument:
+ case MDefinition::Op_SetFrameArgument:
+ case MDefinition::Op_CompareExchangeTypedArrayElement:
+ case MDefinition::Op_AtomicExchangeTypedArrayElement:
+ case MDefinition::Op_AtomicTypedArrayElementBinop:
+ case MDefinition::Op_AsmJSLoadHeap:
+ case MDefinition::Op_AsmJSStoreHeap:
+ case MDefinition::Op_WasmLoad:
+ case MDefinition::Op_WasmStore:
+ case MDefinition::Op_AsmJSCompareExchangeHeap:
+ case MDefinition::Op_AsmJSAtomicBinopHeap:
+ case MDefinition::Op_WasmLoadGlobalVar:
+ case MDefinition::Op_WasmStoreGlobalVar:
+ case MDefinition::Op_ArrayJoin:
+ return nullptr;
+ default:
+#ifdef DEBUG
+        // Crash when the default getAliasSet() has been overridden but the instruction was not added to the list above.
+ if (!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any)
+ MOZ_CRASH("Overridden getAliasSet without updating AliasAnalysisShared GetObject");
+#endif
+
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any);
+ object = MaybeUnwrap(object);
+ MOZ_ASSERT_IF(object, object->type() == MIRType::Object);
+ return object;
+}
+
+// Generic check of whether a load aliases a store, using TI (type inference) information.
+MDefinition::AliasType
+AliasAnalysisShared::genericMightAlias(const MDefinition* load, const MDefinition* store)
+{
+ const MDefinition* loadObject = GetObject(load);
+ const MDefinition* storeObject = GetObject(store);
+ if (!loadObject || !storeObject)
+ return MDefinition::AliasType::MayAlias;
+
+ if (!loadObject->resultTypeSet() || !storeObject->resultTypeSet())
+ return MDefinition::AliasType::MayAlias;
+
+ if (loadObject->resultTypeSet()->objectsIntersect(storeObject->resultTypeSet()))
+ return MDefinition::AliasType::MayAlias;
+
+ return MDefinition::AliasType::NoAlias;
+}
+
+
+} // namespace jit
+} // namespace js
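genericMightAlias() above reports NoAlias only when type inference proves that the load's and the store's object type sets cannot intersect; anything unknown stays MayAlias. A simplified model of that rule, using std::set of hypothetical group ids in place of the engine's type sets (illustrative only, not SpiderMonkey API):

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <set>

enum class AliasType { NoAlias, MayAlias };
using TypeSet = std::set<int>;

AliasType genericMightAlias(const TypeSet* loadTypes, const TypeSet* storeTypes) {
    if (!loadTypes || !storeTypes)
        return AliasType::MayAlias;          // unknown object: stay conservative
    std::set<int> common;
    std::set_intersection(loadTypes->begin(), loadTypes->end(),
                          storeTypes->begin(), storeTypes->end(),
                          std::inserter(common, common.begin()));
    return common.empty() ? AliasType::NoAlias : AliasType::MayAlias;
}

int main() {
    TypeSet a{1, 2}, b{3};
    std::printf("%s\n",
                genericMightAlias(&a, &b) == AliasType::NoAlias ? "NoAlias" : "MayAlias");
    return 0;
}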
diff --git a/js/src/jit/AliasAnalysisShared.h b/js/src/jit/AliasAnalysisShared.h
new file mode 100644
index 000000000..dc19bdb16
--- /dev/null
+++ b/js/src/jit/AliasAnalysisShared.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AliasAnalysisShared_h
+#define jit_AliasAnalysisShared_h
+
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class AliasAnalysisShared
+{
+ protected:
+ MIRGenerator* mir;
+ MIRGraph& graph_;
+
+ public:
+ AliasAnalysisShared(MIRGenerator* mir, MIRGraph& graph)
+ : mir(mir),
+ graph_(graph)
+ {}
+
+ virtual MOZ_MUST_USE bool analyze() {
+ return true;
+ }
+
+ static MDefinition::AliasType genericMightAlias(const MDefinition* load,
+ const MDefinition* store);
+
+
+ protected:
+ void spewDependencyList();
+
+ TempAllocator& alloc() const {
+ return graph_.alloc();
+ }
+};
+
+// Iterates over the flags in an AliasSet.
+class AliasSetIterator
+{
+ private:
+ uint32_t flags;
+ unsigned pos;
+
+ public:
+ explicit AliasSetIterator(AliasSet set)
+ : flags(set.flags()), pos(0)
+ {
+ while (flags && (flags & 1) == 0) {
+ flags >>= 1;
+ pos++;
+ }
+ }
+ AliasSetIterator& operator ++(int) {
+ do {
+ flags >>= 1;
+ pos++;
+ } while (flags && (flags & 1) == 0);
+ return *this;
+ }
+ explicit operator bool() const {
+ return !!flags;
+ }
+ unsigned operator*() const {
+ MOZ_ASSERT(pos < AliasSet::NumCategories);
+ return pos;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AliasAnalysisShared_h */
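AliasSetIterator above walks the flag word of an AliasSet one bit at a time and yields the index of every set category, lowest bit first. The same bit walk as a standalone snippet, with an arbitrary flags value rather than a real AliasSet:

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t flags = 0b101001;   // categories 0, 3 and 5 are set
    unsigned pos = 0;
    while (flags) {
        if (flags & 1)
            std::printf("category %u\n", pos);   // prints 0, 3, 5
        flags >>= 1;
        pos++;
    }
    return 0;
}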
diff --git a/js/src/jit/AlignmentMaskAnalysis.cpp b/js/src/jit/AlignmentMaskAnalysis.cpp
new file mode 100644
index 000000000..d4fefec07
--- /dev/null
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AlignmentMaskAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace jit;
+
+static bool
+IsAlignmentMask(uint32_t m)
+{
+ // Test whether m is just leading ones and trailing zeros.
+ return (-m & ~m) == 0;
+}
+
+static void
+AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
+{
+ // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
+ // since the users of the BitAnd include heap accesses. This will expose
+ // the redundancy for GVN when expressions like this:
+ // a&m
+ // (a+1)&m,
+ // (a+2)&m,
+ // are transformed into this:
+ // a&m
+ // (a&m)+1
+ // (a&m)+2
+ // and it will allow the constants to be folded by the
+ // EffectiveAddressAnalysis pass.
+ //
+ // Putting the add on the outside might seem like it exposes other users of
+ // the expression to the possibility of i32 overflow, if we aren't in wasm
+ // and they aren't naturally truncating. However, since we use MAdd::New
+ // with MIRType::Int32, we make sure that the value is truncated, just as it
+ // would be by the MBitAnd.
+
+ MOZ_ASSERT(IsCompilingWasm());
+
+ if (!ptr->isBitAnd())
+ return;
+
+ MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
+ MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
+ if (lhs->isConstant())
+ mozilla::Swap(lhs, rhs);
+ if (!lhs->isAdd() || !rhs->isConstant())
+ return;
+
+ MDefinition* op0 = lhs->toAdd()->getOperand(0);
+ MDefinition* op1 = lhs->toAdd()->getOperand(1);
+ if (op0->isConstant())
+ mozilla::Swap(op0, op1);
+ if (!op1->isConstant())
+ return;
+
+ uint32_t i = op1->toConstant()->toInt32();
+ uint32_t m = rhs->toConstant()->toInt32();
+ if (!IsAlignmentMask(m) || (i & m) != i)
+ return;
+
+ // The pattern was matched! Produce the replacement expression.
+ MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
+ ptr->block()->insertBefore(ptr->toBitAnd(), and_);
+ MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
+ ptr->block()->insertBefore(ptr->toBitAnd(), add);
+ ptr->replaceAllUsesWith(add);
+ ptr->block()->discard(ptr->toBitAnd());
+}
+
+bool
+AlignmentMaskAnalysis::analyze()
+{
+ for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph_.alloc().ensureBallast())
+ return false;
+
+ // Note that we don't check for MAsmJSCompareExchangeHeap
+ // or MAsmJSAtomicBinopHeap, because the backend and the OOB
+ // mechanism don't support non-zero offsets for them yet.
+ if (i->isAsmJSLoadHeap())
+ AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
+ else if (i->isAsmJSStoreHeap())
+ AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
+ }
+ }
+ return true;
+}
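The rewrite performed by AnalyzeAsmHeapAddress relies on two facts: m is an alignment mask (leading ones, trailing zeros) and the folded-in constant i satisfies (i & m) == i, so (a + i) & m equals (a & m) + i in wrapping 32-bit arithmetic. A quick standalone check of that identity, with arbitrarily chosen test values rather than anything taken from the pass:

#include <cstdint>
#include <cstdio>

static bool IsAlignmentMask(uint32_t m) {
    return (-m & ~m) == 0;   // leading ones, trailing zeros
}

int main() {
    const uint32_t m = ~uint32_t(7);   // align to 8 bytes
    const uint32_t i = 16;             // satisfies (i & m) == i
    std::printf("IsAlignmentMask: %d\n", IsAlignmentMask(m));

    for (uint32_t a = 0; a < 100000; a++) {
        if (((a + i) & m) != ((a & m) + i)) {
            std::printf("mismatch at a=%u\n", a);
            return 1;
        }
    }
    std::printf("rewrite preserved the result for all tested a\n");
    return 0;
}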
diff --git a/js/src/jit/AlignmentMaskAnalysis.h b/js/src/jit/AlignmentMaskAnalysis.h
new file mode 100644
index 000000000..a455f29a2
--- /dev/null
+++ b/js/src/jit/AlignmentMaskAnalysis.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AlignmentMaskAnalysis_h
+#define jit_AlignmentMaskAnalysis_h
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class AlignmentMaskAnalysis
+{
+ MIRGraph& graph_;
+
+ public:
+ explicit AlignmentMaskAnalysis(MIRGraph& graph)
+ : graph_(graph)
+ {}
+
+ MOZ_MUST_USE bool analyze();
+};
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_AlignmentMaskAnalysis_h */
diff --git a/js/src/jit/AtomicOp.h b/js/src/jit/AtomicOp.h
new file mode 100644
index 000000000..9a686cdd7
--- /dev/null
+++ b/js/src/jit/AtomicOp.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOp_h
+#define jit_AtomicOp_h
+
+namespace js {
+namespace jit {
+
+// Types of atomic operation, shared by MIR and LIR.
+
+enum AtomicOp {
+ AtomicFetchAddOp,
+ AtomicFetchSubOp,
+ AtomicFetchAndOp,
+ AtomicFetchOrOp,
+ AtomicFetchXorOp
+};
+
+// Memory barrier types, shared by MIR and LIR.
+//
+// MembarSynchronizing is here because some platforms can make the
+// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
+// but there's been no reason to use it yet.
+
+enum MemoryBarrierBits {
+ MembarLoadLoad = 1,
+ MembarLoadStore = 2,
+ MembarStoreStore = 4,
+ MembarStoreLoad = 8,
+
+ MembarSynchronizing = 16,
+
+ // For validity testing
+ MembarNobits = 0,
+ MembarAllbits = 31,
+};
+
+static inline constexpr MemoryBarrierBits
+operator|(MemoryBarrierBits a, MemoryBarrierBits b)
+{
+ return MemoryBarrierBits(int(a) | int(b));
+}
+
+static inline constexpr MemoryBarrierBits
+operator&(MemoryBarrierBits a, MemoryBarrierBits b)
+{
+ return MemoryBarrierBits(int(a) & int(b));
+}
+
+static inline constexpr MemoryBarrierBits
+operator~(MemoryBarrierBits a)
+{
+ return MemoryBarrierBits(~int(a));
+}
+
+// Standard barrier bits for a full barrier.
+static constexpr MemoryBarrierBits MembarFull = MembarLoadLoad|MembarLoadStore|MembarStoreLoad|MembarStoreStore;
+
+// Standard sets of barrier bits for atomic loads and stores.
+// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more.
+static constexpr MemoryBarrierBits MembarBeforeLoad = MembarNobits;
+static constexpr MemoryBarrierBits MembarAfterLoad = MembarLoadLoad|MembarLoadStore;
+static constexpr MemoryBarrierBits MembarBeforeStore = MembarStoreStore;
+static constexpr MemoryBarrierBits MembarAfterStore = MembarStoreLoad;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AtomicOp_h */
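The barrier bits above are meant to be combined and tested with the constexpr operators. A self-contained illustration that restates a minimal copy of the enum and operators so the snippet compiles on its own (the values mirror this header; nothing new is defined beyond that):

#include <cstdio>

enum MemoryBarrierBits {
    MembarLoadLoad = 1, MembarLoadStore = 2,
    MembarStoreStore = 4, MembarStoreLoad = 8,
    MembarNobits = 0
};

static constexpr MemoryBarrierBits operator|(MemoryBarrierBits a, MemoryBarrierBits b) {
    return MemoryBarrierBits(int(a) | int(b));
}
static constexpr MemoryBarrierBits operator&(MemoryBarrierBits a, MemoryBarrierBits b) {
    return MemoryBarrierBits(int(a) & int(b));
}

int main() {
    constexpr MemoryBarrierBits afterLoad = MembarLoadLoad | MembarLoadStore;
    // A post-load barrier orders the load against later loads and stores,
    // but does not order earlier stores against later loads.
    std::printf("orders load->store: %s\n",
                (afterLoad & MembarLoadStore) != MembarNobits ? "yes" : "no");
    std::printf("orders store->load: %s\n",
                (afterLoad & MembarStoreLoad) != MembarNobits ? "yes" : "no");
    return 0;
}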
diff --git a/js/src/jit/AtomicOperations.h b/js/src/jit/AtomicOperations.h
new file mode 100644
index 000000000..42aee72eb
--- /dev/null
+++ b/js/src/jit/AtomicOperations.h
@@ -0,0 +1,353 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOperations_h
+#define jit_AtomicOperations_h
+
+#include "mozilla/Types.h"
+
+#include "vm/SharedMem.h"
+
+namespace js {
+namespace jit {
+
+class RegionLock;
+
+/*
+ * The atomic operations layer defines types and functions for
+ * JIT-compatible atomic operation.
+ *
+ * The fundamental constraints on the functions are:
+ *
+ * - That their realization here MUST be compatible with code the JIT
+ * generates for its Atomics operations, so that an atomic access
+ * from the interpreter or runtime - from any C++ code - really is
+ * atomic relative to a concurrent, compatible atomic access from
+ * jitted code. That is, these primitives expose JIT-compatible
+ * atomicity functionality to C++.
+ *
+ * - That accesses may race without creating C++ undefined behavior:
+ * atomic accesses (marked "SeqCst") may race with non-atomic
+ * accesses (marked "SafeWhenRacy"); overlapping but non-matching,
+ * and hence incompatible, atomic accesses may race; and non-atomic
+ * accesses may race. The effects of races need not be predictable,
+ * so garbage can be produced by a read or written by a write, but
+ * the effects must be benign: the program must continue to run, and
+ * only the memory in the union of addresses named in the racing
+ * accesses may be affected.
+ *
+ * The compatibility constraint means that if the JIT makes dynamic
+ * decisions about how to implement atomic operations then
+ * corresponding dynamic decisions MUST be made in the implementations
+ * of the functions below.
+ *
+ * The safe-for-races constraint means that by and large, it is hard
+ * to implement these primitives in C++. See "Implementation notes"
+ * below.
+ *
+ * The "SeqCst" suffix on operations means "sequentially consistent"
+ * and means such a function's operation must have "sequentially
+ * consistent" memory ordering. See mfbt/Atomics.h for an explanation
+ * of this memory ordering.
+ *
+ * Note that a "SafeWhenRacy" access does not provide the atomicity of
+ * a "relaxed atomic" access: it can read or write garbage if there's
+ * a race.
+ *
+ *
+ * Implementation notes.
+ *
+ * It's not a requirement that these functions be inlined; performance
+ * is not a great concern. On some platforms these functions may call
+ * out to code that's generated at run time.
+ *
+ * In principle these functions will not be written in C++, thus
+ * making races defined behavior if all racy accesses from C++ go via
+ * these functions. (Jitted code will always be safe for races and
+ * provides the same guarantees as these functions.)
+ *
+ * The appropriate implementations will be platform-specific and
+ * there are some obvious implementation strategies to choose
+ * from, sometimes a combination is appropriate:
+ *
+ * - generating the code at run-time with the JIT;
+ * - hand-written assembler (maybe inline); or
+ * - using special compiler intrinsics or directives.
+ *
+ * Trusting the compiler not to generate code that blows up on a
+ * race definitely won't work in the presence of TSan, or even of
+ * optimizing compilers in seemingly-"innocuous" conditions. (See
+ * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf
+ * for details.)
+ */
+class AtomicOperations
+{
+ friend class RegionLock;
+
+ private:
+ // The following functions are defined for T = int8_t, uint8_t,
+ // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t.
+
+ // Atomically read *addr.
+ template<typename T>
+ static inline T loadSeqCst(T* addr);
+
+ // Atomically store val in *addr.
+ template<typename T>
+ static inline void storeSeqCst(T* addr, T val);
+
+ // Atomically store val in *addr and return the old value of *addr.
+ template<typename T>
+ static inline T exchangeSeqCst(T* addr, T val);
+
+ // Atomically check that *addr contains oldval and if so replace it
+ // with newval, in any case returning the old contents of *addr.
+ template<typename T>
+ static inline T compareExchangeSeqCst(T* addr, T oldval, T newval);
+
+ // The following functions are defined for T = int8_t, uint8_t,
+ // int16_t, uint16_t, int32_t, uint32_t only.
+
+ // Atomically add, subtract, bitwise-AND, bitwise-OR, or bitwise-XOR
+ // val into *addr and return the old value of *addr.
+ template<typename T>
+ static inline T fetchAddSeqCst(T* addr, T val);
+
+ template<typename T>
+ static inline T fetchSubSeqCst(T* addr, T val);
+
+ template<typename T>
+ static inline T fetchAndSeqCst(T* addr, T val);
+
+ template<typename T>
+ static inline T fetchOrSeqCst(T* addr, T val);
+
+ template<typename T>
+ static inline T fetchXorSeqCst(T* addr, T val);
+
+ // The SafeWhenRacy functions are to be used when C++ code has to access
+ // memory without synchronization and can't guarantee that there
+ // won't be a race on the access.
+
+ // Defined for all the integral types as well as for float32 and float64.
+ template<typename T>
+ static inline T loadSafeWhenRacy(T* addr);
+
+ // Defined for all the integral types as well as for float32 and float64.
+ template<typename T>
+ static inline void storeSafeWhenRacy(T* addr, T val);
+
+ // Replacement for memcpy().
+ static inline void memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes);
+
+ // Replacement for memmove().
+ static inline void memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes);
+
+ public:
+ // Test lock-freedom for any int32 value. This implements the
+ // Atomics::isLockFree() operation in the Shared Memory and
+ // Atomics specification, as follows:
+ //
+ // 1, 2, and 4 bytes are always lock free (in SpiderMonkey).
+ //
+ // Lock-freedom for 8 bytes is determined by the platform's
+ // isLockfree8(). However, the spec stipulates that isLockFree(8)
+ // is true only if there is an integer array that admits atomic
+ // operations whose BYTES_PER_ELEMENT=8; at the moment (February
+ // 2016) there are no such arrays.
+ //
+ // There is no lock-freedom for any other values on any platform.
+ static inline bool isLockfree(int32_t n);
+
+ // If the return value is true then a call to the 64-bit (8-byte)
+ // routines below will work, otherwise those functions will assert in
+    // debug builds and may crash in release builds.  (See the code in
+ // ../arm for an example.) The value of this call does not change
+ // during execution.
+ static inline bool isLockfree8();
+
+ // Execute a full memory barrier (LoadLoad+LoadStore+StoreLoad+StoreStore).
+ static inline void fenceSeqCst();
+
+ // All clients should use the APIs that take SharedMem pointers.
+ // See above for semantics and acceptable types.
+
+ template<typename T>
+ static T loadSeqCst(SharedMem<T*> addr) {
+ return loadSeqCst(addr.unwrap());
+ }
+
+ template<typename T>
+ static void storeSeqCst(SharedMem<T*> addr, T val) {
+ return storeSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T exchangeSeqCst(SharedMem<T*> addr, T val) {
+ return exchangeSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T compareExchangeSeqCst(SharedMem<T*> addr, T oldval, T newval) {
+ return compareExchangeSeqCst(addr.unwrap(), oldval, newval);
+ }
+
+ template<typename T>
+ static T fetchAddSeqCst(SharedMem<T*> addr, T val) {
+ return fetchAddSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T fetchSubSeqCst(SharedMem<T*> addr, T val) {
+ return fetchSubSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T fetchAndSeqCst(SharedMem<T*> addr, T val) {
+ return fetchAndSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T fetchOrSeqCst(SharedMem<T*> addr, T val) {
+ return fetchOrSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T fetchXorSeqCst(SharedMem<T*> addr, T val) {
+ return fetchXorSeqCst(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static T loadSafeWhenRacy(SharedMem<T*> addr) {
+ return loadSafeWhenRacy(addr.unwrap());
+ }
+
+ template<typename T>
+ static void storeSafeWhenRacy(SharedMem<T*> addr, T val) {
+ return storeSafeWhenRacy(addr.unwrap(), val);
+ }
+
+ template<typename T>
+ static void memcpySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
+ memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
+ src.template cast<void*>().unwrap(), nbytes);
+ }
+
+ template<typename T>
+ static void memcpySafeWhenRacy(SharedMem<T*> dest, T* src, size_t nbytes) {
+ memcpySafeWhenRacy(dest.template cast<void*>().unwrap(), static_cast<void*>(src), nbytes);
+ }
+
+ template<typename T>
+ static void memcpySafeWhenRacy(T* dest, SharedMem<T*> src, size_t nbytes) {
+ memcpySafeWhenRacy(static_cast<void*>(dest), src.template cast<void*>().unwrap(), nbytes);
+ }
+
+ template<typename T>
+ static void memmoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
+ memmoveSafeWhenRacy(dest.template cast<void*>().unwrap(),
+ src.template cast<void*>().unwrap(), nbytes);
+ }
+
+ template<typename T>
+ static void podCopySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+ memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
+ }
+
+ template<typename T>
+ static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+ memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
+ }
+};
+
+/* A data type representing a lock on some region of a
+ * SharedArrayRawBuffer's memory, to be used only when the hardware
+ * does not provide necessary atomicity (eg, float64 access on ARMv6
+ * and some ARMv7 systems).
+ */
+class RegionLock
+{
+ public:
+ RegionLock() : spinlock(0) {}
+
+ /* Addr is the address to be locked, nbytes the number of bytes we
+ * need to lock. The lock that is taken may cover a larger range
+ * of bytes.
+ */
+ template<size_t nbytes>
+ void acquire(void* addr);
+
+ /* Addr is the address to be unlocked, nbytes the number of bytes
+ * we need to unlock. The lock must be held by the calling thread,
+ * at the given address and for the number of bytes.
+ */
+ template<size_t nbytes>
+ void release(void* addr);
+
+ private:
+ /* For now, a simple spinlock that covers the entire buffer. */
+ uint32_t spinlock;
+};
+
+inline bool
+AtomicOperations::isLockfree(int32_t size)
+{
+ // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.
+
+ switch (size) {
+ case 1:
+ return true;
+ case 2:
+ return true;
+ case 4:
+ // The spec requires Atomics.isLockFree(4) to return true.
+ return true;
+ case 8:
+ // The spec requires Atomics.isLockFree(n) to return false
+ // unless n is the BYTES_PER_ELEMENT value of some integer
+ // TypedArray that admits atomic operations. At the time of
+ // writing (February 2016) there is no such array with n=8.
+ // return AtomicOperations::isLockfree8();
+ return false;
+ default:
+ return false;
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/AtomicOperations-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/AtomicOperations-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# include "jit/mips-shared/AtomicOperations-mips-shared.h"
+#elif defined(__ppc__) || defined(__PPC__)
+# include "jit/none/AtomicOperations-ppc.h"
+#elif defined(__sparc__)
+# include "jit/none/AtomicOperations-sparc.h"
+#elif defined(JS_CODEGEN_NONE)
+ // You can disable the JIT with --disable-ion but you must still
+ // provide the atomic operations that will be used by the JS engine.
+ // When the JIT is disabled the operations are simply safe-for-races
+ // C++ realizations of atomics. These operations cannot be written
+ // in portable C++, hence the default here is to crash. See the
+ // top of the file for more guidance.
+# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__)
+# include "jit/none/AtomicOperations-ppc.h"
+# elif defined(__aarch64__)
+# include "jit/arm64/AtomicOperations-arm64.h"
+# else
+# include "jit/none/AtomicOperations-none.h" // These MOZ_CRASH() always
+# endif
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "jit/x86-shared/AtomicOperations-x86-shared.h"
+#else
+# error "Atomic operations must be defined for this platform"
+#endif
+
+#endif // jit_AtomicOperations_h
diff --git a/js/src/jit/BacktrackingAllocator.cpp b/js/src/jit/BacktrackingAllocator.cpp
new file mode 100644
index 000000000..94ef25785
--- /dev/null
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -0,0 +1,3124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BacktrackingAllocator.h"
+
+#include "jsprf.h"
+
+#include "jit/BitSet.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+/////////////////////////////////////////////////////////////////////
+// Utility
+/////////////////////////////////////////////////////////////////////
+
+static inline bool
+SortBefore(UsePosition* a, UsePosition* b)
+{
+ return a->pos <= b->pos;
+}
+
+static inline bool
+SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b)
+{
+ LiveRange* rangea = LiveRange::get(a);
+ LiveRange* rangeb = LiveRange::get(b);
+ MOZ_ASSERT(!rangea->intersects(rangeb));
+ return rangea->from() < rangeb->from();
+}
+
+static inline bool
+SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b)
+{
+ return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
+}
+
+template <typename T>
+static inline void
+InsertSortedList(InlineForwardList<T> &list, T* value)
+{
+ if (list.empty()) {
+ list.pushFront(value);
+ return;
+ }
+
+ if (SortBefore(list.back(), value)) {
+ list.pushBack(value);
+ return;
+ }
+
+ T* prev = nullptr;
+ for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
+ if (SortBefore(value, *iter))
+ break;
+ prev = *iter;
+ }
+
+ if (prev)
+ list.insertAfter(prev, value);
+ else
+ list.pushFront(value);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveRange
+/////////////////////////////////////////////////////////////////////
+
+void
+LiveRange::addUse(UsePosition* use)
+{
+ MOZ_ASSERT(covers(use->pos));
+ InsertSortedList(uses_, use);
+}
+
+void
+LiveRange::distributeUses(LiveRange* other)
+{
+ MOZ_ASSERT(other->vreg() == vreg());
+ MOZ_ASSERT(this != other);
+
+ // Move over all uses which fit in |other|'s boundaries.
+ for (UsePositionIterator iter = usesBegin(); iter; ) {
+ UsePosition* use = *iter;
+ if (other->covers(use->pos)) {
+ uses_.removeAndIncrement(iter);
+ other->addUse(use);
+ } else {
+ iter++;
+ }
+ }
+
+ // Distribute the definition to |other| as well, if possible.
+ if (hasDefinition() && from() == other->from())
+ other->setHasDefinition();
+}
+
+bool
+LiveRange::contains(LiveRange* other) const
+{
+ return from() <= other->from() && to() >= other->to();
+}
+
+void
+LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const
+{
+ MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
+
+ CodePosition innerFrom = from();
+ if (from() < other->from()) {
+ if (to() < other->from()) {
+ *pre = range_;
+ return;
+ }
+ *pre = Range(from(), other->from());
+ innerFrom = other->from();
+ }
+
+ CodePosition innerTo = to();
+ if (to() > other->to()) {
+ if (from() >= other->to()) {
+ *post = range_;
+ return;
+ }
+ *post = Range(other->to(), to());
+ innerTo = other->to();
+ }
+
+ if (innerFrom != innerTo)
+ *inside = Range(innerFrom, innerTo);
+}
+
+bool
+LiveRange::intersects(LiveRange* other) const
+{
+ Range pre, inside, post;
+ intersect(other, &pre, &inside, &post);
+ return !inside.empty();
+}
+
+/////////////////////////////////////////////////////////////////////
+// SpillSet
+/////////////////////////////////////////////////////////////////////
+
+void
+SpillSet::setAllocation(LAllocation alloc)
+{
+ for (size_t i = 0; i < numSpilledBundles(); i++)
+ spilledBundle(i)->setAllocation(alloc);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveBundle
+/////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+size_t
+LiveBundle::numRanges() const
+{
+ size_t count = 0;
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
+ count++;
+ return count;
+}
+#endif // DEBUG
+
+LiveRange*
+LiveBundle::rangeFor(CodePosition pos) const
+{
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->covers(pos))
+ return range;
+ }
+ return nullptr;
+}
+
+void
+LiveBundle::addRange(LiveRange* range)
+{
+ MOZ_ASSERT(!range->bundle());
+ range->setBundle(this);
+ InsertSortedList(ranges_, &range->bundleLink);
+}
+
+bool
+LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
+{
+ LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
+ if (!range)
+ return false;
+ addRange(range);
+ return true;
+}
+
+bool
+LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+ CodePosition from, CodePosition to)
+{
+ LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to);
+ if (!range)
+ return false;
+ addRange(range);
+ oldRange->distributeUses(range);
+ return true;
+}
+
+LiveRange*
+LiveBundle::popFirstRange()
+{
+ LiveRange::BundleLinkIterator iter = rangesBegin();
+ if (!iter)
+ return nullptr;
+
+ LiveRange* range = LiveRange::get(*iter);
+ ranges_.removeAt(iter);
+
+ range->setBundle(nullptr);
+ return range;
+}
+
+void
+LiveBundle::removeRange(LiveRange* range)
+{
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* existing = LiveRange::get(*iter);
+ if (existing == range) {
+ ranges_.removeAt(iter);
+ return;
+ }
+ }
+ MOZ_CRASH();
+}
+
+/////////////////////////////////////////////////////////////////////
+// VirtualRegister
+/////////////////////////////////////////////////////////////////////
+
+bool
+VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to)
+{
+ MOZ_ASSERT(from < to);
+
+ // Mark [from,to) as a live range for this register during the initial
+ // liveness analysis, coalescing with any existing overlapping ranges.
+
+ LiveRange* prev = nullptr;
+ LiveRange* merged = nullptr;
+ for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) {
+ LiveRange* existing = LiveRange::get(*iter);
+
+ if (from > existing->to()) {
+ // The new range should go after this one.
+ prev = existing;
+ iter++;
+ continue;
+ }
+
+ if (to.next() < existing->from()) {
+ // The new range should go before this one.
+ break;
+ }
+
+ if (!merged) {
+ // This is the first old range we've found that overlaps the new
+ // range. Extend this one to cover its union with the new range.
+ merged = existing;
+
+ if (from < existing->from())
+ existing->setFrom(from);
+ if (to > existing->to())
+ existing->setTo(to);
+
+ // Continue searching to see if any other old ranges can be
+ // coalesced with the new merged range.
+ iter++;
+ continue;
+ }
+
+ // Coalesce this range into the previous range we merged into.
+ MOZ_ASSERT(existing->from() >= merged->from());
+ if (existing->to() > merged->to())
+ merged->setTo(existing->to());
+
+ MOZ_ASSERT(!existing->hasDefinition());
+ existing->distributeUses(merged);
+ MOZ_ASSERT(!existing->hasUses());
+
+ ranges_.removeAndIncrement(iter);
+ }
+
+ if (!merged) {
+ // The new range does not overlap any existing range for the vreg.
+ LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to);
+ if (!range)
+ return false;
+
+ if (prev)
+ ranges_.insertAfter(&prev->registerLink, &range->registerLink);
+ else
+ ranges_.pushFront(&range->registerLink);
+ }
+
+ return true;
+}
+
+void
+VirtualRegister::addInitialUse(UsePosition* use)
+{
+ LiveRange::get(*rangesBegin())->addUse(use);
+}
+
+void
+VirtualRegister::setInitialDefinition(CodePosition from)
+{
+ LiveRange* first = LiveRange::get(*rangesBegin());
+ MOZ_ASSERT(from >= first->from());
+ first->setFrom(from);
+ first->setHasDefinition();
+}
+
+LiveRange*
+VirtualRegister::rangeFor(CodePosition pos, bool preferRegister /* = false */) const
+{
+ LiveRange* found = nullptr;
+ for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->covers(pos)) {
+ if (!preferRegister || range->bundle()->allocation().isRegister())
+ return range;
+ if (!found)
+ found = range;
+ }
+ }
+ return found;
+}
+
+void
+VirtualRegister::addRange(LiveRange* range)
+{
+ InsertSortedList(ranges_, &range->registerLink);
+}
+
+void
+VirtualRegister::removeRange(LiveRange* range)
+{
+ for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* existing = LiveRange::get(*iter);
+ if (existing == range) {
+ ranges_.removeAt(iter);
+ return;
+ }
+ }
+ MOZ_CRASH();
+}
+
+/////////////////////////////////////////////////////////////////////
+// BacktrackingAllocator
+/////////////////////////////////////////////////////////////////////
+
+// This function pre-allocates and initializes as much global state as possible
+// to avoid littering the algorithms with memory management cruft.
+bool
+BacktrackingAllocator::init()
+{
+ if (!RegisterAllocator::init())
+ return false;
+
+ liveIn = mir->allocate<BitSet>(graph.numBlockIds());
+ if (!liveIn)
+ return false;
+
+ size_t numVregs = graph.numVirtualRegisters();
+ if (!vregs.init(mir->alloc(), numVregs))
+ return false;
+ memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs);
+ for (uint32_t i = 0; i < numVregs; i++)
+ new(&vregs[i]) VirtualRegister();
+
+ // Build virtual register objects.
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Create data structures (main loop)"))
+ return false;
+
+ LBlock* block = graph.getBlock(i);
+ for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+ if (mir->shouldCancel("Create data structures (inner loop 1)"))
+ return false;
+
+ for (size_t j = 0; j < ins->numDefs(); j++) {
+ LDefinition* def = ins->getDef(j);
+ if (def->isBogusTemp())
+ continue;
+ vreg(def).init(*ins, def, /* isTemp = */ false);
+ }
+
+ for (size_t j = 0; j < ins->numTemps(); j++) {
+ LDefinition* def = ins->getTemp(j);
+ if (def->isBogusTemp())
+ continue;
+ vreg(def).init(*ins, def, /* isTemp = */ true);
+ }
+ }
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ LDefinition* def = phi->getDef(0);
+ vreg(def).init(phi, def, /* isTemp = */ false);
+ }
+ }
+
+ LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
+ while (!remainingRegisters.emptyGeneral()) {
+ AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
+ registers[reg.code()].allocatable = true;
+ }
+ while (!remainingRegisters.emptyFloat()) {
+ AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat());
+ registers[reg.code()].allocatable = true;
+ }
+
+ LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ registers[i].reg = AnyRegister::FromCode(i);
+ registers[i].allocations.setAllocator(lifoAlloc);
+ }
+
+ hotcode.setAllocator(lifoAlloc);
+ callRanges.setAllocator(lifoAlloc);
+
+ // Partition the graph into hot and cold sections, for helping to make
+ // splitting decisions. Since we don't have any profiling data this is a
+ // crapshoot, so just mark the bodies of inner loops as hot and everything
+ // else as cold.
+
+ LBlock* backedge = nullptr;
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+
+ // If we see a loop header, mark the backedge so we know when we have
+ // hit the end of the loop. Don't process the loop immediately, so that
+ // if there is an inner loop we will ignore the outer backedge.
+ if (block->mir()->isLoopHeader())
+ backedge = block->mir()->backedge()->lir();
+
+ if (block == backedge) {
+ LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
+ LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header),
+ exitOf(block).next());
+ if (!range || !hotcode.insert(range))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
+{
+ LiveRange* range = LiveRange::FallibleNew(alloc(), 0, from, to);
+ return range && registers[reg.code()].allocations.insert(range);
+}
+
+#ifdef DEBUG
+// Returns true iff ins has a def/temp reusing the input allocation.
+static bool
+IsInputReused(LInstruction* ins, LUse* use)
+{
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
+ {
+ return true;
+ }
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
+/*
+ * This function builds up liveness ranges for all virtual registers
+ * defined in the function.
+ *
+ * The algorithm is based on the one published in:
+ *
+ * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
+ * SSA Form." Proceedings of the International Symposium on Code Generation
+ * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
+ *
+ * The algorithm operates on blocks ordered such that dominators of a block
+ * are before the block itself, and such that all blocks of a loop are
+ * contiguous. It proceeds backwards over the instructions in this order,
+ * marking registers live at their uses, ending their live ranges at
+ * definitions, and recording which registers are live at the top of every
+ * block. To deal with loop backedges, registers live at the beginning of
+ * a loop gain a range covering the entire loop.
+ */
+bool
+BacktrackingAllocator::buildLivenessInfo()
+{
+ JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
+
+ Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
+ BitSet loopDone(graph.numBlockIds());
+ if (!loopDone.init(alloc()))
+ return false;
+
+ for (size_t i = graph.numBlocks(); i > 0; i--) {
+ if (mir->shouldCancel("Build Liveness Info (main loop)"))
+ return false;
+
+ LBlock* block = graph.getBlock(i - 1);
+ MBasicBlock* mblock = block->mir();
+
+ BitSet& live = liveIn[mblock->id()];
+ new (&live) BitSet(graph.numVirtualRegisters());
+ if (!live.init(alloc()))
+ return false;
+
+ // Propagate liveIn from our successors to us.
+ for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
+ MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
+ // Skip backedges, as we fix them up at the loop header.
+ if (mblock->id() < successor->id())
+ live.insertAll(liveIn[successor->id()]);
+ }
+
+ // Add successor phis.
+ if (mblock->successorWithPhis()) {
+ LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
+ for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
+ LPhi* phi = phiSuccessor->getPhi(j);
+ LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
+ uint32_t reg = use->toUse()->virtualRegister();
+ live.insert(reg);
+ vreg(use).setUsedByPhi();
+ }
+ }
+
+        // Registers are assumed live for the entire block; a definition
+        // shortens the range to the point of definition.
+ for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+ if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next()))
+ return false;
+ }
+
+ // Shorten the front end of ranges for live variables to their point of
+ // definition, if found.
+ for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
+ // Calls may clobber registers, so force a spill and reload around the callsite.
+ if (ins->isCall()) {
+ for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); ++iter) {
+ bool found = false;
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (ins->getDef(i)->isFixed() &&
+ ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
+ found = true;
+ break;
+ }
+ }
+ // If this register doesn't have an explicit def above, mark
+ // it as clobbered by the call unless it is actually
+ // call-preserved.
+ if (!found && !ins->isCallPreserved(*iter)) {
+ if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
+ return false;
+ }
+ }
+
+ CallRange* callRange =
+ new(alloc().fallible()) CallRange(outputOf(*ins), outputOf(*ins).next());
+ if (!callRange)
+ return false;
+
+ callRangesList.pushFront(callRange);
+ if (!callRanges.insert(callRange))
+ return false;
+ }
+ DebugOnly<bool> hasDoubleDef = false;
+ DebugOnly<bool> hasFloat32Def = false;
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->isBogusTemp())
+ continue;
+#ifdef DEBUG
+ if (def->type() == LDefinition::DOUBLE)
+ hasDoubleDef = true;
+ if (def->type() == LDefinition::FLOAT32)
+ hasFloat32Def = true;
+#endif
+ CodePosition from = outputOf(*ins);
+
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
+ // MUST_REUSE_INPUT is implemented by allocating an output
+ // register and moving the input to it. Register hints are
+ // used to avoid unnecessary moves. We give the input an
+ // LUse::ANY policy to avoid allocating a register for the
+ // input.
+ LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
+ MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
+ MOZ_ASSERT(inputUse->usedAtStart());
+ *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
+ }
+
+ if (!vreg(def).addInitialRange(alloc(), from, from.next()))
+ return false;
+ vreg(def).setInitialDefinition(from);
+ live.remove(def->virtualRegister());
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (temp->isBogusTemp())
+ continue;
+
+ // Normally temps are considered to cover both the input
+ // and output of the associated instruction. In some cases
+ // though we want to use a fixed register as both an input
+ // and clobbered register in the instruction, so watch for
+ // this and shorten the temp to cover only the output.
+ CodePosition from = inputOf(*ins);
+ if (temp->policy() == LDefinition::FIXED) {
+ AnyRegister reg = temp->output()->toRegister();
+ for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
+ if (alloc->isUse()) {
+ LUse* use = alloc->toUse();
+ if (use->isFixedRegister()) {
+ if (GetFixedRegister(vreg(use).def(), use) == reg)
+ from = outputOf(*ins);
+ }
+ }
+ }
+ }
+
+ CodePosition to =
+ ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
+
+ if (!vreg(temp).addInitialRange(alloc(), from, to))
+ return false;
+ vreg(temp).setInitialDefinition(from);
+ }
+
+ DebugOnly<bool> hasUseRegister = false;
+ DebugOnly<bool> hasUseRegisterAtStart = false;
+
+ for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
+ if (inputAlloc->isUse()) {
+ LUse* use = inputAlloc->toUse();
+
+ // Call uses should always be at-start, since calls use all
+ // registers.
+ MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
+ use->usedAtStart());
+
+#ifdef DEBUG
+ // Don't allow at-start call uses if there are temps of the same kind,
+ // so that we don't assign the same register. Only allow this when the
+ // use and temp are fixed registers, as they can't alias.
+ if (ins->isCall() && use->usedAtStart()) {
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ MOZ_ASSERT(vreg(ins->getTemp(i)).type() != vreg(use).type() ||
+ (use->isFixedRegister() && ins->getTemp(i)->isFixed()));
+ }
+ }
+
+ // If there are both useRegisterAtStart(x) and useRegister(y)
+ // uses, we may assign the same register to both operands
+ // (bug 772830). Don't allow this for now.
+ if (use->policy() == LUse::REGISTER) {
+ if (use->usedAtStart()) {
+ if (!IsInputReused(*ins, use))
+ hasUseRegisterAtStart = true;
+ } else {
+ hasUseRegister = true;
+ }
+ }
+ MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
+#endif
+
+ // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
+ if (use->policy() == LUse::RECOVERED_INPUT)
+ continue;
+
+ CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
+ if (use->isFixedRegister()) {
+ LAllocation reg(AnyRegister::FromCode(use->registerCode()));
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::FIXED && *def->output() == reg)
+ to = inputOf(*ins);
+ }
+ }
+
+ if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next()))
+ return false;
+ UsePosition* usePosition = new(alloc().fallible()) UsePosition(use, to);
+ if (!usePosition)
+ return false;
+ vreg(use).addInitialUse(usePosition);
+ live.insert(use->virtualRegister());
+ }
+ }
+ }
+
+ // Phis have simultaneous assignment semantics at block begin, so at
+ // the beginning of the block we can be sure that liveIn does not
+ // contain any phi outputs.
+ for (unsigned int i = 0; i < block->numPhis(); i++) {
+ LDefinition* def = block->getPhi(i)->getDef(0);
+ if (live.contains(def->virtualRegister())) {
+ live.remove(def->virtualRegister());
+ } else {
+ // This is a dead phi, so add a dummy range over all phis. This
+ // can go away if we have an earlier dead code elimination pass.
+ CodePosition entryPos = entryOf(block);
+ if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next()))
+ return false;
+ }
+ }
+
+ if (mblock->isLoopHeader()) {
+ // A divergence from the published algorithm is required here, as
+ // our block order does not guarantee that blocks of a loop are
+ // contiguous. As a result, a single live range spanning the
+ // loop is not possible. Additionally, we require liveIn in a later
+ // pass for resolution, so that must also be fixed up here.
+ MBasicBlock* loopBlock = mblock->backedge();
+ while (true) {
+ // Blocks must already have been visited to have a liveIn set.
+ MOZ_ASSERT(loopBlock->id() >= mblock->id());
+
+ // Add a range for this entire loop block
+ CodePosition from = entryOf(loopBlock->lir());
+ CodePosition to = exitOf(loopBlock->lir()).next();
+
+ for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+ if (!vregs[*liveRegId].addInitialRange(alloc(), from, to))
+ return false;
+ }
+
+ // Fix up the liveIn set.
+ liveIn[loopBlock->id()].insertAll(live);
+
+ // Make sure we don't visit this node again
+ loopDone.insert(loopBlock->id());
+
+ // If this is the loop header, any predecessors are either the
+ // backedge or out of the loop, so skip any predecessors of
+ // this block
+ if (loopBlock != mblock) {
+ for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
+ MBasicBlock* pred = loopBlock->getPredecessor(i);
+ if (loopDone.contains(pred->id()))
+ continue;
+ if (!loopWorkList.append(pred))
+ return false;
+ }
+ }
+
+ // Terminate loop if out of work.
+ if (loopWorkList.empty())
+ break;
+
+ // Grab the next block off the work list, skipping any OSR block.
+ MBasicBlock* osrBlock = graph.mir().osrBlock();
+ while (!loopWorkList.empty()) {
+ loopBlock = loopWorkList.popCopy();
+ if (loopBlock != osrBlock)
+ break;
+ }
+
+                // If we reach the end of the list without finding a non-OSR
+                // block, then there is no more work to do.
+ if (loopBlock == osrBlock) {
+ MOZ_ASSERT(loopWorkList.empty());
+ break;
+ }
+ }
+
+ // Clear the done set for other loops
+ loopDone.clear();
+ }
+
+ MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
+ }
+
+ JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
+
+ if (JitSpewEnabled(JitSpew_RegAlloc))
+ dumpInstructions();
+
+ return true;
+}
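// The comment above buildLivenessInfo() describes a backward, block-by-block
// liveness pass. The stand-alone sketch below (illustrative only; the
// ToyBlock/ToyInstr types are not SpiderMonkey API) shows the same backward
// dataflow on a toy CFG, without the range construction, loop patch-up, or
// phi handling that the real pass performs.
#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>

struct ToyInstr {
    std::vector<uint32_t> uses;  // vregs read by this instruction
    std::vector<uint32_t> defs;  // vregs written by this instruction
};

struct ToyBlock {
    std::vector<ToyInstr> instrs;
    std::vector<size_t> successors;  // indices into the block vector
};

// Assumes blocks are ordered so that iterating backwards visits non-backedge
// successors before their predecessors (e.g. reverse post order).
std::vector<std::set<uint32_t>>
ComputeLiveIn(const std::vector<ToyBlock>& blocks)
{
    std::vector<std::set<uint32_t>> liveIn(blocks.size());

    for (size_t i = blocks.size(); i > 0; i--) {
        const ToyBlock& block = blocks[i - 1];

        // Start from the union of the successors' liveIn sets, skipping
        // backedges just as the real pass does.
        std::set<uint32_t> live;
        for (size_t succ : block.successors) {
            if (succ > i - 1)
                live.insert(liveIn[succ].begin(), liveIn[succ].end());
        }

        // Walk instructions backwards: a definition ends a live range, a use
        // extends it upwards.
        for (size_t j = block.instrs.size(); j > 0; j--) {
            const ToyInstr& ins = block.instrs[j - 1];
            for (uint32_t def : ins.defs)
                live.erase(def);
            for (uint32_t use : ins.uses)
                live.insert(use);
        }

        liveIn[i - 1] = live;
    }
    return liveIn;
}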
+
+bool
+BacktrackingAllocator::go()
+{
+ JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
+
+ if (!init())
+ return false;
+
+ if (!buildLivenessInfo())
+ return false;
+
+ if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
+ return false;
+
+ JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
+ if (!mergeAndQueueRegisters())
+ return false;
+
+ if (JitSpewEnabled(JitSpew_RegAlloc))
+ dumpVregs();
+
+ JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
+
+ // Allocate, spill and split bundles until finished.
+ while (!allocationQueue.empty()) {
+ if (mir->shouldCancel("Backtracking Allocation"))
+ return false;
+
+ QueueItem item = allocationQueue.removeHighest();
+ if (!processBundle(mir, item.bundle))
+ return false;
+ }
+ JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");
+
+ if (!pickStackSlots())
+ return false;
+
+ if (JitSpewEnabled(JitSpew_RegAlloc))
+ dumpAllocations();
+
+ if (!resolveControlFlow())
+ return false;
+
+ if (!reifyAllocations())
+ return false;
+
+ if (!populateSafepoints())
+ return false;
+
+ if (!annotateMoveGroups())
+ return false;
+
+ return true;
+}
+
+static bool
+IsArgumentSlotDefinition(LDefinition* def)
+{
+ return def->policy() == LDefinition::FIXED && def->output()->isArgument();
+}
+
+static bool
+IsThisSlotDefinition(LDefinition* def)
+{
+ return IsArgumentSlotDefinition(def) &&
+ def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
+}
+
+bool
+BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1)
+{
+ // See if bundle0 and bundle1 can be merged together.
+ if (bundle0 == bundle1)
+ return true;
+
+ // Get a representative virtual register from each bundle.
+ VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
+ VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];
+
+ if (!reg0.isCompatible(reg1))
+ return true;
+
+ // Registers which might spill to the frame's |this| slot can only be
+ // grouped with other such registers. The frame's |this| slot must always
+ // hold the |this| value, as required by JitFrame tracing and by the Ion
+ // constructor calling convention.
+ if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
+ if (*reg0.def()->output() != *reg1.def()->output())
+ return true;
+ }
+
+ // Registers which might spill to the frame's argument slots can only be
+ // grouped with other such registers if the frame might access those
+ // arguments through a lazy arguments object or rest parameter.
+ if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) {
+ if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
+ if (*reg0.def()->output() != *reg1.def()->output())
+ return true;
+ }
+ }
+
+ // Limit the number of times we compare ranges if there are many ranges in
+ // one of the bundles, to avoid quadratic behavior.
+ static const size_t MAX_RANGES = 200;
+
+ // Make sure that ranges in the bundles do not overlap.
+ LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin();
+ size_t count = 0;
+ while (iter0 && iter1) {
+ if (++count >= MAX_RANGES)
+ return true;
+
+ LiveRange* range0 = LiveRange::get(*iter0);
+ LiveRange* range1 = LiveRange::get(*iter1);
+
+ if (range0->from() >= range1->to())
+ iter1++;
+ else if (range1->from() >= range0->to())
+ iter0++;
+ else
+ return true;
+ }
+
+ // Move all ranges from bundle1 into bundle0.
+ while (LiveRange* range = bundle1->popFirstRange())
+ bundle0->addRange(range);
+
+ return true;
+}
+
+static inline LDefinition*
+FindReusingDefOrTemp(LNode* ins, LAllocation* alloc)
+{
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(def->getReusedInput()) == alloc)
+ return def;
+ }
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* def = ins->getTemp(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(def->getReusedInput()) == alloc)
+ return def;
+ }
+ return nullptr;
+}
+
+static inline size_t
+NumReusingDefs(LNode* ins)
+{
+ size_t num = 0;
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT)
+ num++;
+ }
+ return num;
+}
+
+bool
+BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input)
+{
+ // def is a vreg which reuses input for its output physical register. Try
+ // to merge ranges for def with those of input if possible, as avoiding
+ // copies before def's instruction is crucial for generated code quality
+ // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
+
+ if (def.rangeFor(inputOf(def.ins()))) {
+ MOZ_ASSERT(def.isTemp());
+ def.setMustCopyInput();
+ return true;
+ }
+
+ LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
+ if (!inputRange) {
+ // The input is not live after the instruction, either in a safepoint
+ // for the instruction or in subsequent code. The input and output
+ // can thus be in the same group.
+ return tryMergeBundles(def.firstBundle(), input.firstBundle());
+ }
+
+ // The input is live afterwards, either in future instructions or in a
+ // safepoint for the reusing instruction. This is impossible to satisfy
+ // without copying the input.
+ //
+ // It may or may not be better to split the input into two bundles at the
+ // point of the definition, which may permit merging. One case where it is
+ // definitely better to split is if the input never has any register uses
+ // after the instruction. Handle this splitting eagerly.
+
+ LBlock* block = def.ins()->block();
+
+ // The input's lifetime must end within the same block as the definition,
+ // otherwise it could live on in phis elsewhere.
+ if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // If we already split the input for some other register, don't make a
+ // third bundle.
+ if (inputRange->bundle() != input.firstRange()->bundle()) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // If the input will start out in memory then adding a separate bundle for
+ // memory uses after the def won't help.
+ if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // The input cannot have register or reused uses after the definition.
+ for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
+ if (iter->pos <= inputOf(def.ins()))
+ continue;
+
+ LUse* use = iter->use();
+ if (FindReusingDefOrTemp(insData[iter->pos], use)) {
+ def.setMustCopyInput();
+ return true;
+ }
+ if (iter->usePolicy() != LUse::ANY && iter->usePolicy() != LUse::KEEPALIVE) {
+ def.setMustCopyInput();
+ return true;
+ }
+ }
+
+ LiveRange* preRange = LiveRange::FallibleNew(alloc(), input.vreg(),
+ inputRange->from(), outputOf(def.ins()));
+ if (!preRange)
+ return false;
+
+ // The new range starts at reg's input position, which means it overlaps
+ // with the old range at one position. This is what we want, because we
+ // need to copy the input before the instruction.
+ LiveRange* postRange = LiveRange::FallibleNew(alloc(), input.vreg(),
+ inputOf(def.ins()), inputRange->to());
+ if (!postRange)
+ return false;
+
+ inputRange->distributeUses(preRange);
+ inputRange->distributeUses(postRange);
+ MOZ_ASSERT(!inputRange->hasUses());
+
+ JitSpew(JitSpew_RegAlloc, " splitting reused input at %u to try to help grouping",
+ inputOf(def.ins()).bits());
+
+ LiveBundle* firstBundle = inputRange->bundle();
+ input.removeRange(inputRange);
+ input.addRange(preRange);
+ input.addRange(postRange);
+
+ firstBundle->removeRange(inputRange);
+ firstBundle->addRange(preRange);
+
+ // The new range goes in a separate bundle, where it will be spilled during
+ // allocation.
+ LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+ if (!secondBundle)
+ return false;
+ secondBundle->addRange(postRange);
+
+ return tryMergeBundles(def.firstBundle(), input.firstBundle());
+}
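// Aside (illustration, not SpiderMonkey code): merging a MUST_REUSE_INPUT def
// with its input pays off because two-address architectures overwrite one
// operand. Lowering c = a + b effectively becomes "copy a into the output
// register, then add b into it"; if a's bundle and the output's bundle share a
// register that copy disappears, and if a is still live afterwards the
// allocator must fall back to setMustCopyInput() as in the function above.
static int TwoAddressAdd(int a, int b)
{
    int out = a;   // move the input into the (reused) output register
    out += b;      // add b, out  -- clobbers the old value of a
    return out;
}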
+
+bool
+BacktrackingAllocator::mergeAndQueueRegisters()
+{
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+
+ // Create a bundle for each register containing all its ranges.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ if (!reg.hasRanges())
+ continue;
+
+ LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+ if (!bundle)
+ return false;
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ bundle->addRange(range);
+ }
+ }
+
+ // If there is an OSR block, merge parameters in that block with the
+ // corresponding parameters in the initial block.
+ if (MBasicBlock* osr = graph.mir().osrBlock()) {
+ size_t original = 1;
+ for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
+ if (iter->isParameter()) {
+ for (size_t i = 0; i < iter->numDefs(); i++) {
+ DebugOnly<bool> found = false;
+ VirtualRegister &paramVreg = vreg(iter->getDef(i));
+ for (; original < paramVreg.vreg(); original++) {
+ VirtualRegister &originalVreg = vregs[original];
+ if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
+ MOZ_ASSERT(originalVreg.ins()->isParameter());
+ if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
+ return false;
+ found = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(found);
+ }
+ }
+ }
+ }
+
+ // Try to merge registers with their reused inputs.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ if (!reg.hasRanges())
+ continue;
+
+ if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
+ LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
+ if (!tryMergeReusedRegister(reg, vreg(use)))
+ return false;
+ }
+ }
+
+ // Try to merge phis with their inputs.
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ VirtualRegister &outputVreg = vreg(phi->getDef(0));
+ for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
+ VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
+ if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle()))
+ return false;
+ }
+ }
+ }
+
+ // Add all bundles to the allocation queue, and create spill sets for them.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveBundle* bundle = range->bundle();
+ if (range == bundle->firstRange()) {
+ if (!alloc().ensureBallast())
+ return false;
+
+ SpillSet* spill = SpillSet::New(alloc());
+ if (!spill)
+ return false;
+ bundle->setSpillSet(spill);
+
+ size_t priority = computePriority(bundle);
+ if (!allocationQueue.insert(QueueItem(bundle, priority)))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static const size_t MAX_ATTEMPTS = 2;
+
+bool
+BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+ bool* success, bool* pfixed,
+ LiveBundleVector& conflicting)
+{
+ // Spill bundles which are required to be in a certain stack slot.
+ if (!requirement.allocation().isRegister()) {
+ JitSpew(JitSpew_RegAlloc, " stack allocation requirement");
+ bundle->setAllocation(requirement.allocation());
+ *success = true;
+ return true;
+ }
+
+ AnyRegister reg = requirement.allocation().toRegister();
+ return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting);
+}
+
+bool
+BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
+ Requirement requirement, Requirement hint,
+ bool* success, bool* pfixed,
+ LiveBundleVector& conflicting)
+{
+ // If we want, but do not require a bundle to be in a specific register,
+ // only look at that register for allocating and evict or spill if it is
+ // not available. Picking a separate register may be even worse than
+ // spilling, as it will still necessitate moves and will tie up more
+ // registers than if we spilled.
+ if (hint.kind() == Requirement::FIXED) {
+ AnyRegister reg = hint.allocation().toRegister();
+ if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
+ return false;
+ if (*success)
+ return true;
+ }
+
+ // Spill bundles which have no hint or register requirement.
+ if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
+ if (!spill(bundle))
+ return false;
+ *success = true;
+ return true;
+ }
+
+ if (conflicting.empty() || minimalBundle(bundle)) {
+ // Search for any available register which the bundle can be
+ // allocated to.
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
+ return false;
+ if (*success)
+ return true;
+ }
+ }
+
+ // Spill bundles which have no register requirement if they didn't get
+ // allocated.
+ if (requirement.kind() == Requirement::NONE) {
+ if (!spill(bundle))
+ return false;
+ *success = true;
+ return true;
+ }
+
+ // We failed to allocate this bundle.
+ MOZ_ASSERT(!*success);
+ return true;
+}
+
+bool
+BacktrackingAllocator::processBundle(MIRGenerator* mir, LiveBundle* bundle)
+{
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
+ bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
+ }
+
+ // A bundle can be processed by doing any of the following:
+ //
+ // - Assigning the bundle a register. The bundle cannot overlap any other
+ // bundle allocated for that physical register.
+ //
+ // - Spilling the bundle, provided it has no register uses.
+ //
+ // - Splitting the bundle into two or more bundles which cover the original
+ // one. The new bundles are placed back onto the priority queue for later
+ // processing.
+ //
+ // - Evicting one or more existing allocated bundles, and then doing one
+ // of the above operations. Evicted bundles are placed back on the
+ // priority queue. Any evicted bundles must have a lower spill weight
+ // than the bundle being processed.
+ //
+ // As long as this structure is followed, termination is guaranteed.
+ // In general, we want to minimize the amount of bundle splitting (which
+ // generally necessitates spills), so allocate longer lived, lower weight
+ // bundles first and evict and split them later if they prevent allocation
+ // for higher weight bundles.
+
+ Requirement requirement, hint;
+ bool canAllocate = computeRequirement(bundle, &requirement, &hint);
+
+ bool fixed;
+ LiveBundleVector conflicting;
+ for (size_t attempt = 0;; attempt++) {
+ if (mir->shouldCancel("Backtracking Allocation (processBundle loop)"))
+ return false;
+
+ if (canAllocate) {
+ bool success = false;
+ fixed = false;
+ conflicting.clear();
+
+ // Ok, let's try allocating for this bundle.
+ if (requirement.kind() == Requirement::FIXED) {
+ if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
+ return false;
+ } else {
+ if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
+ return false;
+ }
+
+ // If that worked, we're done!
+ if (success)
+ return true;
+
+ // If that didn't work, but we have one or more non-fixed bundles
+ // known to be conflicting, maybe we can evict them and try again.
+ if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) &&
+ !fixed &&
+ !conflicting.empty() &&
+ maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
+ {
+ for (size_t i = 0; i < conflicting.length(); i++) {
+ if (!evictBundle(conflicting[i]))
+ return false;
+ }
+ continue;
+ }
+ }
+
+ // A minimal bundle cannot be split any further. If we try to split it
+        // at this point we will just end up with the same bundle and will
+ // enter an infinite loop. Weights and the initial live ranges must
+ // be constructed so that any minimal bundle is allocatable.
+ MOZ_ASSERT(!minimalBundle(bundle));
+
+ LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
+ return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
+ }
+}
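// Schematic version (illustrative only) of the allocate / evict / split loop
// that processBundle() documents above. The Toy* types and the four helper
// functions are placeholders, not SpiderMonkey API; the real code also caps
// retries per bundle (MAX_ATTEMPTS) and never splits minimal bundles.
#include <algorithm>
#include <cstddef>
#include <queue>
#include <vector>

struct ToyBundle {
    size_t priority;     // e.g. total code range covered
    size_t spillWeight;  // cost of spilling or splitting this bundle
};

struct ByPriority {
    bool operator()(const ToyBundle* a, const ToyBundle* b) const {
        return a->priority < b->priority;  // highest priority processed first
    }
};

using ToyQueue = std::priority_queue<ToyBundle*, std::vector<ToyBundle*>, ByPriority>;

// Placeholder hooks standing in for tryAllocateFixed/NonFixed, evictBundle,
// chooseBundleSplit and spill.
bool TryAllocate(ToyBundle* bundle, std::vector<ToyBundle*>* conflicts);
void Evict(ToyBundle* bundle);
std::vector<ToyBundle*> Split(ToyBundle* bundle);
void Spill(ToyBundle* bundle);

static size_t MaxWeight(const std::vector<ToyBundle*>& bundles)
{
    size_t weight = 0;
    for (ToyBundle* b : bundles)
        weight = std::max(weight, b->spillWeight);
    return weight;
}

void AllocateAll(ToyQueue& queue)
{
    while (!queue.empty()) {
        ToyBundle* bundle = queue.top();
        queue.pop();

        std::vector<ToyBundle*> conflicts;
        if (TryAllocate(bundle, &conflicts))
            continue;  // assigned a register

        // Evict strictly lighter conflicting bundles and retry both them and
        // this bundle later; this mirrors processBundle()'s eviction path.
        if (!conflicts.empty() && MaxWeight(conflicts) < bundle->spillWeight) {
            for (ToyBundle* conflict : conflicts) {
                Evict(conflict);
                queue.push(conflict);
            }
            queue.push(bundle);
            continue;
        }

        // Otherwise split the bundle (or spill it if it cannot be split) and
        // requeue the pieces; each piece covers strictly less, so progress is
        // guaranteed.
        std::vector<ToyBundle*> pieces = Split(bundle);
        if (pieces.empty()) {
            Spill(bundle);
            continue;
        }
        for (ToyBundle* piece : pieces)
            queue.push(piece);
    }
}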
+
+bool
+BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
+ Requirement *requirement, Requirement *hint)
+{
+ // Set any requirement or hint on bundle according to its definition and
+ // uses. Return false if there are conflicting requirements which will
+ // require the bundle to be split.
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ VirtualRegister &reg = vregs[range->vreg()];
+
+ if (range->hasDefinition()) {
+ // Deal with any definition constraints/hints.
+ LDefinition::Policy policy = reg.def()->policy();
+ if (policy == LDefinition::FIXED) {
+ // Fixed policies get a FIXED requirement.
+ JitSpew(JitSpew_RegAlloc, " Requirement %s, fixed by definition",
+ reg.def()->output()->toString().get());
+ if (!requirement->merge(Requirement(*reg.def()->output())))
+ return false;
+ } else if (reg.ins()->isPhi()) {
+ // Phis don't have any requirements, but they should prefer their
+ // input allocations. This is captured by the group hints above.
+ } else {
+ // Non-phis get a REGISTER requirement.
+ if (!requirement->merge(Requirement(Requirement::REGISTER)))
+ return false;
+ }
+ }
+
+ // Search uses for requirements.
+ for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+ LUse::Policy policy = iter->usePolicy();
+ if (policy == LUse::FIXED) {
+ AnyRegister required = GetFixedRegister(reg.def(), iter->use());
+
+ JitSpew(JitSpew_RegAlloc, " Requirement %s, due to use at %u",
+ required.name(), iter->pos.bits());
+
+ // If there are multiple fixed registers which the bundle is
+ // required to use, fail. The bundle will need to be split before
+ // it can be allocated.
+ if (!requirement->merge(Requirement(LAllocation(required))))
+ return false;
+ } else if (policy == LUse::REGISTER) {
+ if (!requirement->merge(Requirement(Requirement::REGISTER)))
+ return false;
+ } else if (policy == LUse::ANY) {
+ // ANY differs from KEEPALIVE by actively preferring a register.
+ if (!hint->merge(Requirement(Requirement::REGISTER)))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+ bool* success, bool* pfixed, LiveBundleVector& conflicting)
+{
+ *success = false;
+
+ if (!r.allocatable)
+ return true;
+
+ LiveBundleVector aliasedConflicting;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ VirtualRegister &reg = vregs[range->vreg()];
+
+ if (!reg.isCompatible(r.reg))
+ return true;
+
+ for (size_t a = 0; a < r.reg.numAliased(); a++) {
+ PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
+ LiveRange* existing;
+ if (!rAlias.allocations.contains(range, &existing))
+ continue;
+ if (existing->hasVreg()) {
+ MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
+ bool duplicate = false;
+ for (size_t i = 0; i < aliasedConflicting.length(); i++) {
+ if (aliasedConflicting[i] == existing->bundle()) {
+ duplicate = true;
+ break;
+ }
+ }
+ if (!duplicate && !aliasedConflicting.append(existing->bundle()))
+ return false;
+ } else {
+ JitSpew(JitSpew_RegAlloc, " %s collides with fixed use %s",
+ rAlias.reg.name(), existing->toString().get());
+ *pfixed = true;
+ return true;
+ }
+ }
+ }
+
+ if (!aliasedConflicting.empty()) {
+        // One or more aliased registers are allocated to another bundle
+ // overlapping this one. Keep track of the conflicting set, and in the
+ // case of multiple conflicting sets keep track of the set with the
+ // lowest maximum spill weight.
+
+ // The #ifdef guards against "unused variable 'existing'" bustage.
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ if (aliasedConflicting.length() == 1) {
+ LiveBundle* existing = aliasedConflicting[0];
+ JitSpew(JitSpew_RegAlloc, " %s collides with %s [weight %" PRIuSIZE "]",
+ r.reg.name(), existing->toString().get(), computeSpillWeight(existing));
+ } else {
+ JitSpew(JitSpew_RegAlloc, " %s collides with the following", r.reg.name());
+ for (size_t i = 0; i < aliasedConflicting.length(); i++) {
+ LiveBundle* existing = aliasedConflicting[i];
+ JitSpew(JitSpew_RegAlloc, " %s [weight %" PRIuSIZE "]",
+ existing->toString().get(), computeSpillWeight(existing));
+ }
+ }
+ }
+#endif
+
+ if (conflicting.empty()) {
+ if (!conflicting.appendAll(aliasedConflicting))
+ return false;
+ } else {
+ if (maximumSpillWeight(aliasedConflicting) < maximumSpillWeight(conflicting)) {
+ conflicting.clear();
+ if (!conflicting.appendAll(aliasedConflicting))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ JitSpew(JitSpew_RegAlloc, " allocated to %s", r.reg.name());
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!alloc().ensureBallast())
+ return false;
+ if (!r.allocations.insert(range))
+ return false;
+ }
+
+ bundle->setAllocation(LAllocation(r.reg));
+ *success = true;
+ return true;
+}
+
+bool
+BacktrackingAllocator::evictBundle(LiveBundle* bundle)
+{
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ JitSpew(JitSpew_RegAlloc, " Evicting %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
+ bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
+ }
+
+ AnyRegister reg(bundle->allocation().toRegister());
+ PhysicalRegister& physical = registers[reg.code()];
+ MOZ_ASSERT(physical.reg == reg && physical.allocatable);
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ physical.allocations.remove(range);
+ }
+
+ bundle->setAllocation(LAllocation());
+
+ size_t priority = computePriority(bundle);
+ return allocationQueue.insert(QueueItem(bundle, priority));
+}
+
+bool
+BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
+ const LiveBundleVector& newBundles)
+{
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ JitSpew(JitSpew_RegAlloc, " splitting bundle %s into:", bundle->toString().get());
+ for (size_t i = 0; i < newBundles.length(); i++)
+ JitSpew(JitSpew_RegAlloc, " %s", newBundles[i]->toString().get());
+ }
+
+ // Remove all ranges in the old bundle from their register's list.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ vregs[range->vreg()].removeRange(range);
+ }
+
+ // Add all ranges in the new bundles to their register's list.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* newBundle = newBundles[i];
+ for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ vregs[range->vreg()].addRange(range);
+ }
+ }
+
+ // Queue the new bundles for register assignment.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* newBundle = newBundles[i];
+ size_t priority = computePriority(newBundle);
+ if (!allocationQueue.insert(QueueItem(newBundle, priority)))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::spill(LiveBundle* bundle)
+{
+ JitSpew(JitSpew_RegAlloc, " Spilling bundle");
+ MOZ_ASSERT(bundle->allocation().isBogus());
+
+ if (LiveBundle* spillParent = bundle->spillParent()) {
+ JitSpew(JitSpew_RegAlloc, " Using existing spill bundle");
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRange* parentRange = spillParent->rangeFor(range->from());
+ MOZ_ASSERT(parentRange->contains(range));
+ MOZ_ASSERT(range->vreg() == parentRange->vreg());
+ range->distributeUses(parentRange);
+ MOZ_ASSERT(!range->hasUses());
+ vregs[range->vreg()].removeRange(range);
+ }
+ return true;
+ }
+
+ return bundle->spillSet()->addSpilledBundle(bundle);
+}
+
+bool
+BacktrackingAllocator::pickStackSlots()
+{
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel("Backtracking Pick Stack Slots"))
+ return false;
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveBundle* bundle = range->bundle();
+
+ if (bundle->allocation().isBogus()) {
+ if (!pickStackSlot(bundle->spillSet()))
+ return false;
+ MOZ_ASSERT(!bundle->allocation().isBogus());
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::pickStackSlot(SpillSet* spillSet)
+{
+ // Look through all ranges that have been spilled in this set for a
+ // register definition which is fixed to a stack or argument slot. If we
+ // find one, use it for all bundles that have been spilled. tryMergeBundles
+ // makes sure this reuse is possible when an initial bundle contains ranges
+ // from multiple virtual registers.
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->hasDefinition()) {
+ LDefinition* def = vregs[range->vreg()].def();
+ if (def->policy() == LDefinition::FIXED) {
+ MOZ_ASSERT(!def->output()->isRegister());
+ MOZ_ASSERT(!def->output()->isStackSlot());
+ spillSet->setAllocation(*def->output());
+ return true;
+ }
+ }
+ }
+ }
+
+ LDefinition::Type type = vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type();
+
+ SpillSlotList* slotList;
+ switch (StackSlotAllocator::width(type)) {
+ case 4: slotList = &normalSlots; break;
+ case 8: slotList = &doubleSlots; break;
+ case 16: slotList = &quadSlots; break;
+ default:
+ MOZ_CRASH("Bad width");
+ }
+
+ // Maximum number of existing spill slots we will look at before giving up
+ // and allocating a new slot.
+ static const size_t MAX_SEARCH_COUNT = 10;
+
+ size_t searches = 0;
+ SpillSlot* stop = nullptr;
+ while (!slotList->empty()) {
+ SpillSlot* spillSlot = *slotList->begin();
+ if (!stop) {
+ stop = spillSlot;
+ } else if (stop == spillSlot) {
+ // We looked through every slot in the list.
+ break;
+ }
+
+ bool success = true;
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRange* existing;
+ if (spillSlot->allocated.contains(range, &existing)) {
+ success = false;
+ break;
+ }
+ }
+ if (!success)
+ break;
+ }
+ if (success) {
+ // We can reuse this physical stack slot for the new bundles.
+ // Update the allocated ranges for the slot.
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ if (!insertAllRanges(spillSlot->allocated, bundle))
+ return false;
+ }
+ spillSet->setAllocation(spillSlot->alloc);
+ return true;
+ }
+
+        // On a miss, move the spill slot to the end of the list. This will
+        // cause us to make fewer attempts to allocate from slots whose ranges
+        // are large and highly contended.
+ slotList->popFront();
+ slotList->pushBack(spillSlot);
+
+ if (++searches == MAX_SEARCH_COUNT)
+ break;
+ }
+
+ // We need a new physical stack slot.
+ uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
+
+ SpillSlot* spillSlot = new(alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
+ if (!spillSlot)
+ return false;
+
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ if (!insertAllRanges(spillSlot->allocated, bundle))
+ return false;
+ }
+
+ spillSet->setAllocation(spillSlot->alloc);
+
+ slotList->pushFront(spillSlot);
+ return true;
+}
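// A compact model (illustrative; not SpiderMonkey types) of the slot-reuse
// search above: try to place a new live range into an existing spill slot
// whose occupied ranges it does not overlap, rotate misses to the back of the
// list, cap the search, and otherwise allocate a fresh slot.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <utility>
#include <vector>

struct ToySpillSlot {
    uint32_t id;
    std::vector<std::pair<uint32_t, uint32_t>> occupied;  // [from, to) ranges
};

static bool OverlapsSlot(const ToySpillSlot& slot, std::pair<uint32_t, uint32_t> range)
{
    for (const auto& r : slot.occupied) {
        if (range.first < r.second && r.first < range.second)
            return true;
    }
    return false;
}

uint32_t
PickToySlot(std::deque<ToySpillSlot>& slots, std::pair<uint32_t, uint32_t> range,
            uint32_t* nextFreshId, size_t maxSearch = 10)
{
    // Examine at most maxSearch slots, and each slot at most once.
    size_t limit = std::min(maxSearch, slots.size());
    for (size_t i = 0; i < limit; i++) {
        ToySpillSlot slot = std::move(slots.front());
        slots.pop_front();

        if (!OverlapsSlot(slot, range)) {
            // Reuse this slot and keep it near the front of the list.
            slot.occupied.push_back(range);
            uint32_t id = slot.id;
            slots.push_front(std::move(slot));
            return id;
        }

        // Miss: rotate to the back, as pickStackSlot() does, so heavily
        // contended slots are considered less often.
        slots.push_back(std::move(slot));
    }

    // No reusable slot: allocate a new one and put it at the front.
    ToySpillSlot fresh{(*nextFreshId)++, {range}};
    uint32_t id = fresh.id;
    slots.push_front(std::move(fresh));
    return id;
}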
+
+bool
+BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle)
+{
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!alloc().ensureBallast())
+ return false;
+ if (!set.insert(range))
+ return false;
+ }
+ return true;
+}
+
+bool
+BacktrackingAllocator::deadRange(LiveRange* range)
+{
+ // Check for direct uses of this range.
+ if (range->hasUses() || range->hasDefinition())
+ return false;
+
+ CodePosition start = range->from();
+ LNode* ins = insData[start];
+ if (start == entryOf(ins->block()))
+ return false;
+
+ VirtualRegister& reg = vregs[range->vreg()];
+
+ // Check if there are later ranges for this vreg.
+ LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
+ for (iter++; iter; iter++) {
+ LiveRange* laterRange = LiveRange::get(*iter);
+ if (laterRange->from() > range->from())
+ return false;
+ }
+
+ // Check if this range ends at a loop backedge.
+ LNode* last = insData[range->to().previous()];
+ if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id())
+ return false;
+
+ // Check if there are phis which this vreg flows to.
+ if (reg.usedByPhi())
+ return false;
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::resolveControlFlow()
+{
+ // Add moves to handle changing assignments for vregs over their lifetime.
+ JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
+
+ // Look for places where a register's assignment changes in the middle of a
+ // basic block.
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)"))
+ return false;
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; ) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)"))
+ return false;
+
+ // Remove ranges which will never be used.
+ if (deadRange(range)) {
+ reg.removeRangeAndIncrement(iter);
+ continue;
+ }
+
+ // The range which defines the register does not have a predecessor
+ // to add moves from.
+ if (range->hasDefinition()) {
+ iter++;
+ continue;
+ }
+
+ // Ignore ranges that start at block boundaries. We will handle
+ // these in the next phase.
+ CodePosition start = range->from();
+ LNode* ins = insData[start];
+ if (start == entryOf(ins->block())) {
+ iter++;
+ continue;
+ }
+
+ // If we already saw a range which covers the start of this range
+ // and has the same allocation, we don't need an explicit move at
+ // the start of this range.
+ bool skip = false;
+ for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
+ prevIter != iter;
+ prevIter++)
+ {
+ LiveRange* prevRange = LiveRange::get(*prevIter);
+ if (prevRange->covers(start) &&
+ prevRange->bundle()->allocation() == range->bundle()->allocation())
+ {
+ skip = true;
+ break;
+ }
+ }
+ if (skip) {
+ iter++;
+ continue;
+ }
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ LiveRange* predecessorRange = reg.rangeFor(start.previous(), /* preferRegister = */ true);
+ if (start.subpos() == CodePosition::INPUT) {
+ if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
+ return false;
+ } else {
+ if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
+ return false;
+ }
+
+ iter++;
+ }
+ }
+
+ JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
+ return false;
+
+ LBlock* successor = graph.getBlock(i);
+ MBasicBlock* mSuccessor = successor->mir();
+ if (mSuccessor->numPredecessors() < 1)
+ continue;
+
+ // Resolve phis to moves.
+ for (size_t j = 0; j < successor->numPhis(); j++) {
+ LPhi* phi = successor->getPhi(j);
+ MOZ_ASSERT(phi->numDefs() == 1);
+ LDefinition* def = phi->getDef(0);
+ VirtualRegister& reg = vreg(def);
+ LiveRange* to = reg.rangeFor(entryOf(successor));
+ MOZ_ASSERT(to);
+
+ for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
+ LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
+ MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
+
+ LAllocation* input = phi->getOperand(k);
+ LiveRange* from = vreg(input).rangeFor(exitOf(predecessor), /* preferRegister = */ true);
+ MOZ_ASSERT(from);
+
+ if (!alloc().ensureBallast())
+ return false;
+ if (!moveAtExit(predecessor, from, to, def->type()))
+ return false;
+ }
+ }
+ }
+
+ // Add moves to resolve graph edges with different allocations at their
+ // source and target.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* targetRange = LiveRange::get(*iter);
+
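+ // Walk the blocks whose entry this range covers. If the range starts
+ // partway through its first block, that block's entry is not covered,
+ // so start with the following block.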
+ size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
+ if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId))))
+ firstBlockId++;
+ for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
+ LBlock* successor = graph.getBlock(id);
+ if (!targetRange->covers(entryOf(successor)))
+ break;
+
+ BitSet& live = liveIn[id];
+ if (!live.contains(i))
+ continue;
+
+ for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
+ LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
+ if (targetRange->covers(exitOf(predecessor)))
+ continue;
+
+ if (!alloc().ensureBallast())
+ return false;
+ LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
+ if (successor->mir()->numPredecessors() > 1) {
+ MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
+ if (!moveAtExit(predecessor, from, targetRange, reg.type()))
+ return false;
+ } else {
+ if (!moveAtEntry(successor, from, targetRange, reg.type()))
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins, bool considerCopy)
+{
+ if (LDefinition* def = FindReusingDefOrTemp(ins, use))
+ return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
+ return false;
+}
+
+bool
+BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy)
+{
+ switch (use->usePolicy()) {
+ case LUse::ANY:
+ return isReusedInput(use->use(), ins, considerCopy);
+
+ case LUse::REGISTER:
+ case LUse::FIXED:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool
+BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
+{
+ if (!range->hasDefinition())
+ return false;
+
+ VirtualRegister& reg = vregs[range->vreg()];
+ if (reg.ins()->isPhi())
+ return false;
+
+ if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
+ return false;
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::reifyAllocations()
+{
+ JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
+
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
+ return false;
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (range->hasDefinition()) {
+ reg.def()->setOutput(range->bundle()->allocation());
+ if (reg.ins()->recoversInput()) {
+ LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
+ for (size_t i = 0; i < snapshot->numEntries(); i++) {
+ LAllocation* entry = snapshot->getEntry(i);
+ if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
+ *entry = *reg.def()->output();
+ }
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LAllocation* alloc = iter->use();
+ *alloc = range->bundle()->allocation();
+
+ // For any uses which feed into MUST_REUSE_INPUT definitions,
+ // add copies if the use and def have different allocations.
+ LNode* ins = insData[iter->pos];
+ if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
+ LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
+ LAllocation res = outputRange->bundle()->allocation();
+ LAllocation sourceAlloc = range->bundle()->allocation();
+
+ if (res != *alloc) {
+ if (!this->alloc().ensureBallast())
+ return false;
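+ // Route the copy through the instruction's input move group when
+ // only one definition reuses its input, and through the dedicated
+ // fix-reuse group otherwise.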
+ if (NumReusingDefs(ins) <= 1) {
+ LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
+ if (!group->addAfter(sourceAlloc, res, reg.type()))
+ return false;
+ } else {
+ LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
+ if (!group->add(sourceAlloc, res, reg.type()))
+ return false;
+ }
+ *alloc = res;
+ }
+ }
+ }
+
+ addLiveRegistersForRange(reg, range);
+ }
+ }
+
+ graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
+ return true;
+}
+
+size_t
+BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
+{
+ size_t i = 0;
+ for (; i < graph.numNonCallSafepoints(); i++) {
+ const LInstruction* ins = graph.getNonCallSafepoint(i);
+ if (from <= inputOf(ins))
+ break;
+ }
+ return i;
+}
+
+void
+BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
+{
+ // Fill in the live register sets for all non-call safepoints.
+ LAllocation a = range->bundle()->allocation();
+ if (!a.isRegister())
+ return;
+
+ // Don't add output registers to the safepoint.
+ CodePosition start = range->from();
+ if (range->hasDefinition() && !reg.isTemp()) {
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // We don't add the output register to the safepoint,
+ // but it still might get added as one of the inputs.
+ // So eagerly add this reg to the safepoint clobbered registers.
+ if (reg.ins()->isInstruction()) {
+ if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
+ safepoint->addClobberedRegister(a.toRegister());
+ }
+#endif
+ start = start.next();
+ }
+
+ size_t i = findFirstNonCallSafepoint(start);
+ for (; i < graph.numNonCallSafepoints(); i++) {
+ LInstruction* ins = graph.getNonCallSafepoint(i);
+ CodePosition pos = inputOf(ins);
+
+ // Safepoints are sorted, so we can shortcut out of this loop
+ // if we go out of range.
+ if (range->to() <= pos)
+ break;
+
+ MOZ_ASSERT(range->covers(pos));
+
+ LSafepoint* safepoint = ins->safepoint();
+ safepoint->addLiveRegister(a.toRegister());
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (reg.isTemp())
+ safepoint->addClobberedRegister(a.toRegister());
+#endif
+ }
+}
+
+static inline bool
+IsNunbox(VirtualRegister& reg)
+{
+#ifdef JS_NUNBOX32
+ return reg.type() == LDefinition::TYPE ||
+ reg.type() == LDefinition::PAYLOAD;
+#else
+ return false;
+#endif
+}
+
+static inline bool
+IsSlotsOrElements(VirtualRegister& reg)
+{
+ return reg.type() == LDefinition::SLOTS;
+}
+
+static inline bool
+IsTraceable(VirtualRegister& reg)
+{
+ if (reg.type() == LDefinition::OBJECT)
+ return true;
+#ifdef JS_PUNBOX64
+ if (reg.type() == LDefinition::BOX)
+ return true;
+#endif
+ return false;
+}
+
+size_t
+BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
+{
+ size_t i = startFrom;
+ for (; i < graph.numSafepoints(); i++) {
+ LInstruction* ins = graph.getSafepoint(i);
+ if (pos <= inputOf(ins))
+ break;
+ }
+ return i;
+}
+
+bool
+BacktrackingAllocator::populateSafepoints()
+{
+ JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
+
+ size_t firstSafepoint = 0;
+
+ MOZ_ASSERT(!vregs[0u].def());
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+ continue;
+
+ firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
+ if (firstSafepoint >= graph.numSafepoints())
+ break;
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+ LInstruction* ins = graph.getSafepoint(j);
+
+ if (!range->covers(inputOf(ins))) {
+ if (inputOf(ins) >= range->to())
+ break;
+ continue;
+ }
+
+ // Include temps but not instruction outputs. Also make sure
+ // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
+ // we would have to add the input reg to this safepoint.
+ if (ins == reg.ins() && !reg.isTemp()) {
+ DebugOnly<LDefinition*> def = reg.def();
+ MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+ def->type() == LDefinition::GENERAL ||
+ def->type() == LDefinition::INT32 ||
+ def->type() == LDefinition::FLOAT32 ||
+ def->type() == LDefinition::DOUBLE);
+ continue;
+ }
+
+ LSafepoint* safepoint = ins->safepoint();
+
+ LAllocation a = range->bundle()->allocation();
+ if (a.isGeneralReg() && ins->isCall())
+ continue;
+
+ switch (reg.type()) {
+ case LDefinition::OBJECT:
+ if (!safepoint->addGcPointer(a))
+ return false;
+ break;
+ case LDefinition::SLOTS:
+ if (!safepoint->addSlotsOrElementsPointer(a))
+ return false;
+ break;
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ if (!safepoint->addNunboxType(i, a))
+ return false;
+ break;
+ case LDefinition::PAYLOAD:
+ if (!safepoint->addNunboxPayload(i, a))
+ return false;
+ break;
+#else
+ case LDefinition::BOX:
+ if (!safepoint->addBoxedValue(a))
+ return false;
+ break;
+#endif
+ default:
+ MOZ_CRASH("Bad register type");
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+BacktrackingAllocator::annotateMoveGroups()
+{
+ // Annotate move groups in the LIR graph with any register that is not
+ // allocated at that point and can be used as a scratch register. This is
+ // only required for x86, as other platforms always have scratch registers
+ // available for use.
+#ifdef JS_CODEGEN_X86
+ LiveRange* range = LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next());
+ if (!range)
+ return false;
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Backtracking Annotate Move Groups"))
+ return false;
+
+ LBlock* block = graph.getBlock(i);
+ LInstruction* last = nullptr;
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
+ if (iter->isMoveGroup()) {
+ CodePosition from = last ? outputOf(last) : entryOf(block);
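+ // Set 'to' before 'from' so the reused range never looks empty
+ // to the assertions in the setters.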
+ range->setTo(from.next());
+ range->setFrom(from);
+
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ PhysicalRegister& reg = registers[i];
+ if (reg.reg.isFloat() || !reg.allocatable)
+ continue;
+
+ // This register is unavailable for use if (a) it is in use
+ // by some live range immediately before the move group,
+ // or (b) it is an operand in one of the group's moves. The
+ // latter case handles live ranges which end immediately
+ // before the move group or start immediately after.
+ // For (b) we need to consider move groups immediately
+ // preceding or following this one.
+
+ if (iter->toMoveGroup()->uses(reg.reg.gpr()))
+ continue;
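+ // Check any move groups immediately following or preceding this
+ // one for uses of the register.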
+ bool found = false;
+ LInstructionIterator niter(iter);
+ for (niter++; niter != block->end(); niter++) {
+ if (niter->isMoveGroup()) {
+ if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
+ found = true;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ if (iter != block->begin()) {
+ LInstructionIterator riter(iter);
+ do {
+ riter--;
+ if (riter->isMoveGroup()) {
+ if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
+ found = true;
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (riter != block->begin());
+ }
+
+ LiveRange* existing;
+ if (found || reg.allocations.contains(range, &existing))
+ continue;
+
+ iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
+ break;
+ }
+ } else {
+ last = *iter;
+ }
+ }
+ }
+#endif
+
+ return true;
+}
+
+/////////////////////////////////////////////////////////////////////
+// Debugging methods
+/////////////////////////////////////////////////////////////////////
+
+#ifdef JS_JITSPEW
+
+UniqueChars
+LiveRange::toString() const
+{
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ char* buf = JS_smprintf("v%u [%u,%u)", hasVreg() ? vreg() : 0, from().bits(), to().bits());
+
+ if (buf && bundle() && !bundle()->allocation().isBogus())
+ buf = JS_sprintf_append(buf, " %s", bundle()->allocation().toString().get());
+
+ if (buf && hasDefinition())
+ buf = JS_sprintf_append(buf, " (def)");
+
+ for (UsePositionIterator iter = usesBegin(); buf && iter; iter++)
+ buf = JS_sprintf_append(buf, " %s@%u", iter->use()->toString().get(), iter->pos.bits());
+
+ if (!buf)
+ oomUnsafe.crash("LiveRange::toString()");
+
+ return UniqueChars(buf);
+}
+
+UniqueChars
+LiveBundle::toString() const
+{
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ // Suppress -Wformat warning.
+ char* buf = JS_smprintf("%s", "");
+
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter; iter++) {
+ buf = JS_sprintf_append(buf, "%s %s",
+ (iter == rangesBegin()) ? "" : " ##",
+ LiveRange::get(*iter)->toString().get());
+ }
+
+ if (!buf)
+ oomUnsafe.crash("LiveBundle::toString()");
+
+ return UniqueChars(buf);
+}
+
+#endif // JS_JITSPEW
+
+void
+BacktrackingAllocator::dumpVregs()
+{
+#ifdef JS_JITSPEW
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+
+ fprintf(stderr, "Live ranges by virtual register:\n");
+
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ fprintf(stderr, " ");
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+ if (iter != reg.rangesBegin())
+ fprintf(stderr, " ## ");
+ fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
+ }
+ fprintf(stderr, "\n");
+ }
+
+ fprintf(stderr, "\nLive ranges by bundle:\n");
+
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter; baseIter++) {
+ LiveRange* range = LiveRange::get(*baseIter);
+ LiveBundle* bundle = range->bundle();
+ if (range == bundle->firstRange()) {
+ fprintf(stderr, " ");
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ if (iter != bundle->rangesBegin())
+ fprintf(stderr, " ## ");
+ fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
+ }
+ fprintf(stderr, "\n");
+ }
+ }
+ }
+#endif
+}
+
+#ifdef JS_JITSPEW
+struct BacktrackingAllocator::PrintLiveRange
+{
+ bool& first_;
+
+ explicit PrintLiveRange(bool& first) : first_(first) {}
+
+ void operator()(const LiveRange* range)
+ {
+ if (first_)
+ first_ = false;
+ else
+ fprintf(stderr, " /");
+ fprintf(stderr, " %s", range->toString().get());
+ }
+};
+#endif
+
+void
+BacktrackingAllocator::dumpAllocations()
+{
+#ifdef JS_JITSPEW
+ fprintf(stderr, "Allocations:\n");
+
+ dumpVregs();
+
+ fprintf(stderr, "Allocations by physical register:\n");
+
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ if (registers[i].allocatable && !registers[i].allocations.empty()) {
+ fprintf(stderr, " %s:", AnyRegister::FromCode(i).name());
+ bool first = true;
+ registers[i].allocations.forEach(PrintLiveRange(first));
+ fprintf(stderr, "\n");
+ }
+ }
+
+ fprintf(stderr, "\n");
+#endif // JS_JITSPEW
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Heuristic Methods
+///////////////////////////////////////////////////////////////////////////////
+
+size_t
+BacktrackingAllocator::computePriority(LiveBundle* bundle)
+{
+ // The priority of a bundle is its total length, so that longer lived
+ // bundles will be processed before shorter ones (even if the longer ones
+ // have a low spill weight). See processBundle().
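+ // For example, a bundle with ranges [4,16) and [20,28) has priority
+ // 12 + 8 = 20.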
+ size_t lifetimeTotal = 0;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ lifetimeTotal += range->to() - range->from();
+ }
+
+ return lifetimeTotal;
+}
+
+bool
+BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins)
+{
+ // Whether this is a minimal range capturing a definition at ins.
+ return (range->to() <= minimalDefEnd(ins).next()) &&
+ ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins));
+}
+
+bool
+BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use)
+{
+ // Whether this is a minimal range capturing |use|.
+ LNode* ins = insData[use->pos];
+ return (range->from() == inputOf(ins)) &&
+ (range->to() == (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
+}
+
+bool
+BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed)
+{
+ LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!range->hasVreg()) {
+ *pfixed = true;
+ return true;
+ }
+
+ // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
+ // each range into a separate bundle.
+ if (++iter)
+ return false;
+
+ if (range->hasDefinition()) {
+ VirtualRegister& reg = vregs[range->vreg()];
+ if (pfixed)
+ *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
+ return minimalDef(range, reg.ins());
+ }
+
+ bool fixed = false, minimal = false, multiple = false;
+
+ for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+ if (iter != range->usesBegin())
+ multiple = true;
+
+ switch (iter->usePolicy()) {
+ case LUse::FIXED:
+ if (fixed)
+ return false;
+ fixed = true;
+ if (minimalUse(range, *iter))
+ minimal = true;
+ break;
+
+ case LUse::REGISTER:
+ if (minimalUse(range, *iter))
+ minimal = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // If a range contains a fixed use and at least one other use,
+ // splitAtAllRegisterUses will split each use into a different bundle.
+ if (multiple && fixed)
+ minimal = false;
+
+ if (pfixed)
+ *pfixed = fixed;
+ return minimal;
+}
+
+size_t
+BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
+{
+ // Minimal bundles have an extremely high spill weight, to ensure they
+ // can evict any other bundles and be allocated to a register.
+ bool fixed;
+ if (minimalBundle(bundle, &fixed))
+ return fixed ? 2000000 : 1000000;
+
+ size_t usesTotal = 0;
+ fixed = false;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (range->hasDefinition()) {
+ VirtualRegister& reg = vregs[range->vreg()];
+ if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister()) {
+ usesTotal += 2000;
+ fixed = true;
+ } else if (!reg.ins()->isPhi()) {
+ usesTotal += 2000;
+ }
+ }
+
+ for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+ switch (iter->usePolicy()) {
+ case LUse::ANY:
+ usesTotal += 1000;
+ break;
+
+ case LUse::FIXED:
+ fixed = true;
+ MOZ_FALLTHROUGH;
+ case LUse::REGISTER:
+ usesTotal += 2000;
+ break;
+
+ case LUse::KEEPALIVE:
+ break;
+
+ default:
+ // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
+ MOZ_CRASH("Bad use");
+ }
+ }
+ }
+
+ // Bundles with fixed uses are given a higher spill weight, since they must
+ // be allocated to a specific register.
+ if (testbed && fixed)
+ usesTotal *= 2;
+
+ // Compute spill weight as a use density, lowering the weight for long
+ // lived bundles with relatively few uses.
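+ // For example, a non-minimal bundle with two REGISTER uses over a
+ // lifetime of 40 gets a weight of 4000 / 40 = 100, while the same two
+ // uses over a lifetime of 4 give 1000.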
+ size_t lifetimeTotal = computePriority(bundle);
+ return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
+}
+
+size_t
+BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
+{
+ size_t maxWeight = 0;
+ for (size_t i = 0; i < bundles.length(); i++)
+ maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
+ return maxWeight;
+}
+
+bool
+BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
+{
+ // If this bundle has portions that are hot and portions that are cold,
+ // split it at the boundaries between hot and cold code.
+
+ LiveRange* hotRange = nullptr;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (hotcode.contains(range, &hotRange))
+ break;
+ }
+
+ // Don't split if there is no hot code in the bundle.
+ if (!hotRange) {
+ JitSpew(JitSpew_RegAlloc, " bundle does not contain hot code");
+ return true;
+ }
+
+ // Don't split if there is no cold code in the bundle.
+ bool coldCode = false;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!hotRange->contains(range)) {
+ coldCode = true;
+ break;
+ }
+ }
+ if (!coldCode) {
+ JitSpew(JitSpew_RegAlloc, " bundle does not contain cold code");
+ return true;
+ }
+
+ JitSpew(JitSpew_RegAlloc, " split across hot range %s", hotRange->toString().get());
+
+ // Tweak the splitting method when compiling wasm code to look at actual
+ // uses within the hot/cold code. This heuristic is in place as the below
+ // mechanism regresses several asm.js tests. Hopefully this will be fixed
+ // soon and this special case removed. See bug 948838.
+ if (compilingWasm()) {
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
+ return false;
+ *success = true;
+ return splitAt(bundle, splitPositions);
+ }
+
+ LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!hotBundle)
+ return false;
+ LiveBundle* preBundle = nullptr;
+ LiveBundle* postBundle = nullptr;
+ LiveBundle* coldBundle = nullptr;
+
+ if (testbed) {
+ coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), bundle->spillParent());
+ if (!coldBundle)
+ return false;
+ }
+
+ // Accumulate the ranges of hot and cold code in the bundle. Note that
+ // we are only comparing with the single hot range found, so the cold code
+ // may contain separate hot ranges.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRange::Range hot, coldPre, coldPost;
+ range->intersect(hotRange, &coldPre, &hot, &coldPost);
+
+ if (!hot.empty()) {
+ if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
+ return false;
+ }
+
+ if (!coldPre.empty()) {
+ if (testbed) {
+ if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
+ return false;
+ } else {
+ if (!preBundle) {
+ preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!preBundle)
+ return false;
+ }
+ if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
+ return false;
+ }
+ }
+
+ if (!coldPost.empty()) {
+ if (testbed) {
+ if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
+ return false;
+ } else {
+ if (!postBundle) {
+ postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!postBundle)
+ return false;
+ }
+ if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
+ return false;
+ }
+ }
+ }
+
+ MOZ_ASSERT(hotBundle->numRanges() != 0);
+
+ LiveBundleVector newBundles;
+ if (!newBundles.append(hotBundle))
+ return false;
+
+ if (testbed) {
+ MOZ_ASSERT(coldBundle->numRanges() != 0);
+ if (!newBundles.append(coldBundle))
+ return false;
+ } else {
+ MOZ_ASSERT(preBundle || postBundle);
+ if (preBundle && !newBundles.append(preBundle))
+ return false;
+ if (postBundle && !newBundles.append(postBundle))
+ return false;
+ }
+
+ *success = true;
+ return splitAndRequeueBundles(bundle, newBundles);
+}
+
+bool
+BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
+ bool* success)
+{
+ // If this bundle's later uses do not require it to be in a register,
+ // split it after the last use which does require a register. If conflict
+ // is specified, only consider register uses before the conflict starts.
+
+ CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ // If the range defines a register, consider that a register use for
+ // our purposes here.
+ if (isRegisterDefinition(range)) {
+ CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
+ if (!conflict || spillStart < conflict->firstRange()->from()) {
+ lastUse = lastRegisterFrom = range->from();
+ lastRegisterTo = spillStart;
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LNode* ins = insData[iter->pos];
+
+ // Uses in the bundle should be sorted.
+ MOZ_ASSERT(iter->pos >= lastUse);
+ lastUse = inputOf(ins);
+
+ if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
+ if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
+ lastRegisterFrom = inputOf(ins);
+ lastRegisterTo = iter->pos.next();
+ }
+ }
+ }
+ }
+
+ // Can't trim non-register uses off the end by splitting.
+ if (!lastRegisterFrom.bits()) {
+ JitSpew(JitSpew_RegAlloc, " bundle has no register uses");
+ return true;
+ }
+ if (lastUse < lastRegisterTo) {
+ JitSpew(JitSpew_RegAlloc, " bundle's last use is a register use");
+ return true;
+ }
+
+ JitSpew(JitSpew_RegAlloc, " split after last register use at %u",
+ lastRegisterTo.bits());
+
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(lastRegisterTo))
+ return false;
+ *success = true;
+ return splitAt(bundle, splitPositions);
+}
+
+bool
+BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success)
+{
+ // If this bundle's earlier uses do not require it to be in a register,
+ // split it before the first use which does require a register. If conflict
+ // is specified, only consider register uses after the conflict ends.
+
+ if (isRegisterDefinition(bundle->firstRange())) {
+ JitSpew(JitSpew_RegAlloc, " bundle is defined by a register");
+ return true;
+ }
+ if (!bundle->firstRange()->hasDefinition()) {
+ JitSpew(JitSpew_RegAlloc, " bundle does not have definition");
+ return true;
+ }
+
+ CodePosition firstRegisterFrom;
+
+ CodePosition conflictEnd;
+ if (conflict) {
+ for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->to() > conflictEnd)
+ conflictEnd = range->to();
+ }
+ }
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!conflict || range->from() > conflictEnd) {
+ if (range->hasDefinition() && isRegisterDefinition(range)) {
+ firstRegisterFrom = range->from();
+ break;
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LNode* ins = insData[iter->pos];
+
+ if (!conflict || outputOf(ins) >= conflictEnd) {
+ if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
+ firstRegisterFrom = inputOf(ins);
+ break;
+ }
+ }
+ }
+ if (firstRegisterFrom.bits())
+ break;
+ }
+
+ if (!firstRegisterFrom.bits()) {
+ // Can't trim non-register uses off the beginning by splitting.
+ JitSpew(JitSpew_RegAlloc, " bundle has no register uses");
+ return true;
+ }
+
+ JitSpew(JitSpew_RegAlloc, " split before first register use at %u",
+ firstRegisterFrom.bits());
+
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(firstRegisterFrom))
+ return false;
+ *success = true;
+ return splitAt(bundle, splitPositions);
+}
+
+// When splitting a bundle according to a list of split positions, return
+// whether a use or range at |pos| should go in a different bundle than the
+// one used for the last position this was called with.
+static bool
+UseNewBundle(const SplitPositionVector& splitPositions, CodePosition pos,
+ size_t* activeSplitPosition)
+{
+ if (splitPositions.empty()) {
+ // When the split positions are empty we are splitting at all uses.
+ return true;
+ }
+
+ if (*activeSplitPosition == splitPositions.length()) {
+ // We've advanced past all split positions.
+ return false;
+ }
+
+ if (splitPositions[*activeSplitPosition] > pos) {
+ // We haven't gotten to the next split position yet.
+ return false;
+ }
+
+ // We've advanced past the next split position; find the next one at
+ // which we should split.
+ while (*activeSplitPosition < splitPositions.length() &&
+ splitPositions[*activeSplitPosition] <= pos)
+ {
+ (*activeSplitPosition)++;
+ }
+ return true;
+}
+
+static bool
+HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
+{
+ MOZ_ASSERT(range->bundle() == bundle);
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* prevRange = LiveRange::get(*iter);
+ if (prevRange == range)
+ return false;
+ if (prevRange->vreg() == range->vreg())
+ return true;
+ }
+
+ MOZ_CRASH();
+}
+
+static bool
+HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
+{
+ MOZ_ASSERT(range->bundle() == bundle);
+
+ bool foundRange = false;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* prevRange = LiveRange::get(*iter);
+ if (foundRange && prevRange->vreg() == range->vreg())
+ return true;
+ if (prevRange == range)
+ foundRange = true;
+ }
+
+ MOZ_ASSERT(foundRange);
+ return false;
+}
+
+bool
+BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
+{
+ // Split the bundle at the given split points. Register uses which have no
+ // intervening split points are consolidated into the same bundle. If the
+ // list of split points is empty, then all register uses are placed in
+ // minimal bundles.
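+ // For example, splitting at position 16 a bundle with register uses at
+ // positions 8, 12 and 24 yields one new bundle holding the uses at 8
+ // and 12 and another holding the use at 24, while uses that do not
+ // require a register are given to the spill bundle.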
+
+ // splitPositions should be sorted.
+ for (size_t i = 1; i < splitPositions.length(); ++i)
+ MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]);
+
+ // We don't need to create a new spill bundle if there already is one.
+ bool spillBundleIsNew = false;
+ LiveBundle* spillBundle = bundle->spillParent();
+ if (!spillBundle) {
+ spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
+ if (!spillBundle)
+ return false;
+ spillBundleIsNew = true;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ CodePosition from = range->from();
+ if (isRegisterDefinition(range))
+ from = minimalDefEnd(insData[from]).next();
+
+ if (from < range->to()) {
+ if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
+ return false;
+
+ if (range->hasDefinition() && !isRegisterDefinition(range))
+ spillBundle->lastRange()->setHasDefinition();
+ }
+ }
+ }
+
+ LiveBundleVector newBundles;
+
+ // The bundle to which ranges are currently being added.
+ LiveBundle* activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle))
+ return false;
+
+ // State for use by UseNewBundle.
+ size_t activeSplitPosition = 0;
+
+ // Make new bundles according to the split positions, and distribute ranges
+ // and uses to them.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
+ activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle))
+ return false;
+ }
+
+ LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
+ range->from(), range->to());
+ if (!activeRange)
+ return false;
+ activeBundle->addRange(activeRange);
+
+ if (isRegisterDefinition(range))
+ activeRange->setHasDefinition();
+
+ while (range->hasUses()) {
+ UsePosition* use = range->popUse();
+ LNode* ins = insData[use->pos];
+
+ // Any uses of a register that appear before its definition has
+ // finished must be associated with the range for that definition.
+ if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) {
+ activeRange->addUse(use);
+ } else if (isRegisterUse(use, ins)) {
+ // Place this register use into a different bundle from the
+ // last one if there are any split points between the two uses.
+ // UseNewBundle always returns true if we are splitting at all
+ // register uses, but we can still reuse the last range and
+ // bundle if they have uses at the same position, except when
+ // either use is fixed (the two uses might require incompatible
+ // registers).
+ if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
+ (!activeRange->hasUses() ||
+ activeRange->usesBegin()->pos != use->pos ||
+ activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
+ use->usePolicy() == LUse::FIXED))
+ {
+ activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle))
+ return false;
+ activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
+ range->from(), range->to());
+ if (!activeRange)
+ return false;
+ activeBundle->addRange(activeRange);
+ }
+
+ activeRange->addUse(use);
+ } else {
+ MOZ_ASSERT(spillBundleIsNew);
+ spillBundle->rangeFor(use->pos)->addUse(use);
+ }
+ }
+ }
+
+ LiveBundleVector filteredBundles;
+
+ // Trim the ends of ranges in each new bundle when there are no other
+ // earlier or later ranges in the same bundle with the same vreg.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* bundle = newBundles[i];
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!range->hasDefinition()) {
+ if (!HasPrecedingRangeSharingVreg(bundle, range)) {
+ if (range->hasUses()) {
+ UsePosition* use = *range->usesBegin();
+ range->setFrom(inputOf(insData[use->pos]));
+ } else {
+ bundle->removeRangeAndIncrementIterator(iter);
+ continue;
+ }
+ }
+ }
+
+ if (!HasFollowingRangeSharingVreg(bundle, range)) {
+ if (range->hasUses()) {
+ UsePosition* use = range->lastUse();
+ range->setTo(use->pos.next());
+ } else if (range->hasDefinition()) {
+ range->setTo(minimalDefEnd(insData[range->from()]).next());
+ } else {
+ bundle->removeRangeAndIncrementIterator(iter);
+ continue;
+ }
+ }
+
+ iter++;
+ }
+
+ if (bundle->hasRanges() && !filteredBundles.append(bundle))
+ return false;
+ }
+
+ if (spillBundleIsNew && !filteredBundles.append(spillBundle))
+ return false;
+
+ return splitAndRequeueBundles(bundle, filteredBundles);
+}
+
+bool
+BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
+{
+ // Split the bundle to separate register uses and non-register uses and
+ // allow the vreg to be spilled across its range.
+
+ // Find the locations of all calls in the bundle's range.
+ SplitPositionVector callPositions;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ CallRange searchRange(range->from(), range->to());
+ CallRange* callRange;
+ if (!callRanges.contains(&searchRange, &callRange)) {
+ // There are no calls inside this range.
+ continue;
+ }
+ MOZ_ASSERT(range->covers(callRange->range.from));
+
+ // The search above returns an arbitrary call within the range. Walk
+ // backwards to find the first call in the range.
+ for (CallRangeList::reverse_iterator riter = callRangesList.rbegin(callRange);
+ riter != callRangesList.rend();
+ ++riter)
+ {
+ CodePosition pos = riter->range.from;
+ if (range->covers(pos))
+ callRange = *riter;
+ else
+ break;
+ }
+
+ // Add all call positions within the range, by walking forwards.
+ for (CallRangeList::iterator iter = callRangesList.begin(callRange);
+ iter != callRangesList.end();
+ ++iter)
+ {
+ CodePosition pos = iter->range.from;
+ if (!range->covers(pos))
+ break;
+
+ // Calls at the beginning of the range are ignored; there is no splitting to do.
+ if (range->covers(pos.previous())) {
+ MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
+ if (!callPositions.append(pos))
+ return false;
+ }
+ }
+ }
+ MOZ_ASSERT(callPositions.length());
+
+#ifdef JS_JITSPEW
+ JitSpewStart(JitSpew_RegAlloc, " split across calls at ");
+ for (size_t i = 0; i < callPositions.length(); ++i)
+ JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
+ JitSpewFin(JitSpew_RegAlloc);
+#endif
+
+ return splitAt(bundle, callPositions);
+}
+
+bool
+BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
+{
+ bool success = false;
+
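+ // Try the heuristics from most to least targeted: split across hot/cold
+ // boundaries first; across calls when |fixed| is set; then before the
+ // first or after the last register use; and as a last resort split at
+ // every register use.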
+ if (!trySplitAcrossHotcode(bundle, &success))
+ return false;
+ if (success)
+ return true;
+
+ if (fixed)
+ return splitAcrossCalls(bundle);
+
+ if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
+ return false;
+ if (success)
+ return true;
+
+ if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
+ return false;
+ if (success)
+ return true;
+
+ // Split at all register uses.
+ SplitPositionVector emptyPositions;
+ return splitAt(bundle, emptyPositions);
+}
diff --git a/js/src/jit/BacktrackingAllocator.h b/js/src/jit/BacktrackingAllocator.h
new file mode 100644
index 000000000..6d14ffacd
--- /dev/null
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -0,0 +1,816 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BacktrackingAllocator_h
+#define jit_BacktrackingAllocator_h
+
+#include "mozilla/Array.h"
+
+#include "ds/PriorityQueue.h"
+#include "ds/SplayTree.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/StackSlotAllocator.h"
+
+// Backtracking priority queue based register allocator based on that described
+// in the following blog post:
+//
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+
+namespace js {
+namespace jit {
+
+class Requirement
+{
+ public:
+ enum Kind {
+ NONE,
+ REGISTER,
+ FIXED,
+ MUST_REUSE_INPUT
+ };
+
+ Requirement()
+ : kind_(NONE)
+ { }
+
+ explicit Requirement(Kind kind)
+ : kind_(kind)
+ {
+ // These have dedicated constructors.
+ MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+ }
+
+ Requirement(Kind kind, CodePosition at)
+ : kind_(kind),
+ position_(at)
+ {
+ // These have dedicated constructors.
+ MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+ }
+
+ explicit Requirement(LAllocation fixed)
+ : kind_(FIXED),
+ allocation_(fixed)
+ {
+ MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+ }
+
+ // Only useful as a hint; encodes where the fixed requirement is used, to
+ // avoid allocating a fixed register too early.
+ Requirement(LAllocation fixed, CodePosition at)
+ : kind_(FIXED),
+ allocation_(fixed),
+ position_(at)
+ {
+ MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+ }
+
+ Requirement(uint32_t vreg, CodePosition at)
+ : kind_(MUST_REUSE_INPUT),
+ allocation_(LUse(vreg, LUse::ANY)),
+ position_(at)
+ { }
+
+ Kind kind() const {
+ return kind_;
+ }
+
+ LAllocation allocation() const {
+ MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
+ return allocation_;
+ }
+
+ uint32_t virtualRegister() const {
+ MOZ_ASSERT(allocation_.isUse());
+ MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
+ return allocation_.toUse()->virtualRegister();
+ }
+
+ CodePosition pos() const {
+ return position_;
+ }
+
+ int priority() const;
+
+ MOZ_MUST_USE bool merge(const Requirement& newRequirement) {
+ // Merge newRequirement with any existing requirement, returning false
+ // if the new and old requirements conflict.
+ MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
+
+ if (newRequirement.kind() == Requirement::FIXED) {
+ if (kind() == Requirement::FIXED)
+ return newRequirement.allocation() == allocation();
+ *this = newRequirement;
+ return true;
+ }
+
+ MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+ if (kind() == Requirement::FIXED)
+ return allocation().isRegister();
+
+ *this = newRequirement;
+ return true;
+ }
+
+ void dump() const;
+
+ private:
+ Kind kind_;
+ LAllocation allocation_;
+ CodePosition position_;
+};
+
+struct UsePosition : public TempObject,
+ public InlineForwardListNode<UsePosition>
+{
+ private:
+ // Packed LUse* with a copy of the LUse::Policy value, in order to avoid
+ // a cache miss when fetching the policy value.
+ uintptr_t use_;
+
+ void setUse(LUse* use) {
+ // Assert that we can safely pack the LUse policy in the last 2 bits of
+ // the LUse pointer.
+ static_assert((LUse::ANY | LUse::REGISTER | LUse::FIXED | LUse::KEEPALIVE) <= 0x3,
+ "Cannot pack the LUse::Policy value on 32 bits architectures.");
+
+ // RECOVERED_INPUT is used by snapshots and ignored when building the
+ // liveness information. Thus we can safely assume that no such value
+ // would be seen.
+ MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);
+ use_ = uintptr_t(use) | (use->policy() & 0x3);
+ }
+
+ public:
+ CodePosition pos;
+
+ LUse* use() const {
+ return reinterpret_cast<LUse*>(use_ & ~0x3);
+ }
+
+ LUse::Policy usePolicy() const {
+ LUse::Policy policy = LUse::Policy(use_ & 0x3);
+ MOZ_ASSERT(use()->policy() == policy);
+ return policy;
+ }
+
+ UsePosition(LUse* use, CodePosition pos) :
+ pos(pos)
+ {
+ // Verify that the usedAtStart() flag is consistent with the
+ // subposition. For now ignore fixed registers, because they
+ // are handled specially around calls.
+ MOZ_ASSERT_IF(!use->isFixedRegister(),
+ pos.subpos() == (use->usedAtStart()
+ ? CodePosition::INPUT
+ : CodePosition::OUTPUT));
+ setUse(use);
+ }
+};
+
+typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
+
+// Backtracking allocator data structures overview.
+//
+// LiveRange: A continuous range of positions where a virtual register is live.
+// LiveBundle: A set of LiveRanges which do not overlap.
+// VirtualRegister: A set of all LiveRanges used for some LDefinition.
+//
+// The allocator first performs a liveness analysis on the LIR graph which
+// constructs LiveRanges for each VirtualRegister, determining where the
+// registers are live.
+//
+// The ranges are then bundled together according to heuristics, and placed on
+// the allocation queue.
+//
+// As bundles are removed from the allocation queue, we attempt to find a
+// physical register or stack slot allocation for all ranges in the removed
+// bundle, possibly evicting already-allocated bundles. See processBundle()
+// for details.
+//
+// If we are not able to allocate a bundle, it is split according to heuristics
+// into two or more smaller bundles which cover all the ranges of the original.
+// These smaller bundles are then allocated independently.
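+//
+// For example, a virtual register that is live over positions [10,30) but
+// cannot be kept in a register across a call at position 18 might end up
+// with ranges [10,18) and [18,30) in two different bundles; because the
+// bundles came from the same split, they share a SpillSet and therefore the
+// same spill location if either is spilled.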
+
+class LiveBundle;
+
+class LiveRange : public TempObject
+{
+ public:
+ // Linked lists are used to keep track of the ranges in each LiveBundle and
+ // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
+ // these auxiliary classes to keep things straight.
+ class BundleLink : public InlineForwardListNode<BundleLink> {};
+ class RegisterLink : public InlineForwardListNode<RegisterLink> {};
+
+ typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
+ typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
+
+ // Links in the lists in LiveBundle and VirtualRegister.
+ BundleLink bundleLink;
+ RegisterLink registerLink;
+
+ static LiveRange* get(BundleLink* link) {
+ return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+ offsetof(LiveRange, bundleLink));
+ }
+ static LiveRange* get(RegisterLink* link) {
+ return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+ offsetof(LiveRange, registerLink));
+ }
+
+ struct Range
+ {
+ // The beginning of this range, inclusive.
+ CodePosition from;
+
+ // The end of this range, exclusive.
+ CodePosition to;
+
+ Range() {}
+
+ Range(CodePosition from, CodePosition to)
+ : from(from), to(to)
+ {
+ MOZ_ASSERT(!empty());
+ }
+
+ bool empty() {
+ MOZ_ASSERT(from <= to);
+ return from == to;
+ }
+ };
+
+ private:
+ // The virtual register this range is for, or zero if this does not have a
+ // virtual register (for example, it is in the callRanges bundle).
+ uint32_t vreg_;
+
+ // The bundle containing this range, null if liveness information is being
+ // constructed and we haven't started allocating bundles yet.
+ LiveBundle* bundle_;
+
+ // The code positions in this range.
+ Range range_;
+
+ // All uses of the virtual register in this range, ordered by location.
+ InlineForwardList<UsePosition> uses_;
+
+ // Whether this range contains the virtual register's definition.
+ bool hasDefinition_;
+
+ LiveRange(uint32_t vreg, Range range)
+ : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
+ {
+ MOZ_ASSERT(!range.empty());
+ }
+
+ public:
+ static LiveRange* FallibleNew(TempAllocator& alloc, uint32_t vreg,
+ CodePosition from, CodePosition to)
+ {
+ return new(alloc.fallible()) LiveRange(vreg, Range(from, to));
+ }
+
+ uint32_t vreg() const {
+ MOZ_ASSERT(hasVreg());
+ return vreg_;
+ }
+ bool hasVreg() const {
+ return vreg_ != 0;
+ }
+
+ LiveBundle* bundle() const {
+ return bundle_;
+ }
+
+ CodePosition from() const {
+ return range_.from;
+ }
+ CodePosition to() const {
+ return range_.to;
+ }
+ bool covers(CodePosition pos) const {
+ return pos >= from() && pos < to();
+ }
+
+ // Whether this range wholly contains other.
+ bool contains(LiveRange* other) const;
+
+ // Intersect this range with other, returning the subranges of this
+ // that are before, inside, or after other.
+ void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;
+
+ // Whether this range has any intersection with other.
+ bool intersects(LiveRange* other) const;
+
+ UsePositionIterator usesBegin() const {
+ return uses_.begin();
+ }
+ UsePosition* lastUse() const {
+ return uses_.back();
+ }
+ bool hasUses() const {
+ return !!usesBegin();
+ }
+ UsePosition* popUse() {
+ return uses_.popFront();
+ }
+
+ bool hasDefinition() const {
+ return hasDefinition_;
+ }
+
+ void setFrom(CodePosition from) {
+ range_.from = from;
+ MOZ_ASSERT(!range_.empty());
+ }
+ void setTo(CodePosition to) {
+ range_.to = to;
+ MOZ_ASSERT(!range_.empty());
+ }
+
+ void setBundle(LiveBundle* bundle) {
+ bundle_ = bundle;
+ }
+
+ void addUse(UsePosition* use);
+ void distributeUses(LiveRange* other);
+
+ void setHasDefinition() {
+ MOZ_ASSERT(!hasDefinition_);
+ hasDefinition_ = true;
+ }
+
+#ifdef JS_JITSPEW
+ // Return a string describing this range.
+ UniqueChars toString() const;
+#endif
+
+ // Comparator for use in range splay trees.
+ static int compare(LiveRange* v0, LiveRange* v1) {
+ // LiveRange includes 'from' but excludes 'to'.
+ if (v0->to() <= v1->from())
+ return -1;
+ if (v0->from() >= v1->to())
+ return 1;
+ return 0;
+ }
+};
+
+// Tracks information about bundles that should all be spilled to the same
+// physical location. At the beginning of allocation, each bundle has its own
+// spill set. As bundles are split, the new smaller bundles continue to use the
+// same spill set.
+class SpillSet : public TempObject
+{
+ // All bundles with this spill set which have been spilled. All bundles in
+ // this list will be given the same physical slot.
+ Vector<LiveBundle*, 1, JitAllocPolicy> list_;
+
+ explicit SpillSet(TempAllocator& alloc)
+ : list_(alloc)
+ { }
+
+ public:
+ static SpillSet* New(TempAllocator& alloc) {
+ return new(alloc) SpillSet(alloc);
+ }
+
+ MOZ_MUST_USE bool addSpilledBundle(LiveBundle* bundle) {
+ return list_.append(bundle);
+ }
+ size_t numSpilledBundles() const {
+ return list_.length();
+ }
+ LiveBundle* spilledBundle(size_t i) const {
+ return list_[i];
+ }
+
+ void setAllocation(LAllocation alloc);
+};
+
+// A set of live ranges which are all pairwise disjoint. The register allocator
+// attempts to find allocations for an entire bundle, and if it fails the
+// bundle will be broken into smaller ones which are allocated independently.
+class LiveBundle : public TempObject
+{
+ // The spill set to use if this bundle, or one it is split into, is spilled.
+ SpillSet* spill_;
+
+ // All the ranges in this set, ordered by location.
+ InlineForwardList<LiveRange::BundleLink> ranges_;
+
+ // Allocation to use for ranges in this set, bogus if unallocated or spilled
+ // and not yet given a physical stack slot.
+ LAllocation alloc_;
+
+ // Bundle which entirely contains this one and has no register uses. This
+ // may or may not be spilled by the allocator, but it can be spilled and
+ // will not be split.
+ LiveBundle* spillParent_;
+
+ LiveBundle(SpillSet* spill, LiveBundle* spillParent)
+ : spill_(spill), spillParent_(spillParent)
+ { }
+
+ public:
+ static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent)
+ {
+ return new(alloc.fallible()) LiveBundle(spill, spillParent);
+ }
+
+ SpillSet* spillSet() const {
+ return spill_;
+ }
+ void setSpillSet(SpillSet* spill) {
+ spill_ = spill;
+ }
+
+ LiveRange::BundleLinkIterator rangesBegin() const {
+ return ranges_.begin();
+ }
+ bool hasRanges() const {
+ return !!rangesBegin();
+ }
+ LiveRange* firstRange() const {
+ return LiveRange::get(*rangesBegin());
+ }
+ LiveRange* lastRange() const {
+ return LiveRange::get(ranges_.back());
+ }
+ LiveRange* rangeFor(CodePosition pos) const;
+ void removeRange(LiveRange* range);
+ void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
+ ranges_.removeAndIncrement(iter);
+ }
+ void addRange(LiveRange* range);
+ MOZ_MUST_USE bool addRange(TempAllocator& alloc, uint32_t vreg,
+ CodePosition from, CodePosition to);
+ MOZ_MUST_USE bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+ CodePosition from, CodePosition to);
+ LiveRange* popFirstRange();
+#ifdef DEBUG
+ size_t numRanges() const;
+#endif
+
+ LAllocation allocation() const {
+ return alloc_;
+ }
+ void setAllocation(LAllocation alloc) {
+ alloc_ = alloc;
+ }
+
+ LiveBundle* spillParent() const {
+ return spillParent_;
+ }
+
+#ifdef JS_JITSPEW
+ // Return a string describing this bundle.
+ UniqueChars toString() const;
+#endif
+};
+
+// Information about the allocation for a virtual register.
+class VirtualRegister
+{
+ // Instruction which defines this register.
+ LNode* ins_;
+
+ // Definition in the instruction for this register.
+ LDefinition* def_;
+
+ // All live ranges for this register. These may overlap each other, and are
+ // ordered by their start position.
+ InlineForwardList<LiveRange::RegisterLink> ranges_;
+
+ // Whether def_ is a temp or an output.
+ bool isTemp_;
+
+ // Whether this vreg is an input for some phi. This use is not reflected in
+ // any range on the vreg.
+ bool usedByPhi_;
+
+ // If this register's definition is MUST_REUSE_INPUT, whether a copy must
+ // be introduced before the definition that relaxes the policy.
+ bool mustCopyInput_;
+
+ void operator=(const VirtualRegister&) = delete;
+ VirtualRegister(const VirtualRegister&) = delete;
+
+ public:
+ explicit VirtualRegister()
+ {
+ // Note: This class is zeroed before it is constructed.
+ }
+
+ void init(LNode* ins, LDefinition* def, bool isTemp) {
+ MOZ_ASSERT(!ins_);
+ ins_ = ins;
+ def_ = def;
+ isTemp_ = isTemp;
+ }
+
+ LNode* ins() const {
+ return ins_;
+ }
+ LDefinition* def() const {
+ return def_;
+ }
+ LDefinition::Type type() const {
+ return def()->type();
+ }
+ uint32_t vreg() const {
+ return def()->virtualRegister();
+ }
+ bool isCompatible(const AnyRegister& r) const {
+ return def_->isCompatibleReg(r);
+ }
+ bool isCompatible(const VirtualRegister& vr) const {
+ return def_->isCompatibleDef(*vr.def_);
+ }
+ bool isTemp() const {
+ return isTemp_;
+ }
+
+ void setUsedByPhi() {
+ usedByPhi_ = true;
+ }
+ bool usedByPhi() {
+ return usedByPhi_;
+ }
+
+ void setMustCopyInput() {
+ mustCopyInput_ = true;
+ }
+ bool mustCopyInput() {
+ return mustCopyInput_;
+ }
+
+ LiveRange::RegisterLinkIterator rangesBegin() const {
+ return ranges_.begin();
+ }
+ LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const {
+ return ranges_.begin(&range->registerLink);
+ }
+ bool hasRanges() const {
+ return !!rangesBegin();
+ }
+ LiveRange* firstRange() const {
+ return LiveRange::get(*rangesBegin());
+ }
+ LiveRange* lastRange() const {
+ return LiveRange::get(ranges_.back());
+ }
+ LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
+ void removeRange(LiveRange* range);
+ void addRange(LiveRange* range);
+
+ void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) {
+ ranges_.removeAndIncrement(iter);
+ }
+
+ LiveBundle* firstBundle() const {
+ return firstRange()->bundle();
+ }
+
+ MOZ_MUST_USE bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
+ void addInitialUse(UsePosition* use);
+ void setInitialDefinition(CodePosition from);
+};
+
+// A sequence of code positions, for telling BacktrackingAllocator::splitAt
+// where to split.
+typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
+
+class BacktrackingAllocator : protected RegisterAllocator
+{
+ friend class C1Spewer;
+ friend class JSONSpewer;
+
+ // This flag is set when testing new allocator modifications.
+ bool testbed;
+
+ BitSet* liveIn;
+ FixedList<VirtualRegister> vregs;
+
+ // Allocation state.
+ StackSlotAllocator stackSlotAllocator;
+
+ // Priority queue element: a bundle and the associated priority.
+ struct QueueItem
+ {
+ LiveBundle* bundle;
+
+ QueueItem(LiveBundle* bundle, size_t priority)
+ : bundle(bundle), priority_(priority)
+ {}
+
+ static size_t priority(const QueueItem& v) {
+ return v.priority_;
+ }
+
+ private:
+ size_t priority_;
+ };
+
+ PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
+
+ typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
+
+ // Each physical register is associated with the set of ranges over which
+ // that register is currently allocated.
+ struct PhysicalRegister {
+ bool allocatable;
+ AnyRegister reg;
+ LiveRangeSet allocations;
+
+ PhysicalRegister() : allocatable(false) {}
+ };
+ mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
+
+ // Ranges of code which are considered to be hot, for which good allocation
+ // should be prioritized.
+ LiveRangeSet hotcode;
+
+ struct CallRange : public TempObject, public InlineListNode<CallRange> {
+ LiveRange::Range range;
+
+ CallRange(CodePosition from, CodePosition to)
+ : range(from, to)
+ {}
+
+ // Comparator for use in splay tree.
+ static int compare(CallRange* v0, CallRange* v1) {
+ if (v0->range.to <= v1->range.from)
+ return -1;
+ if (v0->range.from >= v1->range.to)
+ return 1;
+ return 0;
+ }
+ };
+
+ // Ranges where all registers must be spilled due to call instructions.
+ typedef InlineList<CallRange> CallRangeList;
+ CallRangeList callRangesList;
+ SplayTree<CallRange*, CallRange> callRanges;
+
+ // Information about an allocated stack slot.
+ struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
+ LStackSlot alloc;
+ LiveRangeSet allocated;
+
+ SpillSlot(uint32_t slot, LifoAlloc* alloc)
+ : alloc(slot), allocated(alloc)
+ {}
+ };
+ typedef InlineForwardList<SpillSlot> SpillSlotList;
+
+ // All allocated slots of each width.
+ SpillSlotList normalSlots, doubleSlots, quadSlots;
+
+ public:
+ BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph, bool testbed)
+ : RegisterAllocator(mir, lir, graph),
+ testbed(testbed),
+ liveIn(nullptr),
+ callRanges(nullptr)
+ { }
+
+ MOZ_MUST_USE bool go();
+
+ private:
+
+ typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
+ typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
+
+ // Liveness methods.
+ MOZ_MUST_USE bool init();
+ MOZ_MUST_USE bool buildLivenessInfo();
+
+ MOZ_MUST_USE bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
+
+ VirtualRegister& vreg(const LDefinition* def) {
+ return vregs[def->virtualRegister()];
+ }
+ VirtualRegister& vreg(const LAllocation* alloc) {
+ MOZ_ASSERT(alloc->isUse());
+ return vregs[alloc->toUse()->virtualRegister()];
+ }
+
+ // Allocation methods.
+ MOZ_MUST_USE bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
+ MOZ_MUST_USE bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input);
+ MOZ_MUST_USE bool mergeAndQueueRegisters();
+ MOZ_MUST_USE bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+ bool* success, bool* pfixed, LiveBundleVector& conflicting);
+ MOZ_MUST_USE bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement,
+ Requirement hint, bool* success, bool* pfixed,
+ LiveBundleVector& conflicting);
+ MOZ_MUST_USE bool processBundle(MIRGenerator* mir, LiveBundle* bundle);
+ MOZ_MUST_USE bool computeRequirement(LiveBundle* bundle, Requirement *prequirement,
+ Requirement *phint);
+ MOZ_MUST_USE bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle, bool* success,
+ bool* pfixed, LiveBundleVector& conflicting);
+ MOZ_MUST_USE bool evictBundle(LiveBundle* bundle);
+ MOZ_MUST_USE bool splitAndRequeueBundles(LiveBundle* bundle,
+ const LiveBundleVector& newBundles);
+ MOZ_MUST_USE bool spill(LiveBundle* bundle);
+
+ bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
+ bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
+ bool isRegisterDefinition(LiveRange* range);
+ MOZ_MUST_USE bool pickStackSlot(SpillSet* spill);
+ MOZ_MUST_USE bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);
+
+ // Reification methods.
+ MOZ_MUST_USE bool pickStackSlots();
+ MOZ_MUST_USE bool resolveControlFlow();
+ MOZ_MUST_USE bool reifyAllocations();
+ MOZ_MUST_USE bool populateSafepoints();
+ MOZ_MUST_USE bool annotateMoveGroups();
+ MOZ_MUST_USE bool deadRange(LiveRange* range);
+ size_t findFirstNonCallSafepoint(CodePosition from);
+ size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
+ void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
+
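+    // Move-insertion helpers. addMove() requires distinct allocations; the
+    // move* wrappers below simply succeed when the source and target ranges
+    // already share an allocation, and otherwise append a move to the
+    // appropriate move group.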
+ MOZ_MUST_USE bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ LAllocation fromAlloc = from->bundle()->allocation();
+ LAllocation toAlloc = to->bundle()->allocation();
+ MOZ_ASSERT(fromAlloc != toAlloc);
+ return moves->add(fromAlloc, toAlloc, type);
+ }
+
+ MOZ_MUST_USE bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation())
+ return true;
+ LMoveGroup* moves = getInputMoveGroup(ins);
+ return addMove(moves, from, to, type);
+ }
+
+ MOZ_MUST_USE bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation())
+ return true;
+ LMoveGroup* moves = getMoveGroupAfter(ins);
+ return addMove(moves, from, to, type);
+ }
+
+ MOZ_MUST_USE bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation())
+ return true;
+ LMoveGroup* moves = block->getExitMoveGroup(alloc());
+ return addMove(moves, from, to, type);
+ }
+
+ MOZ_MUST_USE bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation())
+ return true;
+ LMoveGroup* moves = block->getEntryMoveGroup(alloc());
+ return addMove(moves, from, to, type);
+ }
+
+ // Debugging methods.
+ void dumpAllocations();
+
+ struct PrintLiveRange;
+
+ bool minimalDef(LiveRange* range, LNode* ins);
+ bool minimalUse(LiveRange* range, UsePosition* use);
+ bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
+
+ // Heuristic methods.
+
+ size_t computePriority(LiveBundle* bundle);
+ size_t computeSpillWeight(LiveBundle* bundle);
+
+ size_t maximumSpillWeight(const LiveBundleVector& bundles);
+
+ MOZ_MUST_USE bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict);
+
+ MOZ_MUST_USE bool splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions);
+ MOZ_MUST_USE bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
+ MOZ_MUST_USE bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
+ bool* success);
+ MOZ_MUST_USE bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
+ bool* success);
+ MOZ_MUST_USE bool splitAcrossCalls(LiveBundle* bundle);
+
+ bool compilingWasm() {
+ return mir->info().compilingWasm();
+ }
+
+ void dumpVregs();
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BacktrackingAllocator_h */
diff --git a/js/src/jit/Bailouts.cpp b/js/src/jit/Bailouts.cpp
new file mode 100644
index 000000000..d5172c6a3
--- /dev/null
+++ b/js/src/jit/Bailouts.cpp
@@ -0,0 +1,314 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include "jscntxt.h"
+
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitSpewer.h"
+#include "jit/Snapshots.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/JitFrameIterator-inl.h"
+#include "vm/Probes-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsInRange;
+
+uint32_t
+jit::Bailout(BailoutStack* sp, BaselineBailoutInfo** bailoutInfo)
+{
+ JSContext* cx = GetJSContextFromMainThread();
+ MOZ_ASSERT(bailoutInfo);
+
+ // We don't have an exit frame.
+ MOZ_ASSERT(IsInRange(FAKE_JIT_TOP_FOR_BAILOUT, 0, 0x1000) &&
+ IsInRange(FAKE_JIT_TOP_FOR_BAILOUT + sizeof(CommonFrameLayout), 0, 0x1000),
+ "Fake jitTop pointer should be within the first page.");
+ cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
+
+ JitActivationIterator jitActivations(cx->runtime());
+ BailoutFrameInfo bailoutData(jitActivations, sp);
+ JitFrameIterator iter(jitActivations);
+ MOZ_ASSERT(!iter.ionScript()->invalidated());
+ CommonFrameLayout* currentFramePtr = iter.current();
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogTimestamp(logger, TraceLogger_Bailout);
+
+ JitSpew(JitSpew_IonBailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset());
+
+ MOZ_ASSERT(IsBaselineEnabled(cx));
+
+ *bailoutInfo = nullptr;
+ uint32_t retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, false, bailoutInfo,
+ /* excInfo = */ nullptr);
+ MOZ_ASSERT(retval == BAILOUT_RETURN_OK ||
+ retval == BAILOUT_RETURN_FATAL_ERROR ||
+ retval == BAILOUT_RETURN_OVERRECURSED);
+ MOZ_ASSERT_IF(retval == BAILOUT_RETURN_OK, *bailoutInfo != nullptr);
+
+ if (retval != BAILOUT_RETURN_OK) {
+ JSScript* script = iter.script();
+ probes::ExitScript(cx, script, script->functionNonDelazifying(),
+ /* popSPSFrame = */ false);
+ }
+
+ // This condition was wrong when we entered this bailout function, but it
+ // might be true now. A GC might have reclaimed all the Jit code and
+ // invalidated all frames which are currently on the stack. As we are
+ // already in a bailout, we could not switch to an invalidation
+ // bailout. When the code of an IonScript which is on the stack is
+ // invalidated (see InvalidateActivation), we remove references to it and
+    // increment the reference counter for each activation that appears on the
+ // stack. As the bailed frame is one of them, we have to decrement it now.
+ if (iter.ionScript()->invalidated())
+ iter.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
+
+ // NB: Commentary on how |lastProfilingFrame| is set from bailouts.
+ //
+ // Once we return to jitcode, any following frames might get clobbered,
+ // but the current frame will not (as it will be clobbered "in-place"
+ // with a baseline frame that will share the same frame prefix).
+ // However, there may be multiple baseline frames unpacked from this
+ // single Ion frame, which means we will need to once again reset
+ // |lastProfilingFrame| to point to the correct unpacked last frame
+ // in |FinishBailoutToBaseline|.
+ //
+ // In the case of error, the jitcode will jump immediately to an
+ // exception handler, which will unwind the frames and properly set
+ // the |lastProfilingFrame| to point to the frame being resumed into
+ // (see |AutoResetLastProfilerFrameOnReturnFromException|).
+ //
+ // In both cases, we want to temporarily set the |lastProfilingFrame|
+ // to the current frame being bailed out, and then fix it up later.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
+
+ return retval;
+}
+
+uint32_t
+jit::InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut,
+ BaselineBailoutInfo** bailoutInfo)
+{
+ sp->checkInvariants();
+
+ JSContext* cx = GetJSContextFromMainThread();
+
+ // We don't have an exit frame.
+ cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
+
+ JitActivationIterator jitActivations(cx->runtime());
+ BailoutFrameInfo bailoutData(jitActivations, sp);
+ JitFrameIterator iter(jitActivations);
+ CommonFrameLayout* currentFramePtr = iter.current();
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogTimestamp(logger, TraceLogger_Invalidation);
+
+ JitSpew(JitSpew_IonBailouts, "Took invalidation bailout! Snapshot offset: %d", iter.snapshotOffset());
+
+ // Note: the frame size must be computed before we return from this function.
+ *frameSizeOut = iter.frameSize();
+
+ MOZ_ASSERT(IsBaselineEnabled(cx));
+
+ *bailoutInfo = nullptr;
+ uint32_t retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, true, bailoutInfo,
+ /* excInfo = */ nullptr);
+ MOZ_ASSERT(retval == BAILOUT_RETURN_OK ||
+ retval == BAILOUT_RETURN_FATAL_ERROR ||
+ retval == BAILOUT_RETURN_OVERRECURSED);
+ MOZ_ASSERT_IF(retval == BAILOUT_RETURN_OK, *bailoutInfo != nullptr);
+
+ if (retval != BAILOUT_RETURN_OK) {
+        // If the bailout failed, then the bailout trampoline will pop the
+ // current frame and jump straight to exception handling code when
+ // this function returns. Any SPS entry pushed for this frame will
+ // be silently forgotten.
+ //
+ // We call ExitScript here to ensure that if the ionScript had SPS
+ // instrumentation, then the SPS entry for it is popped.
+ //
+ // However, if the bailout was during argument check, then a
+ // pseudostack frame would not have been pushed in the first
+ // place, so don't pop anything in that case.
+ JSScript* script = iter.script();
+ probes::ExitScript(cx, script, script->functionNonDelazifying(),
+ /* popSPSFrame = */ false);
+
+#ifdef JS_JITSPEW
+ JitFrameLayout* frame = iter.jsFrame();
+ JitSpew(JitSpew_IonInvalidate, "Bailout failed (%s)",
+ (retval == BAILOUT_RETURN_FATAL_ERROR) ? "Fatal Error" : "Over Recursion");
+ JitSpew(JitSpew_IonInvalidate, " calleeToken %p", (void*) frame->calleeToken());
+ JitSpew(JitSpew_IonInvalidate, " frameSize %u", unsigned(frame->prevFrameLocalSize()));
+ JitSpew(JitSpew_IonInvalidate, " ra %p", (void*) frame->returnAddress());
+#endif
+ }
+
+ iter.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
+
+ // Make the frame being bailed out the top profiled frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
+
+ return retval;
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ const JitFrameIterator& frame)
+ : machine_(frame.machineState())
+{
+ framePointer_ = (uint8_t*) frame.fp();
+ topFrameSize_ = frame.frameSize();
+ topIonScript_ = frame.ionScript();
+ attachOnJitActivation(activations);
+
+ const OsiIndex* osiIndex = frame.osiIndex();
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
+
+uint32_t
+jit::ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame,
+ ResumeFromException* rfe,
+ const ExceptionBailoutInfo& excInfo,
+ bool* overrecursed)
+{
+ // We can be propagating debug mode exceptions without there being an
+ // actual exception pending. For instance, when we return false from an
+ // operation callback like a timeout handler.
+ MOZ_ASSERT_IF(!excInfo.propagatingIonExceptionForDebugMode(), cx->isExceptionPending());
+
+ uint8_t* prevJitTop = cx->runtime()->jitTop;
+ auto restoreJitTop = mozilla::MakeScopeExit([&]() { cx->runtime()->jitTop = prevJitTop; });
+ cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
+
+ gc::AutoSuppressGC suppress(cx);
+
+ JitActivationIterator jitActivations(cx->runtime());
+ BailoutFrameInfo bailoutData(jitActivations, frame.frame());
+ JitFrameIterator iter(jitActivations);
+ CommonFrameLayout* currentFramePtr = iter.current();
+
+ BaselineBailoutInfo* bailoutInfo = nullptr;
+ uint32_t retval;
+
+ {
+ // Currently we do not tolerate OOM here so as not to complicate the
+ // exception handling code further.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, true,
+ &bailoutInfo, &excInfo);
+ if (retval == BAILOUT_RETURN_FATAL_ERROR && cx->isThrowingOutOfMemory())
+ oomUnsafe.crash("ExceptionHandlerBailout");
+ }
+
+ if (retval == BAILOUT_RETURN_OK) {
+ MOZ_ASSERT(bailoutInfo);
+
+ // Overwrite the kind so HandleException after the bailout returns
+ // false, jumping directly to the exception tail.
+ if (excInfo.propagatingIonExceptionForDebugMode())
+ bailoutInfo->bailoutKind = Bailout_IonExceptionDebugMode;
+
+ rfe->kind = ResumeFromException::RESUME_BAILOUT;
+ rfe->target = cx->runtime()->jitRuntime()->getBailoutTail()->raw();
+ rfe->bailoutInfo = bailoutInfo;
+ } else {
+ // Bailout failed. If the overrecursion check failed, clear the
+ // exception to turn this into an uncatchable error, continue popping
+ // all inline frames and have the caller report the error.
+ MOZ_ASSERT(!bailoutInfo);
+
+ if (retval == BAILOUT_RETURN_OVERRECURSED) {
+ *overrecursed = true;
+ if (!excInfo.propagatingIonExceptionForDebugMode())
+ cx->clearPendingException();
+ } else {
+ MOZ_ASSERT(retval == BAILOUT_RETURN_FATAL_ERROR);
+
+ // Crash for now so as not to complicate the exception handling code
+ // further.
+ MOZ_CRASH();
+ }
+ }
+
+ // Make the frame being bailed out the top profiled frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
+
+ return retval;
+}
+
+// Initialize the declarative environment object, call object, and any
+// arguments object of the current frame.
+bool
+jit::EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp)
+{
+ // Ion does not compile eval scripts.
+ MOZ_ASSERT(!fp.isEvalFrame());
+
+ if (fp.isFunctionFrame()) {
+ // Ion does not handle extra var environments due to parameter
+ // expressions yet.
+ MOZ_ASSERT(!fp.callee()->needsExtraBodyVarEnvironment());
+
+ if (!fp.hasInitialEnvironment() && fp.callee()->needsFunctionEnvironmentObjects()) {
+ if (!fp.initFunctionEnvironmentObjects(cx))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void
+jit::CheckFrequentBailouts(JSContext* cx, JSScript* script, BailoutKind bailoutKind)
+{
+ if (script->hasIonScript()) {
+ // Invalidate if this script keeps bailing out without invalidation. Next time
+ // we compile this script LICM will be disabled.
+ IonScript* ionScript = script->ionScript();
+
+ if (ionScript->bailoutExpected()) {
+ // If we bailout because of the first execution of a basic block,
+ // then we should record which basic block we are returning in,
+ // which should prevent this from happening again. Also note that
+ // the first execution bailout can be related to an inlined script,
+ // so there is no need to penalize the caller.
+ if (bailoutKind != Bailout_FirstExecution && !script->hadFrequentBailouts())
+ script->setHadFrequentBailouts();
+
+ JitSpew(JitSpew_IonInvalidate, "Invalidating due to too many bailouts");
+
+ Invalidate(cx, script);
+ }
+ }
+}
+
+void
+BailoutFrameInfo::attachOnJitActivation(const JitActivationIterator& jitActivations)
+{
+ MOZ_ASSERT(jitActivations.jitTop() == FAKE_JIT_TOP_FOR_BAILOUT);
+ activation_ = jitActivations->asJit();
+ activation_->setBailoutData(this);
+}
+
+BailoutFrameInfo::~BailoutFrameInfo()
+{
+ activation_->cleanBailoutData();
+}
diff --git a/js/src/jit/Bailouts.h b/js/src/jit/Bailouts.h
new file mode 100644
index 000000000..747f59b7d
--- /dev/null
+++ b/js/src/jit/Bailouts.h
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Bailouts_h
+#define jit_Bailouts_h
+
+#include "jstypes.h"
+
+#include "jit/JitFrameIterator.h"
+#include "jit/JitFrames.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+// A "bailout" is a condition in which we need to recover an interpreter frame
+// from an IonFrame. Bailouts can happen for the following reasons:
+// (1) A deoptimization guard, for example, an add overflows or a type check
+// fails.
+// (2) A check or assumption held by the JIT is invalidated by the VM, and
+// JIT code must be thrown away. This includes the GC possibly deciding
+// to evict live JIT code, or a Type Inference reflow.
+//
+// Note that bailouts as described here do not include normal Ion frame
+// inspection, for example, if an exception must be built or the GC needs to
+// scan an Ion frame for gcthings.
+//
+// The second type of bailout needs a different name - "deoptimization" or
+// "deep bailout". Here we are concerned with eager (or maybe "shallow")
+// bailouts, that happen from JIT code. These happen from guards, like:
+//
+// cmp [obj + shape], 0x50M37TH1NG
+// jmp _bailout
+//
+// The bailout target needs to somehow translate the Ion frame (whose state
+// will differ at each program point) to an interpreter frame. This state is
+// captured into the IonScript's snapshot buffer, and for each bailout we know
+// which snapshot corresponds to its state.
+//
+// Roughly, the following needs to happen at the bailout target.
+// (1) Move snapshot ID into a known stack location (registers cannot be
+// mutated).
+// (2) Spill all registers to the stack.
+// (3) Call a Bailout() routine, whose argument is the stack pointer.
+// (4) Bailout() will find the IonScript on the stack, use the snapshot ID
+// to find the structure of the frame, and then use the stack and spilled
+// registers to perform frame conversion.
+// (5) Bailout() returns, and the JIT must immediately return to the
+// interpreter (all frames are converted at once).
+//
+// (2) and (3) are implemented by a trampoline held in the compartment.
+// Naively, we could implement (1) like:
+//
+// _bailout_ID_1:
+// push 1
+// jmp _global_bailout_handler
+// _bailout_ID_2:
+// push 2
+// jmp _global_bailout_handler
+//
+// This takes about 10 extra bytes per guard. On some platforms, we can reduce
+// this overhead to 4 bytes by creating a global jump table, shared again in
+// the compartment:
+//
+// call _global_bailout_handler
+// call _global_bailout_handler
+// call _global_bailout_handler
+// call _global_bailout_handler
+// ...
+// _global_bailout_handler:
+//
+// In the bailout handler, we can recompute which entry in the table was
+// selected by subtracting the return address pushed by the call from the
+// start of the table, and then dividing by the size of a (call X) entry in the
+// table. This gives us a number in [0, TableSize), which we call a
+// "BailoutId".
+//
+// Then, we can provide a per-script mapping from BailoutIds to snapshots,
+// which takes only four bytes per entry.
+//
+// This strategy does not work as given, because the bailout handler has no way
+// to compute the location of an IonScript. Currently, we do not use frame
+// pointers. To account for this we segregate frames into a limited set of
+// "frame sizes", and create a table for each frame size. We also have the
+// option of not using bailout tables, for platforms or situations where the
+// 10-byte per-guard cost is preferable to a bailout table. See JitFrames.h
+// for more detail.
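+//
+// As an illustrative sketch (not the exact per-architecture code), the
+// handler can recover the id from the return address pushed by a table
+// entry roughly as follows:
+//
+//   uint8_t* retAddr;     // pushed by the "call" in the table
+//   uint8_t* tableStart;  // start of the bailout table
+//   size_t entrySize;     // size of one "call _global_bailout_handler"
+//   BailoutId id = (retAddr - tableStart) / entrySize - 1;
+//
+// (minus one because the return address points just past its own call). The
+// actual arithmetic lives in each architecture's bailout handling code.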
+
+static const BailoutId INVALID_BAILOUT_ID = BailoutId(-1);
+
+// Keep this arbitrarily small for now, for testing.
+static const uint32_t BAILOUT_TABLE_SIZE = 16;
+
+// Bailout return codes.
+// N.B. the relative order of these values is hard-coded into ::GenerateBailoutThunk.
+static const uint32_t BAILOUT_RETURN_OK = 0;
+static const uint32_t BAILOUT_RETURN_FATAL_ERROR = 1;
+static const uint32_t BAILOUT_RETURN_OVERRECURSED = 2;
+
+// This address is a magic number made to cause crashes while indicating that we
+// are making an attempt to mark the stack during a bailout.
+static uint8_t * const FAKE_JIT_TOP_FOR_BAILOUT = reinterpret_cast<uint8_t*>(0xba1);
+
+// BailoutStack is an architecture specific pointer to the stack, given by the
+// bailout handler.
+class BailoutStack;
+class InvalidationBailoutStack;
+
+// Must be implemented by each architecture.
+
+// This structure is constructed before recovering the baseline frames for a
+// bailout. It records all the information extracted from the stack that is
+// needed by the JitFrameIterator.
+class BailoutFrameInfo
+{
+ MachineState machine_;
+ uint8_t* framePointer_;
+ size_t topFrameSize_;
+ IonScript* topIonScript_;
+ uint32_t snapshotOffset_;
+ JitActivation* activation_;
+
+ void attachOnJitActivation(const JitActivationIterator& activations);
+
+ public:
+ BailoutFrameInfo(const JitActivationIterator& activations, BailoutStack* sp);
+ BailoutFrameInfo(const JitActivationIterator& activations, InvalidationBailoutStack* sp);
+ BailoutFrameInfo(const JitActivationIterator& activations, const JitFrameIterator& frame);
+ ~BailoutFrameInfo();
+
+ uint8_t* fp() const {
+ return framePointer_;
+ }
+ SnapshotOffset snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ const MachineState* machineState() const {
+ return &machine_;
+ }
+ size_t topFrameSize() const {
+ return topFrameSize_;
+ }
+ IonScript* ionScript() const {
+ return topIonScript_;
+ }
+ JitActivation* activation() const {
+ return activation_;
+ }
+};
+
+MOZ_MUST_USE bool EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp);
+
+struct BaselineBailoutInfo;
+
+// Called from a bailout thunk. Returns a BAILOUT_* error code.
+uint32_t Bailout(BailoutStack* sp, BaselineBailoutInfo** info);
+
+// Called from the invalidation thunk. Returns a BAILOUT_* error code.
+uint32_t InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut,
+ BaselineBailoutInfo** info);
+
+class ExceptionBailoutInfo
+{
+ size_t frameNo_;
+ jsbytecode* resumePC_;
+ size_t numExprSlots_;
+
+ public:
+ ExceptionBailoutInfo(size_t frameNo, jsbytecode* resumePC, size_t numExprSlots)
+ : frameNo_(frameNo),
+ resumePC_(resumePC),
+ numExprSlots_(numExprSlots)
+ { }
+
+ ExceptionBailoutInfo()
+ : frameNo_(0),
+ resumePC_(nullptr),
+ numExprSlots_(0)
+ { }
+
+ bool catchingException() const {
+ return !!resumePC_;
+ }
+ bool propagatingIonExceptionForDebugMode() const {
+ return !resumePC_;
+ }
+
+ size_t frameNo() const {
+ MOZ_ASSERT(catchingException());
+ return frameNo_;
+ }
+ jsbytecode* resumePC() const {
+ MOZ_ASSERT(catchingException());
+ return resumePC_;
+ }
+ size_t numExprSlots() const {
+ MOZ_ASSERT(catchingException());
+ return numExprSlots_;
+ }
+};
+
+// Called from the exception handler to enter a catch or finally block.
+// Returns a BAILOUT_* error code.
+uint32_t ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame,
+ ResumeFromException* rfe,
+ const ExceptionBailoutInfo& excInfo,
+ bool* overrecursed);
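+
+// Illustrative usage sketch (hypothetical variable names): to resume into a
+// catch block of inline frame 2 at |catchPC| with three live expression
+// stack slots, the exception handler would build roughly
+//
+//   ExceptionBailoutInfo excInfo(/* frameNo = */ 2, catchPC, /* numExprSlots = */ 3);
+//   uint32_t ret = ExceptionHandlerBailout(cx, frameIter, rfe, excInfo, &overrecursed);
+//
+// whereas a default-constructed ExceptionBailoutInfo means the exception is
+// only being propagated for debug mode.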
+
+uint32_t FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo);
+
+void CheckFrequentBailouts(JSContext* cx, JSScript* script, BailoutKind bailoutKind);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Bailouts_h */
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
new file mode 100644
index 000000000..8fc8a522d
--- /dev/null
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -0,0 +1,1999 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ScopeExit.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jsprf.h"
+#include "jsutil.h"
+#include "jit/arm/Simulator-arm.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CompileInfo.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/Recover.h"
+#include "jit/RematerializedFrame.h"
+
+#include "vm/ArgumentsObject.h"
+#include "vm/Debugger.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/JitFrames-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// BaselineStackBuilder may reallocate its buffer if the current one is too
+// small. To avoid dangling pointers, BufferPointer represents a pointer into
+// this buffer as a pointer to the header and a fixed offset.
+template <typename T>
+class BufferPointer
+{
+ BaselineBailoutInfo** header_;
+ size_t offset_;
+ bool heap_;
+
+ public:
+ BufferPointer(BaselineBailoutInfo** header, size_t offset, bool heap)
+ : header_(header), offset_(offset), heap_(heap)
+ { }
+
+ T* get() const {
+ BaselineBailoutInfo* header = *header_;
+ if (!heap_)
+ return (T*)(header->incomingStack + offset_);
+
+ uint8_t* p = header->copyStackTop - offset_;
+ MOZ_ASSERT(p >= header->copyStackBottom && p < header->copyStackTop);
+ return (T*)p;
+ }
+
+ void set(const T& value) {
+ *get() = value;
+ }
+
+ // Note: we return a copy instead of a reference, to avoid potential memory
+ // safety hazards when the underlying buffer gets resized.
+ const T operator*() const { return *get(); }
+ T* operator->() const { return get(); }
+};
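+
+// Illustrative sketch (|someValue| is hypothetical; the pattern mirrors
+// InitFromBailout below): a BufferPointer stays valid across buffer
+// reallocation because get() re-derives the raw address from the (possibly
+// updated) header on every access:
+//
+//   BufferPointer<BaselineFrame> blFrame = builder.pointerAtStackOffset<BaselineFrame>(0);
+//   if (!builder.writeValue(someValue, "StackValue"))  // may enlarge() the buffer
+//       return false;
+//   blFrame->setFrameSize(builder.framePushed());      // still safe here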
+
+/**
+ * BaselineStackBuilder helps abstract the process of rebuilding the C stack on the heap.
+ * It takes a bailout iterator and keeps track of the point on the C stack from which
+ * the reconstructed frames will be written.
+ *
+ * It exposes methods to write data into the heap memory storing the reconstructed
+ * stack. It also exposes methods to easily calculate addresses. This includes both the
+ * virtual address that a particular value will be at when it's eventually copied onto
+ * the stack, and the current actual address of that value (whether in the heap-
+ * allocated portion being constructed or on the existing stack).
+ *
+ * The abstraction handles transparent re-allocation of the heap memory when it
+ * needs to be enlarged to accommodate new data. Similarly to the C stack, the
+ * data that's written to the reconstructed stack grows from high to low in memory.
+ *
+ * The lowest region of the allocated memory contains a BaselineBailoutInfo structure that
+ * points to the start and end of the written data.
+ */
+struct BaselineStackBuilder
+{
+ JitFrameIterator& iter_;
+ JitFrameLayout* frame_;
+
+ static size_t HeaderSize() {
+ return AlignBytes(sizeof(BaselineBailoutInfo), sizeof(void*));
+ }
+ size_t bufferTotal_;
+ size_t bufferAvail_;
+ size_t bufferUsed_;
+ uint8_t* buffer_;
+ BaselineBailoutInfo* header_;
+
+ size_t framePushed_;
+
+ BaselineStackBuilder(JitFrameIterator& iter, size_t initialSize)
+ : iter_(iter),
+ frame_(static_cast<JitFrameLayout*>(iter.current())),
+ bufferTotal_(initialSize),
+ bufferAvail_(0),
+ bufferUsed_(0),
+ buffer_(nullptr),
+ header_(nullptr),
+ framePushed_(0)
+ {
+ MOZ_ASSERT(bufferTotal_ >= HeaderSize());
+ MOZ_ASSERT(iter.isBailoutJS());
+ }
+
+ ~BaselineStackBuilder() {
+ js_free(buffer_);
+ }
+
+ MOZ_MUST_USE bool init() {
+ MOZ_ASSERT(!buffer_);
+ MOZ_ASSERT(bufferUsed_ == 0);
+ buffer_ = reinterpret_cast<uint8_t*>(js_calloc(bufferTotal_));
+ if (!buffer_)
+ return false;
+ bufferAvail_ = bufferTotal_ - HeaderSize();
+ bufferUsed_ = 0;
+
+ header_ = reinterpret_cast<BaselineBailoutInfo*>(buffer_);
+ header_->incomingStack = reinterpret_cast<uint8_t*>(frame_);
+ header_->copyStackTop = buffer_ + bufferTotal_;
+ header_->copyStackBottom = header_->copyStackTop;
+ header_->setR0 = 0;
+ header_->valueR0 = UndefinedValue();
+ header_->setR1 = 0;
+ header_->valueR1 = UndefinedValue();
+ header_->resumeFramePtr = nullptr;
+ header_->resumeAddr = nullptr;
+ header_->resumePC = nullptr;
+ header_->monitorStub = nullptr;
+ header_->numFrames = 0;
+ header_->checkGlobalDeclarationConflicts = false;
+ return true;
+ }
+
+ MOZ_MUST_USE bool enlarge() {
+ MOZ_ASSERT(buffer_ != nullptr);
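+        // Refuse to grow if doubling bufferTotal_ would overflow size_t: the
+        // mask has the high bit(s) set that a multiplication by 2 would lose.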
+ if (bufferTotal_ & mozilla::tl::MulOverflowMask<2>::value)
+ return false;
+ size_t newSize = bufferTotal_ * 2;
+ uint8_t* newBuffer = reinterpret_cast<uint8_t*>(js_calloc(newSize));
+ if (!newBuffer)
+ return false;
+ memcpy((newBuffer + newSize) - bufferUsed_, header_->copyStackBottom, bufferUsed_);
+ memcpy(newBuffer, header_, sizeof(BaselineBailoutInfo));
+ js_free(buffer_);
+ buffer_ = newBuffer;
+ bufferTotal_ = newSize;
+ bufferAvail_ = newSize - (HeaderSize() + bufferUsed_);
+
+ header_ = reinterpret_cast<BaselineBailoutInfo*>(buffer_);
+ header_->copyStackTop = buffer_ + bufferTotal_;
+ header_->copyStackBottom = header_->copyStackTop - bufferUsed_;
+ return true;
+ }
+
+ BaselineBailoutInfo* info() {
+ MOZ_ASSERT(header_ == reinterpret_cast<BaselineBailoutInfo*>(buffer_));
+ return header_;
+ }
+
+ BaselineBailoutInfo* takeBuffer() {
+ MOZ_ASSERT(header_ == reinterpret_cast<BaselineBailoutInfo*>(buffer_));
+ buffer_ = nullptr;
+ return header_;
+ }
+
+ void resetFramePushed() {
+ framePushed_ = 0;
+ }
+
+ size_t framePushed() const {
+ return framePushed_;
+ }
+
+ MOZ_MUST_USE bool subtract(size_t size, const char* info = nullptr) {
+ // enlarge the buffer if need be.
+ while (size > bufferAvail_) {
+ if (!enlarge())
+ return false;
+ }
+
+ // write out element.
+ header_->copyStackBottom -= size;
+ bufferAvail_ -= size;
+ bufferUsed_ += size;
+ framePushed_ += size;
+ if (info) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " SUB_%03d %p/%p %-15s",
+ (int) size, header_->copyStackBottom, virtualPointerAtStackOffset(0), info);
+ }
+ return true;
+ }
+
+ template <typename T>
+ MOZ_MUST_USE bool write(const T& t) {
+ MOZ_ASSERT(!(uintptr_t(&t) >= uintptr_t(header_->copyStackBottom) &&
+ uintptr_t(&t) < uintptr_t(header_->copyStackTop)),
+ "Should not reference memory that can be freed");
+ if (!subtract(sizeof(T)))
+ return false;
+ memcpy(header_->copyStackBottom, &t, sizeof(T));
+ return true;
+ }
+
+ template <typename T>
+ MOZ_MUST_USE bool writePtr(T* t, const char* info) {
+ if (!write<T*>(t))
+ return false;
+ if (info)
+ JitSpew(JitSpew_BaselineBailouts,
+ " WRITE_PTR %p/%p %-15s %p",
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info, t);
+ return true;
+ }
+
+ MOZ_MUST_USE bool writeWord(size_t w, const char* info) {
+ if (!write<size_t>(w))
+ return false;
+ if (info) {
+ if (sizeof(size_t) == 4) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " WRITE_WRD %p/%p %-15s %08" PRIxSIZE,
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w);
+ } else {
+ JitSpew(JitSpew_BaselineBailouts,
+ " WRITE_WRD %p/%p %-15s %016" PRIxSIZE,
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w);
+ }
+ }
+ return true;
+ }
+
+ MOZ_MUST_USE bool writeValue(const Value& val, const char* info) {
+ if (!write<Value>(val))
+ return false;
+ if (info) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " WRITE_VAL %p/%p %-15s %016" PRIx64,
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+ *((uint64_t*) &val));
+ }
+ return true;
+ }
+
+ MOZ_MUST_USE bool maybeWritePadding(size_t alignment, size_t after, const char* info) {
+ MOZ_ASSERT(framePushed_ % sizeof(Value) == 0);
+ MOZ_ASSERT(after % sizeof(Value) == 0);
+ size_t offset = ComputeByteAlignment(after, alignment);
+ while (framePushed_ % alignment != offset) {
+ if (!writeValue(MagicValue(JS_ARG_POISON), info))
+ return false;
+ }
+
+ return true;
+ }
+
+ Value popValue() {
+ MOZ_ASSERT(bufferUsed_ >= sizeof(Value));
+ MOZ_ASSERT(framePushed_ >= sizeof(Value));
+ bufferAvail_ += sizeof(Value);
+ bufferUsed_ -= sizeof(Value);
+ framePushed_ -= sizeof(Value);
+ Value result = *((Value*) header_->copyStackBottom);
+ header_->copyStackBottom += sizeof(Value);
+ return result;
+ }
+
+ void popValueInto(PCMappingSlotInfo::SlotLocation loc) {
+ MOZ_ASSERT(PCMappingSlotInfo::ValidSlotLocation(loc));
+ switch(loc) {
+ case PCMappingSlotInfo::SlotInR0:
+ header_->setR0 = 1;
+ header_->valueR0 = popValue();
+ break;
+ case PCMappingSlotInfo::SlotInR1:
+ header_->setR1 = 1;
+ header_->valueR1 = popValue();
+ break;
+ default:
+ MOZ_ASSERT(loc == PCMappingSlotInfo::SlotIgnore);
+ popValue();
+ break;
+ }
+ }
+
+ void setResumeFramePtr(void* resumeFramePtr) {
+ header_->resumeFramePtr = resumeFramePtr;
+ }
+
+ void setResumeAddr(void* resumeAddr) {
+ header_->resumeAddr = resumeAddr;
+ }
+
+ void setResumePC(jsbytecode* pc) {
+ header_->resumePC = pc;
+ }
+
+ void setMonitorStub(ICStub* stub) {
+ header_->monitorStub = stub;
+ }
+
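+    // Offsets smaller than bufferUsed_ land in the heap copy being built;
+    // anything beyond that still refers to the original frame data on the
+    // incoming C stack.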
+ template <typename T>
+ BufferPointer<T> pointerAtStackOffset(size_t offset) {
+ if (offset < bufferUsed_) {
+ // Calculate offset from copyStackTop.
+ offset = header_->copyStackTop - (header_->copyStackBottom + offset);
+ return BufferPointer<T>(&header_, offset, /* heap = */ true);
+ }
+
+ return BufferPointer<T>(&header_, offset - bufferUsed_, /* heap = */ false);
+ }
+
+ BufferPointer<Value> valuePointerAtStackOffset(size_t offset) {
+ return pointerAtStackOffset<Value>(offset);
+ }
+
+ inline uint8_t* virtualPointerAtStackOffset(size_t offset) {
+ if (offset < bufferUsed_)
+ return reinterpret_cast<uint8_t*>(frame_) - (bufferUsed_ - offset);
+ return reinterpret_cast<uint8_t*>(frame_) + (offset - bufferUsed_);
+ }
+
+ inline JitFrameLayout* startFrame() {
+ return frame_;
+ }
+
+ BufferPointer<JitFrameLayout> topFrameAddress() {
+ return pointerAtStackOffset<JitFrameLayout>(0);
+ }
+
+ //
+ // This method should only be called when the builder is in a state where it is
+ // starting to construct the stack frame for the next callee. This means that
+ // the lowest value on the constructed stack is the return address for the previous
+ // caller frame.
+ //
+ // This method is used to compute the value of the frame pointer (e.g. ebp on x86)
+ // that would have been saved by the baseline jitcode when it was entered. In some
+ // cases, this value can be bogus since we can ensure that the caller would have saved
+ // it anyway.
+ //
+ void* calculatePrevFramePtr() {
+ // Get the incoming frame.
+ BufferPointer<JitFrameLayout> topFrame = topFrameAddress();
+ FrameType type = topFrame->prevType();
+
+ // For IonJS, IonAccessorIC and Entry frames, the "saved" frame pointer
+ // in the baseline frame is meaningless, since Ion saves all registers
+ // before calling other ion frames, and the entry frame saves all
+ // registers too.
+ if (type == JitFrame_IonJS || type == JitFrame_Entry || type == JitFrame_IonAccessorIC)
+ return nullptr;
+
+ // BaselineStub - Baseline calling into Ion.
+ // PrevFramePtr needs to point to the BaselineStubFrame's saved frame pointer.
+ // STACK_START_ADDR + JitFrameLayout::Size() + PREV_FRAME_SIZE
+ // - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr()
+ if (type == JitFrame_BaselineStub) {
+ size_t offset = JitFrameLayout::Size() + topFrame->prevFrameLocalSize() +
+ BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr();
+ return virtualPointerAtStackOffset(offset);
+ }
+
+ MOZ_ASSERT(type == JitFrame_Rectifier);
+ // Rectifier - behaviour depends on the frame preceding the rectifier frame, and
+ // whether the arch is x86 or not. The x86 rectifier frame saves the frame pointer,
+ // so we can calculate it directly. For other archs, the previous frame pointer
+ // is stored on the stack in the frame that precedes the rectifier frame.
+ size_t priorOffset = JitFrameLayout::Size() + topFrame->prevFrameLocalSize();
+#if defined(JS_CODEGEN_X86)
+ // On X86, the FramePointer is pushed as the first value in the Rectifier frame.
+ MOZ_ASSERT(BaselineFrameReg == FramePointer);
+ priorOffset -= sizeof(void*);
+ return virtualPointerAtStackOffset(priorOffset);
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_X64)
+ // On X64, ARM, ARM64, and MIPS, the frame pointer save location depends on
+ // the caller of the rectifier frame.
+ BufferPointer<RectifierFrameLayout> priorFrame =
+ pointerAtStackOffset<RectifierFrameLayout>(priorOffset);
+ FrameType priorType = priorFrame->prevType();
+ MOZ_ASSERT(priorType == JitFrame_IonJS || priorType == JitFrame_BaselineStub);
+
+ // If the frame preceding the rectifier is an IonJS frame, then once again
+ // the frame pointer does not matter.
+ if (priorType == JitFrame_IonJS)
+ return nullptr;
+
+ // Otherwise, the frame preceding the rectifier is a BaselineStub frame.
+ // let X = STACK_START_ADDR + JitFrameLayout::Size() + PREV_FRAME_SIZE
+ // X + RectifierFrameLayout::Size()
+ // + ((RectifierFrameLayout*) X)->prevFrameLocalSize()
+ // - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr()
+ size_t extraOffset = RectifierFrameLayout::Size() + priorFrame->prevFrameLocalSize() +
+ BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr();
+ return virtualPointerAtStackOffset(priorOffset + extraOffset);
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Bad architecture!"
+#endif
+ }
+
+ void setCheckGlobalDeclarationConflicts() {
+ header_->checkGlobalDeclarationConflicts = true;
+ }
+};
+
+// Ensure that all value locations are readable from the SnapshotIterator.
+// Remove RInstructionResults from the JitActivation if the frame got recovered
+// ahead of the bailout.
+class SnapshotIteratorForBailout : public SnapshotIterator
+{
+ JitActivation* activation_;
+ JitFrameIterator& iter_;
+
+ public:
+ SnapshotIteratorForBailout(JitActivation* activation, JitFrameIterator& iter)
+ : SnapshotIterator(iter, activation->bailoutData()->machineState()),
+ activation_(activation),
+ iter_(iter)
+ {
+ MOZ_ASSERT(iter.isBailoutJS());
+ }
+
+ ~SnapshotIteratorForBailout() {
+ // The bailout is complete, we no longer need the recover instruction
+ // results.
+ activation_->removeIonFrameRecovery(fp_);
+ }
+
+ // Take previously computed result out of the activation, or compute the
+ // results of all recover instructions contained in the snapshot.
+ MOZ_MUST_USE bool init(JSContext* cx) {
+
+ // Under a bailout, there is no need to invalidate the frame after
+        // evaluating the recover instruction, as invalidation is only
+        // needed for frames whose contents have been introspected.
+ MaybeReadFallback recoverBailout(cx, activation_, &iter_, MaybeReadFallback::Fallback_DoNothing);
+ return initInstructionResults(recoverBailout);
+ }
+};
+
+#ifdef DEBUG
+static inline bool
+IsInlinableFallback(ICFallbackStub* icEntry)
+{
+ return icEntry->isCall_Fallback() || icEntry->isGetProp_Fallback() ||
+ icEntry->isSetProp_Fallback();
+}
+#endif
+
+static inline void*
+GetStubReturnAddress(JSContext* cx, jsbytecode* pc)
+{
+ if (IsGetPropPC(pc))
+ return cx->compartment()->jitCompartment()->baselineGetPropReturnAddr();
+ if (IsSetPropPC(pc))
+ return cx->compartment()->jitCompartment()->baselineSetPropReturnAddr();
+ // This should be a call op of some kind, now.
+ MOZ_ASSERT(IsCallPC(pc));
+ return cx->compartment()->jitCompartment()->baselineCallReturnAddr(JSOp(*pc) == JSOP_NEW);
+}
+
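+// Skip over a goto, nop, loophead, or loopentry at |pc|, returning the pc one
+// step further along; InitFromBailout iterates this to avoid resuming at a
+// JSOP_LOOPENTRY.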
+static inline jsbytecode*
+GetNextNonLoopEntryPc(jsbytecode* pc)
+{
+ JSOp op = JSOp(*pc);
+ if (op == JSOP_GOTO)
+ return pc + GET_JUMP_OFFSET(pc);
+ if (op == JSOP_LOOPENTRY || op == JSOP_NOP || op == JSOP_LOOPHEAD)
+ return GetNextPc(pc);
+ return pc;
+}
+
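+// Return whether a for-in or for-of try note covers |pc| with its iterator
+// live at exactly |stackDepth| (for for-of, the iterator sits below the
+// result object); such iterators must be preserved so they can be closed.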
+static bool
+HasLiveIteratorAtStackDepth(JSScript* script, jsbytecode* pc, uint32_t stackDepth)
+{
+ if (!script->hasTrynotes())
+ return false;
+
+ JSTryNote* tn = script->trynotes()->vector;
+ JSTryNote* tnEnd = tn + script->trynotes()->length;
+ uint32_t pcOffset = uint32_t(pc - script->main());
+ for (; tn != tnEnd; ++tn) {
+ if (pcOffset < tn->start)
+ continue;
+ if (pcOffset >= tn->start + tn->length)
+ continue;
+
+ // For-in loops have only the iterator on stack.
+ if (tn->kind == JSTRY_FOR_IN && stackDepth == tn->stackDepth)
+ return true;
+
+ // For-of loops have both the iterator and the result object on
+ // stack. The iterator is below the result object.
+ if (tn->kind == JSTRY_FOR_OF && stackDepth == tn->stackDepth - 1)
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+IsPrologueBailout(const SnapshotIterator& iter, const ExceptionBailoutInfo* excInfo)
+{
+ // If we are propagating an exception for debug mode, we will not resume
+ // into baseline code, but instead into HandleExceptionBaseline (i.e.,
+ // never before the prologue).
+ return iter.pcOffset() == 0 && !iter.resumeAfter() &&
+ (!excInfo || !excInfo->propagatingIonExceptionForDebugMode());
+}
+
+// For every inline frame, we write out the following data:
+//
+// | ... |
+// +---------------+
+// | Descr(???) | --- Descr size here is (PREV_FRAME_SIZE)
+// +---------------+
+// | ReturnAddr |
+// -- +===============+ --- OVERWRITE STARTS HERE (START_STACK_ADDR)
+// | | PrevFramePtr |
+// | +-> +---------------+
+// | | | Baseline |
+// | | | Frame |
+// | | +---------------+
+// | | | Fixed0 |
+// | | +---------------+
+// +--< | | ... |
+// | | | +---------------+
+// | | | | FixedF |
+// | | | +---------------+
+// | | | | Stack0 |
+// | | | +---------------+
+// | | | | ... |
+// | | | +---------------+
+// | | | | StackS |
+// | -- | +---------------+ --- IF NOT LAST INLINE FRAME,
+// +------------| Descr(BLJS) | --- CALLING INFO STARTS HERE
+// | +---------------+
+// | | ReturnAddr | <-- return into main jitcode after IC
+// -- | +===============+
+// | | | StubPtr |
+// | | +---------------+
+// | +---| FramePtr |
+// | +---------------+ --- The inlined frame might OSR in Ion
+// | | Padding? | --- Thus the return address should be aligned.
+// | +---------------+
+// +--< | ArgA |
+// | | +---------------+
+// | | | ... |
+// | | +---------------+
+// | | | Arg0 |
+// | | +---------------+
+// | | | ThisV |
+// | -- +---------------+
+// | | ActualArgC |
+// | +---------------+
+// | | CalleeToken |
+// | +---------------+
+// +------------| Descr(BLStub) |
+// +---------------+
+// | ReturnAddr | <-- return into ICCall_Scripted IC
+// -- +===============+ --- IF CALLEE FORMAL ARGS > ActualArgC
+// | | Padding? |
+// | +---------------+
+// | | UndefinedU |
+// | +---------------+
+// | | ... |
+// | +---------------+
+// | | Undefined0 |
+// +--< +---------------+
+// | | | ArgA |
+// | | +---------------+
+// | | | ... |
+// | | +---------------+
+// | | | Arg0 |
+// | | +---------------+
+// | | | ThisV |
+// | -- +---------------+
+// | | ActualArgC |
+// | +---------------+
+// | | CalleeToken |
+// | +---------------+
+// +------------| Descr(Rect) |
+// +---------------+
+// | ReturnAddr | <-- return into ArgumentsRectifier after call
+// +===============+
+//
+static bool
+InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
+ HandleFunction fun, HandleScript script, IonScript* ionScript,
+ SnapshotIterator& iter, bool invalidate, BaselineStackBuilder& builder,
+ MutableHandle<GCVector<Value>> startFrameFormals, MutableHandleFunction nextCallee,
+ jsbytecode** callPC, const ExceptionBailoutInfo* excInfo)
+{
+ // The Baseline frames we will reconstruct on the heap are not rooted, so GC
+ // must be suppressed here.
+ MOZ_ASSERT(cx->mainThread().suppressGC);
+
+ MOZ_ASSERT(script->hasBaselineScript());
+
+ // Are we catching an exception?
+ bool catchingException = excInfo && excInfo->catchingException();
+
+ // If we are catching an exception, we are bailing out to a catch or
+ // finally block and this is the frame where we will resume. Usually the
+ // expression stack should be empty in this case but there can be
+ // iterators on the stack.
+ uint32_t exprStackSlots;
+ if (catchingException)
+ exprStackSlots = excInfo->numExprSlots();
+ else
+ exprStackSlots = iter.numAllocations() - (script->nfixed() + CountArgSlots(script, fun));
+
+ builder.resetFramePushed();
+
+ // Build first baseline frame:
+ // +===============+
+ // | PrevFramePtr |
+ // +---------------+
+ // | Baseline |
+ // | Frame |
+ // +---------------+
+ // | Fixed0 |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | FixedF |
+ // +---------------+
+ // | Stack0 |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | StackS |
+ // +---------------+ --- IF NOT LAST INLINE FRAME,
+ // | Descr(BLJS) | --- CALLING INFO STARTS HERE
+ // +---------------+
+ // | ReturnAddr | <-- return into main jitcode after IC
+ // +===============+
+
+ JitSpew(JitSpew_BaselineBailouts, " Unpacking %s:%" PRIuSIZE, script->filename(), script->lineno());
+ JitSpew(JitSpew_BaselineBailouts, " [BASELINE-JS FRAME]");
+
+ // Calculate and write the previous frame pointer value.
+ // Record the virtual stack offset at this location. Later on, if we end up
+ // writing out a BaselineStub frame for the next callee, we'll need to save the
+ // address.
+ void* prevFramePtr = builder.calculatePrevFramePtr();
+ if (!builder.writePtr(prevFramePtr, "PrevFramePtr"))
+ return false;
+ prevFramePtr = builder.virtualPointerAtStackOffset(0);
+
+ // Write struct BaselineFrame.
+ if (!builder.subtract(BaselineFrame::Size(), "BaselineFrame"))
+ return false;
+ BufferPointer<BaselineFrame> blFrame = builder.pointerAtStackOffset<BaselineFrame>(0);
+
+ uint32_t flags = 0;
+
+ // If we are bailing to a script whose execution is observed, mark the
+ // baseline frame as a debuggee frame. This is to cover the case where we
+ // don't rematerialize the Ion frame via the Debugger.
+ if (script->isDebuggee())
+ flags |= BaselineFrame::DEBUGGEE;
+
+ // Initialize BaselineFrame's envChain and argsObj
+ JSObject* envChain = nullptr;
+ Value returnValue;
+ ArgumentsObject* argsObj = nullptr;
+ BailoutKind bailoutKind = iter.bailoutKind();
+ if (bailoutKind == Bailout_ArgumentCheck) {
+ // Temporary hack -- skip the (unused) envChain, because it could be
+ // bogus (we can fail before the env chain slot is set). Strip the
+ // hasEnvironmentChain flag and this will be fixed up later in
+ // |FinishBailoutToBaseline|, which calls
+ // |EnsureHasEnvironmentObjects|.
+ JitSpew(JitSpew_BaselineBailouts, " Bailout_ArgumentCheck! (no valid envChain)");
+ iter.skip();
+
+ // skip |return value|
+ iter.skip();
+ returnValue = UndefinedValue();
+
+ // Scripts with |argumentsHasVarBinding| have an extra slot.
+ if (script->argumentsHasVarBinding()) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " Bailout_ArgumentCheck for script with argumentsHasVarBinding!"
+ "Using empty arguments object");
+ iter.skip();
+ }
+ } else {
+ Value v = iter.read();
+ if (v.isObject()) {
+ envChain = &v.toObject();
+ if (fun &&
+ ((fun->needsCallObject() && envChain->is<CallObject>()) ||
+ (fun->needsNamedLambdaEnvironment() &&
+ !fun->needsCallObject() &&
+ envChain->is<LexicalEnvironmentObject>() &&
+ &envChain->as<LexicalEnvironmentObject>().scope() ==
+ script->maybeNamedLambdaScope())))
+ {
+ MOZ_ASSERT(!fun->needsExtraBodyVarEnvironment());
+ flags |= BaselineFrame::HAS_INITIAL_ENV;
+ }
+ } else {
+ MOZ_ASSERT(v.isUndefined() || v.isMagic(JS_OPTIMIZED_OUT));
+
+ // Get env chain from function or script.
+ if (fun) {
+ // If pcOffset == 0, we may have to push a new call object, so
+ // we leave envChain nullptr and enter baseline code before
+ // the prologue.
+ if (!IsPrologueBailout(iter, excInfo))
+ envChain = fun->environment();
+ } else if (script->module()) {
+ envChain = script->module()->environment();
+ } else {
+ // For global scripts without a non-syntactic env the env
+ // chain is the script's global lexical environment (Ion does
+ // not compile scripts with a non-syntactic global scope).
+ // Also note that it's invalid to resume into the prologue in
+ // this case because the prologue expects the env chain in R1
+ // for eval and global scripts.
+ MOZ_ASSERT(!script->isForEval());
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ envChain = &(script->global().lexicalEnvironment());
+
+ // We have possibly bailed out before Ion could do the global
+ // declaration conflicts check. Since it's invalid to resume
+ // into the prologue, set a flag so FinishBailoutToBaseline
+ // can do the conflict check.
+ if (IsPrologueBailout(iter, excInfo))
+ builder.setCheckGlobalDeclarationConflicts();
+ }
+ }
+
+ // Make sure to add HAS_RVAL to flags here because setFlags() below
+ // will clobber it.
+ returnValue = iter.read();
+ flags |= BaselineFrame::HAS_RVAL;
+
+ // If script maybe has an arguments object, the third slot will hold it.
+ if (script->argumentsHasVarBinding()) {
+ v = iter.read();
+ MOZ_ASSERT(v.isObject() || v.isUndefined() || v.isMagic(JS_OPTIMIZED_OUT));
+ if (v.isObject())
+ argsObj = &v.toObject().as<ArgumentsObject>();
+ }
+ }
+ JitSpew(JitSpew_BaselineBailouts, " EnvChain=%p", envChain);
+ blFrame->setEnvironmentChain(envChain);
+ JitSpew(JitSpew_BaselineBailouts, " ReturnValue=%016" PRIx64, *((uint64_t*) &returnValue));
+ blFrame->setReturnValue(returnValue);
+
+ // Do not need to initialize scratchValue field in BaselineFrame.
+ blFrame->setFlags(flags);
+
+ // initArgsObjUnchecked modifies the frame's flags, so call it after setFlags.
+ if (argsObj)
+ blFrame->initArgsObjUnchecked(*argsObj);
+
+ if (fun) {
+ // The unpacked thisv and arguments should overwrite the pushed args present
+ // in the calling frame.
+ Value thisv = iter.read();
+ JitSpew(JitSpew_BaselineBailouts, " Is function!");
+ JitSpew(JitSpew_BaselineBailouts, " thisv=%016" PRIx64, *((uint64_t*) &thisv));
+
+ size_t thisvOffset = builder.framePushed() + JitFrameLayout::offsetOfThis();
+ builder.valuePointerAtStackOffset(thisvOffset).set(thisv);
+
+ MOZ_ASSERT(iter.numAllocations() >= CountArgSlots(script, fun));
+ JitSpew(JitSpew_BaselineBailouts, " frame slots %u, nargs %" PRIuSIZE ", nfixed %" PRIuSIZE,
+ iter.numAllocations(), fun->nargs(), script->nfixed());
+
+ if (!callerPC) {
+ // This is the first frame. Store the formals in a Vector until we
+ // are done. Due to UCE and phi elimination, we could store an
+ // UndefinedValue() here for formals we think are unused, but
+ // locals may still reference the original argument slot
+ // (MParameter/LArgument) and expect the original Value.
+ MOZ_ASSERT(startFrameFormals.empty());
+ if (!startFrameFormals.resize(fun->nargs()))
+ return false;
+ }
+
+ for (uint32_t i = 0; i < fun->nargs(); i++) {
+ Value arg = iter.read();
+ JitSpew(JitSpew_BaselineBailouts, " arg %d = %016" PRIx64,
+ (int) i, *((uint64_t*) &arg));
+ if (callerPC) {
+ size_t argOffset = builder.framePushed() + JitFrameLayout::offsetOfActualArg(i);
+ builder.valuePointerAtStackOffset(argOffset).set(arg);
+ } else {
+ startFrameFormals[i].set(arg);
+ }
+ }
+ }
+
+ for (uint32_t i = 0; i < script->nfixed(); i++) {
+ Value slot = iter.read();
+ if (!builder.writeValue(slot, "FixedValue"))
+ return false;
+ }
+
+ // Get the pc. If we are handling an exception, resume at the pc of the
+ // catch or finally block.
+ jsbytecode* pc = catchingException ? excInfo->resumePC() : script->offsetToPC(iter.pcOffset());
+ bool resumeAfter = catchingException ? false : iter.resumeAfter();
+
+ // When pgo is enabled, increment the counter of the block in which we
+ // resume, as Ion does not keep track of the code coverage.
+ //
+ // We need to do that when pgo is enabled, as after a specific number of
+ // FirstExecution bailouts, we invalidate and recompile the script with
+ // IonMonkey. Failing to increment the counter of the current basic block
+ // might lead to repeated bailouts and invalidations.
+ if (!JitOptions.disablePgo && script->hasScriptCounts())
+ script->incHitCount(pc);
+
+ JSOp op = JSOp(*pc);
+
+ // Fixup inlined JSOP_FUNCALL, JSOP_FUNAPPLY, and accessors on the caller side.
+    // On the caller side the stack must look as if the function was never inlined.
+ uint32_t pushedSlots = 0;
+ AutoValueVector savedCallerArgs(cx);
+ bool needToSaveArgs = op == JSOP_FUNAPPLY || IsGetPropPC(pc) || IsSetPropPC(pc);
+ if (iter.moreFrames() && (op == JSOP_FUNCALL || needToSaveArgs))
+ {
+ uint32_t inlined_args = 0;
+ if (op == JSOP_FUNCALL)
+ inlined_args = 2 + GET_ARGC(pc) - 1;
+ else if (op == JSOP_FUNAPPLY)
+ inlined_args = 2 + blFrame->numActualArgs();
+ else
+ inlined_args = 2 + IsSetPropPC(pc);
+
+ MOZ_ASSERT(exprStackSlots >= inlined_args);
+ pushedSlots = exprStackSlots - inlined_args;
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " pushing %u expression stack slots before fixup",
+ pushedSlots);
+ for (uint32_t i = 0; i < pushedSlots; i++) {
+ Value v = iter.read();
+ if (!builder.writeValue(v, "StackValue"))
+ return false;
+ }
+
+ if (op == JSOP_FUNCALL) {
+ // When funcall got inlined and the native js_fun_call was bypassed,
+ // the stack state is incorrect. To restore correctly it must look like
+ // js_fun_call was actually called. This means transforming the stack
+ // from |target, this, args| to |js_fun_call, target, this, args|
+                // The js_fun_call is never read, so we just push undefined now.
+ JitSpew(JitSpew_BaselineBailouts, " pushing undefined to fixup funcall");
+ if (!builder.writeValue(UndefinedValue(), "StackValue"))
+ return false;
+ }
+
+ if (needToSaveArgs) {
+ // When an accessor is inlined, the whole thing is a lie. There
+ // should never have been a call there. Fix the caller's stack to
+ // forget it ever happened.
+
+ // When funapply gets inlined we take all arguments out of the
+ // arguments array. So the stack state is incorrect. To restore
+ // correctly it must look like js_fun_apply was actually called.
+ // This means transforming the stack from |target, this, arg1, ...|
+ // to |js_fun_apply, target, this, argObject|.
+ // Since the information is never read, we can just push undefined
+ // for all values.
+ if (op == JSOP_FUNAPPLY) {
+ JitSpew(JitSpew_BaselineBailouts, " pushing 4x undefined to fixup funapply");
+ if (!builder.writeValue(UndefinedValue(), "StackValue"))
+ return false;
+ if (!builder.writeValue(UndefinedValue(), "StackValue"))
+ return false;
+ if (!builder.writeValue(UndefinedValue(), "StackValue"))
+ return false;
+ if (!builder.writeValue(UndefinedValue(), "StackValue"))
+ return false;
+ }
+            // Save the actual arguments. They are needed on the callee side
+            // as its arguments; otherwise we cannot recover them.
+ if (!savedCallerArgs.resize(inlined_args))
+ return false;
+ for (uint32_t i = 0; i < inlined_args; i++)
+ savedCallerArgs[i].set(iter.read());
+
+ if (IsSetPropPC(pc)) {
+ // We would love to just save all the arguments and leave them
+                // in the stub frame pushed below, but we will lose the initial
+ // argument which the function was called with, which we must
+ // return to the caller, even if the setter internally modifies
+ // its arguments. Stash the initial argument on the stack, to be
+ // later retrieved by the SetProp_Fallback stub.
+ Value initialArg = savedCallerArgs[inlined_args - 1];
+ JitSpew(JitSpew_BaselineBailouts, " pushing setter's initial argument");
+ if (!builder.writeValue(initialArg, "StackValue"))
+ return false;
+ }
+ pushedSlots = exprStackSlots;
+ }
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " pushing %u expression stack slots",
+ exprStackSlots - pushedSlots);
+ for (uint32_t i = pushedSlots; i < exprStackSlots; i++) {
+ Value v;
+
+ if (!iter.moreFrames() && i == exprStackSlots - 1 &&
+ cx->runtime()->jitRuntime()->hasIonReturnOverride())
+ {
+ // If coming from an invalidation bailout, and this is the topmost
+ // value, and a value override has been specified, don't read from the
+ // iterator. Otherwise, we risk using a garbage value.
+ MOZ_ASSERT(invalidate);
+ iter.skip();
+ JitSpew(JitSpew_BaselineBailouts, " [Return Override]");
+ v = cx->runtime()->jitRuntime()->takeIonReturnOverride();
+ } else if (excInfo && excInfo->propagatingIonExceptionForDebugMode()) {
+ // If we are in the middle of propagating an exception from Ion by
+ // bailing to baseline due to debug mode, we might not have all
+ // the stack if we are at the newest frame.
+ //
+ // For instance, if calling |f()| pushed an Ion frame which threw,
+ // the snapshot expects the return value to be pushed, but it's
+ // possible nothing was pushed before we threw. We can't drop
+ // iterators, however, so read them out. They will be closed by
+ // HandleExceptionBaseline.
+ MOZ_ASSERT(cx->compartment()->isDebuggee());
+ if (iter.moreFrames() || HasLiveIteratorAtStackDepth(script, pc, i + 1)) {
+ v = iter.read();
+ } else {
+ iter.skip();
+ v = MagicValue(JS_OPTIMIZED_OUT);
+ }
+ } else {
+ v = iter.read();
+ }
+ if (!builder.writeValue(v, "StackValue"))
+ return false;
+ }
+
+ // BaselineFrame::frameSize is the size of everything pushed since
+ // the builder.resetFramePushed() call.
+ uint32_t frameSize = builder.framePushed();
+ blFrame->setFrameSize(frameSize);
+ JitSpew(JitSpew_BaselineBailouts, " FrameSize=%u", frameSize);
+
+    // numValueSlots() is based on the frame size, so do some sanity checks.
+ MOZ_ASSERT(blFrame->numValueSlots() >= script->nfixed());
+ MOZ_ASSERT(blFrame->numValueSlots() <= script->nslots());
+
+ // If we are resuming at a LOOPENTRY op, resume at the next op to avoid
+ // a bailout -> enter Ion -> bailout loop with --ion-eager. See also
+ // ThunkToInterpreter.
+ //
+ // The algorithm below is the "tortoise and the hare" algorithm. See bug
+ // 994444 for more explanation.
+ if (!resumeAfter) {
+ jsbytecode* fasterPc = pc;
+ while (true) {
+ pc = GetNextNonLoopEntryPc(pc);
+ fasterPc = GetNextNonLoopEntryPc(GetNextNonLoopEntryPc(fasterPc));
+ if (fasterPc == pc)
+ break;
+ }
+ op = JSOp(*pc);
+ }
+
+ uint32_t pcOff = script->pcToOffset(pc);
+ bool isCall = IsCallPC(pc);
+ BaselineScript* baselineScript = script->baselineScript();
+
+#ifdef DEBUG
+ uint32_t expectedDepth;
+ bool reachablePC;
+ if (!ReconstructStackDepth(cx, script, resumeAfter ? GetNextPc(pc) : pc, &expectedDepth, &reachablePC))
+ return false;
+
+ if (reachablePC) {
+ if (op != JSOP_FUNAPPLY || !iter.moreFrames() || resumeAfter) {
+ if (op == JSOP_FUNCALL) {
+                // For fun.call(this, ...); the reconstructed stack depth
+                // includes the |this| value, but it is not included when the
+                // call is inlined, so exprStackSlots will be one less.
+ MOZ_ASSERT(expectedDepth - exprStackSlots <= 1);
+ } else if (iter.moreFrames() && (IsGetPropPC(pc) || IsSetPropPC(pc))) {
+ // Accessors coming out of ion are inlined via a complete
+ // lie perpetrated by the compiler internally. Ion just rearranges
+ // the stack, and pretends that it looked like a call all along.
+ // This means that the depth is actually one *more* than expected
+ // by the interpreter, as there is now a JSFunction, |this| and [arg],
+                // rather than the expected |this| and [arg].
+ // Note that none of that was pushed, but it's still reflected
+ // in exprStackSlots.
+ MOZ_ASSERT(exprStackSlots - expectedDepth == 1);
+ } else {
+                // For fun.apply({}, arguments), ReconstructStackDepth will
+                // report a depth of 4, but it could be that we inlined the
+                // funapply. In that case exprStackSlots holds the real
+                // arguments and need not be 4.
+ MOZ_ASSERT(exprStackSlots == expectedDepth);
+ }
+ }
+ }
+#endif
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_BaselineBailouts, " Resuming %s pc offset %d (op %s) (line %d) of %s:%" PRIuSIZE,
+ resumeAfter ? "after" : "at", (int) pcOff, CodeName[op],
+ PCToLineNumber(script, pc), script->filename(), script->lineno());
+ JitSpew(JitSpew_BaselineBailouts, " Bailout kind: %s",
+ BailoutKindString(bailoutKind));
+#endif
+
+ bool pushedNewTarget = op == JSOP_NEW;
+
+ // If this was the last inline frame, or we are bailing out to a catch or
+ // finally block in this frame, then unpacking is almost done.
+ if (!iter.moreFrames() || catchingException) {
+ // Last frame, so PC for call to next frame is set to nullptr.
+ *callPC = nullptr;
+
+ // If the bailout was a resumeAfter, and the opcode is monitored,
+ // then the bailed out state should be in a position to enter
+ // into the ICTypeMonitor chain for the op.
+ bool enterMonitorChain = false;
+ if (resumeAfter && (CodeSpec[op].format & JOF_TYPESET)) {
+ // Not every monitored op has a monitored fallback stub, e.g.
+ // JSOP_NEWOBJECT, which always returns the same type for a
+ // particular script/pc location.
+ BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+ ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback();
+ if (fallbackStub->isMonitoredFallback())
+ enterMonitorChain = true;
+ }
+
+ uint32_t numCallArgs = isCall ? GET_ARGC(pc) : 0;
+
+ if (resumeAfter && !enterMonitorChain)
+ pc = GetNextPc(pc);
+
+ builder.setResumePC(pc);
+ builder.setResumeFramePtr(prevFramePtr);
+
+ if (enterMonitorChain) {
+ BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+ ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback();
+ MOZ_ASSERT(fallbackStub->isMonitoredFallback());
+ JitSpew(JitSpew_BaselineBailouts, " [TYPE-MONITOR CHAIN]");
+ ICMonitoredFallbackStub* monFallbackStub = fallbackStub->toMonitoredFallbackStub();
+ ICStub* firstMonStub = monFallbackStub->fallbackMonitorStub()->firstMonitorStub();
+
+ // To enter a monitoring chain, we load the top stack value into R0
+ JitSpew(JitSpew_BaselineBailouts, " Popping top stack value into R0.");
+ builder.popValueInto(PCMappingSlotInfo::SlotInR0);
+
+ // Need to adjust the frameSize for the frame to match the values popped
+ // into registers.
+ frameSize -= sizeof(Value);
+ blFrame->setFrameSize(frameSize);
+ JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize -= %d: %d",
+ (int) sizeof(Value), (int) frameSize);
+
+ // If resuming into a JSOP_CALL, baseline keeps the arguments on the
+ // stack and pops them only after returning from the call IC.
+ // Push undefs onto the stack in anticipation of the popping of the
+ // callee, thisv, and actual arguments passed from the caller's frame.
+ if (isCall) {
+ if (!builder.writeValue(UndefinedValue(), "CallOp FillerCallee"))
+ return false;
+ if (!builder.writeValue(UndefinedValue(), "CallOp FillerThis"))
+ return false;
+ for (uint32_t i = 0; i < numCallArgs; i++) {
+ if (!builder.writeValue(UndefinedValue(), "CallOp FillerArg"))
+ return false;
+ }
+ if (pushedNewTarget) {
+ if (!builder.writeValue(UndefinedValue(), "CallOp FillerNewTarget"))
+ return false;
+ }
+
+ frameSize += (numCallArgs + 2 + pushedNewTarget) * sizeof(Value);
+ blFrame->setFrameSize(frameSize);
+ JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize += %d: %d",
+ (int) ((numCallArgs + 2 + pushedNewTarget) * sizeof(Value)),
+ (int) frameSize);
+ }
+
+ // Set the resume address to the return point from the IC, and set
+ // the monitor stub addr.
+ builder.setResumeAddr(baselineScript->returnAddressForIC(icEntry));
+ builder.setMonitorStub(firstMonStub);
+ JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p monitorStub=%p",
+ baselineScript->returnAddressForIC(icEntry), firstMonStub);
+
+ } else {
+ // If needed, initialize BaselineBailoutInfo's valueR0 and/or valueR1 with the
+ // top stack values.
+ //
+ // Note that we use the 'maybe' variant of nativeCodeForPC because
+ // of exception propagation for debug mode. See note below.
+ PCMappingSlotInfo slotInfo;
+ uint8_t* nativeCodeForPC;
+
+ if (excInfo && excInfo->propagatingIonExceptionForDebugMode()) {
+ // When propagating an exception for debug mode, set the
+ // resume pc to the throwing pc, so that Debugger hooks report
+ // the correct pc offset of the throwing op instead of its
+ // successor (this pc will be used as the BaselineFrame's
+ // override pc).
+ //
+ // Note that we never resume at this pc, it is set for the sake
+ // of frame iterators giving the correct answer.
+ jsbytecode* throwPC = script->offsetToPC(iter.pcOffset());
+ builder.setResumePC(throwPC);
+ nativeCodeForPC = baselineScript->nativeCodeForPC(script, throwPC);
+ } else {
+ nativeCodeForPC = baselineScript->nativeCodeForPC(script, pc, &slotInfo);
+ }
+ MOZ_ASSERT(nativeCodeForPC);
+
+ unsigned numUnsynced = slotInfo.numUnsynced();
+
+ MOZ_ASSERT(numUnsynced <= 2);
+ PCMappingSlotInfo::SlotLocation loc1, loc2;
+ if (numUnsynced > 0) {
+ loc1 = slotInfo.topSlotLocation();
+ JitSpew(JitSpew_BaselineBailouts, " Popping top stack value into %d.",
+ (int) loc1);
+ builder.popValueInto(loc1);
+ }
+ if (numUnsynced > 1) {
+ loc2 = slotInfo.nextSlotLocation();
+ JitSpew(JitSpew_BaselineBailouts, " Popping next stack value into %d.",
+ (int) loc2);
+ MOZ_ASSERT_IF(loc1 != PCMappingSlotInfo::SlotIgnore, loc1 != loc2);
+ builder.popValueInto(loc2);
+ }
+
+ // Need to adjust the frameSize for the frame to match the values popped
+ // into registers.
+ frameSize -= sizeof(Value) * numUnsynced;
+ blFrame->setFrameSize(frameSize);
+ JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize -= %d: %d",
+ int(sizeof(Value) * numUnsynced), int(frameSize));
+
+ // If envChain is nullptr, then bailout is occurring during argument check.
+ // In this case, resume into the prologue.
+ uint8_t* opReturnAddr;
+ if (envChain == nullptr) {
+ // Global and eval scripts expect the env chain in R1, so only
+ // resume into the prologue for function scripts.
+ MOZ_ASSERT(fun);
+ MOZ_ASSERT(numUnsynced == 0);
+ opReturnAddr = baselineScript->prologueEntryAddr();
+ JitSpew(JitSpew_BaselineBailouts, " Resuming into prologue.");
+
+ } else {
+ opReturnAddr = nativeCodeForPC;
+ }
+ builder.setResumeAddr(opReturnAddr);
+ JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p", opReturnAddr);
+ }
+
+ if (cx->runtime()->spsProfiler.enabled()) {
+ // Register bailout with profiler.
+ const char* filename = script->filename();
+ if (filename == nullptr)
+ filename = "<unknown>";
+ unsigned len = strlen(filename) + 200;
+ char* buf = js_pod_malloc<char>(len);
+ if (buf == nullptr) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ snprintf(buf, len, "%s %s %s on line %u of %s:%" PRIuSIZE,
+ BailoutKindString(bailoutKind),
+ resumeAfter ? "after" : "at",
+ CodeName[op],
+ PCToLineNumber(script, pc),
+ filename,
+ script->lineno());
+ cx->runtime()->spsProfiler.markEvent(buf);
+ js_free(buf);
+ }
+
+ return true;
+ }
+
+ *callPC = pc;
+
+ // Write out descriptor of BaselineJS frame.
+ size_t baselineFrameDescr = MakeFrameDescriptor((uint32_t) builder.framePushed(),
+ JitFrame_BaselineJS,
+ BaselineStubFrameLayout::Size());
+ if (!builder.writeWord(baselineFrameDescr, "Descriptor"))
+ return false;
+
+ // Calculate and write out return address.
+ // The icEntry in question MUST have an inlinable fallback stub.
+ BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff);
+ MOZ_ASSERT(IsInlinableFallback(icEntry.firstStub()->getChainFallback()));
+ if (!builder.writePtr(baselineScript->returnAddressForIC(icEntry), "ReturnAddr"))
+ return false;
+
+ // Build baseline stub frame:
+ // +===============+
+ // | StubPtr |
+ // +---------------+
+ // | FramePtr |
+ // +---------------+
+ // | Padding? |
+ // +---------------+
+ // | ArgA |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Arg0 |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | ActualArgC |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descr(BLStub) |
+ // +---------------+
+ // | ReturnAddr |
+ // +===============+
+
+ JitSpew(JitSpew_BaselineBailouts, " [BASELINE-STUB FRAME]");
+
+ size_t startOfBaselineStubFrame = builder.framePushed();
+
+ // Write stub pointer.
+ MOZ_ASSERT(IsInlinableFallback(icEntry.fallbackStub()));
+ if (!builder.writePtr(icEntry.fallbackStub(), "StubPtr"))
+ return false;
+
+ // Write previous frame pointer (saved earlier).
+ if (!builder.writePtr(prevFramePtr, "PrevFramePtr"))
+ return false;
+ prevFramePtr = builder.virtualPointerAtStackOffset(0);
+
+    // Write out the actual arguments (and thisv), copied from the unpacked stack of
+    // the BaselineJS frame. The arguments are stored in reverse order among the
+    // BaselineJS frame's stack values.
+ MOZ_ASSERT(IsIonInlinablePC(pc));
+ unsigned actualArgc;
+ Value callee;
+ if (needToSaveArgs) {
+        // For FUNAPPLY or an accessor, the arguments are no longer on the stack;
+        // they were copied into a vector and are written out here.
+ if (op == JSOP_FUNAPPLY)
+ actualArgc = blFrame->numActualArgs();
+ else
+ actualArgc = IsSetPropPC(pc);
+ callee = savedCallerArgs[0];
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize = (actualArgc + 1) * sizeof(Value) + JitFrameLayout::Size();
+ if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding"))
+ return false;
+
+ // Push arguments.
+ MOZ_ASSERT(actualArgc + 2 <= exprStackSlots);
+ MOZ_ASSERT(savedCallerArgs.length() == actualArgc + 2);
+ for (unsigned i = 0; i < actualArgc + 1; i++) {
+ size_t arg = savedCallerArgs.length() - (i + 1);
+ if (!builder.writeValue(savedCallerArgs[arg], "ArgVal"))
+ return false;
+ }
+ } else {
+ actualArgc = GET_ARGC(pc);
+ if (op == JSOP_FUNCALL) {
+ MOZ_ASSERT(actualArgc > 0);
+ actualArgc--;
+ }
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize = (actualArgc + 1 + pushedNewTarget) * sizeof(Value) +
+ JitFrameLayout::Size();
+ if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding"))
+ return false;
+
+ // Copy the arguments and |this| from the BaselineFrame, in reverse order.
+ size_t valueSlot = blFrame->numValueSlots() - 1;
+ size_t calleeSlot = valueSlot - actualArgc - 1 - pushedNewTarget;
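+        // From the top down, the topmost slots are new.target (when
+        // constructing), the arguments, and |this|; calleeSlot skips past all
+        // of them to reach the callee.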
+
+ for (size_t i = valueSlot; i > calleeSlot; i--) {
+ Value v = *blFrame->valueSlot(i);
+ if (!builder.writeValue(v, "ArgVal"))
+ return false;
+ }
+
+ callee = *blFrame->valueSlot(calleeSlot);
+ }
+
+    // In case these arguments need to be copied onto the stack again for a rectifier
+    // frame, record the current framePushed value here for later use.
+ size_t endOfBaselineStubArgs = builder.framePushed();
+
+ // Calculate frame size for descriptor.
+ size_t baselineStubFrameSize = builder.framePushed() - startOfBaselineStubFrame;
+ size_t baselineStubFrameDescr = MakeFrameDescriptor((uint32_t) baselineStubFrameSize,
+ JitFrame_BaselineStub,
+ JitFrameLayout::Size());
+
+ // Push actual argc
+ if (!builder.writeWord(actualArgc, "ActualArgc"))
+ return false;
+
+ // Push callee token (must be a JS Function)
+ JitSpew(JitSpew_BaselineBailouts, " Callee = %016" PRIx64, callee.asRawBits());
+
+ JSFunction* calleeFun = &callee.toObject().as<JSFunction>();
+ if (!builder.writePtr(CalleeToToken(calleeFun, JSOp(*pc) == JSOP_NEW), "CalleeToken"))
+ return false;
+ nextCallee.set(calleeFun);
+
+ // Push BaselineStub frame descriptor
+ if (!builder.writeWord(baselineStubFrameDescr, "Descriptor"))
+ return false;
+
+ // Push return address into ICCall_Scripted stub, immediately after the call.
+ void* baselineCallReturnAddr = GetStubReturnAddress(cx, pc);
+ MOZ_ASSERT(baselineCallReturnAddr);
+ if (!builder.writePtr(baselineCallReturnAddr, "ReturnAddr"))
+ return false;
+ MOZ_ASSERT(builder.framePushed() % JitStackAlignment == 0);
+
+    // If actualArgc >= fun->nargs, then we are done. Otherwise, we need to push
+    // a reconstructed rectifier frame.
+ if (actualArgc >= calleeFun->nargs())
+ return true;
+
+ // Push a reconstructed rectifier frame.
+ // +===============+
+ // | Padding? |
+ // +---------------+
+ // | UndefinedU |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Undefined0 |
+ // +---------------+
+ // | ArgA |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Arg0 |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | ActualArgC |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descr(Rect) |
+ // +---------------+
+ // | ReturnAddr |
+ // +===============+
+
+ JitSpew(JitSpew_BaselineBailouts, " [RECTIFIER FRAME]");
+
+ size_t startOfRectifierFrame = builder.framePushed();
+
+    // On x86 only, the frame pointer is saved again in the rectifier frame.
+#if defined(JS_CODEGEN_X86)
+ if (!builder.writePtr(prevFramePtr, "PrevFramePtr-X86Only"))
+ return false;
+ // Follow the same logic as in JitRuntime::generateArgumentsRectifier.
+ prevFramePtr = builder.virtualPointerAtStackOffset(0);
+ if (!builder.writePtr(prevFramePtr, "Padding-X86Only"))
+ return false;
+#endif
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize = (calleeFun->nargs() + 1 + pushedNewTarget) * sizeof(Value) +
+ RectifierFrameLayout::Size();
+ if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding"))
+ return false;
+
+ // Copy new.target, if necessary.
+ if (pushedNewTarget) {
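+        // new.target was written to the BaselineStub frame before |this| and
+        // the arguments, so it sits (actualArgc + 1) Values beyond the end of
+        // the stub arguments; compute that offset from the current stack top.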
+ size_t newTargetOffset = (builder.framePushed() - endOfBaselineStubArgs) +
+ (actualArgc + 1) * sizeof(Value);
+ Value newTargetValue = *builder.valuePointerAtStackOffset(newTargetOffset);
+ if (!builder.writeValue(newTargetValue, "CopiedNewTarget"))
+ return false;
+ }
+
+ // Push undefined for missing arguments.
+ for (unsigned i = 0; i < (calleeFun->nargs() - actualArgc); i++) {
+ if (!builder.writeValue(UndefinedValue(), "FillerVal"))
+ return false;
+ }
+
+ // Copy arguments + thisv from BaselineStub frame.
+ if (!builder.subtract((actualArgc + 1) * sizeof(Value), "CopiedArgs"))
+ return false;
+ BufferPointer<uint8_t> stubArgsEnd =
+ builder.pointerAtStackOffset<uint8_t>(builder.framePushed() - endOfBaselineStubArgs);
+ JitSpew(JitSpew_BaselineBailouts, " MemCpy from %p", stubArgsEnd.get());
+ memcpy(builder.pointerAtStackOffset<uint8_t>(0).get(), stubArgsEnd.get(),
+ (actualArgc + 1) * sizeof(Value));
+
+ // Calculate frame size for descriptor.
+ size_t rectifierFrameSize = builder.framePushed() - startOfRectifierFrame;
+ size_t rectifierFrameDescr = MakeFrameDescriptor((uint32_t) rectifierFrameSize,
+ JitFrame_Rectifier,
+ JitFrameLayout::Size());
+
+ // Push actualArgc
+ if (!builder.writeWord(actualArgc, "ActualArgc"))
+ return false;
+
+ // Push calleeToken again.
+ if (!builder.writePtr(CalleeToToken(calleeFun, JSOp(*pc) == JSOP_NEW), "CalleeToken"))
+ return false;
+
+ // Push rectifier frame descriptor
+ if (!builder.writeWord(rectifierFrameDescr, "Descriptor"))
+ return false;
+
+    // Push the return address into the ArgumentsRectifier code, immediately after
+    // the Ion code call.
+ void* rectReturnAddr = cx->runtime()->jitRuntime()->getArgumentsRectifierReturnAddr();
+ MOZ_ASSERT(rectReturnAddr);
+ if (!builder.writePtr(rectReturnAddr, "ReturnAddr"))
+ return false;
+ MOZ_ASSERT(builder.framePushed() % JitStackAlignment == 0);
+
+ return true;
+}
+
+uint32_t
+jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIterator& iter,
+ bool invalidate, BaselineBailoutInfo** bailoutInfo,
+ const ExceptionBailoutInfo* excInfo)
+{
+ MOZ_ASSERT(bailoutInfo != nullptr);
+ MOZ_ASSERT(*bailoutInfo == nullptr);
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogStopEvent(logger, TraceLogger_IonMonkey);
+ TraceLogStartEvent(logger, TraceLogger_Baseline);
+
+ // Ion bailout can fail due to overrecursion and OOM. In such cases we
+ // cannot honor any further Debugger hooks on the frame, and need to
+ // ensure that its Debugger.Frame entry is cleaned up.
+ auto guardRemoveRematerializedFramesFromDebugger = mozilla::MakeScopeExit([&] {
+ activation->removeRematerializedFramesFromDebugger(cx, iter.fp());
+ });
+
+ // The caller of the top frame must be one of the following:
+ // IonJS - Ion calling into Ion.
+ // BaselineStub - Baseline calling into Ion.
+ // Entry - Interpreter or other calling into Ion.
+    //      Rectifier - Arguments rectifier calling into Ion.
+    //      IonAccessorIC - Ion accessor IC calling into Ion.
+ MOZ_ASSERT(iter.isBailoutJS());
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ FrameType prevFrameType = iter.prevType();
+ MOZ_ASSERT(prevFrameType == JitFrame_IonJS ||
+ prevFrameType == JitFrame_BaselineStub ||
+ prevFrameType == JitFrame_Entry ||
+ prevFrameType == JitFrame_Rectifier ||
+ prevFrameType == JitFrame_IonAccessorIC);
+#endif
+
+ // All incoming frames are going to look like this:
+ //
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Args |
+ // | ... |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | ActualArgC |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descriptor |
+ // +---------------+
+ // | ReturnAddr |
+ // +---------------+
+ // | ||||| | <---- Overwrite starting here.
+ // | ||||| |
+ // | ||||| |
+ // +---------------+
+
+ JitSpew(JitSpew_BaselineBailouts, "Bailing to baseline %s:%" PRIuSIZE " (IonScript=%p) (FrameType=%d)",
+ iter.script()->filename(), iter.script()->lineno(), (void*) iter.ionScript(),
+ (int) prevFrameType);
+
+ bool catchingException;
+ bool propagatingExceptionForDebugMode;
+ if (excInfo) {
+ catchingException = excInfo->catchingException();
+ propagatingExceptionForDebugMode = excInfo->propagatingIonExceptionForDebugMode();
+
+ if (catchingException)
+ JitSpew(JitSpew_BaselineBailouts, "Resuming in catch or finally block");
+
+ if (propagatingExceptionForDebugMode)
+ JitSpew(JitSpew_BaselineBailouts, "Resuming in-place for debug mode");
+ } else {
+ catchingException = false;
+ propagatingExceptionForDebugMode = false;
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " Reading from snapshot offset %u size %" PRIuSIZE,
+ iter.snapshotOffset(), iter.ionScript()->snapshotsListSize());
+
+ if (!excInfo)
+ iter.ionScript()->incNumBailouts();
+ iter.script()->updateBaselineOrIonRaw(cx->runtime());
+
+ // Allocate buffer to hold stack replacement data.
+ BaselineStackBuilder builder(iter, 1024);
+ if (!builder.init()) {
+ ReportOutOfMemory(cx);
+ return BAILOUT_RETURN_FATAL_ERROR;
+ }
+ JitSpew(JitSpew_BaselineBailouts, " Incoming frame ptr = %p", builder.startFrame());
+
+ SnapshotIteratorForBailout snapIter(activation, iter);
+ if (!snapIter.init(cx))
+ return BAILOUT_RETURN_FATAL_ERROR;
+
+#ifdef TRACK_SNAPSHOTS
+ snapIter.spewBailingFrom();
+#endif
+
+ RootedFunction callee(cx, iter.maybeCallee());
+ RootedScript scr(cx, iter.script());
+ if (callee) {
+ JitSpew(JitSpew_BaselineBailouts, " Callee function (%s:%" PRIuSIZE ")",
+ scr->filename(), scr->lineno());
+ } else {
+ JitSpew(JitSpew_BaselineBailouts, " No callee!");
+ }
+
+ if (iter.isConstructing())
+ JitSpew(JitSpew_BaselineBailouts, " Constructing!");
+ else
+ JitSpew(JitSpew_BaselineBailouts, " Not constructing!");
+
+ JitSpew(JitSpew_BaselineBailouts, " Restoring frames:");
+ size_t frameNo = 0;
+
+ // Reconstruct baseline frames using the builder.
+ RootedScript caller(cx);
+ jsbytecode* callerPC = nullptr;
+ RootedFunction fun(cx, callee);
+ Rooted<GCVector<Value>> startFrameFormals(cx, GCVector<Value>(cx));
+
+ gc::AutoSuppressGC suppress(cx);
+
+ while (true) {
+ // Skip recover instructions as they are already recovered by |initInstructionResults|.
+ snapIter.settleOnFrame();
+
+ if (frameNo > 0) {
+            // TraceLogger doesn't create entries for inlined frames, but we
+            // will see them in Baseline. Create the start events for those
+            // entries here so they correspond to what Baseline will see.
+ TraceLoggerEvent scriptEvent(logger, TraceLogger_Scripts, scr);
+ TraceLogStartEvent(logger, scriptEvent);
+ TraceLogStartEvent(logger, TraceLogger_Baseline);
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " FrameNo %" PRIuSIZE, frameNo);
+
+ // If we are bailing out to a catch or finally block in this frame,
+ // pass excInfo to InitFromBailout and don't unpack any other frames.
+ bool handleException = (catchingException && excInfo->frameNo() == frameNo);
+
+ // We also need to pass excInfo if we're bailing out in place for
+ // debug mode.
+ bool passExcInfo = handleException || propagatingExceptionForDebugMode;
+
+ jsbytecode* callPC = nullptr;
+ RootedFunction nextCallee(cx, nullptr);
+ if (!InitFromBailout(cx, caller, callerPC, fun, scr, iter.ionScript(),
+ snapIter, invalidate, builder, &startFrameFormals,
+ &nextCallee, &callPC, passExcInfo ? excInfo : nullptr))
+ {
+ return BAILOUT_RETURN_FATAL_ERROR;
+ }
+
+ if (!snapIter.moreFrames()) {
+ MOZ_ASSERT(!callPC);
+ break;
+ }
+
+ if (handleException)
+ break;
+
+ MOZ_ASSERT(nextCallee);
+ MOZ_ASSERT(callPC);
+ caller = scr;
+ callerPC = callPC;
+ fun = nextCallee;
+ scr = fun->existingScript();
+
+ frameNo++;
+
+ snapIter.nextInstruction();
+ }
+ JitSpew(JitSpew_BaselineBailouts, " Done restoring frames");
+
+ BailoutKind bailoutKind = snapIter.bailoutKind();
+
+ if (!startFrameFormals.empty()) {
+ // Set the first frame's formals, see the comment in InitFromBailout.
+ Value* argv = builder.startFrame()->argv() + 1; // +1 to skip |this|.
+ mozilla::PodCopy(argv, startFrameFormals.begin(), startFrameFormals.length());
+ }
+
+ // Do stack check.
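+    // The reconstructed frames occupy (copyStackTop - copyStackBottom) bytes
+    // below the incoming stack pointer; make sure the resulting stack pointer
+    // still fits under the recursion limit.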
+ bool overRecursed = false;
+ BaselineBailoutInfo *info = builder.info();
+ uint8_t* newsp = info->incomingStack - (info->copyStackTop - info->copyStackBottom);
+#ifdef JS_SIMULATOR
+ if (Simulator::Current()->overRecursed(uintptr_t(newsp)))
+ overRecursed = true;
+#else
+ JS_CHECK_RECURSION_WITH_SP_DONT_REPORT(cx, newsp, overRecursed = true);
+#endif
+ if (overRecursed) {
+ JitSpew(JitSpew_BaselineBailouts, " Overrecursion check failed!");
+ return BAILOUT_RETURN_OVERRECURSED;
+ }
+
+ // Take the reconstructed baseline stack so it doesn't get freed when builder destructs.
+ info = builder.takeBuffer();
+ info->numFrames = frameNo + 1;
+ info->bailoutKind = bailoutKind;
+ *bailoutInfo = info;
+ guardRemoveRematerializedFramesFromDebugger.release();
+ return BAILOUT_RETURN_OK;
+}
+
+static void
+InvalidateAfterBailout(JSContext* cx, HandleScript outerScript, const char* reason)
+{
+    // In some cases, the computation of recover instructions can invalidate
+    // the Ion script before we reach the end of the bailout. Thus, if the
+    // outer script no longer has an Ion script attached, we just skip the
+    // invalidation.
+    //
+    // For example, this can happen if the template object for an unboxed
+    // object no longer matches the content of its properties (see bug 1174547).
+ if (!outerScript->hasIonScript()) {
+ JitSpew(JitSpew_BaselineBailouts, "Ion script is already invalidated");
+ return;
+ }
+
+ MOZ_ASSERT(!outerScript->ionScript()->invalidated());
+
+ JitSpew(JitSpew_BaselineBailouts, "Invalidating due to %s", reason);
+ Invalidate(cx, outerScript);
+}
+
+static void
+HandleBoundsCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
+{
+ JitSpew(JitSpew_IonBailouts, "Bounds check failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE,
+ innerScript->filename(), innerScript->lineno(),
+ outerScript->filename(), outerScript->lineno());
+
+ if (!innerScript->failedBoundsCheck())
+ innerScript->setFailedBoundsCheck();
+
+ InvalidateAfterBailout(cx, outerScript, "bounds check failure");
+ if (innerScript->hasIonScript())
+ Invalidate(cx, innerScript);
+}
+
+static void
+HandleShapeGuardFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
+{
+ JitSpew(JitSpew_IonBailouts, "Shape guard failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE,
+ innerScript->filename(), innerScript->lineno(),
+ outerScript->filename(), outerScript->lineno());
+
+    // TODO: Currently this mimics Ion's handling of this case. Investigate setting
+ // the flag on innerScript as opposed to outerScript, and maybe invalidating both
+ // inner and outer scripts, instead of just the outer one.
+ outerScript->setFailedShapeGuard();
+
+ InvalidateAfterBailout(cx, outerScript, "shape guard failure");
+}
+
+static void
+HandleBaselineInfoBailout(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
+{
+ JitSpew(JitSpew_IonBailouts, "Baseline info failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE,
+ innerScript->filename(), innerScript->lineno(),
+ outerScript->filename(), outerScript->lineno());
+
+ InvalidateAfterBailout(cx, outerScript, "invalid baseline info");
+}
+
+static void
+HandleLexicalCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
+{
+ JitSpew(JitSpew_IonBailouts, "Lexical check failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE,
+ innerScript->filename(), innerScript->lineno(),
+ outerScript->filename(), outerScript->lineno());
+
+ if (!innerScript->failedLexicalCheck())
+ innerScript->setFailedLexicalCheck();
+
+ InvalidateAfterBailout(cx, outerScript, "lexical check failure");
+ if (innerScript->hasIonScript())
+ Invalidate(cx, innerScript);
+}
+
+static bool
+CopyFromRematerializedFrame(JSContext* cx, JitActivation* act, uint8_t* fp, size_t inlineDepth,
+ BaselineFrame* frame)
+{
+ RematerializedFrame* rematFrame = act->lookupRematerializedFrame(fp, inlineDepth);
+
+ // We might not have rematerialized a frame if the user never requested a
+ // Debugger.Frame for it.
+ if (!rematFrame)
+ return true;
+
+ MOZ_ASSERT(rematFrame->script() == frame->script());
+ MOZ_ASSERT(rematFrame->numActualArgs() == frame->numActualArgs());
+
+ frame->setEnvironmentChain(rematFrame->environmentChain());
+
+ if (frame->isFunctionFrame())
+ frame->thisArgument() = rematFrame->thisArgument();
+
+ for (unsigned i = 0; i < frame->numActualArgs(); i++)
+ frame->argv()[i] = rematFrame->argv()[i];
+
+ for (size_t i = 0; i < frame->script()->nfixed(); i++)
+ *frame->valueSlot(i) = rematFrame->locals()[i];
+
+ frame->setReturnValue(rematFrame->returnValue());
+
+ if (rematFrame->hasCachedSavedFrame())
+ frame->setHasCachedSavedFrame();
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " Copied from rematerialized frame at (%p,%" PRIuSIZE ")",
+ fp, inlineDepth);
+
+ // Propagate the debuggee frame flag. For the case where the Debugger did
+ // not rematerialize an Ion frame, the baseline frame has its debuggee
+ // flag set iff its script is considered a debuggee. See the debuggee case
+ // in InitFromBailout.
+ if (rematFrame->isDebuggee()) {
+ frame->setIsDebuggee();
+ return Debugger::handleIonBailout(cx, rematFrame, frame);
+ }
+
+ return true;
+}
+
+uint32_t
+jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
+{
+ // The caller pushes R0 and R1 on the stack without rooting them.
+    // Since GC here is very unlikely, just suppress it.
+ JSContext* cx = GetJSContextFromMainThread();
+ js::gc::AutoSuppressGC suppressGC(cx);
+
+ JitSpew(JitSpew_BaselineBailouts, " Done restoring frames");
+
+ // The current native code pc may not have a corresponding ICEntry, so we
+ // store the bytecode pc in the frame for frame iterators. This pc is
+ // cleared at the end of this function. If we return false, we don't clear
+ // it: the exception handler also needs it and will clear it for us.
+ BaselineFrame* topFrame = GetTopBaselineFrame(cx);
+ topFrame->setOverridePc(bailoutInfo->resumePC);
+
+ uint32_t numFrames = bailoutInfo->numFrames;
+ MOZ_ASSERT(numFrames > 0);
+ BailoutKind bailoutKind = bailoutInfo->bailoutKind;
+ bool checkGlobalDeclarationConflicts = bailoutInfo->checkGlobalDeclarationConflicts;
+
+ // Free the bailout buffer.
+ js_free(bailoutInfo);
+ bailoutInfo = nullptr;
+
+ if (topFrame->environmentChain()) {
+ // Ensure the frame has a call object if it needs one. If the env chain
+ // is nullptr, we will enter baseline code at the prologue so no need to do
+ // anything in that case.
+ if (!EnsureHasEnvironmentObjects(cx, topFrame))
+ return false;
+
+        // If we bailed out before Ion could do the global declaration
+        // conflicts check (for global frames we resume in the body instead
+        // of the prologue, where that check normally runs), do it now.
+ if (checkGlobalDeclarationConflicts) {
+ Rooted<LexicalEnvironmentObject*> lexicalEnv(cx, &cx->global()->lexicalEnvironment());
+ RootedScript script(cx, topFrame->script());
+ if (!CheckGlobalDeclarationConflicts(cx, script, lexicalEnv, cx->global()))
+ return false;
+ }
+ }
+
+ // Create arguments objects for bailed out frames, to maintain the invariant
+ // that script->needsArgsObj() implies frame->hasArgsObj().
+ RootedScript innerScript(cx, nullptr);
+ RootedScript outerScript(cx, nullptr);
+
+ MOZ_ASSERT(cx->currentlyRunningInJit());
+ JitFrameIterator iter(cx);
+ uint8_t* outerFp = nullptr;
+
+ // Iter currently points at the exit frame. Get the previous frame
+ // (which must be a baseline frame), and set it as the last profiling
+ // frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ cx->runtime()->jitActivation->setLastProfilingFrame(iter.prevFp());
+
+ uint32_t frameno = 0;
+ while (frameno < numFrames) {
+ MOZ_ASSERT(!iter.isIonJS());
+
+ if (iter.isBaselineJS()) {
+ BaselineFrame* frame = iter.baselineFrame();
+ MOZ_ASSERT(frame->script()->hasBaselineScript());
+
+            // If the frame doesn't even have an env chain set yet, then it's
+            // resuming into the prologue before the env chain is initialized.
+            // Any necessary args object will also be initialized there.
+ if (frame->environmentChain() && frame->script()->needsArgsObj()) {
+ ArgumentsObject* argsObj;
+ if (frame->hasArgsObj()) {
+ argsObj = &frame->argsObj();
+ } else {
+ argsObj = ArgumentsObject::createExpected(cx, frame);
+ if (!argsObj)
+ return false;
+ }
+
+                // |arguments| is a local binding and needsArgsObj does not
+                // check whether it has been clobbered. Ensure that the local
+                // binding is restored during bailout before storing the
+                // arguments object in the slot.
+ RootedScript script(cx, frame->script());
+ SetFrameArgumentsObject(cx, frame, script, argsObj);
+ }
+
+ if (frameno == 0)
+ innerScript = frame->script();
+
+ if (frameno == numFrames - 1) {
+ outerScript = frame->script();
+ outerFp = iter.fp();
+ }
+
+ frameno++;
+ }
+
+ ++iter;
+ }
+
+ MOZ_ASSERT(innerScript);
+ MOZ_ASSERT(outerScript);
+ MOZ_ASSERT(outerFp);
+
+ // If we rematerialized Ion frames due to debug mode toggling, copy their
+ // values into the baseline frame. We need to do this even when debug mode
+ // is off, as we should respect the mutations made while debug mode was
+ // on.
+ JitActivation* act = cx->runtime()->activation()->asJit();
+ if (act->hasRematerializedFrame(outerFp)) {
+ JitFrameIterator iter(cx);
+ size_t inlineDepth = numFrames;
+ bool ok = true;
+ while (inlineDepth > 0) {
+ if (iter.isBaselineJS()) {
+ // We must attempt to copy all rematerialized frames over,
+ // even if earlier ones failed, to invoke the proper frame
+ // cleanup in the Debugger.
+ ok = CopyFromRematerializedFrame(cx, act, outerFp, --inlineDepth,
+ iter.baselineFrame());
+ }
+ ++iter;
+ }
+
+ // After copying from all the rematerialized frames, remove them from
+ // the table to keep the table up to date.
+ act->removeRematerializedFrame(outerFp);
+
+ if (!ok)
+ return false;
+ }
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " Restored outerScript=(%s:%" PRIuSIZE ",%u) innerScript=(%s:%" PRIuSIZE ",%u) (bailoutKind=%u)",
+ outerScript->filename(), outerScript->lineno(), outerScript->getWarmUpCount(),
+ innerScript->filename(), innerScript->lineno(), innerScript->getWarmUpCount(),
+ (unsigned) bailoutKind);
+
+ switch (bailoutKind) {
+ // Normal bailouts.
+ case Bailout_Inevitable:
+ case Bailout_DuringVMCall:
+ case Bailout_NonJSFunctionCallee:
+ case Bailout_DynamicNameNotFound:
+ case Bailout_StringArgumentsEval:
+ case Bailout_Overflow:
+ case Bailout_Round:
+ case Bailout_NonPrimitiveInput:
+ case Bailout_PrecisionLoss:
+ case Bailout_TypeBarrierO:
+ case Bailout_TypeBarrierV:
+ case Bailout_MonitorTypes:
+ case Bailout_Hole:
+ case Bailout_NegativeIndex:
+ case Bailout_NonInt32Input:
+ case Bailout_NonNumericInput:
+ case Bailout_NonBooleanInput:
+ case Bailout_NonObjectInput:
+ case Bailout_NonStringInput:
+ case Bailout_NonSymbolInput:
+ case Bailout_UnexpectedSimdInput:
+ case Bailout_NonSharedTypedArrayInput:
+ case Bailout_Debugger:
+ case Bailout_UninitializedThis:
+ case Bailout_BadDerivedConstructorReturn:
+ // Do nothing.
+ break;
+
+ case Bailout_FirstExecution:
+ // Do not return directly, as this was not frequent in the first place,
+ // thus rely on the check for frequent bailouts to recompile the current
+ // script.
+ break;
+
+ // Invalid assumption based on baseline code.
+ case Bailout_OverflowInvalidate:
+ outerScript->setHadOverflowBailout();
+ MOZ_FALLTHROUGH;
+ case Bailout_NonStringInputInvalidate:
+ case Bailout_DoubleOutput:
+ case Bailout_ObjectIdentityOrTypeGuard:
+ HandleBaselineInfoBailout(cx, outerScript, innerScript);
+ break;
+
+ case Bailout_ArgumentCheck:
+ // Do nothing, bailout will resume before the argument monitor ICs.
+ break;
+ case Bailout_BoundsCheck:
+ case Bailout_Detached:
+ HandleBoundsCheckFailure(cx, outerScript, innerScript);
+ break;
+ case Bailout_ShapeGuard:
+ HandleShapeGuardFailure(cx, outerScript, innerScript);
+ break;
+ case Bailout_UninitializedLexical:
+ HandleLexicalCheckFailure(cx, outerScript, innerScript);
+ break;
+ case Bailout_IonExceptionDebugMode:
+ // Return false to resume in HandleException with reconstructed
+ // baseline frame.
+ return false;
+ default:
+ MOZ_CRASH("Unknown bailout kind!");
+ }
+
+ CheckFrequentBailouts(cx, outerScript, bailoutKind);
+
+ // We're returning to JIT code, so we should clear the override pc.
+ topFrame->clearOverridePc();
+ return true;
+}
diff --git a/js/src/jit/BaselineCacheIR.cpp b/js/src/jit/BaselineCacheIR.cpp
new file mode 100644
index 000000000..bf96932d1
--- /dev/null
+++ b/js/src/jit/BaselineCacheIR.cpp
@@ -0,0 +1,1283 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCacheIR.h"
+
+#include "jit/CacheIR.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// OperandLocation represents the location of an OperandId. The operand is
+// either in a register or on the stack, and is either boxed or unboxed.
+class OperandLocation
+{
+ public:
+ enum Kind {
+ Uninitialized = 0,
+ PayloadReg,
+ ValueReg,
+ PayloadStack,
+ ValueStack,
+ };
+
+ private:
+ Kind kind_;
+
+ union Data {
+ struct {
+ Register reg;
+ JSValueType type;
+ } payloadReg;
+ ValueOperand valueReg;
+ struct {
+ uint32_t stackPushed;
+ JSValueType type;
+ } payloadStack;
+ uint32_t valueStackPushed;
+
+ Data() : valueStackPushed(0) {}
+ };
+ Data data_;
+
+ public:
+ OperandLocation() : kind_(Uninitialized) {}
+
+ Kind kind() const { return kind_; }
+
+ void setUninitialized() {
+ kind_ = Uninitialized;
+ }
+
+ ValueOperand valueReg() const {
+ MOZ_ASSERT(kind_ == ValueReg);
+ return data_.valueReg;
+ }
+ Register payloadReg() const {
+ MOZ_ASSERT(kind_ == PayloadReg);
+ return data_.payloadReg.reg;
+ }
+ uint32_t payloadStack() const {
+ MOZ_ASSERT(kind_ == PayloadStack);
+ return data_.payloadStack.stackPushed;
+ }
+ uint32_t valueStack() const {
+ MOZ_ASSERT(kind_ == ValueStack);
+ return data_.valueStackPushed;
+ }
+ JSValueType payloadType() const {
+ if (kind_ == PayloadReg)
+ return data_.payloadReg.type;
+ MOZ_ASSERT(kind_ == PayloadStack);
+ return data_.payloadStack.type;
+ }
+ void setPayloadReg(Register reg, JSValueType type) {
+ kind_ = PayloadReg;
+ data_.payloadReg.reg = reg;
+ data_.payloadReg.type = type;
+ }
+ void setValueReg(ValueOperand reg) {
+ kind_ = ValueReg;
+ data_.valueReg = reg;
+ }
+ void setPayloadStack(uint32_t stackPushed, JSValueType type) {
+ kind_ = PayloadStack;
+ data_.payloadStack.stackPushed = stackPushed;
+ data_.payloadStack.type = type;
+ }
+ void setValueStack(uint32_t stackPushed) {
+ kind_ = ValueStack;
+ data_.valueStackPushed = stackPushed;
+ }
+
+ bool aliasesReg(Register reg) {
+ if (kind_ == PayloadReg)
+ return payloadReg() == reg;
+ if (kind_ == ValueReg)
+ return valueReg().aliases(reg);
+ return false;
+ }
+ bool aliasesReg(ValueOperand reg) {
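+        // On NUNBOX32 a Value occupies two registers (type and payload), so
+        // check both; on 64-bit platforms the boxed Value lives in a single
+        // register.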
+#if defined(JS_NUNBOX32)
+ return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
+#else
+ return aliasesReg(reg.valueReg());
+#endif
+ }
+
+ bool operator==(const OperandLocation& other) const {
+ if (kind_ != other.kind_)
+ return false;
+ switch (kind()) {
+ case Uninitialized:
+ return true;
+ case PayloadReg:
+ return payloadReg() == other.payloadReg() && payloadType() == other.payloadType();
+ case ValueReg:
+ return valueReg() == other.valueReg();
+ case PayloadStack:
+ return payloadStack() == other.payloadStack() && payloadType() == other.payloadType();
+ case ValueStack:
+ return valueStack() == other.valueStack();
+ }
+ MOZ_CRASH("Invalid OperandLocation kind");
+ }
+ bool operator!=(const OperandLocation& other) const { return !operator==(other); }
+};
+
+// Class to track and allocate registers while emitting IC code.
+class MOZ_RAII CacheRegisterAllocator
+{
+ // The original location of the inputs to the cache.
+ Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;
+
+ // The current location of each operand.
+ Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;
+
+ // The registers allocated while emitting the current CacheIR op.
+ // This prevents us from allocating a register and then immediately
+ // clobbering it for something else, while we're still holding on to it.
+ LiveGeneralRegisterSet currentOpRegs_;
+
+ // Registers that are currently unused and available.
+ AllocatableGeneralRegisterSet availableRegs_;
+
+ // The number of bytes pushed on the native stack.
+ uint32_t stackPushed_;
+
+ // The index of the CacheIR instruction we're currently emitting.
+ uint32_t currentInstruction_;
+
+ const CacheIRWriter& writer_;
+
+ CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
+ CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;
+
+ public:
+ friend class AutoScratchRegister;
+
+ explicit CacheRegisterAllocator(const CacheIRWriter& writer)
+ : stackPushed_(0),
+ currentInstruction_(0),
+ writer_(writer)
+ {}
+
+ MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available) {
+ availableRegs_ = available;
+ if (!origInputLocations_.resize(writer_.numInputOperands()))
+ return false;
+ if (!operandLocations_.resize(writer_.numOperandIds()))
+ return false;
+ return true;
+ }
+
+ OperandLocation operandLocation(size_t i) const {
+ return operandLocations_[i];
+ }
+ OperandLocation origInputLocation(size_t i) const {
+ return origInputLocations_[i];
+ }
+ void initInputLocation(size_t i, ValueOperand reg) {
+ origInputLocations_[i].setValueReg(reg);
+ operandLocations_[i] = origInputLocations_[i];
+ }
+
+ void nextOp() {
+ currentOpRegs_.clear();
+ currentInstruction_++;
+ }
+
+ uint32_t stackPushed() const {
+ return stackPushed_;
+ }
+
+ // Allocates a new register.
+ Register allocateRegister(MacroAssembler& masm);
+ ValueOperand allocateValueRegister(MacroAssembler& masm);
+
+ // Returns the register for the given operand. If the operand is currently
+ // not in a register, it will load it into one.
+ ValueOperand useRegister(MacroAssembler& masm, ValOperandId val);
+ Register useRegister(MacroAssembler& masm, ObjOperandId obj);
+
+ // Allocates an output register for the given operand.
+ Register defineRegister(MacroAssembler& masm, ObjOperandId obj);
+};
+
+// RAII class to put a scratch register back in the allocator's availableRegs
+// set when we're done with it.
+class MOZ_RAII AutoScratchRegister
+{
+ CacheRegisterAllocator& alloc_;
+ Register reg_;
+
+ public:
+ AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+ : alloc_(alloc)
+ {
+ reg_ = alloc.allocateRegister(masm);
+ MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
+ }
+ ~AutoScratchRegister() {
+ MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
+ alloc_.availableRegs_.add(reg_);
+ }
+ operator Register() const { return reg_; }
+};
+
+// The FailurePath class stores everything we need to generate a failure path
+// at the end of the IC code. The failure path restores the input registers, if
+// needed, and jumps to the next stub.
+class FailurePath
+{
+ Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
+ NonAssertingLabel label_;
+ uint32_t stackPushed_;
+
+ public:
+ FailurePath() = default;
+
+ FailurePath(FailurePath&& other)
+ : inputs_(Move(other.inputs_)),
+ label_(other.label_),
+ stackPushed_(other.stackPushed_)
+ {}
+
+ Label* label() { return &label_; }
+
+ void setStackPushed(uint32_t i) { stackPushed_ = i; }
+ uint32_t stackPushed() const { return stackPushed_; }
+
+ bool appendInput(OperandLocation loc) {
+ return inputs_.append(loc);
+ }
+ OperandLocation input(size_t i) const {
+ return inputs_[i];
+ }
+
+ // If canShareFailurePath(other) returns true, the same machine code will
+ // be emitted for two failure paths, so we can share them.
+ bool canShareFailurePath(const FailurePath& other) const {
+ if (stackPushed_ != other.stackPushed_)
+ return false;
+
+ MOZ_ASSERT(inputs_.length() == other.inputs_.length());
+
+ for (size_t i = 0; i < inputs_.length(); i++) {
+ if (inputs_[i] != other.inputs_[i])
+ return false;
+ }
+ return true;
+ }
+};
+
+// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
+class MOZ_RAII CacheIRCompiler
+{
+ protected:
+ JSContext* cx_;
+ CacheIRReader reader;
+ const CacheIRWriter& writer_;
+ MacroAssembler masm;
+
+ CacheRegisterAllocator allocator;
+ Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;
+
+ CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer)
+ : cx_(cx),
+ reader(writer),
+ writer_(writer),
+ allocator(writer_)
+ {}
+
+ void emitFailurePath(size_t i);
+};
+
+void
+CacheIRCompiler::emitFailurePath(size_t i)
+{
+ FailurePath& failure = failurePaths[i];
+
+ masm.bind(failure.label());
+
+ uint32_t stackPushed = failure.stackPushed();
+ size_t numInputOperands = writer_.numInputOperands();
+
+ for (size_t j = 0; j < numInputOperands; j++) {
+ OperandLocation orig = allocator.origInputLocation(j);
+ OperandLocation cur = failure.input(j);
+
+ MOZ_ASSERT(orig.kind() == OperandLocation::ValueReg);
+
+        // We have a cycle if a destination register will be used later as a
+        // source register. If that happens, just push the current value on
+        // the stack and restore it from there later.
+ for (size_t k = j + 1; k < numInputOperands; k++) {
+ OperandLocation laterSource = failure.input(k);
+ switch (laterSource.kind()) {
+ case OperandLocation::ValueReg:
+ if (orig.aliasesReg(laterSource.valueReg())) {
+ stackPushed += sizeof(js::Value);
+ masm.pushValue(laterSource.valueReg());
+ laterSource.setValueStack(stackPushed);
+ }
+ break;
+ case OperandLocation::PayloadReg:
+ if (orig.aliasesReg(laterSource.payloadReg())) {
+ stackPushed += sizeof(uintptr_t);
+ masm.push(laterSource.payloadReg());
+ laterSource.setPayloadStack(stackPushed, laterSource.payloadType());
+ }
+ break;
+ case OperandLocation::PayloadStack:
+ case OperandLocation::ValueStack:
+ case OperandLocation::Uninitialized:
+ break;
+ }
+ }
+
+ switch (cur.kind()) {
+ case OperandLocation::ValueReg:
+ masm.moveValue(cur.valueReg(), orig.valueReg());
+ break;
+ case OperandLocation::PayloadReg:
+ masm.tagValue(cur.payloadType(), cur.payloadReg(), orig.valueReg());
+ break;
+ case OperandLocation::PayloadStack: {
+ MOZ_ASSERT(stackPushed >= sizeof(uintptr_t));
+ Register scratch = orig.valueReg().scratchReg();
+ if (cur.payloadStack() == stackPushed) {
+ masm.pop(scratch);
+ stackPushed -= sizeof(uintptr_t);
+ } else {
+ MOZ_ASSERT(cur.payloadStack() < stackPushed);
+ masm.loadPtr(Address(masm.getStackPointer(), stackPushed - cur.payloadStack()),
+ scratch);
+ }
+ masm.tagValue(cur.payloadType(), scratch, orig.valueReg());
+ break;
+ }
+ case OperandLocation::ValueStack:
+ MOZ_ASSERT(stackPushed >= sizeof(js::Value));
+ if (cur.valueStack() == stackPushed) {
+ masm.popValue(orig.valueReg());
+ stackPushed -= sizeof(js::Value);
+ } else {
+ MOZ_ASSERT(cur.valueStack() < stackPushed);
+ masm.loadValue(Address(masm.getStackPointer(), stackPushed - cur.valueStack()),
+ orig.valueReg());
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ if (stackPushed > 0)
+ masm.addToStackPtr(Imm32(stackPushed));
+}
+
+// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
+class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
+{
+ uint32_t stubDataOffset_;
+
+ public:
+ BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset)
+ : CacheIRCompiler(cx, writer),
+ stubDataOffset_(stubDataOffset)
+ {}
+
+ MOZ_MUST_USE bool init(CacheKind kind);
+
+ JitCode* compile();
+
+ private:
+#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+
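+    // Stub fields are addressed in pointer-sized units; translate such an
+    // offset into a byte address within the stub, relative to ICStubReg.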
+ Address stubAddress(uint32_t offset) const {
+ return Address(ICStubReg, stubDataOffset_ + offset * sizeof(uintptr_t));
+ }
+
+ bool addFailurePath(FailurePath** failure) {
+ FailurePath newFailure;
+ for (size_t i = 0; i < writer_.numInputOperands(); i++) {
+ if (!newFailure.appendInput(allocator.operandLocation(i)))
+ return false;
+ }
+ newFailure.setStackPushed(allocator.stackPushed());
+
+ // Reuse the previous failure path if the current one is the same, to
+ // avoid emitting duplicate code.
+ if (failurePaths.length() > 0 && failurePaths.back().canShareFailurePath(newFailure)) {
+ *failure = &failurePaths.back();
+ return true;
+ }
+
+ if (!failurePaths.append(Move(newFailure)))
+ return false;
+
+ *failure = &failurePaths.back();
+ return true;
+ }
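+    // Both helpers below pop any values the register allocator spilled to the
+    // native stack before leaving the IC, so the stack pointer is back where
+    // the stub's caller expects it.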
+ void emitEnterTypeMonitorIC() {
+ if (allocator.stackPushed() > 0)
+ masm.addToStackPtr(Imm32(allocator.stackPushed()));
+ EmitEnterTypeMonitorIC(masm);
+ }
+ void emitReturnFromIC() {
+ if (allocator.stackPushed() > 0)
+ masm.addToStackPtr(Imm32(allocator.stackPushed()));
+ EmitReturnFromIC(masm);
+ }
+};
+
+JitCode*
+BaselineCacheIRCompiler::compile()
+{
+#ifndef JS_USE_LINK_REGISTER
+    // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.adjustFrame(sizeof(intptr_t));
+#endif
+#ifdef JS_CODEGEN_ARM
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+
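+    // Emit machine code for each CacheIR op in order. The reader and the
+    // register allocator are advanced in lockstep so operand locations stay
+    // in sync with the instruction being compiled.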
+ do {
+ switch (reader.readOp()) {
+#define DEFINE_OP(op) \
+ case CacheOp::op: \
+ if (!emit##op()) \
+ return nullptr; \
+ break;
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+
+ allocator.nextOp();
+ } while (reader.more());
+
+ // Done emitting the main IC code. Now emit the failure paths.
+ for (size_t i = 0; i < failurePaths.length(); i++) {
+ emitFailurePath(i);
+ EmitStubGuardFailure(masm);
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("getStubCode");
+ Rooted<JitCode*> newStubCode(cx_, linker.newCode<NoGC>(cx_, BASELINE_CODE));
+ if (!newStubCode) {
+ cx_->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+    // All barriers are emitted off by default; enable them if needed.
+ if (cx_->zone()->needsIncrementalBarrier())
+ newStubCode->togglePreBarriers(true, DontReprotect);
+
+ return newStubCode;
+}
+
+ValueOperand
+CacheRegisterAllocator::useRegister(MacroAssembler& masm, ValOperandId op)
+{
+ OperandLocation& loc = operandLocations_[op.id()];
+
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg:
+ currentOpRegs_.add(loc.valueReg());
+ return loc.valueReg();
+
+ case OperandLocation::ValueStack: {
+        // The Value is on the stack. If it's on top of the stack, pop it into
+        // a register; if we need the register for something else later, we can
+        // always spill the value back. If it's not on top of the stack, just
+        // load it.
+ ValueOperand reg = allocateValueRegister(masm);
+ if (loc.valueStack() == stackPushed_) {
+ masm.popValue(reg);
+ MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
+ stackPushed_ -= sizeof(js::Value);
+ } else {
+ MOZ_ASSERT(loc.valueStack() < stackPushed_);
+ masm.loadValue(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), reg);
+ }
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ // The operand should never be unboxed.
+ case OperandLocation::PayloadStack:
+ case OperandLocation::PayloadReg:
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH();
+}
+
+Register
+CacheRegisterAllocator::useRegister(MacroAssembler& masm, ObjOperandId op)
+{
+ OperandLocation& loc = operandLocations_[op.id()];
+ switch (loc.kind()) {
+ case OperandLocation::PayloadReg:
+ currentOpRegs_.add(loc.payloadReg());
+ return loc.payloadReg();
+
+ case OperandLocation::ValueReg: {
+ // It's possible the value is still boxed: as an optimization, we unbox
+ // the first time we use a value as object.
+ ValueOperand val = loc.valueReg();
+ availableRegs_.add(val);
+ Register reg = val.scratchReg();
+ availableRegs_.take(reg);
+ masm.unboxObject(val, reg);
+ loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
+ currentOpRegs_.add(reg);
+ return reg;
+ }
+
+ case OperandLocation::PayloadStack: {
+ // The payload is on the stack. If it's on top of the stack we can just
+ // pop it, else we emit a load.
+ Register reg = allocateRegister(masm);
+ if (loc.payloadStack() == stackPushed_) {
+ masm.pop(reg);
+ MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
+ stackPushed_ -= sizeof(uintptr_t);
+ } else {
+ MOZ_ASSERT(loc.payloadStack() < stackPushed_);
+ masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - loc.payloadStack()), reg);
+ }
+ loc.setPayloadReg(reg, loc.payloadType());
+ return reg;
+ }
+
+ case OperandLocation::ValueStack: {
+ // The value is on the stack, but boxed. If it's on top of the stack we
+ // unbox it and then remove it from the stack, else we just unbox.
+ Register reg = allocateRegister(masm);
+ if (loc.valueStack() == stackPushed_) {
+ masm.unboxObject(Address(masm.getStackPointer(), 0), reg);
+ masm.addToStackPtr(Imm32(sizeof(js::Value)));
+ MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
+ stackPushed_ -= sizeof(js::Value);
+ } else {
+ MOZ_ASSERT(loc.valueStack() < stackPushed_);
+ masm.unboxObject(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
+ reg);
+ }
+ loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
+ return reg;
+ }
+
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH();
+}
+
+Register
+CacheRegisterAllocator::defineRegister(MacroAssembler& masm, ObjOperandId op)
+{
+ OperandLocation& loc = operandLocations_[op.id()];
+ MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
+
+ Register reg = allocateRegister(masm);
+ loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
+ return reg;
+}
+
+Register
+CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
+{
+ if (availableRegs_.empty()) {
+ // No registers available. See if any operands are dead so we can reuse
+ // their registers. Note that we skip the input operands, as those are
+ // also used by failure paths, and we currently don't track those uses.
+ for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
+ if (!writer_.operandIsDead(i, currentInstruction_))
+ continue;
+
+ OperandLocation& loc = operandLocations_[i];
+ switch (loc.kind()) {
+ case OperandLocation::PayloadReg:
+ availableRegs_.add(loc.payloadReg());
+ break;
+ case OperandLocation::ValueReg:
+ availableRegs_.add(loc.valueReg());
+ break;
+ case OperandLocation::Uninitialized:
+ case OperandLocation::PayloadStack:
+ case OperandLocation::ValueStack:
+ break;
+ }
+ loc.setUninitialized();
+ }
+ }
+
+ if (availableRegs_.empty()) {
+ // Still no registers available, try to spill unused operands to
+ // the stack.
+ for (size_t i = 0; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (loc.kind() == OperandLocation::PayloadReg) {
+ Register reg = loc.payloadReg();
+ if (currentOpRegs_.has(reg))
+ continue;
+
+ masm.push(reg);
+ stackPushed_ += sizeof(uintptr_t);
+ loc.setPayloadStack(stackPushed_, loc.payloadType());
+ availableRegs_.add(reg);
+ break; // We got a register, so break out of the loop.
+ }
+ if (loc.kind() == OperandLocation::ValueReg) {
+ ValueOperand reg = loc.valueReg();
+ if (currentOpRegs_.aliases(reg))
+ continue;
+
+ masm.pushValue(reg);
+ stackPushed_ += sizeof(js::Value);
+ loc.setValueStack(stackPushed_);
+ availableRegs_.add(reg);
+ break; // Break out of the loop.
+ }
+ }
+ }
+
+ // At this point, there must be a free register. (Ion ICs don't have as
+ // many registers available, so once we support Ion code generation, we may
+ // have to spill some unrelated registers.)
+ MOZ_RELEASE_ASSERT(!availableRegs_.empty());
+
+ Register reg = availableRegs_.takeAny();
+ currentOpRegs_.add(reg);
+ return reg;
+}
+
+ValueOperand
+CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
+{
+#ifdef JS_NUNBOX32
+ Register reg1 = allocateRegister(masm);
+ Register reg2 = allocateRegister(masm);
+ return ValueOperand(reg1, reg2);
+#else
+ Register reg = allocateRegister(masm);
+ return ValueOperand(reg);
+#endif
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardIsObject()
+{
+ ValueOperand input = allocator.useRegister(masm, reader.valOperandId());
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+ masm.branchTestObject(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardType()
+{
+ ValueOperand input = allocator.useRegister(masm, reader.valOperandId());
+ JSValueType type = reader.valueType();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ switch (type) {
+ case JSVAL_TYPE_STRING:
+ masm.branchTestString(Assembler::NotEqual, input, failure->label());
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
+ break;
+ case JSVAL_TYPE_DOUBLE:
+ masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
+ break;
+ case JSVAL_TYPE_BOOLEAN:
+ masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardShape()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address addr(stubAddress(reader.stubOffset()));
+ masm.loadPtr(addr, scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardGroup()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address addr(stubAddress(reader.stubOffset()));
+ masm.loadPtr(addr, scratch);
+ masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardProto()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address addr(stubAddress(reader.stubOffset()));
+ masm.loadObjProto(obj, scratch);
+ masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardClass()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ const Class* clasp = nullptr;
+ switch (reader.guardClassKind()) {
+ case GuardClassKind::Array:
+ clasp = &ArrayObject::class_;
+ break;
+ case GuardClassKind::UnboxedArray:
+ clasp = &UnboxedArrayObject::class_;
+ break;
+ case GuardClassKind::MappedArguments:
+ clasp = &MappedArgumentsObject::class_;
+ break;
+ case GuardClassKind::UnmappedArguments:
+ clasp = &UnmappedArgumentsObject::class_;
+ break;
+ }
+
+ MOZ_ASSERT(clasp);
+ masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardSpecificObject()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address addr(stubAddress(reader.stubOffset()));
+ masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardNoUnboxedExpando()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
+ masm.branchPtr(Assembler::NotEqual, expandoAddr, ImmWord(0), failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardAndLoadUnboxedExpando()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ Register output = allocator.defineRegister(masm, reader.objOperandId());
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
+ masm.loadPtr(expandoAddr, output);
+ masm.branchTestPtr(Assembler::Zero, output, output, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadFixedSlotResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
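+    // The stub field holds the fixed slot's byte offset within the object,
+    // so the slot's Value can be loaded directly off |obj|.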
+ masm.load32(stubAddress(reader.stubOffset()), scratch);
+ masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
+ emitEnterTypeMonitorIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadDynamicSlotResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ // We're about to return, so it's safe to clobber obj now.
+ masm.load32(stubAddress(reader.stubOffset()), scratch);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
+ masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
+ emitEnterTypeMonitorIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ JSValueType fieldType = reader.valueType();
+
+ Address fieldOffset(stubAddress(reader.stubOffset()));
+ masm.load32(fieldOffset, scratch);
+ masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType, R0);
+
+ if (fieldType == JSVAL_TYPE_OBJECT)
+ emitEnterTypeMonitorIC();
+ else
+ emitReturnFromIC();
+
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardNoDetachedTypedObjects()
+{
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ CheckForTypedObjectWithDetachedStorage(cx_, masm, failure->label());
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadTypedObjectResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ TypedThingLayout layout = reader.typedThingLayout();
+ uint32_t typeDescr = reader.typeDescrKey();
+ Address fieldOffset(stubAddress(reader.stubOffset()));
+
+ // Get the object's data pointer.
+ LoadTypedThingData(masm, layout, obj, scratch1);
+
+    // Get the address being read from.
+ masm.load32(fieldOffset, scratch2);
+ masm.addPtr(scratch2, scratch1);
+
+ // Only monitor the result if the type produced by this stub might vary.
+ bool monitorLoad;
+ if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
+ Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
+ monitorLoad = type == Scalar::Uint32;
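+        // Uint32 values that do not fit in an int32 are loaded as doubles,
+        // so only Uint32 loads can produce a result type that varies.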
+
+ masm.loadFromTypedArray(type, Address(scratch1, 0), R0, /* allowDouble = */ true,
+ scratch2, nullptr);
+ } else {
+ ReferenceTypeDescr::Type type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
+ monitorLoad = type != ReferenceTypeDescr::TYPE_STRING;
+
+ switch (type) {
+ case ReferenceTypeDescr::TYPE_ANY:
+ masm.loadValue(Address(scratch1, 0), R0);
+ break;
+
+ case ReferenceTypeDescr::TYPE_OBJECT: {
+ Label notNull, done;
+ masm.loadPtr(Address(scratch1, 0), scratch1);
+ masm.branchTestPtr(Assembler::NonZero, scratch1, scratch1, &notNull);
+ masm.moveValue(NullValue(), R0);
+ masm.jump(&done);
+ masm.bind(&notNull);
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, R0);
+ masm.bind(&done);
+ break;
+ }
+
+ case ReferenceTypeDescr::TYPE_STRING:
+ masm.loadPtr(Address(scratch1, 0), scratch1);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch1, R0);
+ break;
+
+ default:
+ MOZ_CRASH("Invalid ReferenceTypeDescr");
+ }
+ }
+
+ if (monitorLoad)
+ emitEnterTypeMonitorIC();
+ else
+ emitReturnFromIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadUndefinedResult()
+{
+ masm.moveValue(UndefinedValue(), R0);
+
+ // Normally for this op, the result would have to be monitored by TI.
+ // However, since this stub ALWAYS returns UndefinedValue(), and we can be sure
+ // that undefined is already registered with the type-set, this can be avoided.
+ emitReturnFromIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadInt32ArrayLengthResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+ masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
+
+ // Guard length fits in an int32.
+ masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
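+    // (A length with the sign bit set does not fit in an int32 Value.)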
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, R0);
+
+ // The int32 type was monitored when attaching the stub, so we can
+ // just return.
+ emitReturnFromIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadUnboxedArrayLengthResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfLength()), R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+
+ // The int32 type was monitored when attaching the stub, so we can
+ // just return.
+ emitReturnFromIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadArgumentsObjectLengthResult()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure))
+ return false;
+
+ // Get initial length value.
+ masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), scratch);
+
+ // Test if length has been overridden.
+ masm.branchTest32(Assembler::NonZero,
+ scratch,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT),
+ failure->label());
+
+ // Shift out arguments length and return it. No need to type monitor
+ // because this stub always returns int32.
+ masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, R0);
+ emitReturnFromIC();
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadObject()
+{
+ Register reg = allocator.defineRegister(masm, reader.objOperandId());
+ masm.loadPtr(stubAddress(reader.stubOffset()), reg);
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadProto()
+{
+ Register obj = allocator.useRegister(masm, reader.objOperandId());
+ Register reg = allocator.defineRegister(masm, reader.objOperandId());
+ masm.loadObjProto(obj, reg);
+ return true;
+}
+
+bool
+BaselineCacheIRCompiler::init(CacheKind kind)
+{
+ size_t numInputs = writer_.numInputOperands();
+ if (!allocator.init(ICStubCompiler::availableGeneralRegs(numInputs)))
+ return false;
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, R0);
+
+ return true;
+}
+
+template <typename T>
+static GCPtr<T>*
+AsGCPtr(uintptr_t* ptr)
+{
+ return reinterpret_cast<GCPtr<T>*>(ptr);
+}
+
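+// Stub fields are stored as raw words at |stubDataOffset_| from the stub;
+// expose the word at index |field| as a GCPtr<T> so it can be traced and
+// updated.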
+template<class T>
+GCPtr<T>&
+CacheIRStubInfo::getStubField(ICStub* stub, uint32_t field) const
+{
+ uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+ MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);
+
+ return *AsGCPtr<T>((uintptr_t*)stubData + field);
+}
+
+template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+
+template <typename T>
+static void
+InitGCPtr(uintptr_t* ptr, uintptr_t val)
+{
+ AsGCPtr<T*>(ptr)->init((T*)val);
+}
+
+void
+CacheIRWriter::copyStubData(uint8_t* dest) const
+{
+ uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
+
+ for (size_t i = 0; i < stubFields_.length(); i++) {
+ switch (stubFields_[i].gcType) {
+ case StubField::GCType::NoGCThing:
+ destWords[i] = stubFields_[i].word;
+ continue;
+ case StubField::GCType::Shape:
+ InitGCPtr<Shape>(destWords + i, stubFields_[i].word);
+ continue;
+ case StubField::GCType::JSObject:
+ InitGCPtr<JSObject>(destWords + i, stubFields_[i].word);
+ continue;
+ case StubField::GCType::ObjectGroup:
+ InitGCPtr<ObjectGroup>(destWords + i, stubFields_[i].word);
+ continue;
+ case StubField::GCType::Limit:
+ break;
+ }
+ MOZ_CRASH();
+ }
+}
+
+HashNumber
+CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
+{
+ HashNumber hash = mozilla::HashBytes(l.code, l.length);
+ return mozilla::AddToHash(hash, uint32_t(l.kind));
+}
+
+bool
+CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
+{
+ if (entry.stubInfo->kind() != l.kind)
+ return false;
+
+ if (entry.stubInfo->codeLength() != l.length)
+ return false;
+
+ if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
+ return false;
+
+ return true;
+}
+
+CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
+ : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
+{}
+
+CacheIRStubInfo*
+CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWriter& writer)
+{
+ size_t numStubFields = writer.numStubFields();
+ size_t bytesNeeded = sizeof(CacheIRStubInfo) +
+ writer.codeLength() +
+ (numStubFields + 1); // +1 for the GCType::Limit terminator.
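+    // Layout: the CacheIRStubInfo itself, then the CacheIR code bytes, then
+    // one GC-type byte per stub field plus the GCType::Limit terminator.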
+ uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
+ if (!p)
+ return nullptr;
+
+ // Copy the CacheIR code.
+ uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
+ mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());
+
+ static_assert(uint32_t(StubField::GCType::Limit) <= UINT8_MAX,
+ "All StubField::GCTypes must fit in uint8_t");
+
+ // Copy the GC types of the stub fields.
+ uint8_t* gcTypes = codeStart + writer.codeLength();
+ for (size_t i = 0; i < numStubFields; i++)
+ gcTypes[i] = uint8_t(writer.stubFieldGCType(i));
+ gcTypes[numStubFields] = uint8_t(StubField::GCType::Limit);
+
+ return new(p) CacheIRStubInfo(kind, stubDataOffset, codeStart, writer.codeLength(), gcTypes);
+}
+
+static const size_t MaxOptimizedCacheIRStubs = 16;
+
+ICStub*
+jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
+ ICFallbackStub* stub)
+{
+ // We shouldn't GC or report OOM (or any other exception) here.
+ AutoAssertNoPendingException aanpe(cx);
+ JS::AutoCheckCannotGC nogc;
+
+ if (writer.failed())
+ return nullptr;
+
+ // Just a sanity check: the caller should ensure we don't attach an
+ // unlimited number of stubs.
+ MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
+
+ MOZ_ASSERT(kind == CacheKind::GetProp);
+ uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored);
+
+ JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
+
+ // Check if we already have JitCode for this stub.
+ CacheIRStubInfo* stubInfo;
+ CacheIRStubKey::Lookup lookup(kind, writer.codeStart(), writer.codeLength());
+ JitCode* code = jitCompartment->getCacheIRStubCode(lookup, &stubInfo);
+ if (!code) {
+ // We have to generate stub code.
+ JitContext jctx(cx, nullptr);
+ BaselineCacheIRCompiler comp(cx, writer, stubDataOffset);
+ if (!comp.init(kind))
+ return nullptr;
+
+ code = comp.compile();
+ if (!code)
+ return nullptr;
+
+ // Allocate the shared CacheIRStubInfo. Note that the putCacheIRStubCode
+ // call below will transfer ownership to the stub code HashMap, so we
+ // don't have to worry about freeing it below.
+ MOZ_ASSERT(!stubInfo);
+ stubInfo = CacheIRStubInfo::New(kind, stubDataOffset, writer);
+ if (!stubInfo)
+ return nullptr;
+
+ CacheIRStubKey key(stubInfo);
+ if (!jitCompartment->putCacheIRStubCode(lookup, key, code))
+ return nullptr;
+ }
+
+ // We got our shared stub code and stub info. Time to allocate and attach a
+ // new stub.
+
+ MOZ_ASSERT(code);
+ MOZ_ASSERT(stubInfo);
+ MOZ_ASSERT(stub->isMonitoredFallback());
+
+ size_t bytesNeeded = stubInfo->stubDataOffset() + writer.stubDataSize();
+
+    // For now, no stubs can make calls, so they are all allocated in the
+ // optimized stub space.
+ void* newStub = cx->zone()->jitZone()->optimizedStubSpace()->alloc(bytesNeeded);
+ if (!newStub)
+ return nullptr;
+
+ ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
+ new(newStub) ICCacheIR_Monitored(code, monitorStub, stubInfo);
+
+ writer.copyStubData((uint8_t*)newStub + stubInfo->stubDataOffset());
+ stub->addNewStub((ICStub*)newStub);
+ return (ICStub*)newStub;
+}
+
+void
+jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo)
+{
+ uint32_t field = 0;
+ while (true) {
+ switch (stubInfo->gcType(field)) {
+ case StubField::GCType::NoGCThing:
+ break;
+ case StubField::GCType::Shape:
+ TraceNullableEdge(trc, &stubInfo->getStubField<Shape*>(stub, field),
+ "baseline-cacheir-shape");
+ break;
+ case StubField::GCType::ObjectGroup:
+ TraceNullableEdge(trc, &stubInfo->getStubField<ObjectGroup*>(stub, field),
+ "baseline-cacheir-group");
+ break;
+ case StubField::GCType::JSObject:
+ TraceNullableEdge(trc, &stubInfo->getStubField<JSObject*>(stub, field),
+ "baseline-cacheir-object");
+ break;
+ case StubField::GCType::Limit:
+ return; // Done.
+ default:
+ MOZ_CRASH();
+ }
+ field++;
+ }
+}
diff --git a/js/src/jit/BaselineCacheIR.h b/js/src/jit/BaselineCacheIR.h
new file mode 100644
index 000000000..187d18e3a
--- /dev/null
+++ b/js/src/jit/BaselineCacheIR.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineCacheIR_h
+#define jit_BaselineCacheIR_h
+
+#include "gc/Barrier.h"
+#include "jit/CacheIR.h"
+
+namespace js {
+namespace jit {
+
+class ICFallbackStub;
+class ICStub;
+
+// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
+// of this class.
+class CacheIRStubInfo
+{
+ CacheKind kind_;
+ uint8_t stubDataOffset_;
+ const uint8_t* code_;
+ uint32_t length_;
+ const uint8_t* gcTypes_;
+
+ CacheIRStubInfo(CacheKind kind, uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
+ const uint8_t* gcTypes)
+ : kind_(kind),
+ stubDataOffset_(stubDataOffset),
+ code_(code),
+ length_(codeLength),
+ gcTypes_(gcTypes)
+ {
+ MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t");
+ }
+
+ CacheIRStubInfo(const CacheIRStubInfo&) = delete;
+ CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;
+
+ public:
+ CacheKind kind() const { return kind_; }
+
+ const uint8_t* code() const { return code_; }
+ uint32_t codeLength() const { return length_; }
+ uint32_t stubDataOffset() const { return stubDataOffset_; }
+
+ StubField::GCType gcType(uint32_t i) const { return (StubField::GCType)gcTypes_[i]; }
+
+ static CacheIRStubInfo* New(CacheKind kind, uint32_t stubDataOffset,
+ const CacheIRWriter& writer);
+
+ template <class T>
+ js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;
+};
+
+void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);
+
+ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
+ ICFallbackStub* stub);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineCacheIR_h */
diff --git a/js/src/jit/BaselineCompiler.cpp b/js/src/jit/BaselineCompiler.cpp
new file mode 100644
index 000000000..c58367aa3
--- /dev/null
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -0,0 +1,4527 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/FixedList.h"
+#include "jit/IonAnalysis.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/SharedICHelpers.h"
+#include "jit/VMFunctions.h"
+#include "js/UniquePtr.h"
+#include "vm/AsyncFunction.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/Interpreter.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/BaselineFrameInfo-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::AssertedCast;
+
+BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerSpecific(cx, alloc, script),
+ yieldOffsets_(cx),
+ modifiesArguments_(false)
+{
+}
+
+bool
+BaselineCompiler::init()
+{
+ if (!analysis_.init(alloc_, cx->caches.gsnCache))
+ return false;
+
+ if (!labels_.init(alloc_, script->length()))
+ return false;
+
+ for (size_t i = 0; i < script->length(); i++)
+ new (&labels_[i]) Label();
+
+ if (!frame.init(alloc_))
+ return false;
+
+ return true;
+}
+
+bool
+BaselineCompiler::addPCMappingEntry(bool addIndexEntry)
+{
+ // Don't add multiple entries for a single pc.
+ size_t nentries = pcMappingEntries_.length();
+ if (nentries > 0 && pcMappingEntries_[nentries - 1].pcOffset == script->pcToOffset(pc))
+ return true;
+
+ PCMappingEntry entry;
+ entry.pcOffset = script->pcToOffset(pc);
+ entry.nativeOffset = masm.currentOffset();
+ entry.slotInfo = getStackTopSlotInfo();
+ entry.addIndexEntry = addIndexEntry;
+
+ return pcMappingEntries_.append(entry);
+}
+
+MethodStatus
+BaselineCompiler::compile()
+{
+ JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%" PRIuSIZE " (%p)",
+ script->filename(), script->lineno(), script);
+
+ JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%" PRIuSIZE,
+ script->filename(), script->lineno());
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLoggerEvent scriptEvent(logger, TraceLogger_AnnotateScripts, script);
+ AutoTraceLog logScript(logger, scriptEvent);
+ AutoTraceLog logCompile(logger, TraceLogger_BaselineCompilation);
+
+ if (!script->ensureHasTypes(cx) || !script->ensureHasAnalyzedArgsUsage(cx))
+ return Method_Error;
+
+    // When code coverage is only enabled for optimizations, or when a Debugger
+    // has set the collectCoverageInfo flag, we have to create the ScriptCounts
+    // if they do not exist.
+ if (!script->hasScriptCounts() && cx->compartment()->collectCoverage()) {
+ if (!script->initScriptCounts(cx))
+ return Method_Error;
+ }
+
+ // Pin analysis info during compilation.
+ AutoEnterAnalysis autoEnterAnalysis(cx);
+
+ MOZ_ASSERT(!script->hasBaselineScript());
+
+ if (!emitPrologue())
+ return Method_Error;
+
+ MethodStatus status = emitBody();
+ if (status != Method_Compiled)
+ return status;
+
+ if (!emitEpilogue())
+ return Method_Error;
+
+ if (!emitOutOfLinePostBarrierSlot())
+ return Method_Error;
+
+ Linker linker(masm);
+ if (masm.oom()) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ AutoFlushICache afc("Baseline");
+ JitCode* code = linker.newCode<CanGC>(cx, BASELINE_CODE);
+ if (!code)
+ return Method_Error;
+
+ Rooted<EnvironmentObject*> templateEnv(cx);
+ if (script->functionNonDelazifying()) {
+ RootedFunction fun(cx, script->functionNonDelazifying());
+
+ if (fun->needsNamedLambdaEnvironment()) {
+ templateEnv = NamedLambdaObject::createTemplateObject(cx, fun, gc::TenuredHeap);
+ if (!templateEnv)
+ return Method_Error;
+ }
+
+ if (fun->needsCallObject()) {
+ RootedScript scriptRoot(cx, script);
+ templateEnv = CallObject::createTemplateObject(cx, scriptRoot, templateEnv,
+ gc::TenuredHeap);
+ if (!templateEnv)
+ return Method_Error;
+ }
+ }
+
+ // Encode the pc mapping table. See PCMappingIndexEntry for
+ // more information.
+ Vector<PCMappingIndexEntry> pcMappingIndexEntries(cx);
+ CompactBufferWriter pcEntries;
+ uint32_t previousOffset = 0;
+
+ for (size_t i = 0; i < pcMappingEntries_.length(); i++) {
+ PCMappingEntry& entry = pcMappingEntries_[i];
+
+ if (entry.addIndexEntry) {
+ PCMappingIndexEntry indexEntry;
+ indexEntry.pcOffset = entry.pcOffset;
+ indexEntry.nativeOffset = entry.nativeOffset;
+ indexEntry.bufferOffset = pcEntries.length();
+ if (!pcMappingIndexEntries.append(indexEntry)) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+ previousOffset = entry.nativeOffset;
+ }
+
+        // Use the high bit of the SlotInfo byte to indicate that the native
+        // code offset (relative to the previous op) is > 0 and comes next in
+        // the buffer.
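+        // For example, a SlotInfo byte of 0x12 at the same native offset as
+        // the previous entry is written as the single byte 0x12; if it lies
+        // 5 bytes further, it is written as 0x92 followed by the encoded
+        // delta 5.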
+ MOZ_ASSERT((entry.slotInfo.toByte() & 0x80) == 0);
+
+ if (entry.nativeOffset == previousOffset) {
+ pcEntries.writeByte(entry.slotInfo.toByte());
+ } else {
+ MOZ_ASSERT(entry.nativeOffset > previousOffset);
+ pcEntries.writeByte(0x80 | entry.slotInfo.toByte());
+ pcEntries.writeUnsigned(entry.nativeOffset - previousOffset);
+ }
+
+ previousOffset = entry.nativeOffset;
+ }
+
+ if (pcEntries.oom()) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Note: There is an extra entry in the bytecode type map for the search hint, see below.
+ size_t bytecodeTypeMapEntries = script->nTypeSets() + 1;
+ UniquePtr<BaselineScript> baselineScript(
+ BaselineScript::New(script, prologueOffset_.offset(),
+ epilogueOffset_.offset(),
+ profilerEnterFrameToggleOffset_.offset(),
+ profilerExitFrameToggleOffset_.offset(),
+ postDebugPrologueOffset_.offset(),
+ icEntries_.length(),
+ pcMappingIndexEntries.length(),
+ pcEntries.length(),
+ bytecodeTypeMapEntries,
+ yieldOffsets_.length(),
+ traceLoggerToggleOffsets_.length()),
+ JS::DeletePolicy<BaselineScript>(cx->runtime()));
+ if (!baselineScript) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ baselineScript->setMethod(code);
+ baselineScript->setTemplateEnvironment(templateEnv);
+
+ JitSpew(JitSpew_BaselineScripts, "Created BaselineScript %p (raw %p) for %s:%" PRIuSIZE,
+ (void*) baselineScript.get(), (void*) code->raw(),
+ script->filename(), script->lineno());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerBaselineProfile(script, code);
+#endif
+
+ MOZ_ASSERT(pcMappingIndexEntries.length() > 0);
+ baselineScript->copyPCMappingIndexEntries(&pcMappingIndexEntries[0]);
+
+ MOZ_ASSERT(pcEntries.length() > 0);
+ baselineScript->copyPCMappingEntries(pcEntries);
+
+ // Copy IC entries
+ if (icEntries_.length())
+ baselineScript->copyICEntries(script, &icEntries_[0], masm);
+
+ // Adopt fallback stubs from the compiler into the baseline script.
+ baselineScript->adoptFallbackStubs(&stubSpace_);
+
+    // All barriers are emitted off by default; toggle them on if needed.
+ if (cx->zone()->needsIncrementalBarrier())
+ baselineScript->toggleBarriers(true, DontReprotect);
+
+ // If profiler instrumentation is enabled, toggle instrumentation on.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ baselineScript->toggleProfilerInstrumentation(true);
+
+ // Patch IC loads using IC entries.
+ for (size_t i = 0; i < icLoadLabels_.length(); i++) {
+ CodeOffset label = icLoadLabels_[i].label;
+ size_t icEntry = icLoadLabels_[i].icEntry;
+ BaselineICEntry* entryAddr = &(baselineScript->icEntry(icEntry));
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
+ ImmPtr(entryAddr),
+ ImmPtr((void*)-1));
+ }
+
+ if (modifiesArguments_)
+ baselineScript->setModifiesArguments();
+
+#ifdef JS_TRACE_LOGGING
+ // Initialize the tracelogger instrumentation.
+ baselineScript->initTraceLogger(cx->runtime(), script, traceLoggerToggleOffsets_);
+#endif
+
+ uint32_t* bytecodeMap = baselineScript->bytecodeTypeMap();
+ FillBytecodeTypeMap(script, bytecodeMap);
+
+    // The last entry caches the last index found, and is used to avoid binary
+    // searches for the sought entry when queries are in linear order.
+ bytecodeMap[script->nTypeSets()] = 0;
+
+ baselineScript->copyYieldEntries(script, yieldOffsets_);
+
+ if (compileDebugInstrumentation_)
+ baselineScript->setHasDebugInstrumentation();
+
+    // Always register a native => bytecode mapping entry, since the profiler can be
+ // turned on with baseline jitcode on stack, and baseline jitcode cannot be invalidated.
+ {
+ JitSpew(JitSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%" PRIuSIZE " (%p)",
+ script->filename(), script->lineno(), baselineScript.get());
+
+ // Generate profiling string.
+ char* str = JitcodeGlobalEntry::createScriptString(cx, script);
+ if (!str)
+ return Method_Error;
+
+ JitcodeGlobalEntry::BaselineEntry entry;
+ entry.init(code, code->raw(), code->rawEnd(), script, str);
+
+ JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(entry, cx->runtime())) {
+ entry.destroy();
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ }
+
+ script->setBaselineScript(cx->runtime(), baselineScript.release());
+
+ return Method_Compiled;
+}
+
+void
+BaselineCompiler::emitInitializeLocals()
+{
+    // Initialize all locals to |undefined|. The temporal dead zone for
+    // lexical bindings is enforced by the bytecode itself.
+
+ size_t n = frame.nlocals();
+ if (n == 0)
+ return;
+
+ // Use R0 to minimize code size. If the number of locals to push is <
+ // LOOP_UNROLL_FACTOR, then the initialization pushes are emitted directly
+ // and inline. Otherwise, they're emitted in a partially unrolled loop.
+ static const size_t LOOP_UNROLL_FACTOR = 4;
+ size_t toPushExtra = n % LOOP_UNROLL_FACTOR;
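+    // For example, with n == 10 we push 2 values directly below and handle
+    // the remaining 8 in two iterations of the unrolled loop.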
+
+ masm.moveValue(UndefinedValue(), R0);
+
+ // Handle any extra pushes left over by the optional unrolled loop below.
+ for (size_t i = 0; i < toPushExtra; i++)
+ masm.pushValue(R0);
+
+ // Partially unrolled loop of pushes.
+ if (n >= LOOP_UNROLL_FACTOR) {
+ size_t toPush = n - toPushExtra;
+ MOZ_ASSERT(toPush % LOOP_UNROLL_FACTOR == 0);
+ MOZ_ASSERT(toPush >= LOOP_UNROLL_FACTOR);
+ masm.move32(Imm32(toPush), R1.scratchReg());
+ // Emit unrolled loop with 4 pushes per iteration.
+ Label pushLoop;
+ masm.bind(&pushLoop);
+ for (size_t i = 0; i < LOOP_UNROLL_FACTOR; i++)
+ masm.pushValue(R0);
+ masm.branchSub32(Assembler::NonZero,
+ Imm32(LOOP_UNROLL_FACTOR), R1.scratchReg(), &pushLoop);
+ }
+}
+
+bool
+BaselineCompiler::emitPrologue()
+{
+#ifdef JS_USE_LINK_REGISTER
+ // Push link register from generateEnterJIT()'s BLR.
+ masm.pushReturnAddress();
+ masm.checkStackAlignment();
+#endif
+ emitProfilerEnterFrame();
+
+ masm.push(BaselineFrameReg);
+ masm.moveStackPtrTo(BaselineFrameReg);
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+
+ // Initialize BaselineFrame. For eval scripts, the scope chain
+ // is passed in R1, so we have to be careful not to clobber it.
+
+ // Initialize BaselineFrame::flags.
+ masm.store32(Imm32(0), frame.addressOfFlags());
+
+ // Handle env chain pre-initialization (in case GC gets run
+ // during stack check). For global and eval scripts, the env
+ // chain is in R1. For function scripts, the env chain is in
+    // the callee; nullptr is stored for now so that GC doesn't choke
+ // on a bogus EnvironmentChain value in the frame.
+ if (function())
+ masm.storePtr(ImmPtr(nullptr), frame.addressOfEnvironmentChain());
+ else
+ masm.storePtr(R1.scratchReg(), frame.addressOfEnvironmentChain());
+
+ // Functions with a large number of locals require two stack checks.
+ // The VMCall for a fallible stack check can only occur after the
+ // env chain has been initialized, as that is required for proper
+ // exception handling if the VMCall returns false. The env chain
+ // initialization can only happen after the UndefinedValues for the
+ // local slots have been pushed.
+ // However by that time, the stack might have grown too much.
+ // In these cases, we emit an extra, early, infallible check
+ // before pushing the locals. The early check sets a flag on the
+ // frame if the stack check fails (but otherwise doesn't throw an
+ // exception). If the flag is set, then the jitcode skips past
+ // the pushing of the locals, and directly to env chain initialization
+ // followed by the actual stack check, which will throw the correct
+ // exception.
+ Label earlyStackCheckFailed;
+ if (needsEarlyStackCheck()) {
+ if (!emitStackCheck(/* earlyCheck = */ true))
+ return false;
+ masm.branchTest32(Assembler::NonZero,
+ frame.addressOfFlags(),
+ Imm32(BaselineFrame::OVER_RECURSED),
+ &earlyStackCheckFailed);
+ }
+
+ emitInitializeLocals();
+
+ if (needsEarlyStackCheck())
+ masm.bind(&earlyStackCheckFailed);
+
+#ifdef JS_TRACE_LOGGING
+ if (!emitTraceLoggerEnter())
+ return false;
+#endif
+
+ // Record the offset of the prologue, because Ion can bailout before
+ // the env chain is initialized.
+ prologueOffset_ = CodeOffset(masm.currentOffset());
+
+ // When compiling with Debugger instrumentation, set the debuggeeness of
+ // the frame before any operation that can call into the VM.
+ emitIsDebuggeeCheck();
+
+ // Initialize the env chain before any operation that may
+ // call into the VM and trigger a GC.
+ if (!initEnvironmentChain())
+ return false;
+
+ if (!emitStackCheck())
+ return false;
+
+ if (!emitDebugPrologue())
+ return false;
+
+ if (!emitWarmUpCounterIncrement())
+ return false;
+
+ if (!emitArgumentTypeChecks())
+ return false;
+
+ return true;
+}
+
+bool
+BaselineCompiler::emitEpilogue()
+{
+ // Record the offset of the epilogue, so we can do early return from
+ // Debugger handlers during on-stack recompile.
+ epilogueOffset_ = CodeOffset(masm.currentOffset());
+
+ masm.bind(&return_);
+
+#ifdef JS_TRACE_LOGGING
+ if (!emitTraceLoggerExit())
+ return false;
+#endif
+
+ masm.moveToStackPtr(BaselineFrameReg);
+ masm.pop(BaselineFrameReg);
+
+ emitProfilerExitFrame();
+
+ masm.ret();
+ return true;
+}
+
+// On input:
+// R2.scratchReg() contains object being written to.
+// Called with the baseline stack synced, except for R0 which is preserved.
+// All other registers are usable as scratch.
+// This calls:
+// void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
+bool
+BaselineCompiler::emitOutOfLinePostBarrierSlot()
+{
+ masm.bind(&postBarrierSlot_);
+
+ Register objReg = R2.scratchReg();
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(R0);
+ regs.take(objReg);
+ regs.take(BaselineFrameReg);
+ Register scratch = regs.takeAny();
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ // On ARM, save the link register before calling. It contains the return
+ // address. The |masm.ret()| later will pop this into |pc| to return.
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.push(ra);
+#endif
+ masm.pushValue(R0);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.movePtr(ImmPtr(cx->runtime()), scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(objReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
+
+ masm.popValue(R0);
+ masm.ret();
+ return true;
+}
+
+bool
+BaselineCompiler::emitIC(ICStub* stub, ICEntry::Kind kind)
+{
+ BaselineICEntry* entry = allocateICEntry(stub, kind);
+ if (!entry)
+ return false;
+
+ CodeOffset patchOffset;
+ EmitCallIC(&patchOffset, masm);
+ entry->setReturnOffset(CodeOffset(masm.currentOffset()));
+ if (!addICLoadLabel(patchOffset))
+ return false;
+
+ return true;
+}
+
+typedef bool (*CheckOverRecursedWithExtraFn)(JSContext*, BaselineFrame*, uint32_t, uint32_t);
+static const VMFunction CheckOverRecursedWithExtraInfo =
+ FunctionInfo<CheckOverRecursedWithExtraFn>(CheckOverRecursedWithExtra,
+ "CheckOverRecursedWithExtra");
+
+bool
+BaselineCompiler::emitStackCheck(bool earlyCheck)
+{
+ Label skipCall;
+ void* limitAddr = cx->runtime()->addressOfJitStackLimit();
+ uint32_t slotsSize = script->nslots() * sizeof(Value);
+ uint32_t tolerance = earlyCheck ? slotsSize : 0;
+
+ masm.moveStackPtrTo(R1.scratchReg());
+
+ // If this is the early stack check, locals haven't been pushed yet. Adjust the
+ // stack pointer to account for the locals that would be pushed before performing
+ // the guard around the vmcall to the stack check.
+ if (earlyCheck)
+ masm.subPtr(Imm32(tolerance), R1.scratchReg());
+
+ // If this is the late stack check for a frame which contains an early stack check,
+ // then the early stack check might have failed and skipped past the pushing of locals
+ // on the stack.
+ //
+ // If this is a possibility, then the OVER_RECURSED flag should be checked, and the
+ // VMCall to CheckOverRecursed done unconditionally if it's set.
+ Label forceCall;
+ if (!earlyCheck && needsEarlyStackCheck()) {
+ masm.branchTest32(Assembler::NonZero,
+ frame.addressOfFlags(),
+ Imm32(BaselineFrame::OVER_RECURSED),
+ &forceCall);
+ }
+
+ masm.branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(limitAddr), R1.scratchReg(),
+ &skipCall);
+
+ if (!earlyCheck && needsEarlyStackCheck())
+ masm.bind(&forceCall);
+
+ prepareVMCall();
+ pushArg(Imm32(earlyCheck));
+ pushArg(Imm32(tolerance));
+ masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+ pushArg(R1.scratchReg());
+
+ CallVMPhase phase = POST_INITIALIZE;
+ if (earlyCheck)
+ phase = PRE_INITIALIZE;
+ else if (needsEarlyStackCheck())
+ phase = CHECK_OVER_RECURSED;
+
+ if (!callVMNonOp(CheckOverRecursedWithExtraInfo, phase))
+ return false;
+
+ icEntries_.back().setFakeKind(earlyCheck
+ ? ICEntry::Kind_EarlyStackCheck
+ : ICEntry::Kind_StackCheck);
+
+ masm.bind(&skipCall);
+ return true;
+}
+
+void
+BaselineCompiler::emitIsDebuggeeCheck()
+{
+ if (compileDebugInstrumentation_) {
+ masm.Push(BaselineFrameReg);
+ masm.setupUnalignedABICall(R0.scratchReg());
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ masm.passABIArg(R0.scratchReg());
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::FrameIsDebuggeeCheck));
+ masm.Pop(BaselineFrameReg);
+ }
+}
+
+typedef bool (*DebugPrologueFn)(JSContext*, BaselineFrame*, jsbytecode*, bool*);
+static const VMFunction DebugPrologueInfo =
+ FunctionInfo<DebugPrologueFn>(jit::DebugPrologue, "DebugPrologue");
+
+bool
+BaselineCompiler::emitDebugPrologue()
+{
+ if (compileDebugInstrumentation_) {
+ // Load pointer to BaselineFrame in R0.
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+ if (!callVM(DebugPrologueInfo))
+ return false;
+
+ // Fix up the fake ICEntry appended by callVM for on-stack recompilation.
+ icEntries_.back().setFakeKind(ICEntry::Kind_DebugPrologue);
+
+ // If the stub returns |true|, we have to return the value stored in the
+ // frame's return value slot.
+ Label done;
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
+ {
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ masm.jump(&return_);
+ }
+ masm.bind(&done);
+ }
+
+ postDebugPrologueOffset_ = CodeOffset(masm.currentOffset());
+
+ return true;
+}
+
+typedef bool (*CheckGlobalOrEvalDeclarationConflictsFn)(JSContext*, BaselineFrame*);
+static const VMFunction CheckGlobalOrEvalDeclarationConflictsInfo =
+ FunctionInfo<CheckGlobalOrEvalDeclarationConflictsFn>(jit::CheckGlobalOrEvalDeclarationConflicts,
+ "CheckGlobalOrEvalDeclarationConflicts");
+
+typedef bool (*InitFunctionEnvironmentObjectsFn)(JSContext*, BaselineFrame*);
+static const VMFunction InitFunctionEnvironmentObjectsInfo =
+ FunctionInfo<InitFunctionEnvironmentObjectsFn>(jit::InitFunctionEnvironmentObjects,
+ "InitFunctionEnvironmentObjects");
+
+bool
+BaselineCompiler::initEnvironmentChain()
+{
+ CallVMPhase phase = POST_INITIALIZE;
+ if (needsEarlyStackCheck())
+ phase = CHECK_OVER_RECURSED;
+
+ RootedFunction fun(cx, function());
+ if (fun) {
+ // Use callee->environment as scope chain. Note that we do this also
+ // for needsSomeEnvironmentObject functions, so that the scope chain
+ // slot is properly initialized if the call triggers GC.
+ Register callee = R0.scratchReg();
+ Register scope = R1.scratchReg();
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), callee);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), scope);
+ masm.storePtr(scope, frame.addressOfEnvironmentChain());
+
+ if (fun->needsFunctionEnvironmentObjects()) {
+ // Call into the VM to create the proper environment objects.
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVMNonOp(InitFunctionEnvironmentObjectsInfo, phase))
+ return false;
+ }
+ } else if (module()) {
+ // Modules use a pre-created scope object.
+ Register scope = R1.scratchReg();
+ masm.movePtr(ImmGCPtr(&module()->initialEnvironment()), scope);
+ masm.storePtr(scope, frame.addressOfEnvironmentChain());
+ } else {
+ // EnvironmentChain pointer in BaselineFrame has already been initialized
+ // in prologue, but we need to check for redeclaration errors.
+
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVMNonOp(CheckGlobalOrEvalDeclarationConflictsInfo, phase))
+ return false;
+ }
+
+ return true;
+}
+
+typedef bool (*InterruptCheckFn)(JSContext*);
+static const VMFunction InterruptCheckInfo =
+ FunctionInfo<InterruptCheckFn>(InterruptCheck, "InterruptCheck");
+
+bool
+BaselineCompiler::emitInterruptCheck()
+{
+ frame.syncStack(0);
+
+ Label done;
+ void* interrupt = cx->runtimeAddressOfInterruptUint32();
+ masm.branch32(Assembler::Equal, AbsoluteAddress(interrupt), Imm32(0), &done);
+
+ prepareVMCall();
+ if (!callVM(InterruptCheckInfo))
+ return false;
+
+ masm.bind(&done);
+ return true;
+}
+
+typedef bool (*IonCompileScriptForBaselineFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction IonCompileScriptForBaselineInfo =
+ FunctionInfo<IonCompileScriptForBaselineFn>(IonCompileScriptForBaseline,
+ "IonCompileScriptForBaseline");
+
+bool
+BaselineCompiler::emitWarmUpCounterIncrement(bool allowOsr)
+{
+ // Emit no warm-up counter increments or bailouts if Ion is not
+    // enabled, or if the script will never be Ion-compileable.
+
+ if (!ionCompileable_ && !ionOSRCompileable_)
+ return true;
+
+ frame.assertSyncedStack();
+
+ Register scriptReg = R2.scratchReg();
+ Register countReg = R0.scratchReg();
+ Address warmUpCounterAddr(scriptReg, JSScript::offsetOfWarmUpCounter());
+
+ masm.movePtr(ImmGCPtr(script), scriptReg);
+ masm.load32(warmUpCounterAddr, countReg);
+ masm.add32(Imm32(1), countReg);
+ masm.store32(countReg, warmUpCounterAddr);
+
+ // If this is a loop inside a catch or finally block, increment the warmup
+ // counter but don't attempt OSR (Ion only compiles the try block).
+ if (analysis_.info(pc).loopEntryInCatchOrFinally) {
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+ return true;
+ }
+
+ // OSR not possible at this loop entry.
+ if (!allowOsr) {
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+ return true;
+ }
+
+ Label skipCall;
+
+ const OptimizationInfo* info = IonOptimizations.get(IonOptimizations.firstLevel());
+ uint32_t warmUpThreshold = info->compilerWarmUpThreshold(script, pc);
+ masm.branch32(Assembler::LessThan, countReg, Imm32(warmUpThreshold), &skipCall);
+
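+    // Skip if an Ion compilation for this script is already in progress
+    // (the ion script field holds the ION_COMPILING_SCRIPT sentinel).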
+ masm.branchPtr(Assembler::Equal,
+ Address(scriptReg, JSScript::offsetOfIonScript()),
+ ImmPtr(ION_COMPILING_SCRIPT), &skipCall);
+
+ // Try to compile and/or finish a compilation.
+ if (JSOp(*pc) == JSOP_LOOPENTRY) {
+        // During the loop entry we can try to OSR into Ion.
+        // The IC has logic for this.
+ ICWarmUpCounter_Fallback::Compiler stubCompiler(cx);
+ if (!emitNonOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+ } else {
+ // To call stubs we need to have an opcode. This code handles the
+        // prologue and there is no dedicated opcode present. Therefore use an
+        // annotated VM call.
+ prepareVMCall();
+
+ masm.Push(ImmPtr(pc));
+ masm.PushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ if (!callVM(IonCompileScriptForBaselineInfo))
+ return false;
+
+ // Annotate the ICEntry as warmup counter.
+ icEntries_.back().setFakeKind(ICEntry::Kind_WarmupCounter);
+ }
+ masm.bind(&skipCall);
+
+ return true;
+}
+
+bool
+BaselineCompiler::emitArgumentTypeChecks()
+{
+ if (!function())
+ return true;
+
+ frame.pushThis();
+ frame.popRegsAndSync(1);
+
+ ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
+ (uint32_t) 0);
+ if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ for (size_t i = 0; i < function()->nargs(); i++) {
+ frame.pushArg(i);
+ frame.popRegsAndSync(1);
+
+ ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
+ i + 1);
+ if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+BaselineCompiler::emitDebugTrap()
+{
+ MOZ_ASSERT(compileDebugInstrumentation_);
+ MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
+
+ bool enabled = script->stepModeEnabled() || script->hasBreakpointsAt(pc);
+
+ // Emit patchable call to debug trap handler.
+ JitCode* handler = cx->runtime()->jitRuntime()->debugTrapHandler(cx);
+ if (!handler)
+ return false;
+ mozilla::DebugOnly<CodeOffset> offset = masm.toggledCall(handler, enabled);
+
+#ifdef DEBUG
+ // Patchable call offset has to match the pc mapping offset.
+ PCMappingEntry& entry = pcMappingEntries_.back();
+ MOZ_ASSERT((&offset)->offset() == entry.nativeOffset);
+#endif
+
+ // Add an IC entry for the return offset -> pc mapping.
+ return appendICEntry(ICEntry::Kind_DebugTrap, masm.currentOffset());
+}
+
+#ifdef JS_TRACE_LOGGING
+bool
+BaselineCompiler::emitTraceLoggerEnter()
+{
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+ Register scriptReg = regs.takeAnyGeneral();
+
+ Label noTraceLogger;
+ if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger)))
+ return false;
+
+ masm.Push(loggerReg);
+ masm.Push(scriptReg);
+
+ masm.movePtr(ImmPtr(logger), loggerReg);
+
+ // Script start.
+ masm.movePtr(ImmGCPtr(script), scriptReg);
+ masm.loadPtr(Address(scriptReg, JSScript::offsetOfBaselineScript()), scriptReg);
+ Address scriptEvent(scriptReg, BaselineScript::offsetOfTraceLoggerScriptEvent());
+ masm.computeEffectiveAddress(scriptEvent, scriptReg);
+ masm.tracelogStartEvent(loggerReg, scriptReg);
+
+ // Engine start.
+ masm.tracelogStartId(loggerReg, TraceLogger_Baseline, /* force = */ true);
+
+ masm.Pop(scriptReg);
+ masm.Pop(loggerReg);
+
+ masm.bind(&noTraceLogger);
+
+ return true;
+}
+
+bool
+BaselineCompiler::emitTraceLoggerExit()
+{
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+
+ Label noTraceLogger;
+ if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger)))
+ return false;
+
+ masm.Push(loggerReg);
+ masm.movePtr(ImmPtr(logger), loggerReg);
+
+ masm.tracelogStopId(loggerReg, TraceLogger_Baseline, /* force = */ true);
+ masm.tracelogStopId(loggerReg, TraceLogger_Scripts, /* force = */ true);
+
+ masm.Pop(loggerReg);
+
+ masm.bind(&noTraceLogger);
+
+ return true;
+}
+
+bool
+BaselineCompiler::emitTraceLoggerResume(Register baselineScript, AllocatableGeneralRegisterSet& regs)
+{
+ Register scriptId = regs.takeAny();
+ Register loggerReg = regs.takeAny();
+
+ Label noTraceLogger;
+ if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger)))
+ return false;
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ masm.movePtr(ImmPtr(logger), loggerReg);
+
+ Address scriptEvent(baselineScript, BaselineScript::offsetOfTraceLoggerScriptEvent());
+ masm.computeEffectiveAddress(scriptEvent, scriptId);
+ masm.tracelogStartEvent(loggerReg, scriptId);
+ masm.tracelogStartId(loggerReg, TraceLogger_Baseline, /* force = */ true);
+
+ regs.add(loggerReg);
+ regs.add(scriptId);
+
+ masm.bind(&noTraceLogger);
+
+ return true;
+}
+#endif
+
+void
+BaselineCompiler::emitProfilerEnterFrame()
+{
+ // Store stack position to lastProfilingFrame variable, guarded by a toggled jump.
+    // The toggled jump starts off disabled.
+ Label noInstrument;
+ CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
+ masm.profilerEnterFrame(masm.getStackPointer(), R0.scratchReg());
+ masm.bind(&noInstrument);
+
+ // Store the start offset in the appropriate location.
+ MOZ_ASSERT(!profilerEnterFrameToggleOffset_.bound());
+ profilerEnterFrameToggleOffset_ = toggleOffset;
+}
+
+void
+BaselineCompiler::emitProfilerExitFrame()
+{
+ // Store previous frame to lastProfilingFrame variable, guarded by a toggled jump.
+    // The toggled jump starts off disabled.
+ Label noInstrument;
+ CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
+ masm.profilerExitFrame();
+ masm.bind(&noInstrument);
+
+ // Store the start offset in the appropriate location.
+ MOZ_ASSERT(!profilerExitFrameToggleOffset_.bound());
+ profilerExitFrameToggleOffset_ = toggleOffset;
+}
+
+MethodStatus
+BaselineCompiler::emitBody()
+{
+ MOZ_ASSERT(pc == script->code());
+
+ bool lastOpUnreachable = false;
+ uint32_t emittedOps = 0;
+ mozilla::DebugOnly<jsbytecode*> prevpc = pc;
+
+ while (true) {
+ JSOp op = JSOp(*pc);
+ JitSpew(JitSpew_BaselineOp, "Compiling op @ %d: %s",
+ int(script->pcToOffset(pc)), CodeName[op]);
+
+ BytecodeInfo* info = analysis_.maybeInfo(pc);
+
+ // Skip unreachable ops.
+ if (!info) {
+            // Test if this is the last instruction and stop emitting in that case.
+ pc += GetBytecodeLength(pc);
+ if (pc >= script->codeEnd())
+ break;
+
+ lastOpUnreachable = true;
+ prevpc = pc;
+ continue;
+ }
+
+ // Fully sync the stack if there are incoming jumps.
+ if (info->jumpTarget) {
+ frame.syncStack(0);
+ frame.setStackDepth(info->stackDepth);
+ }
+
+ // Always sync in debug mode.
+ if (compileDebugInstrumentation_)
+ frame.syncStack(0);
+
+ // At the beginning of any op, at most the top 2 stack-values are unsynced.
+ if (frame.stackDepth() > 2)
+ frame.syncStack(2);
+
+ frame.assertValidState(*info);
+
+ masm.bind(labelOf(pc));
+
+ // Add a PC -> native mapping entry for the current op. These entries are
+ // used when we need the native code address for a given pc, for instance
+ // for bailouts from Ion, the debugger and exception handling. See
+ // PCMappingIndexEntry for more information.
+ bool addIndexEntry = (pc == script->code() || lastOpUnreachable || emittedOps > 100);
+ if (addIndexEntry)
+ emittedOps = 0;
+ if (!addPCMappingEntry(addIndexEntry)) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Emit traps for breakpoints and step mode.
+ if (compileDebugInstrumentation_ && !emitDebugTrap())
+ return Method_Error;
+
+ switch (op) {
+ default:
+ JitSpew(JitSpew_BaselineAbort, "Unhandled op: %s", CodeName[op]);
+ return Method_CantCompile;
+
+#define EMIT_OP(OP) \
+ case OP: \
+ if (!this->emit_##OP()) \
+ return Method_Error; \
+ break;
+OPCODE_LIST(EMIT_OP)
+#undef EMIT_OP
+ }
+
+ // If the main instruction is not a jump target, then we emit the
+ // corresponding code coverage counter.
+ if (pc == script->main() && !BytecodeIsJumpTarget(op)) {
+ if (!emit_JSOP_JUMPTARGET())
+ return Method_Error;
+ }
+
+        // Test if this is the last instruction and stop emitting in that case.
+ pc += GetBytecodeLength(pc);
+ if (pc >= script->codeEnd())
+ break;
+
+ emittedOps++;
+ lastOpUnreachable = false;
+#ifdef DEBUG
+ prevpc = pc;
+#endif
+ }
+
+ MOZ_ASSERT(JSOp(*prevpc) == JSOP_RETRVAL);
+ return Method_Compiled;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOP()
+{
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOP_DESTRUCTURING()
+{
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_LABEL()
+{
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POP()
+{
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POPN()
+{
+ frame.popn(GET_UINT16(pc));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DUPAT()
+{
+ frame.syncStack(0);
+
+ // DUPAT takes a value on the stack and re-pushes it on top. It's like
+ // GETLOCAL but it addresses from the top of the stack instead of from the
+ // stack frame.
+
+ int depth = -(GET_UINT24(pc) + 1);
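+    // For example, DUPAT 0 re-pushes the topmost value (peek(-1)).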
+ masm.loadValue(frame.addressOfStackValue(frame.peek(depth)), R0);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DUP()
+{
+ // Keep top stack value in R0, sync the rest so that we can use R1. We use
+ // separate registers because every register can be used by at most one
+ // StackValue.
+ frame.popRegsAndSync(1);
+ masm.moveValue(R0, R1);
+
+ // inc/dec ops use DUP followed by ONE, ADD. Push R0 last to avoid a move.
+ frame.push(R1);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DUP2()
+{
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1);
+
+ frame.push(R0);
+ frame.push(R1);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SWAP()
+{
+ // Keep top stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ frame.push(R1);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_PICK()
+{
+ frame.syncStack(0);
+
+ // Pick takes a value on the stack and moves it to the top.
+ // For instance, pick 2:
+ // before: A B C D E
+ // after : A B D E C
+
+ // First, move value at -(amount + 1) into R0.
+ int depth = -(GET_INT8(pc) + 1);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(depth)), R0);
+
+ // Move the other values down.
+ depth++;
+ for (; depth < 0; depth++) {
+ Address source = frame.addressOfStackValue(frame.peek(depth));
+ Address dest = frame.addressOfStackValue(frame.peek(depth - 1));
+ masm.loadValue(source, R1);
+ masm.storeValue(R1, dest);
+ }
+
+ // Push R0.
+ frame.pop();
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GOTO()
+{
+ frame.syncStack(0);
+
+ jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
+ masm.jump(labelOf(target));
+ return true;
+}
+
+bool
+BaselineCompiler::emitToBoolean()
+{
+ Label skipIC;
+ masm.branchTestBoolean(Assembler::Equal, R0, &skipIC);
+
+ // Call IC
+ ICToBool_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ masm.bind(&skipIC);
+ return true;
+}
+
+bool
+BaselineCompiler::emitTest(bool branchIfTrue)
+{
+ bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ if (!knownBoolean && !emitToBoolean())
+ return false;
+
+ // IC will leave a BooleanValue in R0, just need to branch on it.
+ masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_IFEQ()
+{
+ return emitTest(false);
+}
+
+bool
+BaselineCompiler::emit_JSOP_IFNE()
+{
+ return emitTest(true);
+}
+
+bool
+BaselineCompiler::emitAndOr(bool branchIfTrue)
+{
+ bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+ // AND and OR leave the original value on the stack.
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+ if (!knownBoolean && !emitToBoolean())
+ return false;
+
+ masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_AND()
+{
+ return emitAndOr(false);
+}
+
+bool
+BaselineCompiler::emit_JSOP_OR()
+{
+ return emitAndOr(true);
+}
+
+bool
+BaselineCompiler::emit_JSOP_NOT()
+{
+ bool knownBoolean = frame.peek(-1)->isKnownBoolean();
+
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ if (!knownBoolean && !emitToBoolean())
+ return false;
+
+ masm.notBoolean(R0);
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_POS()
+{
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ // Inline path for int32 and double.
+ Label done;
+ masm.branchTestNumber(Assembler::Equal, R0, &done);
+
+ // Call IC.
+ ICToNumber_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ masm.bind(&done);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_LOOPHEAD()
+{
+ if (!emit_JSOP_JUMPTARGET())
+ return false;
+ return emitInterruptCheck();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LOOPENTRY()
+{
+ if (!emit_JSOP_JUMPTARGET())
+ return false;
+ frame.syncStack(0);
+ return emitWarmUpCounterIncrement(LoopEntryCanIonOsr(pc));
+}
+
+bool
+BaselineCompiler::emit_JSOP_VOID()
+{
+ frame.pop();
+ frame.push(UndefinedValue());
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UNDEFINED()
+{
+ // If this ever changes, change what JSOP_GIMPLICITTHIS does too.
+ frame.push(UndefinedValue());
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_HOLE()
+{
+ frame.push(MagicValue(JS_ELEMENTS_HOLE));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NULL()
+{
+ frame.push(NullValue());
+ return true;
+}
+
+typedef bool (*ThrowCheckIsObjectFn)(JSContext*, CheckIsObjectKind);
+static const VMFunction ThrowCheckIsObjectInfo =
+ FunctionInfo<ThrowCheckIsObjectFn>(ThrowCheckIsObject, "ThrowCheckIsObject");
+
+bool
+BaselineCompiler::emit_JSOP_CHECKISOBJ()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ Label ok;
+ masm.branchTestObject(Assembler::Equal, R0, &ok);
+
+ prepareVMCall();
+
+ pushArg(Imm32(GET_UINT8(pc)));
+ if (!callVM(ThrowCheckIsObjectInfo))
+ return false;
+
+ masm.bind(&ok);
+ return true;
+}
+
+typedef bool (*ThrowUninitializedThisFn)(JSContext*, BaselineFrame* frame);
+static const VMFunction ThrowUninitializedThisInfo =
+ FunctionInfo<ThrowUninitializedThisFn>(BaselineThrowUninitializedThis,
+ "BaselineThrowUninitializedThis");
+
+bool
+BaselineCompiler::emit_JSOP_CHECKTHIS()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ return emitCheckThis(R0);
+}
+
+bool
+BaselineCompiler::emitCheckThis(ValueOperand val)
+{
+ Label thisOK;
+ masm.branchTestMagic(Assembler::NotEqual, val, &thisOK);
+
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, val.scratchReg());
+ pushArg(val.scratchReg());
+
+ if (!callVM(ThrowUninitializedThisInfo))
+ return false;
+
+ masm.bind(&thisOK);
+ return true;
+}
+
+typedef bool (*ThrowBadDerivedReturnFn)(JSContext*, HandleValue);
+static const VMFunction ThrowBadDerivedReturnInfo =
+ FunctionInfo<ThrowBadDerivedReturnFn>(jit::ThrowBadDerivedReturn, "ThrowBadDerivedReturn");
+
+bool
+BaselineCompiler::emit_JSOP_CHECKRETURN()
+{
+ MOZ_ASSERT(script->isDerivedClassConstructor());
+
+ // Load |this| in R0, return value in R1.
+ frame.popRegsAndSync(1);
+ emitLoadReturnValue(R1);
+
+ Label done, returnOK;
+ masm.branchTestObject(Assembler::Equal, R1, &done);
+ masm.branchTestUndefined(Assembler::Equal, R1, &returnOK);
+
+ prepareVMCall();
+ pushArg(R1);
+ if (!callVM(ThrowBadDerivedReturnInfo))
+ return false;
+ masm.assumeUnreachable("Should throw on bad derived constructor return");
+
+ masm.bind(&returnOK);
+
+ if (!emitCheckThis(R0))
+ return false;
+
+ // Store |this| in the return value slot.
+ masm.storeValue(R0, frame.addressOfReturnValue());
+ masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+
+ masm.bind(&done);
+ return true;
+}
+
+typedef bool (*GetFunctionThisFn)(JSContext*, BaselineFrame*, MutableHandleValue);
+static const VMFunction GetFunctionThisInfo =
+ FunctionInfo<GetFunctionThisFn>(jit::BaselineGetFunctionThis, "BaselineGetFunctionThis");
+
+bool
+BaselineCompiler::emit_JSOP_FUNCTIONTHIS()
+{
+ MOZ_ASSERT(function());
+ MOZ_ASSERT(!function()->isArrow());
+
+ frame.pushThis();
+
+ // In strict mode code or self-hosted functions, |this| is left alone.
+ if (script->strict() || (function() && function()->isSelfHostedBuiltin()))
+ return true;
+
+ // Load |thisv| in R0. Skip the call if it's already an object.
+ Label skipCall;
+ frame.popRegsAndSync(1);
+ masm.branchTestObject(Assembler::Equal, R0, &skipCall);
+
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+
+ pushArg(R1.scratchReg());
+
+ if (!callVM(GetFunctionThisInfo))
+ return false;
+
+ masm.bind(&skipCall);
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*GetNonSyntacticGlobalThisFn)(JSContext*, HandleObject, MutableHandleValue);
+static const VMFunction GetNonSyntacticGlobalThisInfo =
+ FunctionInfo<GetNonSyntacticGlobalThisFn>(js::GetNonSyntacticGlobalThis,
+ "GetNonSyntacticGlobalThis");
+
+bool
+BaselineCompiler::emit_JSOP_GLOBALTHIS()
+{
+ frame.syncStack(0);
+
+ if (!script->hasNonSyntacticScope()) {
+ LexicalEnvironmentObject* globalLexical = &script->global().lexicalEnvironment();
+ masm.moveValue(globalLexical->thisValue(), R0);
+ frame.push(R0);
+ return true;
+ }
+
+ prepareVMCall();
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVM(GetNonSyntacticGlobalThisInfo))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TRUE()
+{
+ frame.push(BooleanValue(true));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_FALSE()
+{
+ frame.push(BooleanValue(false));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ZERO()
+{
+ frame.push(Int32Value(0));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ONE()
+{
+ frame.push(Int32Value(1));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INT8()
+{
+ frame.push(Int32Value(GET_INT8(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INT32()
+{
+ frame.push(Int32Value(GET_INT32(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UINT16()
+{
+ frame.push(Int32Value(GET_UINT16(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_UINT24()
+{
+ frame.push(Int32Value(GET_UINT24(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DOUBLE()
+{
+ frame.push(script->getConst(GET_UINT32_INDEX(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRING()
+{
+ frame.push(StringValue(script->getAtom(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SYMBOL()
+{
+ unsigned which = GET_UINT8(pc);
+ JS::Symbol* sym = cx->runtime()->wellKnownSymbols->get(which);
+ frame.push(SymbolValue(sym));
+ return true;
+}
+
+typedef JSObject* (*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind);
+static const VMFunction DeepCloneObjectLiteralInfo =
+ FunctionInfo<DeepCloneObjectLiteralFn>(DeepCloneObjectLiteral, "DeepCloneObjectLiteral");
+
+bool
+BaselineCompiler::emit_JSOP_OBJECT()
+{
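+ // When the compartment requires singletons to be cloned, clone the object
+ // literal at runtime; otherwise flag the compartment so the script's object
+ // can be reused as a value directly.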
+ JSCompartment* comp = cx->compartment();
+ if (comp->creationOptions().cloneSingletons()) {
+ RootedObject obj(cx, script->getObject(GET_UINT32_INDEX(pc)));
+ if (!obj)
+ return false;
+
+ prepareVMCall();
+
+ pushArg(ImmWord(TenuredObject));
+ pushArg(ImmGCPtr(obj));
+
+ if (!callVM(DeepCloneObjectLiteralInfo))
+ return false;
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+ }
+
+ comp->behaviors().setSingletonsAsValues();
+ frame.push(ObjectValue(*script->getObject(pc)));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLSITEOBJ()
+{
+ RootedObject cso(cx, script->getObject(pc));
+ RootedObject raw(cx, script->getObject(GET_UINT32_INDEX(pc) + 1));
+ if (!cso || !raw)
+ return false;
+ RootedValue rawValue(cx);
+ rawValue.setObject(*raw);
+
+ if (!ProcessCallSiteObjOperation(cx, cso, raw, rawValue))
+ return false;
+
+ frame.push(ObjectValue(*cso));
+ return true;
+}
+
+typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, JSObject*);
+static const VMFunction CloneRegExpObjectInfo =
+ FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject, "CloneRegExpObject");
+
+bool
+BaselineCompiler::emit_JSOP_REGEXP()
+{
+ RootedObject reObj(cx, script->getRegExp(pc));
+
+ prepareVMCall();
+ pushArg(ImmGCPtr(reObj));
+ if (!callVM(CloneRegExpObjectInfo))
+ return false;
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject);
+static const VMFunction LambdaInfo = FunctionInfo<LambdaFn>(js::Lambda, "Lambda");
+
+bool
+BaselineCompiler::emit_JSOP_LAMBDA()
+{
+ RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc)));
+
+ prepareVMCall();
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ pushArg(R0.scratchReg());
+ pushArg(ImmGCPtr(fun));
+
+ if (!callVM(LambdaInfo))
+ return false;
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
+static const VMFunction LambdaArrowInfo =
+ FunctionInfo<LambdaArrowFn>(js::LambdaArrow, "LambdaArrow");
+
+bool
+BaselineCompiler::emit_JSOP_LAMBDA_ARROW()
+{
+ // Keep pushed newTarget in R0.
+ frame.popRegsAndSync(1);
+
+ RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc)));
+
+ prepareVMCall();
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R2.scratchReg());
+
+ pushArg(R0);
+ pushArg(R2.scratchReg());
+ pushArg(ImmGCPtr(fun));
+
+ if (!callVM(LambdaArrowInfo))
+ return false;
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
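+// Store |source| to |dest|. Constants and registers are stored directly;
+// values that live in frame slots are loaded into |scratch| first.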
+void
+BaselineCompiler::storeValue(const StackValue* source, const Address& dest,
+ const ValueOperand& scratch)
+{
+ switch (source->kind()) {
+ case StackValue::Constant:
+ masm.storeValue(source->constant(), dest);
+ break;
+ case StackValue::Register:
+ masm.storeValue(source->reg(), dest);
+ break;
+ case StackValue::LocalSlot:
+ masm.loadValue(frame.addressOfLocal(source->localSlot()), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::ArgSlot:
+ masm.loadValue(frame.addressOfArg(source->argSlot()), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::ThisSlot:
+ masm.loadValue(frame.addressOfThis(), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::EvalNewTargetSlot:
+ MOZ_ASSERT(script->isForEval());
+ masm.loadValue(frame.addressOfEvalNewTarget(), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::Stack:
+ masm.loadValue(frame.addressOfStackValue(source), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITOR()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITXOR()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITAND()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LSH()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_RSH()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_URSH()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_ADD()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SUB()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_MUL()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_DIV()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_MOD()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_POW()
+{
+ return emitBinaryArith();
+}
+
+bool
+BaselineCompiler::emitBinaryArith()
+{
+ // Keep the top two stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC
+ ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emitUnaryArith()
+{
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ // Call IC
+ ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_BITNOT()
+{
+ return emitUnaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEG()
+{
+ return emitUnaryArith();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LT()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LE()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GT()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GE()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_EQ()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NE()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emitCompare()
+{
+ // Keep the top two stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC.
+ ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTEQ()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTNE()
+{
+ return emitCompare();
+}
+
+bool
+BaselineCompiler::emit_JSOP_CONDSWITCH()
+{
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CASE()
+{
+ frame.popRegsAndSync(2);
+ frame.push(R0);
+ frame.syncStack(0);
+
+ // Call IC.
+ ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
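+ // The compare IC returns its boolean result in R0; extract the payload
+ // and test it.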
+ Register payload = masm.extractInt32(R0, R0.scratchReg());
+ jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
+
+ Label done;
+ masm.branch32(Assembler::Equal, payload, Imm32(0), &done);
+ {
+ // Pop the switch value if the case matches.
+ masm.addToStackPtr(Imm32(sizeof(Value)));
+ masm.jump(labelOf(target));
+ }
+ masm.bind(&done);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_DEFAULT()
+{
+ frame.pop();
+ return emit_JSOP_GOTO();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LINENO()
+{
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWARRAY()
+{
+ frame.syncStack(0);
+
+ uint32_t length = GET_UINT32(pc);
+ MOZ_ASSERT(length <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce JSOP_NEWARRAY with a length exceeding int32_t range");
+
+ // Pass length in R0.
+ masm.move32(Imm32(AssertedCast<int32_t>(length)), R0.scratchReg());
+
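+ // Fetch the allocation-site object group so the IC allocates arrays with
+ // the type information tracked for this site.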
+ ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array);
+ if (!group)
+ return false;
+
+ ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SPREADCALLARRAY()
+{
+ return emit_JSOP_NEWARRAY();
+}
+
+typedef JSObject* (*NewArrayCopyOnWriteFn)(JSContext*, HandleArrayObject, gc::InitialHeap);
+const VMFunction jit::NewArrayCopyOnWriteInfo =
+ FunctionInfo<NewArrayCopyOnWriteFn>(js::NewDenseCopyOnWriteArray, "NewDenseCopyOnWriteArray");
+
+bool
+BaselineCompiler::emit_JSOP_NEWARRAY_COPYONWRITE()
+{
+ RootedScript scriptRoot(cx, script);
+ JSObject* obj = ObjectGroup::getOrFixupCopyOnWriteObject(cx, scriptRoot, pc);
+ if (!obj)
+ return false;
+
+ prepareVMCall();
+
+ pushArg(Imm32(gc::DefaultHeap));
+ pushArg(ImmGCPtr(obj));
+
+ if (!callVM(NewArrayCopyOnWriteInfo))
+ return false;
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM_ARRAY()
+{
+ // Keep the object and rhs on the stack.
+ frame.syncStack(0);
+
+ // Load object in R0, index in R1.
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+ uint32_t index = GET_UINT32(pc);
+ MOZ_ASSERT(index <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce JSOP_INITELEM_ARRAY with a length exceeding "
+ "int32_t range");
+ masm.moveValue(Int32Value(AssertedCast<int32_t>(index)), R1);
+
+ // Call IC.
+ ICSetElem_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Pop the rhs, so that the object is on the top of the stack.
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWOBJECT()
+{
+ frame.syncStack(0);
+
+ ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWINIT()
+{
+ frame.syncStack(0);
+ JSProtoKey key = JSProtoKey(GET_UINT8(pc));
+
+ if (key == JSProto_Array) {
+ // Pass length in R0.
+ masm.move32(Imm32(0), R0.scratchReg());
+
+ ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array);
+ if (!group)
+ return false;
+
+ ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+ } else {
+ MOZ_ASSERT(key == JSProto_Object);
+
+ ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM()
+{
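+ // The SetElem IC expects the object in R0, the index in R1, and the rhs
+ // on top of the stack, so the rhs is parked in the scratch slot while the
+ // operands are shuffled into place.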
+ // Store RHS in the scratch slot.
+ storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
+ frame.pop();
+
+ // Keep object and index in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Push the object to store the result of the IC.
+ frame.push(R0);
+ frame.syncStack(0);
+
+ // Keep RHS on the stack.
+ frame.pushScratchValue();
+
+ // Call IC.
+ ICSetElem_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Pop the rhs, so that the object is on the top of the stack.
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENELEM()
+{
+ return emit_JSOP_INITELEM();
+}
+
+typedef bool (*MutateProtoFn)(JSContext* cx, HandlePlainObject obj, HandleValue newProto);
+static const VMFunction MutateProtoInfo =
+ FunctionInfo<MutateProtoFn>(MutatePrototype, "MutatePrototype");
+
+bool
+BaselineCompiler::emit_JSOP_MUTATEPROTO()
+{
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+
+ masm.extractObject(frame.addressOfStackValue(frame.peek(-2)), R0.scratchReg());
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1);
+
+ prepareVMCall();
+
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ if (!callVM(MutateProtoInfo))
+ return false;
+
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITPROP()
+{
+ // Keep lhs in R0, rhs in R1.
+ frame.popRegsAndSync(2);
+
+ // Push the object to store the result of the IC.
+ frame.push(R0);
+ frame.syncStack(0);
+
+ // Call IC.
+ ICSetProp_Fallback::Compiler compiler(cx);
+ return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITLOCKEDPROP()
+{
+ return emit_JSOP_INITPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENPROP()
+{
+ return emit_JSOP_INITPROP();
+}
+
+typedef bool (*NewbornArrayPushFn)(JSContext*, HandleObject, const Value&);
+static const VMFunction NewbornArrayPushInfo =
+ FunctionInfo<NewbornArrayPushFn>(NewbornArrayPush, "NewbornArrayPush");
+
+bool
+BaselineCompiler::emit_JSOP_ARRAYPUSH()
+{
+ // Keep value in R0, object in R1.
+ frame.popRegsAndSync(2);
+ masm.unboxObject(R1, R1.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0);
+ pushArg(R1.scratchReg());
+
+ return callVM(NewbornArrayPushInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETELEM()
+{
+ // Keep top two stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC.
+ ICGetElem_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLELEM()
+{
+ return emit_JSOP_GETELEM();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETELEM()
+{
+ // Store RHS in the scratch slot.
+ storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
+ frame.pop();
+
+ // Keep object and index in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Keep RHS on the stack.
+ frame.pushScratchValue();
+
+ // Call IC.
+ ICSetElem_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTSETELEM()
+{
+ return emit_JSOP_SETELEM();
+}
+
+typedef bool (*DeleteElementFn)(JSContext*, HandleValue, HandleValue, bool*);
+static const VMFunction DeleteElementStrictInfo
+ = FunctionInfo<DeleteElementFn>(DeleteElementJit<true>, "DeleteElementStrict");
+static const VMFunction DeleteElementNonStrictInfo
+ = FunctionInfo<DeleteElementFn>(DeleteElementJit<false>, "DeleteElementNonStrict");
+
+bool
+BaselineCompiler::emit_JSOP_DELELEM()
+{
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1);
+
+ prepareVMCall();
+
+ pushArg(R1);
+ pushArg(R0);
+
+ bool strict = JSOp(*pc) == JSOP_STRICTDELELEM;
+ if (!callVM(strict ? DeleteElementStrictInfo : DeleteElementNonStrictInfo))
+ return false;
+
+ masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+ frame.popn(2);
+ frame.push(R1);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTDELELEM()
+{
+ return emit_JSOP_DELELEM();
+}
+
+bool
+BaselineCompiler::emit_JSOP_IN()
+{
+ frame.popRegsAndSync(2);
+
+ ICIn_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETGNAME()
+{
+ if (script->hasNonSyntacticScope())
+ return emit_JSOP_GETNAME();
+
+ RootedPropertyName name(cx, script->getName(pc));
+
+ // These names are non-configurable on the global and cannot be shadowed.
+ if (name == cx->names().undefined) {
+ frame.push(UndefinedValue());
+ return true;
+ }
+ if (name == cx->names().NaN) {
+ frame.push(cx->runtime()->NaNValue);
+ return true;
+ }
+ if (name == cx->names().Infinity) {
+ frame.push(cx->runtime()->positiveInfinityValue);
+ return true;
+ }
+
+ frame.syncStack(0);
+
+ masm.movePtr(ImmGCPtr(&script->global().lexicalEnvironment()), R0.scratchReg());
+
+ // Call IC.
+ ICGetName_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_BINDGNAME()
+{
+ if (!script->hasNonSyntacticScope()) {
+ // At compile time we can bind the name to the global lexical scope if the
+ // binding already exists, is initialized, and is writable (i.e., an
+ // initialized 'let').
+ RootedPropertyName name(cx, script->getName(pc));
+ Rooted<LexicalEnvironmentObject*> env(cx, &script->global().lexicalEnvironment());
+ if (Shape* shape = env->lookup(cx, name)) {
+ if (shape->writable() &&
+ !env->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL))
+ {
+ frame.push(ObjectValue(*env));
+ return true;
+ }
+ } else if (Shape* shape = script->global().lookup(cx, name)) {
+ // If the property does not currently exist on the global lexical
+ // scope, we can bind name to the global object if the property
+ // exists on the global and is non-configurable, as then it cannot
+ // be shadowed.
+ if (!shape->configurable()) {
+ frame.push(ObjectValue(script->global()));
+ return true;
+ }
+ }
+
+ // Otherwise we have to use the dynamic scope chain.
+ }
+
+ return emit_JSOP_BINDNAME();
+}
+
+typedef JSObject* (*BindVarFn)(JSContext*, HandleObject);
+static const VMFunction BindVarInfo = FunctionInfo<BindVarFn>(jit::BindVar, "BindVar");
+
+bool
+BaselineCompiler::emit_JSOP_BINDVAR()
+{
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ if (!callVM(BindVarInfo))
+ return false;
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETPROP()
+{
+ // Keep lhs in R0, rhs in R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC.
+ ICSetProp_Fallback::Compiler compiler(cx);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ // The IC will return the RHS value in R0, mark it as pushed value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTSETPROP()
+{
+ return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETNAME()
+{
+ return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTSETNAME()
+{
+ return emit_JSOP_SETPROP();
+}
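+ // A derived-class constructor must return an object or |undefined|. On
+ // |undefined| the initialized |this| becomes the return value; anything
+ // else throws.
+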
+
+bool
+BaselineCompiler::emit_JSOP_SETGNAME()
+{
+ return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTSETGNAME()
+{
+ return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETPROP()
+{
+ // Keep object in R0.
+ frame.popRegsAndSync(1);
+
+ // Call IC.
+ ICGetProp_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLPROP()
+{
+ return emit_JSOP_GETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_LENGTH()
+{
+ return emit_JSOP_GETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETXPROP()
+{
+ return emit_JSOP_GETPROP();
+}
+
+typedef bool (*DeletePropertyFn)(JSContext*, HandleValue, HandlePropertyName, bool*);
+static const VMFunction DeletePropertyStrictInfo =
+ FunctionInfo<DeletePropertyFn>(DeletePropertyJit<true>, "DeletePropertyStrict");
+static const VMFunction DeletePropertyNonStrictInfo =
+ FunctionInfo<DeletePropertyFn>(DeletePropertyJit<false>, "DeletePropertyNonStrict");
+
+bool
+BaselineCompiler::emit_JSOP_DELPROP()
+{
+ // Keep value on the stack for the decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ prepareVMCall();
+
+ pushArg(ImmGCPtr(script->getName(pc)));
+ pushArg(R0);
+
+ bool strict = JSOp(*pc) == JSOP_STRICTDELPROP;
+ if (!callVM(strict ? DeletePropertyStrictInfo : DeletePropertyNonStrictInfo))
+ return false;
+
+ masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+ frame.pop();
+ frame.push(R1);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTDELPROP()
+{
+ return emit_JSOP_DELPROP();
+}
+
+void
+BaselineCompiler::getEnvironmentCoordinateObject(Register reg)
+{
+ EnvironmentCoordinate ec(pc);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), reg);
+ for (unsigned i = ec.hops(); i; i--)
+ masm.extractObject(Address(reg, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
+}
+
+Address
+BaselineCompiler::getEnvironmentCoordinateAddressFromObject(Register objReg, Register reg)
+{
+ EnvironmentCoordinate ec(pc);
+ Shape* shape = EnvironmentCoordinateToEnvironmentShape(script, pc);
+
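+ // Slots past the object's fixed slots live in the out-of-line slots array.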
+ if (shape->numFixedSlots() <= ec.slot()) {
+ masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), reg);
+ return Address(reg, (ec.slot() - shape->numFixedSlots()) * sizeof(Value));
+ }
+
+ return Address(objReg, NativeObject::getFixedSlotOffset(ec.slot()));
+}
+
+Address
+BaselineCompiler::getEnvironmentCoordinateAddress(Register reg)
+{
+ getEnvironmentCoordinateObject(reg);
+ return getEnvironmentCoordinateAddressFromObject(reg, reg);
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETALIASEDVAR()
+{
+ frame.syncStack(0);
+
+ Address address = getEnvironmentCoordinateAddress(R0.scratchReg());
+ masm.loadValue(address, R0);
+
+ if (ionCompileable_) {
+ // No need to monitor types if we know Ion can't compile this script.
+ ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
+ (ICMonitoredFallbackStub*) nullptr);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETALIASEDVAR()
+{
+ JSScript* outerScript = EnvironmentCoordinateFunctionScript(script, pc);
+ if (outerScript && outerScript->treatAsRunOnce()) {
+ // Type updates for this operation might need to be tracked, so treat
+ // this as a SETPROP.
+
+ // Load rhs into R1.
+ frame.syncStack(1);
+ frame.popValue(R1);
+
+ // Load and box lhs into R0.
+ getEnvironmentCoordinateObject(R2.scratchReg());
+ masm.tagValue(JSVAL_TYPE_OBJECT, R2.scratchReg(), R0);
+
+ // Call SETPROP IC.
+ ICSetProp_Fallback::Compiler compiler(cx);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ // The IC will return the RHS value in R0, mark it as pushed value.
+ frame.push(R0);
+ return true;
+ }
+
+ // Keep rvalue in R0.
+ frame.popRegsAndSync(1);
+ Register objReg = R2.scratchReg();
+
+ getEnvironmentCoordinateObject(objReg);
+ Address address = getEnvironmentCoordinateAddressFromObject(objReg, R1.scratchReg());
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+ masm.storeValue(R0, address);
+ frame.push(R0);
+
+ // Only R0 is live at this point.
+ // The environment coordinate object is already in R2.scratchReg().
+ Register temp = R1.scratchReg();
+
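+ // Skip the post-barrier unless a nursery object is being stored into a
+ // tenured environment object.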
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, objReg, temp, &skipBarrier);
+ masm.branchValueIsNurseryObject(Assembler::NotEqual, R0, temp, &skipBarrier);
+
+ masm.call(&postBarrierSlot_); // Won't clobber R0
+
+ masm.bind(&skipBarrier);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETNAME()
+{
+ frame.syncStack(0);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ // Call IC.
+ ICGetName_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_BINDNAME()
+{
+ frame.syncStack(0);
+
+ if (*pc == JSOP_BINDGNAME && !script->hasNonSyntacticScope())
+ masm.movePtr(ImmGCPtr(&script->global().lexicalEnvironment()), R0.scratchReg());
+ else
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ // Call IC.
+ ICBindName_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*DeleteNameFn)(JSContext*, HandlePropertyName, HandleObject,
+ MutableHandleValue);
+static const VMFunction DeleteNameInfo =
+ FunctionInfo<DeleteNameFn>(DeleteNameOperation, "DeleteNameOperation");
+
+bool
+BaselineCompiler::emit_JSOP_DELNAME()
+{
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0.scratchReg());
+ pushArg(ImmGCPtr(script->getName(pc)));
+
+ if (!callVM(DeleteNameInfo))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETIMPORT()
+{
+ ModuleEnvironmentObject* env = GetModuleEnvironmentForScript(script);
+ MOZ_ASSERT(env);
+
+ ModuleEnvironmentObject* targetEnv;
+ Shape* shape;
+ MOZ_ALWAYS_TRUE(env->lookupImport(NameToId(script->getName(pc)), &targetEnv, &shape));
+
+ EnsureTrackPropertyTypes(cx, targetEnv, shape->propid());
+
+ frame.syncStack(0);
+
+ uint32_t slot = shape->slot();
+ Register scratch = R0.scratchReg();
+ masm.movePtr(ImmGCPtr(targetEnv), scratch);
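+ // The binding lives either in one of the environment's fixed slots or in
+ // its out-of-line slots array.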
+ if (slot < targetEnv->numFixedSlots()) {
+ masm.loadValue(Address(scratch, NativeObject::getFixedSlotOffset(slot)), R0);
+ } else {
+ masm.loadPtr(Address(scratch, NativeObject::offsetOfSlots()), scratch);
+ masm.loadValue(Address(scratch, (slot - targetEnv->numFixedSlots()) * sizeof(Value)), R0);
+ }
+
+ // Imports are initialized by this point except in rare circumstances, so
+ // don't emit a check unless we have to.
+ if (targetEnv->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL))
+ if (!emitUninitializedLexicalCheck(R0))
+ return false;
+
+ if (ionCompileable_) {
+ // No need to monitor types if we know Ion can't compile this script.
+ ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
+ (ICMonitoredFallbackStub*) nullptr);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETINTRINSIC()
+{
+ frame.syncStack(0);
+
+ ICGetIntrinsic_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*DefVarFn)(JSContext*, HandlePropertyName, unsigned, HandleObject);
+static const VMFunction DefVarInfo = FunctionInfo<DefVarFn>(DefVar, "DefVar");
+
+bool
+BaselineCompiler::emit_JSOP_DEFVAR()
+{
+ frame.syncStack(0);
+
+ unsigned attrs = JSPROP_ENUMERATE;
+ if (!script->isForEval())
+ attrs |= JSPROP_PERMANENT;
+ MOZ_ASSERT(attrs <= UINT32_MAX);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0.scratchReg());
+ pushArg(Imm32(attrs));
+ pushArg(ImmGCPtr(script->getName(pc)));
+
+ return callVM(DefVarInfo);
+}
+
+typedef bool (*DefLexicalFn)(JSContext*, HandlePropertyName, unsigned, HandleObject);
+static const VMFunction DefLexicalInfo = FunctionInfo<DefLexicalFn>(DefLexical, "DefLexical");
+
+bool
+BaselineCompiler::emit_JSOP_DEFCONST()
+{
+ return emit_JSOP_DEFLET();
+}
+
+bool
+BaselineCompiler::emit_JSOP_DEFLET()
+{
+ frame.syncStack(0);
+
+ unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT;
+ if (*pc == JSOP_DEFCONST)
+ attrs |= JSPROP_READONLY;
+ MOZ_ASSERT(attrs <= UINT32_MAX);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0.scratchReg());
+ pushArg(Imm32(attrs));
+ pushArg(ImmGCPtr(script->getName(pc)));
+
+ return callVM(DefLexicalInfo);
+}
+
+typedef bool (*DefFunOperationFn)(JSContext*, HandleScript, HandleObject, HandleFunction);
+static const VMFunction DefFunOperationInfo =
+ FunctionInfo<DefFunOperationFn>(DefFunOperation, "DefFunOperation");
+
+bool
+BaselineCompiler::emit_JSOP_DEFFUN()
+{
+ frame.popRegsAndSync(1);
+ masm.unboxObject(R0, R0.scratchReg());
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R1.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0.scratchReg());
+ pushArg(R1.scratchReg());
+ pushArg(ImmGCPtr(script));
+
+ return callVM(DefFunOperationInfo);
+}
+
+typedef bool (*InitPropGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandlePropertyName,
+ HandleObject);
+static const VMFunction InitPropGetterSetterInfo =
+ FunctionInfo<InitPropGetterSetterFn>(InitGetterSetterOperation,
+ "InitPropGetterSetterOperation");
+
+bool
+BaselineCompiler::emitInitPropGetterSetter()
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_INITPROP_GETTER ||
+ JSOp(*pc) == JSOP_INITHIDDENPROP_GETTER ||
+ JSOp(*pc) == JSOP_INITPROP_SETTER ||
+ JSOp(*pc) == JSOP_INITHIDDENPROP_SETTER);
+
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+
+ prepareVMCall();
+
+ masm.extractObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg());
+ masm.extractObject(frame.addressOfStackValue(frame.peek(-2)), R1.scratchReg());
+
+ pushArg(R0.scratchReg());
+ pushArg(ImmGCPtr(script->getName(pc)));
+ pushArg(R1.scratchReg());
+ pushArg(ImmPtr(pc));
+
+ if (!callVM(InitPropGetterSetterInfo))
+ return false;
+
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITPROP_GETTER()
+{
+ return emitInitPropGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENPROP_GETTER()
+{
+ return emitInitPropGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITPROP_SETTER()
+{
+ return emitInitPropGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENPROP_SETTER()
+{
+ return emitInitPropGetterSetter();
+}
+
+typedef bool (*InitElemGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandleValue,
+ HandleObject);
+static const VMFunction InitElemGetterSetterInfo =
+ FunctionInfo<InitElemGetterSetterFn>(InitGetterSetterOperation,
+ "InitElemGetterSetterOperation");
+
+bool
+BaselineCompiler::emitInitElemGetterSetter()
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_INITELEM_GETTER ||
+ JSOp(*pc) == JSOP_INITHIDDENELEM_GETTER ||
+ JSOp(*pc) == JSOP_INITELEM_SETTER ||
+ JSOp(*pc) == JSOP_INITHIDDENELEM_SETTER);
+
+ // Load index and value in R0 and R1, but keep values on the stack for the
+ // decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+ masm.extractObject(frame.addressOfStackValue(frame.peek(-1)), R1.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R1.scratchReg());
+ pushArg(R0);
+ masm.extractObject(frame.addressOfStackValue(frame.peek(-3)), R0.scratchReg());
+ pushArg(R0.scratchReg());
+ pushArg(ImmPtr(pc));
+
+ if (!callVM(InitElemGetterSetterInfo))
+ return false;
+
+ frame.popn(2);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM_GETTER()
+{
+ return emitInitElemGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENELEM_GETTER()
+{
+ return emitInitElemGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM_SETTER()
+{
+ return emitInitElemGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITHIDDENELEM_SETTER()
+{
+ return emitInitElemGetterSetter();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITELEM_INC()
+{
+ // Keep the object and rhs on the stack.
+ frame.syncStack(0);
+
+ // Load object in R0, index in R1.
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-3)), R0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R1);
+
+ // Call IC.
+ ICSetElem_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Pop the rhs
+ frame.pop();
+
+ // Increment index
+ Address indexAddr = frame.addressOfStackValue(frame.peek(-1));
+ masm.incrementInt32Value(indexAddr);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETLOCAL()
+{
+ frame.pushLocal(GET_LOCALNO(pc));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETLOCAL()
+{
+ // Ensure no other StackValue refers to the old value, for instance i + (i = 3).
+ // This also allows us to use R0 as scratch below.
+ frame.syncStack(1);
+
+ uint32_t local = GET_LOCALNO(pc);
+ storeValue(frame.peek(-1), frame.addressOfLocal(local), R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emitFormalArgAccess(uint32_t arg, bool get)
+{
+ // Fast path: the script does not use |arguments| or formals don't
+ // alias the arguments object.
+ if (!script->argumentsAliasesFormals()) {
+ if (get) {
+ frame.pushArg(arg);
+ } else {
+ // See the comment in emit_JSOP_SETLOCAL.
+ frame.syncStack(1);
+ storeValue(frame.peek(-1), frame.addressOfArg(arg), R0);
+ }
+
+ return true;
+ }
+
+ // Sync so that we can use R0.
+ frame.syncStack(0);
+
+ // If the script is known to have an arguments object, we can just use it.
+ // Else, we *may* have an arguments object (because we can't invalidate
+ // when needsArgsObj becomes |true|), so we have to test HAS_ARGS_OBJ.
+ Label done;
+ if (!script->needsArgsObj()) {
+ Label hasArgsObj;
+ masm.branchTest32(Assembler::NonZero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ), &hasArgsObj);
+ if (get)
+ masm.loadValue(frame.addressOfArg(arg), R0);
+ else
+ storeValue(frame.peek(-1), frame.addressOfArg(arg), R0);
+ masm.jump(&done);
+ masm.bind(&hasArgsObj);
+ }
+
+ // Load the arguments object data vector.
+ Register reg = R2.scratchReg();
+ masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()), reg);
+ masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
+
+ // Load/store the argument.
+ Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value));
+ if (get) {
+ masm.loadValue(argAddr, R0);
+ frame.push(R0);
+ } else {
+ masm.patchableCallPreBarrier(argAddr, MIRType::Value);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+ masm.storeValue(R0, argAddr);
+
+ MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
+
+ Register temp = R1.scratchReg();
+
+ // Reload the arguments object
+ Register reg = R2.scratchReg();
+ masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()), reg);
+
+ Label skipBarrier;
+
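+ // Skip the post-barrier when the arguments object is itself in the
+ // nursery or the stored value is not a nursery object.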
+ masm.branchPtrInNurseryChunk(Assembler::Equal, reg, temp, &skipBarrier);
+ masm.branchValueIsNurseryObject(Assembler::NotEqual, R0, temp, &skipBarrier);
+
+ masm.call(&postBarrierSlot_);
+
+ masm.bind(&skipBarrier);
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETARG()
+{
+ uint32_t arg = GET_ARGNO(pc);
+ return emitFormalArgAccess(arg, /* get = */ true);
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETARG()
+{
+ // IonMonkey can't inline functions that use SETARG with magic |arguments|.
+ if (!script->argsObjAliasesFormals() && script->argumentsAliasesFormals())
+ script->setUninlineable();
+
+ modifiesArguments_ = true;
+
+ uint32_t arg = GET_ARGNO(pc);
+ return emitFormalArgAccess(arg, /* get = */ false);
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEWTARGET()
+{
+ if (script->isForEval()) {
+ frame.pushEvalNewTarget();
+ return true;
+ }
+
+ MOZ_ASSERT(function());
+ frame.syncStack(0);
+
+ if (function()->isArrow()) {
+ // Arrow functions store their |new.target| value in an
+ // extended slot.
+ Register scratch = R0.scratchReg();
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), scratch);
+ masm.loadValue(Address(scratch, FunctionExtended::offsetOfArrowNewTargetSlot()), R0);
+ frame.push(R0);
+ return true;
+ }
+
+ // if (isConstructing()) push(argv[Max(numActualArgs, numFormalArgs)])
+ Label notConstructing, done;
+ masm.branchTestPtr(Assembler::Zero, frame.addressOfCalleeToken(),
+ Imm32(CalleeToken_FunctionConstructing), &notConstructing);
+
+ Register argvLen = R0.scratchReg();
+
+ Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
+ masm.loadPtr(actualArgs, argvLen);
+
+ Label useNFormals;
+
+ masm.branchPtr(Assembler::Below, argvLen, Imm32(function()->nargs()),
+ &useNFormals);
+
+ {
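+ // Enough actuals were passed: |new.target| sits right after them.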
+ BaseValueIndex newTarget(BaselineFrameReg, argvLen, BaselineFrame::offsetOfArg(0));
+ masm.loadValue(newTarget, R0);
+ masm.jump(&done);
+ }
+
+ masm.bind(&useNFormals);
+
+ {
+ Address newTarget(BaselineFrameReg,
+ BaselineFrame::offsetOfArg(0) + (function()->nargs() * sizeof(Value)));
+ masm.loadValue(newTarget, R0);
+ masm.jump(&done);
+ }
+
+ // else push(undefined)
+ masm.bind(&notConstructing);
+ masm.moveValue(UndefinedValue(), R0);
+
+ masm.bind(&done);
+ frame.push(R0);
+
+ return true;
+}
+
+typedef bool (*ThrowRuntimeLexicalErrorFn)(JSContext* cx, unsigned);
+static const VMFunction ThrowRuntimeLexicalErrorInfo =
+ FunctionInfo<ThrowRuntimeLexicalErrorFn>(jit::ThrowRuntimeLexicalError,
+ "ThrowRuntimeLexicalError");
+
+bool
+BaselineCompiler::emitThrowConstAssignment()
+{
+ prepareVMCall();
+ pushArg(Imm32(JSMSG_BAD_CONST_ASSIGN));
+ return callVM(ThrowRuntimeLexicalErrorInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_THROWSETCONST()
+{
+ return emitThrowConstAssignment();
+}
+
+bool
+BaselineCompiler::emit_JSOP_THROWSETALIASEDCONST()
+{
+ return emitThrowConstAssignment();
+}
+
+bool
+BaselineCompiler::emit_JSOP_THROWSETCALLEE()
+{
+ return emitThrowConstAssignment();
+}
+
+bool
+BaselineCompiler::emitUninitializedLexicalCheck(const ValueOperand& val)
+{
+ Label done;
+ masm.branchTestMagicValue(Assembler::NotEqual, val, JS_UNINITIALIZED_LEXICAL, &done);
+
+ prepareVMCall();
+ pushArg(Imm32(JSMSG_UNINITIALIZED_LEXICAL));
+ if (!callVM(ThrowRuntimeLexicalErrorInfo))
+ return false;
+
+ masm.bind(&done);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CHECKLEXICAL()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfLocal(GET_LOCALNO(pc)), R0);
+ return emitUninitializedLexicalCheck(R0);
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITLEXICAL()
+{
+ return emit_JSOP_SETLOCAL();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITGLEXICAL()
+{
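+ // Initialize the binding by reusing the SETPROP path, with the global
+ // lexical environment as receiver and the value kept in R0.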
+ frame.popRegsAndSync(1);
+ frame.push(ObjectValue(script->global().lexicalEnvironment()));
+ frame.push(R0);
+ return emit_JSOP_SETPROP();
+}
+
+bool
+BaselineCompiler::emit_JSOP_CHECKALIASEDLEXICAL()
+{
+ frame.syncStack(0);
+ masm.loadValue(getEnvironmentCoordinateAddress(R0.scratchReg()), R0);
+ return emitUninitializedLexicalCheck(R0);
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITALIASEDLEXICAL()
+{
+ return emit_JSOP_SETALIASEDVAR();
+}
+
+bool
+BaselineCompiler::emit_JSOP_UNINITIALIZED()
+{
+ frame.push(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ return true;
+}
+
+bool
+BaselineCompiler::emitCall()
+{
+ MOZ_ASSERT(IsCallPC(pc));
+
+ bool construct = JSOp(*pc) == JSOP_NEW || JSOp(*pc) == JSOP_SUPERCALL;
+ uint32_t argc = GET_ARGC(pc);
+
+ frame.syncStack(0);
+ masm.move32(Imm32(argc), R0.scratchReg());
+
+ // Call IC
+ ICCall_Fallback::Compiler stubCompiler(cx, /* isConstructing = */ construct,
+ /* isSpread = */ false);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Update FrameInfo.
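+ // Pop the callee, |this|, the arguments, and |new.target| when
+ // constructing, then push the result returned in R0.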
+ frame.popn(2 + argc + construct);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emitSpreadCall()
+{
+ MOZ_ASSERT(IsCallPC(pc));
+
+ frame.syncStack(0);
+ masm.move32(Imm32(1), R0.scratchReg());
+
+ // Call IC
+ bool construct = JSOp(*pc) == JSOP_SPREADNEW || JSOp(*pc) == JSOP_SPREADSUPERCALL;
+ ICCall_Fallback::Compiler stubCompiler(cx, /* isConstructing = */ construct,
+ /* isSpread = */ true);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ // Update FrameInfo.
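+ // Pop the callee, |this|, the arguments array, and |new.target| when
+ // constructing, then push the result returned in R0.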
+ frame.popn(3 + construct);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALL()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLITER()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_NEW()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SUPERCALL()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_FUNCALL()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_FUNAPPLY()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_EVAL()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTEVAL()
+{
+ return emitCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SPREADCALL()
+{
+ return emitSpreadCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SPREADNEW()
+{
+ return emitSpreadCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SPREADSUPERCALL()
+{
+ return emitSpreadCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_SPREADEVAL()
+{
+ return emitSpreadCall();
+}
+
+bool
+BaselineCompiler::emit_JSOP_STRICTSPREADEVAL()
+{
+ return emitSpreadCall();
+}
+
+typedef bool (*OptimizeSpreadCallFn)(JSContext*, HandleValue, bool*);
+static const VMFunction OptimizeSpreadCallInfo =
+ FunctionInfo<OptimizeSpreadCallFn>(OptimizeSpreadCall, "OptimizeSpreadCall");
+
+bool
+BaselineCompiler::emit_JSOP_OPTIMIZE_SPREADCALL()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ if (!callVM(OptimizeSpreadCallInfo))
+ return false;
+
+ masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*ImplicitThisFn)(JSContext*, HandleObject, HandlePropertyName,
+ MutableHandleValue);
+static const VMFunction ImplicitThisInfo =
+ FunctionInfo<ImplicitThisFn>(ImplicitThisOperation, "ImplicitThisOperation");
+
+bool
+BaselineCompiler::emit_JSOP_IMPLICITTHIS()
+{
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(ImmGCPtr(script->getName(pc)));
+ pushArg(R0.scratchReg());
+
+ if (!callVM(ImplicitThisInfo))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_GIMPLICITTHIS()
+{
+ if (!script->hasNonSyntacticScope()) {
+ frame.push(UndefinedValue());
+ return true;
+ }
+
+ return emit_JSOP_IMPLICITTHIS();
+}
+
+bool
+BaselineCompiler::emit_JSOP_INSTANCEOF()
+{
+ frame.popRegsAndSync(2);
+
+ ICInstanceOf_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TYPEOF()
+{
+ frame.popRegsAndSync(1);
+
+ ICTypeOf_Fallback::Compiler stubCompiler(cx);
+ if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TYPEOFEXPR()
+{
+ return emit_JSOP_TYPEOF();
+}
+
+typedef bool (*ThrowMsgFn)(JSContext*, const unsigned);
+static const VMFunction ThrowMsgInfo =
+ FunctionInfo<ThrowMsgFn>(js::ThrowMsgOperation, "ThrowMsgOperation");
+
+bool
+BaselineCompiler::emit_JSOP_THROWMSG()
+{
+ prepareVMCall();
+ pushArg(Imm32(GET_UINT16(pc)));
+ return callVM(ThrowMsgInfo);
+}
+
+typedef bool (*ThrowFn)(JSContext*, HandleValue);
+static const VMFunction ThrowInfo = FunctionInfo<ThrowFn>(js::Throw, "Throw");
+
+bool
+BaselineCompiler::emit_JSOP_THROW()
+{
+ // Keep value to throw in R0.
+ frame.popRegsAndSync(1);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ return callVM(ThrowInfo);
+}
+
+typedef bool (*ThrowingFn)(JSContext*, HandleValue);
+static const VMFunction ThrowingInfo =
+ FunctionInfo<ThrowingFn>(js::ThrowingOperation, "ThrowingOperation");
+
+bool
+BaselineCompiler::emit_JSOP_THROWING()
+{
+ // Keep value to throw in R0.
+ frame.popRegsAndSync(1);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ return callVM(ThrowingInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_TRY()
+{
+ if (!emit_JSOP_JUMPTARGET())
+ return false;
+
+ // IonMonkey can't inline functions that contain JSOP_TRY.
+ script->setUninlineable();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_FINALLY()
+{
+ // JSOP_FINALLY has a def count of 2, but these values are already on the
+ // stack (they're pushed by JSOP_GOSUB). Update the compiler's stack state.
+ frame.setStackDepth(frame.stackDepth() + 2);
+
+ // To match the interpreter, emit an interrupt check at the start of the
+ // finally block.
+ return emitInterruptCheck();
+}
+
+bool
+BaselineCompiler::emit_JSOP_GOSUB()
+{
+ // Push |false| so that RETSUB knows the value on top of the
+ // stack is not an exception but the offset to the op following
+ // this GOSUB.
+ frame.push(BooleanValue(false));
+
+ int32_t nextOffset = script->pcToOffset(GetNextPc(pc));
+ frame.push(Int32Value(nextOffset));
+
+ // Jump to the finally block.
+ frame.syncStack(0);
+ jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
+ masm.jump(labelOf(target));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_RETSUB()
+{
+ frame.popRegsAndSync(2);
+
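+ // R0 holds the boolean pushed by JSOP_GOSUB (or by the exception handler),
+ // R1 the resume offset (or the pending exception); the IC rethrows or
+ // jumps accordingly.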
+ ICRetSub_Fallback::Compiler stubCompiler(cx);
+ return emitOpIC(stubCompiler.getStub(&stubSpace_));
+}
+
+typedef bool (*PushLexicalEnvFn)(JSContext*, BaselineFrame*, Handle<LexicalScope*>);
+static const VMFunction PushLexicalEnvInfo =
+ FunctionInfo<PushLexicalEnvFn>(jit::PushLexicalEnv, "PushLexicalEnv");
+
+bool
+BaselineCompiler::emit_JSOP_PUSHLEXICALENV()
+{
+ LexicalScope& scope = script->getScope(pc)->as<LexicalScope>();
+
+ // Call a stub to push a lexical environment onto the environment chain.
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ pushArg(ImmGCPtr(&scope));
+ pushArg(R0.scratchReg());
+
+ return callVM(PushLexicalEnvInfo);
+}
+
+typedef bool (*PopLexicalEnvFn)(JSContext*, BaselineFrame*);
+static const VMFunction PopLexicalEnvInfo =
+ FunctionInfo<PopLexicalEnvFn>(jit::PopLexicalEnv, "PopLexicalEnv");
+
+typedef bool (*DebugLeaveThenPopLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction DebugLeaveThenPopLexicalEnvInfo =
+ FunctionInfo<DebugLeaveThenPopLexicalEnvFn>(jit::DebugLeaveThenPopLexicalEnv,
+ "DebugLeaveThenPopLexicalEnv");
+
+bool
+BaselineCompiler::emit_JSOP_POPLEXICALENV()
+{
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ if (compileDebugInstrumentation_) {
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+ return callVM(DebugLeaveThenPopLexicalEnvInfo);
+ }
+
+ pushArg(R0.scratchReg());
+ return callVM(PopLexicalEnvInfo);
+}
+
+typedef bool (*FreshenLexicalEnvFn)(JSContext*, BaselineFrame*);
+static const VMFunction FreshenLexicalEnvInfo =
+ FunctionInfo<FreshenLexicalEnvFn>(jit::FreshenLexicalEnv, "FreshenLexicalEnv");
+
+typedef bool (*DebugLeaveThenFreshenLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction DebugLeaveThenFreshenLexicalEnvInfo =
+ FunctionInfo<DebugLeaveThenFreshenLexicalEnvFn>(jit::DebugLeaveThenFreshenLexicalEnv,
+ "DebugLeaveThenFreshenLexicalEnv");
+
+bool
+BaselineCompiler::emit_JSOP_FRESHENLEXICALENV()
+{
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ if (compileDebugInstrumentation_) {
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+ return callVM(DebugLeaveThenFreshenLexicalEnvInfo);
+ }
+
+ pushArg(R0.scratchReg());
+ return callVM(FreshenLexicalEnvInfo);
+}
+
+typedef bool (*RecreateLexicalEnvFn)(JSContext*, BaselineFrame*);
+static const VMFunction RecreateLexicalEnvInfo =
+ FunctionInfo<RecreateLexicalEnvFn>(jit::RecreateLexicalEnv, "RecreateLexicalEnv");
+
+typedef bool (*DebugLeaveThenRecreateLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction DebugLeaveThenRecreateLexicalEnvInfo =
+ FunctionInfo<DebugLeaveThenRecreateLexicalEnvFn>(jit::DebugLeaveThenRecreateLexicalEnv,
+ "DebugLeaveThenRecreateLexicalEnv");
+
+bool
+BaselineCompiler::emit_JSOP_RECREATELEXICALENV()
+{
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ if (compileDebugInstrumentation_) {
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+ return callVM(DebugLeaveThenRecreateLexicalEnvInfo);
+ }
+
+ pushArg(R0.scratchReg());
+ return callVM(RecreateLexicalEnvInfo);
+}
+
+typedef bool (*DebugLeaveLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction DebugLeaveLexicalEnvInfo =
+ FunctionInfo<DebugLeaveLexicalEnvFn>(jit::DebugLeaveLexicalEnv, "DebugLeaveLexicalEnv");
+
+bool
+BaselineCompiler::emit_JSOP_DEBUGLEAVELEXICALENV()
+{
+ if (!compileDebugInstrumentation_)
+ return true;
+
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+
+ return callVM(DebugLeaveLexicalEnvInfo);
+}
+
+typedef bool (*PushVarEnvFn)(JSContext*, BaselineFrame*, HandleScope);
+static const VMFunction PushVarEnvInfo =
+ FunctionInfo<PushVarEnvFn>(jit::PushVarEnv, "PushVarEnv");
+
+bool
+BaselineCompiler::emit_JSOP_PUSHVARENV()
+{
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(ImmGCPtr(script->getScope(pc)));
+ pushArg(R0.scratchReg());
+
+ return callVM(PushVarEnvInfo);
+}
+
+typedef bool (*PopVarEnvFn)(JSContext*, BaselineFrame*);
+static const VMFunction PopVarEnvInfo =
+ FunctionInfo<PopVarEnvFn>(jit::PopVarEnv, "PopVarEnv");
+
+bool
+BaselineCompiler::emit_JSOP_POPVARENV()
+{
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ return callVM(PopVarEnvInfo);
+}
+
+typedef bool (*EnterWithFn)(JSContext*, BaselineFrame*, HandleValue, Handle<WithScope*>);
+static const VMFunction EnterWithInfo =
+ FunctionInfo<EnterWithFn>(jit::EnterWith, "EnterWith");
+
+bool
+BaselineCompiler::emit_JSOP_ENTERWITH()
+{
+ WithScope& withScope = script->getScope(pc)->as<WithScope>();
+
+ // Pop "with" object to R0.
+ frame.popRegsAndSync(1);
+
+ // Call a stub to push the object onto the scope chain.
+ prepareVMCall();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+
+ pushArg(ImmGCPtr(&withScope));
+ pushArg(R0);
+ pushArg(R1.scratchReg());
+
+ return callVM(EnterWithInfo);
+}
+
+typedef bool (*LeaveWithFn)(JSContext*, BaselineFrame*);
+static const VMFunction LeaveWithInfo =
+ FunctionInfo<LeaveWithFn>(jit::LeaveWith, "LeaveWith");
+
+bool
+BaselineCompiler::emit_JSOP_LEAVEWITH()
+{
+ // Call a stub to pop the with object from the scope chain.
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ return callVM(LeaveWithInfo);
+}
+
+typedef bool (*GetAndClearExceptionFn)(JSContext*, MutableHandleValue);
+static const VMFunction GetAndClearExceptionInfo =
+ FunctionInfo<GetAndClearExceptionFn>(GetAndClearException, "GetAndClearException");
+
+bool
+BaselineCompiler::emit_JSOP_EXCEPTION()
+{
+ prepareVMCall();
+
+ if (!callVM(GetAndClearExceptionInfo))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*OnDebuggerStatementFn)(JSContext*, BaselineFrame*, jsbytecode* pc, bool*);
+static const VMFunction OnDebuggerStatementInfo =
+ FunctionInfo<OnDebuggerStatementFn>(jit::OnDebuggerStatement, "OnDebuggerStatement");
+
+bool
+BaselineCompiler::emit_JSOP_DEBUGGER()
+{
+ prepareVMCall();
+ pushArg(ImmPtr(pc));
+
+ frame.assertSyncedStack();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVM(OnDebuggerStatementInfo))
+ return false;
+
+ // If the stub returns |true|, return the frame's return value.
+ Label done;
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
+ {
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ masm.jump(&return_);
+ }
+ masm.bind(&done);
+ return true;
+}
+
+typedef bool (*DebugEpilogueFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction DebugEpilogueInfo =
+ FunctionInfo<DebugEpilogueFn>(jit::DebugEpilogueOnBaselineReturn,
+ "DebugEpilogueOnBaselineReturn");
+
+bool
+BaselineCompiler::emitReturn()
+{
+ if (compileDebugInstrumentation_) {
+ // Move return value into the frame's rval slot.
+ masm.storeValue(JSReturnOperand, frame.addressOfReturnValue());
+ masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+
+ // Load BaselineFrame pointer in R0.
+ frame.syncStack(0);
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(ImmPtr(pc));
+ pushArg(R0.scratchReg());
+ if (!callVM(DebugEpilogueInfo))
+ return false;
+
+ // Fix up the fake ICEntry appended by callVM for on-stack recompilation.
+ icEntries_.back().setFakeKind(ICEntry::Kind_DebugEpilogue);
+
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ }
+
+ // Only emit the jump if this JSOP_RETRVAL is not the last instruction;
+ // the last instruction falls through into the return label.
+ if (pc + GetBytecodeLength(pc) < script->codeEnd())
+ masm.jump(&return_);
+
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_RETURN()
+{
+ MOZ_ASSERT(frame.stackDepth() == 1);
+
+ frame.popValue(JSReturnOperand);
+ return emitReturn();
+}
+
+void
+BaselineCompiler::emitLoadReturnValue(ValueOperand val)
+{
+ Label done, noRval;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_RVAL), &noRval);
+ masm.loadValue(frame.addressOfReturnValue(), val);
+ masm.jump(&done);
+
+ masm.bind(&noRval);
+ masm.moveValue(UndefinedValue(), val);
+
+ masm.bind(&done);
+}
+
+bool
+BaselineCompiler::emit_JSOP_RETRVAL()
+{
+ MOZ_ASSERT(frame.stackDepth() == 0);
+
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+
+ if (!script->noScriptRval()) {
+ // Return the value in the return value slot, if any.
+ Label done;
+ Address flags = frame.addressOfFlags();
+ masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done);
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ masm.bind(&done);
+ }
+
+ return emitReturn();
+}
+
+typedef bool (*ToIdFn)(JSContext*, HandleScript, jsbytecode*, HandleValue, MutableHandleValue);
+static const VMFunction ToIdInfo = FunctionInfo<ToIdFn>(js::ToIdOperation, "ToIdOperation");
+
+bool
+BaselineCompiler::emit_JSOP_TOID()
+{
+ // Load index in R0, but keep values on the stack for the decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ // No-op if index is int32.
+ Label done;
+ masm.branchTestInt32(Assembler::Equal, R0, &done);
+
+ prepareVMCall();
+
+ pushArg(R0);
+ pushArg(ImmPtr(pc));
+ pushArg(ImmGCPtr(script));
+
+ if (!callVM(ToIdInfo))
+ return false;
+
+ masm.bind(&done);
+ frame.pop(); // Pop index.
+ frame.push(R0);
+ return true;
+}
+
+typedef JSObject* (*ToAsyncFn)(JSContext*, HandleFunction);
+static const VMFunction ToAsyncInfo = FunctionInfo<ToAsyncFn>(js::WrapAsyncFunction, "ToAsync");
+
+bool
+BaselineCompiler::emit_JSOP_TOASYNC()
+{
+ frame.syncStack(0);
+ masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ if (!callVM(ToAsyncInfo))
+ return false;
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.pop();
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*ThrowObjectCoercibleFn)(JSContext*, HandleValue);
+static const VMFunction ThrowObjectCoercibleInfo =
+ FunctionInfo<ThrowObjectCoercibleFn>(ThrowObjectCoercible, "ThrowObjectCoercible");
+
+bool
+BaselineCompiler::emit_JSOP_CHECKOBJCOERCIBLE()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ Label fail, done;
+
+ masm.branchTestUndefined(Assembler::Equal, R0, &fail);
+ masm.branchTestNull(Assembler::NotEqual, R0, &done);
+
+ masm.bind(&fail);
+ prepareVMCall();
+
+ pushArg(R0);
+
+ if (!callVM(ThrowObjectCoercibleInfo))
+ return false;
+
+ masm.bind(&done);
+ return true;
+}
+
+typedef JSString* (*ToStringFn)(JSContext*, HandleValue);
+static const VMFunction ToStringInfo = FunctionInfo<ToStringFn>(ToStringSlow, "ToStringSlow");
+
+bool
+BaselineCompiler::emit_JSOP_TOSTRING()
+{
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ // Inline path for string.
+ Label done;
+ masm.branchTestString(Assembler::Equal, R0, &done);
+
+ prepareVMCall();
+
+ pushArg(R0);
+
+ // Call ToStringSlow, which does not handle string inputs; strings take the inline path above.
+ if (!callVM(ToStringInfo))
+ return false;
+
+ masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, R0);
+
+ masm.bind(&done);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_TABLESWITCH()
+{
+ frame.popRegsAndSync(1);
+
+ // Call IC.
+ ICTableSwitch::Compiler compiler(cx, pc);
+ return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_ITER()
+{
+ frame.popRegsAndSync(1);
+
+ ICIteratorNew_Fallback::Compiler compiler(cx);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_MOREITER()
+{
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ ICIteratorMore_Fallback::Compiler compiler(cx);
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ISNOITER()
+{
+ frame.syncStack(0);
+
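+ // Push true if the value left on top of the stack by JSOP_MOREITER is the
+ // magic "no more iterations" sentinel, and false otherwise.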
+ Label isMagic, done;
+ masm.branchTestMagic(Assembler::Equal, frame.addressOfStackValue(frame.peek(-1)),
+ &isMagic);
+ masm.moveValue(BooleanValue(false), R0);
+ masm.jump(&done);
+
+ masm.bind(&isMagic);
+ masm.moveValue(BooleanValue(true), R0);
+
+ masm.bind(&done);
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_ENDITER()
+{
+ if (!emit_JSOP_JUMPTARGET())
+ return false;
+ frame.popRegsAndSync(1);
+
+ ICIteratorClose_Fallback::Compiler compiler(cx);
+ return emitOpIC(compiler.getStub(&stubSpace_));
+}
+
+bool
+BaselineCompiler::emit_JSOP_GETRVAL()
+{
+ frame.syncStack(0);
+
+ emitLoadReturnValue(R0);
+
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_SETRVAL()
+{
+ // Store to the frame's return value slot.
+ storeValue(frame.peek(-1), frame.addressOfReturnValue(), R2);
+ masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+ frame.pop();
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_CALLEE()
+{
+ MOZ_ASSERT(function());
+ frame.syncStack(0);
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_OBJECT, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*NewArgumentsObjectFn)(JSContext*, BaselineFrame*, MutableHandleValue);
+static const VMFunction NewArgumentsObjectInfo =
+ FunctionInfo<NewArgumentsObjectFn>(jit::NewArgumentsObject, "NewArgumentsObject");
+
+bool
+BaselineCompiler::emit_JSOP_ARGUMENTS()
+{
+ frame.syncStack(0);
+
+ Label done;
+ if (!script->argumentsHasVarBinding() || !script->needsArgsObj()) {
+ // We assume the script does not need an arguments object. However, this
+ // assumption can be invalidated later, see argumentsOptimizationFailed
+ // in JSScript. Because we can't invalidate baseline JIT code, we set a
+ // flag on BaselineScript when that happens and guard on it here.
+ masm.moveValue(MagicValue(JS_OPTIMIZED_ARGUMENTS), R0);
+
+ // Load script->baseline.
+ Register scratch = R1.scratchReg();
+ masm.movePtr(ImmGCPtr(script), scratch);
+ masm.loadPtr(Address(scratch, JSScript::offsetOfBaselineScript()), scratch);
+
+ // If we don't need an arguments object, skip the VM call.
+ masm.branchTest32(Assembler::Zero, Address(scratch, BaselineScript::offsetOfFlags()),
+ Imm32(BaselineScript::NEEDS_ARGS_OBJ), &done);
+ }
+
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVM(NewArgumentsObjectInfo))
+ return false;
+
+ masm.bind(&done);
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*RunOnceScriptPrologueFn)(JSContext*, HandleScript);
+static const VMFunction RunOnceScriptPrologueInfo =
+ FunctionInfo<RunOnceScriptPrologueFn>(js::RunOnceScriptPrologue, "RunOnceScriptPrologue");
+
+bool
+BaselineCompiler::emit_JSOP_RUNONCE()
+{
+ frame.syncStack(0);
+
+ prepareVMCall();
+
+ masm.movePtr(ImmGCPtr(script), R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ return callVM(RunOnceScriptPrologueInfo);
+}
+
+bool
+BaselineCompiler::emit_JSOP_REST()
+{
+ frame.syncStack(0);
+
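+ // Allocate a template array object; the Rest IC below uses it as the
+ // template when creating the rest argument array.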
+ JSObject* templateObject =
+ ObjectGroup::newArrayObject(cx, nullptr, 0, TenuredObject,
+ ObjectGroup::NewArrayKind::UnknownIndex);
+ if (!templateObject)
+ return false;
+
+ // Call IC.
+ ICRest_Fallback::Compiler compiler(cx, &templateObject->as<ArrayObject>());
+ if (!emitOpIC(compiler.getStub(&stubSpace_)))
+ return false;
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+typedef JSObject* (*CreateGeneratorFn)(JSContext*, BaselineFrame*);
+static const VMFunction CreateGeneratorInfo =
+ FunctionInfo<CreateGeneratorFn>(jit::CreateGenerator, "CreateGenerator");
+
+bool
+BaselineCompiler::emit_JSOP_GENERATOR()
+{
+ MOZ_ASSERT(frame.stackDepth() == 0);
+
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+ if (!callVM(CreateGeneratorInfo))
+ return false;
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+bool
+BaselineCompiler::addYieldOffset()
+{
+ MOZ_ASSERT(*pc == JSOP_INITIALYIELD || *pc == JSOP_YIELD);
+
+ uint32_t yieldIndex = GET_UINT24(pc);
+
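+ // Grow the offset table so yieldIndex is a valid index; the entry itself
+ // is filled in below once the offset of the following op is known.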
+ while (yieldIndex >= yieldOffsets_.length()) {
+ if (!yieldOffsets_.append(0))
+ return false;
+ }
+
+ static_assert(JSOP_INITIALYIELD_LENGTH == JSOP_YIELD_LENGTH,
+ "code below assumes INITIALYIELD and YIELD have same length");
+ yieldOffsets_[yieldIndex] = script->pcToOffset(pc + JSOP_YIELD_LENGTH);
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_INITIALYIELD()
+{
+ if (!addYieldOffset())
+ return false;
+
+ frame.syncStack(0);
+ MOZ_ASSERT(frame.stackDepth() == 1);
+
+ Register genObj = R2.scratchReg();
+ masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), genObj);
+
+ MOZ_ASSERT(GET_UINT24(pc) == 0);
+ masm.storeValue(Int32Value(0), Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()));
+
+ Register envObj = R0.scratchReg();
+ Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot());
+ masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
+ masm.patchableCallPreBarrier(envChainSlot, MIRType::Value);
+ masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
+
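+ // A post barrier is only needed when a nursery-allocated environment
+ // object is stored into a tenured generator object.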
+ Register temp = R1.scratchReg();
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier);
+ masm.push(genObj);
+ MOZ_ASSERT(genObj == R2.scratchReg());
+ masm.call(&postBarrierSlot_);
+ masm.pop(genObj);
+ masm.bind(&skipBarrier);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, genObj, JSReturnOperand);
+ return emitReturn();
+}
+
+typedef bool (*NormalSuspendFn)(JSContext*, HandleObject, BaselineFrame*, jsbytecode*, uint32_t);
+static const VMFunction NormalSuspendInfo =
+ FunctionInfo<NormalSuspendFn>(jit::NormalSuspend, "NormalSuspend");
+
+bool
+BaselineCompiler::emit_JSOP_YIELD()
+{
+ if (!addYieldOffset())
+ return false;
+
+ // Store generator in R0.
+ frame.popRegsAndSync(1);
+
+ Register genObj = R2.scratchReg();
+ masm.unboxObject(R0, genObj);
+
+ MOZ_ASSERT(frame.stackDepth() >= 1);
+
+ if (frame.stackDepth() == 1 && !script->isLegacyGenerator()) {
+ // If the expression stack is empty, we can inline the YIELD. Don't do
+ // this for legacy generators: we have to throw an exception if the
+ // generator is in the closing state, see GeneratorObject::suspend.
+
+ masm.storeValue(Int32Value(GET_UINT24(pc)),
+ Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()));
+
+ Register envObj = R0.scratchReg();
+ Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot());
+ masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
+ masm.patchableCallPreBarrier(envChainSlot, MIRType::Value);
+ masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
+
+ Register temp = R1.scratchReg();
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier);
+ MOZ_ASSERT(genObj == R2.scratchReg());
+ masm.call(&postBarrierSlot_);
+ masm.bind(&skipBarrier);
+ } else {
+ masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+
+ prepareVMCall();
+ pushArg(Imm32(frame.stackDepth()));
+ pushArg(ImmPtr(pc));
+ pushArg(R1.scratchReg());
+ pushArg(genObj);
+
+ if (!callVM(NormalSuspendInfo))
+ return false;
+ }
+
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), JSReturnOperand);
+ return emitReturn();
+}
+
+typedef bool (*DebugAfterYieldFn)(JSContext*, BaselineFrame*);
+static const VMFunction DebugAfterYieldInfo =
+ FunctionInfo<DebugAfterYieldFn>(jit::DebugAfterYield, "DebugAfterYield");
+
+bool
+BaselineCompiler::emit_JSOP_DEBUGAFTERYIELD()
+{
+ if (!compileDebugInstrumentation_)
+ return true;
+
+ frame.assertSyncedStack();
+ masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+ return callVM(DebugAfterYieldInfo);
+}
+
+typedef bool (*FinalSuspendFn)(JSContext*, HandleObject, BaselineFrame*, jsbytecode*);
+static const VMFunction FinalSuspendInfo =
+ FunctionInfo<FinalSuspendFn>(jit::FinalSuspend, "FinalSuspend");
+
+bool
+BaselineCompiler::emit_JSOP_FINALYIELDRVAL()
+{
+ // Store generator in R0, BaselineFrame pointer in R1.
+ frame.popRegsAndSync(1);
+ masm.unboxObject(R0, R0.scratchReg());
+ masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+
+ prepareVMCall();
+ pushArg(ImmPtr(pc));
+ pushArg(R1.scratchReg());
+ pushArg(R0.scratchReg());
+
+ if (!callVM(FinalSuspendInfo))
+ return false;
+
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ return emitReturn();
+}
+
+typedef bool (*InterpretResumeFn)(JSContext*, HandleObject, HandleValue, HandlePropertyName,
+ MutableHandleValue);
+static const VMFunction InterpretResumeInfo =
+ FunctionInfo<InterpretResumeFn>(jit::InterpretResume, "InterpretResume");
+
+typedef bool (*GeneratorThrowFn)(JSContext*, BaselineFrame*, Handle<GeneratorObject*>,
+ HandleValue, uint32_t);
+static const VMFunction GeneratorThrowInfo =
+ FunctionInfo<GeneratorThrowFn>(jit::GeneratorThrowOrClose, "GeneratorThrowOrClose", TailCall);
+
+bool
+BaselineCompiler::emit_JSOP_RESUME()
+{
+ GeneratorObject::ResumeKind resumeKind = GeneratorObject::getResumeKind(pc);
+
+ frame.syncStack(0);
+ masm.checkStackAlignment();
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(BaselineFrameReg);
+
+ // Load generator object.
+ Register genObj = regs.takeAny();
+ masm.unboxObject(frame.addressOfStackValue(frame.peek(-2)), genObj);
+
+ // Load callee.
+ Register callee = regs.takeAny();
+ masm.unboxObject(Address(genObj, GeneratorObject::offsetOfCalleeSlot()), callee);
+
+ // Load the script. Note that we don't relazify generator scripts, so it's
+ // guaranteed to be non-lazy.
+ Register scratch1 = regs.takeAny();
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), scratch1);
+
+ // Load the BaselineScript or call a stub if we don't have one.
+ Label interpret;
+ masm.loadPtr(Address(scratch1, JSScript::offsetOfBaselineScript()), scratch1);
+ masm.branchPtr(Assembler::BelowOrEqual, scratch1, ImmPtr(BASELINE_DISABLED_SCRIPT), &interpret);
+
+#ifdef JS_TRACE_LOGGING
+ if (!emitTraceLoggerResume(scratch1, regs))
+ return false;
+#endif
+
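+ // If the generator frame was constructing (new.target is an object),
+ // push new.target and set the constructing bit in the callee token.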
+ Register constructing = regs.takeAny();
+ ValueOperand newTarget = regs.takeAnyValue();
+ masm.loadValue(Address(genObj, GeneratorObject::offsetOfNewTargetSlot()), newTarget);
+ masm.move32(Imm32(0), constructing);
+ {
+ Label notConstructing;
+ masm.branchTestObject(Assembler::NotEqual, newTarget, &notConstructing);
+ masm.pushValue(newTarget);
+ masm.move32(Imm32(CalleeToken_FunctionConstructing), constructing);
+ masm.bind(&notConstructing);
+ }
+ regs.add(newTarget);
+
+ // Push |undefined| for all formals.
+ Register scratch2 = regs.takeAny();
+ Label loop, loopDone;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch2);
+ masm.bind(&loop);
+ masm.branchTest32(Assembler::Zero, scratch2, scratch2, &loopDone);
+ {
+ masm.pushValue(UndefinedValue());
+ masm.sub32(Imm32(1), scratch2);
+ masm.jump(&loop);
+ }
+ masm.bind(&loopDone);
+
+ // Push |undefined| for |this|.
+ masm.pushValue(UndefinedValue());
+
+ // Update BaselineFrame frameSize field and create the frame descriptor.
+ masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset),
+ scratch2);
+ masm.subStackPtrFrom(scratch2);
+ masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+ masm.makeFrameDescriptor(scratch2, JitFrame_BaselineJS, JitFrameLayout::Size());
+
+ masm.Push(Imm32(0)); // actual argc
+
+ // Duplicate PushCalleeToken with a variable instead.
+ masm.orPtr(constructing, callee);
+ masm.Push(callee);
+ masm.Push(scratch2); // frame descriptor
+
+ regs.add(callee);
+ regs.add(constructing);
+
+ // Load the return value.
+ ValueOperand retVal = regs.takeAnyValue();
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), retVal);
+
+ // Push a fake return address on the stack. We will resume here when the
+ // generator returns.
+ Label genStart, returnTarget;
+#ifdef JS_USE_LINK_REGISTER
+ masm.call(&genStart);
+#else
+ masm.callAndPushReturnAddress(&genStart);
+#endif
+
+ // Add an IC entry so the return offset -> pc mapping works.
+ if (!appendICEntry(ICEntry::Kind_Op, masm.currentOffset()))
+ return false;
+
+ masm.jump(&returnTarget);
+ masm.bind(&genStart);
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // If profiler instrumentation is on, update lastProfilingFrame on
+ // current JitActivation
+ {
+ Register scratchReg = scratch2;
+ Label skip;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skip);
+ masm.loadPtr(AbsoluteAddress(cx->runtime()->addressOfProfilingActivation()), scratchReg);
+ masm.storePtr(masm.getStackPointer(),
+ Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()));
+ masm.bind(&skip);
+ }
+
+ // Construct BaselineFrame.
+ masm.push(BaselineFrameReg);
+ masm.moveStackPtrTo(BaselineFrameReg);
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+ masm.checkStackAlignment();
+
+ // Store flags and env chain.
+ masm.store32(Imm32(BaselineFrame::HAS_INITIAL_ENV), frame.addressOfFlags());
+ masm.unboxObject(Address(genObj, GeneratorObject::offsetOfEnvironmentChainSlot()), scratch2);
+ masm.storePtr(scratch2, frame.addressOfEnvironmentChain());
+
+ // Store the arguments object if there is one.
+ Label noArgsObj;
+ masm.unboxObject(Address(genObj, GeneratorObject::offsetOfArgsObjSlot()), scratch2);
+ masm.branchTestPtr(Assembler::Zero, scratch2, scratch2, &noArgsObj);
+ {
+ masm.storePtr(scratch2, frame.addressOfArgsObj());
+ masm.or32(Imm32(BaselineFrame::HAS_ARGS_OBJ), frame.addressOfFlags());
+ }
+ masm.bind(&noArgsObj);
+
+ // Push expression slots if needed.
+ Label noExprStack;
+ Address exprStackSlot(genObj, GeneratorObject::offsetOfExpressionStackSlot());
+ masm.branchTestNull(Assembler::Equal, exprStackSlot, &noExprStack);
+ {
+ masm.unboxObject(exprStackSlot, scratch2);
+
+ Register initLength = regs.takeAny();
+ masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
+ masm.load32(Address(scratch2, ObjectElements::offsetOfInitializedLength()), initLength);
+
+ Label loop, loopDone;
+ masm.bind(&loop);
+ masm.branchTest32(Assembler::Zero, initLength, initLength, &loopDone);
+ {
+ masm.pushValue(Address(scratch2, 0));
+ masm.addPtr(Imm32(sizeof(Value)), scratch2);
+ masm.sub32(Imm32(1), initLength);
+ masm.jump(&loop);
+ }
+ masm.bind(&loopDone);
+
+ masm.patchableCallPreBarrier(exprStackSlot, MIRType::Value);
+ masm.storeValue(NullValue(), exprStackSlot);
+ regs.add(initLength);
+ }
+
+ masm.bind(&noExprStack);
+ masm.pushValue(retVal);
+
+ if (resumeKind == GeneratorObject::NEXT) {
+ // Determine the resume address based on the yieldIndex and the
+ // yieldIndex -> native table in the BaselineScript.
+ masm.load32(Address(scratch1, BaselineScript::offsetOfYieldEntriesOffset()), scratch2);
+ masm.addPtr(scratch2, scratch1);
+ masm.unboxInt32(Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()), scratch2);
+ masm.loadPtr(BaseIndex(scratch1, scratch2, ScaleFromElemWidth(sizeof(uintptr_t))), scratch1);
+
+ // Mark as running and jump to the generator's JIT code.
+ masm.storeValue(Int32Value(GeneratorObject::YIELD_INDEX_RUNNING),
+ Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()));
+ masm.jump(scratch1);
+ } else {
+ MOZ_ASSERT(resumeKind == GeneratorObject::THROW || resumeKind == GeneratorObject::CLOSE);
+
+ // Update the frame's frameSize field.
+ masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset),
+ scratch2);
+ masm.movePtr(scratch2, scratch1);
+ masm.subStackPtrFrom(scratch2);
+ masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+ masm.loadBaselineFramePtr(BaselineFrameReg, scratch2);
+
+ prepareVMCall();
+ pushArg(Imm32(resumeKind));
+ pushArg(retVal);
+ pushArg(genObj);
+ pushArg(scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(GeneratorThrowInfo);
+ if (!code)
+ return false;
+
+ // Create the frame descriptor.
+ masm.subStackPtrFrom(scratch1);
+ masm.makeFrameDescriptor(scratch1, JitFrame_BaselineJS, ExitFrameLayout::Size());
+
+ // Push the frame descriptor and a dummy return address (it doesn't
+ // matter what we push here, frame iterators will use the frame pc
+ // set in jit::GeneratorThrowOrClose).
+ masm.push(scratch1);
+
+ // On ARM64, the callee will push the return address.
+#ifndef JS_CODEGEN_ARM64
+ masm.push(ImmWord(0));
+#endif
+ masm.jump(code);
+ }
+
+ // If the generator script has no JIT code, call into the VM.
+ masm.bind(&interpret);
+
+ prepareVMCall();
+ if (resumeKind == GeneratorObject::NEXT) {
+ pushArg(ImmGCPtr(cx->names().next));
+ } else if (resumeKind == GeneratorObject::THROW) {
+ pushArg(ImmGCPtr(cx->names().throw_));
+ } else {
+ MOZ_ASSERT(resumeKind == GeneratorObject::CLOSE);
+ pushArg(ImmGCPtr(cx->names().close));
+ }
+
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), retVal);
+ pushArg(retVal);
+ pushArg(genObj);
+
+ if (!callVM(InterpretResumeInfo))
+ return false;
+
+ // After the generator returns, we restore the stack pointer, push the
+ // return value and we're done.
+ masm.bind(&returnTarget);
+ masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), masm.getStackPointer());
+ frame.popn(2);
+ frame.push(R0);
+ return true;
+}
+
+typedef bool (*CheckSelfHostedFn)(JSContext*, HandleValue);
+static const VMFunction CheckSelfHostedInfo =
+ FunctionInfo<CheckSelfHostedFn>(js::Debug_CheckSelfHosted, "Debug_CheckSelfHosted");
+
+bool
+BaselineCompiler::emit_JSOP_DEBUGCHECKSELFHOSTED()
+{
+#ifdef DEBUG
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+ if (!callVM(CheckSelfHostedInfo))
+ return false;
+#endif
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_IS_CONSTRUCTING()
+{
+ frame.push(MagicValue(JS_IS_CONSTRUCTING));
+ return true;
+}
+
+bool
+BaselineCompiler::emit_JSOP_JUMPTARGET()
+{
+ if (!script->hasScriptCounts())
+ return true;
+ PCCounts* counts = script->maybeGetPCCounts(pc);
+ uint64_t* counterAddr = &counts->numExec();
+ masm.inc64(AbsoluteAddress(counterAddr));
+ return true;
+}
diff --git a/js/src/jit/BaselineCompiler.h b/js/src/jit/BaselineCompiler.h
new file mode 100644
index 000000000..9adf65c27
--- /dev/null
+++ b/js/src/jit/BaselineCompiler.h
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineCompiler_h
+#define jit_BaselineCompiler_h
+
+#include "jit/FixedList.h"
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/BaselineCompiler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/BaselineCompiler-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/BaselineCompiler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/BaselineCompiler-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/BaselineCompiler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/BaselineCompiler-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/BaselineCompiler-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
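+// Ops handled by the baseline compiler. Each entry expands to an
+// emit_JSOP_* method declaration via the EMIT_OP macro below.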
+#define OPCODE_LIST(_) \
+ _(JSOP_NOP) \
+ _(JSOP_NOP_DESTRUCTURING) \
+ _(JSOP_LABEL) \
+ _(JSOP_POP) \
+ _(JSOP_POPN) \
+ _(JSOP_DUPAT) \
+ _(JSOP_ENTERWITH) \
+ _(JSOP_LEAVEWITH) \
+ _(JSOP_DUP) \
+ _(JSOP_DUP2) \
+ _(JSOP_SWAP) \
+ _(JSOP_PICK) \
+ _(JSOP_GOTO) \
+ _(JSOP_IFEQ) \
+ _(JSOP_IFNE) \
+ _(JSOP_AND) \
+ _(JSOP_OR) \
+ _(JSOP_NOT) \
+ _(JSOP_POS) \
+ _(JSOP_LOOPHEAD) \
+ _(JSOP_LOOPENTRY) \
+ _(JSOP_VOID) \
+ _(JSOP_UNDEFINED) \
+ _(JSOP_HOLE) \
+ _(JSOP_NULL) \
+ _(JSOP_TRUE) \
+ _(JSOP_FALSE) \
+ _(JSOP_ZERO) \
+ _(JSOP_ONE) \
+ _(JSOP_INT8) \
+ _(JSOP_INT32) \
+ _(JSOP_UINT16) \
+ _(JSOP_UINT24) \
+ _(JSOP_DOUBLE) \
+ _(JSOP_STRING) \
+ _(JSOP_SYMBOL) \
+ _(JSOP_OBJECT) \
+ _(JSOP_CALLSITEOBJ) \
+ _(JSOP_REGEXP) \
+ _(JSOP_LAMBDA) \
+ _(JSOP_LAMBDA_ARROW) \
+ _(JSOP_BITOR) \
+ _(JSOP_BITXOR) \
+ _(JSOP_BITAND) \
+ _(JSOP_LSH) \
+ _(JSOP_RSH) \
+ _(JSOP_URSH) \
+ _(JSOP_ADD) \
+ _(JSOP_SUB) \
+ _(JSOP_MUL) \
+ _(JSOP_DIV) \
+ _(JSOP_MOD) \
+ _(JSOP_POW) \
+ _(JSOP_LT) \
+ _(JSOP_LE) \
+ _(JSOP_GT) \
+ _(JSOP_GE) \
+ _(JSOP_EQ) \
+ _(JSOP_NE) \
+ _(JSOP_STRICTEQ) \
+ _(JSOP_STRICTNE) \
+ _(JSOP_CONDSWITCH) \
+ _(JSOP_CASE) \
+ _(JSOP_DEFAULT) \
+ _(JSOP_LINENO) \
+ _(JSOP_BITNOT) \
+ _(JSOP_NEG) \
+ _(JSOP_NEWARRAY) \
+ _(JSOP_SPREADCALLARRAY) \
+ _(JSOP_NEWARRAY_COPYONWRITE) \
+ _(JSOP_INITELEM_ARRAY) \
+ _(JSOP_NEWOBJECT) \
+ _(JSOP_NEWINIT) \
+ _(JSOP_INITELEM) \
+ _(JSOP_INITELEM_GETTER) \
+ _(JSOP_INITELEM_SETTER) \
+ _(JSOP_INITELEM_INC) \
+ _(JSOP_MUTATEPROTO) \
+ _(JSOP_INITPROP) \
+ _(JSOP_INITLOCKEDPROP) \
+ _(JSOP_INITHIDDENPROP) \
+ _(JSOP_INITPROP_GETTER) \
+ _(JSOP_INITPROP_SETTER) \
+ _(JSOP_ARRAYPUSH) \
+ _(JSOP_GETELEM) \
+ _(JSOP_SETELEM) \
+ _(JSOP_STRICTSETELEM) \
+ _(JSOP_CALLELEM) \
+ _(JSOP_DELELEM) \
+ _(JSOP_STRICTDELELEM) \
+ _(JSOP_IN) \
+ _(JSOP_GETGNAME) \
+ _(JSOP_BINDGNAME) \
+ _(JSOP_SETGNAME) \
+ _(JSOP_STRICTSETGNAME) \
+ _(JSOP_SETNAME) \
+ _(JSOP_STRICTSETNAME) \
+ _(JSOP_GETPROP) \
+ _(JSOP_SETPROP) \
+ _(JSOP_STRICTSETPROP) \
+ _(JSOP_CALLPROP) \
+ _(JSOP_DELPROP) \
+ _(JSOP_STRICTDELPROP) \
+ _(JSOP_LENGTH) \
+ _(JSOP_GETXPROP) \
+ _(JSOP_GETALIASEDVAR) \
+ _(JSOP_SETALIASEDVAR) \
+ _(JSOP_GETNAME) \
+ _(JSOP_BINDNAME) \
+ _(JSOP_DELNAME) \
+ _(JSOP_GETIMPORT) \
+ _(JSOP_GETINTRINSIC) \
+ _(JSOP_BINDVAR) \
+ _(JSOP_DEFVAR) \
+ _(JSOP_DEFCONST) \
+ _(JSOP_DEFLET) \
+ _(JSOP_DEFFUN) \
+ _(JSOP_GETLOCAL) \
+ _(JSOP_SETLOCAL) \
+ _(JSOP_GETARG) \
+ _(JSOP_SETARG) \
+ _(JSOP_CHECKLEXICAL) \
+ _(JSOP_INITLEXICAL) \
+ _(JSOP_INITGLEXICAL) \
+ _(JSOP_CHECKALIASEDLEXICAL) \
+ _(JSOP_INITALIASEDLEXICAL) \
+ _(JSOP_UNINITIALIZED) \
+ _(JSOP_CALL) \
+ _(JSOP_CALLITER) \
+ _(JSOP_FUNCALL) \
+ _(JSOP_FUNAPPLY) \
+ _(JSOP_NEW) \
+ _(JSOP_EVAL) \
+ _(JSOP_STRICTEVAL) \
+ _(JSOP_SPREADCALL) \
+ _(JSOP_SPREADNEW) \
+ _(JSOP_SPREADEVAL) \
+ _(JSOP_STRICTSPREADEVAL) \
+ _(JSOP_OPTIMIZE_SPREADCALL)\
+ _(JSOP_IMPLICITTHIS) \
+ _(JSOP_GIMPLICITTHIS) \
+ _(JSOP_INSTANCEOF) \
+ _(JSOP_TYPEOF) \
+ _(JSOP_TYPEOFEXPR) \
+ _(JSOP_THROWMSG) \
+ _(JSOP_THROW) \
+ _(JSOP_THROWING) \
+ _(JSOP_TRY) \
+ _(JSOP_FINALLY) \
+ _(JSOP_GOSUB) \
+ _(JSOP_RETSUB) \
+ _(JSOP_PUSHLEXICALENV) \
+ _(JSOP_POPLEXICALENV) \
+ _(JSOP_FRESHENLEXICALENV) \
+ _(JSOP_RECREATELEXICALENV) \
+ _(JSOP_DEBUGLEAVELEXICALENV) \
+ _(JSOP_PUSHVARENV) \
+ _(JSOP_POPVARENV) \
+ _(JSOP_EXCEPTION) \
+ _(JSOP_DEBUGGER) \
+ _(JSOP_ARGUMENTS) \
+ _(JSOP_RUNONCE) \
+ _(JSOP_REST) \
+ _(JSOP_TOASYNC) \
+ _(JSOP_TOID) \
+ _(JSOP_TOSTRING) \
+ _(JSOP_TABLESWITCH) \
+ _(JSOP_ITER) \
+ _(JSOP_MOREITER) \
+ _(JSOP_ISNOITER) \
+ _(JSOP_ENDITER) \
+ _(JSOP_GENERATOR) \
+ _(JSOP_INITIALYIELD) \
+ _(JSOP_YIELD) \
+ _(JSOP_DEBUGAFTERYIELD) \
+ _(JSOP_FINALYIELDRVAL) \
+ _(JSOP_RESUME) \
+ _(JSOP_CALLEE) \
+ _(JSOP_GETRVAL) \
+ _(JSOP_SETRVAL) \
+ _(JSOP_RETRVAL) \
+ _(JSOP_RETURN) \
+ _(JSOP_FUNCTIONTHIS) \
+ _(JSOP_GLOBALTHIS) \
+ _(JSOP_CHECKISOBJ) \
+ _(JSOP_CHECKTHIS) \
+ _(JSOP_CHECKRETURN) \
+ _(JSOP_NEWTARGET) \
+ _(JSOP_SUPERCALL) \
+ _(JSOP_SPREADSUPERCALL) \
+ _(JSOP_THROWSETCONST) \
+ _(JSOP_THROWSETALIASEDCONST) \
+ _(JSOP_THROWSETCALLEE) \
+ _(JSOP_INITHIDDENPROP_GETTER) \
+ _(JSOP_INITHIDDENPROP_SETTER) \
+ _(JSOP_INITHIDDENELEM) \
+ _(JSOP_INITHIDDENELEM_GETTER) \
+ _(JSOP_INITHIDDENELEM_SETTER) \
+ _(JSOP_CHECKOBJCOERCIBLE) \
+ _(JSOP_DEBUGCHECKSELFHOSTED) \
+ _(JSOP_JUMPTARGET) \
+ _(JSOP_IS_CONSTRUCTING)
+
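+// The BaselineCompiler translates a script's bytecode into baseline JIT
+// native code, emitting code (and ICs) one op at a time.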
+class BaselineCompiler : public BaselineCompilerSpecific
+{
+ FixedList<Label> labels_;
+ NonAssertingLabel return_;
+ NonAssertingLabel postBarrierSlot_;
+
+ // Native code offset right before the scope chain is initialized.
+ CodeOffset prologueOffset_;
+
+ // Native code offset right before the frame is popped and the method
+ // returned from.
+ CodeOffset epilogueOffset_;
+
+ // Native code offset right after debug prologue and epilogue, or
+ // equivalent positions when debug mode is off.
+ CodeOffset postDebugPrologueOffset_;
+
+ // For each INITIALYIELD or YIELD op, this Vector maps the yield index
+ // to the bytecode offset of the next op.
+ Vector<uint32_t> yieldOffsets_;
+
+ // Whether any on-stack arguments are modified.
+ bool modifiesArguments_;
+
+ Label* labelOf(jsbytecode* pc) {
+ return &labels_[script->pcToOffset(pc)];
+ }
+
+ // If a script has more |nslots| than this, then emit code to do an
+ // early stack check.
+ static const unsigned EARLY_STACK_CHECK_SLOT_COUNT = 128;
+ bool needsEarlyStackCheck() const {
+ return script->nslots() > EARLY_STACK_CHECK_SLOT_COUNT;
+ }
+
+ public:
+ BaselineCompiler(JSContext* cx, TempAllocator& alloc, JSScript* script);
+ MOZ_MUST_USE bool init();
+
+ MethodStatus compile();
+
+ private:
+ MethodStatus emitBody();
+
+ MOZ_MUST_USE bool emitCheckThis(ValueOperand val);
+ void emitLoadReturnValue(ValueOperand val);
+
+ void emitInitializeLocals();
+ MOZ_MUST_USE bool emitPrologue();
+ MOZ_MUST_USE bool emitEpilogue();
+ MOZ_MUST_USE bool emitOutOfLinePostBarrierSlot();
+ MOZ_MUST_USE bool emitIC(ICStub* stub, ICEntry::Kind kind);
+ MOZ_MUST_USE bool emitOpIC(ICStub* stub) {
+ return emitIC(stub, ICEntry::Kind_Op);
+ }
+ MOZ_MUST_USE bool emitNonOpIC(ICStub* stub) {
+ return emitIC(stub, ICEntry::Kind_NonOp);
+ }
+
+ MOZ_MUST_USE bool emitStackCheck(bool earlyCheck=false);
+ MOZ_MUST_USE bool emitInterruptCheck();
+ MOZ_MUST_USE bool emitWarmUpCounterIncrement(bool allowOsr=true);
+ MOZ_MUST_USE bool emitArgumentTypeChecks();
+ void emitIsDebuggeeCheck();
+ MOZ_MUST_USE bool emitDebugPrologue();
+ MOZ_MUST_USE bool emitDebugTrap();
+ MOZ_MUST_USE bool emitTraceLoggerEnter();
+ MOZ_MUST_USE bool emitTraceLoggerExit();
+ MOZ_MUST_USE bool emitTraceLoggerResume(Register script, AllocatableGeneralRegisterSet& regs);
+
+ void emitProfilerEnterFrame();
+ void emitProfilerExitFrame();
+
+ MOZ_MUST_USE bool initEnvironmentChain();
+
+ void storeValue(const StackValue* source, const Address& dest,
+ const ValueOperand& scratch);
+
+#define EMIT_OP(op) bool emit_##op();
+ OPCODE_LIST(EMIT_OP)
+#undef EMIT_OP
+
+ // JSOP_NEG, JSOP_BITNOT
+ MOZ_MUST_USE bool emitUnaryArith();
+
+ // JSOP_BITXOR, JSOP_LSH, JSOP_ADD etc.
+ MOZ_MUST_USE bool emitBinaryArith();
+
+ // Handles JSOP_LT, JSOP_GT, and friends
+ MOZ_MUST_USE bool emitCompare();
+
+ MOZ_MUST_USE bool emitReturn();
+
+ MOZ_MUST_USE bool emitToBoolean();
+ MOZ_MUST_USE bool emitTest(bool branchIfTrue);
+ MOZ_MUST_USE bool emitAndOr(bool branchIfTrue);
+ MOZ_MUST_USE bool emitCall();
+ MOZ_MUST_USE bool emitSpreadCall();
+
+ MOZ_MUST_USE bool emitInitPropGetterSetter();
+ MOZ_MUST_USE bool emitInitElemGetterSetter();
+
+ MOZ_MUST_USE bool emitFormalArgAccess(uint32_t arg, bool get);
+
+ MOZ_MUST_USE bool emitThrowConstAssignment();
+ MOZ_MUST_USE bool emitUninitializedLexicalCheck(const ValueOperand& val);
+
+ MOZ_MUST_USE bool addPCMappingEntry(bool addIndexEntry);
+
+ MOZ_MUST_USE bool addYieldOffset();
+
+ void getEnvironmentCoordinateObject(Register reg);
+ Address getEnvironmentCoordinateAddressFromObject(Register objReg, Register reg);
+ Address getEnvironmentCoordinateAddress(Register reg);
+};
+
+extern const VMFunction NewArrayCopyOnWriteInfo;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineCompiler_h */
diff --git a/js/src/jit/BaselineDebugModeOSR.cpp b/js/src/jit/BaselineDebugModeOSR.cpp
new file mode 100644
index 000000000..8b937ee24
--- /dev/null
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -0,0 +1,1184 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineDebugModeOSR.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/BaselineIC.h"
+#include "jit/JitcodeMap.h"
+#include "jit/Linker.h"
+#include "jit/PerfSpewer.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+struct DebugModeOSREntry
+{
+ JSScript* script;
+ BaselineScript* oldBaselineScript;
+ ICStub* oldStub;
+ ICStub* newStub;
+ BaselineDebugModeOSRInfo* recompInfo;
+ uint32_t pcOffset;
+ ICEntry::Kind frameKind;
+
+ explicit DebugModeOSREntry(JSScript* script)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ oldStub(nullptr),
+ newStub(nullptr),
+ recompInfo(nullptr),
+ pcOffset(uint32_t(-1)),
+ frameKind(ICEntry::Kind_Invalid)
+ { }
+
+ DebugModeOSREntry(JSScript* script, uint32_t pcOffset)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ oldStub(nullptr),
+ newStub(nullptr),
+ recompInfo(nullptr),
+ pcOffset(pcOffset),
+ frameKind(ICEntry::Kind_Invalid)
+ { }
+
+ DebugModeOSREntry(JSScript* script, const ICEntry& icEntry)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ oldStub(nullptr),
+ newStub(nullptr),
+ recompInfo(nullptr),
+ pcOffset(icEntry.pcOffset()),
+ frameKind(icEntry.kind())
+ {
+#ifdef DEBUG
+ MOZ_ASSERT(pcOffset == icEntry.pcOffset());
+ MOZ_ASSERT(frameKind == icEntry.kind());
+#endif
+ }
+
+ DebugModeOSREntry(JSScript* script, BaselineDebugModeOSRInfo* info)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ oldStub(nullptr),
+ newStub(nullptr),
+ recompInfo(nullptr),
+ pcOffset(script->pcToOffset(info->pc)),
+ frameKind(info->frameKind)
+ {
+#ifdef DEBUG
+ MOZ_ASSERT(pcOffset == script->pcToOffset(info->pc));
+ MOZ_ASSERT(frameKind == info->frameKind);
+#endif
+ }
+
+ DebugModeOSREntry(DebugModeOSREntry&& other)
+ : script(other.script),
+ oldBaselineScript(other.oldBaselineScript),
+ oldStub(other.oldStub),
+ newStub(other.newStub),
+ recompInfo(other.recompInfo ? other.takeRecompInfo() : nullptr),
+ pcOffset(other.pcOffset),
+ frameKind(other.frameKind)
+ { }
+
+ ~DebugModeOSREntry() {
+ // Note that this is nulled out when the recompInfo is taken by the
+ // frame. The frame then has the responsibility of freeing the
+ // recompInfo.
+ js_delete(recompInfo);
+ }
+
+ bool needsRecompileInfo() const {
+ return frameKind == ICEntry::Kind_CallVM ||
+ frameKind == ICEntry::Kind_WarmupCounter ||
+ frameKind == ICEntry::Kind_StackCheck ||
+ frameKind == ICEntry::Kind_EarlyStackCheck ||
+ frameKind == ICEntry::Kind_DebugTrap ||
+ frameKind == ICEntry::Kind_DebugPrologue ||
+ frameKind == ICEntry::Kind_DebugEpilogue;
+ }
+
+ bool recompiled() const {
+ return oldBaselineScript != script->baselineScript();
+ }
+
+ BaselineDebugModeOSRInfo* takeRecompInfo() {
+ MOZ_ASSERT(needsRecompileInfo() && recompInfo);
+ BaselineDebugModeOSRInfo* tmp = recompInfo;
+ recompInfo = nullptr;
+ return tmp;
+ }
+
+ bool allocateRecompileInfo(JSContext* cx) {
+ MOZ_ASSERT(script);
+ MOZ_ASSERT(needsRecompileInfo());
+
+ // If we are returning to a frame which needs a continuation fixer,
+ // allocate the recompile info up front so that the patching function
+ // is infallible.
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+
+ // XXX: Work around a compiler error that disallows using bitfields
+ // with the template magic of new_.
+ ICEntry::Kind kind = frameKind;
+ recompInfo = cx->new_<BaselineDebugModeOSRInfo>(pc, kind);
+ return !!recompInfo;
+ }
+
+ ICFallbackStub* fallbackStub() const {
+ MOZ_ASSERT(script);
+ MOZ_ASSERT(oldStub);
+ return script->baselineScript()->icEntryFromPCOffset(pcOffset).fallbackStub();
+ }
+};
+
+typedef Vector<DebugModeOSREntry> DebugModeOSREntryVector;
+
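+// Iterates over the entry vector, visiting each script at most once even if
+// it appears in several entries (a script can be on the stack multiple times).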
+class UniqueScriptOSREntryIter
+{
+ const DebugModeOSREntryVector& entries_;
+ size_t index_;
+
+ public:
+ explicit UniqueScriptOSREntryIter(const DebugModeOSREntryVector& entries)
+ : entries_(entries),
+ index_(0)
+ { }
+
+ bool done() {
+ return index_ == entries_.length();
+ }
+
+ const DebugModeOSREntry& entry() {
+ MOZ_ASSERT(!done());
+ return entries_[index_];
+ }
+
+ UniqueScriptOSREntryIter& operator++() {
+ MOZ_ASSERT(!done());
+ while (++index_ < entries_.length()) {
+ bool unique = true;
+ for (size_t i = 0; i < index_; i++) {
+ if (entries_[i].script == entries_[index_].script) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique)
+ break;
+ }
+ return *this;
+ }
+};
+
+static bool
+CollectJitStackScripts(JSContext* cx, const Debugger::ExecutionObservableSet& obs,
+ const ActivationIterator& activation, DebugModeOSREntryVector& entries)
+{
+ ICStub* prevFrameStubPtr = nullptr;
+ bool needsRecompileHandler = false;
+ for (JitFrameIterator iter(activation); !iter.done(); ++iter) {
+ switch (iter.type()) {
+ case JitFrame_BaselineJS: {
+ JSScript* script = iter.script();
+
+ if (!obs.shouldRecompileOrInvalidate(script)) {
+ prevFrameStubPtr = nullptr;
+ break;
+ }
+
+ BaselineFrame* frame = iter.baselineFrame();
+
+ if (BaselineDebugModeOSRInfo* info = frame->getDebugModeOSRInfo()) {
+ // If patching a previously patched yet unpopped frame, we can
+ // use the BaselineDebugModeOSRInfo on the frame directly to
+ // patch. Indeed, we cannot use iter.returnAddressToFp(), as
+ // it points into the debug mode OSR handler and cannot be
+ // used to look up a corresponding ICEntry.
+ //
+ // See cases F and G in PatchBaselineFramesForDebugMode.
+ if (!entries.append(DebugModeOSREntry(script, info)))
+ return false;
+ } else if (frame->isHandlingException()) {
+ // We are in the middle of handling an exception and the frame
+ // must have an override pc.
+ uint32_t offset = script->pcToOffset(frame->overridePc());
+ if (!entries.append(DebugModeOSREntry(script, offset)))
+ return false;
+ } else {
+ // The frame must be settled on a pc with an ICEntry.
+ uint8_t* retAddr = iter.returnAddressToFp();
+ BaselineICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr);
+ if (!entries.append(DebugModeOSREntry(script, icEntry)))
+ return false;
+ }
+
+ if (entries.back().needsRecompileInfo()) {
+ if (!entries.back().allocateRecompileInfo(cx))
+ return false;
+
+ needsRecompileHandler |= true;
+ }
+ entries.back().oldStub = prevFrameStubPtr;
+ prevFrameStubPtr = nullptr;
+ break;
+ }
+
+ case JitFrame_BaselineStub:
+ prevFrameStubPtr =
+ reinterpret_cast<BaselineStubFrameLayout*>(iter.fp())->maybeStubPtr();
+ break;
+
+ case JitFrame_IonJS: {
+ InlineFrameIterator inlineIter(cx, &iter);
+ while (true) {
+ if (obs.shouldRecompileOrInvalidate(inlineIter.script())) {
+ if (!entries.append(DebugModeOSREntry(inlineIter.script())))
+ return false;
+ }
+ if (!inlineIter.more())
+ break;
+ ++inlineIter;
+ }
+ break;
+ }
+
+ default:;
+ }
+ }
+
+ // Initialize the on-stack recompile handler, which may fail, so that
+ // patching the stack is infallible.
+ if (needsRecompileHandler) {
+ JitRuntime* rt = cx->runtime()->jitRuntime();
+ if (!rt->getBaselineDebugModeOSRHandlerAddress(cx, true))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+CollectInterpreterStackScripts(JSContext* cx, const Debugger::ExecutionObservableSet& obs,
+ const ActivationIterator& activation,
+ DebugModeOSREntryVector& entries)
+{
+ // Collect interpreter frame stacks with IonScript or BaselineScript as
+ // well. These do not need to be patched, but do need to be invalidated
+ // and recompiled.
+ InterpreterActivation* act = activation.activation()->asInterpreter();
+ for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
+ JSScript* script = iter.frame()->script();
+ if (obs.shouldRecompileOrInvalidate(script)) {
+ if (!entries.append(DebugModeOSREntry(iter.frame()->script())))
+ return false;
+ }
+ }
+ return true;
+}
+
+#ifdef JS_JITSPEW
+static const char*
+ICEntryKindToString(ICEntry::Kind kind)
+{
+ switch (kind) {
+ case ICEntry::Kind_Op:
+ return "IC";
+ case ICEntry::Kind_NonOp:
+ return "non-op IC";
+ case ICEntry::Kind_CallVM:
+ return "callVM";
+ case ICEntry::Kind_WarmupCounter:
+ return "warmup counter";
+ case ICEntry::Kind_StackCheck:
+ return "stack check";
+ case ICEntry::Kind_EarlyStackCheck:
+ return "early stack check";
+ case ICEntry::Kind_DebugTrap:
+ return "debug trap";
+ case ICEntry::Kind_DebugPrologue:
+ return "debug prologue";
+ case ICEntry::Kind_DebugEpilogue:
+ return "debug epilogue";
+ default:
+ MOZ_CRASH("bad ICEntry kind");
+ }
+}
+#endif // JS_JITSPEW
+
+static void
+SpewPatchBaselineFrame(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
+ JSScript* script, ICEntry::Kind frameKind, jsbytecode* pc)
+{
+ JitSpew(JitSpew_BaselineDebugModeOSR,
+ "Patch return %p -> %p on BaselineJS frame (%s:%" PRIuSIZE ") from %s at %s",
+ oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
+ ICEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
+}
+
+static void
+SpewPatchBaselineFrameFromExceptionHandler(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
+ JSScript* script, jsbytecode* pc)
+{
+ JitSpew(JitSpew_BaselineDebugModeOSR,
+ "Patch return %p -> %p on BaselineJS frame (%s:%" PRIuSIZE ") from exception handler at %s",
+ oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
+ CodeName[(JSOp)*pc]);
+}
+
+static void
+SpewPatchStubFrame(ICStub* oldStub, ICStub* newStub)
+{
+ JitSpew(JitSpew_BaselineDebugModeOSR,
+ "Patch stub %p -> %p on BaselineStub frame (%s)",
+ oldStub, newStub, newStub ? ICStub::KindString(newStub->kind()) : "exception handler");
+}
+
+static void
+PatchBaselineFramesForDebugMode(JSContext* cx, const Debugger::ExecutionObservableSet& obs,
+ const ActivationIterator& activation,
+ DebugModeOSREntryVector& entries, size_t* start)
+{
+ //
+ // Recompile Patching Overview
+ //
+ // When toggling debug mode with live baseline scripts on the stack, we
+ // could have entered the VM via the following ways from the baseline
+ // script.
+ //
+ // Off to On:
+ // A. From a "can call" stub.
+ // B. From a VM call.
+ // H. From inside HandleExceptionBaseline.
+ // I. From inside the interrupt handler via the prologue stack check.
+ // J. From the warmup counter in the prologue.
+ //
+ // On to Off:
+ // - All the ways above.
+ // C. From the debug trap handler.
+ // D. From the debug prologue.
+ // E. From the debug epilogue.
+ //
+ // Cycles (On to Off to On)+ or (Off to On to Off)+:
+ // F. Undo cases B, C, D, E, I or J above on previously patched yet unpopped
+ // frames.
+ //
+ // In general, we patch the return address from the VM call to return to a
+ // "continuation fixer" to fix up machine state (registers and stack
+ // state). Specifics on what needs to be done are documented below.
+ //
+
+ CommonFrameLayout* prev = nullptr;
+ size_t entryIndex = *start;
+
+ for (JitFrameIterator iter(activation); !iter.done(); ++iter) {
+ switch (iter.type()) {
+ case JitFrame_BaselineJS: {
+ // If the script wasn't recompiled or is not observed, there's
+ // nothing to patch.
+ if (!obs.shouldRecompileOrInvalidate(iter.script()))
+ break;
+
+ DebugModeOSREntry& entry = entries[entryIndex];
+
+ if (!entry.recompiled()) {
+ entryIndex++;
+ break;
+ }
+
+ JSScript* script = entry.script;
+ uint32_t pcOffset = entry.pcOffset;
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+
+ MOZ_ASSERT(script == iter.script());
+ MOZ_ASSERT(pcOffset < script->length());
+
+ BaselineScript* bl = script->baselineScript();
+ ICEntry::Kind kind = entry.frameKind;
+
+ if (kind == ICEntry::Kind_Op) {
+ // Case A above.
+ //
+ // Patching these cases needs to patch both the stub frame and
+ // the baseline frame. The stub frame is patched below. For
+ // the baseline frame here, we resume right after the IC
+ // returns.
+ //
+ // Since we're using the same IC stub code, we can resume
+ // directly to the IC resume address.
+ uint8_t* retAddr = bl->returnAddressForIC(bl->icEntryFromPCOffset(pcOffset));
+ SpewPatchBaselineFrame(prev->returnAddress(), retAddr, script, kind, pc);
+ DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(
+ cx, prev->returnAddress(), retAddr);
+ prev->setReturnAddress(retAddr);
+ entryIndex++;
+ break;
+ }
+
+ if (kind == ICEntry::Kind_Invalid) {
+ // Case H above.
+ //
+ // We are recompiling on-stack scripts from inside the
+ // exception handler, by way of an onExceptionUnwind
+ // invocation, on a pc without an ICEntry. This means the
+ // frame must have an override pc.
+ //
+ // If profiling is off, patch the resume address to nullptr,
+ // to ensure the old address is not used anywhere.
+ //
+ // If profiling is on, JitProfilingFrameIterator requires a
+ // valid return address.
+ MOZ_ASSERT(iter.baselineFrame()->isHandlingException());
+ MOZ_ASSERT(iter.baselineFrame()->overridePc() == pc);
+ uint8_t* retAddr;
+ if (cx->runtime()->spsProfiler.enabled())
+ retAddr = bl->nativeCodeForPC(script, pc);
+ else
+ retAddr = nullptr;
+ SpewPatchBaselineFrameFromExceptionHandler(prev->returnAddress(), retAddr,
+ script, pc);
+ DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(
+ cx, prev->returnAddress(), retAddr);
+ prev->setReturnAddress(retAddr);
+ entryIndex++;
+ break;
+ }
+
+ // Case F above.
+ //
+ // We undo a previous recompile by handling cases B, C, D, E, I or J
+ // like normal, except that we retrieve the pc information via
+ // the previous OSR debug info stashed on the frame.
+ BaselineDebugModeOSRInfo* info = iter.baselineFrame()->getDebugModeOSRInfo();
+ if (info) {
+ MOZ_ASSERT(info->pc == pc);
+ MOZ_ASSERT(info->frameKind == kind);
+ MOZ_ASSERT(kind == ICEntry::Kind_CallVM ||
+ kind == ICEntry::Kind_WarmupCounter ||
+ kind == ICEntry::Kind_StackCheck ||
+ kind == ICEntry::Kind_EarlyStackCheck ||
+ kind == ICEntry::Kind_DebugTrap ||
+ kind == ICEntry::Kind_DebugPrologue ||
+ kind == ICEntry::Kind_DebugEpilogue);
+
+ // We will have allocated a new recompile info, so delete the
+ // existing one.
+ iter.baselineFrame()->deleteDebugModeOSRInfo();
+ }
+
+ // The RecompileInfo must already be allocated so that this
+ // function may be infallible.
+ BaselineDebugModeOSRInfo* recompInfo = entry.takeRecompInfo();
+
+ bool popFrameReg;
+ switch (kind) {
+ case ICEntry::Kind_CallVM: {
+ // Case B above.
+ //
+ // Patching returns from a VM call. After fixing up the
+ // continuation for unsynced values (the frame register is
+ // popped by the callVM trampoline), we resume at the
+ // return-from-callVM address. The assumption here is that all
+ // callVMs which can trigger debug mode OSR are the *only*
+ // callVMs generated for their respective pc locations in the
+ // baseline JIT code.
+ BaselineICEntry& callVMEntry = bl->callVMEntryFromPCOffset(pcOffset);
+ recompInfo->resumeAddr = bl->returnAddressForIC(callVMEntry);
+ popFrameReg = false;
+ break;
+ }
+
+ case ICEntry::Kind_WarmupCounter: {
+ // Case J above.
+ //
+ // Patching mechanism is identical to a CallVM. This is
+ // handled specially only because the warmup counter VM call is
+ // part of the prologue and not tied to an opcode.
+ BaselineICEntry& warmupCountEntry = bl->warmupCountICEntry();
+ recompInfo->resumeAddr = bl->returnAddressForIC(warmupCountEntry);
+ popFrameReg = false;
+ break;
+ }
+
+ case ICEntry::Kind_StackCheck:
+ case ICEntry::Kind_EarlyStackCheck: {
+ // Case I above.
+ //
+ // Patching mechanism is identical to a CallVM. This is
+ // handled specially only because the stack check VM call is
+ // part of the prologue and not tied to an opcode.
+ bool earlyCheck = kind == ICEntry::Kind_EarlyStackCheck;
+ BaselineICEntry& stackCheckEntry = bl->stackCheckICEntry(earlyCheck);
+ recompInfo->resumeAddr = bl->returnAddressForIC(stackCheckEntry);
+ popFrameReg = false;
+ break;
+ }
+
+ case ICEntry::Kind_DebugTrap:
+ // Case C above.
+ //
+ // Debug traps are emitted before each op, so we resume at the
+ // same op. Calling debug trap handlers is done via a toggled
+ // call to a thunk (DebugTrapHandler) that takes care of tearing
+ // down its own stub frame so we don't need to worry about
+ // popping the frame reg.
+ recompInfo->resumeAddr = bl->nativeCodeForPC(script, pc, &recompInfo->slotInfo);
+ popFrameReg = false;
+ break;
+
+ case ICEntry::Kind_DebugPrologue:
+ // Case D above.
+ //
+ // We patch a jump directly to the right place in the prologue
+ // after popping the frame reg and checking for forced return.
+ recompInfo->resumeAddr = bl->postDebugPrologueAddr();
+ popFrameReg = true;
+ break;
+
+ default:
+ // Case E above.
+ //
+ // We patch a jump directly to the epilogue after popping the
+ // frame reg and checking for forced return.
+ MOZ_ASSERT(kind == ICEntry::Kind_DebugEpilogue);
+ recompInfo->resumeAddr = bl->epilogueEntryAddr();
+ popFrameReg = true;
+ break;
+ }
+
+ SpewPatchBaselineFrame(prev->returnAddress(), recompInfo->resumeAddr,
+ script, kind, recompInfo->pc);
+
+ // The recompile handler must already be created so that this
+ // function may be infallible.
+ JitRuntime* rt = cx->runtime()->jitRuntime();
+ void* handlerAddr = rt->getBaselineDebugModeOSRHandlerAddress(cx, popFrameReg);
+ MOZ_ASSERT(handlerAddr);
+
+ prev->setReturnAddress(reinterpret_cast<uint8_t*>(handlerAddr));
+ iter.baselineFrame()->setDebugModeOSRInfo(recompInfo);
+ iter.baselineFrame()->setOverridePc(recompInfo->pc);
+
+ entryIndex++;
+ break;
+ }
+
+ case JitFrame_BaselineStub: {
+ JitFrameIterator prev(iter);
+ ++prev;
+ BaselineFrame* prevFrame = prev.baselineFrame();
+ if (!obs.shouldRecompileOrInvalidate(prevFrame->script()))
+ break;
+
+ DebugModeOSREntry& entry = entries[entryIndex];
+
+ // If the script wasn't recompiled, there's nothing to patch.
+ if (!entry.recompiled())
+ break;
+
+ BaselineStubFrameLayout* layout =
+ reinterpret_cast<BaselineStubFrameLayout*>(iter.fp());
+ MOZ_ASSERT(layout->maybeStubPtr() == entry.oldStub);
+
+ // Patch baseline stub frames for case A above.
+ //
+ // We need to patch the stub frame to point to an ICStub belonging
+ // to the recompiled baseline script. These stubs are allocated up
+ // front in CloneOldBaselineStub. They share the same JitCode as
+ // the old baseline script's stubs, so we don't need to patch the
+ // exit frame's return address.
+ //
+ // Subtlety here: the debug trap handler of case C above pushes a
+ // stub frame with a null stub pointer. This handler will exist
+ // across recompiling the script, so we don't patch anything for
+ // such stub frames. We will return to that handler, which takes
+ // care of cleaning up the stub frame.
+ //
+ // Note that for stub pointers that are already on the C stack
+ // (i.e. fallback calls), we need to check for recompilation using
+ // DebugModeOSRVolatileStub.
+ if (layout->maybeStubPtr()) {
+ MOZ_ASSERT(entry.newStub || prevFrame->isHandlingException());
+ SpewPatchStubFrame(entry.oldStub, entry.newStub);
+ layout->setStubPtr(entry.newStub);
+ }
+
+ break;
+ }
+
+ case JitFrame_IonJS: {
+ // Nothing to patch.
+ InlineFrameIterator inlineIter(cx, &iter);
+ while (true) {
+ if (obs.shouldRecompileOrInvalidate(inlineIter.script()))
+ entryIndex++;
+ if (!inlineIter.more())
+ break;
+ ++inlineIter;
+ }
+ break;
+ }
+
+ default:;
+ }
+
+ prev = iter.current();
+ }
+
+ *start = entryIndex;
+}
+
+static void
+SkipInterpreterFrameEntries(const Debugger::ExecutionObservableSet& obs,
+ const ActivationIterator& activation,
+ DebugModeOSREntryVector& entries, size_t* start)
+{
+ size_t entryIndex = *start;
+
+ // Skip interpreter frames, which do not need patching.
+ InterpreterActivation* act = activation.activation()->asInterpreter();
+ for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
+ if (obs.shouldRecompileOrInvalidate(iter.frame()->script()))
+ entryIndex++;
+ }
+
+ *start = entryIndex;
+}
+
+static bool
+RecompileBaselineScriptForDebugMode(JSContext* cx, JSScript* script,
+ Debugger::IsObserving observing)
+{
+ BaselineScript* oldBaselineScript = script->baselineScript();
+
+ // If a script is on the stack multiple times, it may have already
+ // been recompiled.
+ if (oldBaselineScript->hasDebugInstrumentation() == observing)
+ return true;
+
+ JitSpew(JitSpew_BaselineDebugModeOSR, "Recompiling (%s:%" PRIuSIZE ") for %s",
+ script->filename(), script->lineno(), observing ? "DEBUGGING" : "NORMAL EXECUTION");
+
+ script->setBaselineScript(cx->runtime(), nullptr);
+
+ MethodStatus status = BaselineCompile(cx, script, /* forceDebugMode = */ observing);
+ if (status != Method_Compiled) {
+ // We will only fail to recompile for debug mode due to OOM. Restore
+ // the old baseline script in case something doesn't properly
+ // propagate OOM.
+ MOZ_ASSERT(status == Method_Error);
+ script->setBaselineScript(cx->runtime(), oldBaselineScript);
+ return false;
+ }
+
+ // Don't destroy the old baseline script yet, since if we fail any of the
+ // recompiles we need to rollback all the old baseline scripts.
+ MOZ_ASSERT(script->baselineScript()->hasDebugInstrumentation() == observing);
+ return true;
+}
+
+#define PATCHABLE_ICSTUB_KIND_LIST(_) \
+ _(Call_Scripted) \
+ _(Call_AnyScripted) \
+ _(Call_Native) \
+ _(Call_ClassHook) \
+ _(Call_ScriptedApplyArray) \
+ _(Call_ScriptedApplyArguments) \
+ _(Call_ScriptedFunCall) \
+ _(GetElem_NativePrototypeCallNativeName) \
+ _(GetElem_NativePrototypeCallNativeSymbol) \
+ _(GetElem_NativePrototypeCallScriptedName) \
+ _(GetElem_NativePrototypeCallScriptedSymbol) \
+ _(GetProp_CallScripted) \
+ _(GetProp_CallNative) \
+ _(GetProp_CallNativeGlobal) \
+ _(GetProp_CallDOMProxyNative) \
+ _(GetProp_CallDOMProxyWithGenerationNative) \
+ _(GetProp_DOMProxyShadowed) \
+ _(GetProp_Generic) \
+ _(SetProp_CallScripted) \
+ _(SetProp_CallNative)
+
+static bool
+CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t entryIndex)
+{
+ DebugModeOSREntry& entry = entries[entryIndex];
+ if (!entry.oldStub)
+ return true;
+
+ ICStub* oldStub = entry.oldStub;
+ MOZ_ASSERT(ICStub::CanMakeCalls(oldStub->kind()));
+
+ if (entry.frameKind == ICEntry::Kind_Invalid) {
+ // The exception handler can modify the frame's override pc while
+ // unwinding scopes. This is fine, but if we have a stub frame, the
+ // code below will get confused: the entry's pcOffset doesn't match the
+ // stub that's still on the stack. To prevent that, we just set the new
+ // stub to nullptr as we will never return to this stub frame anyway.
+ entry.newStub = nullptr;
+ return true;
+ }
+
+ // Get the new fallback stub from the recompiled baseline script.
+ ICFallbackStub* fallbackStub = entry.fallbackStub();
+
+ // We don't need to clone fallback stubs, as they are guaranteed to
+ // exist. Furthermore, their JitCode is cached and should be the same even
+ // across the recompile.
+ if (oldStub->isFallback()) {
+ MOZ_ASSERT(oldStub->jitCode() == fallbackStub->jitCode());
+ entry.newStub = fallbackStub;
+ return true;
+ }
+
+ // Check if we have already cloned the stub on a younger frame. Ignore
+ // frames that entered the exception handler (entries[i].newStub is nullptr
+ // in that case, see above).
+ for (size_t i = 0; i < entryIndex; i++) {
+ if (oldStub == entries[i].oldStub && entries[i].frameKind != ICEntry::Kind_Invalid) {
+ MOZ_ASSERT(entries[i].newStub);
+ entry.newStub = entries[i].newStub;
+ return true;
+ }
+ }
+
+ // Some stubs are monitored, get the first stub in the monitor chain from
+ // the new fallback stub if so.
+ ICStub* firstMonitorStub;
+ if (fallbackStub->isMonitoredFallback()) {
+ ICMonitoredFallbackStub* monitored = fallbackStub->toMonitoredFallbackStub();
+ firstMonitorStub = monitored->fallbackMonitorStub()->firstMonitorStub();
+ } else {
+ firstMonitorStub = nullptr;
+ }
+ ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForKind(oldStub->kind(), entry.script,
+ ICStubCompiler::Engine::Baseline);
+
+ // Clone the existing stub into the recompiled IC.
+ //
+ // Note that since JitCode is a GC thing, cloning an ICStub with the same
+ // JitCode ensures it won't be collected.
+ switch (oldStub->kind()) {
+#define CASE_KIND(kindName) \
+ case ICStub::kindName: \
+ entry.newStub = IC##kindName::Clone(cx, stubSpace, firstMonitorStub, \
+ *oldStub->to##kindName()); \
+ break;
+ PATCHABLE_ICSTUB_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
+
+ default:
+ MOZ_CRASH("Bad stub kind");
+ }
+
+ if (!entry.newStub)
+ return false;
+
+ fallbackStub->addNewStub(entry.newStub);
+ return true;
+}
+
+static bool
+InvalidateScriptsInZone(JSContext* cx, Zone* zone, const Vector<DebugModeOSREntry>& entries)
+{
+ RecompileInfoVector invalid;
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ JSScript* script = iter.entry().script;
+ if (script->compartment()->zone() != zone)
+ continue;
+
+ if (script->hasIonScript()) {
+ if (!invalid.append(script->ionScript()->recompileInfo())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Cancel off-thread Ion compile for anything that has a
+ // BaselineScript. If we relied on the call to Invalidate below to
+ // cancel off-thread Ion compiles, only those with existing IonScripts
+ // would be cancelled.
+ if (script->hasBaselineScript())
+ CancelOffThreadIonCompile(script);
+ }
+
+ // No need to cancel off-thread Ion compiles again, we already did it
+ // above.
+ Invalidate(zone->types, cx->runtime()->defaultFreeOp(), invalid,
+ /* resetUses = */ true, /* cancelOffThread = */ false);
+ return true;
+}
+
+static void
+UndoRecompileBaselineScriptsForDebugMode(JSContext* cx,
+ const DebugModeOSREntryVector& entries)
+{
+ // In case of failure, roll back the entire set of active scripts so that
+ // we don't have to patch return addresses on the stack.
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ const DebugModeOSREntry& entry = iter.entry();
+ JSScript* script = entry.script;
+ BaselineScript* baselineScript = script->baselineScript();
+ if (entry.recompiled()) {
+ script->setBaselineScript(cx->runtime(), entry.oldBaselineScript);
+ BaselineScript::Destroy(cx->runtime()->defaultFreeOp(), baselineScript);
+ }
+ }
+}
+
+bool
+jit::RecompileOnStackBaselineScriptsForDebugMode(JSContext* cx,
+ const Debugger::ExecutionObservableSet& obs,
+ Debugger::IsObserving observing)
+{
+ // First recompile the active scripts on the stack and patch the live
+ // frames.
+ Vector<DebugModeOSREntry> entries(cx);
+
+ for (ActivationIterator iter(cx->runtime()); !iter.done(); ++iter) {
+ if (iter->isJit()) {
+ if (!CollectJitStackScripts(cx, obs, iter, entries))
+ return false;
+ } else if (iter->isInterpreter()) {
+ if (!CollectInterpreterStackScripts(cx, obs, iter, entries))
+ return false;
+ }
+ }
+
+ if (entries.empty())
+ return true;
+
+ // When the profiler is enabled, sampling must already be suppressed,
+ // since the baseline JIT scripts are in a state of flux.
+ MOZ_ASSERT(!cx->runtime()->isProfilerSamplingEnabled());
+
+ // Invalidate all scripts we are recompiling.
+ if (Zone* zone = obs.singleZone()) {
+ if (!InvalidateScriptsInZone(cx, zone, entries))
+ return false;
+ } else {
+ typedef Debugger::ExecutionObservableSet::ZoneRange ZoneRange;
+ for (ZoneRange r = obs.zones()->all(); !r.empty(); r.popFront()) {
+ if (!InvalidateScriptsInZone(cx, r.front(), entries))
+ return false;
+ }
+ }
+
+ // Try to recompile all the scripts. If we encounter an error, we need to
+ // roll back as if none of the compilations happened, so that we don't
+ // crash.
+ for (size_t i = 0; i < entries.length(); i++) {
+ JSScript* script = entries[i].script;
+ AutoCompartment ac(cx, script->compartment());
+ if (!RecompileBaselineScriptForDebugMode(cx, script, observing) ||
+ !CloneOldBaselineStub(cx, entries, i))
+ {
+ UndoRecompileBaselineScriptsForDebugMode(cx, entries);
+ return false;
+ }
+ }
+
+ // If all recompiles succeeded, destroy the old baseline scripts and patch
+ // the live frames.
+ //
+ // After this point the function must be infallible.
+
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ const DebugModeOSREntry& entry = iter.entry();
+ if (entry.recompiled())
+ BaselineScript::Destroy(cx->runtime()->defaultFreeOp(), entry.oldBaselineScript);
+ }
+
+ size_t processed = 0;
+ for (ActivationIterator iter(cx->runtime()); !iter.done(); ++iter) {
+ if (iter->isJit())
+ PatchBaselineFramesForDebugMode(cx, obs, iter, entries, &processed);
+ else if (iter->isInterpreter())
+ SkipInterpreterFrameEntries(obs, iter, entries, &processed);
+ }
+ MOZ_ASSERT(processed == entries.length());
+
+ return true;
+}
+
+void
+BaselineDebugModeOSRInfo::popValueInto(PCMappingSlotInfo::SlotLocation loc, Value* vp)
+{
+ switch (loc) {
+ case PCMappingSlotInfo::SlotInR0:
+ valueR0 = vp[stackAdjust];
+ break;
+ case PCMappingSlotInfo::SlotInR1:
+ valueR1 = vp[stackAdjust];
+ break;
+ case PCMappingSlotInfo::SlotIgnore:
+ break;
+ default:
+ MOZ_CRASH("Bad slot location");
+ }
+
+ stackAdjust++;
+}
+
+static inline bool
+HasForcedReturn(BaselineDebugModeOSRInfo* info, bool rv)
+{
+ ICEntry::Kind kind = info->frameKind;
+
+ // The debug epilogue always checks its resumption value, so we don't need
+ // to check rv.
+ if (kind == ICEntry::Kind_DebugEpilogue)
+ return true;
+
+ // |rv| is the value in ReturnReg. In the prologue case, a true value
+ // means a forced return.
+ if (kind == ICEntry::Kind_DebugPrologue)
+ return rv;
+
+ // N.B. The debug trap handler handles its own forced return, so no
+ // need to deal with it here.
+ return false;
+}
+
+static inline bool
+IsReturningFromCallVM(BaselineDebugModeOSRInfo* info)
+{
+ // Keep this in sync with EmitBranchIsReturningFromCallVM.
+ //
+ // The stack check entries are returns from a callVM, but have a special
+ // kind because they do not exist in a 1-1 relationship with a pc offset.
+ return info->frameKind == ICEntry::Kind_CallVM ||
+ info->frameKind == ICEntry::Kind_WarmupCounter ||
+ info->frameKind == ICEntry::Kind_StackCheck ||
+ info->frameKind == ICEntry::Kind_EarlyStackCheck;
+}
+
+static void
+EmitBranchICEntryKind(MacroAssembler& masm, Register entry, ICEntry::Kind kind, Label* label)
+{
+ masm.branch32(MacroAssembler::Equal,
+ Address(entry, offsetof(BaselineDebugModeOSRInfo, frameKind)),
+ Imm32(kind), label);
+}
+
+static void
+EmitBranchIsReturningFromCallVM(MacroAssembler& masm, Register entry, Label* label)
+{
+ // Keep this in sync with IsReturningFromCallVM.
+ EmitBranchICEntryKind(masm, entry, ICEntry::Kind_CallVM, label);
+ EmitBranchICEntryKind(masm, entry, ICEntry::Kind_WarmupCounter, label);
+ EmitBranchICEntryKind(masm, entry, ICEntry::Kind_StackCheck, label);
+ EmitBranchICEntryKind(masm, entry, ICEntry::Kind_EarlyStackCheck, label);
+}
+
+static void
+SyncBaselineDebugModeOSRInfo(BaselineFrame* frame, Value* vp, bool rv)
+{
+ BaselineDebugModeOSRInfo* info = frame->debugModeOSRInfo();
+ MOZ_ASSERT(info);
+ MOZ_ASSERT(frame->script()->baselineScript()->containsCodeAddress(info->resumeAddr));
+
+ if (HasForcedReturn(info, rv)) {
+ // Load the frame's rval and overwrite the resume address to go to the
+ // epilogue.
+ MOZ_ASSERT(R0 == JSReturnOperand);
+ info->valueR0 = frame->returnValue();
+ info->resumeAddr = frame->script()->baselineScript()->epilogueEntryAddr();
+ return;
+ }
+
+ // Read stack values and make sure R0 and R1 have the right values if we
+ // aren't returning from a callVM.
+ //
+ // In the case of returning from a callVM, we don't need to restore R0 and
+ // R1 ourself since we'll return into code that does it if needed.
+ if (!IsReturningFromCallVM(info)) {
+ unsigned numUnsynced = info->slotInfo.numUnsynced();
+ MOZ_ASSERT(numUnsynced <= 2);
+ if (numUnsynced > 0)
+ info->popValueInto(info->slotInfo.topSlotLocation(), vp);
+ if (numUnsynced > 1)
+ info->popValueInto(info->slotInfo.nextSlotLocation(), vp);
+ }
+
+ // Scale stackAdjust from value slots to bytes; the OSR handler adds this
+ // amount directly to the stack pointer.
+ info->stackAdjust *= sizeof(Value);
+}
+
+static void
+FinishBaselineDebugModeOSR(BaselineFrame* frame)
+{
+ frame->deleteDebugModeOSRInfo();
+
+ // We will return to JIT code now so we have to clear the override pc.
+ frame->clearOverridePc();
+}
+
+void
+BaselineFrame::deleteDebugModeOSRInfo()
+{
+ js_delete(getDebugModeOSRInfo());
+ flags_ &= ~HAS_DEBUG_MODE_OSR_INFO;
+}
+
+JitCode*
+JitRuntime::getBaselineDebugModeOSRHandler(JSContext* cx)
+{
+ if (!baselineDebugModeOSRHandler_) {
+ AutoLockForExclusiveAccess lock(cx);
+ AutoCompartment ac(cx, cx->runtime()->atomsCompartment(lock), &lock);
+ uint32_t offset;
+ if (JitCode* code = generateBaselineDebugModeOSRHandler(cx, &offset)) {
+ baselineDebugModeOSRHandler_ = code;
+ baselineDebugModeOSRHandlerNoFrameRegPopAddr_ = code->raw() + offset;
+ }
+ }
+
+ return baselineDebugModeOSRHandler_;
+}
+
+void*
+JitRuntime::getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg)
+{
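+ // The handler has two entry points: its start pops BaselineFrameReg
+ // before doing anything else, while the no-pop entry (recorded as an
+ // offset when the handler was generated) skips that pop for callers that
+ // have already restored the frame register.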
+ if (!getBaselineDebugModeOSRHandler(cx))
+ return nullptr;
+ return popFrameReg
+ ? baselineDebugModeOSRHandler_->raw()
+ : baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
+}
+
+static void
+EmitBaselineDebugModeOSRHandlerTail(MacroAssembler& masm, Register temp, bool returnFromCallVM)
+{
+ // Save real return address on the stack temporarily.
+ //
+ // If we're returning from a callVM, we don't need to worry about R0 and
+ // R1 but do need to propagate the original ReturnReg value. Otherwise we
+ // need to worry about R0 and R1 but can clobber ReturnReg. Indeed, on
+ // x86, R1 contains ReturnReg.
+ if (returnFromCallVM) {
+ masm.push(ReturnReg);
+ } else {
+ masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR0)));
+ masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR1)));
+ }
+ masm.push(BaselineFrameReg);
+ masm.push(Address(temp, offsetof(BaselineDebugModeOSRInfo, resumeAddr)));
+
+ // Call a stub to free the allocated info.
+ masm.setupUnalignedABICall(temp);
+ masm.loadBaselineFramePtr(BaselineFrameReg, temp);
+ masm.passABIArg(temp);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBaselineDebugModeOSR));
+
+ // Restore saved values.
+ AllocatableGeneralRegisterSet jumpRegs(GeneralRegisterSet::All());
+ if (returnFromCallVM) {
+ jumpRegs.take(ReturnReg);
+ } else {
+ jumpRegs.take(R0);
+ jumpRegs.take(R1);
+ }
+ jumpRegs.take(BaselineFrameReg);
+ Register target = jumpRegs.takeAny();
+
+ masm.pop(target);
+ masm.pop(BaselineFrameReg);
+ if (returnFromCallVM) {
+ masm.pop(ReturnReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+
+ masm.jump(target);
+}
+
+JitCode*
+JitRuntime::generateBaselineDebugModeOSRHandler(JSContext* cx, uint32_t* noFrameRegPopOffsetOut)
+{
+ MacroAssembler masm(cx);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(BaselineFrameReg);
+ regs.take(ReturnReg);
+ Register temp = regs.takeAny();
+ Register syncedStackStart = regs.takeAny();
+
+ // Pop the frame reg.
+ masm.pop(BaselineFrameReg);
+
+ // Not all patched baseline frames are returning from a situation where
+ // the frame reg is already fixed up.
+ CodeOffset noFrameRegPopOffset(masm.currentOffset());
+
+ // Record the stack pointer for syncing.
+ masm.moveStackPtrTo(syncedStackStart);
+ masm.push(ReturnReg);
+ masm.push(BaselineFrameReg);
+
+ // Call a stub to fully initialize the info.
+ masm.setupUnalignedABICall(temp);
+ masm.loadBaselineFramePtr(BaselineFrameReg, temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(syncedStackStart);
+ masm.passABIArg(ReturnReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, SyncBaselineDebugModeOSRInfo));
+
+ // Discard stack values depending on how many were unsynced, as we always
+ // have a fully synced stack in the recompile handler. We arrive here via
+ // a callVM, and prepareCallVM in BaselineCompiler always fully syncs the
+ // stack.
+ masm.pop(BaselineFrameReg);
+ masm.pop(ReturnReg);
+ masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScratchValue()), temp);
+ masm.addToStackPtr(Address(temp, offsetof(BaselineDebugModeOSRInfo, stackAdjust)));
+
+ // Emit two tails for the case of returning from a callVM and all other
+ // cases, as the state we need to restore differs depending on the case.
+ Label returnFromCallVM, end;
+ EmitBranchIsReturningFromCallVM(masm, temp, &returnFromCallVM);
+
+ EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ false);
+ masm.jump(&end);
+ masm.bind(&returnFromCallVM);
+ EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ true);
+ masm.bind(&end);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BaselineDebugModeOSRHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!code)
+ return nullptr;
+
+ *noFrameRegPopOffsetOut = noFrameRegPopOffset.offset();
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BaselineDebugModeOSRHandler");
+#endif
+
+ return code;
+}
+
+/* static */ void
+DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(JSContext* cx,
+ uint8_t* oldAddr, uint8_t* newAddr)
+{
+ DebugModeOSRVolatileJitFrameIterator* iter;
+ for (iter = cx->liveVolatileJitFrameIterators_; iter; iter = iter->prev) {
+ if (iter->returnAddressToFp_ == oldAddr)
+ iter->returnAddressToFp_ = newAddr;
+ }
+}
diff --git a/js/src/jit/BaselineDebugModeOSR.h b/js/src/jit/BaselineDebugModeOSR.h
new file mode 100644
index 000000000..a7db0a600
--- /dev/null
+++ b/js/src/jit/BaselineDebugModeOSR.h
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineDebugModeOSR_h
+#define jit_BaselineDebugModeOSR_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitFrameIterator.h"
+
+#include "vm/Debugger.h"
+
+namespace js {
+namespace jit {
+
+// Note that this file and the corresponding .cpp implement debug mode
+// on-stack recompilation. This is to be distinguished from ordinary
+// Baseline->Ion OSR, which is used to jump into compiled loops.
+
+//
+// A volatile location due to recompilation of an on-stack baseline script
+// (e.g., for debug mode toggling).
+//
+// It is usually used in fallback stubs which may trigger on-stack
+// recompilation by calling out into the VM. Example use:
+//
+// DebugModeOSRVolatileStub<FallbackStubT*> stub(frame, stub_)
+//
+// // Call out to the VM
+// // Other effectful operations like TypeScript::Monitor
+//
+// if (stub.invalid())
+// return true;
+//
+// // First use of stub after VM call.
+//
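+// If the VM call triggered recompilation, the baseline script (and with it
+// the IC chain) has been replaced, so the stored stub pointer may be stale;
+// invalid() detects this by re-reading the fallback stub for the same pc
+// offset from the frame's current BaselineScript.
+//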
+template <typename T>
+class DebugModeOSRVolatileStub
+{
+ ICStubCompiler::Engine engine_;
+ T stub_;
+ BaselineFrame* frame_;
+ uint32_t pcOffset_;
+
+ public:
+ DebugModeOSRVolatileStub(ICStubCompiler::Engine engine, BaselineFrame* frame,
+ ICFallbackStub* stub)
+ : engine_(engine),
+ stub_(static_cast<T>(stub)),
+ frame_(frame),
+ pcOffset_(stub->icEntry()->pcOffset())
+ { }
+
+ DebugModeOSRVolatileStub(BaselineFrame* frame, ICFallbackStub* stub)
+ : engine_(ICStubCompiler::Engine::Baseline),
+ stub_(static_cast<T>(stub)),
+ frame_(frame),
+ pcOffset_(stub->icEntry()->pcOffset())
+ { }
+
+ bool invalid() const {
+ if (engine_ == ICStubCompiler::Engine::IonMonkey)
+ return stub_->invalid();
+ MOZ_ASSERT(!frame_->isHandlingException());
+ ICEntry& entry = frame_->script()->baselineScript()->icEntryFromPCOffset(pcOffset_);
+ return stub_ != entry.fallbackStub();
+ }
+
+ operator const T&() const { MOZ_ASSERT(!invalid()); return stub_; }
+ T operator->() const { MOZ_ASSERT(!invalid()); return stub_; }
+ T* address() { MOZ_ASSERT(!invalid()); return &stub_; }
+ const T* address() const { MOZ_ASSERT(!invalid()); return &stub_; }
+ T& get() { MOZ_ASSERT(!invalid()); return stub_; }
+ const T& get() const { MOZ_ASSERT(!invalid()); return stub_; }
+
+ bool operator!=(const T& other) const { MOZ_ASSERT(!invalid()); return stub_ != other; }
+ bool operator==(const T& other) const { MOZ_ASSERT(!invalid()); return stub_ == other; }
+};
+
+//
+// A JitFrameIterator that updates itself in case of recompilation of an
+// on-stack baseline script.
+//
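+// Frames patched during debug mode OSR get a new return address; any live
+// iterator of this kind that cached the old address is forwarded to the new
+// one via forwardLiveIterators.
+//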
+class DebugModeOSRVolatileJitFrameIterator : public JitFrameIterator
+{
+ DebugModeOSRVolatileJitFrameIterator** stack;
+ DebugModeOSRVolatileJitFrameIterator* prev;
+
+ public:
+ explicit DebugModeOSRVolatileJitFrameIterator(JSContext* cx)
+ : JitFrameIterator(cx)
+ {
+ stack = &cx->liveVolatileJitFrameIterators_;
+ prev = *stack;
+ *stack = this;
+ }
+
+ ~DebugModeOSRVolatileJitFrameIterator() {
+ MOZ_ASSERT(*stack == this);
+ *stack = prev;
+ }
+
+ static void forwardLiveIterators(JSContext* cx, uint8_t* oldAddr, uint8_t* newAddr);
+};
+
+//
+// Auxiliary info to help the DebugModeOSRHandler fix up state.
+//
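+// An instance is stashed in the frame's scratch value slot (flagged by
+// HAS_DEBUG_MODE_OSR_INFO) and consumed by the code generated in
+// generateBaselineDebugModeOSRHandler, which frees it again via
+// FinishBaselineDebugModeOSR.
+//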
+struct BaselineDebugModeOSRInfo
+{
+ uint8_t* resumeAddr;
+ jsbytecode* pc;
+ PCMappingSlotInfo slotInfo;
+ ICEntry::Kind frameKind;
+
+ // Filled in by SyncBaselineDebugModeOSRInfo.
+ uintptr_t stackAdjust;
+ Value valueR0;
+ Value valueR1;
+
+ BaselineDebugModeOSRInfo(jsbytecode* pc, ICEntry::Kind kind)
+ : resumeAddr(nullptr),
+ pc(pc),
+ slotInfo(0),
+ frameKind(kind),
+ stackAdjust(0),
+ valueR0(UndefinedValue()),
+ valueR1(UndefinedValue())
+ { }
+
+ void popValueInto(PCMappingSlotInfo::SlotLocation loc, Value* vp);
+};
+
+MOZ_MUST_USE bool
+RecompileOnStackBaselineScriptsForDebugMode(JSContext* cx,
+ const Debugger::ExecutionObservableSet& obs,
+ Debugger::IsObserving observing);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_BaselineDebugModeOSR_h
diff --git a/js/src/jit/BaselineFrame-inl.h b/js/src/jit/BaselineFrame-inl.h
new file mode 100644
index 000000000..6ea234eec
--- /dev/null
+++ b/js/src/jit/BaselineFrame-inl.h
@@ -0,0 +1,107 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrame_inl_h
+#define jit_BaselineFrame_inl_h
+
+#include "jit/BaselineFrame.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "vm/EnvironmentObject.h"
+
+#include "jsscriptinlines.h"
+
+#include "vm/EnvironmentObject-inl.h"
+
+namespace js {
+namespace jit {
+
+template <typename SpecificEnvironment>
+inline void
+BaselineFrame::pushOnEnvironmentChain(SpecificEnvironment& env)
+{
+ MOZ_ASSERT(*environmentChain() == env.enclosingEnvironment());
+ envChain_ = &env;
+ if (IsFrameInitialEnvironment(this, env))
+ flags_ |= HAS_INITIAL_ENV;
+}
+
+template <typename SpecificEnvironment>
+inline void
+BaselineFrame::popOffEnvironmentChain()
+{
+ MOZ_ASSERT(envChain_->is<SpecificEnvironment>());
+ envChain_ = &envChain_->as<SpecificEnvironment>().enclosingEnvironment();
+}
+
+inline void
+BaselineFrame::replaceInnermostEnvironment(EnvironmentObject& env)
+{
+ MOZ_ASSERT(env.enclosingEnvironment() ==
+ envChain_->as<EnvironmentObject>().enclosingEnvironment());
+ envChain_ = &env;
+}
+
+inline bool
+BaselineFrame::pushLexicalEnvironment(JSContext* cx, Handle<LexicalScope*> scope)
+{
+ LexicalEnvironmentObject* env = LexicalEnvironmentObject::create(cx, scope, this);
+ if (!env)
+ return false;
+ pushOnEnvironmentChain(*env);
+
+ return true;
+}
+
+inline bool
+BaselineFrame::freshenLexicalEnvironment(JSContext* cx)
+{
+ Rooted<LexicalEnvironmentObject*> current(cx, &envChain_->as<LexicalEnvironmentObject>());
+ LexicalEnvironmentObject* clone = LexicalEnvironmentObject::clone(cx, current);
+ if (!clone)
+ return false;
+
+ replaceInnermostEnvironment(*clone);
+ return true;
+}
+
+inline bool
+BaselineFrame::recreateLexicalEnvironment(JSContext* cx)
+{
+ Rooted<LexicalEnvironmentObject*> current(cx, &envChain_->as<LexicalEnvironmentObject>());
+ LexicalEnvironmentObject* clone = LexicalEnvironmentObject::recreate(cx, current);
+ if (!clone)
+ return false;
+
+ replaceInnermostEnvironment(*clone);
+ return true;
+}
+
+inline CallObject&
+BaselineFrame::callObj() const
+{
+ MOZ_ASSERT(hasInitialEnvironment());
+ MOZ_ASSERT(callee()->needsCallObject());
+
+ JSObject* obj = environmentChain();
+ while (!obj->is<CallObject>())
+ obj = obj->enclosingEnvironment();
+ return obj->as<CallObject>();
+}
+
+inline void
+BaselineFrame::unsetIsDebuggee()
+{
+ MOZ_ASSERT(!script()->isDebuggee());
+ flags_ &= ~DEBUGGEE;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrame_inl_h */
diff --git a/js/src/jit/BaselineFrame.cpp b/js/src/jit/BaselineFrame.cpp
new file mode 100644
index 000000000..6c9864ece
--- /dev/null
+++ b/js/src/jit/BaselineFrame.cpp
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineFrame-inl.h"
+
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "vm/Debugger.h"
+#include "vm/EnvironmentObject.h"
+
+#include "jit/JitFrames-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static void
+MarkLocals(BaselineFrame* frame, JSTracer* trc, unsigned start, unsigned end)
+{
+ if (start < end) {
+ // Stack grows down.
+ Value* last = frame->valueSlot(end - 1);
+ TraceRootRange(trc, end - start, last, "baseline-stack");
+ }
+}
+
+void
+BaselineFrame::trace(JSTracer* trc, JitFrameIterator& frameIterator)
+{
+ replaceCalleeToken(MarkCalleeToken(trc, calleeToken()));
+
+ // Mark |this|, actual and formal args.
+ if (isFunctionFrame()) {
+ TraceRoot(trc, &thisArgument(), "baseline-this");
+
+ unsigned numArgs = js::Max(numActualArgs(), numFormalArgs());
+ TraceRootRange(trc, numArgs + isConstructing(), argv(), "baseline-args");
+ }
+
+ // Mark environment chain, if it exists.
+ if (envChain_)
+ TraceRoot(trc, &envChain_, "baseline-envchain");
+
+ // Mark return value.
+ if (hasReturnValue())
+ TraceRoot(trc, returnValue().address(), "baseline-rval");
+
+ if (isEvalFrame() && script()->isDirectEvalInFunction())
+ TraceRoot(trc, evalNewTargetAddress(), "baseline-evalNewTarget");
+
+ if (hasArgsObj())
+ TraceRoot(trc, &argsObj_, "baseline-args-obj");
+
+ // Mark locals and stack values.
+ JSScript* script = this->script();
+ size_t nfixed = script->nfixed();
+ jsbytecode* pc;
+ frameIterator.baselineScriptAndPc(nullptr, &pc);
+ size_t nlivefixed = script->calculateLiveFixed(pc);
+
+ // NB: It is possible that numValueSlots() could be zero, even if nfixed is
+ // nonzero. This is the case if the function has an early stack check.
+ if (numValueSlots() == 0)
+ return;
+
+ MOZ_ASSERT(nfixed <= numValueSlots());
+
+ if (nfixed == nlivefixed) {
+ // All locals are live.
+ MarkLocals(this, trc, 0, numValueSlots());
+ } else {
+ // Mark operand stack.
+ MarkLocals(this, trc, nfixed, numValueSlots());
+
+ // Clear dead block-scoped locals.
+ while (nfixed > nlivefixed)
+ unaliasedLocal(--nfixed).setUndefined();
+
+ // Mark live locals.
+ MarkLocals(this, trc, 0, nlivefixed);
+ }
+
+ if (script->compartment()->debugEnvs)
+ script->compartment()->debugEnvs->markLiveFrame(trc, this);
+}
+
+bool
+BaselineFrame::isNonGlobalEvalFrame() const
+{
+ return isEvalFrame() && script()->enclosingScope()->as<EvalScope>().isNonGlobal();
+}
+
+bool
+BaselineFrame::initFunctionEnvironmentObjects(JSContext* cx)
+{
+ return js::InitFunctionEnvironmentObjects(cx, this);
+}
+
+bool
+BaselineFrame::pushVarEnvironment(JSContext* cx, HandleScope scope)
+{
+ return js::PushVarEnvironmentObject(cx, scope, this);
+}
+
+bool
+BaselineFrame::initForOsr(InterpreterFrame* fp, uint32_t numStackValues)
+{
+ mozilla::PodZero(this);
+
+ envChain_ = fp->environmentChain();
+
+ if (fp->hasInitialEnvironmentUnchecked())
+ flags_ |= BaselineFrame::HAS_INITIAL_ENV;
+
+ if (fp->script()->needsArgsObj() && fp->hasArgsObj()) {
+ flags_ |= BaselineFrame::HAS_ARGS_OBJ;
+ argsObj_ = &fp->argsObj();
+ }
+
+ if (fp->hasReturnValue())
+ setReturnValue(fp->returnValue());
+
+ frameSize_ = BaselineFrame::FramePointerOffset +
+ BaselineFrame::Size() +
+ numStackValues * sizeof(Value);
+
+ MOZ_ASSERT(numValueSlots() == numStackValues);
+
+ for (uint32_t i = 0; i < numStackValues; i++)
+ *valueSlot(i) = fp->slots()[i];
+
+ if (fp->isDebuggee()) {
+ JSContext* cx = GetJSContextFromMainThread();
+
+ // For debuggee frames, update any Debugger.Frame objects for the
+ // InterpreterFrame to point to the BaselineFrame.
+
+ // The caller pushed a fake return address. ScriptFrameIter, used by the
+ // debugger, wants a valid return address, but it's okay to just pick one.
+ // In debug mode there's always at least 1 ICEntry (since there are always
+ // debug prologue/epilogue calls).
+ JitFrameIterator iter(cx);
+ MOZ_ASSERT(iter.returnAddress() == nullptr);
+ BaselineScript* baseline = fp->script()->baselineScript();
+ iter.current()->setReturnAddress(baseline->returnAddressForIC(baseline->icEntry(0)));
+
+ if (!Debugger::handleBaselineOsr(cx, fp, this))
+ return false;
+
+ setIsDebuggee();
+ }
+
+ return true;
+}
diff --git a/js/src/jit/BaselineFrame.h b/js/src/jit/BaselineFrame.h
new file mode 100644
index 000000000..9a30cdcfc
--- /dev/null
+++ b/js/src/jit/BaselineFrame.h
@@ -0,0 +1,458 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrame_h
+#define jit_BaselineFrame_h
+
+#include "jit/JitFrames.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+struct BaselineDebugModeOSRInfo;
+
+// The stack looks like this, fp is the frame pointer:
+//
+// fp+y arguments
+// fp+x JitFrameLayout (frame header)
+// fp => saved frame pointer
+// fp-x BaselineFrame
+// locals
+// stack values
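+//
+// All BaselineFrame fields therefore live below the saved frame pointer and
+// are reached through negative offsets from it; see the reverseOffsetOf*
+// helpers at the end of the class.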
+
+class BaselineFrame
+{
+ public:
+ enum Flags : uint32_t {
+ // The frame has a valid return value. See also InterpreterFrame::HAS_RVAL.
+ HAS_RVAL = 1 << 0,
+
+ // An initial environment has been pushed on the environment chain for
+ // function frames that need a CallObject or eval frames that need a
+ // VarEnvironmentObject.
+ HAS_INITIAL_ENV = 1 << 2,
+
+ // Frame has an arguments object, argsObj_.
+ HAS_ARGS_OBJ = 1 << 4,
+
+ // See InterpreterFrame::PREV_UP_TO_DATE.
+ PREV_UP_TO_DATE = 1 << 5,
+
+ // Frame has execution observed by a Debugger.
+ //
+ // See comment above 'isDebuggee' in jscompartment.h for explanation of
+ // invariants of debuggee compartments, scripts, and frames.
+ DEBUGGEE = 1 << 6,
+
+ // (1 << 7 and 1 << 8 are unused)
+
+ // Frame has over-recursed on an early check.
+ OVER_RECURSED = 1 << 9,
+
+ // Frame has a BaselineRecompileInfo stashed in the scratch value
+ // slot. See PatchBaselineFramesForDebugMode.
+ HAS_DEBUG_MODE_OSR_INFO = 1 << 10,
+
+ // This flag is intended for use whenever the frame is settled on a
+ // native code address without a corresponding ICEntry. In this case,
+ // the frame contains an explicit bytecode offset for frame iterators.
+ //
+ // There can also be an override pc if the frame has had its
+ // environment chain unwound to a pc during exception handling that is
+ // different from its current pc.
+ //
+ // This flag should never be set when we're executing JIT code.
+ HAS_OVERRIDE_PC = 1 << 11,
+
+ // If set, we're handling an exception for this frame. This is set for
+ // debug mode OSR sanity checking when it handles corner cases which
+ // only arise during exception handling.
+ HANDLING_EXCEPTION = 1 << 12,
+
+ // If set, this frame has been on the stack when
+ // |js::SavedStacks::saveCurrentStack| was called, and so there is a
+ // |js::SavedFrame| object cached for this frame.
+ HAS_CACHED_SAVED_FRAME = 1 << 13
+ };
+
+ protected: // Silence Clang warning about unused private fields.
+ // We need to split the Value into two 32-bit fields; otherwise the C++
+ // compiler may add padding between the fields.
+
+ union {
+ struct {
+ uint32_t loScratchValue_;
+ uint32_t hiScratchValue_;
+ };
+ BaselineDebugModeOSRInfo* debugModeOSRInfo_;
+ };
+ uint32_t loReturnValue_; // If HAS_RVAL, the frame's return value.
+ uint32_t hiReturnValue_;
+ uint32_t frameSize_;
+ JSObject* envChain_; // Environment chain (always initialized).
+ ArgumentsObject* argsObj_; // If HAS_ARGS_OBJ, the arguments object.
+ uint32_t overrideOffset_; // If HAS_OVERRIDE_PC, the bytecode offset.
+ uint32_t flags_;
+
+ public:
+ // Distance between the frame pointer and the frame header (return address).
+ // This is the old frame pointer saved in the prologue.
+ static const uint32_t FramePointerOffset = sizeof(void*);
+
+ MOZ_MUST_USE bool initForOsr(InterpreterFrame* fp, uint32_t numStackValues);
+
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ void setFrameSize(uint32_t frameSize) {
+ frameSize_ = frameSize;
+ }
+ inline uint32_t* addressOfFrameSize() {
+ return &frameSize_;
+ }
+ JSObject* environmentChain() const {
+ return envChain_;
+ }
+ void setEnvironmentChain(JSObject* envChain) {
+ envChain_ = envChain;
+ }
+ inline JSObject** addressOfEnvironmentChain() {
+ return &envChain_;
+ }
+
+ inline Value* addressOfScratchValue() {
+ return reinterpret_cast<Value*>(&loScratchValue_);
+ }
+
+ template <typename SpecificEnvironment>
+ inline void pushOnEnvironmentChain(SpecificEnvironment& env);
+ template <typename SpecificEnvironment>
+ inline void popOffEnvironmentChain();
+ inline void replaceInnermostEnvironment(EnvironmentObject& env);
+
+ CalleeToken calleeToken() const {
+ uint8_t* pointer = (uint8_t*)this + Size() + offsetOfCalleeToken();
+ return *(CalleeToken*)pointer;
+ }
+ void replaceCalleeToken(CalleeToken token) {
+ uint8_t* pointer = (uint8_t*)this + Size() + offsetOfCalleeToken();
+ *(CalleeToken*)pointer = token;
+ }
+ bool isConstructing() const {
+ return CalleeTokenIsConstructing(calleeToken());
+ }
+ JSScript* script() const {
+ return ScriptFromCalleeToken(calleeToken());
+ }
+ JSFunction* callee() const {
+ return CalleeTokenToFunction(calleeToken());
+ }
+ Value calleev() const {
+ return ObjectValue(*callee());
+ }
+ size_t numValueSlots() const {
+ size_t size = frameSize();
+
+ MOZ_ASSERT(size >= BaselineFrame::FramePointerOffset + BaselineFrame::Size());
+ size -= BaselineFrame::FramePointerOffset + BaselineFrame::Size();
+
+ MOZ_ASSERT((size % sizeof(Value)) == 0);
+ return size / sizeof(Value);
+ }
+ Value* valueSlot(size_t slot) const {
+ MOZ_ASSERT(slot < numValueSlots());
+ return (Value*)this - (slot + 1);
+ }
+
+ Value& unaliasedFormal(unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) const {
+ MOZ_ASSERT(i < numFormalArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals() &&
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ Value& unaliasedActual(unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) const {
+ MOZ_ASSERT(i < numActualArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing && i < numFormalArgs(), !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ Value& unaliasedLocal(uint32_t i) const {
+ MOZ_ASSERT(i < script()->nfixed());
+ return *valueSlot(i);
+ }
+
+ unsigned numActualArgs() const {
+ return *(size_t*)(reinterpret_cast<const uint8_t*>(this) +
+ BaselineFrame::Size() +
+ offsetOfNumActualArgs());
+ }
+ unsigned numFormalArgs() const {
+ return script()->functionNonDelazifying()->nargs();
+ }
+ Value& thisArgument() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return *(Value*)(reinterpret_cast<const uint8_t*>(this) +
+ BaselineFrame::Size() +
+ offsetOfThis());
+ }
+ Value* argv() const {
+ return (Value*)(reinterpret_cast<const uint8_t*>(this) +
+ BaselineFrame::Size() +
+ offsetOfArg(0));
+ }
+
+ private:
+ Value* evalNewTargetAddress() const {
+ MOZ_ASSERT(isEvalFrame());
+ MOZ_ASSERT(script()->isDirectEvalInFunction());
+ return (Value*)(reinterpret_cast<const uint8_t*>(this) +
+ BaselineFrame::Size() +
+ offsetOfEvalNewTarget());
+ }
+
+ public:
+ Value newTarget() const {
+ if (isEvalFrame())
+ return *evalNewTargetAddress();
+ MOZ_ASSERT(isFunctionFrame());
+ if (callee()->isArrow())
+ return callee()->getExtendedSlot(FunctionExtended::ARROW_NEWTARGET_SLOT);
+ if (isConstructing()) {
+ return *(Value*)(reinterpret_cast<const uint8_t*>(this) +
+ BaselineFrame::Size() +
+ offsetOfArg(Max(numFormalArgs(), numActualArgs())));
+ }
+ return UndefinedValue();
+ }
+
+ bool hasReturnValue() const {
+ return flags_ & HAS_RVAL;
+ }
+ MutableHandleValue returnValue() {
+ if (!hasReturnValue())
+ addressOfReturnValue()->setUndefined();
+ return MutableHandleValue::fromMarkedLocation(addressOfReturnValue());
+ }
+ void setReturnValue(const Value& v) {
+ returnValue().set(v);
+ flags_ |= HAS_RVAL;
+ }
+ inline Value* addressOfReturnValue() {
+ return reinterpret_cast<Value*>(&loReturnValue_);
+ }
+
+ bool hasInitialEnvironment() const {
+ return flags_ & HAS_INITIAL_ENV;
+ }
+
+ inline CallObject& callObj() const;
+
+ void setFlags(uint32_t flags) {
+ flags_ = flags;
+ }
+ uint32_t* addressOfFlags() {
+ return &flags_;
+ }
+
+ inline MOZ_MUST_USE bool pushLexicalEnvironment(JSContext* cx, Handle<LexicalScope*> scope);
+ inline MOZ_MUST_USE bool freshenLexicalEnvironment(JSContext* cx);
+ inline MOZ_MUST_USE bool recreateLexicalEnvironment(JSContext* cx);
+
+ MOZ_MUST_USE bool initFunctionEnvironmentObjects(JSContext* cx);
+ MOZ_MUST_USE bool pushVarEnvironment(JSContext* cx, HandleScope scope);
+
+ void initArgsObjUnchecked(ArgumentsObject& argsobj) {
+ flags_ |= HAS_ARGS_OBJ;
+ argsObj_ = &argsobj;
+ }
+ void initArgsObj(ArgumentsObject& argsobj) {
+ MOZ_ASSERT(script()->needsArgsObj());
+ initArgsObjUnchecked(argsobj);
+ }
+ bool hasArgsObj() const {
+ return flags_ & HAS_ARGS_OBJ;
+ }
+ ArgumentsObject& argsObj() const {
+ MOZ_ASSERT(hasArgsObj());
+ MOZ_ASSERT(script()->needsArgsObj());
+ return *argsObj_;
+ }
+
+ bool prevUpToDate() const {
+ return flags_ & PREV_UP_TO_DATE;
+ }
+ void setPrevUpToDate() {
+ flags_ |= PREV_UP_TO_DATE;
+ }
+ void unsetPrevUpToDate() {
+ flags_ &= ~PREV_UP_TO_DATE;
+ }
+
+ bool isDebuggee() const {
+ return flags_ & DEBUGGEE;
+ }
+ void setIsDebuggee() {
+ flags_ |= DEBUGGEE;
+ }
+ inline void unsetIsDebuggee();
+
+ bool isHandlingException() const {
+ return flags_ & HANDLING_EXCEPTION;
+ }
+ void setIsHandlingException() {
+ flags_ |= HANDLING_EXCEPTION;
+ }
+ void unsetIsHandlingException() {
+ flags_ &= ~HANDLING_EXCEPTION;
+ }
+
+ bool hasCachedSavedFrame() const {
+ return flags_ & HAS_CACHED_SAVED_FRAME;
+ }
+ void setHasCachedSavedFrame() {
+ flags_ |= HAS_CACHED_SAVED_FRAME;
+ }
+
+ bool overRecursed() const {
+ return flags_ & OVER_RECURSED;
+ }
+
+ void setOverRecursed() {
+ flags_ |= OVER_RECURSED;
+ }
+
+ BaselineDebugModeOSRInfo* debugModeOSRInfo() {
+ MOZ_ASSERT(flags_ & HAS_DEBUG_MODE_OSR_INFO);
+ return debugModeOSRInfo_;
+ }
+
+ BaselineDebugModeOSRInfo* getDebugModeOSRInfo() {
+ if (flags_ & HAS_DEBUG_MODE_OSR_INFO)
+ return debugModeOSRInfo();
+ return nullptr;
+ }
+
+ void setDebugModeOSRInfo(BaselineDebugModeOSRInfo* info) {
+ flags_ |= HAS_DEBUG_MODE_OSR_INFO;
+ debugModeOSRInfo_ = info;
+ }
+
+ void deleteDebugModeOSRInfo();
+
+ // See the HAS_OVERRIDE_PC comment.
+ bool hasOverridePc() const {
+ return flags_ & HAS_OVERRIDE_PC;
+ }
+
+ jsbytecode* overridePc() const {
+ MOZ_ASSERT(hasOverridePc());
+ return script()->offsetToPC(overrideOffset_);
+ }
+
+ jsbytecode* maybeOverridePc() const {
+ if (hasOverridePc())
+ return overridePc();
+ return nullptr;
+ }
+
+ void setOverridePc(jsbytecode* pc) {
+ flags_ |= HAS_OVERRIDE_PC;
+ overrideOffset_ = script()->pcToOffset(pc);
+ }
+
+ void clearOverridePc() {
+ flags_ &= ~HAS_OVERRIDE_PC;
+ }
+
+ void trace(JSTracer* trc, JitFrameIterator& frame);
+
+ bool isGlobalFrame() const {
+ return script()->isGlobalCode();
+ }
+ bool isModuleFrame() const {
+ return script()->module();
+ }
+ bool isEvalFrame() const {
+ return script()->isForEval();
+ }
+ bool isStrictEvalFrame() const {
+ return isEvalFrame() && script()->strict();
+ }
+ bool isNonStrictEvalFrame() const {
+ return isEvalFrame() && !script()->strict();
+ }
+ bool isNonGlobalEvalFrame() const;
+ bool isNonStrictDirectEvalFrame() const {
+ return isNonStrictEvalFrame() && isNonGlobalEvalFrame();
+ }
+ bool isFunctionFrame() const {
+ return CalleeTokenIsFunction(calleeToken());
+ }
+ bool isDebuggerEvalFrame() const {
+ return false;
+ }
+
+ JitFrameLayout* framePrefix() const {
+ uint8_t* fp = (uint8_t*)this + Size() + FramePointerOffset;
+ return (JitFrameLayout*)fp;
+ }
+
+ // Methods below are used by the compiler.
+ static size_t offsetOfCalleeToken() {
+ return FramePointerOffset + js::jit::JitFrameLayout::offsetOfCalleeToken();
+ }
+ static size_t offsetOfThis() {
+ return FramePointerOffset + js::jit::JitFrameLayout::offsetOfThis();
+ }
+ static size_t offsetOfEvalNewTarget() {
+ return FramePointerOffset + js::jit::JitFrameLayout::offsetOfEvalNewTarget();
+ }
+ static size_t offsetOfArg(size_t index) {
+ return FramePointerOffset + js::jit::JitFrameLayout::offsetOfActualArg(index);
+ }
+ static size_t offsetOfNumActualArgs() {
+ return FramePointerOffset + js::jit::JitFrameLayout::offsetOfNumActualArgs();
+ }
+ static size_t Size() {
+ return sizeof(BaselineFrame);
+ }
+
+ // The reverseOffsetOf methods below compute the offset relative to the
+ // frame's base pointer. Since the stack grows down, these offsets are
+ // negative.
+ static int reverseOffsetOfFrameSize() {
+ return -int(Size()) + offsetof(BaselineFrame, frameSize_);
+ }
+ static int reverseOffsetOfScratchValue() {
+ return -int(Size()) + offsetof(BaselineFrame, loScratchValue_);
+ }
+ static int reverseOffsetOfEnvironmentChain() {
+ return -int(Size()) + offsetof(BaselineFrame, envChain_);
+ }
+ static int reverseOffsetOfArgsObj() {
+ return -int(Size()) + offsetof(BaselineFrame, argsObj_);
+ }
+ static int reverseOffsetOfFlags() {
+ return -int(Size()) + offsetof(BaselineFrame, flags_);
+ }
+ static int reverseOffsetOfReturnValue() {
+ return -int(Size()) + offsetof(BaselineFrame, loReturnValue_);
+ }
+ static int reverseOffsetOfLocal(size_t index) {
+ return -int(Size()) - (index + 1) * sizeof(Value);
+ }
+};
+
+// Ensure the frame is 8-byte aligned (required on ARM).
+JS_STATIC_ASSERT(((sizeof(BaselineFrame) + BaselineFrame::FramePointerOffset) % 8) == 0);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrame_h */
diff --git a/js/src/jit/BaselineFrameInfo-inl.h b/js/src/jit/BaselineFrameInfo-inl.h
new file mode 100644
index 000000000..699870904
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo-inl.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrameInfo_inl_h
+#define jit_BaselineFrameInfo_inl_h
+
+namespace js {
+namespace jit {
+
+void
+FrameInfo::pop(StackAdjustment adjust)
+{
+ spIndex--;
+ StackValue* popped = &stack[spIndex];
+
+ if (adjust == AdjustStack && popped->kind() == StackValue::Stack)
+ masm.addToStackPtr(Imm32(sizeof(Value)));
+ // Reset the popped value so debug assertions fire if anything uses it.
+ popped->reset();
+}
+
+void
+FrameInfo::popn(uint32_t n, StackAdjustment adjust)
+{
+ uint32_t poppedStack = 0;
+ for (uint32_t i = 0; i < n; i++) {
+ if (peek(-1)->kind() == StackValue::Stack)
+ poppedStack++;
+ pop(DontAdjustStack);
+ }
+ if (adjust == AdjustStack && poppedStack > 0)
+ masm.addToStackPtr(Imm32(sizeof(Value) * poppedStack));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrameInfo_inl_h */
diff --git a/js/src/jit/BaselineFrameInfo.cpp b/js/src/jit/BaselineFrameInfo.cpp
new file mode 100644
index 000000000..9b9b991b6
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo.cpp
@@ -0,0 +1,196 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineFrameInfo.h"
+
+#ifdef DEBUG
+# include "jit/BytecodeAnalysis.h"
+#endif
+
+#include "jit/BaselineFrameInfo-inl.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool
+FrameInfo::init(TempAllocator& alloc)
+{
+ // An extra slot is needed for global scopes because INITGLEXICAL (stack
+ // depth 1) is compiled as a SETPROP (stack depth 2) on the global lexical
+ // scope.
+ size_t extra = script->isGlobalCode() ? 1 : 0;
+ size_t nstack = Max(script->nslots() - script->nfixed(), size_t(MinJITStackSize)) + extra;
+ if (!stack.init(alloc, nstack))
+ return false;
+
+ return true;
+}
+
+void
+FrameInfo::sync(StackValue* val)
+{
+ switch (val->kind()) {
+ case StackValue::Stack:
+ break;
+ case StackValue::LocalSlot:
+ masm.pushValue(addressOfLocal(val->localSlot()));
+ break;
+ case StackValue::ArgSlot:
+ masm.pushValue(addressOfArg(val->argSlot()));
+ break;
+ case StackValue::ThisSlot:
+ masm.pushValue(addressOfThis());
+ break;
+ case StackValue::EvalNewTargetSlot:
+ MOZ_ASSERT(script->isForEval());
+ masm.pushValue(addressOfEvalNewTarget());
+ break;
+ case StackValue::Register:
+ masm.pushValue(val->reg());
+ break;
+ case StackValue::Constant:
+ masm.pushValue(val->constant());
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+
+ val->setStack();
+}
+
+void
+FrameInfo::syncStack(uint32_t uses)
+{
+ MOZ_ASSERT(uses <= stackDepth());
+
+ uint32_t depth = stackDepth() - uses;
+
+ for (uint32_t i = 0; i < depth; i++) {
+ StackValue* current = &stack[i];
+ sync(current);
+ }
+}
+
+uint32_t
+FrameInfo::numUnsyncedSlots()
+{
+ // Start at the bottom, find the first value that's not synced.
+ uint32_t i = 0;
+ for (; i < stackDepth(); i++) {
+ if (peek(-int32_t(i + 1))->kind() == StackValue::Stack)
+ break;
+ }
+ return i;
+}
+
+void
+FrameInfo::popValue(ValueOperand dest)
+{
+ StackValue* val = peek(-1);
+
+ switch (val->kind()) {
+ case StackValue::Constant:
+ masm.moveValue(val->constant(), dest);
+ break;
+ case StackValue::LocalSlot:
+ masm.loadValue(addressOfLocal(val->localSlot()), dest);
+ break;
+ case StackValue::ArgSlot:
+ masm.loadValue(addressOfArg(val->argSlot()), dest);
+ break;
+ case StackValue::ThisSlot:
+ masm.loadValue(addressOfThis(), dest);
+ break;
+ case StackValue::EvalNewTargetSlot:
+ masm.loadValue(addressOfEvalNewTarget(), dest);
+ break;
+ case StackValue::Stack:
+ masm.popValue(dest);
+ break;
+ case StackValue::Register:
+ masm.moveValue(val->reg(), dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+
+ // masm.popValue already adjusted the stack pointer; don't adjust it twice.
+ pop(DontAdjustStack);
+}
+
+void
+FrameInfo::popRegsAndSync(uint32_t uses)
+{
+ // x86 has only 3 Value registers. Only support 2 regs here for now,
+ // so that there's always a scratch Value register for reg -> reg
+ // moves.
+ MOZ_ASSERT(uses > 0);
+ MOZ_ASSERT(uses <= 2);
+ MOZ_ASSERT(uses <= stackDepth());
+
+ syncStack(uses);
+
+ switch (uses) {
+ case 1:
+ popValue(R0);
+ break;
+ case 2: {
+ // If the second value is in R1, move it to R2 so that it's not
+ // clobbered by the first popValue.
+ StackValue* val = peek(-2);
+ if (val->kind() == StackValue::Register && val->reg() == R1) {
+ masm.moveValue(R1, R2);
+ val->setRegister(R2);
+ }
+ popValue(R1);
+ popValue(R0);
+ break;
+ }
+ default:
+ MOZ_CRASH("Invalid uses");
+ }
+}
+
+#ifdef DEBUG
+void
+FrameInfo::assertValidState(const BytecodeInfo& info)
+{
+ // Check stack depth.
+ MOZ_ASSERT(stackDepth() == info.stackDepth);
+
+ // Start at the bottom, find the first value that's not synced.
+ uint32_t i = 0;
+ for (; i < stackDepth(); i++) {
+ if (stack[i].kind() != StackValue::Stack)
+ break;
+ }
+
+ // Assert all values on top of it are also not synced.
+ for (; i < stackDepth(); i++)
+ MOZ_ASSERT(stack[i].kind() != StackValue::Stack);
+
+ // Assert every Value register is used by at most one StackValue.
+ // R2 is used as scratch register by the compiler and FrameInfo,
+ // so it shouldn't be used for StackValues.
+ bool usedR0 = false, usedR1 = false;
+
+ for (i = 0; i < stackDepth(); i++) {
+ if (stack[i].kind() == StackValue::Register) {
+ ValueOperand reg = stack[i].reg();
+ if (reg == R0) {
+ MOZ_ASSERT(!usedR0);
+ usedR0 = true;
+ } else if (reg == R1) {
+ MOZ_ASSERT(!usedR1);
+ usedR1 = true;
+ } else {
+ MOZ_CRASH("Invalid register");
+ }
+ }
+ }
+}
+#endif
diff --git a/js/src/jit/BaselineFrameInfo.h b/js/src/jit/BaselineFrameInfo.h
new file mode 100644
index 000000000..13bf0358d
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo.h
@@ -0,0 +1,315 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrameInfo_h
+#define jit_BaselineFrameInfo_h
+
+#include "mozilla/Alignment.h"
+
+#include "jit/BaselineFrame.h"
+#include "jit/FixedList.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+struct BytecodeInfo;
+
+// FrameInfo overview.
+//
+// FrameInfo is used by the compiler to track values stored in the frame. This
+// includes locals, arguments and stack values. Locals and arguments are always
+// fully synced. Stack values can either be synced, stored as a constant, stored
+// in a Value register, or refer to a frame slot (a local, an argument, |this|,
+// or the eval new.target). Syncing a StackValue ensures it's stored on the
+// stack, i.e. kind == Stack.
+//
+// To see how this works, consider the following statement:
+//
+// var y = x + 9;
+//
+// Here two values are pushed: StackValue(LocalSlot(0)) and StackValue(Int32Value(9)).
+// Only when we reach the ADD op, code is generated to load the operands directly
+// into the right operand registers and sync all other stack values.
+//
+// For stack values, the following invariants hold (and are checked between ops):
+//
+// (1) If a value is synced (kind == Stack), all values below it must also be synced.
+// In other words, values with kind other than Stack can only appear on top of the
+// abstract stack.
+//
+// (2) When we call a stub or IC, all values still on the stack must be synced.
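+//
+// As a rough sketch (not the exact bytecode or generated code), the example
+// above evolves the abstract stack like this:
+//
+//   GETLOCAL x   -> push StackValue(LocalSlot(x))        (not synced)
+//   INT8 9       -> push StackValue(Constant(Int32(9)))  (not synced)
+//   ADD          -> popRegsAndSync(2): load the operands into R0/R1 and
+//                   sync anything still below them
+//   SETLOCAL y   -> write the result into local slot y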
+
+// Represents a value pushed on the stack. Note that StackValue is not used for
+// locals or arguments since these are always fully synced.
+class StackValue
+{
+ public:
+ enum Kind {
+ Constant,
+ Register,
+ Stack,
+ LocalSlot,
+ ArgSlot,
+ ThisSlot,
+ EvalNewTargetSlot
+#ifdef DEBUG
+ // In debug builds, assert Kind is initialized.
+ , Uninitialized
+#endif
+ };
+
+ private:
+ Kind kind_;
+
+ union {
+ struct {
+ Value v;
+ } constant;
+ struct {
+ mozilla::AlignedStorage2<ValueOperand> reg;
+ } reg;
+ struct {
+ uint32_t slot;
+ } local;
+ struct {
+ uint32_t slot;
+ } arg;
+ } data;
+
+ JSValueType knownType_;
+
+ public:
+ StackValue() {
+ reset();
+ }
+
+ Kind kind() const {
+ return kind_;
+ }
+ bool hasKnownType() const {
+ return knownType_ != JSVAL_TYPE_UNKNOWN;
+ }
+ bool hasKnownType(JSValueType type) const {
+ MOZ_ASSERT(type != JSVAL_TYPE_UNKNOWN);
+ return knownType_ == type;
+ }
+ bool isKnownBoolean() const {
+ return hasKnownType(JSVAL_TYPE_BOOLEAN);
+ }
+ JSValueType knownType() const {
+ MOZ_ASSERT(hasKnownType());
+ return knownType_;
+ }
+ void reset() {
+#ifdef DEBUG
+ kind_ = Uninitialized;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+#endif
+ }
+ Value constant() const {
+ MOZ_ASSERT(kind_ == Constant);
+ return data.constant.v;
+ }
+ ValueOperand reg() const {
+ MOZ_ASSERT(kind_ == Register);
+ return *data.reg.reg.addr();
+ }
+ uint32_t localSlot() const {
+ MOZ_ASSERT(kind_ == LocalSlot);
+ return data.local.slot;
+ }
+ uint32_t argSlot() const {
+ MOZ_ASSERT(kind_ == ArgSlot);
+ return data.arg.slot;
+ }
+
+ void setConstant(const Value& v) {
+ kind_ = Constant;
+ data.constant.v = v;
+ knownType_ = v.isDouble() ? JSVAL_TYPE_DOUBLE : v.extractNonDoubleType();
+ }
+ void setRegister(const ValueOperand& val, JSValueType knownType = JSVAL_TYPE_UNKNOWN) {
+ kind_ = Register;
+ *data.reg.reg.addr() = val;
+ knownType_ = knownType;
+ }
+ void setLocalSlot(uint32_t slot) {
+ kind_ = LocalSlot;
+ data.local.slot = slot;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setArgSlot(uint32_t slot) {
+ kind_ = ArgSlot;
+ data.arg.slot = slot;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setThis() {
+ kind_ = ThisSlot;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setEvalNewTarget() {
+ kind_ = EvalNewTargetSlot;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setStack() {
+ kind_ = Stack;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+};
+
+enum StackAdjustment { AdjustStack, DontAdjustStack };
+
+class FrameInfo
+{
+ JSScript* script;
+ MacroAssembler& masm;
+
+ FixedList<StackValue> stack;
+ size_t spIndex;
+
+ public:
+ FrameInfo(JSScript* script, MacroAssembler& masm)
+ : script(script),
+ masm(masm),
+ stack(),
+ spIndex(0)
+ { }
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+
+ size_t nlocals() const {
+ return script->nfixed();
+ }
+ size_t nargs() const {
+ return script->functionNonDelazifying()->nargs();
+ }
+
+ private:
+ inline StackValue* rawPush() {
+ StackValue* val = &stack[spIndex++];
+ val->reset();
+ return val;
+ }
+
+ public:
+ inline size_t stackDepth() const {
+ return spIndex;
+ }
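+ // Used when the stack depth is reset to a known value (e.g. at jump
+ // targets), where the stack is fully synced; entries materialized here
+ // are therefore marked as already being on the stack.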
+ inline void setStackDepth(uint32_t newDepth) {
+ if (newDepth <= stackDepth()) {
+ spIndex = newDepth;
+ } else {
+ uint32_t diff = newDepth - stackDepth();
+ for (uint32_t i = 0; i < diff; i++) {
+ StackValue* val = rawPush();
+ val->setStack();
+ }
+
+ MOZ_ASSERT(spIndex == newDepth);
+ }
+ }
+ inline StackValue* peek(int32_t index) const {
+ MOZ_ASSERT(index < 0);
+ return const_cast<StackValue*>(&stack[spIndex + index]);
+ }
+
+ inline void pop(StackAdjustment adjust = AdjustStack);
+ inline void popn(uint32_t n, StackAdjustment adjust = AdjustStack);
+ inline void push(const Value& val) {
+ StackValue* sv = rawPush();
+ sv->setConstant(val);
+ }
+ inline void push(const ValueOperand& val, JSValueType knownType=JSVAL_TYPE_UNKNOWN) {
+ StackValue* sv = rawPush();
+ sv->setRegister(val, knownType);
+ }
+ inline void pushLocal(uint32_t local) {
+ MOZ_ASSERT(local < nlocals());
+ StackValue* sv = rawPush();
+ sv->setLocalSlot(local);
+ }
+ inline void pushArg(uint32_t arg) {
+ StackValue* sv = rawPush();
+ sv->setArgSlot(arg);
+ }
+ inline void pushThis() {
+ StackValue* sv = rawPush();
+ sv->setThis();
+ }
+ inline void pushEvalNewTarget() {
+ MOZ_ASSERT(script->isForEval());
+ StackValue* sv = rawPush();
+ sv->setEvalNewTarget();
+ }
+
+ inline void pushScratchValue() {
+ masm.pushValue(addressOfScratchValue());
+ StackValue* sv = rawPush();
+ sv->setStack();
+ }
+ inline Address addressOfLocal(size_t local) const {
+ MOZ_ASSERT(local < nlocals());
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfLocal(local));
+ }
+ Address addressOfArg(size_t arg) const {
+ MOZ_ASSERT(arg < nargs());
+ return Address(BaselineFrameReg, BaselineFrame::offsetOfArg(arg));
+ }
+ Address addressOfThis() const {
+ return Address(BaselineFrameReg, BaselineFrame::offsetOfThis());
+ }
+ Address addressOfEvalNewTarget() const {
+ return Address(BaselineFrameReg, BaselineFrame::offsetOfEvalNewTarget());
+ }
+ Address addressOfCalleeToken() const {
+ return Address(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
+ }
+ Address addressOfEnvironmentChain() const {
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfEnvironmentChain());
+ }
+ Address addressOfFlags() const {
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags());
+ }
+ Address addressOfReturnValue() const {
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue());
+ }
+ Address addressOfArgsObj() const {
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj());
+ }
+ Address addressOfStackValue(const StackValue* value) const {
+ MOZ_ASSERT(value->kind() == StackValue::Stack);
+ size_t slot = value - &stack[0];
+ MOZ_ASSERT(slot < stackDepth());
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfLocal(nlocals() + slot));
+ }
+ Address addressOfScratchValue() const {
+ return Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScratchValue());
+ }
+
+ void popValue(ValueOperand dest);
+
+ void sync(StackValue* val);
+ void syncStack(uint32_t uses);
+ uint32_t numUnsyncedSlots();
+ void popRegsAndSync(uint32_t uses);
+
+ inline void assertSyncedStack() const {
+ MOZ_ASSERT_IF(stackDepth() > 0, peek(-1)->kind() == StackValue::Stack);
+ }
+
+#ifdef DEBUG
+ // Assert the state is valid before excuting "pc".
+ void assertValidState(const BytecodeInfo& info);
+#else
+ inline void assertValidState(const BytecodeInfo& info) {}
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrameInfo_h */
diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
new file mode 100644
index 000000000..863c61161
--- /dev/null
+++ b/js/src/jit/BaselineIC.cpp
@@ -0,0 +1,8719 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+#include "mozilla/TemplateLib.h"
+
+#include "jslibmath.h"
+#include "jstypes.h"
+
+#include "builtin/Eval.h"
+#include "builtin/SIMD.h"
+#include "gc/Policy.h"
+#include "jit/BaselineDebugModeOSR.h"
+#include "jit/BaselineJIT.h"
+#include "jit/InlinableNatives.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/Lowering.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/SharedICHelpers.h"
+#include "jit/VMFunctions.h"
+#include "js/Conversions.h"
+#include "js/GCVector.h"
+#include "vm/Opcodes.h"
+#include "vm/SelfHosting.h"
+#include "vm/TypedArrayCommon.h"
+#include "vm/TypedArrayObject.h"
+
+#include "jsboolinlines.h"
+#include "jsscriptinlines.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/StringObject-inl.h"
+#include "vm/UnboxedObject-inl.h"
+
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+//
+// WarmUpCounter_Fallback
+//
+
+
+//
+// The following data is kept in a temporary heap-allocated buffer, stored in
+// JitRuntime (high memory addresses at top, low at bottom):
+//
+// +----->+=================================+ -- <---- High Address
+// | | | |
+// | | ...BaselineFrame... | |-- Copy of BaselineFrame + stack values
+// | | | |
+// | +---------------------------------+ |
+// | | | |
+// | | ...Locals/Stack... | |
+// | | | |
+// | +=================================+ --
+// | | Padding(Maybe Empty) |
+// | +=================================+ --
+// +------|-- baselineFrame | |-- IonOsrTempData
+// | jitcode | |
+// +=================================+ -- <---- Low Address
+//
+// A pointer to the IonOsrTempData is returned.
+
+struct IonOsrTempData
+{
+ void* jitcode;
+ uint8_t* baselineFrame;
+};
+
+static IonOsrTempData*
+PrepareOsrTempData(JSContext* cx, ICWarmUpCounter_Fallback* stub, BaselineFrame* frame,
+ HandleScript script, jsbytecode* pc, void* jitcode)
+{
+ size_t numLocalsAndStackVals = frame->numValueSlots();
+
+ // Calculate the amount of space to allocate:
+ // BaselineFrame space:
+ // (sizeof(Value) * (numLocals + numStackVals))
+ // + sizeof(BaselineFrame)
+ //
+ // IonOsrTempData space:
+ // sizeof(IonOsrTempData)
+
+ size_t frameSpace = sizeof(BaselineFrame) + sizeof(Value) * numLocalsAndStackVals;
+ size_t ionOsrTempDataSpace = sizeof(IonOsrTempData);
+
+ size_t totalSpace = AlignBytes(frameSpace, sizeof(Value)) +
+ AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+
+ IonOsrTempData* info = (IonOsrTempData*)cx->runtime()->getJitRuntime(cx)->allocateOsrTempData(totalSpace);
+ if (!info)
+ return nullptr;
+
+ memset(info, 0, totalSpace);
+
+ info->jitcode = jitcode;
+
+ // Copy the BaselineFrame + local/stack Values to the buffer. Arguments and
+ // |this| are not copied but left on the stack: the Baseline and Ion frame
+ // share the same frame prefix and Ion won't clobber these values. Note
+ // that info->baselineFrame will point to the *end* of the frame data, like
+ // the frame pointer register in baseline frames.
+ uint8_t* frameStart = (uint8_t*)info + AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+ info->baselineFrame = frameStart + frameSpace;
+
+ memcpy(frameStart, (uint8_t*)frame - numLocalsAndStackVals * sizeof(Value), frameSpace);
+
+ JitSpew(JitSpew_BaselineOSR, "Allocated IonOsrTempData at %p", (void*) info);
+ JitSpew(JitSpew_BaselineOSR, "Jitcode is %p", info->jitcode);
+
+ // All done.
+ return info;
+}
+
+static bool
+DoWarmUpCounterFallbackOSR(JSContext* cx, BaselineFrame* frame, ICWarmUpCounter_Fallback* stub,
+ IonOsrTempData** infoPtr)
+{
+ MOZ_ASSERT(infoPtr);
+ *infoPtr = nullptr;
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+
+ FallbackICSpew(cx, stub, "WarmUpCounter(%d)", int(script->pcToOffset(pc)));
+
+ if (!IonCompileScriptForBaseline(cx, frame, pc))
+ return false;
+
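+ // If Ion compilation did not produce a script we can OSR into at this
+ // loop entry (or the frame is a debuggee frame), just resume in Baseline
+ // without OSR.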
+ if (!script->hasIonScript() || script->ionScript()->osrPc() != pc ||
+ script->ionScript()->bailoutExpected() ||
+ frame->isDebuggee())
+ {
+ return true;
+ }
+
+ IonScript* ion = script->ionScript();
+ MOZ_ASSERT(cx->runtime()->spsProfiler.enabled() == ion->hasProfilingInstrumentation());
+ MOZ_ASSERT(ion->osrPc() == pc);
+
+ JitSpew(JitSpew_BaselineOSR, " OSR possible!");
+ void* jitcode = ion->method()->raw() + ion->osrEntryOffset();
+
+ // Prepare the temporary heap copy of the BaselineFrame and stack values.
+ JitSpew(JitSpew_BaselineOSR, "Got jitcode. Preparing for OSR into ion.");
+ IonOsrTempData* info = PrepareOsrTempData(cx, stub, frame, script, pc, jitcode);
+ if (!info)
+ return false;
+ *infoPtr = info;
+
+ return true;
+}
+
+typedef bool (*DoWarmUpCounterFallbackOSRFn)(JSContext*, BaselineFrame*,
+ ICWarmUpCounter_Fallback*, IonOsrTempData** infoPtr);
+static const VMFunction DoWarmUpCounterFallbackOSRInfo =
+ FunctionInfo<DoWarmUpCounterFallbackOSRFn>(DoWarmUpCounterFallbackOSR,
+ "DoWarmUpCounterFallbackOSR");
+
+bool
+ICWarmUpCounter_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, R1.scratchReg());
+
+ Label noCompiledCode;
+ // Call DoWarmUpCounterFallbackOSR to compile, or check for, an Ion-compiled function.
+ {
+ // Push IonOsrTempData pointer storage
+ masm.subFromStackPtr(Imm32(sizeof(void*)));
+ masm.push(masm.getStackPointer());
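+ // (The slot reserved above receives the IonOsrTempData* outparam; pushing
+ // the stack pointer passes that slot's address as the infoPtr argument.)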
+
+ // Push stub pointer.
+ masm.push(ICStubReg);
+
+ pushStubPayload(masm, R0.scratchReg());
+
+ if (!callVM(DoWarmUpCounterFallbackOSRInfo, masm))
+ return false;
+
+ // Pop IonOsrTempData pointer.
+ masm.pop(R0.scratchReg());
+
+ leaveStubFrame(masm);
+
+ // If no JitCode was found, just exit the IC.
+ masm.branchPtr(Assembler::Equal, R0.scratchReg(), ImmPtr(nullptr), &noCompiledCode);
+ }
+
+ // Get a scratch register.
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ Register osrDataReg = R0.scratchReg();
+ regs.take(osrDataReg);
+ regs.takeUnchecked(OsrFrameReg);
+
+ Register scratchReg = regs.takeAny();
+
+ // At this point, stack looks like:
+ // +-> [...Calling-Frame...]
+ // | [...Actual-Args/ThisV/ArgCount/Callee...]
+ // | [Descriptor]
+ // | [Return-Addr]
+ // +---[Saved-FramePtr] <-- BaselineFrameReg points here.
+ // [...Baseline-Frame...]
+
+ // Restore the stack pointer to point to the saved frame pointer.
+ masm.moveToStackPtr(BaselineFrameReg);
+
+ // Discard saved frame pointer, so that the return address is on top of
+ // the stack.
+ masm.pop(scratchReg);
+
+#ifdef DEBUG
+ // If profiler instrumentation is on, ensure that lastProfilingFrame is
+ // the frame currently being OSR-ed
+ {
+ Label checkOk;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &checkOk);
+ masm.loadPtr(AbsoluteAddress((void*)&cx->runtime()->jitActivation), scratchReg);
+ masm.loadPtr(Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()), scratchReg);
+
+ // It may be the case that we entered the baseline frame with
+ // profiling turned off, that profiling was then turned on in a call
+ // within a loop (i.e. in a callee frame), and that we then returned
+ // to this frame and OSR'ed with profiling turned on. In this case,
+ // allow lastProfilingFrame to be null.
+ masm.branchPtr(Assembler::Equal, scratchReg, ImmWord(0), &checkOk);
+
+ masm.branchStackPtr(Assembler::Equal, scratchReg, &checkOk);
+ masm.assumeUnreachable("Baseline OSR lastProfilingFrame mismatch.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Jump into Ion.
+ masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, jitcode)), scratchReg);
+ masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, baselineFrame)), OsrFrameReg);
+ masm.jump(scratchReg);
+
+ // No jitcode available, do nothing.
+ masm.bind(&noCompiledCode);
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+//
+// TypeUpdate_Fallback
+//
+static bool
+DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, HandleValue objval,
+ HandleValue value)
+{
+ // This can get called from optimized stubs. Therefore it is not allowed to GC.
+ JS::AutoCheckCannotGC nogc;
+
+ FallbackICSpew(cx, stub->getChainFallback(), "TypeUpdate(%s)",
+ ICStub::KindString(stub->kind()));
+
+ RootedScript script(cx, frame->script());
+ RootedObject obj(cx, &objval.toObject());
+ RootedId id(cx);
+
+ switch (stub->kind()) {
+ case ICStub::SetElem_DenseOrUnboxedArray:
+ case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
+ id = JSID_VOID;
+ AddTypePropertyId(cx, obj, id, value);
+ break;
+ }
+ case ICStub::SetProp_Native:
+ case ICStub::SetProp_NativeAdd:
+ case ICStub::SetProp_Unboxed: {
+ MOZ_ASSERT(obj->isNative() || obj->is<UnboxedPlainObject>());
+ jsbytecode* pc = stub->getChainFallback()->icEntry()->pc(script);
+ if (*pc == JSOP_SETALIASEDVAR || *pc == JSOP_INITALIASEDLEXICAL)
+ id = NameToId(EnvironmentCoordinateName(cx->caches.envCoordinateNameCache, script, pc));
+ else
+ id = NameToId(script->getName(pc));
+ AddTypePropertyId(cx, obj, id, value);
+ break;
+ }
+ case ICStub::SetProp_TypedObject: {
+ MOZ_ASSERT(obj->is<TypedObject>());
+ jsbytecode* pc = stub->getChainFallback()->icEntry()->pc(script);
+ id = NameToId(script->getName(pc));
+ if (stub->toSetProp_TypedObject()->isObjectReference()) {
+ // Ignore all values being written except plain objects. Null
+ // is included implicitly in type information for this property,
+ // and non-object non-null values will cause the stub to fail to
+ // match shortly and we will end up doing the assignment in the VM.
+ if (value.isObject())
+ AddTypePropertyId(cx, obj, id, value);
+ } else {
+ // Ignore undefined values, which are included implicitly in type
+ // information for this property.
+ if (!value.isUndefined())
+ AddTypePropertyId(cx, obj, id, value);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Invalid stub");
+ }
+
+ return stub->addUpdateStubForValue(cx, script /* = outerScript */, obj, id, value);
+}
+
+typedef bool (*DoTypeUpdateFallbackFn)(JSContext*, BaselineFrame*, ICUpdatedStub*, HandleValue,
+ HandleValue);
+const VMFunction DoTypeUpdateFallbackInfo =
+ FunctionInfo<DoTypeUpdateFallbackFn>(DoTypeUpdateFallback, "DoTypeUpdateFallback", NonTailCall);
+
+bool
+ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // Just store false into R1.scratchReg() and return.
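+ // (Returning false here signals the calling stub's type-update check that
+ // the inline chain could not handle the value, so the caller can fall back
+ // to the DoTypeUpdateFallback VM call.)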
+ masm.move32(Imm32(0), R1.scratchReg());
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool
+ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label success;
+ if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
+ masm.branchTestInt32(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
+ masm.branchTestNumber(Assembler::Equal, R0, &success);
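+ // (The Double flag subsumes Int32: branchTestNumber accepts both int32 and
+ // double values, which is why the Int32 check above is skipped when the
+ // Double flag is set.)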
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
+ masm.branchTestUndefined(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
+ masm.branchTestBoolean(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
+ masm.branchTestString(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL))
+ masm.branchTestSymbol(Assembler::Equal, R0, &success);
+
+ // Currently, we never generate primitive stub checks for objects. However,
+ // once we get to the point where we want to collapse our monitor chains of
+ // objects and singletons (when they get too long) down to a generic "any
+ // object", in coordination with the typeset doing the same thing, this will
+ // need to be re-enabled.
+ /*
+ if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
+ masm.branchTestObject(Assembler::Equal, R0, &success);
+ */
+ MOZ_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
+ masm.branchTestNull(Assembler::Equal, R0, &success);
+
+ EmitStubGuardFailure(masm);
+
+ // Type matches, load true into R1.scratchReg() and return.
+ masm.bind(&success);
+ masm.mov(ImmWord(1), R1.scratchReg());
+ EmitReturnFromIC(masm);
+
+ return true;
+}
+
+bool
+ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Guard on the object's identity.
+ Register obj = masm.extractObject(R0, R1.scratchReg());
+ Address expectedObject(ICStubReg, ICTypeUpdate_SingleObject::offsetOfObject());
+ masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
+
+ // Identity matches, load true into R1.scratchReg() and return.
+ masm.mov(ImmWord(1), R1.scratchReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Guard on the object's ObjectGroup.
+ Register obj = masm.extractObject(R0, R1.scratchReg());
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), R1.scratchReg());
+
+ Address expectedGroup(ICStubReg, ICTypeUpdate_ObjectGroup::offsetOfGroup());
+ masm.branchPtr(Assembler::NotEqual, expectedGroup, R1.scratchReg(), &failure);
+
+ // Group matches, load true into R1.scratchReg() and return.
+ masm.mov(ImmWord(1), R1.scratchReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+typedef bool (*DoCallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
+static const VMFunction DoCallNativeGetterInfo =
+ FunctionInfo<DoCallNativeGetterFn>(DoCallNativeGetter, "DoCallNativeGetter");
+
+//
+// ToBool_Fallback
+//
+
+static bool
+DoToBoolFallback(JSContext* cx, BaselineFrame* frame, ICToBool_Fallback* stub, HandleValue arg,
+ MutableHandleValue ret)
+{
+ FallbackICSpew(cx, stub, "ToBool");
+
+ bool cond = ToBoolean(arg);
+ ret.setBoolean(cond);
+
+ // Check to see if a new stub should be generated.
+ if (stub->numOptimizedStubs() >= ICToBool_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+ // But for now we just bail.
+ return true;
+ }
+
+ MOZ_ASSERT(!arg.isBoolean());
+
+ JSScript* script = frame->script();
+
+ // Try to generate new stubs.
+ if (arg.isInt32()) {
+ JitSpew(JitSpew_BaselineIC, " Generating ToBool(Int32) stub.");
+ ICToBool_Int32::Compiler compiler(cx);
+ ICStub* int32Stub = compiler.getStub(compiler.getStubSpace(script));
+ if (!int32Stub)
+ return false;
+
+ stub->addNewStub(int32Stub);
+ return true;
+ }
+
+ if (arg.isDouble() && cx->runtime()->jitSupportsFloatingPoint) {
+ JitSpew(JitSpew_BaselineIC, " Generating ToBool(Double) stub.");
+ ICToBool_Double::Compiler compiler(cx);
+ ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!doubleStub)
+ return false;
+
+ stub->addNewStub(doubleStub);
+ return true;
+ }
+
+ if (arg.isString()) {
+ JitSpew(JitSpew_BaselineIC, " Generating ToBool(String) stub");
+ ICToBool_String::Compiler compiler(cx);
+ ICStub* stringStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!stringStub)
+ return false;
+
+ stub->addNewStub(stringStub);
+ return true;
+ }
+
+ if (arg.isNull() || arg.isUndefined()) {
+ ICToBool_NullUndefined::Compiler compiler(cx);
+ ICStub* nilStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!nilStub)
+ return false;
+
+ stub->addNewStub(nilStub);
+ return true;
+ }
+
+ if (arg.isObject()) {
+ JitSpew(JitSpew_BaselineIC, " Generating ToBool(Object) stub.");
+ ICToBool_Object::Compiler compiler(cx);
+ ICStub* objStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!objStub)
+ return false;
+
+ stub->addNewStub(objStub);
+ return true;
+ }
+
+ return true;
+}
+
+typedef bool (*DoToBoolFallbackFn)(JSContext*, BaselineFrame*, ICToBool_Fallback*, HandleValue,
+ MutableHandleValue);
+static const VMFunction DoToBoolFallbackInfo =
+ FunctionInfo<DoToBoolFallbackFn>(DoToBoolFallback, "DoToBoolFallback", TailCall);
+
+bool
+ICToBool_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoToBoolFallbackInfo, masm);
+}
+
+//
+// ToBool_Int32
+//
+
+bool
+ICToBool_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ Label ifFalse;
+ masm.branchTestInt32Truthy(false, R0, &ifFalse);
+
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// ToBool_String
+//
+
+bool
+ICToBool_String::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+
+ Label ifFalse;
+ masm.branchTestStringTruthy(false, R0, &ifFalse);
+
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// ToBool_NullUndefined
+//
+
+bool
+ICToBool_NullUndefined::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure, ifFalse;
+ masm.branchTestNull(Assembler::Equal, R0, &ifFalse);
+ masm.branchTestUndefined(Assembler::NotEqual, R0, &failure);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// ToBool_Double
+//
+
+bool
+ICToBool_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure, ifTrue;
+ masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
+ masm.unboxDouble(R0, FloatReg0);
+ masm.branchTestDoubleTruthy(true, FloatReg0, &ifTrue);
+
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&ifTrue);
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// ToBool_Object
+//
+
+bool
+ICToBool_Object::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure, ifFalse, slowPath;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+ Register scratch = R1.scratchReg();
+ masm.branchTestObjectTruthy(false, objReg, scratch, &slowPath, &ifFalse);
+
+ // If the object doesn't emulate undefined, it evaluates to true.
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&slowPath);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(objReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
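+ // EmulatesUndefined() returns true for objects that are falsy, so the
+ // result is inverted below to produce the ToBoolean value.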
+ masm.convertBoolToInt32(ReturnReg, ReturnReg);
+ masm.xor32(Imm32(1), ReturnReg);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// ToNumber_Fallback
+//
+
+static bool
+DoToNumberFallback(JSContext* cx, ICToNumber_Fallback* stub, HandleValue arg, MutableHandleValue ret)
+{
+ FallbackICSpew(cx, stub, "ToNumber");
+ ret.set(arg);
+ return ToNumber(cx, ret);
+}
+
+typedef bool (*DoToNumberFallbackFn)(JSContext*, ICToNumber_Fallback*, HandleValue, MutableHandleValue);
+static const VMFunction DoToNumberFallbackInfo =
+ FunctionInfo<DoToNumberFallbackFn>(DoToNumberFallback, "DoToNumberFallback", TailCall,
+ PopValues(1));
+
+bool
+ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+
+ return tailCallVM(DoToNumberFallbackInfo, masm);
+}
+
+//
+// GetElem_Fallback
+//
+
+static Shape*
+LastPropertyForSetProp(JSObject* obj)
+{
+ if (obj->isNative())
+ return obj->as<NativeObject>().lastProperty();
+
+ if (obj->is<UnboxedPlainObject>()) {
+ UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
+ return expando ? expando->lastProperty() : nullptr;
+ }
+
+ return nullptr;
+}
+
+static bool
+IsCacheableSetPropWriteSlot(JSObject* obj, Shape* oldShape, Shape* propertyShape)
+{
+ // Object shape must not have changed during the property set.
+ if (LastPropertyForSetProp(obj) != oldShape)
+ return false;
+
+ if (!propertyShape->hasSlot() ||
+ !propertyShape->hasDefaultSetter() ||
+ !propertyShape->writable())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+IsCacheableSetPropAddSlot(JSContext* cx, JSObject* obj, Shape* oldShape,
+ jsid id, Shape* propertyShape, size_t* protoChainDepth)
+{
+ // The property must be the last added property of the object.
+ if (LastPropertyForSetProp(obj) != propertyShape)
+ return false;
+
+ // Object must be extensible, oldShape must be immediate parent of current shape.
+ if (!obj->nonProxyIsExtensible() || propertyShape->previous() != oldShape)
+ return false;
+
+ // Basic shape checks.
+ if (propertyShape->inDictionary() ||
+ !propertyShape->hasSlot() ||
+ !propertyShape->hasDefaultSetter() ||
+ !propertyShape->writable())
+ {
+ return false;
+ }
+
+ // Watch out for resolve or addProperty hooks.
+ if (ClassMayResolveId(cx->names(), obj->getClass(), id, obj) ||
+ obj->getClass()->getAddProperty())
+ {
+ return false;
+ }
+
+ size_t chainDepth = 0;
+ // Walk up the object prototype chain and ensure that all prototypes are
+ // native, and that all prototypes have no setter defined on the property.
+ for (JSObject* proto = obj->staticPrototype(); proto; proto = proto->staticPrototype()) {
+ chainDepth++;
+ // If the prototype is non-native, don't optimize.
+ if (!proto->isNative())
+ return false;
+
+ MOZ_ASSERT(proto->hasStaticPrototype());
+
+ // If the prototype defines this property in a non-plain way, don't optimize.
+ Shape* protoShape = proto->as<NativeObject>().lookup(cx, id);
+ if (protoShape && !protoShape->hasDefaultSetter())
+ return false;
+
+ // Otherwise, if there's no such property, watch out for a resolve hook
+ // that would need to be invoked and thus prevent inlining of property
+ // addition.
+ if (ClassMayResolveId(cx->names(), proto->getClass(), id, proto))
+ return false;
+ }
+
+ // Only add an IC entry if the dynamic slot count didn't change when the
+ // shapes changed. We need to ensure that a shape change for a subsequent
+ // object won't involve reallocating the slot array.
+ if (NativeObject::dynamicSlotsCount(propertyShape) != NativeObject::dynamicSlotsCount(oldShape))
+ return false;
+
+ *protoChainDepth = chainDepth;
+ return true;
+}
+
+static bool
+IsCacheableSetPropCall(JSContext* cx, JSObject* obj, JSObject* holder, Shape* shape,
+ bool* isScripted, bool* isTemporarilyUnoptimizable)
+{
+ MOZ_ASSERT(isScripted);
+
+ if (!shape || !IsCacheableProtoChain(obj, holder))
+ return false;
+
+ if (shape->hasSlot() || shape->hasDefaultSetter())
+ return false;
+
+ if (!shape->hasSetterValue())
+ return false;
+
+ if (!shape->setterValue().isObject() || !shape->setterObject()->is<JSFunction>())
+ return false;
+
+ JSFunction* func = &shape->setterObject()->as<JSFunction>();
+
+ if (IsWindow(obj)) {
+ if (!func->isNative())
+ return false;
+
+ if (!func->jitInfo() || func->jitInfo()->needsOuterizedThisObject())
+ return false;
+ }
+
+ if (func->isNative()) {
+ *isScripted = false;
+ return true;
+ }
+
+ if (!func->hasJITCode()) {
+ *isTemporarilyUnoptimizable = true;
+ return false;
+ }
+
+ *isScripted = true;
+ return true;
+}
+
+template <class T>
+static bool
+GetElemNativeStubExists(ICGetElem_Fallback* stub, HandleObject obj, HandleObject holder,
+ Handle<T> key, bool needsAtomize)
+{
+ bool indirect = (obj.get() != holder.get());
+ MOZ_ASSERT_IF(indirect, holder->isNative());
+
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->kind() != ICStub::GetElem_NativeSlotName &&
+ iter->kind() != ICStub::GetElem_NativeSlotSymbol &&
+ iter->kind() != ICStub::GetElem_NativePrototypeSlotName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeSlotSymbol &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallNativeName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallNativeSymbol &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedSymbol)
+ {
+ continue;
+ }
+
+ if (indirect && (iter->kind() != ICStub::GetElem_NativePrototypeSlotName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeSlotSymbol &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallNativeName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallNativeSymbol &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedName &&
+ iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedSymbol))
+ {
+ continue;
+ }
+
+ if (mozilla::IsSame<T, JS::Symbol*>::value !=
+ static_cast<ICGetElemNativeStub*>(*iter)->isSymbol())
+ {
+ continue;
+ }
+
+ ICGetElemNativeStubImpl<T>* getElemNativeStub =
+ reinterpret_cast<ICGetElemNativeStubImpl<T>*>(*iter);
+ if (key != getElemNativeStub->key())
+ continue;
+
+ if (ReceiverGuard(obj) != getElemNativeStub->receiverGuard())
+ continue;
+
+ // If the new stub needs atomization, and the old stub doesn't atomize, then
+ // an appropriate stub doesn't exist.
+ if (needsAtomize && !getElemNativeStub->needsAtomize())
+ continue;
+
+ // For prototype gets, check the holder and holder shape.
+ if (indirect) {
+ if (iter->isGetElem_NativePrototypeSlotName() ||
+ iter->isGetElem_NativePrototypeSlotSymbol()) {
+ ICGetElem_NativePrototypeSlot<T>* protoStub =
+ reinterpret_cast<ICGetElem_NativePrototypeSlot<T>*>(*iter);
+
+ if (holder != protoStub->holder())
+ continue;
+
+ if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape())
+ continue;
+ } else {
+ MOZ_ASSERT(iter->isGetElem_NativePrototypeCallNativeName() ||
+ iter->isGetElem_NativePrototypeCallNativeSymbol() ||
+ iter->isGetElem_NativePrototypeCallScriptedName() ||
+ iter->isGetElem_NativePrototypeCallScriptedSymbol());
+
+ ICGetElemNativePrototypeCallStub<T>* protoStub =
+ reinterpret_cast<ICGetElemNativePrototypeCallStub<T>*>(*iter);
+
+ if (holder != protoStub->holder())
+ continue;
+
+ if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape())
+ continue;
+ }
+ }
+
+ return true;
+ }
+ return false;
+}
+
+template <class T>
+static void
+RemoveExistingGetElemNativeStubs(JSContext* cx, ICGetElem_Fallback* stub, HandleObject obj,
+ HandleObject holder, Handle<T> key, bool needsAtomize)
+{
+ bool indirect = (obj.get() != holder.get());
+ MOZ_ASSERT_IF(indirect, holder->isNative());
+
+ for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
+ switch (iter->kind()) {
+ case ICStub::GetElem_NativeSlotName:
+ case ICStub::GetElem_NativeSlotSymbol:
+ if (indirect)
+ continue;
+ MOZ_FALLTHROUGH;
+ case ICStub::GetElem_NativePrototypeSlotName:
+ case ICStub::GetElem_NativePrototypeSlotSymbol:
+ case ICStub::GetElem_NativePrototypeCallNativeName:
+ case ICStub::GetElem_NativePrototypeCallNativeSymbol:
+ case ICStub::GetElem_NativePrototypeCallScriptedName:
+ case ICStub::GetElem_NativePrototypeCallScriptedSymbol:
+ break;
+ default:
+ continue;
+ }
+
+ if (mozilla::IsSame<T, JS::Symbol*>::value !=
+ static_cast<ICGetElemNativeStub*>(*iter)->isSymbol())
+ {
+ continue;
+ }
+
+ ICGetElemNativeStubImpl<T>* getElemNativeStub =
+ reinterpret_cast<ICGetElemNativeStubImpl<T>*>(*iter);
+ if (key != getElemNativeStub->key())
+ continue;
+
+ if (ReceiverGuard(obj) != getElemNativeStub->receiverGuard())
+ continue;
+
+ // For prototype gets, check the holder and holder shape.
+ if (indirect) {
+ if (iter->isGetElem_NativePrototypeSlotName() ||
+ iter->isGetElem_NativePrototypeSlotSymbol()) {
+ ICGetElem_NativePrototypeSlot<T>* protoStub =
+ reinterpret_cast<ICGetElem_NativePrototypeSlot<T>*>(*iter);
+
+ if (holder != protoStub->holder())
+ continue;
+
+ // If the holder matches, but the holder's lastProperty doesn't match, then
+ // this stub is invalid anyway. Unlink it.
+ if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape()) {
+ iter.unlink(cx);
+ continue;
+ }
+ } else {
+ MOZ_ASSERT(iter->isGetElem_NativePrototypeCallNativeName() ||
+ iter->isGetElem_NativePrototypeCallNativeSymbol() ||
+ iter->isGetElem_NativePrototypeCallScriptedName() ||
+ iter->isGetElem_NativePrototypeCallScriptedSymbol());
+ ICGetElemNativePrototypeCallStub<T>* protoStub =
+ reinterpret_cast<ICGetElemNativePrototypeCallStub<T>*>(*iter);
+
+ if (holder != protoStub->holder())
+ continue;
+
+ // If the holder matches, but the holder's lastProperty doesn't match, then
+ // this stub is invalid anyway. Unlink it.
+ if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape()) {
+ iter.unlink(cx);
+ continue;
+ }
+ }
+ }
+
+ // If the new stub needs atomization, and the old stub doesn't atomize, then
+ // remove the old stub.
+ if (needsAtomize && !getElemNativeStub->needsAtomize()) {
+ iter.unlink(cx);
+ continue;
+ }
+
+ // Should never get here, because this means a matching stub exists, and if
+ // a matching stub exists, this procedure should never have been called.
+ MOZ_CRASH("Procedure should never have been called.");
+ }
+}
+
+static bool
+TypedArrayGetElemStubExists(ICGetElem_Fallback* stub, HandleObject obj)
+{
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (!iter->isGetElem_TypedArray())
+ continue;
+ if (obj->maybeShape() == iter->toGetElem_TypedArray()->shape())
+ return true;
+ }
+ return false;
+}
+
+static bool
+ArgumentsGetElemStubExists(ICGetElem_Fallback* stub, ICGetElem_Arguments::Which which)
+{
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (!iter->isGetElem_Arguments())
+ continue;
+ if (iter->toGetElem_Arguments()->which() == which)
+ return true;
+ }
+ return false;
+}
+
+template <class T>
+static T
+getKey(jsid id)
+{
+ MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
+ return false;
+}
+
+template <>
+JS::Symbol* getKey<JS::Symbol*>(jsid id)
+{
+ if (!JSID_IS_SYMBOL(id))
+ return nullptr;
+ return JSID_TO_SYMBOL(id);
+}
+
+template <>
+PropertyName* getKey<PropertyName*>(jsid id)
+{
+ uint32_t dummy;
+ if (!JSID_IS_ATOM(id) || JSID_TO_ATOM(id)->isIndex(&dummy))
+ return nullptr;
+ return JSID_TO_ATOM(id)->asPropertyName();
+}
+
+static bool
+IsOptimizableElementPropertyName(JSContext* cx, HandleValue key, MutableHandleId idp)
+{
+ if (!key.isString())
+ return false;
+
+ // Convert to interned property name.
+ if (!ValueToId<CanGC>(cx, key, idp))
+ return false;
+
+ uint32_t dummy;
+ if (!JSID_IS_ATOM(idp) || JSID_TO_ATOM(idp)->isIndex(&dummy))
+ return false;
+
+ return true;
+}
+
+template <class T>
+static bool
+checkAtomize(HandleValue key)
+{
+ MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
+ return false;
+}
+
+template <>
+bool checkAtomize<JS::Symbol*>(HandleValue key)
+{
+ return false;
+}
+
+template <>
+bool checkAtomize<PropertyName*>(HandleValue key)
+{
+ return !key.toString()->isAtom();
+}
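+
+ // (Illustrative note: the stubs below compare the key against a stored
+ // PropertyName by pointer identity, which only works for atoms. A key such
+ // as one produced by runtime string concatenation is not an atom, so
+ // needsAtomize is set and the stub atomizes the key before comparing; see
+ // ICGetElemNativeCompiler<PropertyName*>::emitCheckKey below.)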
+
+template <class T>
+static bool
+TryAttachNativeOrUnboxedGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICGetElem_Fallback* stub, HandleObject obj,
+ HandleValue keyVal, bool* attached)
+{
+ MOZ_ASSERT(keyVal.isString() || keyVal.isSymbol());
+
+ // Convert to id.
+ RootedId id(cx);
+ if (!ValueToId<CanGC>(cx, keyVal, &id))
+ return false;
+
+ Rooted<T> key(cx, getKey<T>(id));
+ if (!key)
+ return true;
+ bool needsAtomize = checkAtomize<T>(keyVal);
+
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape))
+ return false;
+ if (!holder || (holder != obj && !holder->isNative()))
+ return true;
+
+ // If a suitable stub already exists, nothing else to do.
+ if (GetElemNativeStubExists<T>(stub, obj, holder, key, needsAtomize))
+ return true;
+
+ // Remove any existing stubs that may interfere with the new stub being added.
+ RemoveExistingGetElemNativeStubs<T>(cx, stub, obj, holder, key, needsAtomize);
+
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+
+ if (obj->is<UnboxedPlainObject>() && holder == obj) {
+ const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(id);
+
+ // Once unboxed objects support symbol keys, the following will need to change accordingly.
+ MOZ_ASSERT_IF(!keyVal.isString(), !property);
+
+ if (property) {
+ if (!cx->runtime()->jitSupportsFloatingPoint)
+ return true;
+
+ RootedPropertyName name(cx, JSID_TO_ATOM(id)->asPropertyName());
+ ICGetElemNativeCompiler<PropertyName*> compiler(cx, ICStub::GetElem_UnboxedPropertyName,
+ monitorStub, obj, holder,
+ name,
+ ICGetElemNativeStub::UnboxedProperty,
+ needsAtomize, property->offset +
+ UnboxedPlainObject::offsetOfData(),
+ property->type);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ Shape* shape = obj->as<UnboxedPlainObject>().maybeExpando()->lookup(cx, id);
+ if (!shape->hasDefaultGetter() || !shape->hasSlot())
+ return true;
+
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ ICGetElemNativeStub::AccessType acctype =
+ isFixedSlot ? ICGetElemNativeStub::FixedSlot
+ : ICGetElemNativeStub::DynamicSlot;
+ ICGetElemNativeCompiler<T> compiler(cx, getGetElemStubKind<T>(ICStub::GetElem_NativeSlotName),
+ monitorStub, obj, holder, key,
+ acctype, needsAtomize, offset);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ if (!holder->isNative())
+ return true;
+
+ if (IsCacheableGetPropReadSlot(obj, holder, shape)) {
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ ICStub::Kind kind = (obj == holder) ? ICStub::GetElem_NativeSlotName
+ : ICStub::GetElem_NativePrototypeSlotName;
+ kind = getGetElemStubKind<T>(kind);
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(Native %s%s slot) stub "
+ "(obj=%p, holder=%p, holderShape=%p)",
+ (obj == holder) ? "direct" : "prototype",
+ needsAtomize ? " atomizing" : "",
+ obj.get(), holder.get(), holder->as<NativeObject>().lastProperty());
+
+ AccType acctype = isFixedSlot ? ICGetElemNativeStub::FixedSlot
+ : ICGetElemNativeStub::DynamicSlot;
+ ICGetElemNativeCompiler<T> compiler(cx, kind, monitorStub, obj, holder, key,
+ acctype, needsAtomize, offset);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ return true;
+}
+
+template <class T>
+static bool
+TryAttachNativeGetAccessorElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICGetElem_Fallback* stub, HandleNativeObject obj,
+ HandleValue keyVal, bool* attached,
+ bool* isTemporarilyUnoptimizable)
+{
+ MOZ_ASSERT(!*attached);
+ MOZ_ASSERT(keyVal.isString() || keyVal.isSymbol());
+
+ RootedId id(cx);
+ if (!ValueToId<CanGC>(cx, keyVal, &id))
+ return false;
+
+ Rooted<T> key(cx, getKey<T>(id));
+ if (!key)
+ return true;
+ bool needsAtomize = checkAtomize<T>(keyVal);
+
+ RootedShape shape(cx);
+ RootedObject baseHolder(cx);
+ if (!EffectlesslyLookupProperty(cx, obj, id, &baseHolder, &shape))
+ return false;
+ if (!baseHolder || !baseHolder->isNative())
+ return true;
+
+ HandleNativeObject holder = baseHolder.as<NativeObject>();
+
+ bool getterIsScripted = false;
+ if (IsCacheableGetPropCall(cx, obj, baseHolder, shape, &getterIsScripted,
+ isTemporarilyUnoptimizable, /*isDOMProxy=*/false))
+ {
+ RootedFunction getter(cx, &shape->getterObject()->as<JSFunction>());
+
+ // For now, we do not handle own property getters
+ if (obj == holder)
+ return true;
+
+ // If a suitable stub already exists, nothing else to do.
+ if (GetElemNativeStubExists<T>(stub, obj, holder, key, needsAtomize))
+ return true;
+
+ // Remove any existing stubs that may interfere with the new stub being added.
+ RemoveExistingGetElemNativeStubs<T>(cx, stub, obj, holder, key, needsAtomize);
+
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+ ICStub::Kind kind = getterIsScripted ? ICStub::GetElem_NativePrototypeCallScriptedName
+ : ICStub::GetElem_NativePrototypeCallNativeName;
+ kind = getGetElemStubKind<T>(kind);
+
+ if (getterIsScripted) {
+ JitSpew(JitSpew_BaselineIC,
+ " Generating GetElem(Native %s%s call scripted %s:%" PRIuSIZE ") stub "
+ "(obj=%p, shape=%p, holder=%p, holderShape=%p)",
+ (obj == holder) ? "direct" : "prototype",
+ needsAtomize ? " atomizing" : "",
+ getter->nonLazyScript()->filename(), getter->nonLazyScript()->lineno(),
+ obj.get(), obj->lastProperty(), holder.get(), holder->lastProperty());
+ } else {
+ JitSpew(JitSpew_BaselineIC,
+ " Generating GetElem(Native %s%s call native) stub "
+ "(obj=%p, shape=%p, holder=%p, holderShape=%p)",
+ (obj == holder) ? "direct" : "prototype",
+ needsAtomize ? " atomizing" : "",
+ obj.get(), obj->lastProperty(), holder.get(), holder->lastProperty());
+ }
+
+ AccType acctype = getterIsScripted ? ICGetElemNativeStub::ScriptedGetter
+ : ICGetElemNativeStub::NativeGetter;
+ ICGetElemNativeCompiler<T> compiler(cx, kind, monitorStub, obj, holder, key, acctype,
+ needsAtomize, getter, script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+IsPrimitiveArrayTypedObject(JSObject* obj)
+{
+ if (!obj->is<TypedObject>())
+ return false;
+ TypeDescr& descr = obj->as<TypedObject>().typeDescr();
+ return descr.is<ArrayTypeDescr>() &&
+ descr.as<ArrayTypeDescr>().elementType().is<ScalarTypeDescr>();
+}
+
+static Scalar::Type
+PrimitiveArrayTypedObjectType(JSObject* obj)
+{
+ MOZ_ASSERT(IsPrimitiveArrayTypedObject(obj));
+ TypeDescr& descr = obj->as<TypedObject>().typeDescr();
+ return descr.as<ArrayTypeDescr>().elementType().as<ScalarTypeDescr>().type();
+}
+
+static Scalar::Type
+TypedThingElementType(JSObject* obj)
+{
+ return obj->is<TypedArrayObject>()
+ ? obj->as<TypedArrayObject>().type()
+ : PrimitiveArrayTypedObjectType(obj);
+}
+
+static bool
+TypedThingRequiresFloatingPoint(JSObject* obj)
+{
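+ // Uint32 is included because values above INT32_MAX cannot be represented
+ // as int32 Values and must be boxed as doubles.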
+ Scalar::Type type = TypedThingElementType(obj);
+ return type == Scalar::Uint32 ||
+ type == Scalar::Float32 ||
+ type == Scalar::Float64;
+}
+
+static bool
+IsNativeDenseElementAccess(HandleObject obj, HandleValue key)
+{
+ if (obj->isNative() && key.isInt32() && key.toInt32() >= 0 && !obj->is<TypedArrayObject>())
+ return true;
+ return false;
+}
+
+static bool
+IsNativeOrUnboxedDenseElementAccess(HandleObject obj, HandleValue key)
+{
+ if (!obj->isNative() && !obj->is<UnboxedArrayObject>())
+ return false;
+ if (key.isInt32() && key.toInt32() >= 0 && !obj->is<TypedArrayObject>())
+ return true;
+ return false;
+}
+
+static bool
+TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_Fallback* stub,
+ HandleValue lhs, HandleValue rhs, HandleValue res, bool* attached)
+{
+ // Check for String[i] => Char accesses.
+ if (lhs.isString() && rhs.isInt32() && res.isString() &&
+ !stub->hasStub(ICStub::GetElem_String))
+ {
+ // NoSuchMethod handling doesn't apply to string targets.
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(String[Int32]) stub");
+ ICGetElem_String::Compiler compiler(cx);
+ ICStub* stringStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!stringStub)
+ return false;
+
+ stub->addNewStub(stringStub);
+ *attached = true;
+ return true;
+ }
+
+ if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS) && rhs.isInt32() &&
+ !ArgumentsGetElemStubExists(stub, ICGetElem_Arguments::Magic))
+ {
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(MagicArgs[Int32]) stub");
+ ICGetElem_Arguments::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ ICGetElem_Arguments::Magic);
+ ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!argsStub)
+ return false;
+
+ stub->addNewStub(argsStub);
+ *attached = true;
+ return true;
+ }
+
+ // Otherwise, GetElem is only optimized on objects.
+ if (!lhs.isObject())
+ return true;
+ RootedObject obj(cx, &lhs.toObject());
+
+ // Check for ArgumentsObj[int] accesses
+ if (obj->is<ArgumentsObject>() && rhs.isInt32() &&
+ !obj->as<ArgumentsObject>().hasOverriddenElement())
+ {
+ ICGetElem_Arguments::Which which = ICGetElem_Arguments::Mapped;
+ if (obj->is<UnmappedArgumentsObject>())
+ which = ICGetElem_Arguments::Unmapped;
+ if (!ArgumentsGetElemStubExists(stub, which)) {
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(ArgsObj[Int32]) stub");
+ ICGetElem_Arguments::Compiler compiler(
+ cx, stub->fallbackMonitorStub()->firstMonitorStub(), which);
+ ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!argsStub)
+ return false;
+
+ stub->addNewStub(argsStub);
+ *attached = true;
+ return true;
+ }
+ }
+
+ // Check for NativeObject[int] dense accesses.
+ if (IsNativeDenseElementAccess(obj, rhs)) {
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(Native[Int32] dense) stub");
+ ICGetElem_Dense::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ obj->as<NativeObject>().lastProperty());
+ ICStub* denseStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!denseStub)
+ return false;
+
+ stub->addNewStub(denseStub);
+ *attached = true;
+ return true;
+ }
+
+ // Check for NativeObject[id] and UnboxedPlainObject[id] shape-optimizable accesses.
+ if (obj->isNative() || obj->is<UnboxedPlainObject>()) {
+ RootedScript rootedScript(cx, script);
+ if (rhs.isString()) {
+ if (!TryAttachNativeOrUnboxedGetValueElemStub<PropertyName*>(cx, rootedScript, pc, stub,
+ obj, rhs, attached))
+ {
+ return false;
+ }
+ } else if (rhs.isSymbol()) {
+ if (!TryAttachNativeOrUnboxedGetValueElemStub<JS::Symbol*>(cx, rootedScript, pc, stub,
+ obj, rhs, attached))
+ {
+ return false;
+ }
+ }
+ if (*attached)
+ return true;
+ script = rootedScript;
+ }
+
+ // Check for UnboxedArray[int] accesses.
+ if (obj->is<UnboxedArrayObject>() && rhs.isInt32() && rhs.toInt32() >= 0) {
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(UnboxedArray[Int32]) stub");
+ ICGetElem_UnboxedArray::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ obj->group());
+ ICStub* unboxedStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!unboxedStub)
+ return false;
+
+ stub->addNewStub(unboxedStub);
+ *attached = true;
+ return true;
+ }
+
+ // Check for TypedArray[int] => Number and TypedObject[int] => Number accesses.
+ if ((obj->is<TypedArrayObject>() || IsPrimitiveArrayTypedObject(obj)) &&
+ rhs.isNumber() &&
+ res.isNumber() &&
+ !TypedArrayGetElemStubExists(stub, obj))
+ {
+ if (!cx->runtime()->jitSupportsFloatingPoint &&
+ (TypedThingRequiresFloatingPoint(obj) || rhs.isDouble()))
+ {
+ return true;
+ }
+
+ // Don't attach typed object stubs if the underlying storage could be
+ // detached, as the stub will always bail out.
+ if (IsPrimitiveArrayTypedObject(obj) && cx->compartment()->detachedTypedObjects)
+ return true;
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetElem(TypedArray[Int32]) stub");
+ ICGetElem_TypedArray::Compiler compiler(cx, obj->maybeShape(), TypedThingElementType(obj));
+ ICStub* typedArrayStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!typedArrayStub)
+ return false;
+
+ stub->addNewStub(typedArrayStub);
+ *attached = true;
+ return true;
+ }
+
+ // GetElem operations on non-native objects cannot be cached by either
+ // Baseline or Ion. Indicate this in the cache so that Ion does not
+ // generate a cache for this op.
+ if (!obj->isNative())
+ stub->noteNonNativeAccess();
+
+ // GetElem operations which could access negative indexes generally can't
+ // be optimized without the potential for bailouts, as we can't statically
+ // determine that an object has no properties on such indexes.
+ if (rhs.isNumber() && rhs.toNumber() < 0)
+ stub->noteNegativeIndex();
+
+ return true;
+}
+
+static bool
+DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, frame, stub_->icEntry());
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICGetElem_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(frame->script());
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetElem(%s)", CodeName[op]);
+
+ MOZ_ASSERT(op == JSOP_GETELEM || op == JSOP_CALLELEM);
+
+ // Don't pass lhs directly, we need it when generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+
+ bool isOptimizedArgs = false;
+ if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS)) {
+ // Handle optimized arguments[i] access.
+ if (!GetElemOptimizedArguments(cx, frame, &lhsCopy, rhs, res, &isOptimizedArgs))
+ return false;
+ if (isOptimizedArgs)
+ TypeScript::Monitor(cx, frame->script(), pc, res);
+ }
+
+ bool attached = false;
+ if (stub->numOptimizedStubs() >= ICGetElem_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+ // But for now we just bail.
+ stub->noteUnoptimizableAccess();
+ attached = true;
+ }
+
+ // Try to attach an optimized getter stub.
+ bool isTemporarilyUnoptimizable = false;
+ if (!attached && lhs.isObject() && lhs.toObject().isNative()) {
+ if (rhs.isString()) {
+ RootedScript rootedScript(cx, frame->script());
+ RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
+ if (!TryAttachNativeGetAccessorElemStub<PropertyName*>(cx, rootedScript, pc, stub,
+ obj, rhs, &attached,
+ &isTemporarilyUnoptimizable))
+ {
+ return false;
+ }
+ script = rootedScript;
+ } else if (rhs.isSymbol()) {
+ RootedScript rootedScript(cx, frame->script());
+ RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
+ if (!TryAttachNativeGetAccessorElemStub<JS::Symbol*>(cx, rootedScript, pc, stub,
+ obj, rhs, &attached,
+ &isTemporarilyUnoptimizable))
+ {
+ return false;
+ }
+ script = rootedScript;
+ }
+ }
+
+ if (!isOptimizedArgs) {
+ if (!GetElementOperation(cx, op, &lhsCopy, rhs, res))
+ return false;
+ TypeScript::Monitor(cx, frame->script(), pc, res);
+ }
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Add a type monitor stub for the resulting value.
+ if (!stub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ if (attached)
+ return true;
+
+ // Try to attach an optimized stub.
+ if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res, &attached))
+ return false;
+
+ if (!attached && !isTemporarilyUnoptimizable)
+ stub->noteUnoptimizableAccess();
+
+ return true;
+}
+
+typedef bool (*DoGetElemFallbackFn)(JSContext*, BaselineFrame*, ICGetElem_Fallback*,
+ HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoGetElemFallbackInfo =
+ FunctionInfo<DoGetElemFallbackFn>(DoGetElemFallback, "DoGetElemFallback", TailCall,
+ PopValues(2));
+
+bool
+ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoGetElemFallbackInfo, masm);
+}
+
+//
+// GetElem_NativeSlot
+//
+
+static bool
+DoAtomizeString(JSContext* cx, HandleString string, MutableHandleValue result)
+{
+ JitSpew(JitSpew_BaselineIC, " AtomizeString called");
+
+ RootedValue key(cx, StringValue(string));
+
+ // Convert to interned property name.
+ RootedId id(cx);
+ if (!ValueToId<CanGC>(cx, key, &id))
+ return false;
+
+ if (!JSID_IS_ATOM(id)) {
+ result.set(key);
+ return true;
+ }
+
+ result.set(StringValue(JSID_TO_ATOM(id)));
+ return true;
+}
+
+typedef bool (*DoAtomizeStringFn)(JSContext*, HandleString, MutableHandleValue);
+static const VMFunction DoAtomizeStringInfo = FunctionInfo<DoAtomizeStringFn>(DoAtomizeString,
+ "DoAtomizeString");
+
+template <class T>
+bool
+ICGetElemNativeCompiler<T>::emitCallNative(MacroAssembler& masm, Register objReg)
+{
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ regs.takeUnchecked(objReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ enterStubFrame(masm, regs.getAny());
+
+ // Push object.
+ masm.push(objReg);
+
+ // Push native callee.
+ masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()), objReg);
+ masm.push(objReg);
+
+ regs.add(objReg);
+
+ // Call helper.
+ if (!callVM(DoCallNativeGetterInfo, masm))
+ return false;
+
+ leaveStubFrame(masm);
+
+ return true;
+}
+
+template <class T>
+bool
+ICGetElemNativeCompiler<T>::emitCallScripted(MacroAssembler& masm, Register objReg)
+{
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ regs.takeUnchecked(objReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ // Enter stub frame.
+ enterStubFrame(masm, regs.getAny());
+
+ // Align the stack such that the JitFrameLayout is aligned on
+ // JitStackAlignment.
+ masm.alignJitStackBasedOnNArgs(0);
+
+ // Push |this| for getter (target object).
+ {
+ ValueOperand val = regs.takeAnyValue();
+ masm.tagValue(JSVAL_TYPE_OBJECT, objReg, val);
+ masm.Push(val);
+ regs.add(val);
+ }
+
+ regs.add(objReg);
+
+ Register callee = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()), callee);
+
+ // Push argc, callee, and descriptor.
+ {
+ Register callScratch = regs.takeAny();
+ EmitBaselineCreateStubFrameDescriptor(masm, callScratch, JitFrameLayout::Size());
+ masm.Push(Imm32(0)); // ActualArgc is 0
+ masm.Push(callee);
+ masm.Push(callScratch);
+ regs.add(callScratch);
+ }
+
+ Register code = regs.takeAnyExcluding(ArgumentsRectifierReg);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
+ masm.loadBaselineOrIonRaw(code, code, nullptr);
+
+ Register scratch = regs.takeAny();
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
+ masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != code);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+ masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+ masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ leaveStubFrame(masm, true);
+
+ return true;
+}
+
+template <class T>
+bool
+ICGetElemNativeCompiler<T>::emitCheckKey(MacroAssembler& masm, Label& failure)
+{
+ MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
+ return false;
+}
+
+template <>
+bool
+ICGetElemNativeCompiler<JS::Symbol*>::emitCheckKey(MacroAssembler& masm, Label& failure)
+{
+ MOZ_ASSERT(!needsAtomize_);
+ masm.branchTestSymbol(Assembler::NotEqual, R1, &failure);
+ Address symbolAddr(ICStubReg, ICGetElemNativeStubImpl<JS::Symbol*>::offsetOfKey());
+ Register symExtract = masm.extractObject(R1, ExtractTemp1);
+ masm.branchPtr(Assembler::NotEqual, symbolAddr, symExtract, &failure);
+ return true;
+}
+
+template <>
+bool
+ICGetElemNativeCompiler<PropertyName*>::emitCheckKey(MacroAssembler& masm, Label& failure)
+{
+ masm.branchTestString(Assembler::NotEqual, R1, &failure);
+ // Check key identity. Don't automatically fail if this fails, since the incoming
+ // key may be a non-interned string. Switch to a slow-path VM-call based check.
+ Address nameAddr(ICStubReg, ICGetElemNativeStubImpl<PropertyName*>::offsetOfKey());
+ Register strExtract = masm.extractString(R1, ExtractTemp1);
+
+ // If needsAtomize_ is true, and the string is not already an atom, then atomize the
+ // string before proceeding.
+ if (needsAtomize_) {
+ Label skipAtomize;
+
+ // If string is already an atom, skip the atomize.
+ masm.branchTest32(Assembler::NonZero,
+ Address(strExtract, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT),
+ &skipAtomize);
+
+ // Stow R0.
+ EmitStowICValues(masm, 1);
+
+ enterStubFrame(masm, R0.scratchReg());
+
+ // Atomize the string into a new value.
+ masm.push(strExtract);
+ if (!callVM(DoAtomizeStringInfo, masm))
+ return false;
+
+ // Atomized string is now in JSReturnOperand (R0).
+ // Leave stub frame, move atomized string into R1.
+ MOZ_ASSERT(R0 == JSReturnOperand);
+ leaveStubFrame(masm);
+ masm.moveValue(JSReturnOperand, R1);
+
+ // Unstow R0
+ EmitUnstowICValues(masm, 1);
+
+ // Extract string from R1 again.
+ DebugOnly<Register> strExtract2 = masm.extractString(R1, ExtractTemp1);
+ MOZ_ASSERT(Register(strExtract2) == strExtract);
+
+ masm.bind(&skipAtomize);
+ }
+
+ // Key has been atomized if necessary. Do identity check on string pointer.
+ masm.branchPtr(Assembler::NotEqual, nameAddr, strExtract, &failure);
+ return true;
+}
+
+template <class T>
+bool
+ICGetElemNativeCompiler<T>::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Label failurePopR1;
+ bool popR1 = false;
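+ // popR1 records whether R1's scratch register was pushed (to free it up as
+ // a holder register below); failure paths taken after that point must pop
+ // it again via failurePopR1.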
+
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox object.
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+ // Check object shape/group.
+ GuardReceiverObject(masm, ReceiverGuard(obj_), objReg, scratchReg,
+ ICGetElemNativeStub::offsetOfReceiverGuard(), &failure);
+
+ // Since this stub sometimes enters a stub frame, we manually set this to true (lie).
+#ifdef DEBUG
+ entersStubFrame_ = true;
+#endif
+
+ if (!emitCheckKey(masm, failure))
+ return false;
+
+ Register holderReg;
+ if (obj_ == holder_) {
+ holderReg = objReg;
+
+ if (obj_->is<UnboxedPlainObject>() && acctype_ != ICGetElemNativeStub::UnboxedProperty) {
+ // The property will be loaded off the unboxed expando.
+ masm.push(R1.scratchReg());
+ popR1 = true;
+ holderReg = R1.scratchReg();
+ masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
+ }
+ } else {
+ // Shape guard holder.
+ if (regs.empty()) {
+ masm.push(R1.scratchReg());
+ popR1 = true;
+ holderReg = R1.scratchReg();
+ } else {
+ holderReg = regs.takeAny();
+ }
+
+ if (kind == ICStub::GetElem_NativePrototypeCallNativeName ||
+ kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol)
+ {
+ masm.loadPtr(Address(ICStubReg,
+ ICGetElemNativePrototypeCallStub<T>::offsetOfHolder()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg,
+ ICGetElemNativePrototypeCallStub<T>::offsetOfHolderShape()),
+ scratchReg);
+ } else {
+ masm.loadPtr(Address(ICStubReg,
+ ICGetElem_NativePrototypeSlot<T>::offsetOfHolder()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg,
+ ICGetElem_NativePrototypeSlot<T>::offsetOfHolderShape()),
+ scratchReg);
+ }
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratchReg,
+ popR1 ? &failurePopR1 : &failure);
+ }
+
+ if (acctype_ == ICGetElemNativeStub::DynamicSlot ||
+ acctype_ == ICGetElemNativeStub::FixedSlot)
+ {
+ masm.load32(Address(ICStubReg, ICGetElemNativeSlotStub<T>::offsetOfOffset()),
+ scratchReg);
+
+ // Load from object.
+ if (acctype_ == ICGetElemNativeStub::DynamicSlot)
+ masm.addPtr(Address(holderReg, NativeObject::offsetOfSlots()), scratchReg);
+ else
+ masm.addPtr(holderReg, scratchReg);
+
+ Address valAddr(scratchReg, 0);
+ masm.loadValue(valAddr, R0);
+ if (popR1)
+ masm.addToStackPtr(ImmWord(sizeof(size_t)));
+
+ } else if (acctype_ == ICGetElemNativeStub::UnboxedProperty) {
+ masm.load32(Address(ICStubReg, ICGetElemNativeSlotStub<T>::offsetOfOffset()),
+ scratchReg);
+ masm.loadUnboxedProperty(BaseIndex(objReg, scratchReg, TimesOne), unboxedType_,
+ TypedOrValueRegister(R0));
+ if (popR1)
+ masm.addToStackPtr(ImmWord(sizeof(size_t)));
+ } else {
+ MOZ_ASSERT(acctype_ == ICGetElemNativeStub::NativeGetter ||
+ acctype_ == ICGetElemNativeStub::ScriptedGetter);
+ MOZ_ASSERT(kind == ICStub::GetElem_NativePrototypeCallNativeName ||
+ kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol);
+
+ if (acctype_ == ICGetElemNativeStub::NativeGetter) {
+ // If calling a native getter, there is no chance of failure now.
+
+ // GetElem key (R1) is no longer needed.
+ if (popR1)
+ masm.addToStackPtr(ImmWord(sizeof(size_t)));
+
+ if (!emitCallNative(masm, objReg))
+ return false;
+
+ } else {
+ MOZ_ASSERT(acctype_ == ICGetElemNativeStub::ScriptedGetter);
+
+ // Load function in scratchReg and ensure that it has a jit script.
+ masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()),
+ scratchReg);
+ masm.branchIfFunctionHasNoScript(scratchReg, popR1 ? &failurePopR1 : &failure);
+ masm.loadPtr(Address(scratchReg, JSFunction::offsetOfNativeOrScript()), scratchReg);
+ masm.loadBaselineOrIonRaw(scratchReg, scratchReg, popR1 ? &failurePopR1 : &failure);
+
+ // At this point, we are guaranteed to successfully complete.
+ if (popR1)
+ masm.addToStackPtr(Imm32(sizeof(size_t)));
+
+ if (!emitCallScripted(masm, objReg))
+ return false;
+ }
+ }
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ if (popR1) {
+ masm.bind(&failurePopR1);
+ masm.pop(R1.scratchReg());
+ }
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+//
+// GetElem_String
+//
+
+bool
+ICGetElem_String::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox string in R0.
+ Register str = masm.extractString(R0, ExtractTemp0);
+
+ // Check for non-linear strings.
+ masm.branchIfRope(str, &failure);
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ // Bounds check.
+ masm.branch32(Assembler::BelowOrEqual, Address(str, JSString::offsetOfLength()),
+ key, &failure);
+
+ // Get char code.
+ masm.loadStringChar(str, key, scratchReg);
+
+ // Check if char code >= UNIT_STATIC_LIMIT.
+ masm.branch32(Assembler::AboveOrEqual, scratchReg, Imm32(StaticStrings::UNIT_STATIC_LIMIT),
+ &failure);
+
+ // Load static string.
+ masm.movePtr(ImmPtr(&cx->staticStrings().unitStaticTable), str);
+ masm.loadPtr(BaseIndex(str, scratchReg, ScalePointer), str);
+
+ // Return.
+ masm.tagValue(JSVAL_TYPE_STRING, str, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// GetElem_Dense
+//
+
+bool
+ICGetElem_Dense::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and shape guard.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICGetElem_Dense::offsetOfShape()), scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratchReg);
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ // Bounds check.
+ Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
+
+ // Hole check and load value.
+ BaseObjectElementIndex element(scratchReg, key);
+ masm.branchTestMagic(Assembler::Equal, element, &failure);
+
+ // Load value from element location.
+ masm.loadValue(element, R0);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// GetElem_UnboxedArray
+//
+
+bool
+ICGetElem_UnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and group guard.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICGetElem_UnboxedArray::offsetOfGroup()), scratchReg);
+ masm.branchTestObjGroup(Assembler::NotEqual, obj, scratchReg, &failure);
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ // Bounds check.
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()),
+ scratchReg);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratchReg);
+ masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratchReg);
+
+ // Load value.
+ size_t width = UnboxedTypeSize(elementType_);
+ BaseIndex addr(scratchReg, key, ScaleFromElemWidth(width));
+ masm.loadUnboxedProperty(addr, elementType_, R0);
+
+ // Only monitor the result if its type might change.
+ if (elementType_ == JSVAL_TYPE_OBJECT)
+ EmitEnterTypeMonitorIC(masm);
+ else
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// GetElem_TypedArray
+//
+
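+// Load the array length of a typed array or typed object into |result|.
+// Typed arrays keep their length in a fixed slot; outline and inline typed
+// objects reach it through their group's addendum, an ArrayTypeDescr.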
+static void
+LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result)
+{
+ switch (layout) {
+ case Layout_TypedArray:
+ masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result);
+ break;
+ case Layout_OutlineTypedObject:
+ case Layout_InlineTypedObject:
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), result);
+ masm.loadPtr(Address(result, ObjectGroup::offsetOfAddendum()), result);
+ masm.unboxInt32(Address(result, ArrayTypeDescr::offsetOfLength()), result);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+bool
+ICGetElem_TypedArray::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ if (layout_ != Layout_TypedArray)
+ CheckForTypedObjectWithDetachedStorage(cx, masm, &failure);
+
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and shape guard.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICGetElem_TypedArray::offsetOfShape()), scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+ // Ensure the index is an integer.
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ Label isInt32;
+ masm.branchTestInt32(Assembler::Equal, R1, &isInt32);
+ {
+ // If the index is a double, try to convert it to int32. It's okay
+ // to convert -0 to 0: the shape check ensures the object is a typed
+ // array so the difference is not observable.
+ masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
+ masm.unboxDouble(R1, FloatReg0);
+ masm.convertDoubleToInt32(FloatReg0, scratchReg, &failure, /* negZeroCheck = */false);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R1);
+ }
+ masm.bind(&isInt32);
+ } else {
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+ }
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ // Bounds check.
+ LoadTypedThingLength(masm, layout_, obj, scratchReg);
+ masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
+
+ // Load the elements vector.
+ LoadTypedThingData(masm, layout_, obj, scratchReg);
+
+ // Load the value.
+ BaseIndex source(scratchReg, key, ScaleFromElemWidth(Scalar::byteSize(type_)));
+ masm.loadFromTypedArray(type_, source, R0, false, scratchReg, &failure);
+
+    // TODO: Allow loading doubles from uint32 arrays, but this requires monitoring.
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// GetElem_Arguments
+//
+bool
+ICGetElem_Arguments::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ if (which_ == ICGetElem_Arguments::Magic) {
+ // Ensure that this is a magic arguments value.
+ masm.branchTestMagicValue(Assembler::NotEqual, R0, JS_OPTIMIZED_ARGUMENTS, &failure);
+
+        // Ensure that the frame has not created an arguments object in the meantime.
+ masm.branchTest32(Assembler::NonZero,
+ Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ),
+ &failure);
+
+ // Ensure that index is an integer.
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+ Register idx = masm.extractInt32(R1, ExtractTemp1);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ // Load num actual arguments
+ Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
+ masm.loadPtr(actualArgs, scratch);
+
+ // Ensure idx < argc
+ masm.branch32(Assembler::AboveOrEqual, idx, scratch, &failure);
+
+ // Load argval
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), scratch);
+ BaseValueIndex element(scratch, idx);
+ masm.loadValue(element, R0);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+ }
+
+ MOZ_ASSERT(which_ == ICGetElem_Arguments::Mapped ||
+ which_ == ICGetElem_Arguments::Unmapped);
+
+ const Class* clasp = (which_ == ICGetElem_Arguments::Mapped)
+ ? &MappedArgumentsObject::class_
+ : &UnmappedArgumentsObject::class_;
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Guard on input being an arguments object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+ masm.branchTestObjClass(Assembler::NotEqual, objReg, scratchReg, clasp, &failure);
+
+ // Guard on index being int32
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+ Register idxReg = masm.extractInt32(R1, ExtractTemp1);
+
+ // Get initial ArgsObj length value.
+ masm.unboxInt32(Address(objReg, ArgumentsObject::getInitialLengthSlotOffset()), scratchReg);
+
+ // Test if length or any element have been overridden.
+ masm.branchTest32(Assembler::NonZero,
+ scratchReg,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
+ ArgumentsObject::ELEMENT_OVERRIDDEN_BIT),
+ &failure);
+
+    // Length has not been overridden; ensure that the index is an integer and is < length.
+ masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratchReg);
+ masm.branch32(Assembler::AboveOrEqual, idxReg, scratchReg, &failure);
+
+ // Length check succeeded, now check the correct bit. We clobber potential type regs
+ // now. Inputs will have to be reconstructed if we fail after this point, but that's
+ // unlikely.
+ Label failureReconstructInputs;
+ regs = availableGeneralRegs(0);
+ regs.takeUnchecked(objReg);
+ regs.takeUnchecked(idxReg);
+ regs.take(scratchReg);
+ Register argData = regs.takeAny();
+
+ // Load ArgumentsData
+ masm.loadPrivate(Address(objReg, ArgumentsObject::getDataSlotOffset()), argData);
+
+ // Fail if we have a RareArgumentsData (elements were deleted).
+ masm.branchPtr(Assembler::NotEqual,
+ Address(argData, offsetof(ArgumentsData, rareData)),
+ ImmWord(0),
+ &failureReconstructInputs);
+
+ // Load the value. Use scratchReg to form a ValueOperand to load into.
+ masm.addPtr(Imm32(ArgumentsData::offsetOfArgs()), argData);
+ regs.add(scratchReg);
+ ValueOperand tempVal = regs.takeAnyValue();
+ masm.loadValue(BaseValueIndex(argData, idxReg), tempVal);
+
+    // Make sure that this is not a FORWARD_TO_CALL_SLOT magic value.
+ masm.branchTestMagic(Assembler::Equal, tempVal, &failureReconstructInputs);
+
+ // Copy value from temp to R0.
+ masm.moveValue(tempVal, R0);
+
+ // Type-check result
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failed, but inputs are deconstructed into object and int, and need to be
+ // reconstructed into values.
+ masm.bind(&failureReconstructInputs);
+ masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
+ masm.tagValue(JSVAL_TYPE_INT32, idxReg, R1);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// SetElem_Fallback
+//
+
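+// Check whether the shapes recorded in an existing Add stub still match the
+// object and every object on its prototype chain, so the stub can be reused.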
+static bool
+SetElemAddHasSameShapes(ICSetElem_DenseOrUnboxedArrayAdd* stub, JSObject* obj)
+{
+ static const size_t MAX_DEPTH = ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH;
+ ICSetElem_DenseOrUnboxedArrayAddImpl<MAX_DEPTH>* nstub = stub->toImplUnchecked<MAX_DEPTH>();
+
+ if (obj->maybeShape() != nstub->shape(0))
+ return false;
+
+ JSObject* proto = obj->staticPrototype();
+ for (size_t i = 0; i < stub->protoChainDepth(); i++) {
+ if (!proto->isNative())
+ return false;
+ if (proto->as<NativeObject>().lastProperty() != nstub->shape(i + 1))
+ return false;
+        proto = proto->staticPrototype();
+ if (!proto) {
+ if (i != stub->protoChainDepth() - 1)
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
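+// Return true if the fallback stub's chain already has an equivalent
+// dense/unboxed-array SetElem stub for this object's group and shape(s).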
+static bool
+DenseOrUnboxedArraySetElemStubExists(JSContext* cx, ICStub::Kind kind,
+ ICSetElem_Fallback* stub, HandleObject obj)
+{
+ MOZ_ASSERT(kind == ICStub::SetElem_DenseOrUnboxedArray ||
+ kind == ICStub::SetElem_DenseOrUnboxedArrayAdd);
+
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (kind == ICStub::SetElem_DenseOrUnboxedArray && iter->isSetElem_DenseOrUnboxedArray()) {
+ ICSetElem_DenseOrUnboxedArray* nstub = iter->toSetElem_DenseOrUnboxedArray();
+ if (obj->maybeShape() == nstub->shape() && obj->getGroup(cx) == nstub->group())
+ return true;
+ }
+
+ if (kind == ICStub::SetElem_DenseOrUnboxedArrayAdd && iter->isSetElem_DenseOrUnboxedArrayAdd()) {
+ ICSetElem_DenseOrUnboxedArrayAdd* nstub = iter->toSetElem_DenseOrUnboxedArrayAdd();
+ if (obj->getGroup(cx) == nstub->group() && SetElemAddHasSameShapes(nstub, obj))
+ return true;
+ }
+ }
+ return false;
+}
+
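+// Return true if a SetElem_TypedArray stub matching this object's shape and
+// the expected out-of-bounds behavior is already attached.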
+static bool
+TypedArraySetElemStubExists(ICSetElem_Fallback* stub, HandleObject obj, bool expectOOB)
+{
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (!iter->isSetElem_TypedArray())
+ continue;
+ ICSetElem_TypedArray* taStub = iter->toSetElem_TypedArray();
+ if (obj->maybeShape() == taStub->shape() && taStub->expectOutOfBounds() == expectOOB)
+ return true;
+ }
+ return false;
+}
+
+static bool
+RemoveExistingTypedArraySetElemStub(JSContext* cx, ICSetElem_Fallback* stub, HandleObject obj)
+{
+ for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
+ if (!iter->isSetElem_TypedArray())
+ continue;
+
+ if (obj->maybeShape() != iter->toSetElem_TypedArray()->shape())
+ continue;
+
+        // TypedArraySetElem stubs are only removed by this procedure when they
+        // are being replaced with one that expects an out-of-bounds index.
+ MOZ_ASSERT(!iter->toSetElem_TypedArray()->expectOutOfBounds());
+ iter.unlink(cx);
+ return true;
+ }
+ return false;
+}
+
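+// Decide whether the SETELEM that just ran can be handled by an optimized
+// stub. Distinguishes the plain overwrite case from the "adding" case, where
+// the write appended exactly one element at the old initialized length, e.g.
+// arr[arr.length] = v on a dense array.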
+static bool
+CanOptimizeDenseOrUnboxedArraySetElem(JSObject* obj, uint32_t index,
+ Shape* oldShape, uint32_t oldCapacity, uint32_t oldInitLength,
+ bool* isAddingCaseOut, size_t* protoDepthOut)
+{
+ uint32_t initLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
+ uint32_t capacity = GetAnyBoxedOrUnboxedCapacity(obj);
+
+ *isAddingCaseOut = false;
+ *protoDepthOut = 0;
+
+ // Some initial sanity checks.
+ if (initLength < oldInitLength || capacity < oldCapacity)
+ return false;
+
+ // Unboxed arrays need to be able to emit floating point code.
+ if (obj->is<UnboxedArrayObject>() && !obj->runtimeFromMainThread()->jitSupportsFloatingPoint)
+ return false;
+
+ Shape* shape = obj->maybeShape();
+
+ // Cannot optimize if the shape changed.
+ if (oldShape != shape)
+ return false;
+
+ // Cannot optimize if the capacity changed.
+ if (oldCapacity != capacity)
+ return false;
+
+ // Cannot optimize if the index doesn't fit within the new initialized length.
+ if (index >= initLength)
+ return false;
+
+ // Cannot optimize if the value at position after the set is a hole.
+ if (obj->isNative() && !obj->as<NativeObject>().containsDenseElement(index))
+ return false;
+
+    // At this point, if the initLength did not change, then an optimized set
+    // is possible.
+ if (oldInitLength == initLength)
+ return true;
+
+ // If it did change, ensure that it changed specifically by incrementing by 1
+    // to accommodate this particular indexed set.
+ if (oldInitLength + 1 != initLength)
+ return false;
+ if (index != oldInitLength)
+ return false;
+
+ // The checks are not complete. The object may have a setter definition,
+ // either directly, or via a prototype, or via the target object for a prototype
+ // which is a proxy, that handles a particular integer write.
+ // Scan the prototype and shape chain to make sure that this is not the case.
+ if (obj->isIndexed())
+ return false;
+ JSObject* curObj = obj->staticPrototype();
+ while (curObj) {
+ ++*protoDepthOut;
+ if (!curObj->isNative() || curObj->isIndexed())
+ return false;
+ curObj = curObj->staticPrototype();
+ }
+
+ if (*protoDepthOut > ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH)
+ return false;
+
+ *isAddingCaseOut = true;
+ return true;
+}
+
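+// Fallback path for the SETELEM family of ops: perform the operation in the
+// VM, then try to attach an optimized dense/unboxed-array or typed-array stub
+// for subsequent executions of this site.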
+static bool
+DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_, Value* stack,
+ HandleValue objv, HandleValue index, HandleValue rhs)
+{
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICSetElem_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ RootedScript outerScript(cx, script);
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "SetElem(%s)", CodeName[JSOp(*pc)]);
+
+ MOZ_ASSERT(op == JSOP_SETELEM ||
+ op == JSOP_STRICTSETELEM ||
+ op == JSOP_INITELEM ||
+ op == JSOP_INITHIDDENELEM ||
+ op == JSOP_INITELEM_ARRAY ||
+ op == JSOP_INITELEM_INC);
+
+ RootedObject obj(cx, ToObjectFromStack(cx, objv));
+ if (!obj)
+ return false;
+
+ RootedShape oldShape(cx, obj->maybeShape());
+
+ // Check the old capacity
+ uint32_t oldCapacity = 0;
+ uint32_t oldInitLength = 0;
+ if (index.isInt32() && index.toInt32() >= 0) {
+ oldCapacity = GetAnyBoxedOrUnboxedCapacity(obj);
+ oldInitLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
+ }
+
+ if (op == JSOP_INITELEM || op == JSOP_INITHIDDENELEM) {
+ if (!InitElemOperation(cx, pc, obj, index, rhs))
+ return false;
+ } else if (op == JSOP_INITELEM_ARRAY) {
+ MOZ_ASSERT(uint32_t(index.toInt32()) <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce JSOP_INITELEM_ARRAY with an index exceeding "
+ "int32_t range");
+ MOZ_ASSERT(uint32_t(index.toInt32()) == GET_UINT32(pc));
+ if (!InitArrayElemOperation(cx, pc, obj, index.toInt32(), rhs))
+ return false;
+ } else if (op == JSOP_INITELEM_INC) {
+ if (!InitArrayElemOperation(cx, pc, obj, index.toInt32(), rhs))
+ return false;
+ } else {
+ if (!SetObjectElement(cx, obj, index, rhs, objv, JSOp(*pc) == JSOP_STRICTSETELEM, script, pc))
+ return false;
+ }
+
+ // Don't try to attach stubs that wish to be hidden. We don't know how to
+ // have different enumerability in the stubs for the moment.
+ if (op == JSOP_INITHIDDENELEM)
+ return true;
+
+ // Overwrite the object on the stack (pushed for the decompiler) with the rhs.
+ MOZ_ASSERT(stack[2] == objv);
+ stack[2] = rhs;
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ if (stub->numOptimizedStubs() >= ICSetElem_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+ // But for now we just bail.
+ return true;
+ }
+
+ // Try to generate new stubs.
+ if (IsNativeOrUnboxedDenseElementAccess(obj, index) && !rhs.isMagic(JS_ELEMENTS_HOLE)) {
+ bool addingCase;
+ size_t protoDepth;
+
+ if (CanOptimizeDenseOrUnboxedArraySetElem(obj, index.toInt32(),
+ oldShape, oldCapacity, oldInitLength,
+ &addingCase, &protoDepth))
+ {
+ RootedShape shape(cx, obj->maybeShape());
+ RootedObjectGroup group(cx, obj->getGroup(cx));
+ if (!group)
+ return false;
+
+ if (addingCase &&
+ !DenseOrUnboxedArraySetElemStubExists(cx, ICStub::SetElem_DenseOrUnboxedArrayAdd,
+ stub, obj))
+ {
+ JitSpew(JitSpew_BaselineIC,
+ " Generating SetElem_DenseOrUnboxedArrayAdd stub "
+ "(shape=%p, group=%p, protoDepth=%" PRIuSIZE ")",
+ shape.get(), group.get(), protoDepth);
+ ICSetElemDenseOrUnboxedArrayAddCompiler compiler(cx, obj, protoDepth);
+ ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!newStub)
+ return false;
+ if (compiler.needsUpdateStubs() &&
+ !newStub->addUpdateStubForValue(cx, outerScript, obj, JSID_VOIDHANDLE, rhs))
+ {
+ return false;
+ }
+
+ stub->addNewStub(newStub);
+ } else if (!addingCase &&
+ !DenseOrUnboxedArraySetElemStubExists(cx,
+ ICStub::SetElem_DenseOrUnboxedArray,
+ stub, obj))
+ {
+ JitSpew(JitSpew_BaselineIC,
+ " Generating SetElem_DenseOrUnboxedArray stub (shape=%p, group=%p)",
+ shape.get(), group.get());
+ ICSetElem_DenseOrUnboxedArray::Compiler compiler(cx, shape, group);
+ ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!newStub)
+ return false;
+ if (compiler.needsUpdateStubs() &&
+ !newStub->addUpdateStubForValue(cx, outerScript, obj, JSID_VOIDHANDLE, rhs))
+ {
+ return false;
+ }
+
+ stub->addNewStub(newStub);
+ }
+ }
+
+ return true;
+ }
+
+ if ((obj->is<TypedArrayObject>() || IsPrimitiveArrayTypedObject(obj)) &&
+ index.isNumber() &&
+ rhs.isNumber())
+ {
+ if (!cx->runtime()->jitSupportsFloatingPoint &&
+ (TypedThingRequiresFloatingPoint(obj) || index.isDouble()))
+ {
+ return true;
+ }
+
+ bool expectOutOfBounds;
+ double idx = index.toNumber();
+ if (obj->is<TypedArrayObject>()) {
+ expectOutOfBounds = (idx < 0 || idx >= double(obj->as<TypedArrayObject>().length()));
+ } else {
+ // Typed objects throw on out of bounds accesses. Don't attach
+ // a stub in this case.
+ if (idx < 0 || idx >= double(obj->as<TypedObject>().length()))
+ return true;
+ expectOutOfBounds = false;
+
+ // Don't attach stubs if the underlying storage for typed objects
+ // in the compartment could be detached, as the stub will always
+ // bail out.
+ if (cx->compartment()->detachedTypedObjects)
+ return true;
+ }
+
+ if (!TypedArraySetElemStubExists(stub, obj, expectOutOfBounds)) {
+ // Remove any existing TypedArraySetElemStub that doesn't handle out-of-bounds
+ if (expectOutOfBounds)
+ RemoveExistingTypedArraySetElemStub(cx, stub, obj);
+
+ Shape* shape = obj->maybeShape();
+ Scalar::Type type = TypedThingElementType(obj);
+
+ JitSpew(JitSpew_BaselineIC,
+ " Generating SetElem_TypedArray stub (shape=%p, type=%u, oob=%s)",
+ shape, type, expectOutOfBounds ? "yes" : "no");
+ ICSetElem_TypedArray::Compiler compiler(cx, shape, type, expectOutOfBounds);
+ ICStub* typedArrayStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!typedArrayStub)
+ return false;
+
+ stub->addNewStub(typedArrayStub);
+ return true;
+ }
+ }
+
+ return true;
+}
+
+typedef bool (*DoSetElemFallbackFn)(JSContext*, BaselineFrame*, ICSetElem_Fallback*, Value*,
+ HandleValue, HandleValue, HandleValue);
+static const VMFunction DoSetElemFallbackInfo =
+ FunctionInfo<DoSetElemFallbackFn>(DoSetElemFallback, "DoSetElemFallback", TailCall,
+ PopValues(2));
+
+bool
+ICSetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ // State: R0: object, R1: index, stack: rhs.
+ // For the decompiler, the stack has to be: object, index, rhs,
+ // so we push the index, then overwrite the rhs Value with R0
+ // and push the rhs value.
+ masm.pushValue(R1);
+ masm.loadValue(Address(masm.getStackPointer(), sizeof(Value)), R1);
+ masm.storeValue(R0, Address(masm.getStackPointer(), sizeof(Value)));
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1); // RHS
+
+ // Push index. On x86 and ARM two push instructions are emitted so use a
+ // separate register to store the old stack pointer.
+ masm.moveStackPtrTo(R1.scratchReg());
+ masm.pushValue(Address(R1.scratchReg(), 2 * sizeof(Value)));
+ masm.pushValue(R0); // Object.
+
+ // Push pointer to stack values, so that the stub can overwrite the object
+ // (pushed for the decompiler) with the rhs.
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(), 3 * sizeof(Value)), R0.scratchReg());
+ masm.push(R0.scratchReg());
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoSetElemFallbackInfo, masm);
+}
+
+void
+BaselineScript::noteArrayWriteHole(uint32_t pcOffset)
+{
+ ICEntry& entry = icEntryFromPCOffset(pcOffset);
+ ICFallbackStub* stub = entry.fallbackStub();
+
+ if (stub->isSetElem_Fallback())
+ stub->toSetElem_Fallback()->noteArrayWriteHole();
+}
+
+//
+// SetElem_DenseOrUnboxedArray
+//
+
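+// Emit a pre-barrier for an unboxed element store when the element type is a
+// GC pointer (object or string); other unboxed types need no pre-barrier.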
+template <typename T>
+void
+EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, T address, JSValueType type)
+{
+ if (type == JSVAL_TYPE_OBJECT)
+ EmitPreBarrier(masm, address, MIRType::Object);
+ else if (type == JSVAL_TYPE_STRING)
+ EmitPreBarrier(masm, address, MIRType::String);
+ else
+ MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
+}
+
+bool
+ICSetElem_DenseOrUnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // R0 = object
+ // R1 = key
+ // Stack = { ... rhs-value, <return-addr>? }
+ Label failure, failurePopR0;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and guard on its group and, if this is a native access, its shape.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArray::offsetOfGroup()),
+ scratchReg);
+ masm.branchTestObjGroup(Assembler::NotEqual, obj, scratchReg, &failure);
+ if (unboxedType_ == JSVAL_TYPE_MAGIC) {
+ masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArray::offsetOfShape()),
+ scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+ }
+
+ if (needsUpdateStubs()) {
+ // Stow both R0 and R1 (object and key)
+ // But R0 and R1 still hold their values.
+ EmitStowICValues(masm, 2);
+
+ // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
+ // Load rhs-value into R0
+ masm.loadValue(Address(masm.getStackPointer(), 2 * sizeof(Value) + ICStackValueOffset), R0);
+
+ // Call the type-update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+
+ // Restore object.
+ obj = masm.extractObject(R0, ExtractTemp0);
+
+        // Trigger post barriers here on the value being written. Fields to
+        // which objects can be written also need update stubs.
+ masm.Push(R1);
+ masm.loadValue(Address(masm.getStackPointer(), sizeof(Value) + ICStackValueOffset), R1);
+
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R0);
+ saveRegs.addUnchecked(obj);
+ saveRegs.add(ICStubReg);
+ emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
+
+ masm.Pop(R1);
+ }
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ if (unboxedType_ == JSVAL_TYPE_MAGIC) {
+ // Set element on a native object.
+
+ // Load obj->elements in scratchReg.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratchReg);
+
+ // Bounds check.
+ Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
+
+ // Hole check.
+ BaseIndex element(scratchReg, key, TimesEight);
+ masm.branchTestMagic(Assembler::Equal, element, &failure);
+
+ // Perform a single test to see if we either need to convert double
+ // elements, clone the copy on write elements in the object or fail
+ // due to a frozen element.
+ Label noSpecialHandling;
+ Address elementsFlags(scratchReg, ObjectElements::offsetOfFlags());
+ masm.branchTest32(Assembler::Zero, elementsFlags,
+ Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS |
+ ObjectElements::COPY_ON_WRITE |
+ ObjectElements::FROZEN),
+ &noSpecialHandling);
+
+ // Fail if we need to clone copy on write elements or to throw due
+ // to a frozen element.
+ masm.branchTest32(Assembler::NonZero, elementsFlags,
+ Imm32(ObjectElements::COPY_ON_WRITE |
+ ObjectElements::FROZEN),
+ &failure);
+
+ // Failure is not possible now. Free up registers.
+ regs.add(R0);
+ regs.add(R1);
+ regs.takeUnchecked(obj);
+ regs.takeUnchecked(key);
+
+ Address valueAddr(masm.getStackPointer(), ICStackValueOffset);
+
+ // We need to convert int32 values being stored into doubles. In this case
+ // the heap typeset is guaranteed to contain both int32 and double, so it's
+ // okay to store a double. Note that double arrays are only created by
+ // IonMonkey, so if we have no floating-point support Ion is disabled and
+ // there should be no double arrays.
+ if (cx->runtime()->jitSupportsFloatingPoint)
+ masm.convertInt32ValueToDouble(valueAddr, regs.getAny(), &noSpecialHandling);
+ else
+ masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
+
+ masm.bind(&noSpecialHandling);
+
+ ValueOperand tmpVal = regs.takeAnyValue();
+ masm.loadValue(valueAddr, tmpVal);
+ EmitPreBarrier(masm, element, MIRType::Value);
+ masm.storeValue(tmpVal, element);
+ } else {
+ // Set element on an unboxed array.
+
+ // Bounds check.
+ Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLength, scratchReg);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratchReg);
+ masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratchReg);
+
+ // Compute the address being written to.
+ BaseIndex address(scratchReg, key, ScaleFromElemWidth(UnboxedTypeSize(unboxedType_)));
+
+ EmitUnboxedPreBarrierForBaseline(masm, address, unboxedType_);
+
+ Address valueAddr(masm.getStackPointer(), ICStackValueOffset + sizeof(Value));
+ masm.Push(R0);
+ masm.loadValue(valueAddr, R0);
+ masm.storeUnboxedProperty(address, unboxedType_,
+ ConstantOrRegister(TypedOrValueRegister(R0)), &failurePopR0);
+ masm.Pop(R0);
+ }
+
+ EmitReturnFromIC(masm);
+
+ if (failurePopR0.used()) {
+ // Failure case: restore the value of R0
+ masm.bind(&failurePopR0);
+ masm.popValue(R0);
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// SetElem_DenseOrUnboxedArrayAdd
+//
+
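+// The Add stub is templated on the prototype chain depth so it can embed the
+// receiver's shape plus one shape per prototype it guards; pick the matching
+// specialization.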
+ICUpdatedStub*
+ICSetElemDenseOrUnboxedArrayAddCompiler::getStub(ICStubSpace* space)
+{
+ Rooted<ShapeVector> shapes(cx, ShapeVector(cx));
+ if (!shapes.append(obj_->maybeShape()))
+ return nullptr;
+
+ if (!GetProtoShapes(obj_, protoChainDepth_, &shapes))
+ return nullptr;
+
+ JS_STATIC_ASSERT(ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+
+ ICUpdatedStub* stub = nullptr;
+ switch (protoChainDepth_) {
+ case 0: stub = getStubSpecific<0>(space, shapes); break;
+ case 1: stub = getStubSpecific<1>(space, shapes); break;
+ case 2: stub = getStubSpecific<2>(space, shapes); break;
+ case 3: stub = getStubSpecific<3>(space, shapes); break;
+ case 4: stub = getStubSpecific<4>(space, shapes); break;
+ default: MOZ_CRASH("ProtoChainDepth too high.");
+ }
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+}
+
+bool
+ICSetElemDenseOrUnboxedArrayAddCompiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // R0 = object
+ // R1 = key
+ // Stack = { ... rhs-value, <return-addr>? }
+ Label failure, failurePopR0, failureUnstow;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and guard on its group and, if this is a native access, its shape.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAdd::offsetOfGroup()),
+ scratchReg);
+ masm.branchTestObjGroup(Assembler::NotEqual, obj, scratchReg, &failure);
+ if (unboxedType_ == JSVAL_TYPE_MAGIC) {
+ masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAddImpl<0>::offsetOfShape(0)),
+ scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+ }
+
+ // Stow both R0 and R1 (object and key)
+ // But R0 and R1 still hold their values.
+ EmitStowICValues(masm, 2);
+
+ uint32_t framePushedAfterStow = masm.framePushed();
+
+ // We may need to free up some registers.
+ regs = availableGeneralRegs(0);
+ regs.take(R0);
+ regs.take(scratchReg);
+
+ // Shape guard objects on the proto chain.
+ Register protoReg = regs.takeAny();
+ for (size_t i = 0; i < protoChainDepth_; i++) {
+ masm.loadObjProto(i == 0 ? obj : protoReg, protoReg);
+ masm.branchTestPtr(Assembler::Zero, protoReg, protoReg, &failureUnstow);
+ masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAddImpl<0>::offsetOfShape(i + 1)),
+ scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, protoReg, scratchReg, &failureUnstow);
+ }
+ regs.add(protoReg);
+ regs.add(scratchReg);
+
+ if (needsUpdateStubs()) {
+ // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
+        // Load rhs-value into R0.
+ masm.loadValue(Address(masm.getStackPointer(), 2 * sizeof(Value) + ICStackValueOffset), R0);
+
+ // Call the type-update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+ }
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+
+ // Restore object.
+ obj = masm.extractObject(R0, ExtractTemp0);
+
+ if (needsUpdateStubs()) {
+        // Trigger post barriers here on the value being written. Fields to
+        // which objects can be written also need update stubs.
+ masm.Push(R1);
+ masm.loadValue(Address(masm.getStackPointer(), sizeof(Value) + ICStackValueOffset), R1);
+
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R0);
+ saveRegs.addUnchecked(obj);
+ saveRegs.add(ICStubReg);
+ emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
+
+ masm.Pop(R1);
+ }
+
+ // Reset register set.
+ regs = availableGeneralRegs(2);
+ scratchReg = regs.takeAny();
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ if (unboxedType_ == JSVAL_TYPE_MAGIC) {
+ // Adding element to a native object.
+
+ // Load obj->elements in scratchReg.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratchReg);
+
+ // Bounds check (key == initLength)
+ Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, key, &failure);
+
+ // Capacity check.
+ Address capacity(scratchReg, ObjectElements::offsetOfCapacity());
+ masm.branch32(Assembler::BelowOrEqual, capacity, key, &failure);
+
+ // Check for copy on write elements.
+ Address elementsFlags(scratchReg, ObjectElements::offsetOfFlags());
+ masm.branchTest32(Assembler::NonZero, elementsFlags,
+ Imm32(ObjectElements::COPY_ON_WRITE |
+ ObjectElements::FROZEN),
+ &failure);
+
+ // Failure is not possible now. Free up registers.
+ regs.add(R0);
+ regs.add(R1);
+ regs.takeUnchecked(obj);
+ regs.takeUnchecked(key);
+
+ // Increment initLength before write.
+ masm.add32(Imm32(1), initLength);
+
+ // If length is now <= key, increment length before write.
+ Label skipIncrementLength;
+ Address length(scratchReg, ObjectElements::offsetOfLength());
+ masm.branch32(Assembler::Above, length, key, &skipIncrementLength);
+ masm.add32(Imm32(1), length);
+ masm.bind(&skipIncrementLength);
+
+ // Convert int32 values to double if convertDoubleElements is set. In this
+ // case the heap typeset is guaranteed to contain both int32 and double, so
+ // it's okay to store a double.
+ Label dontConvertDoubles;
+ masm.branchTest32(Assembler::Zero, elementsFlags,
+ Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+ &dontConvertDoubles);
+
+ Address valueAddr(masm.getStackPointer(), ICStackValueOffset);
+
+ // Note that double arrays are only created by IonMonkey, so if we have no
+ // floating-point support Ion is disabled and there should be no double arrays.
+ if (cx->runtime()->jitSupportsFloatingPoint)
+ masm.convertInt32ValueToDouble(valueAddr, regs.getAny(), &dontConvertDoubles);
+ else
+ masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
+ masm.bind(&dontConvertDoubles);
+
+ // Write the value. No need for pre-barrier since we're not overwriting an old value.
+ ValueOperand tmpVal = regs.takeAnyValue();
+ BaseIndex element(scratchReg, key, TimesEight);
+ masm.loadValue(valueAddr, tmpVal);
+ masm.storeValue(tmpVal, element);
+ } else {
+ // Adding element to an unboxed array.
+
+ // Bounds check (key == initLength)
+ Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLengthAddr, scratchReg);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratchReg);
+ masm.branch32(Assembler::NotEqual, scratchReg, key, &failure);
+
+ // Capacity check.
+ masm.checkUnboxedArrayCapacity(obj, RegisterOrInt32Constant(key), scratchReg, &failure);
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratchReg);
+
+ // Write the value first, since this can fail. No need for pre-barrier
+ // since we're not overwriting an old value.
+ masm.Push(R0);
+ Address valueAddr(masm.getStackPointer(), ICStackValueOffset + sizeof(Value));
+ masm.loadValue(valueAddr, R0);
+ BaseIndex address(scratchReg, key, ScaleFromElemWidth(UnboxedTypeSize(unboxedType_)));
+ masm.storeUnboxedProperty(address, unboxedType_,
+ ConstantOrRegister(TypedOrValueRegister(R0)), &failurePopR0);
+ masm.Pop(R0);
+
+ // Increment initialized length.
+ masm.add32(Imm32(1), initLengthAddr);
+
+ // If length is now <= key, increment length.
+ Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
+ Label skipIncrementLength;
+ masm.branch32(Assembler::Above, lengthAddr, key, &skipIncrementLength);
+ masm.add32(Imm32(1), lengthAddr);
+ masm.bind(&skipIncrementLength);
+ }
+
+ EmitReturnFromIC(masm);
+
+ if (failurePopR0.used()) {
+ // Failure case: restore the value of R0
+ masm.bind(&failurePopR0);
+ masm.popValue(R0);
+ masm.jump(&failure);
+ }
+
+ // Failure case - fail but first unstow R0 and R1
+ masm.bind(&failureUnstow);
+ masm.setFramePushed(framePushedAfterStow);
+ EmitUnstowICValues(masm, 2);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// SetElem_TypedArray
+//
+
+// Write an arbitrary value to a typed array or typed object address at dest.
+// If the value could not be converted to the appropriate format, jump to
+// failure or failureModifiedScratch.
+template <typename T>
+static void
+StoreToTypedArray(JSContext* cx, MacroAssembler& masm, Scalar::Type type, Address value, T dest,
+ Register scratch, Label* failure, Label* failureModifiedScratch)
+{
+ Label done;
+
+ if (type == Scalar::Float32 || type == Scalar::Float64) {
+ masm.ensureDouble(value, FloatReg0, failure);
+ if (type == Scalar::Float32) {
+ masm.convertDoubleToFloat32(FloatReg0, ScratchFloat32Reg);
+ masm.storeToTypedFloatArray(type, ScratchFloat32Reg, dest);
+ } else {
+ masm.storeToTypedFloatArray(type, FloatReg0, dest);
+ }
+ } else if (type == Scalar::Uint8Clamped) {
+ Label notInt32;
+ masm.branchTestInt32(Assembler::NotEqual, value, &notInt32);
+ masm.unboxInt32(value, scratch);
+ masm.clampIntToUint8(scratch);
+
+ Label clamped;
+ masm.bind(&clamped);
+ masm.storeToTypedIntArray(type, scratch, dest);
+ masm.jump(&done);
+
+ // If the value is a double, clamp to uint8 and jump back.
+ // Else, jump to failure.
+ masm.bind(&notInt32);
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.branchTestDouble(Assembler::NotEqual, value, failure);
+ masm.unboxDouble(value, FloatReg0);
+ masm.clampDoubleToUint8(FloatReg0, scratch);
+ masm.jump(&clamped);
+ } else {
+ masm.jump(failure);
+ }
+ } else {
+ Label notInt32;
+ masm.branchTestInt32(Assembler::NotEqual, value, &notInt32);
+ masm.unboxInt32(value, scratch);
+
+ Label isInt32;
+ masm.bind(&isInt32);
+ masm.storeToTypedIntArray(type, scratch, dest);
+ masm.jump(&done);
+
+ // If the value is a double, truncate and jump back.
+ // Else, jump to failure.
+ masm.bind(&notInt32);
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.branchTestDouble(Assembler::NotEqual, value, failure);
+ masm.unboxDouble(value, FloatReg0);
+ masm.branchTruncateDoubleMaybeModUint32(FloatReg0, scratch, failureModifiedScratch);
+ masm.jump(&isInt32);
+ } else {
+ masm.jump(failure);
+ }
+ }
+
+ masm.bind(&done);
+}
+
+bool
+ICSetElem_TypedArray::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ if (layout_ != Layout_TypedArray)
+ CheckForTypedObjectWithDetachedStorage(cx, masm, &failure);
+
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ // Unbox R0 and shape guard.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICSetElem_TypedArray::offsetOfShape()), scratchReg);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
+
+ // Ensure the index is an integer.
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ Label isInt32;
+ masm.branchTestInt32(Assembler::Equal, R1, &isInt32);
+ {
+ // If the index is a double, try to convert it to int32. It's okay
+ // to convert -0 to 0: the shape check ensures the object is a typed
+ // array so the difference is not observable.
+ masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
+ masm.unboxDouble(R1, FloatReg0);
+ masm.convertDoubleToInt32(FloatReg0, scratchReg, &failure, /* negZeroCheck = */false);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R1);
+ }
+ masm.bind(&isInt32);
+ } else {
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+ }
+
+ // Unbox key.
+ Register key = masm.extractInt32(R1, ExtractTemp1);
+
+ // Bounds check.
+ Label oobWrite;
+ LoadTypedThingLength(masm, layout_, obj, scratchReg);
+ masm.branch32(Assembler::BelowOrEqual, scratchReg, key,
+ expectOutOfBounds_ ? &oobWrite : &failure);
+
+ // Load the elements vector.
+ LoadTypedThingData(masm, layout_, obj, scratchReg);
+
+ BaseIndex dest(scratchReg, key, ScaleFromElemWidth(Scalar::byteSize(type_)));
+ Address value(masm.getStackPointer(), ICStackValueOffset);
+
+ // We need a second scratch register. It's okay to clobber the type tag of
+ // R0 or R1, as long as it's restored before jumping to the next stub.
+ regs = availableGeneralRegs(0);
+ regs.takeUnchecked(obj);
+ regs.takeUnchecked(key);
+ regs.take(scratchReg);
+ Register secondScratch = regs.takeAny();
+
+ Label failureModifiedSecondScratch;
+ StoreToTypedArray(cx, masm, type_, value, dest,
+ secondScratch, &failure, &failureModifiedSecondScratch);
+ EmitReturnFromIC(masm);
+
+ if (failureModifiedSecondScratch.used()) {
+ // Writing to secondScratch may have clobbered R0 or R1, restore them
+ // first.
+ masm.bind(&failureModifiedSecondScratch);
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, R0);
+ masm.tagValue(JSVAL_TYPE_INT32, key, R1);
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ if (expectOutOfBounds_) {
+ MOZ_ASSERT(layout_ == Layout_TypedArray);
+ masm.bind(&oobWrite);
+ EmitReturnFromIC(masm);
+ }
+ return true;
+}
+
+//
+// In_Fallback
+//
+
+static bool
+TryAttachDenseInStub(JSContext* cx, HandleScript outerScript, ICIn_Fallback* stub,
+ HandleValue key, HandleObject obj, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (!IsNativeDenseElementAccess(obj, key))
+ return true;
+
+ JitSpew(JitSpew_BaselineIC, " Generating In(Native[Int32] dense) stub");
+ ICIn_Dense::Compiler compiler(cx, obj->as<NativeObject>().lastProperty());
+ ICStub* denseStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!denseStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(denseStub);
+ return true;
+}
+
+static bool
+TryAttachNativeInStub(JSContext* cx, HandleScript outerScript, ICIn_Fallback* stub,
+ HandleValue key, HandleObject obj, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ RootedId id(cx);
+ if (!IsOptimizableElementPropertyName(cx, key, &id))
+ return true;
+
+ RootedPropertyName name(cx, JSID_TO_ATOM(id)->asPropertyName());
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape))
+ return false;
+
+ if (IsCacheableGetPropReadSlot(obj, holder, shape)) {
+ ICStub::Kind kind = (obj == holder) ? ICStub::In_Native
+ : ICStub::In_NativePrototype;
+ JitSpew(JitSpew_BaselineIC, " Generating In(Native %s) stub",
+ (obj == holder) ? "direct" : "prototype");
+ ICInNativeCompiler compiler(cx, kind, obj, holder, name);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!newStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(newStub);
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+TryAttachNativeInDoesNotExistStub(JSContext* cx, HandleScript outerScript,
+ ICIn_Fallback* stub, HandleValue key,
+ HandleObject obj, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ RootedId id(cx);
+ if (!IsOptimizableElementPropertyName(cx, key, &id))
+ return true;
+
+ // Check if does-not-exist can be confirmed on property.
+ RootedPropertyName name(cx, JSID_TO_ATOM(id)->asPropertyName());
+ RootedObject lastProto(cx);
+ size_t protoChainDepth = SIZE_MAX;
+ if (!CheckHasNoSuchProperty(cx, obj.get(), name.get(), lastProto.address(), &protoChainDepth))
+ return true;
+ MOZ_ASSERT(protoChainDepth < SIZE_MAX);
+
+ if (protoChainDepth > ICIn_NativeDoesNotExist::MAX_PROTO_CHAIN_DEPTH)
+ return true;
+
+ // Confirmed no-such-property. Add stub.
+ JitSpew(JitSpew_BaselineIC, " Generating In_NativeDoesNotExist stub");
+ ICInNativeDoesNotExistCompiler compiler(cx, obj, name, protoChainDepth);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!newStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(newStub);
+ return true;
+}
+
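+// Fallback path for JSOP_IN: evaluate the operator in the VM, then try to
+// attach a dense-element, native-property, or does-not-exist stub.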
+static bool
+DoInFallback(JSContext* cx, BaselineFrame* frame, ICIn_Fallback* stub_,
+ HandleValue key, HandleValue objValue, MutableHandleValue res)
+{
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICIn_Fallback*> stub(frame, stub_);
+
+ FallbackICSpew(cx, stub, "In");
+
+ if (!objValue.isObject()) {
+ ReportValueError(cx, JSMSG_IN_NOT_OBJECT, -1, objValue, nullptr);
+ return false;
+ }
+
+ RootedObject obj(cx, &objValue.toObject());
+
+ bool cond = false;
+ if (!OperatorIn(cx, key, obj, &cond))
+ return false;
+ res.setBoolean(cond);
+
+ if (stub.invalid())
+ return true;
+
+ if (stub->numOptimizedStubs() >= ICIn_Fallback::MAX_OPTIMIZED_STUBS)
+ return true;
+
+ if (obj->isNative()) {
+ RootedScript script(cx, frame->script());
+ bool attached = false;
+ if (cond) {
+ if (!TryAttachDenseInStub(cx, script, stub, key, obj, &attached))
+ return false;
+ if (attached)
+ return true;
+ if (!TryAttachNativeInStub(cx, script, stub, key, obj, &attached))
+ return false;
+ if (attached)
+ return true;
+ } else {
+ if (!TryAttachNativeInDoesNotExistStub(cx, script, stub, key, obj, &attached))
+ return false;
+ if (attached)
+ return true;
+ }
+ }
+
+ return true;
+}
+
+typedef bool (*DoInFallbackFn)(JSContext*, BaselineFrame*, ICIn_Fallback*, HandleValue,
+ HandleValue, MutableHandleValue);
+static const VMFunction DoInFallbackInfo =
+ FunctionInfo<DoInFallbackFn>(DoInFallback, "DoInFallback", TailCall, PopValues(2));
+
+bool
+ICIn_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Sync for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoInFallbackInfo, masm);
+}
+
+bool
+ICInNativeCompiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure, failurePopR0Scratch;
+
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ // Check key identity.
+ Register strExtract = masm.extractString(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICInNativeStub::offsetOfName()), scratch);
+ masm.branchPtr(Assembler::NotEqual, strExtract, scratch, &failure);
+
+ // Unbox and shape guard object.
+ Register objReg = masm.extractObject(R1, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICInNativeStub::offsetOfShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
+
+ if (kind == ICStub::In_NativePrototype) {
+        // Shape guard the holder. Use R0's scratchReg since on x86 there are not enough registers.
+ Register holderReg = R0.scratchReg();
+ masm.push(R0.scratchReg());
+ masm.loadPtr(Address(ICStubReg, ICIn_NativePrototype::offsetOfHolder()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg, ICIn_NativePrototype::offsetOfHolderShape()),
+ scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failurePopR0Scratch);
+ masm.addToStackPtr(Imm32(sizeof(size_t)));
+ }
+
+ masm.moveValue(BooleanValue(true), R0);
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failurePopR0Scratch);
+ masm.pop(R0.scratchReg());
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICStub*
+ICInNativeDoesNotExistCompiler::getStub(ICStubSpace* space)
+{
+ Rooted<ShapeVector> shapes(cx, ShapeVector(cx));
+ if (!shapes.append(obj_->as<NativeObject>().lastProperty()))
+ return nullptr;
+
+ if (!GetProtoShapes(obj_, protoChainDepth_, &shapes))
+ return nullptr;
+
+ JS_STATIC_ASSERT(ICIn_NativeDoesNotExist::MAX_PROTO_CHAIN_DEPTH == 8);
+
+ ICStub* stub = nullptr;
+ switch (protoChainDepth_) {
+ case 0: stub = getStubSpecific<0>(space, shapes); break;
+ case 1: stub = getStubSpecific<1>(space, shapes); break;
+ case 2: stub = getStubSpecific<2>(space, shapes); break;
+ case 3: stub = getStubSpecific<3>(space, shapes); break;
+ case 4: stub = getStubSpecific<4>(space, shapes); break;
+ case 5: stub = getStubSpecific<5>(space, shapes); break;
+ case 6: stub = getStubSpecific<6>(space, shapes); break;
+ case 7: stub = getStubSpecific<7>(space, shapes); break;
+ case 8: stub = getStubSpecific<8>(space, shapes); break;
+ default: MOZ_CRASH("ProtoChainDepth too high.");
+ }
+ if (!stub)
+ return nullptr;
+ return stub;
+}
+
+bool
+ICInNativeDoesNotExistCompiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure, failurePopR0Scratch;
+
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+#ifdef DEBUG
+ // Ensure that protoChainDepth_ matches the protoChainDepth stored on the stub.
+ {
+ Label ok;
+ masm.load16ZeroExtend(Address(ICStubReg, ICStub::offsetOfExtra()), scratch);
+ masm.branch32(Assembler::Equal, scratch, Imm32(protoChainDepth_), &ok);
+ masm.assumeUnreachable("Non-matching proto chain depth on stub.");
+ masm.bind(&ok);
+ }
+#endif // DEBUG
+
+ // Check key identity.
+ Register strExtract = masm.extractString(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICIn_NativeDoesNotExist::offsetOfName()), scratch);
+ masm.branchPtr(Assembler::NotEqual, strExtract, scratch, &failure);
+
+ // Unbox and guard against old shape.
+ Register objReg = masm.extractObject(R1, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICIn_NativeDoesNotExist::offsetOfShape(0)),
+ scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
+
+ // Check the proto chain.
+ Register protoReg = R0.scratchReg();
+ masm.push(R0.scratchReg());
+ for (size_t i = 0; i < protoChainDepth_; ++i) {
+ masm.loadObjProto(i == 0 ? objReg : protoReg, protoReg);
+ masm.branchTestPtr(Assembler::Zero, protoReg, protoReg, &failurePopR0Scratch);
+ size_t shapeOffset = ICIn_NativeDoesNotExistImpl<0>::offsetOfShape(i + 1);
+ masm.loadPtr(Address(ICStubReg, shapeOffset), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, protoReg, scratch, &failurePopR0Scratch);
+ }
+ masm.addToStackPtr(Imm32(sizeof(size_t)));
+
+ // Shape and type checks succeeded, ok to proceed.
+ masm.moveValue(BooleanValue(false), R0);
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failurePopR0Scratch);
+ masm.pop(R0.scratchReg());
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICIn_Dense::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ // Unbox and shape guard object.
+ Register obj = masm.extractObject(R1, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICIn_Dense::offsetOfShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, &failure);
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // Unbox key and bounds check.
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ Register key = masm.extractInt32(R0, ExtractTemp0);
+ masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
+
+ // Hole check.
+ JS_STATIC_ASSERT(sizeof(Value) == 8);
+ BaseIndex element(scratch, key, TimesEight);
+ masm.branchTestMagic(Assembler::Equal, element, &failure);
+
+ masm.moveValue(BooleanValue(true), R0);
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+// Try to update existing SetProp setter call stubs for the given holder in
+// place with a new shape and setter.
+static bool
+UpdateExistingSetPropCallStubs(ICSetProp_Fallback* fallbackStub,
+ ICStub::Kind kind,
+ NativeObject* holder,
+ JSObject* receiver,
+ JSFunction* setter)
+{
+ MOZ_ASSERT(kind == ICStub::SetProp_CallScripted ||
+ kind == ICStub::SetProp_CallNative);
+ MOZ_ASSERT(holder);
+ MOZ_ASSERT(receiver);
+
+ bool isOwnSetter = (holder == receiver);
+ bool foundMatchingStub = false;
+ ReceiverGuard receiverGuard(receiver);
+ for (ICStubConstIterator iter = fallbackStub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->kind() == kind) {
+ ICSetPropCallSetter* setPropStub = static_cast<ICSetPropCallSetter*>(*iter);
+ if (setPropStub->holder() == holder && setPropStub->isOwnSetter() == isOwnSetter) {
+ // If this is an own setter, update the receiver guard as well,
+ // since that's the shape we'll be guarding on. Furthermore,
+ // isOwnSetter() relies on holderShape_ and receiverGuard_ being
+ // the same shape.
+ if (isOwnSetter)
+ setPropStub->receiverGuard().update(receiverGuard);
+
+ MOZ_ASSERT(setPropStub->holderShape() != holder->lastProperty() ||
+ !setPropStub->receiverGuard().matches(receiverGuard),
+ "Why didn't we end up using this stub?");
+
+ // We want to update the holder shape to match the new one no
+ // matter what, even if the receiver shape is different.
+ setPropStub->holderShape() = holder->lastProperty();
+
+ // Make sure to update the setter, since a shape change might
+ // have changed which setter we want to use.
+ setPropStub->setter() = setter;
+ if (setPropStub->receiverGuard().matches(receiverGuard))
+ foundMatchingStub = true;
+ }
+ }
+ }
+
+ return foundMatchingStub;
+}
+
+// Attach an optimized stub for a GETGNAME/CALLGNAME slot-read op.
+static bool
+TryAttachGlobalNameValueStub(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICGetName_Fallback* stub,
+ Handle<LexicalEnvironmentObject*> globalLexical,
+ HandlePropertyName name, bool* attached)
+{
+ MOZ_ASSERT(globalLexical->isGlobal());
+ MOZ_ASSERT(!*attached);
+
+ RootedId id(cx, NameToId(name));
+
+ // The property must be found, and it must be found as a normal data property.
+    RootedShape shape(cx);
+ RootedNativeObject current(cx, globalLexical);
+ while (true) {
+ shape = current->lookup(cx, id);
+ if (shape)
+ break;
+ if (current == globalLexical) {
+ current = &globalLexical->global();
+ } else {
+ JSObject* proto = current->staticPrototype();
+ if (!proto || !proto->is<NativeObject>())
+ return true;
+ current = &proto->as<NativeObject>();
+ }
+ }
+
+ // Instantiate this global property, for use during Ion compilation.
+ if (IsIonEnabled(cx))
+ EnsureTrackPropertyTypes(cx, current, id);
+
+ if (shape->hasDefaultGetter() && shape->hasSlot()) {
+
+ // TODO: if there's a previous stub discard it, or just update its Shape + slot?
+
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+ ICStub* newStub;
+ if (current == globalLexical) {
+ MOZ_ASSERT(shape->slot() >= current->numFixedSlots());
+ uint32_t slot = shape->slot() - current->numFixedSlots();
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetName(GlobalName lexical) stub");
+ ICGetName_GlobalLexical::Compiler compiler(cx, monitorStub, slot);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ } else {
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ // Check the prototype chain from the global to the current
+            // prototype. Ignore the global lexical scope as it doesn't figure
+ // into the prototype chain. We guard on the global lexical
+ // scope's shape independently.
+ if (!IsCacheableGetPropReadSlot(&globalLexical->global(), current, shape))
+ return true;
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetName(GlobalName non-lexical) stub");
+ ICGetPropNativeCompiler compiler(cx, ICStub::GetName_Global,
+ ICStubCompiler::Engine::Baseline, monitorStub,
+ globalLexical, current, name, isFixedSlot, offset,
+ /* inputDefinitelyObject = */ true);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ }
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ }
+ return true;
+}
+
+// Attach an optimized stub for a GETGNAME/CALLGNAME getter op.
+static bool
+TryAttachGlobalNameAccessorStub(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICGetName_Fallback* stub,
+ Handle<LexicalEnvironmentObject*> globalLexical,
+ HandlePropertyName name, bool* attached,
+ bool* isTemporarilyUnoptimizable)
+{
+ MOZ_ASSERT(globalLexical->isGlobal());
+ RootedId id(cx, NameToId(name));
+
+ // There must not be a shadowing binding on the global lexical scope.
+ if (globalLexical->lookup(cx, id))
+ return true;
+
+ RootedGlobalObject global(cx, &globalLexical->global());
+
+ // The property must be found, and it must be found as a normal data property.
+ RootedShape shape(cx);
+ RootedNativeObject current(cx, global);
+ while (true) {
+ shape = current->lookup(cx, id);
+ if (shape)
+ break;
+ JSObject* proto = current->staticPrototype();
+ if (!proto || !proto->is<NativeObject>())
+ return true;
+ current = &proto->as<NativeObject>();
+ }
+
+ // Instantiate this global property, for use during Ion compilation.
+ if (IsIonEnabled(cx))
+ EnsureTrackPropertyTypes(cx, current, id);
+
+ // Try to add a getter stub. We don't handle scripted getters yet; if this
+ // changes we need to make sure IonBuilder::getPropTryCommonGetter (which
+ // requires a Baseline stub) handles non-outerized this objects correctly.
+ bool isScripted;
+ if (IsCacheableGetPropCall(cx, global, current, shape, &isScripted, isTemporarilyUnoptimizable) &&
+ !isScripted)
+ {
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+ RootedFunction getter(cx, &shape->getterObject()->as<JSFunction>());
+
+ // The CallNativeGlobal stub needs to generate 3 shape checks:
+ //
+ // 1. The global lexical scope shape check.
+ // 2. The global object shape check.
+ // 3. The holder shape check.
+ //
+ // 1 is done as the receiver check, as for GETNAME the global lexical scope is in the
+ // receiver position. 2 is done as a manual check that other GetProp stubs don't do. 3 is
+ // done as the holder check per normal.
+ //
+ // In the case the holder is the global object, check 2 is redundant but is not yet
+ // optimized away.
+ JitSpew(JitSpew_BaselineIC, " Generating GetName(GlobalName/NativeGetter) stub");
+ if (UpdateExistingGetPropCallStubs(stub, ICStub::GetProp_CallNativeGlobal, current,
+ globalLexical, getter))
+ {
+ *attached = true;
+ return true;
+ }
+ ICGetPropCallNativeCompiler compiler(cx, ICStub::GetProp_CallNativeGlobal,
+ ICStubCompiler::Engine::Baseline,
+ monitorStub, globalLexical, current,
+ getter, script->pcToOffset(pc),
+ /* outerClass = */ nullptr,
+ /* inputDefinitelyObject = */ true);
+
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ }
+ return true;
+}
+
+static bool
+TryAttachEnvNameStub(JSContext* cx, HandleScript script, ICGetName_Fallback* stub,
+ HandleObject initialEnvChain, HandlePropertyName name, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ Rooted<ShapeVector> shapes(cx, ShapeVector(cx));
+ RootedId id(cx, NameToId(name));
+ RootedObject envChain(cx, initialEnvChain);
+
+ Shape* shape = nullptr;
+ while (envChain) {
+ if (!shapes.append(envChain->maybeShape()))
+ return false;
+
+ if (envChain->is<GlobalObject>()) {
+ shape = envChain->as<GlobalObject>().lookup(cx, id);
+ if (shape)
+ break;
+ return true;
+ }
+
+ if (!envChain->is<EnvironmentObject>() || envChain->is<WithEnvironmentObject>())
+ return true;
+
+ // Check for an 'own' property on the env. There is no need to
+ // check the prototype as non-with scopes do not inherit properties
+ // from any prototype.
+ shape = envChain->as<NativeObject>().lookup(cx, id);
+ if (shape)
+ break;
+
+ envChain = envChain->enclosingEnvironment();
+ }
+
+ // We don't handle getters here. When this changes, we need to make sure
+ // IonBuilder::getPropTryCommonGetter (which requires a Baseline stub to
+ // work) handles non-outerized this objects correctly.
+
+ if (!IsCacheableGetPropReadSlot(envChain, envChain, shape))
+ return true;
+
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+ ICStub* newStub;
+
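+ // Each case guards a fixed number of environment shapes: the template
+ // parameter is the hop count, i.e. shapes.length() - 1. Chains longer than
+ // seven environments are left unoptimized.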
+ switch (shapes.length()) {
+ case 1: {
+ ICGetName_Env<0>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 2: {
+ ICGetName_Env<1>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 3: {
+ ICGetName_Env<2>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 4: {
+ ICGetName_Env<3>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 5: {
+ ICGetName_Env<4>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 6: {
+ ICGetName_Env<5>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ case 7: {
+ ICGetName_Env<6>::Compiler compiler(cx, monitorStub, Move(shapes.get()), isFixedSlot,
+ offset);
+ newStub = compiler.getStub(compiler.getStubSpace(script));
+ break;
+ }
+ default:
+ return true;
+ }
+
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+}
+
+static bool
+DoGetNameFallback(JSContext* cx, BaselineFrame* frame, ICGetName_Fallback* stub_,
+ HandleObject envChain, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, frame, stub_->icEntry());
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICGetName_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetName(%s)", CodeName[JSOp(*pc)]);
+
+ MOZ_ASSERT(op == JSOP_GETNAME || op == JSOP_GETGNAME);
+
+ RootedPropertyName name(cx, script->getName(pc));
+ bool attached = false;
+ bool isTemporarilyUnoptimizable = false;
+
+ // Attach new stub.
+ if (stub->numOptimizedStubs() >= ICGetName_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with generic stub.
+ attached = true;
+ }
+
+ if (!attached && IsGlobalOp(JSOp(*pc)) && !script->hasNonSyntacticScope()) {
+ if (!TryAttachGlobalNameAccessorStub(cx, script, pc, stub,
+ envChain.as<LexicalEnvironmentObject>(),
+ name, &attached, &isTemporarilyUnoptimizable))
+ {
+ return false;
+ }
+ }
+
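+ // A GETNAME immediately followed by TYPEOF must not throw for unbound
+ // names, so use the typeof-tolerant lookup in that case.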
+ static_assert(JSOP_GETGNAME_LENGTH == JSOP_GETNAME_LENGTH,
+ "Otherwise our check for JSOP_TYPEOF isn't ok");
+ if (JSOp(pc[JSOP_GETGNAME_LENGTH]) == JSOP_TYPEOF) {
+ if (!GetEnvironmentNameForTypeOf(cx, envChain, name, res))
+ return false;
+ } else {
+ if (!GetEnvironmentName(cx, envChain, name, res))
+ return false;
+ }
+
+ TypeScript::Monitor(cx, script, pc, res);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Add a type monitor stub for the resulting value.
+ if (!stub->addMonitorStubForValue(cx, &info, res))
+ return false;
+ if (attached)
+ return true;
+
+ if (IsGlobalOp(JSOp(*pc)) && !script->hasNonSyntacticScope()) {
+ Handle<LexicalEnvironmentObject*> globalLexical = envChain.as<LexicalEnvironmentObject>();
+ if (!TryAttachGlobalNameValueStub(cx, script, pc, stub, globalLexical, name, &attached))
+ return false;
+ } else {
+ if (!TryAttachEnvNameStub(cx, script, stub, envChain, name, &attached))
+ return false;
+ }
+
+ if (!attached && !isTemporarilyUnoptimizable)
+ stub->noteUnoptimizableAccess();
+ return true;
+}
+
+typedef bool (*DoGetNameFallbackFn)(JSContext*, BaselineFrame*, ICGetName_Fallback*,
+ HandleObject, MutableHandleValue);
+static const VMFunction DoGetNameFallbackInfo =
+ FunctionInfo<DoGetNameFallbackFn>(DoGetNameFallback, "DoGetNameFallback", TailCall);
+
+bool
+ICGetName_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoGetNameFallbackInfo, masm);
+}
+
+bool
+ICGetName_GlobalLexical::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Register obj = R0.scratchReg();
+ Register scratch = R1.scratchReg();
+
+ // There's no need to guard on the shape. Lexical bindings are
+ // non-configurable, and this stub cannot be shared across globals.
+
+ // Load dynamic slot.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
+ masm.load32(Address(ICStubReg, ICGetName_GlobalLexical::offsetOfSlot()), scratch);
+ masm.loadValue(BaseIndex(obj, scratch, TimesEight), R0);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+template <size_t NumHops>
+bool
+ICGetName_Env<NumHops>::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register obj = R0.scratchReg();
+ Register walker = regs.takeAny();
+ Register scratch = regs.takeAny();
+
+ // Use a local to silence Clang tautological-compare warning if NumHops is 0.
+ size_t numHops = NumHops;
+
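+ // Walk the environment chain, shape-guarding each environment along the
+ // way; after NumHops hops the final guarded environment holds the binding.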
+ for (size_t index = 0; index < NumHops + 1; index++) {
+ Register scope = index ? walker : obj;
+
+ // Shape guard.
+ masm.loadPtr(Address(ICStubReg, ICGetName_Env::offsetOfShape(index)), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, scope, scratch, &failure);
+
+ if (index < numHops) {
+ masm.extractObject(Address(scope, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ walker);
+ }
+ }
+
+ Register scope = NumHops ? walker : obj;
+
+ if (!isFixedSlot_) {
+ masm.loadPtr(Address(scope, NativeObject::offsetOfSlots()), walker);
+ scope = walker;
+ }
+
+ masm.load32(Address(ICStubReg, ICGetName_Env::offsetOfOffset()), scratch);
+
+ // GETNAME needs to check for uninitialized lexicals.
+ BaseIndex slot(scope, scratch, TimesOne);
+ masm.branchTestMagic(Assembler::Equal, slot, &failure);
+ masm.loadValue(slot, R0);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// BindName_Fallback
+//
+
+static bool
+DoBindNameFallback(JSContext* cx, BaselineFrame* frame, ICBindName_Fallback* stub,
+ HandleObject envChain, MutableHandleValue res)
+{
+ jsbytecode* pc = stub->icEntry()->pc(frame->script());
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "BindName(%s)", CodeName[JSOp(*pc)]);
+
+ MOZ_ASSERT(op == JSOP_BINDNAME || op == JSOP_BINDGNAME);
+
+ RootedPropertyName name(cx, frame->script()->getName(pc));
+
+ RootedObject scope(cx);
+ if (!LookupNameUnqualified(cx, name, envChain, &scope))
+ return false;
+
+ res.setObject(*scope);
+ return true;
+}
+
+typedef bool (*DoBindNameFallbackFn)(JSContext*, BaselineFrame*, ICBindName_Fallback*,
+ HandleObject, MutableHandleValue);
+static const VMFunction DoBindNameFallbackInfo =
+ FunctionInfo<DoBindNameFallbackFn>(DoBindNameFallback, "DoBindNameFallback", TailCall);
+
+bool
+ICBindName_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoBindNameFallbackInfo, masm);
+}
+
+//
+// GetIntrinsic_Fallback
+//
+
+static bool
+DoGetIntrinsicFallback(JSContext* cx, BaselineFrame* frame, ICGetIntrinsic_Fallback* stub_,
+ MutableHandleValue res)
+{
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICGetIntrinsic_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetIntrinsic(%s)", CodeName[JSOp(*pc)]);
+
+ MOZ_ASSERT(op == JSOP_GETINTRINSIC);
+
+ if (!GetIntrinsicOperation(cx, pc, res))
+ return false;
+
+ // An intrinsic operation will always produce the same result, so it only
+ // needs to be monitored once. Attach a stub to load the resulting constant
+ // directly.
+
+ TypeScript::Monitor(cx, script, pc, res);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetIntrinsic optimized stub");
+ ICGetIntrinsic_Constant::Compiler compiler(cx, res);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ return true;
+}
+
+typedef bool (*DoGetIntrinsicFallbackFn)(JSContext*, BaselineFrame*, ICGetIntrinsic_Fallback*,
+ MutableHandleValue);
+static const VMFunction DoGetIntrinsicFallbackInfo =
+ FunctionInfo<DoGetIntrinsicFallbackFn>(DoGetIntrinsicFallback, "DoGetIntrinsicFallback",
+ TailCall);
+
+bool
+ICGetIntrinsic_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoGetIntrinsicFallbackInfo, masm);
+}
+
+bool
+ICGetIntrinsic_Constant::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ masm.loadValue(Address(ICStubReg, ICGetIntrinsic_Constant::offsetOfValue()), R0);
+
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+//
+// SetProp_Fallback
+//
+
+// Attach an optimized property set stub for a SETPROP/SETGNAME/SETNAME op on a
+// value property.
+static bool
+TryAttachSetValuePropStub(JSContext* cx, HandleScript script, jsbytecode* pc, ICSetProp_Fallback* stub,
+ HandleObject obj, HandleShape oldShape, HandleObjectGroup oldGroup,
+ HandlePropertyName name, HandleId id, HandleValue rhs, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (obj->watched())
+ return true;
+
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape))
+ return false;
+ if (obj != holder)
+ return true;
+
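+ // Non-native objects are only handled when they are unboxed plain objects
+ // whose expando object holds the property.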
+ if (!obj->isNative()) {
+ if (obj->is<UnboxedPlainObject>()) {
+ UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
+ if (expando) {
+ shape = expando->lookup(cx, name);
+ if (!shape)
+ return true;
+ } else {
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
+
+ size_t chainDepth;
+ if (IsCacheableSetPropAddSlot(cx, obj, oldShape, id, shape, &chainDepth)) {
+ // Don't attach if proto chain depth is too high.
+ if (chainDepth > ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH)
+ return true;
+
+ // Don't attach if we are adding a property to an object for which the new
+ // script properties analysis hasn't been performed yet, as there
+ // may be a shape change required here afterwards. Pretend we attached
+ // a stub, though, so the access is not marked as unoptimizable.
+ if (oldGroup->newScript() && !oldGroup->newScript()->analyzed()) {
+ *attached = true;
+ return true;
+ }
+
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ JitSpew(JitSpew_BaselineIC, " Generating SetProp(NativeObject.ADD) stub");
+ ICSetPropNativeAddCompiler compiler(cx, obj, oldShape, oldGroup,
+ chainDepth, isFixedSlot, offset);
+ ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+ if (!newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ if (IsCacheableSetPropWriteSlot(obj, oldShape, shape)) {
+ // For some property writes, such as the initial overwrite of global
+ // properties, TI will not mark the property as having been
+ // overwritten. Don't attach a stub in this case, so that we don't
+ // execute another write to the property without TI seeing that write.
+ EnsureTrackPropertyTypes(cx, obj, id);
+ if (!PropertyHasBeenMarkedNonConstant(obj, id)) {
+ *attached = true;
+ return true;
+ }
+
+ bool isFixedSlot;
+ uint32_t offset;
+ GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+ JitSpew(JitSpew_BaselineIC, " Generating SetProp(NativeObject.PROP) stub");
+ MOZ_ASSERT(LastPropertyForSetProp(obj) == oldShape,
+ "Should this really be a SetPropWriteSlot?");
+ ICSetProp_Native::Compiler compiler(cx, obj, isFixedSlot, offset);
+ ICSetProp_Native* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+ if (!newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
+ return false;
+
+ if (IsPreliminaryObject(obj))
+ newStub->notePreliminaryObject();
+ else
+ StripPreliminaryObjectStubs(cx, stub);
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ return true;
+}
+
+// Attach an optimized property set stub for a SETPROP/SETGNAME/SETNAME op on
+// an accessor property.
+static bool
+TryAttachSetAccessorPropStub(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICSetProp_Fallback* stub,
+ HandleObject obj, const RootedReceiverGuard& receiverGuard,
+ HandlePropertyName name,
+ HandleId id, HandleValue rhs, bool* attached,
+ bool* isTemporarilyUnoptimizable)
+{
+ MOZ_ASSERT(!*attached);
+ MOZ_ASSERT(!*isTemporarilyUnoptimizable);
+
+ if (obj->watched())
+ return true;
+
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape))
+ return false;
+
+ bool isScripted = false;
+ bool cacheableCall = IsCacheableSetPropCall(cx, obj, holder, shape,
+ &isScripted, isTemporarilyUnoptimizable);
+
+ // Try handling scripted setters.
+ if (cacheableCall && isScripted) {
+ RootedFunction callee(cx, &shape->setterObject()->as<JSFunction>());
+ MOZ_ASSERT(callee->hasScript());
+
+ if (UpdateExistingSetPropCallStubs(stub, ICStub::SetProp_CallScripted,
+ &holder->as<NativeObject>(), obj, callee)) {
+ *attached = true;
+ return true;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Generating SetProp(NativeObj/ScriptedSetter %s:%" PRIuSIZE ") stub",
+ callee->nonLazyScript()->filename(), callee->nonLazyScript()->lineno());
+
+ ICSetProp_CallScripted::Compiler compiler(cx, obj, holder, callee, script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ // Try handling JSNative setters.
+ if (cacheableCall && !isScripted) {
+ RootedFunction callee(cx, &shape->setterObject()->as<JSFunction>());
+ MOZ_ASSERT(callee->isNative());
+
+ if (UpdateExistingSetPropCallStubs(stub, ICStub::SetProp_CallNative,
+ &holder->as<NativeObject>(), obj, callee)) {
+ *attached = true;
+ return true;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Generating SetProp(NativeObj/NativeSetter %p) stub",
+ callee->native());
+
+ ICSetProp_CallNative::Compiler compiler(cx, obj, holder, callee, script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+TryAttachUnboxedSetPropStub(JSContext* cx, HandleScript script,
+ ICSetProp_Fallback* stub, HandleId id,
+ HandleObject obj, HandleValue rhs, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (!cx->runtime()->jitSupportsFloatingPoint)
+ return true;
+
+ if (!obj->is<UnboxedPlainObject>())
+ return true;
+
+ const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(id);
+ if (!property)
+ return true;
+
+ ICSetProp_Unboxed::Compiler compiler(cx, obj->group(),
+ property->offset + UnboxedPlainObject::offsetOfData(),
+ property->type);
+ ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+ if (compiler.needsUpdateStubs() && !newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
+ return false;
+
+ stub->addNewStub(newStub);
+
+ StripPreliminaryObjectStubs(cx, stub);
+
+ *attached = true;
+ return true;
+}
+
+static bool
+TryAttachTypedObjectSetPropStub(JSContext* cx, HandleScript script,
+ ICSetProp_Fallback* stub, HandleId id,
+ HandleObject obj, HandleValue rhs, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (!cx->runtime()->jitSupportsFloatingPoint)
+ return true;
+
+ if (!obj->is<TypedObject>())
+ return true;
+
+ if (!obj->as<TypedObject>().typeDescr().is<StructTypeDescr>())
+ return true;
+ Rooted<StructTypeDescr*> structDescr(cx);
+ structDescr = &obj->as<TypedObject>().typeDescr().as<StructTypeDescr>();
+
+ size_t fieldIndex;
+ if (!structDescr->fieldIndex(id, &fieldIndex))
+ return true;
+
+ Rooted<TypeDescr*> fieldDescr(cx, &structDescr->fieldDescr(fieldIndex));
+ if (!fieldDescr->is<SimpleTypeDescr>())
+ return true;
+
+ uint32_t fieldOffset = structDescr->fieldOffset(fieldIndex);
+
+ ICSetProp_TypedObject::Compiler compiler(cx, obj->maybeShape(), obj->group(), fieldOffset,
+ &fieldDescr->as<SimpleTypeDescr>());
+ ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+ if (compiler.needsUpdateStubs() && !newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
+ return false;
+
+ stub->addNewStub(newStub);
+
+ *attached = true;
+ return true;
+}
+
+static bool
+DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_,
+ HandleValue lhs, HandleValue rhs, MutableHandleValue res)
+{
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICSetProp_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "SetProp(%s)", CodeName[op]);
+
+ MOZ_ASSERT(op == JSOP_SETPROP ||
+ op == JSOP_STRICTSETPROP ||
+ op == JSOP_SETNAME ||
+ op == JSOP_STRICTSETNAME ||
+ op == JSOP_SETGNAME ||
+ op == JSOP_STRICTSETGNAME ||
+ op == JSOP_INITPROP ||
+ op == JSOP_INITLOCKEDPROP ||
+ op == JSOP_INITHIDDENPROP ||
+ op == JSOP_SETALIASEDVAR ||
+ op == JSOP_INITALIASEDLEXICAL ||
+ op == JSOP_INITGLEXICAL);
+
+ RootedPropertyName name(cx);
+ if (op == JSOP_SETALIASEDVAR || op == JSOP_INITALIASEDLEXICAL)
+ name = EnvironmentCoordinateName(cx->caches.envCoordinateNameCache, script, pc);
+ else
+ name = script->getName(pc);
+ RootedId id(cx, NameToId(name));
+
+ RootedObject obj(cx, ToObjectFromStack(cx, lhs));
+ if (!obj)
+ return false;
+ RootedShape oldShape(cx, obj->maybeShape());
+ RootedObjectGroup oldGroup(cx, obj->getGroup(cx));
+ if (!oldGroup)
+ return false;
+ RootedReceiverGuard oldGuard(cx, ReceiverGuard(obj));
+
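+ // Unboxed plain objects have no shape of their own; if an expando object is
+ // present, its last property serves as the old shape for the add-slot path.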
+ if (obj->is<UnboxedPlainObject>()) {
+ MOZ_ASSERT(!oldShape);
+ if (UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando())
+ oldShape = expando->lastProperty();
+ }
+
+ bool attached = false;
+ // There are some reasons we can fail to attach a stub that are temporary.
+ // We want to avoid calling noteUnoptimizableAccess() if the reason we
+ // failed to attach a stub is one of those temporary reasons, since we might
+ // end up attaching a stub for the exact same access later.
+ bool isTemporarilyUnoptimizable = false;
+ if (stub->numOptimizedStubs() < ICSetProp_Fallback::MAX_OPTIMIZED_STUBS &&
+ lhs.isObject() &&
+ !TryAttachSetAccessorPropStub(cx, script, pc, stub, obj, oldGuard, name, id,
+ rhs, &attached, &isTemporarilyUnoptimizable))
+ {
+ return false;
+ }
+
+ if (op == JSOP_INITPROP ||
+ op == JSOP_INITLOCKEDPROP ||
+ op == JSOP_INITHIDDENPROP)
+ {
+ if (!InitPropertyOperation(cx, op, obj, id, rhs))
+ return false;
+ } else if (op == JSOP_SETNAME ||
+ op == JSOP_STRICTSETNAME ||
+ op == JSOP_SETGNAME ||
+ op == JSOP_STRICTSETGNAME)
+ {
+ if (!SetNameOperation(cx, script, pc, obj, rhs))
+ return false;
+ } else if (op == JSOP_SETALIASEDVAR || op == JSOP_INITALIASEDLEXICAL) {
+ obj->as<EnvironmentObject>().setAliasedBinding(cx, EnvironmentCoordinate(pc), name, rhs);
+ } else if (op == JSOP_INITGLEXICAL) {
+ RootedValue v(cx, rhs);
+ LexicalEnvironmentObject* lexicalEnv;
+ if (script->hasNonSyntacticScope())
+ lexicalEnv = &NearestEnclosingExtensibleLexicalEnvironment(frame->environmentChain());
+ else
+ lexicalEnv = &cx->global()->lexicalEnvironment();
+ InitGlobalLexicalOperation(cx, lexicalEnv, script, pc, v);
+ } else {
+ MOZ_ASSERT(op == JSOP_SETPROP || op == JSOP_STRICTSETPROP);
+
+ ObjectOpResult result;
+ if (!SetProperty(cx, obj, id, rhs, lhs, result) ||
+ !result.checkStrictErrorOrWarning(cx, obj, id, op == JSOP_STRICTSETPROP))
+ {
+ return false;
+ }
+ }
+
+ // Leave the RHS on the stack.
+ res.set(rhs);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ if (stub->numOptimizedStubs() >= ICSetProp_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with generic setprop stub.
+ return true;
+ }
+
+ if (!attached &&
+ lhs.isObject() &&
+ !TryAttachSetValuePropStub(cx, script, pc, stub, obj, oldShape, oldGroup,
+ name, id, rhs, &attached))
+ {
+ return false;
+ }
+ if (attached)
+ return true;
+
+ if (!attached &&
+ lhs.isObject() &&
+ !TryAttachUnboxedSetPropStub(cx, script, stub, id, obj, rhs, &attached))
+ {
+ return false;
+ }
+ if (attached)
+ return true;
+
+ if (!attached &&
+ lhs.isObject() &&
+ !TryAttachTypedObjectSetPropStub(cx, script, stub, id, obj, rhs, &attached))
+ {
+ return false;
+ }
+ if (attached)
+ return true;
+
+ MOZ_ASSERT(!attached);
+ if (!isTemporarilyUnoptimizable)
+ stub->noteUnoptimizableAccess();
+
+ return true;
+}
+
+typedef bool (*DoSetPropFallbackFn)(JSContext*, BaselineFrame*, ICSetProp_Fallback*,
+ HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoSetPropFallbackInfo =
+ FunctionInfo<DoSetPropFallbackFn>(DoSetPropFallback, "DoSetPropFallback", TailCall,
+ PopValues(2));
+
+bool
+ICSetProp_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ if (!tailCallVM(DoSetPropFallbackInfo, masm))
+ return false;
+
+ // Even though the fallback frame doesn't enter a stub frame, the CallScripted
+ // frame that we are emulating does. Again, we lie.
+#ifdef DEBUG
+ EmitRepushTailCallReg(masm);
+ EmitStowICValues(masm, 1);
+ enterStubFrame(masm, R1.scratchReg());
+#else
+ inStubFrame_ = true;
+#endif
+
+ // What follows is bailout-only code for inlined script setters.
+ // The return address pointed to by the baseline stack points here.
+ returnOffset_ = masm.currentOffset();
+
+ leaveStubFrame(masm, true);
+
+ // Retrieve the stashed initial argument from the caller's frame before returning
+ EmitUnstowICValues(masm, 1);
+ EmitReturnFromIC(masm);
+
+ return true;
+}
+
+void
+ICSetProp_Fallback::Compiler::postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code)
+{
+ cx->compartment()->jitCompartment()->initBaselineSetPropReturnAddr(code->raw() + returnOffset_);
+}
+
+static void
+GuardGroupAndShapeMaybeUnboxedExpando(MacroAssembler& masm, JSObject* obj,
+ Register object, Register scratch,
+ size_t offsetOfGroup, size_t offsetOfShape, Label* failure)
+{
+ // Guard against object group.
+ masm.loadPtr(Address(ICStubReg, offsetOfGroup), scratch);
+ masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
+ failure);
+
+ // Guard against shape or expando shape.
+ masm.loadPtr(Address(ICStubReg, offsetOfShape), scratch);
+ if (obj->is<UnboxedPlainObject>()) {
+ Address expandoAddress(object, UnboxedPlainObject::offsetOfExpando());
+ masm.branchPtr(Assembler::Equal, expandoAddress, ImmWord(0), failure);
+ Label done;
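+ // Temporarily replace |object| with the expando pointer so we can reuse
+ // branchTestObjShape, then restore |object| on both the success and failure paths.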
+ masm.push(object);
+ masm.loadPtr(expandoAddress, object);
+ masm.branchTestObjShape(Assembler::Equal, object, scratch, &done);
+ masm.pop(object);
+ masm.jump(failure);
+ masm.bind(&done);
+ masm.pop(object);
+ } else {
+ masm.branchTestObjShape(Assembler::NotEqual, object, scratch, failure);
+ }
+}
+
+bool
+ICSetProp_Native::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ GuardGroupAndShapeMaybeUnboxedExpando(masm, obj_, objReg, scratch,
+ ICSetProp_Native::offsetOfGroup(),
+ ICSetProp_Native::offsetOfShape(),
+ &failure);
+
+ // Stow both R0 and R1 (object and value).
+ EmitStowICValues(masm, 2);
+
+ // Type update stub expects the value to check in R0.
+ masm.moveValue(R1, R0);
+
+ // Call the type-update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+
+ regs.add(R0);
+ regs.takeUnchecked(objReg);
+
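+ // Select the base register for the store: the expando object for unboxed
+ // objects, the object itself for fixed slots, or the dynamic slots pointer.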
+ Register holderReg;
+ if (obj_->is<UnboxedPlainObject>()) {
+ // We are loading off the expando object, so use that for the holder.
+ holderReg = regs.takeAny();
+ masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
+ if (!isFixedSlot_)
+ masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), holderReg);
+ } else if (isFixedSlot_) {
+ holderReg = objReg;
+ } else {
+ holderReg = regs.takeAny();
+ masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), holderReg);
+ }
+
+ // Perform the store.
+ masm.load32(Address(ICStubReg, ICSetProp_Native::offsetOfOffset()), scratch);
+ EmitPreBarrier(masm, BaseIndex(holderReg, scratch, TimesOne), MIRType::Value);
+ masm.storeValue(R1, BaseIndex(holderReg, scratch, TimesOne));
+ if (holderReg != objReg)
+ regs.add(holderReg);
+ if (cx->runtime()->gc.nursery.exists()) {
+ Register scr = regs.takeAny();
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R1);
+ emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
+ regs.add(scr);
+ }
+
+ // The RHS has to be in R0.
+ masm.moveValue(R1, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICUpdatedStub*
+ICSetPropNativeAddCompiler::getStub(ICStubSpace* space)
+{
+ Rooted<ShapeVector> shapes(cx, ShapeVector(cx));
+ if (!shapes.append(oldShape_))
+ return nullptr;
+
+ if (!GetProtoShapes(obj_, protoChainDepth_, &shapes))
+ return nullptr;
+
+ JS_STATIC_ASSERT(ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+
+ ICUpdatedStub* stub = nullptr;
+ switch (protoChainDepth_) {
+ case 0: stub = getStubSpecific<0>(space, shapes); break;
+ case 1: stub = getStubSpecific<1>(space, shapes); break;
+ case 2: stub = getStubSpecific<2>(space, shapes); break;
+ case 3: stub = getStubSpecific<3>(space, shapes); break;
+ case 4: stub = getStubSpecific<4>(space, shapes); break;
+ default: MOZ_CRASH("ProtoChainDepth too high.");
+ }
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+}
+
+bool
+ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Label failureUnstow;
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ GuardGroupAndShapeMaybeUnboxedExpando(masm, obj_, objReg, scratch,
+ ICSetProp_NativeAdd::offsetOfGroup(),
+ ICSetProp_NativeAddImpl<0>::offsetOfShape(0),
+ &failure);
+
+ // Stow both R0 and R1 (object and value).
+ EmitStowICValues(masm, 2);
+
+ regs = availableGeneralRegs(1);
+ scratch = regs.takeAny();
+ Register protoReg = regs.takeAny();
+ // Check the proto chain.
+ for (size_t i = 0; i < protoChainDepth_; i++) {
+ masm.loadObjProto(i == 0 ? objReg : protoReg, protoReg);
+ masm.branchTestPtr(Assembler::Zero, protoReg, protoReg, &failureUnstow);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAddImpl<0>::offsetOfShape(i + 1)),
+ scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, protoReg, scratch, &failureUnstow);
+ }
+
+ // Shape and type checks succeeded, ok to proceed.
+
+ // Load RHS into R0 for TypeUpdate check.
+ // Stack is currently: [..., ObjValue, RHSValue, MaybeReturnAddr? ]
+ masm.loadValue(Address(masm.getStackPointer(), ICStackValueOffset), R0);
+
+ // Call the type-update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+ regs = availableGeneralRegs(2);
+ scratch = regs.takeAny();
+
+ if (obj_->is<PlainObject>()) {
+ // Try to change the object's group.
+ Label noGroupChange;
+
+ // Check if the cache has a new group to change to.
+ masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewGroup()), scratch);
+ masm.branchTestPtr(Assembler::Zero, scratch, scratch, &noGroupChange);
+
+ // Check if the old group still has a newScript.
+ masm.loadPtr(Address(objReg, JSObject::offsetOfGroup()), scratch);
+ masm.branchPtr(Assembler::Equal,
+ Address(scratch, ObjectGroup::offsetOfAddendum()),
+ ImmWord(0),
+ &noGroupChange);
+
+ // Reload the new group from the cache.
+ masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewGroup()), scratch);
+
+ // Change the object's group.
+ Address groupAddr(objReg, JSObject::offsetOfGroup());
+ EmitPreBarrier(masm, groupAddr, MIRType::ObjectGroup);
+ masm.storePtr(scratch, groupAddr);
+
+ masm.bind(&noGroupChange);
+ }
+
+ Register holderReg;
+ regs.add(R0);
+ regs.takeUnchecked(objReg);
+
+ if (obj_->is<UnboxedPlainObject>()) {
+ holderReg = regs.takeAny();
+ masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
+
+ // Write the expando object's new shape.
+ Address shapeAddr(holderReg, ShapedObject::offsetOfShape());
+ EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewShape()), scratch);
+ masm.storePtr(scratch, shapeAddr);
+
+ if (!isFixedSlot_)
+ masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), holderReg);
+ } else {
+ // Write the object's new shape.
+ Address shapeAddr(objReg, ShapedObject::offsetOfShape());
+ EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewShape()), scratch);
+ masm.storePtr(scratch, shapeAddr);
+
+ if (isFixedSlot_) {
+ holderReg = objReg;
+ } else {
+ holderReg = regs.takeAny();
+ masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), holderReg);
+ }
+ }
+
+ // Perform the store. No write barrier required since this is a new
+ // initialization.
+ masm.load32(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfOffset()), scratch);
+ masm.storeValue(R1, BaseIndex(holderReg, scratch, TimesOne));
+
+ if (holderReg != objReg)
+ regs.add(holderReg);
+
+ if (cx->runtime()->gc.nursery.exists()) {
+ Register scr = regs.takeAny();
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R1);
+ emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
+ }
+
+ // The RHS has to be in R0.
+ masm.moveValue(R1, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failureUnstow);
+ EmitUnstowICValues(masm, 2);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICSetProp_Unboxed::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ // Unbox and group guard.
+ Register object = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_Unboxed::offsetOfGroup()), scratch);
+ masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
+ &failure);
+
+ if (needsUpdateStubs()) {
+ // Stow both R0 and R1 (object and value).
+ EmitStowICValues(masm, 2);
+
+ // Move RHS into R0 for TypeUpdate check.
+ masm.moveValue(R1, R0);
+
+ // Call the type update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+
+ // The TypeUpdate IC may have smashed object. Rederive it.
+ masm.unboxObject(R0, object);
+
+ // Trigger post barriers here on the values being written. Fields to which
+ // objects can be written also need update stubs.
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R0);
+ saveRegs.add(R1);
+ saveRegs.addUnchecked(object);
+ saveRegs.add(ICStubReg);
+ emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
+ }
+
+ // Compute the address being written to.
+ masm.load32(Address(ICStubReg, ICSetProp_Unboxed::offsetOfFieldOffset()), scratch);
+ BaseIndex address(object, scratch, TimesOne);
+
+ EmitUnboxedPreBarrierForBaseline(masm, address, fieldType_);
+ masm.storeUnboxedProperty(address, fieldType_,
+ ConstantOrRegister(TypedOrValueRegister(R1)), &failure);
+
+ // The RHS has to be in R0.
+ masm.moveValue(R1, R0);
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICSetProp_TypedObject::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ CheckForTypedObjectWithDetachedStorage(cx, masm, &failure);
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratch = regs.takeAny();
+
+ // Unbox and shape guard.
+ Register object = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_TypedObject::offsetOfShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, object, scratch, &failure);
+
+ // Guard that the object group matches.
+ masm.loadPtr(Address(ICStubReg, ICSetProp_TypedObject::offsetOfGroup()), scratch);
+ masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
+ &failure);
+
+ if (needsUpdateStubs()) {
+ // Stow both R0 and R1 (object and value).
+ EmitStowICValues(masm, 2);
+
+ // Move RHS into R0 for TypeUpdate check.
+ masm.moveValue(R1, R0);
+
+ // Call the type update stub.
+ if (!callTypeUpdateIC(masm, sizeof(Value)))
+ return false;
+
+ // Unstow R0 and R1 (object and key)
+ EmitUnstowICValues(masm, 2);
+
+ // We may have clobbered object in the TypeUpdate IC. Rederive it.
+ masm.unboxObject(R0, object);
+
+ // Trigger post barriers here on the values being written. Descriptors
+ // which can write objects also need update stubs.
+ LiveGeneralRegisterSet saveRegs;
+ saveRegs.add(R0);
+ saveRegs.add(R1);
+ saveRegs.addUnchecked(object);
+ saveRegs.add(ICStubReg);
+ emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
+ }
+
+ // Save the rhs on the stack so we can get a second scratch register.
+ Label failurePopRHS;
+ masm.pushValue(R1);
+ regs = availableGeneralRegs(1);
+ regs.takeUnchecked(object);
+ regs.take(scratch);
+ Register secondScratch = regs.takeAny();
+
+ // Get the object's data pointer.
+ LoadTypedThingData(masm, layout_, object, scratch);
+
+ // Compute the address being written to.
+ masm.load32(Address(ICStubReg, ICSetProp_TypedObject::offsetOfFieldOffset()), secondScratch);
+ masm.addPtr(secondScratch, scratch);
+
+ Address dest(scratch, 0);
+ Address value(masm.getStackPointer(), 0);
+
+ if (fieldDescr_->is<ScalarTypeDescr>()) {
+ Scalar::Type type = fieldDescr_->as<ScalarTypeDescr>().type();
+ StoreToTypedArray(cx, masm, type, value, dest,
+ secondScratch, &failurePopRHS, &failurePopRHS);
+ masm.popValue(R1);
+ } else {
+ ReferenceTypeDescr::Type type = fieldDescr_->as<ReferenceTypeDescr>().type();
+
+ masm.popValue(R1);
+
+ switch (type) {
+ case ReferenceTypeDescr::TYPE_ANY:
+ EmitPreBarrier(masm, dest, MIRType::Value);
+ masm.storeValue(R1, dest);
+ break;
+
+ case ReferenceTypeDescr::TYPE_OBJECT: {
+ EmitPreBarrier(masm, dest, MIRType::Object);
+ Label notObject;
+ masm.branchTestObject(Assembler::NotEqual, R1, &notObject);
+ Register rhsObject = masm.extractObject(R1, ExtractTemp0);
+ masm.storePtr(rhsObject, dest);
+ EmitReturnFromIC(masm);
+ masm.bind(&notObject);
+ masm.branchTestNull(Assembler::NotEqual, R1, &failure);
+ masm.storePtr(ImmWord(0), dest);
+ break;
+ }
+
+ case ReferenceTypeDescr::TYPE_STRING: {
+ EmitPreBarrier(masm, dest, MIRType::String);
+ masm.branchTestString(Assembler::NotEqual, R1, &failure);
+ Register rhsString = masm.extractString(R1, ExtractTemp0);
+ masm.storePtr(rhsString, dest);
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // The RHS has to be in R0.
+ masm.moveValue(R1, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failurePopRHS);
+ masm.popValue(R1);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICSetProp_CallScripted::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Label failureUnstow;
+ Label failureLeaveStubFrame;
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Stow R0 and R1 to free up registers.
+ EmitStowICValues(masm, 2);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Unbox and shape guard.
+ uint32_t framePushed = masm.framePushed();
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+ GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
+ ICSetProp_CallScripted::offsetOfReceiverGuard(), &failureUnstow);
+
+ if (receiver_ != holder_) {
+ Register holderReg = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallScripted::offsetOfHolder()), holderReg);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallScripted::offsetOfHolderShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failureUnstow);
+ regs.add(holderReg);
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Load callee function and code. To ensure that |code| doesn't end up being
+ // ArgumentsRectifierReg, if it's available we assign it to |callee| instead.
+ Register callee;
+ if (regs.has(ArgumentsRectifierReg)) {
+ callee = ArgumentsRectifierReg;
+ regs.take(callee);
+ } else {
+ callee = regs.takeAny();
+ }
+ Register code = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallScripted::offsetOfSetter()), callee);
+ masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
+ masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
+
+ // Align the stack such that the JitFrameLayout is aligned on
+ // JitStackAlignment.
+ masm.alignJitStackBasedOnNArgs(1);
+
+ // Setter is called with the new value as the only argument, and |obj| as thisv.
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+
+ // To Push R1, read it off of the stowed values on stack.
+ // Stack: [ ..., R0, R1, ..STUBFRAME-HEADER.., padding? ]
+ masm.PushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE));
+ masm.Push(R0);
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+ masm.Push(Imm32(1)); // ActualArgc is 1
+ masm.Push(callee);
+ masm.Push(scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
+ masm.branch32(Assembler::BelowOrEqual, scratch, Imm32(1), &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != code);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+ masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+ masm.movePtr(ImmWord(1), ArgumentsRectifierReg);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ uint32_t framePushedAfterCall = masm.framePushed();
+
+ leaveStubFrame(masm, true);
+ // Do not care about return value from function. The original RHS should be returned
+ // as the result of this operation.
+ EmitUnstowICValues(masm, 2);
+ masm.moveValue(R1, R0);
+ EmitReturnFromIC(masm);
+
+ // Leave stub frame and go to next stub.
+ masm.bind(&failureLeaveStubFrame);
+ masm.setFramePushed(framePushedAfterCall);
+ inStubFrame_ = true;
+ leaveStubFrame(masm, false);
+
+ // Unstow R0 and R1
+ masm.bind(&failureUnstow);
+ masm.setFramePushed(framePushed);
+ EmitUnstowICValues(masm, 2);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+static bool
+DoCallNativeSetter(JSContext* cx, HandleFunction callee, HandleObject obj, HandleValue val)
+{
+ MOZ_ASSERT(callee->isNative());
+ JSNative natfun = callee->native();
+
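+ // Build a minimal call: vp[0] = callee, vp[1] = |this| (the receiver), and
+ // vp[2] = the value being assigned, passed as the setter's single argument.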
+ JS::AutoValueArray<3> vp(cx);
+ vp[0].setObject(*callee.get());
+ vp[1].setObject(*obj.get());
+ vp[2].set(val);
+
+ return natfun(cx, 1, vp.begin());
+}
+
+typedef bool (*DoCallNativeSetterFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
+static const VMFunction DoCallNativeSetterInfo =
+ FunctionInfo<DoCallNativeSetterFn>(DoCallNativeSetter, "DoCallNativeSetter");
+
+bool
+ICSetProp_CallNative::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Label failureUnstow;
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Stow R0 and R1 to free up registers.
+ EmitStowICValues(masm, 2);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Unbox and shape guard.
+ uint32_t framePushed = masm.framePushed();
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+ GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
+ ICSetProp_CallNative::offsetOfReceiverGuard(), &failureUnstow);
+
+ if (receiver_ != holder_) {
+ Register holderReg = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallNative::offsetOfHolder()), holderReg);
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallNative::offsetOfHolderShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failureUnstow);
+ regs.add(holderReg);
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Load the callee function.
+ Register callee = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICSetProp_CallNative::offsetOfSetter()), callee);
+
+ // To Push R1, read it off of the stowed values on stack.
+ // Stack: [ ..., R0, R1, ..STUBFRAME-HEADER.. ]
+ masm.moveStackPtrTo(scratch);
+ masm.pushValue(Address(scratch, STUB_FRAME_SIZE));
+ masm.push(objReg);
+ masm.push(callee);
+
+ // Don't need to preserve R0 anymore.
+ regs.add(R0);
+
+ if (!callVM(DoCallNativeSetterInfo, masm))
+ return false;
+ leaveStubFrame(masm);
+
+ // Do not care about return value from function. The original RHS should be returned
+ // as the result of this operation.
+ EmitUnstowICValues(masm, 2);
+ masm.moveValue(R1, R0);
+ EmitReturnFromIC(masm);
+
+ // Unstow R0 and R1
+ masm.bind(&failureUnstow);
+ masm.setFramePushed(framePushed);
+ EmitUnstowICValues(masm, 2);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// Call_Fallback
+//
+
+static bool
+TryAttachFunApplyStub(JSContext* cx, ICCall_Fallback* stub, HandleScript script, jsbytecode* pc,
+ HandleValue thisv, uint32_t argc, Value* argv, bool* attached)
+{
+ if (argc != 2)
+ return true;
+
+ if (!thisv.isObject() || !thisv.toObject().is<JSFunction>())
+ return true;
+ RootedFunction target(cx, &thisv.toObject().as<JSFunction>());
+
+ bool isScripted = target->hasJITCode();
+
+ // Right now we only handle the case where the second argument is |arguments|.
+ if (argv[1].isMagic(JS_OPTIMIZED_ARGUMENTS) && !script->needsArgsObj()) {
+ if (isScripted && !stub->hasStub(ICStub::Call_ScriptedApplyArguments)) {
+ JitSpew(JitSpew_BaselineIC, " Generating Call_ScriptedApplyArguments stub");
+
+ ICCall_ScriptedApplyArguments::Compiler compiler(
+ cx, stub->fallbackMonitorStub()->firstMonitorStub(), script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ // TODO: handle FUNAPPLY for native targets.
+ }
+
+ if (argv[1].isObject() && argv[1].toObject().is<ArrayObject>()) {
+ if (isScripted && !stub->hasStub(ICStub::Call_ScriptedApplyArray)) {
+ JitSpew(JitSpew_BaselineIC, " Generating Call_ScriptedApplyArray stub");
+
+ ICCall_ScriptedApplyArray::Compiler compiler(
+ cx, stub->fallbackMonitorStub()->firstMonitorStub(), script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+ }
+ return true;
+}
+
+static bool
+TryAttachFunCallStub(JSContext* cx, ICCall_Fallback* stub, HandleScript script, jsbytecode* pc,
+ HandleValue thisv, bool* attached)
+{
+ // Try to attach a stub for Function.prototype.call with scripted |this|.
+
+ *attached = false;
+ if (!thisv.isObject() || !thisv.toObject().is<JSFunction>())
+ return true;
+ RootedFunction target(cx, &thisv.toObject().as<JSFunction>());
+
+ // Attach a stub if the script can be Baseline-compiled. We do this also
+ // if the script is not yet compiled to avoid attaching a CallNative stub
+ // that handles everything, even after the callee becomes hot.
+ if (target->hasScript() && target->nonLazyScript()->canBaselineCompile() &&
+ !stub->hasStub(ICStub::Call_ScriptedFunCall))
+ {
+ JitSpew(JitSpew_BaselineIC, " Generating Call_ScriptedFunCall stub");
+
+ ICCall_ScriptedFunCall::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(newStub);
+ return true;
+ }
+
+ return true;
+}
+
+// Check if target is a native SIMD operation which returns a SIMD type.
+// If so, set res to a template object matching the SIMD type produced and return true.
+static bool
+GetTemplateObjectForSimd(JSContext* cx, JSFunction* target, MutableHandleObject res)
+{
+ const JSJitInfo* jitInfo = target->jitInfo();
+ if (!jitInfo || jitInfo->type() != JSJitInfo::InlinableNative)
+ return false;
+
+ // Check if this is a native inlinable SIMD operation.
+ SimdType ctrlType;
+ switch (jitInfo->inlinableNative) {
+ case InlinableNative::SimdInt8x16: ctrlType = SimdType::Int8x16; break;
+ case InlinableNative::SimdUint8x16: ctrlType = SimdType::Uint8x16; break;
+ case InlinableNative::SimdInt16x8: ctrlType = SimdType::Int16x8; break;
+ case InlinableNative::SimdUint16x8: ctrlType = SimdType::Uint16x8; break;
+ case InlinableNative::SimdInt32x4: ctrlType = SimdType::Int32x4; break;
+ case InlinableNative::SimdUint32x4: ctrlType = SimdType::Uint32x4; break;
+ case InlinableNative::SimdFloat32x4: ctrlType = SimdType::Float32x4; break;
+ case InlinableNative::SimdBool8x16: ctrlType = SimdType::Bool8x16; break;
+ case InlinableNative::SimdBool16x8: ctrlType = SimdType::Bool16x8; break;
+ case InlinableNative::SimdBool32x4: ctrlType = SimdType::Bool32x4; break;
+ // This is not an inlinable SIMD operation.
+ default: return false;
+ }
+
+ // The controlling type is not necessarily the return type.
+ // Check the actual operation.
+ SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
+ SimdType retType;
+
+ switch (simdOp) {
+ case SimdOperation::Fn_allTrue:
+ case SimdOperation::Fn_anyTrue:
+ case SimdOperation::Fn_extractLane:
+ // These operations return a scalar. No template object needed.
+ return false;
+
+ case SimdOperation::Fn_lessThan:
+ case SimdOperation::Fn_lessThanOrEqual:
+ case SimdOperation::Fn_equal:
+ case SimdOperation::Fn_notEqual:
+ case SimdOperation::Fn_greaterThan:
+ case SimdOperation::Fn_greaterThanOrEqual:
+ // These operations return a boolean vector with the same shape as the
+ // controlling type.
+ retType = GetBooleanSimdType(ctrlType);
+ break;
+
+ default:
+ // All other operations return the controlling type.
+ retType = ctrlType;
+ break;
+ }
+
+ // Create a template object based on retType.
+ RootedGlobalObject global(cx, cx->global());
+ Rooted<SimdTypeDescr*> descr(cx, GlobalObject::getOrCreateSimdTypeDescr(cx, global, retType));
+ res.set(cx->compartment()->jitCompartment()->getSimdTemplateObjectFor(cx, descr));
+ return true;
+}
+
+static void
+EnsureArrayGroupAnalyzed(JSContext* cx, JSObject* obj)
+{
+ if (PreliminaryObjectArrayWithTemplate* objects = obj->group()->maybePreliminaryObjects())
+ objects->maybeAnalyze(cx, obj->group(), /* forceAnalyze = */ true);
+}
+
+static bool
+GetTemplateObjectForNative(JSContext* cx, HandleFunction target, const CallArgs& args,
+ MutableHandleObject res, bool* skipAttach)
+{
+ Native native = target->native();
+
+ // Check for natives to which template objects can be attached. This is
+ // done to provide templates to Ion for inlining these natives later on.
+
+ if (native == ArrayConstructor || native == array_construct) {
+ // Note: the template array won't be used if its length is inaccurately
+ // computed here. (We allocate here because compilation may occur on a
+ // separate thread where allocation is impossible.)
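+ // A single non-negative int32 argument is treated as the requested length;
+ // multiple arguments become the template array's elements.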
+ size_t count = 0;
+ if (args.length() != 1)
+ count = args.length();
+ else if (args.length() == 1 && args[0].isInt32() && args[0].toInt32() >= 0)
+ count = args[0].toInt32();
+
+ if (count <= ArrayObject::EagerAllocationMaxLength) {
+ ObjectGroup* group = ObjectGroup::callingAllocationSiteGroup(cx, JSProto_Array);
+ if (!group)
+ return false;
+ if (group->maybePreliminaryObjects()) {
+ *skipAttach = true;
+ return true;
+ }
+
+ // With this and other array templates, analyze the group so that
+ // we don't end up with a template whose structure might change later.
+ res.set(NewFullyAllocatedArrayForCallingAllocationSite(cx, count, TenuredObject));
+ if (!res)
+ return false;
+ EnsureArrayGroupAnalyzed(cx, res);
+ return true;
+ }
+ }
+
+ if (args.length() == 1) {
+ size_t len = 0;
+
+ if (args[0].isInt32() && args[0].toInt32() >= 0)
+ len = args[0].toInt32();
+
+ if (!TypedArrayObject::GetTemplateObjectForNative(cx, native, len, res))
+ return false;
+ if (res)
+ return true;
+ }
+
+ if (native == js::array_slice) {
+ if (args.thisv().isObject()) {
+ JSObject* obj = &args.thisv().toObject();
+ if (!obj->isSingleton()) {
+ if (obj->group()->maybePreliminaryObjects()) {
+ *skipAttach = true;
+ return true;
+ }
+ res.set(NewFullyAllocatedArrayTryReuseGroup(cx, &args.thisv().toObject(), 0,
+ TenuredObject));
+ if (!res)
+ return false;
+ EnsureArrayGroupAnalyzed(cx, res);
+ return true;
+ }
+ }
+ }
+
+ if (native == js::intrinsic_StringSplitString && args.length() == 2 && args[0].isString() &&
+ args[1].isString())
+ {
+ ObjectGroup* group = ObjectGroup::callingAllocationSiteGroup(cx, JSProto_Array);
+ if (!group)
+ return false;
+ if (group->maybePreliminaryObjects()) {
+ *skipAttach = true;
+ return true;
+ }
+
+ res.set(NewFullyAllocatedArrayForCallingAllocationSite(cx, 0, TenuredObject));
+ if (!res)
+ return false;
+ EnsureArrayGroupAnalyzed(cx, res);
+ return true;
+ }
+
+ if (native == StringConstructor) {
+ RootedString emptyString(cx, cx->runtime()->emptyString);
+ res.set(StringObject::create(cx, emptyString, /* proto = */ nullptr, TenuredObject));
+ return !!res;
+ }
+
+ if (native == obj_create && args.length() == 1 && args[0].isObjectOrNull()) {
+ RootedObject proto(cx, args[0].toObjectOrNull());
+ res.set(ObjectCreateImpl(cx, proto, TenuredObject));
+ return !!res;
+ }
+
+ if (JitSupportsSimd() && GetTemplateObjectForSimd(cx, target, res))
+ return !!res;
+
+ return true;
+}
+
+static bool
+GetTemplateObjectForClassHook(JSContext* cx, JSNative hook, CallArgs& args,
+ MutableHandleObject templateObject)
+{
+ if (hook == TypedObject::construct) {
+ Rooted<TypeDescr*> descr(cx, &args.callee().as<TypeDescr>());
+ templateObject.set(TypedObject::createZeroed(cx, descr, 1, gc::TenuredHeap));
+ return !!templateObject;
+ }
+
+ if (hook == SimdTypeDescr::call && JitSupportsSimd()) {
+ Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
+ templateObject.set(cx->compartment()->jitCompartment()->getSimdTemplateObjectFor(cx, descr));
+ return !!templateObject;
+ }
+
+ return true;
+}
+
+static bool
+IsOptimizableCallStringSplit(const Value& callee, int argc, Value* args)
+{
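+ // Require both arguments to be atoms and the callee to be the native
+ // js::intrinsic_StringSplitString; other split calls are not optimized here.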
+ if (argc != 2 || !args[0].isString() || !args[1].isString())
+ return false;
+
+ if (!args[0].toString()->isAtom() || !args[1].toString()->isAtom())
+ return false;
+
+ if (!callee.isObject() || !callee.toObject().is<JSFunction>())
+ return false;
+
+ JSFunction& calleeFun = callee.toObject().as<JSFunction>();
+ if (!calleeFun.isNative() || calleeFun.native() != js::intrinsic_StringSplitString)
+ return false;
+
+ return true;
+}
+
+static bool
+TryAttachCallStub(JSContext* cx, ICCall_Fallback* stub, HandleScript script, jsbytecode* pc,
+ JSOp op, uint32_t argc, Value* vp, bool constructing, bool isSpread,
+ bool createSingleton, bool* handled)
+{
+ bool isSuper = op == JSOP_SUPERCALL || op == JSOP_SPREADSUPERCALL;
+
+ if (createSingleton || op == JSOP_EVAL || op == JSOP_STRICTEVAL)
+ return true;
+
+ if (stub->numOptimizedStubs() >= ICCall_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+ // But for now we just bail.
+ return true;
+ }
+
+ RootedValue callee(cx, vp[0]);
+ RootedValue thisv(cx, vp[1]);
+
+ // Don't attach an optimized call stub if we could potentially attach an
+ // optimized StringSplit stub.
+ if (stub->numOptimizedStubs() == 0 && IsOptimizableCallStringSplit(callee, argc, vp + 2))
+ return true;
+
+ MOZ_ASSERT_IF(stub->hasStub(ICStub::Call_StringSplit), stub->numOptimizedStubs() == 1);
+
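+ // If a StringSplit stub is present it is the only optimized stub; unlink it
+ // before attaching a stub of a different kind.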
+ stub->unlinkStubsWithKind(cx, ICStub::Call_StringSplit);
+
+ if (!callee.isObject())
+ return true;
+
+ RootedObject obj(cx, &callee.toObject());
+ if (!obj->is<JSFunction>()) {
+ // Try to attach a stub for a call/construct hook on the object.
+ // Ignore proxies, which are special cased by callHook/constructHook.
+ if (obj->is<ProxyObject>())
+ return true;
+ if (JSNative hook = constructing ? obj->constructHook() : obj->callHook()) {
+ if (op != JSOP_FUNAPPLY && !isSpread && !createSingleton) {
+ RootedObject templateObject(cx);
+ CallArgs args = CallArgsFromVp(argc, vp);
+ if (!GetTemplateObjectForClassHook(cx, hook, args, &templateObject))
+ return false;
+
+ JitSpew(JitSpew_BaselineIC, " Generating Call_ClassHook stub");
+ ICCall_ClassHook::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ obj->getClass(), hook, templateObject,
+ script->pcToOffset(pc), constructing);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *handled = true;
+ return true;
+ }
+ }
+ return true;
+ }
+
+ RootedFunction fun(cx, &obj->as<JSFunction>());
+
+ if (fun->hasScript()) {
+ // Never attach optimized scripted call stubs for JSOP_FUNAPPLY.
+ // MagicArguments may escape the frame through them.
+ if (op == JSOP_FUNAPPLY)
+ return true;
+
+ // If callee is not an interpreted constructor, we have to throw.
+ if (constructing && !fun->isConstructor())
+ return true;
+
+ // Likewise, if the callee is a class constructor, we have to throw.
+ if (!constructing && fun->isClassConstructor())
+ return true;
+
+ if (!fun->hasJITCode()) {
+ // Don't treat this as an unoptimizable case, as we'll add a stub
+ // when the callee becomes hot.
+ *handled = true;
+ return true;
+ }
+
+ // Check if this stub chain has already generalized scripted calls.
+ if (stub->scriptedStubsAreGeneralized()) {
+ JitSpew(JitSpew_BaselineIC, " Chain already has generalized scripted call stub!");
+ return true;
+ }
+
+ if (stub->scriptedStubCount() >= ICCall_Fallback::MAX_SCRIPTED_STUBS) {
+ // Create a Call_AnyScripted stub.
+ JitSpew(JitSpew_BaselineIC, " Generating Call_AnyScripted stub (cons=%s, spread=%s)",
+ constructing ? "yes" : "no", isSpread ? "yes" : "no");
+ ICCallScriptedCompiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ constructing, isSpread, script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ // Before adding new stub, unlink all previous Call_Scripted.
+ stub->unlinkStubsWithKind(cx, ICStub::Call_Scripted);
+
+ // Add new generalized stub.
+ stub->addNewStub(newStub);
+ *handled = true;
+ return true;
+ }
+
+ // Keep track of the function's |prototype| property in type
+ // information, for use during Ion compilation.
+ if (IsIonEnabled(cx))
+ EnsureTrackPropertyTypes(cx, fun, NameToId(cx->names().prototype));
+
+ // Remember the template object associated with any script being called
+ // as a constructor, for later use during Ion compilation. This is unsound
+ // for super(), as a single callsite can have multiple possible prototype objects
+ // created (via different newTargets).
+ RootedObject templateObject(cx);
+ if (constructing && !isSuper) {
+ // If we are calling a constructor for which the new script
+ // properties analysis has not been performed yet, don't attach a
+ // stub. After the analysis is performed, CreateThisForFunction may
+ // start returning objects with a different type, and the Ion
+ // compiler will get confused.
+
+ // Only attach a stub if the function already has a prototype and
+ // we can look it up without causing side effects.
+ RootedObject newTarget(cx, &vp[2 + argc].toObject());
+ RootedValue protov(cx);
+ if (!GetPropertyPure(cx, newTarget, NameToId(cx->names().prototype), protov.address())) {
+ JitSpew(JitSpew_BaselineIC, " Can't purely lookup function prototype");
+ return true;
+ }
+
+ if (protov.isObject()) {
+ TaggedProto proto(&protov.toObject());
+ ObjectGroup* group = ObjectGroup::defaultNewGroup(cx, nullptr, proto, newTarget);
+ if (!group)
+ return false;
+
+ if (group->newScript() && !group->newScript()->analyzed()) {
+ JitSpew(JitSpew_BaselineIC, " Function newScript has not been analyzed");
+
+ // This is temporary until the analysis is performed, so
+ // don't treat this as unoptimizable.
+ *handled = true;
+ return true;
+ }
+ }
+
+ JSObject* thisObject = CreateThisForFunction(cx, fun, newTarget, TenuredObject);
+ if (!thisObject)
+ return false;
+
+ if (thisObject->is<PlainObject>() || thisObject->is<UnboxedPlainObject>())
+ templateObject = thisObject;
+ }
+
+ JitSpew(JitSpew_BaselineIC,
+ " Generating Call_Scripted stub (fun=%p, %s:%" PRIuSIZE ", cons=%s, spread=%s)",
+ fun.get(), fun->nonLazyScript()->filename(), fun->nonLazyScript()->lineno(),
+ constructing ? "yes" : "no", isSpread ? "yes" : "no");
+ ICCallScriptedCompiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ fun, templateObject,
+ constructing, isSpread, script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *handled = true;
+ return true;
+ }
+
+ if (fun->isNative() && (!constructing || fun->isConstructor())) {
+ // Generalized native call stubs are not here yet!
+ MOZ_ASSERT(!stub->nativeStubsAreGeneralized());
+
+ // Check for JSOP_FUNAPPLY
+ if (op == JSOP_FUNAPPLY) {
+ if (fun->native() == fun_apply)
+ return TryAttachFunApplyStub(cx, stub, script, pc, thisv, argc, vp + 2, handled);
+
+ // Don't try to attach a "regular" optimized call stub for FUNAPPLY ops,
+ // since MagicArguments may escape through them.
+ return true;
+ }
+
+ if (op == JSOP_FUNCALL && fun->native() == fun_call) {
+ if (!TryAttachFunCallStub(cx, stub, script, pc, thisv, handled))
+ return false;
+ if (*handled)
+ return true;
+ }
+
+ if (stub->nativeStubCount() >= ICCall_Fallback::MAX_NATIVE_STUBS) {
+ JitSpew(JitSpew_BaselineIC,
+ " Too many Call_Native stubs. TODO: add Call_AnyNative!");
+ return true;
+ }
+
+ if (fun->native() == intrinsic_IsSuspendedStarGenerator) {
+ // This intrinsic only appears in self-hosted code.
+ MOZ_ASSERT(op != JSOP_NEW);
+ MOZ_ASSERT(argc == 1);
+ JitSpew(JitSpew_BaselineIC, " Generating Call_IsSuspendedStarGenerator stub");
+
+ ICCall_IsSuspendedStarGenerator::Compiler compiler(cx);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *handled = true;
+ return true;
+ }
+
+ RootedObject templateObject(cx);
+ if (MOZ_LIKELY(!isSpread && !isSuper)) {
+ bool skipAttach = false;
+ CallArgs args = CallArgsFromVp(argc, vp);
+ if (!GetTemplateObjectForNative(cx, fun, args, &templateObject, &skipAttach))
+ return false;
+ if (skipAttach) {
+ *handled = true;
+ return true;
+ }
+ MOZ_ASSERT_IF(templateObject, !templateObject->group()->maybePreliminaryObjects());
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Generating Call_Native stub (fun=%p, cons=%s, spread=%s)",
+ fun.get(), constructing ? "yes" : "no", isSpread ? "yes" : "no");
+ ICCall_Native::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ fun, templateObject, constructing, isSpread,
+ script->pcToOffset(pc));
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *handled = true;
+ return true;
+ }
+
+ return true;
+}
+
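+// Clone |obj| into a fresh tenured array with the same group and dense elements.
+// Used to snapshot the split result stored in (and later returned from) a
+// Call_StringSplit stub.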
+static bool
+CopyArray(JSContext* cx, HandleObject obj, MutableHandleValue result)
+{
+ uint32_t length = GetAnyBoxedOrUnboxedArrayLength(obj);
+ JSObject* nobj = NewFullyAllocatedArrayTryReuseGroup(cx, obj, length, TenuredObject);
+ if (!nobj)
+ return false;
+ EnsureArrayGroupAnalyzed(cx, nobj);
+ CopyAnyBoxedOrUnboxedDenseElements(cx, nobj, obj, 0, 0, length);
+
+ result.setObject(*nobj);
+ return true;
+}
+
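+// After the call has already run, try to attach a Call_StringSplit stub that
+// caches an atomized copy of the result array for this particular str/sep pair.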
+static bool
+TryAttachStringSplit(JSContext* cx, ICCall_Fallback* stub, HandleScript script,
+ uint32_t argc, HandleValue callee, Value* vp, jsbytecode* pc,
+ HandleValue res, bool* attached)
+{
+ if (stub->numOptimizedStubs() != 0)
+ return true;
+
+ Value* args = vp + 2;
+
+ // Don't optimize constructing calls; String.prototype.split is not a constructor.
+ if (JSOp(*pc) == JSOP_NEW)
+ return true;
+
+ if (!IsOptimizableCallStringSplit(callee, argc, args))
+ return true;
+
+ MOZ_ASSERT(callee.isObject());
+ MOZ_ASSERT(callee.toObject().is<JSFunction>());
+
+ RootedString str(cx, args[0].toString());
+ RootedString sep(cx, args[1].toString());
+ RootedObject obj(cx, &res.toObject());
+ RootedValue arr(cx);
+
+ // Copy the array before storing in stub.
+ if (!CopyArray(cx, obj, &arr))
+ return false;
+
+ // Atomize all elements of the array.
+ RootedObject arrObj(cx, &arr.toObject());
+ uint32_t initLength = GetAnyBoxedOrUnboxedArrayLength(arrObj);
+ for (uint32_t i = 0; i < initLength; i++) {
+ JSAtom* str = js::AtomizeString(cx, GetAnyBoxedOrUnboxedDenseElement(arrObj, i).toString());
+ if (!str)
+ return false;
+
+ if (!SetAnyBoxedOrUnboxedDenseElement(cx, arrObj, i, StringValue(str))) {
+ // The value could not be stored to an unboxed dense element.
+ return true;
+ }
+ }
+
+ ICCall_StringSplit::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
+ script->pcToOffset(pc), str, sep,
+ arr);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+}
+
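+// VM entry point for the Call fallback stub: try to attach an optimized stub,
+// perform the (possibly constructing) call, and type-monitor the result.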
+static bool
+DoCallFallback(JSContext* cx, BaselineFrame* frame, ICCall_Fallback* stub_, uint32_t argc,
+ Value* vp, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, frame, stub_->icEntry());
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICCall_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "Call(%s)", CodeName[op]);
+
+ MOZ_ASSERT(argc == GET_ARGC(pc));
+ bool constructing = (op == JSOP_NEW);
+
+ // Ensure vp array is rooted - we may GC in here.
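+ // vp[0] is the callee, vp[1] is |this|, vp[2..argc+1] are the arguments, and
+ // constructing calls append newTarget at the end.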
+ size_t numValues = argc + 2 + constructing;
+ AutoArrayRooter vpRoot(cx, numValues, vp);
+
+ CallArgs callArgs = CallArgsFromSp(argc + constructing, vp + numValues, constructing);
+ RootedValue callee(cx, vp[0]);
+
+ // Handle funapply with JSOP_ARGUMENTS
+ if (op == JSOP_FUNAPPLY && argc == 2 && callArgs[1].isMagic(JS_OPTIMIZED_ARGUMENTS)) {
+ if (!GuardFunApplyArgumentsOptimization(cx, frame, callArgs))
+ return false;
+ }
+
+ bool createSingleton = ObjectGroup::useSingletonForNewObject(cx, script, pc);
+
+ // Try attaching a call stub.
+ bool handled = false;
+ if (!TryAttachCallStub(cx, stub, script, pc, op, argc, vp, constructing, false,
+ createSingleton, &handled))
+ {
+ return false;
+ }
+
+ if (op == JSOP_NEW) {
+ if (!ConstructFromStack(cx, callArgs))
+ return false;
+ res.set(callArgs.rval());
+ } else if ((op == JSOP_EVAL || op == JSOP_STRICTEVAL) &&
+ frame->environmentChain()->global().valueIsEval(callee))
+ {
+ if (!DirectEval(cx, callArgs.get(0), res))
+ return false;
+ } else {
+ MOZ_ASSERT(op == JSOP_CALL ||
+ op == JSOP_CALLITER ||
+ op == JSOP_FUNCALL ||
+ op == JSOP_FUNAPPLY ||
+ op == JSOP_EVAL ||
+ op == JSOP_STRICTEVAL);
+ if (op == JSOP_CALLITER && callee.isPrimitive()) {
+ MOZ_ASSERT(argc == 0, "thisv must be on top of the stack");
+ ReportValueError(cx, JSMSG_NOT_ITERABLE, -1, callArgs.thisv(), nullptr);
+ return false;
+ }
+
+ if (!CallFromStack(cx, callArgs))
+ return false;
+
+ res.set(callArgs.rval());
+ }
+
+ TypeScript::Monitor(cx, script, pc, res);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Attach a new TypeMonitor stub for this value.
+ ICTypeMonitor_Fallback* typeMonFbStub = stub->fallbackMonitorStub();
+ if (!typeMonFbStub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ // Add a type monitor stub for the resulting value.
+ if (!stub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ // If 'callee' is a potential Call_StringSplit, try to attach an
+ // optimized StringSplit stub. Note that vp[0] now holds the return value
+ // instead of the callee, so we pass the callee as well.
+ if (!TryAttachStringSplit(cx, stub, script, argc, callee, vp, pc, res, &handled))
+ return false;
+
+ if (!handled)
+ stub->noteUnoptimizableCall();
+ return true;
+}
+
+static bool
+DoSpreadCallFallback(JSContext* cx, BaselineFrame* frame, ICCall_Fallback* stub_, Value* vp,
+ MutableHandleValue res)
+{
+ SharedStubInfo info(cx, frame, stub_->icEntry());
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICCall_Fallback*> stub(frame, stub_);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ JSOp op = JSOp(*pc);
+ bool constructing = (op == JSOP_SPREADNEW);
+ FallbackICSpew(cx, stub, "SpreadCall(%s)", CodeName[op]);
+
+ // Ensure vp array is rooted - we may GC in here.
+ AutoArrayRooter vpRoot(cx, 3 + constructing, vp);
+
+ RootedValue callee(cx, vp[0]);
+ RootedValue thisv(cx, vp[1]);
+ RootedValue arr(cx, vp[2]);
+ RootedValue newTarget(cx, constructing ? vp[3] : NullValue());
+
+ // Try attaching a call stub.
+ bool handled = false;
+ if (op != JSOP_SPREADEVAL && op != JSOP_STRICTSPREADEVAL &&
+ !TryAttachCallStub(cx, stub, script, pc, op, 1, vp, constructing, true, false,
+ &handled))
+ {
+ return false;
+ }
+
+ if (!SpreadCallOperation(cx, script, pc, thisv, callee, arr, newTarget, res))
+ return false;
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Attach a new TypeMonitor stub for this value.
+ ICTypeMonitor_Fallback* typeMonFbStub = stub->fallbackMonitorStub();
+ if (!typeMonFbStub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ // Add a type monitor stub for the resulting value.
+ if (!stub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ if (!handled)
+ stub->noteUnoptimizableCall();
+ return true;
+}
+
+void
+ICCallStubCompiler::pushCallArguments(MacroAssembler& masm, AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool isJitCall, bool isConstructing)
+{
+ MOZ_ASSERT(!regs.has(argcReg));
+
+ // Account for new.target
+ Register count = regs.takeAny();
+
+ masm.move32(argcReg, count);
+
+ // If we are setting up for a jitcall, we have to align the stack taking
+ // into account the args and newTarget. We could also count callee and |this|,
+ // but it's a waste of stack space. Because we want to keep argcReg unchanged,
+ // just account for newTarget initially, and add the other 2 after ensuring
+ // alignment.
+ if (isJitCall) {
+ if (isConstructing)
+ masm.add32(Imm32(1), count);
+ } else {
+ masm.add32(Imm32(2 + isConstructing), count);
+ }
+
+ // argPtr initially points to the last argument.
+ Register argPtr = regs.takeAny();
+ masm.moveStackPtrTo(argPtr);
+
+ // Skip 4 pointers pushed on top of the arguments: the frame descriptor,
+ // return address, old frame pointer and stub reg.
+ masm.addPtr(Imm32(STUB_FRAME_SIZE), argPtr);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(count);
+
+ // Account for callee and |this|, skipped earlier
+ masm.add32(Imm32(2), count);
+ }
+
+ // Push all values, starting at the last one.
+ Label loop, done;
+ masm.bind(&loop);
+ masm.branchTest32(Assembler::Zero, count, count, &done);
+ {
+ masm.pushValue(Address(argPtr, 0));
+ masm.addPtr(Imm32(sizeof(Value)), argPtr);
+
+ masm.sub32(Imm32(1), count);
+ masm.jump(&loop);
+ }
+ masm.bind(&done);
+}
+
+void
+ICCallStubCompiler::guardSpreadCall(MacroAssembler& masm, Register argcReg, Label* failure,
+ bool isConstructing)
+{
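+ // The spread array sits on top of the IC stack values (just below newTarget
+ // when constructing). Reuse argcReg to hold, in turn, the unboxed array, its
+ // elements pointer, and finally its length.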
+ masm.unboxObject(Address(masm.getStackPointer(),
+ isConstructing * sizeof(Value) + ICStackValueOffset), argcReg);
+ masm.loadPtr(Address(argcReg, NativeObject::offsetOfElements()), argcReg);
+ masm.load32(Address(argcReg, ObjectElements::offsetOfLength()), argcReg);
+
+ // Limit actual argc to something reasonable (huge number of arguments can
+ // blow the stack limit).
+ static_assert(ICCall_Scripted::MAX_ARGS_SPREAD_LENGTH <= ARGS_LENGTH_MAX,
+ "maximum arguments length for optimized stub should be <= ARGS_LENGTH_MAX");
+ masm.branch32(Assembler::Above, argcReg, Imm32(ICCall_Scripted::MAX_ARGS_SPREAD_LENGTH),
+ failure);
+}
+
+void
+ICCallStubCompiler::pushSpreadCallArguments(MacroAssembler& masm,
+ AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool isJitCall,
+ bool isConstructing)
+{
+ // Pull the array off the stack before aligning.
+ Register startReg = regs.takeAny();
+ masm.unboxObject(Address(masm.getStackPointer(),
+ (isConstructing * sizeof(Value)) + STUB_FRAME_SIZE), startReg);
+ masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ Register alignReg = argcReg;
+ if (isConstructing) {
+ alignReg = regs.takeAny();
+ masm.movePtr(argcReg, alignReg);
+ masm.addPtr(Imm32(1), alignReg);
+ }
+ masm.alignJitStackBasedOnNArgs(alignReg);
+ if (isConstructing) {
+ MOZ_ASSERT(alignReg != argcReg);
+ regs.add(alignReg);
+ }
+ }
+
+ // Push newTarget, if necessary
+ if (isConstructing)
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE));
+
+ // Push arguments: set up endReg to point to &array[argc]
+ Register endReg = regs.takeAny();
+ masm.movePtr(argcReg, endReg);
+ static_assert(sizeof(Value) == 8, "Value must be 8 bytes");
+ masm.lshiftPtr(Imm32(3), endReg);
+ masm.addPtr(startReg, endReg);
+
+ // Copying pre-decrements endReg by 8 until startReg is reached
+ Label copyDone;
+ Label copyStart;
+ masm.bind(&copyStart);
+ masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
+ masm.subPtr(Imm32(sizeof(Value)), endReg);
+ masm.pushValue(Address(endReg, 0));
+ masm.jump(&copyStart);
+ masm.bind(&copyDone);
+
+ regs.add(startReg);
+ regs.add(endReg);
+
+ // Push the callee and |this|.
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE + (1 + isConstructing) * sizeof(Value)));
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE + (2 + isConstructing) * sizeof(Value)));
+}
+
+Register
+ICCallStubCompiler::guardFunApply(MacroAssembler& masm, AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool checkNative, FunApplyThing applyThing,
+ Label* failure)
+{
+ // Ensure argc == 2
+ masm.branch32(Assembler::NotEqual, argcReg, Imm32(2), failure);
+
+ // Stack looks like:
+ // [..., CalleeV, ThisV, Arg0V, Arg1V, <MaybeReturnAddr>]
+
+ Address secondArgSlot(masm.getStackPointer(), ICStackValueOffset);
+ if (applyThing == FunApply_MagicArgs) {
+ // Ensure that the second arg is magic arguments.
+ masm.branchTestMagic(Assembler::NotEqual, secondArgSlot, failure);
+
+ // Ensure that this frame doesn't have an arguments object.
+ masm.branchTest32(Assembler::NonZero,
+ Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ),
+ failure);
+
+ // Limit the length to something reasonable.
+ masm.branch32(Assembler::Above,
+ Address(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()),
+ Imm32(ICCall_ScriptedApplyArray::MAX_ARGS_ARRAY_LENGTH),
+ failure);
+ } else {
+ MOZ_ASSERT(applyThing == FunApply_Array);
+
+ AllocatableGeneralRegisterSet regsx = regs;
+
+ // Ensure that the second arg is an array.
+ ValueOperand secondArgVal = regsx.takeAnyValue();
+ masm.loadValue(secondArgSlot, secondArgVal);
+
+ masm.branchTestObject(Assembler::NotEqual, secondArgVal, failure);
+ Register secondArgObj = masm.extractObject(secondArgVal, ExtractTemp1);
+
+ regsx.add(secondArgVal);
+ regsx.takeUnchecked(secondArgObj);
+
+ masm.branchTestObjClass(Assembler::NotEqual, secondArgObj, regsx.getAny(),
+ &ArrayObject::class_, failure);
+
+ // Get the array elements and ensure that initializedLength == length
+ masm.loadPtr(Address(secondArgObj, NativeObject::offsetOfElements()), secondArgObj);
+
+ Register lenReg = regsx.takeAny();
+ masm.load32(Address(secondArgObj, ObjectElements::offsetOfLength()), lenReg);
+
+ masm.branch32(Assembler::NotEqual,
+ Address(secondArgObj, ObjectElements::offsetOfInitializedLength()),
+ lenReg, failure);
+
+ // Limit the length to something reasonable (huge number of arguments can
+ // blow the stack limit).
+ masm.branch32(Assembler::Above, lenReg,
+ Imm32(ICCall_ScriptedApplyArray::MAX_ARGS_ARRAY_LENGTH),
+ failure);
+
+ // Ensure no holes. Loop through values in array and make sure none are magic.
+ // Start address is secondArgObj, end address is secondArgObj + (lenReg * sizeof(Value))
+ JS_STATIC_ASSERT(sizeof(Value) == 8);
+ masm.lshiftPtr(Imm32(3), lenReg);
+ masm.addPtr(secondArgObj, lenReg);
+
+ Register start = secondArgObj;
+ Register end = lenReg;
+ Label loop;
+ Label endLoop;
+ masm.bind(&loop);
+ masm.branchPtr(Assembler::AboveOrEqual, start, end, &endLoop);
+ masm.branchTestMagic(Assembler::Equal, Address(start, 0), failure);
+ masm.addPtr(Imm32(sizeof(Value)), start);
+ masm.jump(&loop);
+ masm.bind(&endLoop);
+ }
+
+ // Stack now confirmed to be like:
+ // [..., CalleeV, ThisV, Arg0V, Arg1V (MagicValue(Arguments) or a packed array), <MaybeReturnAddr>]
+
+ // Load the callee, ensure that it's fun_apply
+ ValueOperand val = regs.takeAnyValue();
+ Address calleeSlot(masm.getStackPointer(), ICStackValueOffset + (3 * sizeof(Value)));
+ masm.loadValue(calleeSlot, val);
+
+ masm.branchTestObject(Assembler::NotEqual, val, failure);
+ Register callee = masm.extractObject(val, ExtractTemp1);
+
+ masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
+ failure);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+
+ masm.branchPtr(Assembler::NotEqual, callee, ImmPtr(fun_apply), failure);
+
+ // Load the |thisv|, ensure that it's a scripted function with a valid baseline or ion
+ // script, or a native function.
+ Address thisSlot(masm.getStackPointer(), ICStackValueOffset + (2 * sizeof(Value)));
+ masm.loadValue(thisSlot, val);
+
+ masm.branchTestObject(Assembler::NotEqual, val, failure);
+ Register target = masm.extractObject(val, ExtractTemp1);
+ regs.add(val);
+ regs.takeUnchecked(target);
+
+ masm.branchTestObjClass(Assembler::NotEqual, target, regs.getAny(), &JSFunction::class_,
+ failure);
+
+ if (checkNative) {
+ masm.branchIfInterpreted(target, failure);
+ } else {
+ masm.branchIfFunctionHasNoScript(target, failure);
+ Register temp = regs.takeAny();
+ masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), temp);
+ masm.loadBaselineOrIonRaw(temp, temp, failure);
+ regs.add(temp);
+ }
+ return target;
+}
+
+void
+ICCallStubCompiler::pushCallerArguments(MacroAssembler& masm, AllocatableGeneralRegisterSet regs)
+{
+ // Initialize startReg to point to the start of the caller's arguments vector
+ // and endReg to point just past its end.
+ Register startReg = regs.takeAny();
+ Register endReg = regs.takeAny();
+ masm.loadPtr(Address(BaselineFrameReg, 0), startReg);
+ masm.loadPtr(Address(startReg, BaselineFrame::offsetOfNumActualArgs()), endReg);
+ masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), startReg);
+ masm.alignJitStackBasedOnNArgs(endReg);
+ masm.lshiftPtr(Imm32(ValueShift), endReg);
+ masm.addPtr(startReg, endReg);
+
+ // Copying pre-decrements endReg by 8 until startReg is reached
+ Label copyDone;
+ Label copyStart;
+ masm.bind(&copyStart);
+ masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
+ masm.subPtr(Imm32(sizeof(Value)), endReg);
+ masm.pushValue(Address(endReg, 0));
+ masm.jump(&copyStart);
+ masm.bind(&copyDone);
+}
+
+void
+ICCallStubCompiler::pushArrayArguments(MacroAssembler& masm, Address arrayVal,
+ AllocatableGeneralRegisterSet regs)
+{
+ // Load start and end address of values to copy.
+ // guardFunApply has already guaranteed that the array is packed and contains
+ // no holes.
+ Register startReg = regs.takeAny();
+ Register endReg = regs.takeAny();
+ masm.extractObject(arrayVal, startReg);
+ masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
+ masm.load32(Address(startReg, ObjectElements::offsetOfInitializedLength()), endReg);
+ masm.alignJitStackBasedOnNArgs(endReg);
+ masm.lshiftPtr(Imm32(ValueShift), endReg);
+ masm.addPtr(startReg, endReg);
+
+ // Copying pre-decrements endReg by 8 until startReg is reached
+ Label copyDone;
+ Label copyStart;
+ masm.bind(&copyStart);
+ masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
+ masm.subPtr(Imm32(sizeof(Value)), endReg);
+ masm.pushValue(Address(endReg, 0));
+ masm.jump(&copyStart);
+ masm.bind(&copyDone);
+}
+
+typedef bool (*DoCallFallbackFn)(JSContext*, BaselineFrame*, ICCall_Fallback*,
+ uint32_t, Value*, MutableHandleValue);
+static const VMFunction DoCallFallbackInfo =
+ FunctionInfo<DoCallFallbackFn>(DoCallFallback, "DoCallFallback");
+
+typedef bool (*DoSpreadCallFallbackFn)(JSContext*, BaselineFrame*, ICCall_Fallback*,
+ Value*, MutableHandleValue);
+static const VMFunction DoSpreadCallFallbackInfo =
+ FunctionInfo<DoSpreadCallFallbackFn>(DoSpreadCallFallback, "DoSpreadCallFallback");
+
+bool
+ICCall_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Values are on the stack left-to-right. Calling convention wants them
+ // right-to-left so duplicate them on the stack in reverse order.
+ // |this| and callee are pushed last.
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ if (MOZ_UNLIKELY(isSpread_)) {
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, R1.scratchReg());
+
+ // Use BaselineFrameReg instead of BaselineStackReg, because
+ // BaselineFrameReg and BaselineStackReg hold the same value just after
+ // calling enterStubFrame.
+
+ // newTarget
+ if (isConstructing_)
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE));
+
+ // array
+ uint32_t valueOffset = isConstructing_;
+ masm.pushValue(Address(BaselineFrameReg, valueOffset++ * sizeof(Value) + STUB_FRAME_SIZE));
+
+ // this
+ masm.pushValue(Address(BaselineFrameReg, valueOffset++ * sizeof(Value) + STUB_FRAME_SIZE));
+
+ // callee
+ masm.pushValue(Address(BaselineFrameReg, valueOffset++ * sizeof(Value) + STUB_FRAME_SIZE));
+
+ masm.push(masm.getStackPointer());
+ masm.push(ICStubReg);
+
+ PushStubPayload(masm, R0.scratchReg());
+
+ if (!callVM(DoSpreadCallFallbackInfo, masm))
+ return false;
+
+ leaveStubFrame(masm);
+ EmitReturnFromIC(masm);
+
+ // SPREADCALL is not yet supported in Ion, so do not generate asmcode for
+ // bailout.
+ return true;
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, R1.scratchReg());
+
+ regs.take(R0.scratchReg()); // argc.
+
+ pushCallArguments(masm, regs, R0.scratchReg(), /* isJitCall = */ false, isConstructing_);
+
+ masm.push(masm.getStackPointer());
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+
+ PushStubPayload(masm, R0.scratchReg());
+
+ if (!callVM(DoCallFallbackInfo, masm))
+ return false;
+
+ uint32_t framePushed = masm.framePushed();
+ leaveStubFrame(masm);
+ EmitReturnFromIC(masm);
+
+ // The following asmcode is only used when an Ion inlined frame bails out
+ // into baseline jitcode. The return address pushed onto the
+ // reconstructed baseline stack points here.
+ returnOffset_ = masm.currentOffset();
+
+ // Here we are again in a stub frame. Mark it as such.
+ inStubFrame_ = true;
+ masm.setFramePushed(framePushed);
+
+ // Load passed-in ThisV into R1 just in case it's needed. Need to do this before
+ // we leave the stub frame since that info will be lost.
+ // Current stack: [...., ThisV, ActualArgc, CalleeToken, Descriptor ]
+ masm.loadValue(Address(masm.getStackPointer(), 3 * sizeof(size_t)), R1);
+
+ leaveStubFrame(masm, true);
+
+ // If this is a |constructing| call and the callee returns a non-object, we replace it with
+ // the |this| object passed in.
+ if (isConstructing_) {
+ MOZ_ASSERT(JSReturnOperand == R0);
+ Label skipThisReplace;
+
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.moveValue(R1, R0);
+#ifdef DEBUG
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.assumeUnreachable("Failed to return object in constructing call.");
+#endif
+ masm.bind(&skipThisReplace);
+ }
+
+ // At this point, ICStubReg points to the ICCall_Fallback stub, which is NOT
+ // a MonitoredStub, but rather a MonitoredFallbackStub. To use EmitEnterTypeMonitorIC,
+ // first load the ICTypeMonitor_Fallback stub into ICStubReg. Then, use
+ // EmitEnterTypeMonitorIC with a custom struct offset.
+ masm.loadPtr(Address(ICStubReg, ICMonitoredFallbackStub::offsetOfFallbackMonitorStub()),
+ ICStubReg);
+ EmitEnterTypeMonitorIC(masm, ICTypeMonitor_Fallback::offsetOfFirstMonitorStub());
+
+ return true;
+}
+
+void
+ICCall_Fallback::Compiler::postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code)
+{
+ if (MOZ_UNLIKELY(isSpread_))
+ return;
+
+ cx->compartment()->jitCompartment()->initBaselineCallReturnAddr(code->raw() + returnOffset_,
+ isConstructing_);
+}
+
+typedef bool (*CreateThisFn)(JSContext* cx, HandleObject callee, HandleObject newTarget,
+ MutableHandleValue rval);
+static const VMFunction CreateThisInfoBaseline =
+ FunctionInfo<CreateThisFn>(CreateThis, "CreateThis");
+
+bool
+ICCallScriptedCompiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ bool canUseTailCallReg = regs.has(ICTailCallReg);
+
+ Register argcReg = R0.scratchReg();
+ MOZ_ASSERT(argcReg != ArgumentsRectifierReg);
+
+ regs.take(argcReg);
+ regs.take(ArgumentsRectifierReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ if (isSpread_)
+ guardSpreadCall(masm, argcReg, &failure, isConstructing_);
+
+ // Load the callee in R1, accounting for newTarget, if necessary
+ // Stack Layout: [ ..., CalleeVal, ThisVal, Arg0Val, ..., ArgNVal, [newTarget] +ICStackValueOffset+ ]
+ if (isSpread_) {
+ unsigned skipToCallee = (2 + isConstructing_) * sizeof(Value);
+ masm.loadValue(Address(masm.getStackPointer(), skipToCallee + ICStackValueOffset), R1);
+ } else {
+ // Account for newTarget, if necessary
+ unsigned nonArgsSkip = (1 + isConstructing_) * sizeof(Value);
+ BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + nonArgsSkip);
+ masm.loadValue(calleeSlot, R1);
+ }
+ regs.take(R1);
+
+ // Ensure callee is an object.
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ // Ensure callee is a function.
+ Register callee = masm.extractObject(R1, ExtractTemp0);
+
+ // If calling a specific script, check if the script matches. Otherwise, ensure that
+ // callee function is scripted. Leave calleeScript in |callee| reg.
+ if (callee_) {
+ MOZ_ASSERT(kind == ICStub::Call_Scripted);
+
+ // Check if the object matches this callee.
+ Address expectedCallee(ICStubReg, ICCall_Scripted::offsetOfCallee());
+ masm.branchPtr(Assembler::NotEqual, expectedCallee, callee, &failure);
+
+ // Guard against relazification.
+ masm.branchIfFunctionHasNoScript(callee, &failure);
+ } else {
+ // Ensure the object is a function.
+ masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
+ &failure);
+ if (isConstructing_) {
+ masm.branchIfNotInterpretedConstructor(callee, regs.getAny(), &failure);
+ } else {
+ masm.branchIfFunctionHasNoScript(callee, &failure);
+ masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, callee,
+ regs.getAny(), &failure);
+ }
+ }
+
+ // Load the JSScript.
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+
+ // Load the start of the target JitCode.
+ Register code;
+ if (!isConstructing_) {
+ code = regs.takeAny();
+ masm.loadBaselineOrIonRaw(callee, code, &failure);
+ } else {
+ Address scriptCode(callee, JSScript::offsetOfBaselineOrIonRaw());
+ masm.branchPtr(Assembler::Equal, scriptCode, ImmPtr(nullptr), &failure);
+ }
+
+ // We no longer need R1.
+ regs.add(R1);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, regs.getAny());
+ if (canUseTailCallReg)
+ regs.add(ICTailCallReg);
+
+ Label failureLeaveStubFrame;
+
+ if (isConstructing_) {
+ // Save argc before call.
+ masm.push(argcReg);
+
+ // Stack now looks like:
+ // [..., Callee, ThisV, Arg0V, ..., ArgNV, NewTarget, StubFrameHeader, ArgC ]
+ masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + sizeof(size_t)), R1);
+ masm.push(masm.extractObject(R1, ExtractTemp0));
+
+ if (isSpread_) {
+ masm.loadValue(Address(masm.getStackPointer(),
+ 3 * sizeof(Value) + STUB_FRAME_SIZE + sizeof(size_t) +
+ sizeof(JSObject*)),
+ R1);
+ } else {
+ BaseValueIndex calleeSlot2(masm.getStackPointer(), argcReg,
+ 2 * sizeof(Value) + STUB_FRAME_SIZE + sizeof(size_t) +
+ sizeof(JSObject*));
+ masm.loadValue(calleeSlot2, R1);
+ }
+ masm.push(masm.extractObject(R1, ExtractTemp0));
+ if (!callVM(CreateThisInfoBaseline, masm))
+ return false;
+
+ // Return of CreateThis must be an object or uninitialized.
+#ifdef DEBUG
+ Label createdThisOK;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &createdThisOK);
+ masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &createdThisOK);
+ masm.assumeUnreachable("The return of CreateThis must be an object or uninitialized.");
+ masm.bind(&createdThisOK);
+#endif
+
+ // Reset the register set from here on in.
+ MOZ_ASSERT(JSReturnOperand == R0);
+ regs = availableGeneralRegs(0);
+ regs.take(R0);
+ regs.take(ArgumentsRectifierReg);
+ argcReg = regs.takeAny();
+
+ // Restore saved argc so we can use it to calculate the address to save
+ // the resulting this object to.
+ masm.pop(argcReg);
+
+ // Save "this" value back into pushed arguments on stack. R0 can be clobbered after that.
+ // Stack now looks like:
+ // [..., Callee, ThisV, Arg0V, ..., ArgNV, [NewTarget], StubFrameHeader ]
+ if (isSpread_) {
+ masm.storeValue(R0, Address(masm.getStackPointer(),
+ (1 + isConstructing_) * sizeof(Value) + STUB_FRAME_SIZE));
+ } else {
+ BaseValueIndex thisSlot(masm.getStackPointer(), argcReg,
+ STUB_FRAME_SIZE + isConstructing_ * sizeof(Value));
+ masm.storeValue(R0, thisSlot);
+ }
+
+ // Restore the stub register from the baseline stub frame.
+ masm.loadPtr(Address(masm.getStackPointer(), STUB_FRAME_SAVED_STUB_OFFSET), ICStubReg);
+
+ // Reload callee script. Note that a GC triggered by CreateThis may
+ // have destroyed the callee BaselineScript and IonScript. CreateThis is
+ // safely repeatable though, so in this case we just leave the stub frame
+ // and jump to the next stub.
+
+ // Just need to load the script now.
+ if (isSpread_) {
+ unsigned skipForCallee = (2 + isConstructing_) * sizeof(Value);
+ masm.loadValue(Address(masm.getStackPointer(), skipForCallee + STUB_FRAME_SIZE), R0);
+ } else {
+ // Account for newTarget, if necessary
+ unsigned nonArgsSkip = (1 + isConstructing_) * sizeof(Value);
+ BaseValueIndex calleeSlot3(masm.getStackPointer(), argcReg, nonArgsSkip + STUB_FRAME_SIZE);
+ masm.loadValue(calleeSlot3, R0);
+ }
+ callee = masm.extractObject(R0, ExtractTemp0);
+ regs.add(R0);
+ regs.takeUnchecked(callee);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+
+ code = regs.takeAny();
+ masm.loadBaselineOrIonRaw(callee, code, &failureLeaveStubFrame);
+
+ // Release the callee register, but don't add ExtractTemp0 back into the pool.
+ // ExtractTemp0 is used later, and if it's allocated to some other register at that
+ // point, it will get clobbered when used.
+ if (callee != ExtractTemp0)
+ regs.add(callee);
+
+ if (canUseTailCallReg)
+ regs.addUnchecked(ICTailCallReg);
+ }
+ Register scratch = regs.takeAny();
+
+ // Values are on the stack left-to-right. Calling convention wants them
+ // right-to-left so duplicate them on the stack in reverse order.
+ // |this| and callee are pushed last.
+ if (isSpread_)
+ pushSpreadCallArguments(masm, regs, argcReg, /* isJitCall = */ true, isConstructing_);
+ else
+ pushCallArguments(masm, regs, argcReg, /* isJitCall = */ true, isConstructing_);
+
+ // The callee is on top of the stack. Pop and unbox it.
+ ValueOperand val = regs.takeAnyValue();
+ masm.popValue(val);
+ callee = masm.extractObject(val, ExtractTemp0);
+
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.Push(argcReg);
+ masm.PushCalleeToken(callee, isConstructing_);
+ masm.Push(scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
+ masm.branch32(Assembler::AboveOrEqual, argcReg, callee, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != code);
+ MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+ masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+ masm.movePtr(argcReg, ArgumentsRectifierReg);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ // If this is a constructing call, and the callee returns a non-object, replace it with
+ // the |this| object passed in.
+ if (isConstructing_) {
+ Label skipThisReplace;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+
+ // Current stack: [ Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
+ // However, we can't use this ThisVal, because it hasn't been traced. We need to use
+ // the ThisVal higher up the stack:
+ // Current stack: [ ThisVal, ARGVALS..., ...STUB FRAME...,
+ // Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
+
+ // Restore the BaselineFrameReg based on the frame descriptor.
+ //
+ // BaselineFrameReg = BaselineStackReg
+ // + sizeof(Descriptor) + sizeof(Callee) + sizeof(ActualArgc)
+ // + stubFrameSize(Descriptor)
+ // - sizeof(ICStubReg) - sizeof(BaselineFrameReg)
+ Address descriptorAddr(masm.getStackPointer(), 0);
+ masm.loadPtr(descriptorAddr, BaselineFrameReg);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), BaselineFrameReg);
+ masm.addPtr(Imm32((3 - 2) * sizeof(size_t)), BaselineFrameReg);
+ masm.addStackPtrTo(BaselineFrameReg);
+
+ // Load the number of arguments present before the stub frame.
+ Register argcReg = JSReturnOperand.scratchReg();
+ if (isSpread_) {
+ // Account for the Array object.
+ masm.move32(Imm32(1), argcReg);
+ } else {
+ Address argcAddr(masm.getStackPointer(), 2 * sizeof(size_t));
+ masm.loadPtr(argcAddr, argcReg);
+ }
+
+ // Current stack: [ ThisVal, ARGVALS..., ...STUB FRAME..., <-- BaselineFrameReg
+ // Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
+ //
+ // &ThisVal = BaselineFrameReg + argc * sizeof(Value) + STUB_FRAME_SIZE + sizeof(Value)
+ // The last sizeof(Value) accounts for the newTarget at the end of the arguments
+ // vector, which is not reflected in actualArgc.
+ BaseValueIndex thisSlotAddr(BaselineFrameReg, argcReg, STUB_FRAME_SIZE + sizeof(Value));
+ masm.loadValue(thisSlotAddr, JSReturnOperand);
+#ifdef DEBUG
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.assumeUnreachable("Return of constructing call should be an object.");
+#endif
+ masm.bind(&skipThisReplace);
+ }
+
+ leaveStubFrame(masm, true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Leave stub frame and restore argc for the next stub.
+ masm.bind(&failureLeaveStubFrame);
+ inStubFrame_ = true;
+ leaveStubFrame(masm, false);
+ if (argcReg != R0.scratchReg())
+ masm.movePtr(argcReg, R0.scratchReg());
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+typedef bool (*CopyArrayFn)(JSContext*, HandleObject, MutableHandleValue);
+static const VMFunction CopyArrayInfo = FunctionInfo<CopyArrayFn>(CopyArray, "CopyArray");
+
+bool
+ICCall_StringSplit::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // Stack Layout: [ ..., CalleeVal, ThisVal, strVal, sepVal, +ICStackValueOffset+ ]
+ static const size_t SEP_DEPTH = 0;
+ static const size_t STR_DEPTH = sizeof(Value);
+ static const size_t CALLEE_DEPTH = 3 * sizeof(Value);
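+ // sep was pushed last (depth 0), str sits just below it, |this| is at depth 2,
+ // and the callee at depth 3, matching the layout shown above.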
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ Label failureRestoreArgc;
+#ifdef DEBUG
+ Label twoArg;
+ Register argcReg = R0.scratchReg();
+ masm.branch32(Assembler::Equal, argcReg, Imm32(2), &twoArg);
+ masm.assumeUnreachable("Expected argc == 2");
+ masm.bind(&twoArg);
+#endif
+ Register scratchReg = regs.takeAny();
+
+ // Guard that callee is native function js::intrinsic_StringSplitString.
+ {
+ Address calleeAddr(masm.getStackPointer(), ICStackValueOffset + CALLEE_DEPTH);
+ ValueOperand calleeVal = regs.takeAnyValue();
+
+ // Ensure that callee is an object.
+ masm.loadValue(calleeAddr, calleeVal);
+ masm.branchTestObject(Assembler::NotEqual, calleeVal, &failureRestoreArgc);
+
+ // Ensure that callee is a function.
+ Register calleeObj = masm.extractObject(calleeVal, ExtractTemp0);
+ masm.branchTestObjClass(Assembler::NotEqual, calleeObj, scratchReg,
+ &JSFunction::class_, &failureRestoreArgc);
+
+ // Ensure that callee's function impl is the native intrinsic_StringSplitString.
+ masm.loadPtr(Address(calleeObj, JSFunction::offsetOfNativeOrScript()), scratchReg);
+ masm.branchPtr(Assembler::NotEqual, scratchReg, ImmPtr(js::intrinsic_StringSplitString),
+ &failureRestoreArgc);
+
+ regs.add(calleeVal);
+ }
+
+ // Guard sep.
+ {
+ // Ensure that sep is a string.
+ Address sepAddr(masm.getStackPointer(), ICStackValueOffset + SEP_DEPTH);
+ ValueOperand sepVal = regs.takeAnyValue();
+
+ masm.loadValue(sepAddr, sepVal);
+ masm.branchTestString(Assembler::NotEqual, sepVal, &failureRestoreArgc);
+
+ Register sep = masm.extractString(sepVal, ExtractTemp0);
+ masm.branchPtr(Assembler::NotEqual, Address(ICStubReg, offsetOfExpectedSep()),
+ sep, &failureRestoreArgc);
+ regs.add(sepVal);
+ }
+
+ // Guard str.
+ {
+ // Ensure that str is a string.
+ Address strAddr(masm.getStackPointer(), ICStackValueOffset + STR_DEPTH);
+ ValueOperand strVal = regs.takeAnyValue();
+
+ masm.loadValue(strAddr, strVal);
+ masm.branchTestString(Assembler::NotEqual, strVal, &failureRestoreArgc);
+
+ Register str = masm.extractString(strVal, ExtractTemp0);
+ masm.branchPtr(Assembler::NotEqual, Address(ICStubReg, offsetOfExpectedStr()),
+ str, &failureRestoreArgc);
+ regs.add(strVal);
+ }
+
+ // Main stub body.
+ {
+ Register paramReg = regs.takeAny();
+
+ // Push arguments.
+ enterStubFrame(masm, scratchReg);
+ masm.loadPtr(Address(ICStubReg, offsetOfTemplateObject()), paramReg);
+ masm.push(paramReg);
+
+ if (!callVM(CopyArrayInfo, masm))
+ return false;
+ leaveStubFrame(masm);
+ regs.add(paramReg);
+ }
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Guard failure path.
+ masm.bind(&failureRestoreArgc);
+ masm.move32(Imm32(2), R0.scratchReg());
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICCall_IsSuspendedStarGenerator::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // The IsSuspendedStarGenerator intrinsic is only called in self-hosted
+ // code, so it's safe to assume we have a single argument and the callee
+ // is our intrinsic.
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ // Load the argument.
+ Address argAddr(masm.getStackPointer(), ICStackValueOffset);
+ ValueOperand argVal = regs.takeAnyValue();
+ masm.loadValue(argAddr, argVal);
+
+ // Check if it's an object.
+ Label returnFalse;
+ Register genObj = regs.takeAny();
+ masm.branchTestObject(Assembler::NotEqual, argVal, &returnFalse);
+ masm.unboxObject(argVal, genObj);
+
+ // Check if it's a StarGeneratorObject.
+ Register scratch = regs.takeAny();
+ masm.branchTestObjClass(Assembler::NotEqual, genObj, scratch, &StarGeneratorObject::class_,
+ &returnFalse);
+
+ // If the yield index slot holds an int32 value < YIELD_INDEX_CLOSING,
+ // the generator is suspended.
+ masm.loadValue(Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()), argVal);
+ masm.branchTestInt32(Assembler::NotEqual, argVal, &returnFalse);
+ masm.unboxInt32(argVal, scratch);
+ masm.branch32(Assembler::AboveOrEqual, scratch, Imm32(StarGeneratorObject::YIELD_INDEX_CLOSING),
+ &returnFalse);
+
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&returnFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool
+ICCall_Native::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ Register argcReg = R0.scratchReg();
+ regs.take(argcReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ if (isSpread_)
+ guardSpreadCall(masm, argcReg, &failure, isConstructing_);
+
+ // Load the callee in R1.
+ if (isSpread_) {
+ masm.loadValue(Address(masm.getStackPointer(), ICStackValueOffset + 2 * sizeof(Value)), R1);
+ } else {
+ unsigned nonArgsSlots = (1 + isConstructing_) * sizeof(Value);
+ BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + nonArgsSlots);
+ masm.loadValue(calleeSlot, R1);
+ }
+ regs.take(R1);
+
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ // Ensure callee matches this stub's callee.
+ Register callee = masm.extractObject(R1, ExtractTemp0);
+ Address expectedCallee(ICStubReg, ICCall_Native::offsetOfCallee());
+ masm.branchPtr(Assembler::NotEqual, expectedCallee, callee, &failure);
+
+ regs.add(R1);
+ regs.takeUnchecked(callee);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ // Note that this leaves the return address in TailCallReg.
+ enterStubFrame(masm, regs.getAny());
+
+ // Values are on the stack left-to-right. Calling convention wants them
+ // right-to-left so duplicate them on the stack in reverse order.
+ // |this| and callee are pushed last.
+ if (isSpread_)
+ pushSpreadCallArguments(masm, regs, argcReg, /* isJitCall = */ false, isConstructing_);
+ else
+ pushCallArguments(masm, regs, argcReg, /* isJitCall = */ false, isConstructing_);
+
+
+ // Native functions have the signature:
+ //
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ //
+ // Where vp[0] is space for callee/return value, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
+
+ // Initialize vp.
+ Register vpReg = regs.takeAny();
+ masm.moveStackPtrTo(vpReg);
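+ // pushCallArguments pushed |this| and the callee last, so the stack pointer now
+ // points at vp[0] (the callee/return-value slot).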
+
+ // Construct a native exit frame.
+ masm.push(argcReg);
+
+ Register scratch = regs.takeAny();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.enterFakeExitFrameForNative(isConstructing_);
+
+ // Execute call.
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(argcReg);
+ masm.passABIArg(vpReg);
+
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special swi
+ // instruction to handle them, so we store the redirected pointer in the
+ // stub and use that instead of the original one.
+ masm.callWithABI(Address(ICStubReg, ICCall_Native::offsetOfNative()));
+#else
+ masm.callWithABI(Address(callee, JSFunction::offsetOfNativeOrScript()));
+#endif
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the return value into R0.
+ masm.loadValue(Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()), R0);
+
+ leaveStubFrame(masm);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ Register argcReg = R0.scratchReg();
+ regs.take(argcReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ // Load the callee in R1.
+ unsigned nonArgSlots = (1 + isConstructing_) * sizeof(Value);
+ BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + nonArgSlots);
+ masm.loadValue(calleeSlot, R1);
+ regs.take(R1);
+
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ // Ensure the callee's class matches the one in this stub.
+ Register callee = masm.extractObject(R1, ExtractTemp0);
+ Register scratch = regs.takeAny();
+ masm.loadObjClass(callee, scratch);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(ICStubReg, ICCall_ClassHook::offsetOfClass()),
+ scratch, &failure);
+
+ regs.add(R1);
+ regs.takeUnchecked(callee);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ // Note that this leaves the return address in TailCallReg.
+ enterStubFrame(masm, regs.getAny());
+
+ regs.add(scratch);
+ pushCallArguments(masm, regs, argcReg, /* isJitCall = */ false, isConstructing_);
+ regs.take(scratch);
+
+ masm.checkStackAlignment();
+
+ // Native functions have the signature:
+ //
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ //
+ // Where vp[0] is space for callee/return value, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
+
+ // Initialize vp.
+ Register vpReg = regs.takeAny();
+ masm.moveStackPtrTo(vpReg);
+
+ // Construct a native exit frame.
+ masm.push(argcReg);
+
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.enterFakeExitFrameForNative(isConstructing_);
+
+ // Execute call.
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(argcReg);
+ masm.passABIArg(vpReg);
+ masm.callWithABI(Address(ICStubReg, ICCall_ClassHook::offsetOfNative()));
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the return value into R0.
+ masm.loadValue(Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()), R0);
+
+ leaveStubFrame(masm);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICCall_ScriptedApplyArray::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ Register argcReg = R0.scratchReg();
+ regs.take(argcReg);
+ regs.takeUnchecked(ICTailCallReg);
+ regs.takeUnchecked(ArgumentsRectifierReg);
+
+ //
+ // Validate inputs
+ //
+
+ Register target = guardFunApply(masm, regs, argcReg, /*checkNative=*/false,
+ FunApply_Array, &failure);
+ if (regs.has(target)) {
+ regs.take(target);
+ } else {
+ // If target is already a reserved reg, take another register for it, because it's
+ // probably currently an ExtractTemp, which might get clobbered later.
+ Register targetTemp = regs.takeAny();
+ masm.movePtr(target, targetTemp);
+ target = targetTemp;
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, regs.getAny());
+
+ //
+ // Push arguments
+ //
+
+ // Stack now looks like:
+ // BaselineFrameReg -------------------.
+ // v
+ // [..., fun_apply, TargetV, TargetThisV, ArgsArrayV, StubFrameHeader]
+
+ // Push all array elements onto the stack:
+ Address arrayVal(BaselineFrameReg, STUB_FRAME_SIZE);
+ pushArrayArguments(masm, arrayVal, regs);
+
+ // Stack now looks like:
+ // BaselineFrameReg -------------------.
+ // v
+ // [..., fun_apply, TargetV, TargetThisV, ArgsArrayV, StubFrameHeader,
+ // PushedArgN, ..., PushedArg0]
+ // Can't fail after this, so it's ok to clobber argcReg.
+
+ // Push actual argument 0 as |thisv| for call.
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE + sizeof(Value)));
+
+ // All pushes after this use Push instead of push to make sure ARM can align
+ // stack properly for call.
+ Register scratch = regs.takeAny();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+
+ // Reload argc from length of array.
+ masm.extractObject(arrayVal, argcReg);
+ masm.loadPtr(Address(argcReg, NativeObject::offsetOfElements()), argcReg);
+ masm.load32(Address(argcReg, ObjectElements::offsetOfInitializedLength()), argcReg);
+
+ masm.Push(argcReg);
+ masm.Push(target);
+ masm.Push(scratch);
+
+ // Load nargs into scratch for underflow check, and then load jitcode pointer into target.
+ masm.load16ZeroExtend(Address(target, JSFunction::offsetOfNargs()), scratch);
+ masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), target);
+ masm.loadBaselineOrIonRaw(target, target, nullptr);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.branch32(Assembler::AboveOrEqual, argcReg, scratch, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != target);
+ MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), target);
+ masm.loadPtr(Address(target, JitCode::offsetOfCode()), target);
+ masm.movePtr(argcReg, ArgumentsRectifierReg);
+ }
+ masm.bind(&noUnderflow);
+ regs.add(argcReg);
+
+ // Do call
+ masm.callJit(target);
+ leaveStubFrame(masm, true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICCall_ScriptedApplyArguments::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+
+ Register argcReg = R0.scratchReg();
+ regs.take(argcReg);
+ regs.takeUnchecked(ICTailCallReg);
+ regs.takeUnchecked(ArgumentsRectifierReg);
+
+ //
+ // Validate inputs
+ //
+
+ Register target = guardFunApply(masm, regs, argcReg, /*checkNative=*/false,
+ FunApply_MagicArgs, &failure);
+ if (regs.has(target)) {
+ regs.take(target);
+ } else {
+ // If target is already a reserved reg, take another register for it, because it's
+ // probably currently an ExtractTemp, which might get clobbered later.
+ Register targetTemp = regs.takeAny();
+ masm.movePtr(target, targetTemp);
+ target = targetTemp;
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, regs.getAny());
+
+ //
+ // Push arguments
+ //
+
+ // Stack now looks like:
+ // [..., fun_apply, TargetV, TargetThisV, MagicArgsV, StubFrameHeader]
+
+ // Push all arguments supplied to caller function onto the stack.
+ pushCallerArguments(masm, regs);
+
+ // Stack now looks like:
+ // BaselineFrameReg -------------------.
+ // v
+ // [..., fun_apply, TargetV, TargetThisV, MagicArgsV, StubFrameHeader,
+ // PushedArgN, ..., PushedArg0]
+ // Can't fail after this, so it's ok to clobber argcReg.
+
+ // Push actual argument 0 as |thisv| for call.
+ masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE + sizeof(Value)));
+
+ // All pushes after this use Push instead of push to make sure ARM can align
+ // stack properly for call.
+ Register scratch = regs.takeAny();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+
+ masm.loadPtr(Address(BaselineFrameReg, 0), argcReg);
+ masm.loadPtr(Address(argcReg, BaselineFrame::offsetOfNumActualArgs()), argcReg);
+ masm.Push(argcReg);
+ masm.Push(target);
+ masm.Push(scratch);
+
+ // Load nargs into scratch for underflow check, and then load jitcode pointer into target.
+ masm.load16ZeroExtend(Address(target, JSFunction::offsetOfNargs()), scratch);
+ masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), target);
+ masm.loadBaselineOrIonRaw(target, target, nullptr);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.branch32(Assembler::AboveOrEqual, argcReg, scratch, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != target);
+ MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), target);
+ masm.loadPtr(Address(target, JitCode::offsetOfCode()), target);
+ masm.movePtr(argcReg, ArgumentsRectifierReg);
+ }
+ masm.bind(&noUnderflow);
+ regs.add(argcReg);
+
+ // Do call
+ masm.callJit(target);
+ leaveStubFrame(masm, true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICCall_ScriptedFunCall::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ bool canUseTailCallReg = regs.has(ICTailCallReg);
+
+ Register argcReg = R0.scratchReg();
+ MOZ_ASSERT(argcReg != ArgumentsRectifierReg);
+
+ regs.take(argcReg);
+ regs.take(ArgumentsRectifierReg);
+ regs.takeUnchecked(ICTailCallReg);
+
+ // Load the callee in R1.
+ // Stack Layout: [ ..., CalleeVal, ThisVal, Arg0Val, ..., ArgNVal, +ICStackValueOffset+ ]
+ BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + sizeof(Value));
+ masm.loadValue(calleeSlot, R1);
+ regs.take(R1);
+
+ // Ensure callee is fun_call.
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ Register callee = masm.extractObject(R1, ExtractTemp0);
+ masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
+ &failure);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+ masm.branchPtr(Assembler::NotEqual, callee, ImmPtr(fun_call), &failure);
+
+ // Ensure |this| is a scripted function with JIT code.
+ BaseIndex thisSlot(masm.getStackPointer(), argcReg, TimesEight, ICStackValueOffset);
+ masm.loadValue(thisSlot, R1);
+
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+ callee = masm.extractObject(R1, ExtractTemp0);
+
+ masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
+ &failure);
+ masm.branchIfFunctionHasNoScript(callee, &failure);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+
+ // Load the start of the target JitCode.
+ Register code = regs.takeAny();
+ masm.loadBaselineOrIonRaw(callee, code, &failure);
+
+ // We no longer need R1.
+ regs.add(R1);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, regs.getAny());
+ if (canUseTailCallReg)
+ regs.add(ICTailCallReg);
+
+ // Decrement argc if argc > 0. If argc == 0, push |undefined| as |this|.
+ Label zeroArgs, done;
+ masm.branchTest32(Assembler::Zero, argcReg, argcReg, &zeroArgs);
+
+ // Avoid the copy of the callee (function.call).
+ masm.sub32(Imm32(1), argcReg);
+
+    // Values are on the stack left-to-right. The calling convention wants them
+    // right-to-left, so duplicate them on the stack in reverse order.
+
+ pushCallArguments(masm, regs, argcReg, /* isJitCall = */ true);
+
+ // Pop scripted callee (the original |this|).
+ ValueOperand val = regs.takeAnyValue();
+ masm.popValue(val);
+
+ masm.jump(&done);
+ masm.bind(&zeroArgs);
+
+ // Copy scripted callee (the original |this|).
+ Address thisSlotFromStubFrame(BaselineFrameReg, STUB_FRAME_SIZE);
+ masm.loadValue(thisSlotFromStubFrame, val);
+
+ // Align the stack.
+ masm.alignJitStackBasedOnNArgs(0);
+
+ // Store the new |this|.
+ masm.pushValue(UndefinedValue());
+
+ masm.bind(&done);
+
+ // Unbox scripted callee.
+ callee = masm.extractObject(val, ExtractTemp0);
+
+ Register scratch = regs.takeAny();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.Push(argcReg);
+ masm.Push(callee);
+ masm.Push(scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
+ masm.branch32(Assembler::AboveOrEqual, argcReg, callee, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != code);
+ MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+ masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+ masm.movePtr(argcReg, ArgumentsRectifierReg);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ leaveStubFrame(masm, true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
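+// Helper for ICTableSwitch: convert a double-valued switch key to an int32 in
+// place. Returns false if the double does not convert to int32 exactly.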
+static bool
+DoubleValueToInt32ForSwitch(Value* v)
+{
+ double d = v->toDouble();
+ int32_t truncated = int32_t(d);
+ if (d != double(truncated))
+ return false;
+
+ v->setInt32(truncated);
+ return true;
+}
+
+bool
+ICTableSwitch::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label isInt32, notInt32, outOfRange;
+ Register scratch = R1.scratchReg();
+
+ masm.branchTestInt32(Assembler::NotEqual, R0, &notInt32);
+
+ Register key = masm.extractInt32(R0, ExtractTemp0);
+
+ masm.bind(&isInt32);
+
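+    // Rebase the key against the table's minimum and bounds-check it against the
+    // table length; the unsigned comparison also rejects keys below min_.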
+ masm.load32(Address(ICStubReg, offsetof(ICTableSwitch, min_)), scratch);
+ masm.sub32(scratch, key);
+ masm.branch32(Assembler::BelowOrEqual,
+ Address(ICStubReg, offsetof(ICTableSwitch, length_)), key, &outOfRange);
+
+ masm.loadPtr(Address(ICStubReg, offsetof(ICTableSwitch, table_)), scratch);
+ masm.loadPtr(BaseIndex(scratch, key, ScalePointer), scratch);
+
+ EmitChangeICReturnAddress(masm, scratch);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&notInt32);
+
+ masm.branchTestDouble(Assembler::NotEqual, R0, &outOfRange);
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.unboxDouble(R0, FloatReg0);
+
+ // N.B. -0 === 0, so convert -0 to a 0 int32.
+ masm.convertDoubleToInt32(FloatReg0, key, &outOfRange, /* negativeZeroCheck = */ false);
+ } else {
+ // Pass pointer to double value.
+ masm.pushValue(R0);
+ masm.moveStackPtrTo(R0.scratchReg());
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(R0.scratchReg());
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, DoubleValueToInt32ForSwitch));
+
+ // If the function returns |true|, the value has been converted to
+ // int32.
+ masm.movePtr(ReturnReg, scratch);
+ masm.popValue(R0);
+ masm.branchIfFalseBool(scratch, &outOfRange);
+ masm.unboxInt32(R0, key);
+ }
+ masm.jump(&isInt32);
+
+ masm.bind(&outOfRange);
+
+ masm.loadPtr(Address(ICStubReg, offsetof(ICTableSwitch, defaultTarget_)), scratch);
+
+ EmitChangeICReturnAddress(masm, scratch);
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+ICStub*
+ICTableSwitch::Compiler::getStub(ICStubSpace* space)
+{
+ JitCode* code = getStubCode();
+ if (!code)
+ return nullptr;
+
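+    // Decode the JSOP_TABLESWITCH operands: the default jump offset comes first,
+    // then the low and high bounds of the case range, followed by one jump
+    // offset per case.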
+ jsbytecode* pc = pc_;
+ pc += JUMP_OFFSET_LEN;
+ int32_t low = GET_JUMP_OFFSET(pc);
+ pc += JUMP_OFFSET_LEN;
+ int32_t high = GET_JUMP_OFFSET(pc);
+ int32_t length = high - low + 1;
+ pc += JUMP_OFFSET_LEN;
+
+ void** table = (void**) space->alloc(sizeof(void*) * length);
+ if (!table) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ jsbytecode* defaultpc = pc_ + GET_JUMP_OFFSET(pc_);
+
+ for (int32_t i = 0; i < length; i++) {
+ int32_t off = GET_JUMP_OFFSET(pc);
+ if (off)
+ table[i] = pc_ + off;
+ else
+ table[i] = defaultpc;
+ pc += JUMP_OFFSET_LEN;
+ }
+
+ return newStub<ICTableSwitch>(space, code, table, low, length, defaultpc);
+}
+
+void
+ICTableSwitch::fixupJumpTable(JSScript* script, BaselineScript* baseline)
+{
+ defaultTarget_ = baseline->nativeCodeForPC(script, (jsbytecode*) defaultTarget_);
+
+ for (int32_t i = 0; i < length_; i++)
+ table_[i] = baseline->nativeCodeForPC(script, (jsbytecode*) table_[i]);
+}
+
+//
+// IteratorNew_Fallback
+//
+
+static bool
+DoIteratorNewFallback(JSContext* cx, BaselineFrame* frame, ICIteratorNew_Fallback* stub,
+ HandleValue value, MutableHandleValue res)
+{
+ jsbytecode* pc = stub->icEntry()->pc(frame->script());
+ FallbackICSpew(cx, stub, "IteratorNew");
+
+ uint8_t flags = GET_UINT8(pc);
+ res.set(value);
+ RootedObject iterobj(cx, ValueToIterator(cx, flags, res));
+ if (!iterobj)
+ return false;
+ res.setObject(*iterobj);
+ return true;
+}
+
+typedef bool (*DoIteratorNewFallbackFn)(JSContext*, BaselineFrame*, ICIteratorNew_Fallback*,
+ HandleValue, MutableHandleValue);
+static const VMFunction DoIteratorNewFallbackInfo =
+ FunctionInfo<DoIteratorNewFallbackFn>(DoIteratorNewFallback, "DoIteratorNewFallback",
+ TailCall, PopValues(1));
+
+bool
+ICIteratorNew_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Sync stack for the decompiler.
+ masm.pushValue(R0);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoIteratorNewFallbackInfo, masm);
+}
+
+//
+// IteratorMore_Fallback
+//
+
+static bool
+DoIteratorMoreFallback(JSContext* cx, BaselineFrame* frame, ICIteratorMore_Fallback* stub_,
+ HandleObject iterObj, MutableHandleValue res)
+{
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICIteratorMore_Fallback*> stub(frame, stub_);
+
+ FallbackICSpew(cx, stub, "IteratorMore");
+
+ if (!IteratorMore(cx, iterObj, res))
+ return false;
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ if (!res.isMagic(JS_NO_ITER_VALUE) && !res.isString())
+ stub->setHasNonStringResult();
+
+ if (iterObj->is<PropertyIteratorObject>() &&
+ !stub->hasStub(ICStub::IteratorMore_Native))
+ {
+ ICIteratorMore_Native::Compiler compiler(cx);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(frame->script()));
+ if (!newStub)
+ return false;
+ stub->addNewStub(newStub);
+ }
+
+ return true;
+}
+
+typedef bool (*DoIteratorMoreFallbackFn)(JSContext*, BaselineFrame*, ICIteratorMore_Fallback*,
+ HandleObject, MutableHandleValue);
+static const VMFunction DoIteratorMoreFallbackInfo =
+ FunctionInfo<DoIteratorMoreFallbackFn>(DoIteratorMoreFallback, "DoIteratorMoreFallback",
+ TailCall);
+
+bool
+ICIteratorMore_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.unboxObject(R0, R0.scratchReg());
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoIteratorMoreFallbackInfo, masm);
+}
+
+//
+// IteratorMore_Native
+//
+
+bool
+ICIteratorMore_Native::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register nativeIterator = regs.takeAny();
+ Register scratch = regs.takeAny();
+
+ masm.branchTestObjClass(Assembler::NotEqual, obj, scratch,
+ &PropertyIteratorObject::class_, &failure);
+ masm.loadObjPrivate(obj, JSObject::ITER_CLASS_NFIXED_SLOTS, nativeIterator);
+
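+    // Only optimize standard for-in iterators: JSITER_FOREACH iterators produce
+    // values rather than property name strings.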
+ masm.branchTest32(Assembler::NonZero, Address(nativeIterator, offsetof(NativeIterator, flags)),
+ Imm32(JSITER_FOREACH), &failure);
+
+ // If props_cursor < props_end, load the next string and advance the cursor.
+ // Else, return MagicValue(JS_NO_ITER_VALUE).
+ Label iterDone;
+ Address cursorAddr(nativeIterator, offsetof(NativeIterator, props_cursor));
+ Address cursorEndAddr(nativeIterator, offsetof(NativeIterator, props_end));
+ masm.loadPtr(cursorAddr, scratch);
+ masm.branchPtr(Assembler::BelowOrEqual, cursorEndAddr, scratch, &iterDone);
+
+ // Get next string.
+ masm.loadPtr(Address(scratch, 0), scratch);
+
+    // Advance the cursor.
+ masm.addPtr(Imm32(sizeof(JSString*)), cursorAddr);
+
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&iterDone);
+ masm.moveValue(MagicValue(JS_NO_ITER_VALUE), R0);
+ EmitReturnFromIC(masm);
+
+    // Failure case - jump to the next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// IteratorClose_Fallback
+//
+
+static bool
+DoIteratorCloseFallback(JSContext* cx, ICIteratorClose_Fallback* stub, HandleValue iterValue)
+{
+ FallbackICSpew(cx, stub, "IteratorClose");
+
+ RootedObject iteratorObject(cx, &iterValue.toObject());
+ return CloseIterator(cx, iteratorObject);
+}
+
+typedef bool (*DoIteratorCloseFallbackFn)(JSContext*, ICIteratorClose_Fallback*, HandleValue);
+static const VMFunction DoIteratorCloseFallbackInfo =
+ FunctionInfo<DoIteratorCloseFallbackFn>(DoIteratorCloseFallback, "DoIteratorCloseFallback",
+ TailCall);
+
+bool
+ICIteratorClose_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+
+ return tailCallVM(DoIteratorCloseFallbackInfo, masm);
+}
+
+//
+// InstanceOf_Fallback
+//
+
+static bool
+TryAttachInstanceOfStub(JSContext* cx, BaselineFrame* frame, ICInstanceOf_Fallback* stub,
+ HandleFunction fun, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+ if (fun->isBoundFunction())
+ return true;
+
+    // If the user has supplied their own @@hasInstance method, we shouldn't
+    // clobber it.
+ if (!js::FunctionHasDefaultHasInstance(fun, cx->wellKnownSymbols()))
+ return true;
+
+ // Refuse to optimize any function whose [[Prototype]] isn't
+ // Function.prototype.
+ if (!fun->hasStaticPrototype() || fun->hasUncacheableProto())
+ return true;
+
+ Value funProto = cx->global()->getPrototype(JSProto_Function);
+ if (funProto.isObject() && fun->staticPrototype() != &funProto.toObject())
+ return true;
+
+ Shape* shape = fun->lookupPure(cx->names().prototype);
+ if (!shape || !shape->hasSlot() || !shape->hasDefaultGetter())
+ return true;
+
+ uint32_t slot = shape->slot();
+ MOZ_ASSERT(fun->numFixedSlots() == 0, "Stub code relies on this");
+
+ if (!fun->getSlot(slot).isObject())
+ return true;
+
+ JSObject* protoObject = &fun->getSlot(slot).toObject();
+
+ JitSpew(JitSpew_BaselineIC, " Generating InstanceOf(Function) stub");
+ ICInstanceOf_Function::Compiler compiler(cx, fun->lastProperty(), protoObject, slot);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(frame->script()));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+}
+
+static bool
+DoInstanceOfFallback(JSContext* cx, BaselineFrame* frame, ICInstanceOf_Fallback* stub,
+ HandleValue lhs, HandleValue rhs, MutableHandleValue res)
+{
+ FallbackICSpew(cx, stub, "InstanceOf");
+
+ if (!rhs.isObject()) {
+ ReportValueError(cx, JSMSG_BAD_INSTANCEOF_RHS, -1, rhs, nullptr);
+ return false;
+ }
+
+ RootedObject obj(cx, &rhs.toObject());
+ bool cond = false;
+ if (!HasInstance(cx, obj, lhs, &cond))
+ return false;
+
+ res.setBoolean(cond);
+
+ if (!obj->is<JSFunction>()) {
+ stub->noteUnoptimizableAccess();
+ return true;
+ }
+
+ // For functions, keep track of the |prototype| property in type information,
+ // for use during Ion compilation.
+ EnsureTrackPropertyTypes(cx, obj, NameToId(cx->names().prototype));
+
+ if (stub->numOptimizedStubs() >= ICInstanceOf_Fallback::MAX_OPTIMIZED_STUBS)
+ return true;
+
+ RootedFunction fun(cx, &obj->as<JSFunction>());
+ bool attached = false;
+ if (!TryAttachInstanceOfStub(cx, frame, stub, fun, &attached))
+ return false;
+ if (!attached)
+ stub->noteUnoptimizableAccess();
+ return true;
+}
+
+typedef bool (*DoInstanceOfFallbackFn)(JSContext*, BaselineFrame*, ICInstanceOf_Fallback*,
+ HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoInstanceOfFallbackInfo =
+ FunctionInfo<DoInstanceOfFallbackFn>(DoInstanceOfFallback, "DoInstanceOfFallback", TailCall,
+ PopValues(2));
+
+bool
+ICInstanceOf_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Sync stack for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoInstanceOfFallbackInfo, masm);
+}
+
+bool
+ICInstanceOf_Function::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+
+ // Ensure RHS is an object.
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+ Register rhsObj = masm.extractObject(R1, ExtractTemp0);
+
+ // Allow using R1's type register as scratch. We have to restore it when
+ // we want to jump to the next stub.
+ Label failureRestoreR1;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ regs.takeUnchecked(rhsObj);
+
+ Register scratch1 = regs.takeAny();
+ Register scratch2 = regs.takeAny();
+
+ // Shape guard.
+ masm.loadPtr(Address(ICStubReg, ICInstanceOf_Function::offsetOfShape()), scratch1);
+ masm.branchTestObjShape(Assembler::NotEqual, rhsObj, scratch1, &failureRestoreR1);
+
+ // Guard on the .prototype object.
+ masm.loadPtr(Address(rhsObj, NativeObject::offsetOfSlots()), scratch1);
+ masm.load32(Address(ICStubReg, ICInstanceOf_Function::offsetOfSlot()), scratch2);
+ BaseValueIndex prototypeSlot(scratch1, scratch2);
+ masm.branchTestObject(Assembler::NotEqual, prototypeSlot, &failureRestoreR1);
+ masm.unboxObject(prototypeSlot, scratch1);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(ICStubReg, ICInstanceOf_Function::offsetOfPrototypeObject()),
+ scratch1, &failureRestoreR1);
+
+ // If LHS is a primitive, return false.
+ Label returnFalse, returnTrue;
+ masm.branchTestObject(Assembler::NotEqual, R0, &returnFalse);
+
+ // LHS is an object. Load its proto.
+ masm.unboxObject(R0, scratch2);
+ masm.loadObjProto(scratch2, scratch2);
+
+ {
+ // Walk the proto chain until we either reach the target object,
+ // nullptr or LazyProto.
+ Label loop;
+ masm.bind(&loop);
+
+ masm.branchPtr(Assembler::Equal, scratch2, scratch1, &returnTrue);
+ masm.branchTestPtr(Assembler::Zero, scratch2, scratch2, &returnFalse);
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+ masm.branchPtr(Assembler::Equal, scratch2, ImmWord(1), &failureRestoreR1);
+
+ masm.loadObjProto(scratch2, scratch2);
+ masm.jump(&loop);
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&returnFalse);
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&returnTrue);
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failureRestoreR1);
+ masm.tagValue(JSVAL_TYPE_OBJECT, rhsObj, R1);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// TypeOf_Fallback
+//
+
+static bool
+DoTypeOfFallback(JSContext* cx, BaselineFrame* frame, ICTypeOf_Fallback* stub, HandleValue val,
+ MutableHandleValue res)
+{
+ FallbackICSpew(cx, stub, "TypeOf");
+ JSType type = js::TypeOfValue(val);
+ RootedString string(cx, TypeName(type, cx->names()));
+
+ res.setString(string);
+
+ MOZ_ASSERT(type != JSTYPE_NULL);
+ if (type != JSTYPE_OBJECT && type != JSTYPE_FUNCTION) {
+ // Create a new TypeOf stub.
+ JitSpew(JitSpew_BaselineIC, " Generating TypeOf stub for JSType (%d)", (int) type);
+ ICTypeOf_Typed::Compiler compiler(cx, type, string);
+ ICStub* typeOfStub = compiler.getStub(compiler.getStubSpace(frame->script()));
+ if (!typeOfStub)
+ return false;
+ stub->addNewStub(typeOfStub);
+ }
+
+ return true;
+}
+
+typedef bool (*DoTypeOfFallbackFn)(JSContext*, BaselineFrame* frame, ICTypeOf_Fallback*,
+ HandleValue, MutableHandleValue);
+static const VMFunction DoTypeOfFallbackInfo =
+ FunctionInfo<DoTypeOfFallbackFn>(DoTypeOfFallback, "DoTypeOfFallback", TailCall);
+
+bool
+ICTypeOf_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoTypeOfFallbackInfo, masm);
+}
+
+bool
+ICTypeOf_Typed::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+ MOZ_ASSERT(type_ != JSTYPE_NULL);
+ MOZ_ASSERT(type_ != JSTYPE_FUNCTION);
+ MOZ_ASSERT(type_ != JSTYPE_OBJECT);
+
+ Label failure;
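+    // Guard that the operand has the type this stub was specialized for.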
+    switch (type_) {
+ case JSTYPE_VOID:
+ masm.branchTestUndefined(Assembler::NotEqual, R0, &failure);
+ break;
+
+ case JSTYPE_STRING:
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ break;
+
+ case JSTYPE_NUMBER:
+ masm.branchTestNumber(Assembler::NotEqual, R0, &failure);
+ break;
+
+ case JSTYPE_BOOLEAN:
+ masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
+ break;
+
+ case JSTYPE_SYMBOL:
+ masm.branchTestSymbol(Assembler::NotEqual, R0, &failure);
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ masm.movePtr(ImmGCPtr(typeString_), R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_STRING, R0.scratchReg(), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+static bool
+DoRetSubFallback(JSContext* cx, BaselineFrame* frame, ICRetSub_Fallback* stub,
+ HandleValue val, uint8_t** resumeAddr)
+{
+ FallbackICSpew(cx, stub, "RetSub");
+
+ // |val| is the bytecode offset where we should resume.
+
+ MOZ_ASSERT(val.isInt32());
+ MOZ_ASSERT(val.toInt32() >= 0);
+
+ JSScript* script = frame->script();
+ uint32_t offset = uint32_t(val.toInt32());
+
+ *resumeAddr = script->baselineScript()->nativeCodeForPC(script, script->offsetToPC(offset));
+
+ if (stub->numOptimizedStubs() >= ICRetSub_Fallback::MAX_OPTIMIZED_STUBS)
+ return true;
+
+ // Attach an optimized stub for this pc offset.
+ JitSpew(JitSpew_BaselineIC, " Generating RetSub stub for pc offset %u", offset);
+ ICRetSub_Resume::Compiler compiler(cx, offset, *resumeAddr);
+ ICStub* optStub = compiler.getStub(compiler.getStubSpace(script));
+ if (!optStub)
+ return false;
+
+ stub->addNewStub(optStub);
+ return true;
+}
+
+typedef bool(*DoRetSubFallbackFn)(JSContext* cx, BaselineFrame*, ICRetSub_Fallback*,
+ HandleValue, uint8_t**);
+static const VMFunction DoRetSubFallbackInfo =
+ FunctionInfo<DoRetSubFallbackFn>(DoRetSubFallback, "DoRetSubFallback");
+
+typedef bool (*ThrowFn)(JSContext*, HandleValue);
+static const VMFunction ThrowInfoBaseline =
+ FunctionInfo<ThrowFn>(js::Throw, "ThrowInfoBaseline", TailCall);
+
+bool
+ICRetSub_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // If R0 is BooleanValue(true), rethrow R1.
+ Label rethrow;
+ masm.branchTestBooleanTruthy(true, R0, &rethrow);
+ {
+        // Call into the VM to get the native code address for the pc offset in R1.
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ regs.take(R1);
+ regs.takeUnchecked(ICTailCallReg);
+ Register scratch = regs.getAny();
+
+ enterStubFrame(masm, scratch);
+
+ masm.pushValue(R1);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, scratch);
+
+ if (!callVM(DoRetSubFallbackInfo, masm))
+ return false;
+
+ leaveStubFrame(masm);
+
+ EmitChangeICReturnAddress(masm, ReturnReg);
+ EmitReturnFromIC(masm);
+ }
+
+ masm.bind(&rethrow);
+ EmitRestoreTailCallReg(masm);
+ masm.pushValue(R1);
+ return tailCallVM(ThrowInfoBaseline, masm);
+}
+
+bool
+ICRetSub_Resume::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ // If R0 is BooleanValue(true), rethrow R1.
+ Label fail, rethrow;
+ masm.branchTestBooleanTruthy(true, R0, &rethrow);
+
+ // R1 is the pc offset. Ensure it matches this stub's offset.
+ Register offset = masm.extractInt32(R1, ExtractTemp0);
+ masm.branch32(Assembler::NotEqual,
+ Address(ICStubReg, ICRetSub_Resume::offsetOfPCOffset()),
+ offset,
+ &fail);
+
+ // pc offset matches, resume at the target pc.
+ masm.loadPtr(Address(ICStubReg, ICRetSub_Resume::offsetOfAddr()), R0.scratchReg());
+ EmitChangeICReturnAddress(masm, R0.scratchReg());
+ EmitReturnFromIC(masm);
+
+ // Rethrow the Value stored in R1.
+ masm.bind(&rethrow);
+ EmitRestoreTailCallReg(masm);
+ masm.pushValue(R1);
+ if (!tailCallVM(ThrowInfoBaseline, masm))
+ return false;
+
+ masm.bind(&fail);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICTypeMonitor_SingleObject::ICTypeMonitor_SingleObject(JitCode* stubCode, JSObject* obj)
+ : ICStub(TypeMonitor_SingleObject, stubCode),
+ obj_(obj)
+{ }
+
+ICTypeMonitor_ObjectGroup::ICTypeMonitor_ObjectGroup(JitCode* stubCode, ObjectGroup* group)
+ : ICStub(TypeMonitor_ObjectGroup, stubCode),
+ group_(group)
+{ }
+
+ICTypeUpdate_SingleObject::ICTypeUpdate_SingleObject(JitCode* stubCode, JSObject* obj)
+ : ICStub(TypeUpdate_SingleObject, stubCode),
+ obj_(obj)
+{ }
+
+ICTypeUpdate_ObjectGroup::ICTypeUpdate_ObjectGroup(JitCode* stubCode, ObjectGroup* group)
+ : ICStub(TypeUpdate_ObjectGroup, stubCode),
+ group_(group)
+{ }
+
+ICGetElemNativeStub::ICGetElemNativeStub(ICStub::Kind kind, JitCode* stubCode,
+ ICStub* firstMonitorStub,
+ ReceiverGuard guard, AccessType acctype,
+ bool needsAtomize, bool isSymbol)
+ : ICMonitoredStub(kind, stubCode, firstMonitorStub),
+ receiverGuard_(guard)
+{
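+    // Pack the access type, needs-atomize flag, and is-symbol flag into the
+    // stub's extra_ field.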
+ extra_ = (static_cast<uint16_t>(acctype) << ACCESSTYPE_SHIFT) |
+ (static_cast<uint16_t>(needsAtomize) << NEEDS_ATOMIZE_SHIFT) |
+ (static_cast<uint16_t>(isSymbol) << ISSYMBOL_SHIFT);
+}
+
+ICGetElemNativeStub::~ICGetElemNativeStub()
+{ }
+
+template <class T>
+ICGetElemNativeGetterStub<T>::ICGetElemNativeGetterStub(
+ ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype, bool needsAtomize,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetElemNativeStubImpl<T>(kind, stubCode, firstMonitorStub, guard, key, acctype, needsAtomize),
+ getter_(getter),
+ pcOffset_(pcOffset)
+{
+ MOZ_ASSERT(kind == ICStub::GetElem_NativePrototypeCallNativeName ||
+ kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol);
+ MOZ_ASSERT(acctype == ICGetElemNativeStub::NativeGetter ||
+ acctype == ICGetElemNativeStub::ScriptedGetter);
+}
+
+template <class T>
+ICGetElem_NativePrototypeSlot<T>::ICGetElem_NativePrototypeSlot(
+ JitCode* stubCode, ICStub* firstMonitorStub, ReceiverGuard guard,
+ const T* key, AccType acctype, bool needsAtomize, uint32_t offset,
+ JSObject* holder, Shape* holderShape)
+ : ICGetElemNativeSlotStub<T>(getGetElemStubKind<T>(ICStub::GetElem_NativePrototypeSlotName),
+ stubCode, firstMonitorStub, guard, key, acctype, needsAtomize, offset),
+ holder_(holder),
+ holderShape_(holderShape)
+{ }
+
+template <class T>
+ICGetElemNativePrototypeCallStub<T>::ICGetElemNativePrototypeCallStub(
+ ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype,
+ bool needsAtomize, JSFunction* getter, uint32_t pcOffset,
+ JSObject* holder, Shape* holderShape)
+ : ICGetElemNativeGetterStub<T>(kind, stubCode, firstMonitorStub, guard, key, acctype, needsAtomize,
+ getter, pcOffset),
+ holder_(holder),
+ holderShape_(holderShape)
+{}
+
+template <class T>
+/* static */ ICGetElem_NativePrototypeCallNative<T>*
+ICGetElem_NativePrototypeCallNative<T>::Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetElem_NativePrototypeCallNative<T>& other)
+{
+ return ICStub::New<ICGetElem_NativePrototypeCallNative<T>>(cx, space, other.jitCode(),
+ firstMonitorStub, other.receiverGuard(), &other.key().get(), other.accessType(),
+ other.needsAtomize(), other.getter(), other.pcOffset_, other.holder(),
+ other.holderShape());
+}
+
+template ICGetElem_NativePrototypeCallNative<JS::Symbol*>*
+ICGetElem_NativePrototypeCallNative<JS::Symbol*>::Clone(JSContext*, ICStubSpace*, ICStub*,
+ ICGetElem_NativePrototypeCallNative<JS::Symbol*>&);
+template ICGetElem_NativePrototypeCallNative<js::PropertyName*>*
+ICGetElem_NativePrototypeCallNative<js::PropertyName*>::Clone(JSContext*, ICStubSpace*, ICStub*,
+ ICGetElem_NativePrototypeCallNative<js::PropertyName*>&);
+
+template <class T>
+/* static */ ICGetElem_NativePrototypeCallScripted<T>*
+ICGetElem_NativePrototypeCallScripted<T>::Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetElem_NativePrototypeCallScripted<T>& other)
+{
+ return ICStub::New<ICGetElem_NativePrototypeCallScripted<T>>(cx, space, other.jitCode(),
+ firstMonitorStub, other.receiverGuard(), &other.key().get(), other.accessType(),
+ other.needsAtomize(), other.getter(), other.pcOffset_, other.holder(),
+ other.holderShape());
+}
+
+template ICGetElem_NativePrototypeCallScripted<JS::Symbol*>*
+ICGetElem_NativePrototypeCallScripted<JS::Symbol*>::Clone(JSContext*, ICStubSpace*, ICStub*,
+ ICGetElem_NativePrototypeCallScripted<JS::Symbol*>&);
+template ICGetElem_NativePrototypeCallScripted<js::PropertyName*>*
+ICGetElem_NativePrototypeCallScripted<js::PropertyName*>::Clone(JSContext*, ICStubSpace*, ICStub*,
+ ICGetElem_NativePrototypeCallScripted<js::PropertyName*>&);
+
+ICGetElem_Dense::ICGetElem_Dense(JitCode* stubCode, ICStub* firstMonitorStub, Shape* shape)
+ : ICMonitoredStub(GetElem_Dense, stubCode, firstMonitorStub),
+ shape_(shape)
+{ }
+
+/* static */ ICGetElem_Dense*
+ICGetElem_Dense::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetElem_Dense& other)
+{
+ return New<ICGetElem_Dense>(cx, space, other.jitCode(), firstMonitorStub, other.shape_);
+}
+
+ICGetElem_UnboxedArray::ICGetElem_UnboxedArray(JitCode* stubCode, ICStub* firstMonitorStub,
+ ObjectGroup *group)
+ : ICMonitoredStub(GetElem_UnboxedArray, stubCode, firstMonitorStub),
+ group_(group)
+{ }
+
+/* static */ ICGetElem_UnboxedArray*
+ICGetElem_UnboxedArray::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetElem_UnboxedArray& other)
+{
+ return New<ICGetElem_UnboxedArray>(cx, space, other.jitCode(), firstMonitorStub, other.group_);
+}
+
+ICGetElem_TypedArray::ICGetElem_TypedArray(JitCode* stubCode, Shape* shape, Scalar::Type type)
+ : ICStub(GetElem_TypedArray, stubCode),
+ shape_(shape)
+{
+ extra_ = uint16_t(type);
+ MOZ_ASSERT(extra_ == type);
+}
+
+/* static */ ICGetElem_Arguments*
+ICGetElem_Arguments::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetElem_Arguments& other)
+{
+ return New<ICGetElem_Arguments>(cx, space, other.jitCode(), firstMonitorStub, other.which());
+}
+
+ICSetElem_DenseOrUnboxedArray::ICSetElem_DenseOrUnboxedArray(JitCode* stubCode, Shape* shape, ObjectGroup* group)
+ : ICUpdatedStub(SetElem_DenseOrUnboxedArray, stubCode),
+ shape_(shape),
+ group_(group)
+{ }
+
+ICSetElem_DenseOrUnboxedArrayAdd::ICSetElem_DenseOrUnboxedArrayAdd(JitCode* stubCode, ObjectGroup* group,
+ size_t protoChainDepth)
+ : ICUpdatedStub(SetElem_DenseOrUnboxedArrayAdd, stubCode),
+ group_(group)
+{
+ MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
+ extra_ = protoChainDepth;
+}
+
+template <size_t ProtoChainDepth>
+ICUpdatedStub*
+ICSetElemDenseOrUnboxedArrayAddCompiler::getStubSpecific(ICStubSpace* space,
+ Handle<ShapeVector> shapes)
+{
+ RootedObjectGroup group(cx, obj_->getGroup(cx));
+ if (!group)
+ return nullptr;
+ Rooted<JitCode*> stubCode(cx, getStubCode());
+ return newStub<ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>>(space, stubCode, group, shapes);
+}
+
+ICSetElem_TypedArray::ICSetElem_TypedArray(JitCode* stubCode, Shape* shape, Scalar::Type type,
+ bool expectOutOfBounds)
+ : ICStub(SetElem_TypedArray, stubCode),
+ shape_(shape)
+{
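+    // Encode the element type in the low byte of extra_ and the
+    // expect-out-of-bounds flag in bit 8.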
+ extra_ = uint8_t(type);
+ MOZ_ASSERT(extra_ == type);
+ extra_ |= (static_cast<uint16_t>(expectOutOfBounds) << 8);
+}
+
+ICInNativeStub::ICInNativeStub(ICStub::Kind kind, JitCode* stubCode, HandleShape shape,
+ HandlePropertyName name)
+ : ICStub(kind, stubCode),
+ shape_(shape),
+ name_(name)
+{ }
+
+ICIn_NativePrototype::ICIn_NativePrototype(JitCode* stubCode, HandleShape shape,
+ HandlePropertyName name, HandleObject holder,
+ HandleShape holderShape)
+ : ICInNativeStub(In_NativePrototype, stubCode, shape, name),
+ holder_(holder),
+ holderShape_(holderShape)
+{ }
+
+ICIn_NativeDoesNotExist::ICIn_NativeDoesNotExist(JitCode* stubCode, size_t protoChainDepth,
+ HandlePropertyName name)
+ : ICStub(In_NativeDoesNotExist, stubCode),
+ name_(name)
+{
+ MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
+ extra_ = protoChainDepth;
+}
+
+/* static */ size_t
+ICIn_NativeDoesNotExist::offsetOfShape(size_t idx)
+{
+ MOZ_ASSERT(ICIn_NativeDoesNotExistImpl<0>::offsetOfShape(idx) ==
+ ICIn_NativeDoesNotExistImpl<
+ ICIn_NativeDoesNotExist::MAX_PROTO_CHAIN_DEPTH>::offsetOfShape(idx));
+ return ICIn_NativeDoesNotExistImpl<0>::offsetOfShape(idx);
+}
+
+template <size_t ProtoChainDepth>
+ICIn_NativeDoesNotExistImpl<ProtoChainDepth>::ICIn_NativeDoesNotExistImpl(
+ JitCode* stubCode, Handle<ShapeVector> shapes, HandlePropertyName name)
+ : ICIn_NativeDoesNotExist(stubCode, ProtoChainDepth, name)
+{
+ MOZ_ASSERT(shapes.length() == NumShapes);
+ for (size_t i = 0; i < NumShapes; i++)
+ shapes_[i].init(shapes[i]);
+}
+
+ICInNativeDoesNotExistCompiler::ICInNativeDoesNotExistCompiler(
+ JSContext* cx, HandleObject obj, HandlePropertyName name, size_t protoChainDepth)
+ : ICStubCompiler(cx, ICStub::In_NativeDoesNotExist, Engine::Baseline),
+ obj_(cx, obj),
+ name_(cx, name),
+ protoChainDepth_(protoChainDepth)
+{
+ MOZ_ASSERT(protoChainDepth_ <= ICIn_NativeDoesNotExist::MAX_PROTO_CHAIN_DEPTH);
+}
+
+ICIn_Dense::ICIn_Dense(JitCode* stubCode, HandleShape shape)
+ : ICStub(In_Dense, stubCode),
+ shape_(shape)
+{ }
+
+ICGetName_GlobalLexical::ICGetName_GlobalLexical(JitCode* stubCode, ICStub* firstMonitorStub,
+ uint32_t slot)
+ : ICMonitoredStub(GetName_GlobalLexical, stubCode, firstMonitorStub),
+ slot_(slot)
+{ }
+
+template <size_t NumHops>
+ICGetName_Env<NumHops>::ICGetName_Env(JitCode* stubCode, ICStub* firstMonitorStub,
+ Handle<ShapeVector> shapes, uint32_t offset)
+ : ICMonitoredStub(GetStubKind(), stubCode, firstMonitorStub),
+ offset_(offset)
+{
+ JS_STATIC_ASSERT(NumHops <= MAX_HOPS);
+ MOZ_ASSERT(shapes.length() == NumHops + 1);
+ for (size_t i = 0; i < NumHops + 1; i++)
+ shapes_[i].init(shapes[i]);
+}
+
+ICGetIntrinsic_Constant::ICGetIntrinsic_Constant(JitCode* stubCode, const Value& value)
+ : ICStub(GetIntrinsic_Constant, stubCode),
+ value_(value)
+{ }
+
+ICGetIntrinsic_Constant::~ICGetIntrinsic_Constant()
+{ }
+
+ICGetName_Global::ICGetName_Global(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, uint32_t offset,
+ JSObject* holder, Shape* holderShape, Shape* globalShape)
+ : ICGetPropNativePrototypeStub(GetName_Global, stubCode, firstMonitorStub, guard, offset,
+ holder, holderShape),
+ globalShape_(globalShape)
+{ }
+
+/* static */ ICGetName_Global*
+ICGetName_Global::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetName_Global& other)
+{
+ return New<ICGetName_Global>(cx, space, other.jitCode(), firstMonitorStub,
+ other.receiverGuard(), other.offset(),
+ other.holder(), other.holderShape(), other.globalShape());
+}
+
+ICInstanceOf_Function::ICInstanceOf_Function(JitCode* stubCode, Shape* shape,
+ JSObject* prototypeObj, uint32_t slot)
+ : ICStub(InstanceOf_Function, stubCode),
+ shape_(shape),
+ prototypeObj_(prototypeObj),
+ slot_(slot)
+{ }
+
+ICSetProp_Native::ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape,
+ uint32_t offset)
+ : ICUpdatedStub(SetProp_Native, stubCode),
+ group_(group),
+ shape_(shape),
+ offset_(offset)
+{ }
+
+ICSetProp_Native*
+ICSetProp_Native::Compiler::getStub(ICStubSpace* space)
+{
+ RootedObjectGroup group(cx, obj_->getGroup(cx));
+ if (!group)
+ return nullptr;
+
+ RootedShape shape(cx, LastPropertyForSetProp(obj_));
+ ICSetProp_Native* stub = newStub<ICSetProp_Native>(space, getStubCode(), group, shape, offset_);
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+}
+
+ICSetProp_NativeAdd::ICSetProp_NativeAdd(JitCode* stubCode, ObjectGroup* group,
+ size_t protoChainDepth,
+ Shape* newShape,
+ ObjectGroup* newGroup,
+ uint32_t offset)
+ : ICUpdatedStub(SetProp_NativeAdd, stubCode),
+ group_(group),
+ newShape_(newShape),
+ newGroup_(newGroup),
+ offset_(offset)
+{
+ MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
+ extra_ = protoChainDepth;
+}
+
+template <size_t ProtoChainDepth>
+ICSetProp_NativeAddImpl<ProtoChainDepth>::ICSetProp_NativeAddImpl(JitCode* stubCode,
+ ObjectGroup* group,
+ Handle<ShapeVector> shapes,
+ Shape* newShape,
+ ObjectGroup* newGroup,
+ uint32_t offset)
+ : ICSetProp_NativeAdd(stubCode, group, ProtoChainDepth, newShape, newGroup, offset)
+{
+ MOZ_ASSERT(shapes.length() == NumShapes);
+ for (size_t i = 0; i < NumShapes; i++)
+ shapes_[i].init(shapes[i]);
+}
+
+ICSetPropNativeAddCompiler::ICSetPropNativeAddCompiler(JSContext* cx, HandleObject obj,
+ HandleShape oldShape,
+ HandleObjectGroup oldGroup,
+ size_t protoChainDepth,
+ bool isFixedSlot,
+ uint32_t offset)
+ : ICStubCompiler(cx, ICStub::SetProp_NativeAdd, Engine::Baseline),
+ obj_(cx, obj),
+ oldShape_(cx, oldShape),
+ oldGroup_(cx, oldGroup),
+ protoChainDepth_(protoChainDepth),
+ isFixedSlot_(isFixedSlot),
+ offset_(offset)
+{
+ MOZ_ASSERT(protoChainDepth_ <= ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH);
+}
+
+ICSetPropCallSetter::ICSetPropCallSetter(Kind kind, JitCode* stubCode, ReceiverGuard receiverGuard,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* setter, uint32_t pcOffset)
+ : ICStub(kind, stubCode),
+ receiverGuard_(receiverGuard),
+ holder_(holder),
+ holderShape_(holderShape),
+ setter_(setter),
+ pcOffset_(pcOffset)
+{
+ MOZ_ASSERT(kind == ICStub::SetProp_CallScripted || kind == ICStub::SetProp_CallNative);
+}
+
+/* static */ ICSetProp_CallScripted*
+ICSetProp_CallScripted::Clone(JSContext* cx, ICStubSpace* space, ICStub*,
+ ICSetProp_CallScripted& other)
+{
+ return New<ICSetProp_CallScripted>(cx, space, other.jitCode(), other.receiverGuard(),
+ other.holder_, other.holderShape_, other.setter_,
+ other.pcOffset_);
+}
+
+/* static */ ICSetProp_CallNative*
+ICSetProp_CallNative::Clone(JSContext* cx, ICStubSpace* space, ICStub*, ICSetProp_CallNative& other)
+{
+ return New<ICSetProp_CallNative>(cx, space, other.jitCode(), other.receiverGuard(),
+ other.holder_, other.holderShape_, other.setter_,
+ other.pcOffset_);
+}
+
+ICCall_Scripted::ICCall_Scripted(JitCode* stubCode, ICStub* firstMonitorStub,
+ JSFunction* callee, JSObject* templateObject,
+ uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_Scripted, stubCode, firstMonitorStub),
+ callee_(callee),
+ templateObject_(templateObject),
+ pcOffset_(pcOffset)
+{ }
+
+/* static */ ICCall_Scripted*
+ICCall_Scripted::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_Scripted& other)
+{
+ return New<ICCall_Scripted>(cx, space, other.jitCode(), firstMonitorStub, other.callee_,
+ other.templateObject_, other.pcOffset_);
+}
+
+/* static */ ICCall_AnyScripted*
+ICCall_AnyScripted::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_AnyScripted& other)
+{
+ return New<ICCall_AnyScripted>(cx, space, other.jitCode(), firstMonitorStub, other.pcOffset_);
+}
+
+ICCall_Native::ICCall_Native(JitCode* stubCode, ICStub* firstMonitorStub,
+ JSFunction* callee, JSObject* templateObject,
+ uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_Native, stubCode, firstMonitorStub),
+ callee_(callee),
+ templateObject_(templateObject),
+ pcOffset_(pcOffset)
+{
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special swi
+ // instruction to handle them. To make this work, we store the redirected
+ // pointer in the stub.
+ native_ = Simulator::RedirectNativeFunction(JS_FUNC_TO_DATA_PTR(void*, callee->native()),
+ Args_General3);
+#endif
+}
+
+/* static */ ICCall_Native*
+ICCall_Native::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_Native& other)
+{
+ return New<ICCall_Native>(cx, space, other.jitCode(), firstMonitorStub, other.callee_,
+ other.templateObject_, other.pcOffset_);
+}
+
+ICCall_ClassHook::ICCall_ClassHook(JitCode* stubCode, ICStub* firstMonitorStub,
+ const Class* clasp, Native native,
+ JSObject* templateObject, uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_ClassHook, stubCode, firstMonitorStub),
+ clasp_(clasp),
+ native_(JS_FUNC_TO_DATA_PTR(void*, native)),
+ templateObject_(templateObject),
+ pcOffset_(pcOffset)
+{
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special swi
+ // instruction to handle them. To make this work, we store the redirected
+ // pointer in the stub.
+ native_ = Simulator::RedirectNativeFunction(native_, Args_General3);
+#endif
+}
+
+/* static */ ICCall_ClassHook*
+ICCall_ClassHook::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_ClassHook& other)
+{
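+    // Construct with a null native, then copy the (possibly simulator-redirected)
+    // native pointer directly so the clone matches the original stub.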
+ ICCall_ClassHook* res = New<ICCall_ClassHook>(cx, space, other.jitCode(), firstMonitorStub,
+ other.clasp(), nullptr, other.templateObject_,
+ other.pcOffset_);
+ if (res)
+ res->native_ = other.native();
+ return res;
+}
+
+/* static */ ICCall_ScriptedApplyArray*
+ICCall_ScriptedApplyArray::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_ScriptedApplyArray& other)
+{
+ return New<ICCall_ScriptedApplyArray>(cx, space, other.jitCode(), firstMonitorStub,
+ other.pcOffset_);
+}
+
+/* static */ ICCall_ScriptedApplyArguments*
+ICCall_ScriptedApplyArguments::Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICCall_ScriptedApplyArguments& other)
+{
+ return New<ICCall_ScriptedApplyArguments>(cx, space, other.jitCode(), firstMonitorStub,
+ other.pcOffset_);
+}
+
+/* static */ ICCall_ScriptedFunCall*
+ICCall_ScriptedFunCall::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_ScriptedFunCall& other)
+{
+ return New<ICCall_ScriptedFunCall>(cx, space, other.jitCode(), firstMonitorStub,
+ other.pcOffset_);
+}
+
+//
+// Rest_Fallback
+//
+
+static bool DoRestFallback(JSContext* cx, BaselineFrame* frame, ICRest_Fallback* stub,
+ MutableHandleValue res)
+{
+ unsigned numFormals = frame->numFormalArgs() - 1;
+ unsigned numActuals = frame->numActualArgs();
+ unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;
+ Value* rest = frame->argv() + numFormals;
+
+ JSObject* obj = ObjectGroup::newArrayObject(cx, rest, numRest, GenericObject,
+ ObjectGroup::NewArrayKind::UnknownIndex);
+ if (!obj)
+ return false;
+ res.setObject(*obj);
+ return true;
+}
+
+typedef bool (*DoRestFallbackFn)(JSContext*, BaselineFrame*, ICRest_Fallback*,
+ MutableHandleValue);
+static const VMFunction DoRestFallbackInfo =
+ FunctionInfo<DoRestFallbackFn>(DoRestFallback, "DoRestFallback", TailCall);
+
+bool
+ICRest_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoRestFallbackInfo, masm);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/BaselineIC.h b/js/src/jit/BaselineIC.h
new file mode 100644
index 000000000..a57556d99
--- /dev/null
+++ b/js/src/jit/BaselineIC.h
@@ -0,0 +1,3384 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineIC_h
+#define jit_BaselineIC_h
+
+#include "mozilla/Assertions.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsgc.h"
+#include "jsopcode.h"
+
+#include "builtin/TypedObject.h"
+#include "gc/Barrier.h"
+#include "jit/BaselineICList.h"
+#include "jit/BaselineJIT.h"
+#include "jit/SharedIC.h"
+#include "jit/SharedICRegisters.h"
+#include "js/GCVector.h"
+#include "vm/ArrayObject.h"
+#include "vm/UnboxedObject.h"
+
+namespace js {
+namespace jit {
+
+// WarmUpCounter_Fallback
+
+// A WarmUpCounter IC chain has only the fallback stub.
+class ICWarmUpCounter_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICWarmUpCounter_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::WarmUpCounter_Fallback, stubCode)
+ { }
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::WarmUpCounter_Fallback, Engine::Baseline)
+ { }
+
+ ICWarmUpCounter_Fallback* getStub(ICStubSpace* space) {
+ return newStub<ICWarmUpCounter_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+
+// TypeUpdate
+
+extern const VMFunction DoTypeUpdateFallbackInfo;
+
+// The TypeUpdate fallback is not a regular fallback, since it just
+// forwards to a different entry point in the main fallback stub.
+class ICTypeUpdate_Fallback : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICTypeUpdate_Fallback(JitCode* stubCode)
+ : ICStub(ICStub::TypeUpdate_Fallback, stubCode)
+ {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::TypeUpdate_Fallback, Engine::Baseline)
+ { }
+
+ ICTypeUpdate_Fallback* getStub(ICStubSpace* space) {
+ return newStub<ICTypeUpdate_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICTypeUpdate_PrimitiveSet : public TypeCheckPrimitiveSetStub
+{
+ friend class ICStubSpace;
+
+ ICTypeUpdate_PrimitiveSet(JitCode* stubCode, uint16_t flags)
+ : TypeCheckPrimitiveSetStub(TypeUpdate_PrimitiveSet, stubCode, flags)
+ {}
+
+ public:
+ class Compiler : public TypeCheckPrimitiveSetStub::Compiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, ICTypeUpdate_PrimitiveSet* existingStub, JSValueType type)
+ : TypeCheckPrimitiveSetStub::Compiler(cx, TypeUpdate_PrimitiveSet,
+ Engine::Baseline, existingStub, type)
+ {}
+
+ ICTypeUpdate_PrimitiveSet* updateStub() {
+ TypeCheckPrimitiveSetStub* stub =
+ this->TypeCheckPrimitiveSetStub::Compiler::updateStub();
+ if (!stub)
+ return nullptr;
+ return stub->toUpdateStub();
+ }
+
+ ICTypeUpdate_PrimitiveSet* getStub(ICStubSpace* space) {
+ MOZ_ASSERT(!existingStub_);
+ return newStub<ICTypeUpdate_PrimitiveSet>(space, getStubCode(), flags_);
+ }
+ };
+};
+
+// Type update stub to handle a singleton object.
+class ICTypeUpdate_SingleObject : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObject obj_;
+
+ ICTypeUpdate_SingleObject(JitCode* stubCode, JSObject* obj);
+
+ public:
+ GCPtrObject& object() {
+ return obj_;
+ }
+
+ static size_t offsetOfObject() {
+ return offsetof(ICTypeUpdate_SingleObject, obj_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ HandleObject obj_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObject obj)
+ : ICStubCompiler(cx, TypeUpdate_SingleObject, Engine::Baseline),
+ obj_(obj)
+ { }
+
+ ICTypeUpdate_SingleObject* getStub(ICStubSpace* space) {
+ return newStub<ICTypeUpdate_SingleObject>(space, getStubCode(), obj_);
+ }
+ };
+};
+
+// Type update stub to handle a single ObjectGroup.
+class ICTypeUpdate_ObjectGroup : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObjectGroup group_;
+
+ ICTypeUpdate_ObjectGroup(JitCode* stubCode, ObjectGroup* group);
+
+ public:
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+
+ static size_t offsetOfGroup() {
+ return offsetof(ICTypeUpdate_ObjectGroup, group_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ HandleObjectGroup group_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObjectGroup group)
+ : ICStubCompiler(cx, TypeUpdate_ObjectGroup, Engine::Baseline),
+ group_(group)
+ { }
+
+ ICTypeUpdate_ObjectGroup* getStub(ICStubSpace* space) {
+ return newStub<ICTypeUpdate_ObjectGroup>(space, getStubCode(), group_);
+ }
+ };
+};
+
+// ToBool
+// JSOP_IFNE
+
+class ICToBool_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::ToBool_Fallback, stubCode) {}
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_Fallback, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICToBool_Int32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_Int32(JitCode* stubCode)
+ : ICStub(ICStub::ToBool_Int32, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_Int32, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_Int32>(space, getStubCode());
+ }
+ };
+};
+
+class ICToBool_String : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_String(JitCode* stubCode)
+ : ICStub(ICStub::ToBool_String, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_String, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_String>(space, getStubCode());
+ }
+ };
+};
+
+class ICToBool_NullUndefined : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_NullUndefined(JitCode* stubCode)
+ : ICStub(ICStub::ToBool_NullUndefined, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_NullUndefined, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_NullUndefined>(space, getStubCode());
+ }
+ };
+};
+
+class ICToBool_Double : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_Double(JitCode* stubCode)
+ : ICStub(ICStub::ToBool_Double, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_Double, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_Double>(space, getStubCode());
+ }
+ };
+};
+
+class ICToBool_Object : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToBool_Object(JitCode* stubCode)
+ : ICStub(ICStub::ToBool_Object, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToBool_Object, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToBool_Object>(space, getStubCode());
+ }
+ };
+};
+
+// ToNumber
+// JSOP_POS
+
+class ICToNumber_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICToNumber_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::ToNumber_Fallback, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::ToNumber_Fallback, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICToNumber_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// GetElem
+// JSOP_GETELEM
+
+class ICGetElem_Fallback : public ICMonitoredFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetElem_Fallback(JitCode* stubCode)
+ : ICMonitoredFallbackStub(ICStub::GetElem_Fallback, stubCode)
+ { }
+
+ static const uint16_t EXTRA_NON_NATIVE = 0x1;
+ static const uint16_t EXTRA_NEGATIVE_INDEX = 0x2;
+ static const uint16_t EXTRA_UNOPTIMIZABLE_ACCESS = 0x4;
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 16;
+
+ void noteNonNativeAccess() {
+ extra_ |= EXTRA_NON_NATIVE;
+ }
+ bool hasNonNativeAccess() const {
+ return extra_ & EXTRA_NON_NATIVE;
+ }
+
+ void noteNegativeIndex() {
+ extra_ |= EXTRA_NEGATIVE_INDEX;
+ }
+ bool hasNegativeIndex() const {
+ return extra_ & EXTRA_NEGATIVE_INDEX;
+ }
+ void noteUnoptimizableAccess() {
+ extra_ |= EXTRA_UNOPTIMIZABLE_ACCESS;
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & EXTRA_UNOPTIMIZABLE_ACCESS;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::GetElem_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ ICGetElem_Fallback* stub = newStub<ICGetElem_Fallback>(space, getStubCode());
+ if (!stub)
+ return nullptr;
+ if (!stub->initMonitoringChain(cx, space, engine_))
+ return nullptr;
+ return stub;
+ }
+ };
+};
+
+class ICGetElemNativeStub : public ICMonitoredStub
+{
+ public:
+ enum AccessType { FixedSlot = 0, DynamicSlot, UnboxedProperty, NativeGetter, ScriptedGetter, NumAccessTypes };
+
+ protected:
+ HeapReceiverGuard receiverGuard_;
+
+ static const unsigned NEEDS_ATOMIZE_SHIFT = 0;
+ static const uint16_t NEEDS_ATOMIZE_MASK = 0x1;
+
+ static const unsigned ACCESSTYPE_SHIFT = 1;
+ static const uint16_t ACCESSTYPE_MASK = 0x7;
+
+ static const unsigned ISSYMBOL_SHIFT = 4;
+ static const uint16_t ISSYMBOL_MASK = 0x1;
+
+ static_assert(ACCESSTYPE_MASK >= NumAccessTypes, "ACCESSTYPE_MASK must cover all possible AccessType values");
+
+ ICGetElemNativeStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, AccessType acctype, bool needsAtomize, bool isSymbol);
+
+ ~ICGetElemNativeStub();
+
+ public:
+ HeapReceiverGuard& receiverGuard() {
+ return receiverGuard_;
+ }
+ static size_t offsetOfReceiverGuard() {
+ return offsetof(ICGetElemNativeStub, receiverGuard_);
+ }
+
+ AccessType accessType() const {
+ return static_cast<AccessType>((extra_ >> ACCESSTYPE_SHIFT) & ACCESSTYPE_MASK);
+ }
+
+ bool needsAtomize() const {
+ return (extra_ >> NEEDS_ATOMIZE_SHIFT) & NEEDS_ATOMIZE_MASK;
+ }
+
+ bool isSymbol() const {
+ return (extra_ >> ISSYMBOL_SHIFT) & ISSYMBOL_MASK;
+ }
+};
+
+template <class T>
+class ICGetElemNativeStubImpl : public ICGetElemNativeStub
+{
+ protected:
+ GCPtr<T> key_;
+
+ ICGetElemNativeStubImpl(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccessType acctype, bool needsAtomize)
+ : ICGetElemNativeStub(kind, stubCode, firstMonitorStub, guard, acctype, needsAtomize,
+ mozilla::IsSame<T, JS::Symbol*>::value),
+ key_(*key)
+ {}
+
+ public:
+ GCPtr<T>& key() {
+ return key_;
+ }
+ static size_t offsetOfKey() {
+ return offsetof(ICGetElemNativeStubImpl, key_);
+ }
+};
+
+typedef ICGetElemNativeStub::AccessType AccType;
+
+template <class T>
+class ICGetElemNativeSlotStub : public ICGetElemNativeStubImpl<T>
+{
+ protected:
+ uint32_t offset_;
+
+ ICGetElemNativeSlotStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype, bool needsAtomize,
+ uint32_t offset)
+ : ICGetElemNativeStubImpl<T>(kind, stubCode, firstMonitorStub, guard, key, acctype, needsAtomize),
+ offset_(offset)
+ {
+ MOZ_ASSERT(kind == ICStub::GetElem_NativeSlotName ||
+ kind == ICStub::GetElem_NativeSlotSymbol ||
+ kind == ICStub::GetElem_NativePrototypeSlotName ||
+ kind == ICStub::GetElem_NativePrototypeSlotSymbol ||
+ kind == ICStub::GetElem_UnboxedPropertyName);
+ MOZ_ASSERT(acctype == ICGetElemNativeStub::FixedSlot ||
+ acctype == ICGetElemNativeStub::DynamicSlot ||
+ acctype == ICGetElemNativeStub::UnboxedProperty);
+ }
+
+ public:
+ uint32_t offset() const {
+ return offset_;
+ }
+
+ static size_t offsetOfOffset() {
+ return offsetof(ICGetElemNativeSlotStub, offset_);
+ }
+};
+
+template <class T>
+class ICGetElemNativeGetterStub : public ICGetElemNativeStubImpl<T>
+{
+ protected:
+ GCPtrFunction getter_;
+ uint32_t pcOffset_;
+
+ ICGetElemNativeGetterStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype, bool needsAtomize,
+ JSFunction* getter, uint32_t pcOffset);
+
+ public:
+ GCPtrFunction& getter() {
+ return getter_;
+ }
+ static size_t offsetOfGetter() {
+ return offsetof(ICGetElemNativeGetterStub, getter_);
+ }
+
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICGetElemNativeGetterStub, pcOffset_);
+ }
+};
+
+template <class T>
+ICStub::Kind
+getGetElemStubKind(ICStub::Kind kind)
+{
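+    // Each Symbol-keyed stub kind is declared immediately after its Name-keyed
+    // counterpart, so adding 1 to the kind selects the Symbol variant.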
+ MOZ_ASSERT(kind == ICStub::GetElem_NativeSlotName ||
+ kind == ICStub::GetElem_NativePrototypeSlotName ||
+ kind == ICStub::GetElem_NativePrototypeCallNativeName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedName);
+ return static_cast<ICStub::Kind>(kind + mozilla::IsSame<T, JS::Symbol*>::value);
+}
+
+template <class T>
+class ICGetElem_NativeSlot : public ICGetElemNativeSlotStub<T>
+{
+ friend class ICStubSpace;
+ ICGetElem_NativeSlot(JitCode* stubCode, ICStub* firstMonitorStub, ReceiverGuard guard,
+ const T* key, AccType acctype, bool needsAtomize, uint32_t offset)
+ : ICGetElemNativeSlotStub<T>(getGetElemStubKind<T>(ICStub::GetElem_NativeSlotName),
+ stubCode, firstMonitorStub, guard,
+ key, acctype, needsAtomize, offset)
+ {}
+};
+
+class ICGetElem_NativeSlotName :
+ public ICGetElem_NativeSlot<PropertyName*>
+{};
+class ICGetElem_NativeSlotSymbol :
+ public ICGetElem_NativeSlot<JS::Symbol*>
+{};
+
+template <class T>
+class ICGetElem_UnboxedProperty : public ICGetElemNativeSlotStub<T>
+{
+ friend class ICStubSpace;
+ ICGetElem_UnboxedProperty(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype,
+ bool needsAtomize, uint32_t offset)
+ : ICGetElemNativeSlotStub<T>(ICStub::GetElem_UnboxedPropertyName, stubCode, firstMonitorStub,
+ guard, key, acctype, needsAtomize, offset)
+ {}
+};
+
+class ICGetElem_UnboxedPropertyName :
+ public ICGetElem_UnboxedProperty<PropertyName*>
+{};
+
+template <class T>
+class ICGetElem_NativePrototypeSlot : public ICGetElemNativeSlotStub<T>
+{
+ friend class ICStubSpace;
+ GCPtrObject holder_;
+ GCPtrShape holderShape_;
+
+ ICGetElem_NativePrototypeSlot(JitCode* stubCode, ICStub* firstMonitorStub, ReceiverGuard guard,
+ const T* key, AccType acctype, bool needsAtomize, uint32_t offset,
+ JSObject* holder, Shape* holderShape);
+
+ public:
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ static size_t offsetOfHolder() {
+ return offsetof(ICGetElem_NativePrototypeSlot, holder_);
+ }
+
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICGetElem_NativePrototypeSlot, holderShape_);
+ }
+};
+
+class ICGetElem_NativePrototypeSlotName :
+ public ICGetElem_NativePrototypeSlot<PropertyName*>
+{};
+class ICGetElem_NativePrototypeSlotSymbol :
+ public ICGetElem_NativePrototypeSlot<JS::Symbol*>
+{};
+
+template <class T>
+class ICGetElemNativePrototypeCallStub : public ICGetElemNativeGetterStub<T>
+{
+ friend class ICStubSpace;
+ GCPtrObject holder_;
+ GCPtrShape holderShape_;
+
+ protected:
+ ICGetElemNativePrototypeCallStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype,
+ bool needsAtomize, JSFunction* getter, uint32_t pcOffset,
+ JSObject* holder, Shape* holderShape);
+
+ public:
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ static size_t offsetOfHolder() {
+ return offsetof(ICGetElemNativePrototypeCallStub, holder_);
+ }
+
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICGetElemNativePrototypeCallStub, holderShape_);
+ }
+};
+
+template <class T>
+class ICGetElem_NativePrototypeCallNative : public ICGetElemNativePrototypeCallStub<T>
+{
+ friend class ICStubSpace;
+
+ ICGetElem_NativePrototypeCallNative(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype,
+ bool needsAtomize, JSFunction* getter, uint32_t pcOffset,
+ JSObject* holder, Shape* holderShape)
+ : ICGetElemNativePrototypeCallStub<T>(getGetElemStubKind<T>(
+ ICStub::GetElem_NativePrototypeCallNativeName),
+ stubCode, firstMonitorStub, guard, key,
+ acctype, needsAtomize, getter, pcOffset, holder,
+ holderShape)
+ {}
+
+ public:
+ static ICGetElem_NativePrototypeCallNative<T>* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetElem_NativePrototypeCallNative<T>& other);
+};
+
+class ICGetElem_NativePrototypeCallNativeName :
+ public ICGetElem_NativePrototypeCallNative<PropertyName*>
+{};
+class ICGetElem_NativePrototypeCallNativeSymbol :
+ public ICGetElem_NativePrototypeCallNative<JS::Symbol*>
+{};
+
+template <class T>
+class ICGetElem_NativePrototypeCallScripted : public ICGetElemNativePrototypeCallStub<T>
+{
+ friend class ICStubSpace;
+
+ ICGetElem_NativePrototypeCallScripted(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, const T* key, AccType acctype,
+ bool needsAtomize, JSFunction* getter, uint32_t pcOffset,
+ JSObject* holder, Shape* holderShape)
+ : ICGetElemNativePrototypeCallStub<T>(getGetElemStubKind<T>(
+ ICStub::GetElem_NativePrototypeCallScriptedName),
+ stubCode, firstMonitorStub, guard, key, acctype,
+ needsAtomize, getter, pcOffset, holder, holderShape)
+ {}
+
+ public:
+ static ICGetElem_NativePrototypeCallScripted<T>*
+ Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetElem_NativePrototypeCallScripted<T>& other);
+};
+
+class ICGetElem_NativePrototypeCallScriptedName :
+ public ICGetElem_NativePrototypeCallScripted<PropertyName*>
+{};
+class ICGetElem_NativePrototypeCallScriptedSymbol :
+ public ICGetElem_NativePrototypeCallScripted<JS::Symbol*>
+{};
+
+// Compiler for the GetElem_Native* family of stubs: own and prototype slot
+// reads, unboxed property reads, and prototype getter calls (native and
+// scripted).
+template <class T>
+class ICGetElemNativeCompiler : public ICStubCompiler
+{
+ ICStub* firstMonitorStub_;
+ HandleObject obj_;
+ HandleObject holder_;
+ Handle<T> key_;
+ AccType acctype_;
+ bool needsAtomize_;
+ uint32_t offset_;
+ JSValueType unboxedType_;
+ HandleFunction getter_;
+ uint32_t pcOffset_;
+
+ MOZ_MUST_USE bool emitCheckKey(MacroAssembler& masm, Label& failure);
+ MOZ_MUST_USE bool emitCallNative(MacroAssembler& masm, Register objReg);
+ MOZ_MUST_USE bool emitCallScripted(MacroAssembler& masm, Register objReg);
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ protected:
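+    // Every input that affects the generated code is folded into the key,
+    // presumably so compilers with identical inputs can share cached stub
+    // code; the asserts below bound acctype_ and unboxedType_ to the widths
+    // of their bit fields.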
+ virtual int32_t getKey() const {
+ MOZ_ASSERT(static_cast<int32_t>(acctype_) <= 7);
+ MOZ_ASSERT(static_cast<int32_t>(unboxedType_) <= 15);
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(needsAtomize_) << 17) |
+ (static_cast<int32_t>(acctype_) << 18) |
+ (static_cast<int32_t>(unboxedType_) << 21) |
+ (static_cast<int32_t>(mozilla::IsSame<JS::Symbol*, T>::value) << 25) |
+ (HeapReceiverGuard::keyBits(obj_) << 26);
+ }
+
+ public:
+ ICGetElemNativeCompiler(JSContext* cx, ICStub::Kind kind, ICStub* firstMonitorStub,
+ HandleObject obj, HandleObject holder, Handle<T> key, AccType acctype,
+ bool needsAtomize, uint32_t offset,
+ JSValueType unboxedType = JSVAL_TYPE_MAGIC)
+ : ICStubCompiler(cx, kind, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ obj_(obj),
+ holder_(holder),
+ key_(key),
+ acctype_(acctype),
+ needsAtomize_(needsAtomize),
+ offset_(offset),
+ unboxedType_(unboxedType),
+ getter_(nullptr),
+ pcOffset_(0)
+ {}
+
+ ICGetElemNativeCompiler(JSContext* cx, ICStub::Kind kind, ICStub* firstMonitorStub,
+ HandleObject obj, HandleObject holder, Handle<T> key, AccType acctype,
+ bool needsAtomize, HandleFunction getter, uint32_t pcOffset)
+ : ICStubCompiler(cx, kind, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ obj_(obj),
+ holder_(holder),
+ key_(key),
+ acctype_(acctype),
+ needsAtomize_(needsAtomize),
+ offset_(0),
+ unboxedType_(JSVAL_TYPE_MAGIC),
+ getter_(getter),
+ pcOffset_(pcOffset)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ RootedReceiverGuard guard(cx, ReceiverGuard(obj_));
+ if (kind == ICStub::GetElem_NativeSlotName || kind == ICStub::GetElem_NativeSlotSymbol) {
+ MOZ_ASSERT(obj_ == holder_);
+ return newStub<ICGetElem_NativeSlot<T>>(
+ space, getStubCode(), firstMonitorStub_, guard, key_.address(), acctype_,
+ needsAtomize_, offset_);
+ }
+
+ if (kind == ICStub::GetElem_UnboxedPropertyName) {
+ MOZ_ASSERT(obj_ == holder_);
+ return newStub<ICGetElem_UnboxedProperty<T>>(
+ space, getStubCode(), firstMonitorStub_, guard, key_.address(), acctype_,
+ needsAtomize_, offset_);
+ }
+
+ MOZ_ASSERT(obj_ != holder_);
+ RootedShape holderShape(cx, holder_->as<NativeObject>().lastProperty());
+ if (kind == ICStub::GetElem_NativePrototypeSlotName ||
+ kind == ICStub::GetElem_NativePrototypeSlotSymbol)
+ {
+ return newStub<ICGetElem_NativePrototypeSlot<T>>(
+ space, getStubCode(), firstMonitorStub_, guard, key_.address(), acctype_,
+ needsAtomize_, offset_, holder_, holderShape);
+ }
+
+ if (kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
+ kind == ICStub::GetElem_NativePrototypeCallNativeName) {
+ return newStub<ICGetElem_NativePrototypeCallNative<T>>(
+ space, getStubCode(), firstMonitorStub_, guard, key_.address(), acctype_,
+ needsAtomize_, getter_, pcOffset_, holder_, holderShape);
+ }
+
+ MOZ_ASSERT(kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol);
+ if (kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
+ kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol) {
+ return newStub<ICGetElem_NativePrototypeCallScripted<T>>(
+ space, getStubCode(), firstMonitorStub_, guard, key_.address(), acctype_,
+ needsAtomize_, getter_, pcOffset_, holder_, holderShape);
+ }
+
+ MOZ_CRASH("Invalid kind.");
+ }
+};
+
+class ICGetElem_String : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetElem_String(JitCode* stubCode)
+ : ICStub(ICStub::GetElem_String, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::GetElem_String, Engine::Baseline) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetElem_String>(space, getStubCode());
+ }
+ };
+};
+
+class ICGetElem_Dense : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ GCPtrShape shape_;
+
+ ICGetElem_Dense(JitCode* stubCode, ICStub* firstMonitorStub, Shape* shape);
+
+ public:
+ static ICGetElem_Dense* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetElem_Dense& other);
+
+ static size_t offsetOfShape() {
+ return offsetof(ICGetElem_Dense, shape_);
+ }
+
+ GCPtrShape& shape() {
+ return shape_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ RootedShape shape_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, Shape* shape)
+ : ICStubCompiler(cx, ICStub::GetElem_Dense, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ shape_(cx, shape)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetElem_Dense>(space, getStubCode(), firstMonitorStub_, shape_);
+ }
+ };
+};
+
+class ICGetElem_UnboxedArray : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObjectGroup group_;
+
+ ICGetElem_UnboxedArray(JitCode* stubCode, ICStub* firstMonitorStub, ObjectGroup* group);
+
+ public:
+ static ICGetElem_UnboxedArray* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub, ICGetElem_UnboxedArray& other);
+
+ static size_t offsetOfGroup() {
+ return offsetof(ICGetElem_UnboxedArray, group_);
+ }
+
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ RootedObjectGroup group_;
+ JSValueType elementType_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(elementType_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, ObjectGroup* group)
+ : ICStubCompiler(cx, ICStub::GetElem_UnboxedArray, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ group_(cx, group),
+ elementType_(group->unboxedLayoutDontCheckGeneration().elementType())
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetElem_UnboxedArray>(space, getStubCode(), firstMonitorStub_, group_);
+ }
+ };
+};
+
+// Accesses scalar elements of a typed array or typed object.
+class ICGetElem_TypedArray : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected: // Protected to silence Clang warning.
+ GCPtrShape shape_;
+
+ ICGetElem_TypedArray(JitCode* stubCode, Shape* shape, Scalar::Type type);
+
+ public:
+ static size_t offsetOfShape() {
+ return offsetof(ICGetElem_TypedArray, shape_);
+ }
+
+ GCPtrShape& shape() {
+ return shape_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedShape shape_;
+ Scalar::Type type_;
+ TypedThingLayout layout_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(type_) << 17) |
+ (static_cast<int32_t>(layout_) << 25);
+ }
+
+ public:
+ Compiler(JSContext* cx, Shape* shape, Scalar::Type type)
+ : ICStubCompiler(cx, ICStub::GetElem_TypedArray, Engine::Baseline),
+ shape_(cx, shape),
+ type_(type),
+ layout_(GetTypedThingLayout(shape->getObjectClass()))
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetElem_TypedArray>(space, getStubCode(), shape_, type_);
+ }
+ };
+};
+
+class ICGetElem_Arguments : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+ public:
+ enum Which { Mapped, Unmapped, Magic };
+
+ private:
+ ICGetElem_Arguments(JitCode* stubCode, ICStub* firstMonitorStub, Which which)
+ : ICMonitoredStub(ICStub::GetElem_Arguments, stubCode, firstMonitorStub)
+ {
+ extra_ = static_cast<uint16_t>(which);
+ }
+
+ public:
+ static ICGetElem_Arguments* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetElem_Arguments& other);
+
+ Which which() const {
+ return static_cast<Which>(extra_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ Which which_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(which_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, Which which)
+ : ICStubCompiler(cx, ICStub::GetElem_Arguments, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ which_(which)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetElem_Arguments>(space, getStubCode(), firstMonitorStub_, which_);
+ }
+ };
+};
+
+// SetElem
+// JSOP_SETELEM
+// JSOP_INITELEM
+
+class ICSetElem_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICSetElem_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::SetElem_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ void noteArrayWriteHole() {
+ extra_ = 1;
+ }
+ bool hasArrayWriteHole() const {
+ return extra_;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::SetElem_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICSetElem_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICSetElem_DenseOrUnboxedArray : public ICUpdatedStub
+{
+ friend class ICStubSpace;
+
+ GCPtrShape shape_; // null for unboxed arrays
+ GCPtrObjectGroup group_;
+
+ ICSetElem_DenseOrUnboxedArray(JitCode* stubCode, Shape* shape, ObjectGroup* group);
+
+ public:
+ static size_t offsetOfShape() {
+ return offsetof(ICSetElem_DenseOrUnboxedArray, shape_);
+ }
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetElem_DenseOrUnboxedArray, group_);
+ }
+
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedShape shape_;
+ RootedObjectGroup group_;
+ JSValueType unboxedType_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(unboxedType_) << 17);
+ }
+
+ Compiler(JSContext* cx, Shape* shape, HandleObjectGroup group)
+ : ICStubCompiler(cx, ICStub::SetElem_DenseOrUnboxedArray, Engine::Baseline),
+ shape_(cx, shape),
+ group_(cx, group),
+ unboxedType_(shape
+ ? JSVAL_TYPE_MAGIC
+ : group->unboxedLayoutDontCheckGeneration().elementType())
+ {}
+
+ ICUpdatedStub* getStub(ICStubSpace* space) {
+ ICSetElem_DenseOrUnboxedArray* stub =
+ newStub<ICSetElem_DenseOrUnboxedArray>(space, getStubCode(), shape_, group_);
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+ }
+
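+        // Type-update stubs are only needed when the written value's type
+        // has to be tracked: boxed dense elements (JSVAL_TYPE_MAGIC marks
+        // the non-unboxed case above) or object-typed unboxed elements.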
+ bool needsUpdateStubs() {
+ return unboxedType_ == JSVAL_TYPE_MAGIC || unboxedType_ == JSVAL_TYPE_OBJECT;
+ }
+ };
+};
+
+template <size_t ProtoChainDepth> class ICSetElem_DenseOrUnboxedArrayAddImpl;
+
+class ICSetElem_DenseOrUnboxedArrayAdd : public ICUpdatedStub
+{
+ friend class ICStubSpace;
+
+ public:
+ static const size_t MAX_PROTO_CHAIN_DEPTH = 4;
+
+ protected:
+ GCPtrObjectGroup group_;
+
+ ICSetElem_DenseOrUnboxedArrayAdd(JitCode* stubCode, ObjectGroup* group, size_t protoChainDepth);
+
+ public:
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetElem_DenseOrUnboxedArrayAdd, group_);
+ }
+
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+ size_t protoChainDepth() const {
+ MOZ_ASSERT(extra_ <= MAX_PROTO_CHAIN_DEPTH);
+ return extra_;
+ }
+
+ template <size_t ProtoChainDepth>
+ ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>* toImplUnchecked() {
+ return static_cast<ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>*>(this);
+ }
+
+ template <size_t ProtoChainDepth>
+ ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>* toImpl() {
+ MOZ_ASSERT(ProtoChainDepth == protoChainDepth());
+ return toImplUnchecked<ProtoChainDepth>();
+ }
+};
+
+template <size_t ProtoChainDepth>
+class ICSetElem_DenseOrUnboxedArrayAddImpl : public ICSetElem_DenseOrUnboxedArrayAdd
+{
+ friend class ICStubSpace;
+
+ // Note: for unboxed arrays, the first shape is null.
+ static const size_t NumShapes = ProtoChainDepth + 1;
+ mozilla::Array<GCPtrShape, NumShapes> shapes_;
+
+ ICSetElem_DenseOrUnboxedArrayAddImpl(JitCode* stubCode, ObjectGroup* group,
+ Handle<ShapeVector> shapes)
+ : ICSetElem_DenseOrUnboxedArrayAdd(stubCode, group, ProtoChainDepth)
+ {
+ MOZ_ASSERT(shapes.length() == NumShapes);
+ for (size_t i = 0; i < NumShapes; i++)
+ shapes_[i].init(shapes[i]);
+ }
+
+ public:
+ void traceShapes(JSTracer* trc) {
+ for (size_t i = 0; i < NumShapes; i++)
+ TraceNullableEdge(trc, &shapes_[i], "baseline-setelem-denseadd-stub-shape");
+ }
+ Shape* shape(size_t i) const {
+ MOZ_ASSERT(i < NumShapes);
+ return shapes_[i];
+ }
+ static size_t offsetOfShape(size_t idx) {
+ return offsetof(ICSetElem_DenseOrUnboxedArrayAddImpl, shapes_) + idx * sizeof(GCPtrShape);
+ }
+};
+
+class ICSetElemDenseOrUnboxedArrayAddCompiler : public ICStubCompiler {
+ RootedObject obj_;
+ size_t protoChainDepth_;
+ JSValueType unboxedType_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(protoChainDepth_) << 17) |
+ (static_cast<int32_t>(unboxedType_) << 20);
+ }
+
+ public:
+ ICSetElemDenseOrUnboxedArrayAddCompiler(JSContext* cx, HandleObject obj, size_t protoChainDepth)
+ : ICStubCompiler(cx, ICStub::SetElem_DenseOrUnboxedArrayAdd, Engine::Baseline),
+ obj_(cx, obj),
+ protoChainDepth_(protoChainDepth),
+ unboxedType_(obj->is<UnboxedArrayObject>()
+ ? obj->as<UnboxedArrayObject>().elementType()
+ : JSVAL_TYPE_MAGIC)
+ {}
+
+ template <size_t ProtoChainDepth>
+ ICUpdatedStub* getStubSpecific(ICStubSpace* space, Handle<ShapeVector> shapes);
+
+ ICUpdatedStub* getStub(ICStubSpace* space);
+
+ bool needsUpdateStubs() {
+ return unboxedType_ == JSVAL_TYPE_MAGIC || unboxedType_ == JSVAL_TYPE_OBJECT;
+ }
+};
+
+// Accesses scalar elements of a typed array or typed object.
+class ICSetElem_TypedArray : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected: // Protected to silence Clang warning.
+ GCPtrShape shape_;
+
+ ICSetElem_TypedArray(JitCode* stubCode, Shape* shape, Scalar::Type type,
+ bool expectOutOfBounds);
+
+ public:
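+    // extra_ packs the scalar element type in its low byte and the
+    // expectOutOfBounds flag in bit 8.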
+ Scalar::Type type() const {
+ return (Scalar::Type) (extra_ & 0xff);
+ }
+
+ bool expectOutOfBounds() const {
+ return (extra_ >> 8) & 1;
+ }
+
+ static size_t offsetOfShape() {
+ return offsetof(ICSetElem_TypedArray, shape_);
+ }
+
+ GCPtrShape& shape() {
+ return shape_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedShape shape_;
+ Scalar::Type type_;
+ TypedThingLayout layout_;
+ bool expectOutOfBounds_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(type_) << 17) |
+ (static_cast<int32_t>(layout_) << 25) |
+ (static_cast<int32_t>(expectOutOfBounds_) << 29);
+ }
+
+ public:
+ Compiler(JSContext* cx, Shape* shape, Scalar::Type type, bool expectOutOfBounds)
+ : ICStubCompiler(cx, ICStub::SetElem_TypedArray, Engine::Baseline),
+ shape_(cx, shape),
+ type_(type),
+ layout_(GetTypedThingLayout(shape->getObjectClass())),
+ expectOutOfBounds_(expectOutOfBounds)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICSetElem_TypedArray>(space, getStubCode(), shape_, type_,
+ expectOutOfBounds_);
+ }
+ };
+};
+
+// In
+// JSOP_IN
+class ICIn_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICIn_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::In_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::In_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIn_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// Base class for In_Native and In_NativePrototype stubs.
+class ICInNativeStub : public ICStub
+{
+ GCPtrShape shape_;
+ GCPtrPropertyName name_;
+
+ protected:
+ ICInNativeStub(ICStub::Kind kind, JitCode* stubCode, HandleShape shape,
+ HandlePropertyName name);
+
+ public:
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ static size_t offsetOfShape() {
+ return offsetof(ICInNativeStub, shape_);
+ }
+
+ GCPtrPropertyName& name() {
+ return name_;
+ }
+ static size_t offsetOfName() {
+ return offsetof(ICInNativeStub, name_);
+ }
+};
+
+// Stub for confirming an own property on a native object.
+class ICIn_Native : public ICInNativeStub
+{
+ friend class ICStubSpace;
+
+ ICIn_Native(JitCode* stubCode, HandleShape shape, HandlePropertyName name)
+ : ICInNativeStub(In_Native, stubCode, shape, name)
+ {}
+};
+
+// Stub for confirming a property on a native object's prototype. Note that due to
+// the shape teleporting optimization, we only have to guard on the object's shape
+// and the holder's shape.
+class ICIn_NativePrototype : public ICInNativeStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObject holder_;
+ GCPtrShape holderShape_;
+
+ ICIn_NativePrototype(JitCode* stubCode, HandleShape shape, HandlePropertyName name,
+ HandleObject holder, HandleShape holderShape);
+
+ public:
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ static size_t offsetOfHolder() {
+ return offsetof(ICIn_NativePrototype, holder_);
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICIn_NativePrototype, holderShape_);
+ }
+};
+
+// Compiler for In_Native and In_NativePrototype stubs.
+class ICInNativeCompiler : public ICStubCompiler
+{
+ RootedObject obj_;
+ RootedObject holder_;
+ RootedPropertyName name_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ ICInNativeCompiler(JSContext* cx, ICStub::Kind kind, HandleObject obj, HandleObject holder,
+ HandlePropertyName name)
+ : ICStubCompiler(cx, kind, Engine::Baseline),
+ obj_(cx, obj),
+ holder_(cx, holder),
+ name_(cx, name)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ RootedShape shape(cx, obj_->as<NativeObject>().lastProperty());
+ if (kind == ICStub::In_Native) {
+ MOZ_ASSERT(obj_ == holder_);
+ return newStub<ICIn_Native>(space, getStubCode(), shape, name_);
+ }
+
+ MOZ_ASSERT(obj_ != holder_);
+ MOZ_ASSERT(kind == ICStub::In_NativePrototype);
+ RootedShape holderShape(cx, holder_->as<NativeObject>().lastProperty());
+ return newStub<ICIn_NativePrototype>(space, getStubCode(), shape, name_, holder_,
+ holderShape);
+ }
+};
+
+template <size_t ProtoChainDepth> class ICIn_NativeDoesNotExistImpl;
+
+class ICIn_NativeDoesNotExist : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrPropertyName name_;
+
+ public:
+ static const size_t MAX_PROTO_CHAIN_DEPTH = 8;
+
+ protected:
+ ICIn_NativeDoesNotExist(JitCode* stubCode, size_t protoChainDepth,
+ HandlePropertyName name);
+
+ public:
+ size_t protoChainDepth() const {
+ MOZ_ASSERT(extra_ <= MAX_PROTO_CHAIN_DEPTH);
+ return extra_;
+ }
+ GCPtrPropertyName& name() {
+ return name_;
+ }
+
+ template <size_t ProtoChainDepth>
+ ICIn_NativeDoesNotExistImpl<ProtoChainDepth>* toImpl() {
+ MOZ_ASSERT(ProtoChainDepth == protoChainDepth());
+ return static_cast<ICIn_NativeDoesNotExistImpl<ProtoChainDepth>*>(this);
+ }
+
+ static size_t offsetOfShape(size_t idx);
+ static size_t offsetOfName() {
+ return offsetof(ICIn_NativeDoesNotExist, name_);
+ }
+};
+
+template <size_t ProtoChainDepth>
+class ICIn_NativeDoesNotExistImpl : public ICIn_NativeDoesNotExist
+{
+ friend class ICStubSpace;
+
+ public:
+ static const size_t MAX_PROTO_CHAIN_DEPTH = 8;
+ static const size_t NumShapes = ProtoChainDepth + 1;
+
+ private:
+ mozilla::Array<GCPtrShape, NumShapes> shapes_;
+
+ ICIn_NativeDoesNotExistImpl(JitCode* stubCode, Handle<ShapeVector> shapes,
+ HandlePropertyName name);
+
+ public:
+ void traceShapes(JSTracer* trc) {
+ for (size_t i = 0; i < NumShapes; i++)
+ TraceEdge(trc, &shapes_[i], "baseline-innativedoesnotexist-stub-shape");
+ }
+
+ static size_t offsetOfShape(size_t idx) {
+ return offsetof(ICIn_NativeDoesNotExistImpl, shapes_) + (idx * sizeof(GCPtrShape));
+ }
+};
+
+class ICInNativeDoesNotExistCompiler : public ICStubCompiler
+{
+ RootedObject obj_;
+ RootedPropertyName name_;
+ size_t protoChainDepth_;
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(protoChainDepth_) << 17);
+ }
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ ICInNativeDoesNotExistCompiler(JSContext* cx, HandleObject obj, HandlePropertyName name,
+ size_t protoChainDepth);
+
+ template <size_t ProtoChainDepth>
+ ICStub* getStubSpecific(ICStubSpace* space, Handle<ShapeVector> shapes) {
+        return newStub<ICIn_NativeDoesNotExistImpl<ProtoChainDepth>>(space, getStubCode(), shapes,
+                                                                     name_);
+    }
+
+ ICStub* getStub(ICStubSpace* space);
+};
+
+class ICIn_Dense : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrShape shape_;
+
+ ICIn_Dense(JitCode* stubCode, HandleShape shape);
+
+ public:
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ static size_t offsetOfShape() {
+ return offsetof(ICIn_Dense, shape_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedShape shape_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Shape* shape)
+ : ICStubCompiler(cx, ICStub::In_Dense, Engine::Baseline),
+ shape_(cx, shape)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIn_Dense>(space, getStubCode(), shape_);
+ }
+ };
+};
+
+// GetName
+// JSOP_GETNAME
+// JSOP_GETGNAME
+class ICGetName_Fallback : public ICMonitoredFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetName_Fallback(JitCode* stubCode)
+ : ICMonitoredFallbackStub(ICStub::GetName_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+ static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0;
+
+ void noteUnoptimizableAccess() {
+ extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::GetName_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ ICGetName_Fallback* stub = newStub<ICGetName_Fallback>(space, getStubCode());
+ if (!stub || !stub->initMonitoringChain(cx, space, engine_))
+ return nullptr;
+ return stub;
+ }
+ };
+};
+
+// Optimized lexical GETGNAME stub.
+class ICGetName_GlobalLexical : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected: // Protected to silence Clang warning.
+ uint32_t slot_;
+
+ ICGetName_GlobalLexical(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t slot);
+
+ public:
+ static size_t offsetOfSlot() {
+ return offsetof(ICGetName_GlobalLexical, slot_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ uint32_t slot_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t slot)
+ : ICStubCompiler(cx, ICStub::GetName_GlobalLexical, Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ slot_(slot)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetName_GlobalLexical>(space, getStubCode(), firstMonitorStub_, slot_);
+ }
+ };
+};
+
+// Optimized GETNAME/CALLNAME stub, making a variable number of hops to get an
+// 'own' property off some env object. Unlike GETPROP on an object's
+// prototype, there is no teleporting optimization to take advantage of and
+// shape checks are required all along the env chain.
+template <size_t NumHops>
+class ICGetName_Env : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ static const size_t MAX_HOPS = 6;
+
+ mozilla::Array<GCPtrShape, NumHops + 1> shapes_;
+ uint32_t offset_;
+
+ ICGetName_Env(JitCode* stubCode, ICStub* firstMonitorStub,
+ Handle<ShapeVector> shapes, uint32_t offset);
+
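+    // Derive the stub kind from the hop count; this assumes GetName_Env0
+    // through GetName_Env6 (MAX_HOPS) are contiguous in the Kind enum.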
+ static Kind GetStubKind() {
+ return (Kind) (GetName_Env0 + NumHops);
+ }
+
+ public:
+ void traceEnvironments(JSTracer* trc) {
+ for (size_t i = 0; i < NumHops + 1; i++)
+ TraceEdge(trc, &shapes_[i], "baseline-env-stub-shape");
+ }
+
+ static size_t offsetOfShape(size_t index) {
+ MOZ_ASSERT(index <= NumHops);
+ return offsetof(ICGetName_Env, shapes_) + (index * sizeof(GCPtrShape));
+ }
+ static size_t offsetOfOffset() {
+ return offsetof(ICGetName_Env, offset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ Rooted<ShapeVector> shapes_;
+ bool isFixedSlot_;
+ uint32_t offset_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isFixedSlot_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub,
+ ShapeVector&& shapes, bool isFixedSlot, uint32_t offset)
+ : ICStubCompiler(cx, GetStubKind(), Engine::Baseline),
+ firstMonitorStub_(firstMonitorStub),
+ shapes_(cx, mozilla::Move(shapes)),
+ isFixedSlot_(isFixedSlot),
+ offset_(offset)
+ {
+ }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetName_Env>(space, getStubCode(), firstMonitorStub_, shapes_,
+ offset_);
+ }
+ };
+};
+
+// BindName
+// JSOP_BINDNAME
+class ICBindName_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICBindName_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::BindName_Fallback, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::BindName_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBindName_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// GetIntrinsic
+// JSOP_GETINTRINSIC
+class ICGetIntrinsic_Fallback : public ICMonitoredFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetIntrinsic_Fallback(JitCode* stubCode)
+ : ICMonitoredFallbackStub(ICStub::GetIntrinsic_Fallback, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::GetIntrinsic_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ ICGetIntrinsic_Fallback* stub =
+ newStub<ICGetIntrinsic_Fallback>(space, getStubCode());
+ if (!stub || !stub->initMonitoringChain(cx, space, engine_))
+ return nullptr;
+ return stub;
+ }
+ };
+};
+
+// Stub that loads the constant result of a GETINTRINSIC operation.
+class ICGetIntrinsic_Constant : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrValue value_;
+
+ ICGetIntrinsic_Constant(JitCode* stubCode, const Value& value);
+ ~ICGetIntrinsic_Constant();
+
+ public:
+ GCPtrValue& value() {
+ return value_;
+ }
+ static size_t offsetOfValue() {
+ return offsetof(ICGetIntrinsic_Constant, value_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ HandleValue value_;
+
+ public:
+ Compiler(JSContext* cx, HandleValue value)
+ : ICStubCompiler(cx, ICStub::GetIntrinsic_Constant, Engine::Baseline),
+ value_(value)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetIntrinsic_Constant>(space, getStubCode(), value_);
+ }
+ };
+};
+
+// SetProp
+// JSOP_SETPROP
+// JSOP_SETNAME
+// JSOP_SETGNAME
+// JSOP_INITPROP
+
+class ICSetProp_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICSetProp_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::SetProp_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0;
+ void noteUnoptimizableAccess() {
+ extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+
+ class Compiler : public ICStubCompiler {
+ public:
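+        // Precomputed copy of this compiler's key, presumably so the
+        // fallback stub code can be looked up without constructing a
+        // Compiler; it must stay in sync with ICStubCompiler's key encoding.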
+ static const int32_t BASELINE_KEY =
+ (static_cast<int32_t>(Engine::Baseline)) |
+ (static_cast<int32_t>(ICStub::SetProp_Fallback) << 1);
+
+ protected:
+ uint32_t returnOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+ void postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::SetProp_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICSetProp_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// Optimized SETPROP/SETGNAME/SETNAME stub.
+class ICSetProp_Native : public ICUpdatedStub
+{
+ friend class ICStubSpace;
+
+ protected: // Protected to silence Clang warning.
+ GCPtrObjectGroup group_;
+ GCPtrShape shape_;
+ uint32_t offset_;
+
+ ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape, uint32_t offset);
+
+ public:
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ void notePreliminaryObject() {
+ extra_ = 1;
+ }
+ bool hasPreliminaryObject() const {
+ return extra_;
+ }
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetProp_Native, group_);
+ }
+ static size_t offsetOfShape() {
+ return offsetof(ICSetProp_Native, shape_);
+ }
+ static size_t offsetOfOffset() {
+ return offsetof(ICSetProp_Native, offset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedObject obj_;
+ bool isFixedSlot_;
+ uint32_t offset_;
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isFixedSlot_) << 17) |
+ (static_cast<int32_t>(obj_->is<UnboxedPlainObject>()) << 18);
+ }
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObject obj, bool isFixedSlot, uint32_t offset)
+ : ICStubCompiler(cx, ICStub::SetProp_Native, Engine::Baseline),
+ obj_(cx, obj),
+ isFixedSlot_(isFixedSlot),
+ offset_(offset)
+ {}
+
+ ICSetProp_Native* getStub(ICStubSpace* space);
+ };
+};
+
+
+template <size_t ProtoChainDepth> class ICSetProp_NativeAddImpl;
+
+class ICSetProp_NativeAdd : public ICUpdatedStub
+{
+ public:
+ static const size_t MAX_PROTO_CHAIN_DEPTH = 4;
+
+ protected: // Protected to silence Clang warning.
+ GCPtrObjectGroup group_;
+ GCPtrShape newShape_;
+ GCPtrObjectGroup newGroup_;
+ uint32_t offset_;
+
+ ICSetProp_NativeAdd(JitCode* stubCode, ObjectGroup* group, size_t protoChainDepth,
+ Shape* newShape, ObjectGroup* newGroup, uint32_t offset);
+
+ public:
+ size_t protoChainDepth() const {
+ return extra_;
+ }
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+ GCPtrShape& newShape() {
+ return newShape_;
+ }
+ GCPtrObjectGroup& newGroup() {
+ return newGroup_;
+ }
+
+ template <size_t ProtoChainDepth>
+ ICSetProp_NativeAddImpl<ProtoChainDepth>* toImpl() {
+ MOZ_ASSERT(ProtoChainDepth == protoChainDepth());
+ return static_cast<ICSetProp_NativeAddImpl<ProtoChainDepth>*>(this);
+ }
+
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetProp_NativeAdd, group_);
+ }
+ static size_t offsetOfNewShape() {
+ return offsetof(ICSetProp_NativeAdd, newShape_);
+ }
+ static size_t offsetOfNewGroup() {
+ return offsetof(ICSetProp_NativeAdd, newGroup_);
+ }
+ static size_t offsetOfOffset() {
+ return offsetof(ICSetProp_NativeAdd, offset_);
+ }
+};
+
+template <size_t ProtoChainDepth>
+class ICSetProp_NativeAddImpl : public ICSetProp_NativeAdd
+{
+ friend class ICStubSpace;
+
+ static const size_t NumShapes = ProtoChainDepth + 1;
+ mozilla::Array<GCPtrShape, NumShapes> shapes_;
+
+ ICSetProp_NativeAddImpl(JitCode* stubCode, ObjectGroup* group,
+ Handle<ShapeVector> shapes,
+ Shape* newShape, ObjectGroup* newGroup, uint32_t offset);
+
+ public:
+ void traceShapes(JSTracer* trc) {
+ for (size_t i = 0; i < NumShapes; i++)
+ TraceEdge(trc, &shapes_[i], "baseline-setpropnativeadd-stub-shape");
+ }
+
+ static size_t offsetOfShape(size_t idx) {
+ return offsetof(ICSetProp_NativeAddImpl, shapes_) + (idx * sizeof(GCPtrShape));
+ }
+};
+
+class ICSetPropNativeAddCompiler : public ICStubCompiler
+{
+ RootedObject obj_;
+ RootedShape oldShape_;
+ RootedObjectGroup oldGroup_;
+ size_t protoChainDepth_;
+ bool isFixedSlot_;
+ uint32_t offset_;
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isFixedSlot_) << 17) |
+ (static_cast<int32_t>(obj_->is<UnboxedPlainObject>()) << 18) |
+ (static_cast<int32_t>(protoChainDepth_) << 19);
+ }
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ ICSetPropNativeAddCompiler(JSContext* cx, HandleObject obj,
+ HandleShape oldShape, HandleObjectGroup oldGroup,
+ size_t protoChainDepth, bool isFixedSlot, uint32_t offset);
+
+ template <size_t ProtoChainDepth>
+ ICUpdatedStub* getStubSpecific(ICStubSpace* space, Handle<ShapeVector> shapes)
+ {
+ RootedObjectGroup newGroup(cx, obj_->getGroup(cx));
+ if (!newGroup)
+ return nullptr;
+
+ // Only specify newGroup when the object's group changes due to the
+ // object becoming fully initialized per the acquired properties
+ // analysis.
+ if (newGroup == oldGroup_)
+ newGroup = nullptr;
+
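+        // The new shape is the receiver's current last property; for unboxed
+        // plain objects the added property lives on the expando object.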
+ RootedShape newShape(cx);
+ if (obj_->isNative())
+ newShape = obj_->as<NativeObject>().lastProperty();
+ else
+ newShape = obj_->as<UnboxedPlainObject>().maybeExpando()->lastProperty();
+
+ return newStub<ICSetProp_NativeAddImpl<ProtoChainDepth>>(
+ space, getStubCode(), oldGroup_, shapes, newShape, newGroup, offset_);
+ }
+
+ ICUpdatedStub* getStub(ICStubSpace* space);
+};
+
+class ICSetProp_Unboxed : public ICUpdatedStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObjectGroup group_;
+ uint32_t fieldOffset_;
+
+ ICSetProp_Unboxed(JitCode* stubCode, ObjectGroup* group, uint32_t fieldOffset)
+ : ICUpdatedStub(ICStub::SetProp_Unboxed, stubCode),
+ group_(group),
+ fieldOffset_(fieldOffset)
+ {
+ (void) fieldOffset_; // Silence clang warning
+ }
+
+ public:
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetProp_Unboxed, group_);
+ }
+ static size_t offsetOfFieldOffset() {
+ return offsetof(ICSetProp_Unboxed, fieldOffset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ RootedObjectGroup group_;
+ uint32_t fieldOffset_;
+ JSValueType fieldType_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(fieldType_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, ObjectGroup* group, uint32_t fieldOffset,
+ JSValueType fieldType)
+ : ICStubCompiler(cx, ICStub::SetProp_Unboxed, Engine::Baseline),
+ group_(cx, group),
+ fieldOffset_(fieldOffset),
+ fieldType_(fieldType)
+ {}
+
+ ICUpdatedStub* getStub(ICStubSpace* space) {
+ ICUpdatedStub* stub = newStub<ICSetProp_Unboxed>(space, getStubCode(), group_,
+ fieldOffset_);
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+ }
+
+ bool needsUpdateStubs() {
+ return fieldType_ == JSVAL_TYPE_OBJECT;
+ }
+ };
+};
+
+class ICSetProp_TypedObject : public ICUpdatedStub
+{
+ friend class ICStubSpace;
+
+ GCPtrShape shape_;
+ GCPtrObjectGroup group_;
+ uint32_t fieldOffset_;
+ bool isObjectReference_;
+
+ ICSetProp_TypedObject(JitCode* stubCode, Shape* shape, ObjectGroup* group,
+ uint32_t fieldOffset, bool isObjectReference)
+ : ICUpdatedStub(ICStub::SetProp_TypedObject, stubCode),
+ shape_(shape),
+ group_(group),
+ fieldOffset_(fieldOffset),
+ isObjectReference_(isObjectReference)
+ {
+ (void) fieldOffset_; // Silence clang warning
+ }
+
+ public:
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+ bool isObjectReference() {
+ return isObjectReference_;
+ }
+
+ static size_t offsetOfShape() {
+ return offsetof(ICSetProp_TypedObject, shape_);
+ }
+ static size_t offsetOfGroup() {
+ return offsetof(ICSetProp_TypedObject, group_);
+ }
+ static size_t offsetOfFieldOffset() {
+ return offsetof(ICSetProp_TypedObject, fieldOffset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ RootedShape shape_;
+ RootedObjectGroup group_;
+ uint32_t fieldOffset_;
+ TypedThingLayout layout_;
+ Rooted<SimpleTypeDescr*> fieldDescr_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(SimpleTypeDescrKey(fieldDescr_)) << 17) |
+ (static_cast<int32_t>(layout_) << 25);
+ }
+
+ public:
+ Compiler(JSContext* cx, Shape* shape, ObjectGroup* group, uint32_t fieldOffset,
+ SimpleTypeDescr* fieldDescr)
+ : ICStubCompiler(cx, ICStub::SetProp_TypedObject, Engine::Baseline),
+ shape_(cx, shape),
+ group_(cx, group),
+ fieldOffset_(fieldOffset),
+ layout_(GetTypedThingLayout(shape->getObjectClass())),
+ fieldDescr_(cx, fieldDescr)
+ {}
+
+ ICUpdatedStub* getStub(ICStubSpace* space) {
+ bool isObjectReference =
+ fieldDescr_->is<ReferenceTypeDescr>() &&
+ fieldDescr_->as<ReferenceTypeDescr>().type() == ReferenceTypeDescr::TYPE_OBJECT;
+ ICUpdatedStub* stub = newStub<ICSetProp_TypedObject>(space, getStubCode(), shape_,
+ group_, fieldOffset_,
+ isObjectReference);
+ if (!stub || !stub->initUpdatingChain(cx, space))
+ return nullptr;
+ return stub;
+ }
+
+ bool needsUpdateStubs() {
+ return fieldDescr_->is<ReferenceTypeDescr>() &&
+ fieldDescr_->as<ReferenceTypeDescr>().type() != ReferenceTypeDescr::TYPE_STRING;
+ }
+ };
+};
+
+// Base stub for calling a setter on a native or unboxed object.
+class ICSetPropCallSetter : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ // Shape/group of receiver object. Used for both own and proto setters.
+ HeapReceiverGuard receiverGuard_;
+
+ // Holder and holder shape. For own setters, guarding on receiverGuard_ is
+ // sufficient, although Ion may use holder_ and holderShape_ even for own
+ // setters. In this case holderShape_ == receiverGuard_.shape_ (isOwnSetter
+ // below relies on this).
+ GCPtrObject holder_;
+ GCPtrShape holderShape_;
+
+ // Function to call.
+ GCPtrFunction setter_;
+
+    // PC offset of the call, used by the profiler.
+ uint32_t pcOffset_;
+
+ ICSetPropCallSetter(Kind kind, JitCode* stubCode, ReceiverGuard receiverGuard,
+ JSObject* holder, Shape* holderShape, JSFunction* setter,
+ uint32_t pcOffset);
+
+ public:
+ HeapReceiverGuard& receiverGuard() {
+ return receiverGuard_;
+ }
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ GCPtrFunction& setter() {
+ return setter_;
+ }
+
+ bool isOwnSetter() const {
+ MOZ_ASSERT(holder_->isNative());
+ MOZ_ASSERT(holderShape_);
+ return receiverGuard_.shape() == holderShape_;
+ }
+
+ static size_t offsetOfReceiverGuard() {
+ return offsetof(ICSetPropCallSetter, receiverGuard_);
+ }
+ static size_t offsetOfHolder() {
+ return offsetof(ICSetPropCallSetter, holder_);
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICSetPropCallSetter, holderShape_);
+ }
+ static size_t offsetOfSetter() {
+ return offsetof(ICSetPropCallSetter, setter_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICSetPropCallSetter, pcOffset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ RootedObject receiver_;
+ RootedObject holder_;
+ RootedFunction setter_;
+ uint32_t pcOffset_;
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (HeapReceiverGuard::keyBits(receiver_) << 17) |
+ (static_cast<int32_t>(receiver_ != holder_) << 20);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub::Kind kind, HandleObject receiver, HandleObject holder,
+ HandleFunction setter, uint32_t pcOffset)
+ : ICStubCompiler(cx, kind, Engine::Baseline),
+ receiver_(cx, receiver),
+ holder_(cx, holder),
+ setter_(cx, setter),
+ pcOffset_(pcOffset)
+ {
+ MOZ_ASSERT(kind == ICStub::SetProp_CallScripted || kind == ICStub::SetProp_CallNative);
+ }
+ };
+};
+
+// Stub for calling a scripted setter on a native object.
+class ICSetProp_CallScripted : public ICSetPropCallSetter
+{
+ friend class ICStubSpace;
+
+ protected:
+ ICSetProp_CallScripted(JitCode* stubCode, ReceiverGuard guard, JSObject* holder,
+ Shape* holderShape, JSFunction* setter, uint32_t pcOffset)
+ : ICSetPropCallSetter(SetProp_CallScripted, stubCode, guard, holder, holderShape,
+ setter, pcOffset)
+ {}
+
+ public:
+ static ICSetProp_CallScripted* Clone(JSContext* cx, ICStubSpace* space, ICStub*,
+ ICSetProp_CallScripted& other);
+
+ class Compiler : public ICSetPropCallSetter::Compiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObject obj, HandleObject holder, HandleFunction setter,
+ uint32_t pcOffset)
+ : ICSetPropCallSetter::Compiler(cx, ICStub::SetProp_CallScripted,
+ obj, holder, setter, pcOffset)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ ReceiverGuard guard(receiver_);
+ Shape* holderShape = holder_->as<NativeObject>().lastProperty();
+ return newStub<ICSetProp_CallScripted>(space, getStubCode(), guard, holder_,
+ holderShape, setter_, pcOffset_);
+ }
+ };
+};
+
+// Stub for calling a native setter on a native object.
+class ICSetProp_CallNative : public ICSetPropCallSetter
+{
+ friend class ICStubSpace;
+
+ protected:
+ ICSetProp_CallNative(JitCode* stubCode, ReceiverGuard guard, JSObject* holder,
+ Shape* holderShape, JSFunction* setter, uint32_t pcOffset)
+ : ICSetPropCallSetter(SetProp_CallNative, stubCode, guard, holder, holderShape,
+ setter, pcOffset)
+ {}
+
+ public:
+ static ICSetProp_CallNative* Clone(JSContext* cx,
+ ICStubSpace* space, ICStub*,
+ ICSetProp_CallNative& other);
+
+ class Compiler : public ICSetPropCallSetter::Compiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObject obj, HandleObject holder, HandleFunction setter,
+ uint32_t pcOffset)
+ : ICSetPropCallSetter::Compiler(cx, ICStub::SetProp_CallNative,
+ obj, holder, setter, pcOffset)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ ReceiverGuard guard(receiver_);
+ Shape* holderShape = holder_->as<NativeObject>().lastProperty();
+ return newStub<ICSetProp_CallNative>(space, getStubCode(), guard, holder_, holderShape,
+ setter_, pcOffset_);
+ }
+ };
+};
+
+// Call
+// JSOP_CALL
+// JSOP_FUNAPPLY
+// JSOP_FUNCALL
+// JSOP_NEW
+// JSOP_SPREADCALL
+// JSOP_SPREADNEW
+// JSOP_SPREADEVAL
+
+class ICCallStubCompiler : public ICStubCompiler
+{
+ protected:
+ ICCallStubCompiler(JSContext* cx, ICStub::Kind kind)
+ : ICStubCompiler(cx, kind, Engine::Baseline)
+ { }
+
+ enum FunApplyThing {
+ FunApply_MagicArgs,
+ FunApply_Array
+ };
+
+ void pushCallArguments(MacroAssembler& masm, AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool isJitCall, bool isConstructing = false);
+ void pushSpreadCallArguments(MacroAssembler& masm, AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool isJitCall, bool isConstructing);
+ void guardSpreadCall(MacroAssembler& masm, Register argcReg, Label* failure,
+ bool isConstructing);
+ Register guardFunApply(MacroAssembler& masm, AllocatableGeneralRegisterSet regs,
+ Register argcReg, bool checkNative, FunApplyThing applyThing,
+ Label* failure);
+ void pushCallerArguments(MacroAssembler& masm, AllocatableGeneralRegisterSet regs);
+ void pushArrayArguments(MacroAssembler& masm, Address arrayVal,
+ AllocatableGeneralRegisterSet regs);
+};
+
+class ICCall_Fallback : public ICMonitoredFallbackStub
+{
+ friend class ICStubSpace;
+ public:
+ static const unsigned UNOPTIMIZABLE_CALL_FLAG = 0x1;
+
+ static const uint32_t MAX_OPTIMIZED_STUBS = 16;
+ static const uint32_t MAX_SCRIPTED_STUBS = 7;
+ static const uint32_t MAX_NATIVE_STUBS = 7;
+ private:
+
+ explicit ICCall_Fallback(JitCode* stubCode)
+ : ICMonitoredFallbackStub(ICStub::Call_Fallback, stubCode)
+ { }
+
+ public:
+ void noteUnoptimizableCall() {
+ extra_ |= UNOPTIMIZABLE_CALL_FLAG;
+ }
+ bool hadUnoptimizableCall() const {
+ return extra_ & UNOPTIMIZABLE_CALL_FLAG;
+ }
+
+ unsigned scriptedStubCount() const {
+ return numStubsWithKind(Call_Scripted);
+ }
+ bool scriptedStubsAreGeneralized() const {
+ return hasStub(Call_AnyScripted);
+ }
+
+ unsigned nativeStubCount() const {
+ return numStubsWithKind(Call_Native);
+ }
+ bool nativeStubsAreGeneralized() const {
+ // Return hasStub(Call_AnyNative) after Call_AnyNative stub is added.
+ return false;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ public:
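+        // Precomputed keys for the non-spread call and construct cases,
+        // presumably so the fallback stub code can be looked up without a
+        // Compiler; they must stay in sync with getKey() below.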
+ static const int32_t BASELINE_CALL_KEY =
+ (static_cast<int32_t>(Engine::Baseline)) |
+ (static_cast<int32_t>(ICStub::Call_Fallback) << 1) |
+ (0 << 17) | // spread
+ (0 << 18); // constructing
+ static const int32_t BASELINE_CONSTRUCT_KEY =
+ (static_cast<int32_t>(Engine::Baseline)) |
+ (static_cast<int32_t>(ICStub::Call_Fallback) << 1) |
+ (0 << 17) | // spread
+ (1 << 18); // constructing
+
+ protected:
+ bool isConstructing_;
+ bool isSpread_;
+ uint32_t returnOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+ void postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isSpread_) << 17) |
+ (static_cast<int32_t>(isConstructing_) << 18);
+ }
+
+ public:
+ Compiler(JSContext* cx, bool isConstructing, bool isSpread)
+ : ICCallStubCompiler(cx, ICStub::Call_Fallback),
+ isConstructing_(isConstructing),
+ isSpread_(isSpread)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ ICCall_Fallback* stub = newStub<ICCall_Fallback>(space, getStubCode());
+ if (!stub || !stub->initMonitoringChain(cx, space, engine_))
+ return nullptr;
+ return stub;
+ }
+ };
+};
+
+class ICCall_Scripted : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+ public:
+ // The maximum number of inlineable spread call arguments. Keep this small
+ // to avoid controllable stack overflows by attackers passing large arrays
+    // to a spread call. This value is shared with ICCall_Native.
+ static const uint32_t MAX_ARGS_SPREAD_LENGTH = 16;
+
+ protected:
+ GCPtrFunction callee_;
+ GCPtrObject templateObject_;
+ uint32_t pcOffset_;
+
+ ICCall_Scripted(JitCode* stubCode, ICStub* firstMonitorStub,
+ JSFunction* callee, JSObject* templateObject,
+ uint32_t pcOffset);
+
+ public:
+ static ICCall_Scripted* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_Scripted& other);
+
+ GCPtrFunction& callee() {
+ return callee_;
+ }
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ static size_t offsetOfCallee() {
+ return offsetof(ICCall_Scripted, callee_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_Scripted, pcOffset_);
+ }
+};
+
+class ICCall_AnyScripted : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ uint32_t pcOffset_;
+
+ ICCall_AnyScripted(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_AnyScripted, stubCode, firstMonitorStub),
+ pcOffset_(pcOffset)
+ { }
+
+ public:
+ static ICCall_AnyScripted* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_AnyScripted& other);
+
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_AnyScripted, pcOffset_);
+ }
+};
+
+// Compiler for Call_Scripted and Call_AnyScripted stubs.
+class ICCallScriptedCompiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ bool isConstructing_;
+ bool isSpread_;
+ RootedFunction callee_;
+ RootedObject templateObject_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isConstructing_) << 17) |
+ (static_cast<int32_t>(isSpread_) << 18);
+ }
+
+ public:
+ ICCallScriptedCompiler(JSContext* cx, ICStub* firstMonitorStub,
+ JSFunction* callee, JSObject* templateObject,
+ bool isConstructing, bool isSpread, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_Scripted),
+ firstMonitorStub_(firstMonitorStub),
+ isConstructing_(isConstructing),
+ isSpread_(isSpread),
+ callee_(cx, callee),
+ templateObject_(cx, templateObject),
+ pcOffset_(pcOffset)
+ { }
+
+ ICCallScriptedCompiler(JSContext* cx, ICStub* firstMonitorStub, bool isConstructing,
+ bool isSpread, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_AnyScripted),
+ firstMonitorStub_(firstMonitorStub),
+ isConstructing_(isConstructing),
+ isSpread_(isSpread),
+ callee_(cx, nullptr),
+ templateObject_(cx, nullptr),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ if (callee_) {
+ return newStub<ICCall_Scripted>(space, getStubCode(), firstMonitorStub_, callee_,
+ templateObject_, pcOffset_);
+ }
+ return newStub<ICCall_AnyScripted>(space, getStubCode(), firstMonitorStub_, pcOffset_);
+ }
+};
+
+class ICCall_Native : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ GCPtrFunction callee_;
+ GCPtrObject templateObject_;
+ uint32_t pcOffset_;
+
+#ifdef JS_SIMULATOR
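+    // Raw native entry point, stored separately in simulator builds
+    // (presumably because the simulator redirects native calls).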
+ void *native_;
+#endif
+
+ ICCall_Native(JitCode* stubCode, ICStub* firstMonitorStub,
+ JSFunction* callee, JSObject* templateObject,
+ uint32_t pcOffset);
+
+ public:
+ static ICCall_Native* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_Native& other);
+
+ GCPtrFunction& callee() {
+ return callee_;
+ }
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ static size_t offsetOfCallee() {
+ return offsetof(ICCall_Native, callee_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_Native, pcOffset_);
+ }
+
+#ifdef JS_SIMULATOR
+ static size_t offsetOfNative() {
+ return offsetof(ICCall_Native, native_);
+ }
+#endif
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ bool isConstructing_;
+ bool isSpread_;
+ RootedFunction callee_;
+ RootedObject templateObject_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isConstructing_) << 17) |
+ (static_cast<int32_t>(isSpread_) << 18);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub,
+ HandleFunction callee, HandleObject templateObject,
+ bool isConstructing, bool isSpread, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_Native),
+ firstMonitorStub_(firstMonitorStub),
+ isConstructing_(isConstructing),
+ isSpread_(isSpread),
+ callee_(cx, callee),
+ templateObject_(cx, templateObject),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_Native>(space, getStubCode(), firstMonitorStub_, callee_,
+ templateObject_, pcOffset_);
+ }
+ };
+};
+
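+// Handles calls to non-function objects whose Class provides a call or
+// construct hook.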
+class ICCall_ClassHook : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ const Class* clasp_;
+ void* native_;
+ GCPtrObject templateObject_;
+ uint32_t pcOffset_;
+
+ ICCall_ClassHook(JitCode* stubCode, ICStub* firstMonitorStub,
+ const Class* clasp, Native native, JSObject* templateObject,
+ uint32_t pcOffset);
+
+ public:
+ static ICCall_ClassHook* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICCall_ClassHook& other);
+
+ const Class* clasp() {
+ return clasp_;
+ }
+ void* native() {
+ return native_;
+ }
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ static size_t offsetOfClass() {
+ return offsetof(ICCall_ClassHook, clasp_);
+ }
+ static size_t offsetOfNative() {
+ return offsetof(ICCall_ClassHook, native_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_ClassHook, pcOffset_);
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ bool isConstructing_;
+ const Class* clasp_;
+ Native native_;
+ RootedObject templateObject_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isConstructing_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub,
+ const Class* clasp, Native native,
+ HandleObject templateObject, uint32_t pcOffset,
+ bool isConstructing)
+ : ICCallStubCompiler(cx, ICStub::Call_ClassHook),
+ firstMonitorStub_(firstMonitorStub),
+ isConstructing_(isConstructing),
+ clasp_(clasp),
+ native_(native),
+ templateObject_(cx, templateObject),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_ClassHook>(space, getStubCode(), firstMonitorStub_, clasp_,
+ native_, templateObject_, pcOffset_);
+ }
+ };
+};
+
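+// Handles calls of the form |fun.apply(thisv, array)| where fun is a scripted
+// function and the second argument is a dense array.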
+class ICCall_ScriptedApplyArray : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+ public:
+    // The maximum length of an inlineable fun.apply array argument.
+    // Keep this small to avoid controllable stack overflows by attackers passing
+    // large arrays to fun.apply.
+ static const uint32_t MAX_ARGS_ARRAY_LENGTH = 16;
+
+ protected:
+ uint32_t pcOffset_;
+
+ ICCall_ScriptedApplyArray(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_ScriptedApplyArray, stubCode, firstMonitorStub),
+ pcOffset_(pcOffset)
+ {}
+
+ public:
+ static ICCall_ScriptedApplyArray* Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICCall_ScriptedApplyArray& other);
+
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_ScriptedApplyArray, pcOffset_);
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_ScriptedApplyArray),
+ firstMonitorStub_(firstMonitorStub),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_ScriptedApplyArray>(space, getStubCode(), firstMonitorStub_,
+ pcOffset_);
+ }
+ };
+};
+
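+// Handles calls of the form |fun.apply(thisv, arguments)| where fun is a
+// scripted function.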
+class ICCall_ScriptedApplyArguments : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ uint32_t pcOffset_;
+
+ ICCall_ScriptedApplyArguments(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_ScriptedApplyArguments, stubCode, firstMonitorStub),
+ pcOffset_(pcOffset)
+ {}
+
+ public:
+ static ICCall_ScriptedApplyArguments* Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICCall_ScriptedApplyArguments& other);
+
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_ScriptedApplyArguments, pcOffset_);
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_ScriptedApplyArguments),
+ firstMonitorStub_(firstMonitorStub),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_ScriptedApplyArguments>(space, getStubCode(), firstMonitorStub_,
+ pcOffset_);
+ }
+ };
+};
+
+// Handles calls of the form |fun.call(...)| where fun is a scripted function.
+class ICCall_ScriptedFunCall : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ uint32_t pcOffset_;
+
+ ICCall_ScriptedFunCall(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::Call_ScriptedFunCall, stubCode, firstMonitorStub),
+ pcOffset_(pcOffset)
+ {}
+
+ public:
+ static ICCall_ScriptedFunCall* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub, ICCall_ScriptedFunCall& other);
+
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICCall_ScriptedFunCall, pcOffset_);
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ uint32_t pcOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset)
+ : ICCallStubCompiler(cx, ICStub::Call_ScriptedFunCall),
+ firstMonitorStub_(firstMonitorStub),
+ pcOffset_(pcOffset)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_ScriptedFunCall>(space, getStubCode(), firstMonitorStub_,
+ pcOffset_);
+ }
+ };
+};
+
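+// Handles constant |str.split(sep)| calls by caching the expected string and
+// separator together with a template object for the resulting array.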
+class ICCall_StringSplit : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ uint32_t pcOffset_;
+ GCPtrString expectedStr_;
+ GCPtrString expectedSep_;
+ GCPtrObject templateObject_;
+
+ ICCall_StringSplit(JitCode* stubCode, ICStub* firstMonitorStub, uint32_t pcOffset, JSString* str,
+ JSString* sep, JSObject* templateObject)
+ : ICMonitoredStub(ICStub::Call_StringSplit, stubCode, firstMonitorStub),
+ pcOffset_(pcOffset), expectedStr_(str), expectedSep_(sep),
+ templateObject_(templateObject)
+ { }
+
+ public:
+ static size_t offsetOfExpectedStr() {
+ return offsetof(ICCall_StringSplit, expectedStr_);
+ }
+
+ static size_t offsetOfExpectedSep() {
+ return offsetof(ICCall_StringSplit, expectedSep_);
+ }
+
+ static size_t offsetOfTemplateObject() {
+ return offsetof(ICCall_StringSplit, templateObject_);
+ }
+
+ GCPtrString& expectedStr() {
+ return expectedStr_;
+ }
+
+ GCPtrString& expectedSep() {
+ return expectedSep_;
+ }
+
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ class Compiler : public ICCallStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ uint32_t pcOffset_;
+ RootedString expectedStr_;
+ RootedString expectedSep_;
+ RootedObject templateObject_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset, HandleString str,
+ HandleString sep, HandleValue templateObject)
+ : ICCallStubCompiler(cx, ICStub::Call_StringSplit),
+ firstMonitorStub_(firstMonitorStub),
+ pcOffset_(pcOffset),
+ expectedStr_(cx, str),
+ expectedSep_(cx, sep),
+ templateObject_(cx, &templateObject.toObject())
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_StringSplit>(space, getStubCode(), firstMonitorStub_, pcOffset_,
+ expectedStr_, expectedSep_, templateObject_);
+ }
+ };
+};
+
+class ICCall_IsSuspendedStarGenerator : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ explicit ICCall_IsSuspendedStarGenerator(JitCode* stubCode)
+ : ICStub(ICStub::Call_IsSuspendedStarGenerator, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::Call_IsSuspendedStarGenerator, Engine::Baseline)
+ {}
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCall_IsSuspendedStarGenerator>(space, getStubCode());
+ }
+ };
+};
+
+// Stub for performing a TableSwitch, updating the IC's return address to jump
+// to whatever point the switch is branching to.
+class ICTableSwitch : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected: // Protected to silence Clang warning.
+ void** table_;
+ int32_t min_;
+ int32_t length_;
+ void* defaultTarget_;
+
+ ICTableSwitch(JitCode* stubCode, void** table,
+ int32_t min, int32_t length, void* defaultTarget)
+ : ICStub(TableSwitch, stubCode), table_(table),
+ min_(min), length_(length), defaultTarget_(defaultTarget)
+ {}
+
+ public:
+ void fixupJumpTable(JSScript* script, BaselineScript* baseline);
+
+ class Compiler : public ICStubCompiler {
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ jsbytecode* pc_;
+
+ public:
+ Compiler(JSContext* cx, jsbytecode* pc)
+ : ICStubCompiler(cx, ICStub::TableSwitch, Engine::Baseline), pc_(pc)
+ {}
+
+ ICStub* getStub(ICStubSpace* space);
+ };
+};
+
+// IC for constructing an iterator from an input value.
+class ICIteratorNew_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICIteratorNew_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::IteratorNew_Fallback, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::IteratorNew_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIteratorNew_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// IC for testing if there are more values in an iterator.
+class ICIteratorMore_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICIteratorMore_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::IteratorMore_Fallback, stubCode)
+ { }
+
+ public:
+ void setHasNonStringResult() {
+ extra_ = 1;
+ }
+ bool hasNonStringResult() const {
+ MOZ_ASSERT(extra_ <= 1);
+ return extra_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::IteratorMore_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIteratorMore_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// IC for testing if there are more values in a native iterator.
+class ICIteratorMore_Native : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICIteratorMore_Native(JitCode* stubCode)
+ : ICStub(ICStub::IteratorMore_Native, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::IteratorMore_Native, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIteratorMore_Native>(space, getStubCode());
+ }
+ };
+};
+
+// IC for closing an iterator.
+class ICIteratorClose_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICIteratorClose_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::IteratorClose_Fallback, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::IteratorClose_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICIteratorClose_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// InstanceOf
+// JSOP_INSTANCEOF
+class ICInstanceOf_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICInstanceOf_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::InstanceOf_Fallback, stubCode)
+ { }
+
+ static const uint16_t UNOPTIMIZABLE_ACCESS_BIT = 0x1;
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 4;
+
+ void noteUnoptimizableAccess() {
+ extra_ |= UNOPTIMIZABLE_ACCESS_BIT;
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & UNOPTIMIZABLE_ACCESS_BIT;
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::InstanceOf_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICInstanceOf_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICInstanceOf_Function : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrShape shape_;
+ GCPtrObject prototypeObj_;
+ uint32_t slot_;
+
+ ICInstanceOf_Function(JitCode* stubCode, Shape* shape, JSObject* prototypeObj, uint32_t slot);
+
+ public:
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ GCPtrObject& prototypeObject() {
+ return prototypeObj_;
+ }
+ uint32_t slot() const {
+ return slot_;
+ }
+ static size_t offsetOfShape() {
+ return offsetof(ICInstanceOf_Function, shape_);
+ }
+ static size_t offsetOfPrototypeObject() {
+ return offsetof(ICInstanceOf_Function, prototypeObj_);
+ }
+ static size_t offsetOfSlot() {
+ return offsetof(ICInstanceOf_Function, slot_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ RootedShape shape_;
+ RootedObject prototypeObj_;
+ uint32_t slot_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Shape* shape, JSObject* prototypeObj, uint32_t slot)
+ : ICStubCompiler(cx, ICStub::InstanceOf_Function, Engine::Baseline),
+ shape_(cx, shape),
+ prototypeObj_(cx, prototypeObj),
+ slot_(slot)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICInstanceOf_Function>(space, getStubCode(), shape_, prototypeObj_,
+ slot_);
+ }
+ };
+};
+
+// TypeOf
+// JSOP_TYPEOF
+// JSOP_TYPEOFEXPR
+class ICTypeOf_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICTypeOf_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::TypeOf_Fallback, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::TypeOf_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICTypeOf_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICTypeOf_Typed : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ ICTypeOf_Typed(JitCode* stubCode, JSType type)
+ : ICFallbackStub(ICStub::TypeOf_Typed, stubCode)
+ {
+ extra_ = uint16_t(type);
+ MOZ_ASSERT(JSType(extra_) == type);
+ }
+
+ public:
+ JSType type() const {
+ return JSType(extra_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ JSType type_;
+ RootedString typeString_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(type_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, JSType type, HandleString string)
+ : ICStubCompiler(cx, ICStub::TypeOf_Typed, Engine::Baseline),
+ type_(type),
+ typeString_(cx, string)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICTypeOf_Typed>(space, getStubCode(), type_);
+ }
+ };
+};
+
+class ICRest_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ GCPtrArrayObject templateObject_;
+
+ ICRest_Fallback(JitCode* stubCode, ArrayObject* templateObject)
+ : ICFallbackStub(ICStub::Rest_Fallback, stubCode), templateObject_(templateObject)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ GCPtrArrayObject& templateObject() {
+ return templateObject_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ RootedArrayObject templateObject;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, ArrayObject* templateObject)
+ : ICStubCompiler(cx, ICStub::Rest_Fallback, Engine::Baseline),
+ templateObject(cx, templateObject)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICRest_Fallback>(space, getStubCode(), templateObject);
+ }
+ };
+};
+
+// Stub for JSOP_RETSUB ("returning" from a |finally| block).
+class ICRetSub_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICRetSub_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::RetSub_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx)
+ : ICStubCompiler(cx, ICStub::RetSub_Fallback, Engine::Baseline)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICRetSub_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+// Optimized JSOP_RETSUB stub. Every stub maps a single pc offset to its
+// native code address.
+class ICRetSub_Resume : public ICStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ uint32_t pcOffset_;
+ uint8_t* addr_;
+
+ ICRetSub_Resume(JitCode* stubCode, uint32_t pcOffset, uint8_t* addr)
+ : ICStub(ICStub::RetSub_Resume, stubCode),
+ pcOffset_(pcOffset),
+ addr_(addr)
+ { }
+
+ public:
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICRetSub_Resume, pcOffset_);
+ }
+ static size_t offsetOfAddr() {
+ return offsetof(ICRetSub_Resume, addr_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ uint32_t pcOffset_;
+ uint8_t* addr_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, uint32_t pcOffset, uint8_t* addr)
+ : ICStubCompiler(cx, ICStub::RetSub_Resume, Engine::Baseline),
+ pcOffset_(pcOffset),
+ addr_(addr)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICRetSub_Resume>(space, getStubCode(), pcOffset_, addr_);
+ }
+ };
+};
+
+inline bool
+IsCacheableDOMProxy(JSObject* obj)
+{
+ if (!obj->is<ProxyObject>())
+ return false;
+
+ const BaseProxyHandler* handler = obj->as<ProxyObject>().handler();
+ return handler->family() == GetDOMProxyHandlerFamily();
+}
+
+struct IonOsrTempData;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineIC_h */
diff --git a/js/src/jit/BaselineICList.h b/js/src/jit/BaselineICList.h
new file mode 100644
index 000000000..be1174396
--- /dev/null
+++ b/js/src/jit/BaselineICList.h
@@ -0,0 +1,123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineICList_h
+#define jit_BaselineICList_h
+
+namespace js {
+namespace jit {
+
+// List of IC stub kinds that can only run in Baseline.
+#define IC_BASELINE_STUB_KIND_LIST(_) \
+ _(WarmUpCounter_Fallback) \
+ \
+ _(TypeMonitor_Fallback) \
+ _(TypeMonitor_SingleObject) \
+ _(TypeMonitor_ObjectGroup) \
+ _(TypeMonitor_PrimitiveSet) \
+ \
+ _(TypeUpdate_Fallback) \
+ _(TypeUpdate_SingleObject) \
+ _(TypeUpdate_ObjectGroup) \
+ _(TypeUpdate_PrimitiveSet) \
+ \
+ _(NewArray_Fallback) \
+ _(NewObject_Fallback) \
+ _(NewObject_WithTemplate) \
+ \
+ _(ToBool_Fallback) \
+ _(ToBool_Int32) \
+ _(ToBool_String) \
+ _(ToBool_NullUndefined) \
+ _(ToBool_Double) \
+ _(ToBool_Object) \
+ \
+ _(ToNumber_Fallback) \
+ \
+ _(Call_Fallback) \
+ _(Call_Scripted) \
+ _(Call_AnyScripted) \
+ _(Call_Native) \
+ _(Call_ClassHook) \
+ _(Call_ScriptedApplyArray) \
+ _(Call_ScriptedApplyArguments) \
+ _(Call_ScriptedFunCall) \
+ _(Call_StringSplit) \
+ _(Call_IsSuspendedStarGenerator) \
+ \
+ _(GetElem_Fallback) \
+ _(GetElem_NativeSlotName) \
+ _(GetElem_NativeSlotSymbol) \
+ _(GetElem_NativePrototypeSlotName) \
+ _(GetElem_NativePrototypeSlotSymbol) \
+ _(GetElem_NativePrototypeCallNativeName) \
+ _(GetElem_NativePrototypeCallNativeSymbol) \
+ _(GetElem_NativePrototypeCallScriptedName) \
+ _(GetElem_NativePrototypeCallScriptedSymbol) \
+ _(GetElem_UnboxedPropertyName) \
+ _(GetElem_String) \
+ _(GetElem_Dense) \
+ _(GetElem_UnboxedArray) \
+ _(GetElem_TypedArray) \
+ _(GetElem_Arguments) \
+ \
+ _(SetElem_Fallback) \
+ _(SetElem_DenseOrUnboxedArray) \
+ _(SetElem_DenseOrUnboxedArrayAdd) \
+ _(SetElem_TypedArray) \
+ \
+ _(In_Fallback) \
+ _(In_Native) \
+ _(In_NativePrototype) \
+ _(In_NativeDoesNotExist) \
+ _(In_Dense) \
+ \
+ _(GetName_Fallback) \
+ _(GetName_GlobalLexical) \
+ _(GetName_Global) \
+ _(GetName_Env0) \
+ _(GetName_Env1) \
+ _(GetName_Env2) \
+ _(GetName_Env3) \
+ _(GetName_Env4) \
+ _(GetName_Env5) \
+ _(GetName_Env6) \
+ \
+ _(BindName_Fallback) \
+ \
+ _(GetIntrinsic_Fallback) \
+ _(GetIntrinsic_Constant) \
+ \
+ _(SetProp_Fallback) \
+ _(SetProp_Native) \
+ _(SetProp_NativeAdd) \
+ _(SetProp_Unboxed) \
+ _(SetProp_TypedObject) \
+ _(SetProp_CallScripted) \
+ _(SetProp_CallNative) \
+ \
+ _(TableSwitch) \
+ \
+ _(IteratorNew_Fallback) \
+ _(IteratorMore_Fallback) \
+ _(IteratorMore_Native) \
+ _(IteratorClose_Fallback) \
+ \
+ _(InstanceOf_Fallback) \
+ _(InstanceOf_Function) \
+ \
+ _(TypeOf_Fallback) \
+ _(TypeOf_Typed) \
+ \
+ _(Rest_Fallback) \
+ \
+ _(RetSub_Fallback) \
+ _(RetSub_Resume)
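+
+// For illustration, a consumer could expand this list with an X-macro to
+// generate per-kind code; a hypothetical example that counts the kinds:
+//
+//   #define COUNT_KIND(kindName) + 1
+//   static const size_t NumBaselineOnlyKinds = 0 IC_BASELINE_STUB_KIND_LIST(COUNT_KIND);
+//   #undef COUNT_KIND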
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineICList_h */
diff --git a/js/src/jit/BaselineInspector.cpp b/js/src/jit/BaselineInspector.cpp
new file mode 100644
index 000000000..c9e09bed7
--- /dev/null
+++ b/js/src/jit/BaselineInspector.cpp
@@ -0,0 +1,924 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineInspector.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/BaselineCacheIR.h"
+#include "jit/BaselineIC.h"
+
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/ObjectGroup-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+bool
+SetElemICInspector::sawOOBDenseWrite() const
+{
+ if (!icEntry_)
+ return false;
+
+ // Check for an element adding stub.
+ for (ICStub* stub = icEntry_->firstStub(); stub; stub = stub->next()) {
+ if (stub->isSetElem_DenseOrUnboxedArrayAdd())
+ return true;
+ }
+
+ // Check for a write hole bit on the SetElem_Fallback stub.
+ ICStub* stub = icEntry_->fallbackStub();
+ if (stub->isSetElem_Fallback())
+ return stub->toSetElem_Fallback()->hasArrayWriteHole();
+
+ return false;
+}
+
+bool
+SetElemICInspector::sawOOBTypedArrayWrite() const
+{
+ if (!icEntry_)
+ return false;
+
+ // Check for SetElem_TypedArray stubs with expectOutOfBounds set.
+ for (ICStub* stub = icEntry_->firstStub(); stub; stub = stub->next()) {
+ if (!stub->isSetElem_TypedArray())
+ continue;
+ if (stub->toSetElem_TypedArray()->expectOutOfBounds())
+ return true;
+ }
+ return false;
+}
+
+bool
+SetElemICInspector::sawDenseWrite() const
+{
+ if (!icEntry_)
+ return false;
+
+    // Check for a SetElem_DenseOrUnboxedArrayAdd or SetElem_DenseOrUnboxedArray stub.
+ for (ICStub* stub = icEntry_->firstStub(); stub; stub = stub->next()) {
+ if (stub->isSetElem_DenseOrUnboxedArrayAdd() || stub->isSetElem_DenseOrUnboxedArray())
+ return true;
+ }
+ return false;
+}
+
+bool
+SetElemICInspector::sawTypedArrayWrite() const
+{
+ if (!icEntry_)
+ return false;
+
+ // Check for a SetElem_TypedArray stub.
+ for (ICStub* stub = icEntry_->firstStub(); stub; stub = stub->next()) {
+ if (stub->isSetElem_TypedArray())
+ return true;
+ }
+ return false;
+}
+
+template <typename S, typename T>
+static bool
+VectorAppendNoDuplicate(S& list, T value)
+{
+ for (size_t i = 0; i < list.length(); i++) {
+ if (list[i] == value)
+ return true;
+ }
+ return list.append(value);
+}
+
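+// Record a receiver guard seen by a stub. Unboxed groups that already have a
+// native group are routed into |convertUnboxedGroups| so Ion can convert their
+// instances eagerly.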
+static bool
+AddReceiver(const ReceiverGuard& receiver,
+ BaselineInspector::ReceiverVector& receivers,
+ BaselineInspector::ObjectGroupVector& convertUnboxedGroups)
+{
+ if (receiver.group && receiver.group->maybeUnboxedLayout()) {
+ if (receiver.group->unboxedLayout().nativeGroup())
+ return VectorAppendNoDuplicate(convertUnboxedGroups, receiver.group);
+ }
+ return VectorAppendNoDuplicate(receivers, receiver);
+}
+
+static bool
+GetCacheIRReceiverForNativeReadSlot(ICCacheIR_Monitored* stub, ReceiverGuard* receiver)
+{
+ // We match either:
+ //
+ // GuardIsObject 0
+ // GuardShape 0
+ // LoadFixedSlotResult 0 or LoadDynamicSlotResult 0
+ //
+ // or
+ //
+ // GuardIsObject 0
+ // GuardGroup 0
+ // 1: GuardAndLoadUnboxedExpando 0
+ // GuardShape 1
+ // LoadFixedSlotResult 1 or LoadDynamicSlotResult 1
+
+ *receiver = ReceiverGuard();
+ CacheIRReader reader(stub->stubInfo());
+
+ ObjOperandId objId = ObjOperandId(0);
+ if (!reader.matchOp(CacheOp::GuardIsObject, objId))
+ return false;
+
+ if (reader.matchOp(CacheOp::GuardGroup, objId)) {
+ receiver->group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
+
+ if (!reader.matchOp(CacheOp::GuardAndLoadUnboxedExpando, objId))
+ return false;
+ objId = reader.objOperandId();
+ }
+
+ if (reader.matchOp(CacheOp::GuardShape, objId)) {
+ receiver->shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
+ return reader.matchOpEither(CacheOp::LoadFixedSlotResult, CacheOp::LoadDynamicSlotResult);
+ }
+
+ return false;
+}
+
+static bool
+GetCacheIRReceiverForUnboxedProperty(ICCacheIR_Monitored* stub, ReceiverGuard* receiver)
+{
+ // We match:
+ //
+ // GuardIsObject 0
+ // GuardGroup 0
+ // LoadUnboxedPropertyResult 0 ..
+
+ *receiver = ReceiverGuard();
+ CacheIRReader reader(stub->stubInfo());
+
+ ObjOperandId objId = ObjOperandId(0);
+ if (!reader.matchOp(CacheOp::GuardIsObject, objId))
+ return false;
+
+ if (!reader.matchOp(CacheOp::GuardGroup, objId))
+ return false;
+ receiver->group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
+
+ return reader.matchOp(CacheOp::LoadUnboxedPropertyResult, objId);
+}
+
+bool
+BaselineInspector::maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups)
+{
+ // Return a list of the receivers seen by the baseline IC for the current
+ // op. Empty lists indicate no receivers are known, or there was an
+ // uncacheable access. convertUnboxedGroups is used for unboxed object
+ // groups which have been seen, but have had instances converted to native
+ // objects and should be eagerly converted by Ion.
+ MOZ_ASSERT(receivers.empty());
+ MOZ_ASSERT(convertUnboxedGroups.empty());
+
+ if (!hasBaselineScript())
+ return true;
+
+ MOZ_ASSERT(isValidPC(pc));
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ ICStub* stub = entry.firstStub();
+ while (stub->next()) {
+ ReceiverGuard receiver;
+ if (stub->isCacheIR_Monitored()) {
+ if (!GetCacheIRReceiverForNativeReadSlot(stub->toCacheIR_Monitored(), &receiver) &&
+ !GetCacheIRReceiverForUnboxedProperty(stub->toCacheIR_Monitored(), &receiver))
+ {
+ receivers.clear();
+ return true;
+ }
+ } else if (stub->isSetProp_Native()) {
+ receiver = ReceiverGuard(stub->toSetProp_Native()->group(),
+ stub->toSetProp_Native()->shape());
+ } else if (stub->isSetProp_Unboxed()) {
+ receiver = ReceiverGuard(stub->toSetProp_Unboxed()->group(), nullptr);
+ } else {
+ receivers.clear();
+ return true;
+ }
+
+ if (!AddReceiver(receiver, receivers, convertUnboxedGroups))
+ return false;
+
+ stub = stub->next();
+ }
+
+ if (stub->isGetProp_Fallback()) {
+ if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
+ receivers.clear();
+ } else {
+ if (stub->toSetProp_Fallback()->hadUnoptimizableAccess())
+ receivers.clear();
+ }
+
+ // Don't inline if there are more than 5 receivers.
+ if (receivers.length() > 5)
+ receivers.clear();
+
+ return true;
+}
+
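+// Return the single optimized stub attached at |pc|, or nullptr if the chain
+// does not consist of exactly one optimized stub followed by the fallback.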
+ICStub*
+BaselineInspector::monomorphicStub(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ ICStub* stub = entry.firstStub();
+ ICStub* next = stub->next();
+
+ if (!next || !next->isFallback())
+ return nullptr;
+
+ return stub;
+}
+
+bool
+BaselineInspector::dimorphicStub(jsbytecode* pc, ICStub** pfirst, ICStub** psecond)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ ICStub* stub = entry.firstStub();
+ ICStub* next = stub->next();
+ ICStub* after = next ? next->next() : nullptr;
+
+ if (!after || !after->isFallback())
+ return false;
+
+ *pfirst = stub;
+ *psecond = next;
+ return true;
+}
+
+MIRType
+BaselineInspector::expectedResultType(jsbytecode* pc)
+{
+    // Look at the IC entries for this op to guess what type it will produce;
+    // return MIRType::None if the type cannot be determined.
+
+ ICStub* stub = monomorphicStub(pc);
+ if (!stub)
+ return MIRType::None;
+
+ switch (stub->kind()) {
+ case ICStub::BinaryArith_Int32:
+ if (stub->toBinaryArith_Int32()->allowDouble())
+ return MIRType::Double;
+ return MIRType::Int32;
+ case ICStub::BinaryArith_BooleanWithInt32:
+ case ICStub::UnaryArith_Int32:
+ case ICStub::BinaryArith_DoubleWithInt32:
+ return MIRType::Int32;
+ case ICStub::BinaryArith_Double:
+ case ICStub::UnaryArith_Double:
+ return MIRType::Double;
+ case ICStub::BinaryArith_StringConcat:
+ case ICStub::BinaryArith_StringObjectConcat:
+ return MIRType::String;
+ default:
+ return MIRType::None;
+ }
+}
+
+// Whether a baseline stub kind is suitable for a double comparison that
+// converts its operands to doubles.
+static bool
+CanUseDoubleCompare(ICStub::Kind kind)
+{
+ return kind == ICStub::Compare_Double || kind == ICStub::Compare_NumberWithUndefined;
+}
+
+// Whether a baseline stub kind is suitable for an int32 comparison that
+// converts its operands to int32.
+static bool
+CanUseInt32Compare(ICStub::Kind kind)
+{
+ return kind == ICStub::Compare_Int32 || kind == ICStub::Compare_Int32WithBoolean;
+}
+
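+// Infer the compare specialization for |pc| from the one or two optimized
+// compare stubs Baseline attached, returning Compare_Unknown when the stubs
+// disagree or the fallback recorded an unoptimizable access.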
+MCompare::CompareType
+BaselineInspector::expectedCompareType(jsbytecode* pc)
+{
+ ICStub* first = monomorphicStub(pc);
+ ICStub* second = nullptr;
+ if (!first && !dimorphicStub(pc, &first, &second))
+ return MCompare::Compare_Unknown;
+
+ if (ICStub* fallback = second ? second->next() : first->next()) {
+ MOZ_ASSERT(fallback->isFallback());
+ if (fallback->toCompare_Fallback()->hadUnoptimizableAccess())
+ return MCompare::Compare_Unknown;
+ }
+
+ if (CanUseInt32Compare(first->kind()) && (!second || CanUseInt32Compare(second->kind()))) {
+ ICCompare_Int32WithBoolean* coerce =
+ first->isCompare_Int32WithBoolean()
+ ? first->toCompare_Int32WithBoolean()
+ : ((second && second->isCompare_Int32WithBoolean())
+ ? second->toCompare_Int32WithBoolean()
+ : nullptr);
+ if (coerce) {
+ return coerce->lhsIsInt32()
+ ? MCompare::Compare_Int32MaybeCoerceRHS
+ : MCompare::Compare_Int32MaybeCoerceLHS;
+ }
+ return MCompare::Compare_Int32;
+ }
+
+ if (CanUseDoubleCompare(first->kind()) && (!second || CanUseDoubleCompare(second->kind()))) {
+ ICCompare_NumberWithUndefined* coerce =
+ first->isCompare_NumberWithUndefined()
+ ? first->toCompare_NumberWithUndefined()
+ : (second && second->isCompare_NumberWithUndefined())
+ ? second->toCompare_NumberWithUndefined()
+ : nullptr;
+ if (coerce) {
+ return coerce->lhsIsUndefined()
+ ? MCompare::Compare_DoubleMaybeCoerceLHS
+ : MCompare::Compare_DoubleMaybeCoerceRHS;
+ }
+ return MCompare::Compare_Double;
+ }
+
+ return MCompare::Compare_Unknown;
+}
+
+static bool
+TryToSpecializeBinaryArithOp(ICStub** stubs,
+ uint32_t nstubs,
+ MIRType* result)
+{
+ DebugOnly<bool> sawInt32 = false;
+ bool sawDouble = false;
+ bool sawOther = false;
+
+ for (uint32_t i = 0; i < nstubs; i++) {
+ switch (stubs[i]->kind()) {
+ case ICStub::BinaryArith_Int32:
+ sawInt32 = true;
+ break;
+ case ICStub::BinaryArith_BooleanWithInt32:
+ sawInt32 = true;
+ break;
+ case ICStub::BinaryArith_Double:
+ sawDouble = true;
+ break;
+ case ICStub::BinaryArith_DoubleWithInt32:
+ sawDouble = true;
+ break;
+ default:
+ sawOther = true;
+ break;
+ }
+ }
+
+ if (sawOther)
+ return false;
+
+ if (sawDouble) {
+ *result = MIRType::Double;
+ return true;
+ }
+
+ MOZ_ASSERT(sawInt32);
+ *result = MIRType::Int32;
+ return true;
+}
+
+MIRType
+BaselineInspector::expectedBinaryArithSpecialization(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return MIRType::None;
+
+ MIRType result;
+ ICStub* stubs[2];
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+ if (stub->isBinaryArith_Fallback() &&
+ stub->toBinaryArith_Fallback()->hadUnoptimizableOperands())
+ {
+ return MIRType::None;
+ }
+
+ stubs[0] = monomorphicStub(pc);
+ if (stubs[0]) {
+ if (TryToSpecializeBinaryArithOp(stubs, 1, &result))
+ return result;
+ }
+
+ if (dimorphicStub(pc, &stubs[0], &stubs[1])) {
+ if (TryToSpecializeBinaryArithOp(stubs, 2, &result))
+ return result;
+ }
+
+ return MIRType::None;
+}
+
+bool
+BaselineInspector::hasSeenNonNativeGetElement(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+
+ if (stub->isGetElem_Fallback())
+ return stub->toGetElem_Fallback()->hasNonNativeAccess();
+ return false;
+}
+
+bool
+BaselineInspector::hasSeenNegativeIndexGetElement(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+
+ if (stub->isGetElem_Fallback())
+ return stub->toGetElem_Fallback()->hasNegativeIndex();
+ return false;
+}
+
+bool
+BaselineInspector::hasSeenAccessedGetter(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+
+ if (stub->isGetProp_Fallback())
+ return stub->toGetProp_Fallback()->hasAccessedGetter();
+ return false;
+}
+
+bool
+BaselineInspector::hasSeenNonStringIterMore(jsbytecode* pc)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_MOREITER);
+
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+
+ return stub->toIteratorMore_Fallback()->hasNonStringResult();
+}
+
+bool
+BaselineInspector::hasSeenDoubleResult(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.fallbackStub();
+
+ MOZ_ASSERT(stub->isUnaryArith_Fallback() || stub->isBinaryArith_Fallback());
+
+    if (stub->isUnaryArith_Fallback())
+        return stub->toUnaryArith_Fallback()->sawDoubleResult();
+
+    return stub->toBinaryArith_Fallback()->sawDoubleResult();
+}
+
+JSObject*
+BaselineInspector::getTemplateObject(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ switch (stub->kind()) {
+ case ICStub::NewArray_Fallback:
+ return stub->toNewArray_Fallback()->templateObject();
+ case ICStub::NewObject_Fallback:
+ return stub->toNewObject_Fallback()->templateObject();
+ case ICStub::Rest_Fallback:
+ return stub->toRest_Fallback()->templateObject();
+ case ICStub::Call_Scripted:
+ if (JSObject* obj = stub->toCall_Scripted()->templateObject())
+ return obj;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return nullptr;
+}
+
+ObjectGroup*
+BaselineInspector::getTemplateObjectGroup(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ switch (stub->kind()) {
+ case ICStub::NewArray_Fallback:
+ return stub->toNewArray_Fallback()->templateGroup();
+ default:
+ break;
+ }
+ }
+
+ return nullptr;
+}
+
+JSFunction*
+BaselineInspector::getSingleCallee(jsbytecode* pc)
+{
+ MOZ_ASSERT(*pc == JSOP_NEW);
+
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ ICStub* stub = entry.firstStub();
+
+ if (entry.fallbackStub()->toCall_Fallback()->hadUnoptimizableCall())
+ return nullptr;
+
+ if (!stub->isCall_Scripted() || stub->next() != entry.fallbackStub())
+ return nullptr;
+
+ return stub->toCall_Scripted()->callee();
+}
+
+JSObject*
+BaselineInspector::getTemplateObjectForNative(jsbytecode* pc, Native native)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ if (stub->isCall_Native() && stub->toCall_Native()->callee()->native() == native)
+ return stub->toCall_Native()->templateObject();
+ }
+
+ return nullptr;
+}
+
+bool
+BaselineInspector::isOptimizableCallStringSplit(jsbytecode* pc, JSString** strOut, JSString** sepOut,
+ JSObject** objOut)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+
+    // If a StringSplit stub is attached, it must be the only optimized stub attached.
+ if (entry.fallbackStub()->numOptimizedStubs() != 1)
+ return false;
+
+ ICStub* stub = entry.firstStub();
+ if (stub->kind() != ICStub::Call_StringSplit)
+ return false;
+
+ *strOut = stub->toCall_StringSplit()->expectedStr();
+ *sepOut = stub->toCall_StringSplit()->expectedSep();
+ *objOut = stub->toCall_StringSplit()->templateObject();
+ return true;
+}
+
+JSObject*
+BaselineInspector::getTemplateObjectForClassHook(jsbytecode* pc, const Class* clasp)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == clasp)
+ return stub->toCall_ClassHook()->templateObject();
+ }
+
+ return nullptr;
+}
+
+JSObject*
+BaselineInspector::getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType)
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == &SimdTypeDescr::class_) {
+ JSObject* templateObj = stub->toCall_ClassHook()->templateObject();
+ InlineTypedObject& typedObj = templateObj->as<InlineTypedObject>();
+ if (typedObj.typeDescr().as<SimdTypeDescr>().type() == simdType)
+ return templateObj;
+ }
+ }
+
+ return nullptr;
+}
+
+LexicalEnvironmentObject*
+BaselineInspector::templateNamedLambdaObject()
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ JSObject* res = baselineScript()->templateEnvironment();
+ if (script->bodyScope()->hasEnvironment())
+ res = res->enclosingEnvironment();
+ MOZ_ASSERT(res);
+
+ return &res->as<LexicalEnvironmentObject>();
+}
+
+CallObject*
+BaselineInspector::templateCallObject()
+{
+ if (!hasBaselineScript())
+ return nullptr;
+
+ JSObject* res = baselineScript()->templateEnvironment();
+ MOZ_ASSERT(res);
+
+ return &res->as<CallObject>();
+}
+
+static Shape*
+GlobalShapeForGetPropFunction(ICStub* stub)
+{
+ if (stub->isGetProp_CallNative()) {
+ ICGetProp_CallNative* nstub = stub->toGetProp_CallNative();
+ if (nstub->isOwnGetter())
+ return nullptr;
+
+ const HeapReceiverGuard& guard = nstub->receiverGuard();
+ if (Shape* shape = guard.shape()) {
+ if (shape->getObjectClass()->flags & JSCLASS_IS_GLOBAL)
+ return shape;
+ }
+ } else if (stub->isGetProp_CallNativeGlobal()) {
+ ICGetProp_CallNativeGlobal* nstub = stub->toGetProp_CallNativeGlobal();
+ if (nstub->isOwnGetter())
+ return nullptr;
+
+ Shape* shape = nstub->globalShape();
+ MOZ_ASSERT(shape->getObjectClass()->flags & JSCLASS_IS_GLOBAL);
+ return shape;
+ }
+
+ return nullptr;
+}
+
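+// Check whether every getter stub attached at |pc| calls the same getter on
+// the same holder, collecting the receiver guards Ion would need. Bails out if
+// the stubs disagree or an unoptimizable access was recorded.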
+bool
+BaselineInspector::commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
+ JSFunction** commonGetter, Shape** globalShape,
+ bool* isOwnProperty,
+ ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ MOZ_ASSERT(receivers.empty());
+ MOZ_ASSERT(convertUnboxedGroups.empty());
+
+ *holder = nullptr;
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ if (stub->isGetProp_CallScripted() ||
+ stub->isGetProp_CallNative() ||
+ stub->isGetProp_CallNativeGlobal())
+ {
+ ICGetPropCallGetter* nstub = static_cast<ICGetPropCallGetter*>(stub);
+ bool isOwn = nstub->isOwnGetter();
+ if (!isOwn && !AddReceiver(nstub->receiverGuard(), receivers, convertUnboxedGroups))
+ return false;
+
+ if (!*holder) {
+ *holder = nstub->holder();
+ *holderShape = nstub->holderShape();
+ *commonGetter = nstub->getter();
+ *globalShape = GlobalShapeForGetPropFunction(nstub);
+ *isOwnProperty = isOwn;
+ } else if (nstub->holderShape() != *holderShape ||
+ GlobalShapeForGetPropFunction(nstub) != *globalShape ||
+ isOwn != *isOwnProperty)
+ {
+ return false;
+ } else {
+ MOZ_ASSERT(*commonGetter == nstub->getter());
+ }
+ } else if (stub->isGetProp_Fallback()) {
+ // If we have an unoptimizable access, don't try to optimize.
+ if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
+ return false;
+ } else if (stub->isGetName_Fallback()) {
+ if (stub->toGetName_Fallback()->hadUnoptimizableAccess())
+ return false;
+ } else {
+ return false;
+ }
+ }
+
+ if (!*holder)
+ return false;
+
+ MOZ_ASSERT(*isOwnProperty == (receivers.empty() && convertUnboxedGroups.empty()));
+ return true;
+}
+
+bool
+BaselineInspector::commonSetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
+ JSFunction** commonSetter, bool* isOwnProperty,
+ ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups)
+{
+ if (!hasBaselineScript())
+ return false;
+
+ MOZ_ASSERT(receivers.empty());
+ MOZ_ASSERT(convertUnboxedGroups.empty());
+
+ *holder = nullptr;
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ if (stub->isSetProp_CallScripted() || stub->isSetProp_CallNative()) {
+ ICSetPropCallSetter* nstub = static_cast<ICSetPropCallSetter*>(stub);
+ bool isOwn = nstub->isOwnSetter();
+ if (!isOwn && !AddReceiver(nstub->receiverGuard(), receivers, convertUnboxedGroups))
+ return false;
+
+ if (!*holder) {
+ *holder = nstub->holder();
+ *holderShape = nstub->holderShape();
+ *commonSetter = nstub->setter();
+ *isOwnProperty = isOwn;
+ } else if (nstub->holderShape() != *holderShape || isOwn != *isOwnProperty) {
+ return false;
+ } else {
+ MOZ_ASSERT(*commonSetter == nstub->setter());
+ }
+ } else if (!stub->isSetProp_Fallback() ||
+ stub->toSetProp_Fallback()->hadUnoptimizableAccess())
+ {
+ // We have an unoptimizable access, so don't try to optimize.
+ return false;
+ }
+ }
+
+ if (!*holder)
+ return false;
+
+ return true;
+}
+
+static MIRType
+GetCacheIRExpectedInputType(ICCacheIR_Monitored* stub)
+{
+ CacheIRReader reader(stub->stubInfo());
+
+ if (reader.matchOp(CacheOp::GuardIsObject, ValOperandId(0)))
+ return MIRType::Object;
+ if (reader.matchOp(CacheOp::GuardType, ValOperandId(0))) {
+ JSValueType type = reader.valueType();
+ return MIRTypeFromValueType(type);
+ }
+
+ MOZ_ASSERT_UNREACHABLE("Unexpected instruction");
+ return MIRType::Value;
+}
+
+MIRType
+BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
+{
+ if (!hasBaselineScript())
+ return MIRType::Value;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+ MIRType type = MIRType::None;
+
+ for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+ MIRType stubType;
+ switch (stub->kind()) {
+ case ICStub::GetProp_Fallback:
+ if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
+ return MIRType::Value;
+ continue;
+
+ case ICStub::GetElem_Fallback:
+ if (stub->toGetElem_Fallback()->hadUnoptimizableAccess())
+ return MIRType::Value;
+ continue;
+
+ case ICStub::GetProp_Generic:
+ return MIRType::Value;
+
+ case ICStub::GetProp_ArgumentsLength:
+ case ICStub::GetElem_Arguments:
+ // Either an object or magic arguments.
+ return MIRType::Value;
+
+ case ICStub::GetProp_CallScripted:
+ case ICStub::GetProp_CallNative:
+ case ICStub::GetProp_CallDOMProxyNative:
+ case ICStub::GetProp_CallDOMProxyWithGenerationNative:
+ case ICStub::GetProp_DOMProxyShadowed:
+ case ICStub::GetElem_NativeSlotName:
+ case ICStub::GetElem_NativeSlotSymbol:
+ case ICStub::GetElem_NativePrototypeSlotName:
+ case ICStub::GetElem_NativePrototypeSlotSymbol:
+ case ICStub::GetElem_NativePrototypeCallNativeName:
+ case ICStub::GetElem_NativePrototypeCallNativeSymbol:
+ case ICStub::GetElem_NativePrototypeCallScriptedName:
+ case ICStub::GetElem_NativePrototypeCallScriptedSymbol:
+ case ICStub::GetElem_UnboxedPropertyName:
+ case ICStub::GetElem_String:
+ case ICStub::GetElem_Dense:
+ case ICStub::GetElem_TypedArray:
+ case ICStub::GetElem_UnboxedArray:
+ stubType = MIRType::Object;
+ break;
+
+ case ICStub::GetProp_StringLength:
+ stubType = MIRType::String;
+ break;
+
+ case ICStub::CacheIR_Monitored:
+ stubType = GetCacheIRExpectedInputType(stub->toCacheIR_Monitored());
+ if (stubType == MIRType::Value)
+ return MIRType::Value;
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected stub");
+ }
+
+ if (type != MIRType::None) {
+ if (type != stubType)
+ return MIRType::Value;
+ } else {
+ type = stubType;
+ }
+ }
+
+ return (type == MIRType::None) ? MIRType::Value : type;
+}
+
+bool
+BaselineInspector::instanceOfData(jsbytecode* pc, Shape** shape, uint32_t* slot,
+ JSObject** prototypeObject)
+{
+ MOZ_ASSERT(*pc == JSOP_INSTANCEOF);
+
+ if (!hasBaselineScript())
+ return false;
+
+ const ICEntry& entry = icEntryFromPC(pc);
+
+ ICStub* stub = entry.firstStub();
+ if (!stub->isInstanceOf_Function() ||
+ !stub->next()->isInstanceOf_Fallback() ||
+ stub->next()->toInstanceOf_Fallback()->hadUnoptimizableAccess())
+ {
+ return false;
+ }
+
+ ICInstanceOf_Function* optStub = stub->toInstanceOf_Function();
+ *shape = optStub->shape();
+ *prototypeObject = optStub->prototypeObject();
+ *slot = optStub->slot();
+
+ if (IsInsideNursery(*prototypeObject))
+ return false;
+
+ return true;
+}
diff --git a/js/src/jit/BaselineInspector.h b/js/src/jit/BaselineInspector.h
new file mode 100644
index 000000000..961df18aa
--- /dev/null
+++ b/js/src/jit/BaselineInspector.h
@@ -0,0 +1,148 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineInspector_h
+#define jit_BaselineInspector_h
+
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/MIR.h"
+
+namespace js {
+namespace jit {
+
+class BaselineInspector;
+
+class ICInspector
+{
+ protected:
+ BaselineInspector* inspector_;
+ jsbytecode* pc_;
+ ICEntry* icEntry_;
+
+ ICInspector(BaselineInspector* inspector, jsbytecode* pc, ICEntry* icEntry)
+ : inspector_(inspector), pc_(pc), icEntry_(icEntry)
+ { }
+};
+
+class SetElemICInspector : public ICInspector
+{
+ public:
+ SetElemICInspector(BaselineInspector* inspector, jsbytecode* pc, ICEntry* icEntry)
+ : ICInspector(inspector, pc, icEntry)
+ { }
+
+ bool sawOOBDenseWrite() const;
+ bool sawOOBTypedArrayWrite() const;
+ bool sawDenseWrite() const;
+ bool sawTypedArrayWrite() const;
+};
+
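+// The BaselineInspector inspects the IC chains attached to a script's
+// BaselineScript to recover the types, shapes and callees observed while the
+// script ran in Baseline. Ion uses this information to guide specialization.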
+class BaselineInspector
+{
+ private:
+ JSScript* script;
+ BaselineICEntry* prevLookedUpEntry;
+
+ public:
+ explicit BaselineInspector(JSScript* script)
+ : script(script), prevLookedUpEntry(nullptr)
+ {
+ MOZ_ASSERT(script);
+ }
+
+ bool hasBaselineScript() const {
+ return script->hasBaselineScript();
+ }
+
+ BaselineScript* baselineScript() const {
+ return script->baselineScript();
+ }
+
+ private:
+#ifdef DEBUG
+ bool isValidPC(jsbytecode* pc) {
+ return script->containsPC(pc);
+ }
+#endif
+
+ BaselineICEntry& icEntryFromPC(jsbytecode* pc) {
+ MOZ_ASSERT(hasBaselineScript());
+ MOZ_ASSERT(isValidPC(pc));
+ BaselineICEntry& ent =
+ baselineScript()->icEntryFromPCOffset(script->pcToOffset(pc), prevLookedUpEntry);
+ MOZ_ASSERT(ent.isForOp());
+ prevLookedUpEntry = &ent;
+ return ent;
+ }
+
+ template <typename ICInspectorType>
+ ICInspectorType makeICInspector(jsbytecode* pc, ICStub::Kind expectedFallbackKind) {
+ BaselineICEntry* ent = nullptr;
+ if (hasBaselineScript()) {
+ ent = &icEntryFromPC(pc);
+ MOZ_ASSERT(ent->fallbackStub()->kind() == expectedFallbackKind);
+ }
+ return ICInspectorType(this, pc, ent);
+ }
+
+ ICStub* monomorphicStub(jsbytecode* pc);
+ MOZ_MUST_USE bool dimorphicStub(jsbytecode* pc, ICStub** pfirst, ICStub** psecond);
+
+ public:
+ typedef Vector<ReceiverGuard, 4, JitAllocPolicy> ReceiverVector;
+ typedef Vector<ObjectGroup*, 4, JitAllocPolicy> ObjectGroupVector;
+ MOZ_MUST_USE bool maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups);
+
+ SetElemICInspector setElemICInspector(jsbytecode* pc) {
+ return makeICInspector<SetElemICInspector>(pc, ICStub::SetElem_Fallback);
+ }
+
+ MIRType expectedResultType(jsbytecode* pc);
+ MCompare::CompareType expectedCompareType(jsbytecode* pc);
+ MIRType expectedBinaryArithSpecialization(jsbytecode* pc);
+ MIRType expectedPropertyAccessInputType(jsbytecode* pc);
+
+ bool hasSeenNonNativeGetElement(jsbytecode* pc);
+ bool hasSeenNegativeIndexGetElement(jsbytecode* pc);
+ bool hasSeenAccessedGetter(jsbytecode* pc);
+ bool hasSeenDoubleResult(jsbytecode* pc);
+ bool hasSeenNonStringIterMore(jsbytecode* pc);
+
+ MOZ_MUST_USE bool isOptimizableCallStringSplit(jsbytecode* pc, JSString** strOut,
+ JSString** sepOut, JSObject** objOut);
+ JSObject* getTemplateObject(jsbytecode* pc);
+ JSObject* getTemplateObjectForNative(jsbytecode* pc, Native native);
+ JSObject* getTemplateObjectForClassHook(jsbytecode* pc, const Class* clasp);
+ JSObject* getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType);
+
+ // Sometimes the group a template object will have is known, even if the
+ // object itself isn't.
+ ObjectGroup* getTemplateObjectGroup(jsbytecode* pc);
+
+ JSFunction* getSingleCallee(jsbytecode* pc);
+
+ LexicalEnvironmentObject* templateNamedLambdaObject();
+ CallObject* templateCallObject();
+
+ MOZ_MUST_USE bool commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
+ JSFunction** commonGetter, Shape** globalShape,
+ bool* isOwnProperty, ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups);
+ MOZ_MUST_USE bool commonSetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
+ JSFunction** commonSetter, bool* isOwnProperty,
+ ReceiverVector& receivers,
+ ObjectGroupVector& convertUnboxedGroups);
+
+ MOZ_MUST_USE bool instanceOfData(jsbytecode* pc, Shape** shape, uint32_t* slot,
+ JSObject** prototypeObject);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineInspector_h */
diff --git a/js/src/jit/BaselineJIT.cpp b/js/src/jit/BaselineJIT.cpp
new file mode 100644
index 000000000..d0e297c2d
--- /dev/null
+++ b/js/src/jit/BaselineJIT.cpp
@@ -0,0 +1,1251 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineJIT.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/CompileInfo.h"
+#include "jit/JitCommon.h"
+#include "jit/JitSpewer.h"
+#include "vm/Debugger.h"
+#include "vm/Interpreter.h"
+#include "vm/TraceLogging.h"
+#include "wasm/WasmInstance.h"
+
+#include "jsobjinlines.h"
+#include "jsopcodeinlines.h"
+#include "jsscriptinlines.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Stack-inl.h"
+
+using mozilla::BinarySearchIf;
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+/* static */ PCMappingSlotInfo::SlotLocation
+PCMappingSlotInfo::ToSlotLocation(const StackValue* stackVal)
+{
+ if (stackVal->kind() == StackValue::Register) {
+ if (stackVal->reg() == R0)
+ return SlotInR0;
+ MOZ_ASSERT(stackVal->reg() == R1);
+ return SlotInR1;
+ }
+ MOZ_ASSERT(stackVal->kind() != StackValue::Stack);
+ return SlotIgnore;
+}
+
+void
+ICStubSpace::freeAllAfterMinorGC(JSRuntime* rt)
+{
+ rt->gc.freeAllLifoBlocksAfterMinorGC(&allocator_);
+}
+
+BaselineScript::BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ uint32_t postDebugPrologueOffset)
+ : method_(nullptr),
+ templateEnv_(nullptr),
+ fallbackStubSpace_(),
+ dependentWasmImports_(nullptr),
+ prologueOffset_(prologueOffset),
+ epilogueOffset_(epilogueOffset),
+ profilerEnterToggleOffset_(profilerEnterToggleOffset),
+ profilerExitToggleOffset_(profilerExitToggleOffset),
+#ifdef JS_TRACE_LOGGING
+# ifdef DEBUG
+ traceLoggerScriptsEnabled_(false),
+ traceLoggerEngineEnabled_(false),
+# endif
+ traceLoggerScriptEvent_(),
+#endif
+ postDebugPrologueOffset_(postDebugPrologueOffset),
+ flags_(0),
+ inlinedBytecodeLength_(0),
+ maxInliningDepth_(UINT8_MAX),
+ pendingBuilder_(nullptr)
+{ }
+
+static const unsigned BASELINE_MAX_ARGS_LENGTH = 20000;
+
+static bool
+CheckFrame(InterpreterFrame* fp)
+{
+ if (fp->isDebuggerEvalFrame()) {
+ // Debugger eval-in-frame. These are likely short-running scripts so
+ // don't bother compiling them for now.
+ JitSpew(JitSpew_BaselineAbort, "debugger frame");
+ return false;
+ }
+
+ if (fp->isFunctionFrame() && fp->numActualArgs() > BASELINE_MAX_ARGS_LENGTH) {
+ // Fall back to the interpreter to avoid running out of stack space.
+ JitSpew(JitSpew_BaselineAbort, "Too many arguments (%u)", fp->numActualArgs());
+ return false;
+ }
+
+ return true;
+}
+
+static JitExecStatus
+EnterBaseline(JSContext* cx, EnterJitData& data)
+{
+ if (data.osrFrame) {
+ // Check for potential stack overflow before OSR-ing.
+ uint8_t spDummy;
+ uint32_t extra = BaselineFrame::Size() + (data.osrNumStackValues * sizeof(Value));
+ uint8_t* checkSp = (&spDummy) - extra;
+ JS_CHECK_RECURSION_WITH_SP(cx, checkSp, return JitExec_Aborted);
+ } else {
+ JS_CHECK_RECURSION(cx, return JitExec_Aborted);
+ }
+
+#ifdef DEBUG
+ // Assert we don't GC before entering JIT code. A GC could discard JIT code
+ // or move the function stored in the CalleeToken (it won't be traced at
+ // this point). We use Maybe<> here so we can call reset() to call the
+ // AutoAssertNoGC destructor before we enter JIT code.
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+ nogc.emplace(cx);
+#endif
+
+ MOZ_ASSERT(jit::IsBaselineEnabled(cx));
+ MOZ_ASSERT_IF(data.osrFrame, CheckFrame(data.osrFrame));
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterBaseline();
+
+ bool constructingLegacyGen =
+ data.constructing && CalleeTokenToFunction(data.calleeToken)->isLegacyGenerator();
+
+    // Caller must construct |this| before invoking the JIT-compiled function.
+    // Legacy generators can be called with 'new' but when we resume them, the
+    // this-slot and arguments are |undefined| (they are stored in the
+    // CallObject).
+ MOZ_ASSERT_IF(data.constructing && !constructingLegacyGen,
+ data.maxArgv[0].isObject() || data.maxArgv[0].isMagic(JS_UNINITIALIZED_LEXICAL));
+
+ data.result.setInt32(data.numActualArgs);
+ {
+ AssertCompartmentUnchanged pcc(cx);
+ ActivationEntryMonitor entryMonitor(cx, data.calleeToken);
+ JitActivation activation(cx);
+
+ if (data.osrFrame)
+ data.osrFrame->setRunningInJit();
+
+#ifdef DEBUG
+ nogc.reset();
+#endif
+ // Single transition point from Interpreter to Baseline.
+ CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame,
+ data.calleeToken, data.envChain.get(), data.osrNumStackValues,
+ data.result.address());
+
+ if (data.osrFrame)
+ data.osrFrame->clearRunningInJit();
+ }
+
+ MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
+
+ // Jit callers wrap primitive constructor return, except for derived
+ // class constructors, which are forced to do it themselves.
+ if (!data.result.isMagic() &&
+ data.constructing &&
+ data.result.isPrimitive() &&
+ !constructingLegacyGen)
+ {
+ MOZ_ASSERT(data.maxArgv[0].isObject());
+ data.result = data.maxArgv[0];
+ }
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->getJitRuntime(cx)->freeOsrTempData();
+
+ MOZ_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
+ return data.result.isMagic() ? JitExec_Error : JitExec_Ok;
+}
+
+JitExecStatus
+jit::EnterBaselineMethod(JSContext* cx, RunState& state)
+{
+ BaselineScript* baseline = state.script()->baselineScript();
+
+ EnterJitData data(cx);
+ data.jitcode = baseline->method()->raw();
+
+ Rooted<GCVector<Value>> vals(cx, GCVector<Value>(cx));
+ if (!SetEnterJitData(cx, data, state, &vals))
+ return JitExec_Error;
+
+ JitExecStatus status = EnterBaseline(cx, data);
+ if (status != JitExec_Ok)
+ return status;
+
+ state.setReturnValue(data.result);
+ return JitExec_Ok;
+}
+
+JitExecStatus
+jit::EnterBaselineAtBranch(JSContext* cx, InterpreterFrame* fp, jsbytecode* pc)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+
+ BaselineScript* baseline = fp->script()->baselineScript();
+
+ EnterJitData data(cx);
+ data.jitcode = baseline->nativeCodeForPC(fp->script(), pc);
+
+ // Skip debug breakpoint/trap handler, the interpreter already handled it
+ // for the current op.
+ if (fp->isDebuggee()) {
+ MOZ_RELEASE_ASSERT(baseline->hasDebugInstrumentation());
+ data.jitcode += MacroAssembler::ToggledCallSize(data.jitcode);
+ }
+
+ data.osrFrame = fp;
+ data.osrNumStackValues = fp->script()->nfixed() + cx->interpreterRegs().stackDepth();
+
+ AutoValueVector vals(cx);
+ RootedValue thisv(cx);
+
+ if (fp->isFunctionFrame()) {
+ data.constructing = fp->isConstructing();
+ data.numActualArgs = fp->numActualArgs();
+ data.maxArgc = Max(fp->numActualArgs(), fp->numFormalArgs()) + 1; // +1 = include |this|
+ data.maxArgv = fp->argv() - 1; // -1 = include |this|
+ data.envChain = nullptr;
+ data.calleeToken = CalleeToToken(&fp->callee(), data.constructing);
+ } else {
+ thisv.setUndefined();
+ data.constructing = false;
+ data.numActualArgs = 0;
+ data.maxArgc = 1;
+ data.maxArgv = thisv.address();
+ data.envChain = fp->environmentChain();
+
+ data.calleeToken = CalleeToToken(fp->script());
+
+ if (fp->isEvalFrame()) {
+ if (!vals.reserve(2))
+ return JitExec_Aborted;
+
+ vals.infallibleAppend(thisv);
+
+ if (fp->script()->isDirectEvalInFunction())
+ vals.infallibleAppend(fp->newTarget());
+ else
+ vals.infallibleAppend(NullValue());
+
+ data.maxArgc = 2;
+ data.maxArgv = vals.begin();
+ }
+ }
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogStopEvent(logger, TraceLogger_Interpreter);
+ TraceLogStartEvent(logger, TraceLogger_Baseline);
+
+ JitExecStatus status = EnterBaseline(cx, data);
+ if (status != JitExec_Ok)
+ return status;
+
+ fp->setReturnValue(data.result);
+ return JitExec_Ok;
+}
+
+MethodStatus
+jit::BaselineCompile(JSContext* cx, JSScript* script, bool forceDebugInstrumentation)
+{
+ MOZ_ASSERT(!script->hasBaselineScript());
+ MOZ_ASSERT(script->canBaselineCompile());
+ MOZ_ASSERT(IsBaselineEnabled(cx));
+
+ script->ensureNonLazyCanonicalFunction(cx);
+
+ LifoAlloc alloc(TempAllocator::PreferredLifoChunkSize);
+ TempAllocator* temp = alloc.new_<TempAllocator>(&alloc);
+ if (!temp) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ JitContext jctx(cx, temp);
+
+ BaselineCompiler compiler(cx, *temp, script);
+ if (!compiler.init()) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ if (forceDebugInstrumentation)
+ compiler.setCompileDebugInstrumentation();
+
+ MethodStatus status = compiler.compile();
+
+ MOZ_ASSERT_IF(status == Method_Compiled, script->hasBaselineScript());
+ MOZ_ASSERT_IF(status != Method_Compiled, !script->hasBaselineScript());
+
+ if (status == Method_CantCompile)
+ script->setBaselineScript(cx->runtime(), BASELINE_DISABLED_SCRIPT);
+
+ return status;
+}
+
+static MethodStatus
+CanEnterBaselineJIT(JSContext* cx, HandleScript script, InterpreterFrame* osrFrame)
+{
+ MOZ_ASSERT(jit::IsBaselineEnabled(cx));
+
+ // Skip if the script has been disabled.
+ if (!script->canBaselineCompile())
+ return Method_Skipped;
+
+ if (script->length() > BaselineScript::MAX_JSSCRIPT_LENGTH)
+ return Method_CantCompile;
+
+ if (script->nslots() > BaselineScript::MAX_JSSCRIPT_SLOTS)
+ return Method_CantCompile;
+
+ if (script->hasBaselineScript())
+ return Method_Compiled;
+
+ // Check this before calling ensureJitCompartmentExists, so we're less
+ // likely to report OOM in JSRuntime::createJitRuntime.
+ if (!CanLikelyAllocateMoreExecutableMemory())
+ return Method_Skipped;
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return Method_Error;
+
+ // Check script warm-up counter.
+ if (script->incWarmUpCounter() <= JitOptions.baselineWarmUpThreshold)
+ return Method_Skipped;
+
+    // A frame can be marked as a debuggee frame independently of its
+    // underlying script being a debuggee script, e.g., when performing
+    // Debugger.Frame.prototype.eval.
+ return BaselineCompile(cx, script, osrFrame && osrFrame->isDebuggee());
+}
+
+MethodStatus
+jit::CanEnterBaselineAtBranch(JSContext* cx, InterpreterFrame* fp, bool newType)
+{
+ if (!CheckFrame(fp))
+ return Method_CantCompile;
+
+ // This check is needed in the following corner case. Consider a function h,
+ //
+ // function h(x) {
+ // h(false);
+ // if (!x)
+ // return;
+ // for (var i = 0; i < N; i++)
+ // /* do stuff */
+ // }
+ //
+ // Suppose h is not yet compiled in baseline and is executing in the
+ // interpreter. Let this interpreter frame be f_older. The debugger marks
+ // f_older as isDebuggee. At the point of the recursive call h(false), h is
+ // compiled in baseline without debug instrumentation, pushing a baseline
+ // frame f_newer. The debugger never flags f_newer as isDebuggee, and never
+ // recompiles h. When the recursive call returns and execution proceeds to
+ // the loop, the interpreter attempts to OSR into baseline. Since h is
+ // already compiled in baseline, execution jumps directly into baseline
+ // code. This is incorrect as h's baseline script does not have debug
+ // instrumentation.
+ if (fp->isDebuggee() && !Debugger::ensureExecutionObservabilityOfOsrFrame(cx, fp))
+ return Method_Error;
+
+ RootedScript script(cx, fp->script());
+ return CanEnterBaselineJIT(cx, script, fp);
+}
+
+MethodStatus
+jit::CanEnterBaselineMethod(JSContext* cx, RunState& state)
+{
+ if (state.isInvoke()) {
+ InvokeState& invoke = *state.asInvoke();
+
+ if (invoke.args().length() > BASELINE_MAX_ARGS_LENGTH) {
+ JitSpew(JitSpew_BaselineAbort, "Too many arguments (%u)", invoke.args().length());
+ return Method_CantCompile;
+ }
+
+ if (!state.maybeCreateThisForConstructor(cx)) {
+ if (cx->isThrowingOutOfMemory()) {
+ cx->recoverFromOutOfMemory();
+ return Method_Skipped;
+ }
+ return Method_Error;
+ }
+ } else {
+ if (state.asExecute()->isDebuggerEval()) {
+ JitSpew(JitSpew_BaselineAbort, "debugger frame");
+ return Method_CantCompile;
+ }
+ }
+
+ RootedScript script(cx, state.script());
+ return CanEnterBaselineJIT(cx, script, /* osrFrame = */ nullptr);
+}
+
+BaselineScript*
+BaselineScript::New(JSScript* jsscript,
+ uint32_t prologueOffset, uint32_t epilogueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ uint32_t postDebugPrologueOffset,
+ size_t icEntries,
+ size_t pcMappingIndexEntries, size_t pcMappingSize,
+ size_t bytecodeTypeMapEntries,
+ size_t yieldEntries,
+ size_t traceLoggerToggleOffsetEntries)
+{
+ static const unsigned DataAlignment = sizeof(uintptr_t);
+
+ size_t icEntriesSize = icEntries * sizeof(BaselineICEntry);
+ size_t pcMappingIndexEntriesSize = pcMappingIndexEntries * sizeof(PCMappingIndexEntry);
+ size_t bytecodeTypeMapSize = bytecodeTypeMapEntries * sizeof(uint32_t);
+ size_t yieldEntriesSize = yieldEntries * sizeof(uintptr_t);
+ size_t tlEntriesSize = traceLoggerToggleOffsetEntries * sizeof(uint32_t);
+
+ size_t paddedICEntriesSize = AlignBytes(icEntriesSize, DataAlignment);
+ size_t paddedPCMappingIndexEntriesSize = AlignBytes(pcMappingIndexEntriesSize, DataAlignment);
+ size_t paddedPCMappingSize = AlignBytes(pcMappingSize, DataAlignment);
+ size_t paddedBytecodeTypesMapSize = AlignBytes(bytecodeTypeMapSize, DataAlignment);
+ size_t paddedYieldEntriesSize = AlignBytes(yieldEntriesSize, DataAlignment);
+ size_t paddedTLEntriesSize = AlignBytes(tlEntriesSize, DataAlignment);
+
+ size_t allocBytes = paddedICEntriesSize +
+ paddedPCMappingIndexEntriesSize +
+ paddedPCMappingSize +
+ paddedBytecodeTypesMapSize +
+ paddedYieldEntriesSize +
+ paddedTLEntriesSize;
+
+ BaselineScript* script = jsscript->zone()->pod_malloc_with_extra<BaselineScript, uint8_t>(allocBytes);
+ if (!script)
+ return nullptr;
+ new (script) BaselineScript(prologueOffset, epilogueOffset,
+ profilerEnterToggleOffset, profilerExitToggleOffset,
+ postDebugPrologueOffset);
+
+ size_t offsetCursor = sizeof(BaselineScript);
+ MOZ_ASSERT(offsetCursor == AlignBytes(sizeof(BaselineScript), DataAlignment));
+
+ script->icEntriesOffset_ = offsetCursor;
+ script->icEntries_ = icEntries;
+ offsetCursor += paddedICEntriesSize;
+
+ script->pcMappingIndexOffset_ = offsetCursor;
+ script->pcMappingIndexEntries_ = pcMappingIndexEntries;
+ offsetCursor += paddedPCMappingIndexEntriesSize;
+
+ script->pcMappingOffset_ = offsetCursor;
+ script->pcMappingSize_ = pcMappingSize;
+ offsetCursor += paddedPCMappingSize;
+
+ script->bytecodeTypeMapOffset_ = bytecodeTypeMapEntries ? offsetCursor : 0;
+ offsetCursor += paddedBytecodeTypesMapSize;
+
+ script->yieldEntriesOffset_ = yieldEntries ? offsetCursor : 0;
+ offsetCursor += paddedYieldEntriesSize;
+
+ script->traceLoggerToggleOffsetsOffset_ = tlEntriesSize ? offsetCursor : 0;
+ script->numTraceLoggerToggleOffsets_ = traceLoggerToggleOffsetEntries;
+ offsetCursor += paddedTLEntriesSize;
+
+ MOZ_ASSERT(offsetCursor == sizeof(BaselineScript) + allocBytes);
+ return script;
+}
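+
+// Illustrative layout sketch (not exhaustive): a BaselineScript allocated by
+// New() with 4 IC entries, 2 PC mapping index entries, 40 bytes of PC mapping
+// data and none of the optional tables occupies a single allocation of the
+// form
+//
+//   [ BaselineScript header               ]  <- this
+//   [ 4 * sizeof(BaselineICEntry)         ]  <- icEntriesOffset_
+//   [ 2 * sizeof(PCMappingIndexEntry)     ]  <- pcMappingIndexOffset_
+//   [ 40 bytes of compact PC mapping data ]  <- pcMappingOffset_
+//
+// where every section is padded up to sizeof(uintptr_t) alignment.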
+
+void
+BaselineScript::trace(JSTracer* trc)
+{
+ TraceEdge(trc, &method_, "baseline-method");
+ TraceNullableEdge(trc, &templateEnv_, "baseline-template-environment");
+
+ // Mark all IC stub codes hanging off the IC stub entries.
+ for (size_t i = 0; i < numICEntries(); i++) {
+ BaselineICEntry& ent = icEntry(i);
+ ent.trace(trc);
+ }
+}
+
+/* static */
+void
+BaselineScript::writeBarrierPre(Zone* zone, BaselineScript* script)
+{
+ if (zone->needsIncrementalBarrier())
+ script->trace(zone->barrierTracer());
+}
+
+void
+BaselineScript::Trace(JSTracer* trc, BaselineScript* script)
+{
+ script->trace(trc);
+}
+
+void
+BaselineScript::Destroy(FreeOp* fop, BaselineScript* script)
+{
+ MOZ_ASSERT(!script->hasPendingIonBuilder());
+
+ script->unlinkDependentWasmImports(fop);
+
+ /*
+ * When the script contains pointers to nursery things, the store buffer can
+ * contain entries that point into the fallback stub space. Since we can
+ * destroy scripts outside the context of a GC, this situation could result
+ * in us trying to mark invalid store buffer entries.
+ *
+ * Defer freeing any allocated blocks until after the next minor GC.
+ */
+ script->fallbackStubSpace_.freeAllAfterMinorGC(fop->runtime());
+
+ fop->delete_(script);
+}
+
+void
+JS::DeletePolicy<js::jit::BaselineScript>::operator()(const js::jit::BaselineScript* script)
+{
+ BaselineScript::Destroy(rt_->defaultFreeOp(), const_cast<BaselineScript*>(script));
+}
+
+void
+BaselineScript::clearDependentWasmImports()
+{
+ // Remove any links from wasm::Instances that contain optimized import calls into
+ // this BaselineScript.
+ if (dependentWasmImports_) {
+ for (DependentWasmImport& dep : *dependentWasmImports_)
+ dep.instance->deoptimizeImportExit(dep.importIndex);
+ dependentWasmImports_->clear();
+ }
+}
+
+void
+BaselineScript::unlinkDependentWasmImports(FreeOp* fop)
+{
+ // Remove any links from wasm::Instances that contain optimized FFI calls into
+ // this BaselineScript.
+ clearDependentWasmImports();
+ if (dependentWasmImports_) {
+ fop->delete_(dependentWasmImports_);
+ dependentWasmImports_ = nullptr;
+ }
+}
+
+bool
+BaselineScript::addDependentWasmImport(JSContext* cx, wasm::Instance& instance, uint32_t idx)
+{
+ if (!dependentWasmImports_) {
+ dependentWasmImports_ = cx->new_<Vector<DependentWasmImport>>(cx);
+ if (!dependentWasmImports_)
+ return false;
+ }
+ return dependentWasmImports_->emplaceBack(instance, idx);
+}
+
+void
+BaselineScript::removeDependentWasmImport(wasm::Instance& instance, uint32_t idx)
+{
+ if (!dependentWasmImports_)
+ return;
+
+ for (DependentWasmImport& dep : *dependentWasmImports_) {
+ if (dep.instance == &instance && dep.importIndex == idx) {
+ dependentWasmImports_->erase(&dep);
+ break;
+ }
+ }
+}
+
+BaselineICEntry&
+BaselineScript::icEntry(size_t index)
+{
+ MOZ_ASSERT(index < numICEntries());
+ return icEntryList()[index];
+}
+
+PCMappingIndexEntry&
+BaselineScript::pcMappingIndexEntry(size_t index)
+{
+ MOZ_ASSERT(index < numPCMappingIndexEntries());
+ return pcMappingIndexEntryList()[index];
+}
+
+CompactBufferReader
+BaselineScript::pcMappingReader(size_t indexEntry)
+{
+ PCMappingIndexEntry& entry = pcMappingIndexEntry(indexEntry);
+
+ uint8_t* dataStart = pcMappingData() + entry.bufferOffset;
+ uint8_t* dataEnd = (indexEntry == numPCMappingIndexEntries() - 1)
+ ? pcMappingData() + pcMappingSize_
+ : pcMappingData() + pcMappingIndexEntry(indexEntry + 1).bufferOffset;
+
+ return CompactBufferReader(dataStart, dataEnd);
+}
+
+struct ICEntries
+{
+ BaselineScript* const baseline_;
+
+ explicit ICEntries(BaselineScript* baseline) : baseline_(baseline) {}
+
+ BaselineICEntry& operator[](size_t index) const {
+ return baseline_->icEntry(index);
+ }
+};
+
+BaselineICEntry&
+BaselineScript::icEntryFromReturnOffset(CodeOffset returnOffset)
+{
+ size_t loc;
+#ifdef DEBUG
+ bool found =
+#endif
+ BinarySearchIf(ICEntries(this), 0, numICEntries(),
+ [&returnOffset](BaselineICEntry& entry) {
+ size_t roffset = returnOffset.offset();
+ size_t entryRoffset = entry.returnOffset().offset();
+ if (roffset < entryRoffset)
+ return -1;
+ if (entryRoffset < roffset)
+ return 1;
+ return 0;
+ },
+ &loc);
+
+ MOZ_ASSERT(found);
+ MOZ_ASSERT(loc < numICEntries());
+ MOZ_ASSERT(icEntry(loc).returnOffset().offset() == returnOffset.offset());
+ return icEntry(loc);
+}
+
+static inline size_t
+ComputeBinarySearchMid(BaselineScript* baseline, uint32_t pcOffset)
+{
+ size_t loc;
+ BinarySearchIf(ICEntries(baseline), 0, baseline->numICEntries(),
+ [pcOffset](BaselineICEntry& entry) {
+ uint32_t entryOffset = entry.pcOffset();
+ if (pcOffset < entryOffset)
+ return -1;
+ if (entryOffset < pcOffset)
+ return 1;
+ return 0;
+ },
+ &loc);
+ return loc;
+}
+
+uint8_t*
+BaselineScript::returnAddressForIC(const BaselineICEntry& ent)
+{
+ return method()->raw() + ent.returnOffset().offset();
+}
+
+BaselineICEntry&
+BaselineScript::icEntryFromPCOffset(uint32_t pcOffset)
+{
+ // Multiple IC entries can have the same PC offset, but this method only looks for
+ // those which have isForOp() set.
+ size_t mid = ComputeBinarySearchMid(this, pcOffset);
+
+ // Found an IC entry with a matching PC offset. Search backward, and then
+ // forward from this IC entry, looking for one with the same PC offset which
+ // has isForOp() set.
+ for (size_t i = mid; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i--) {
+ if (icEntry(i).isForOp())
+ return icEntry(i);
+ }
+ for (size_t i = mid+1; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i++) {
+ if (icEntry(i).isForOp())
+ return icEntry(i);
+ }
+ MOZ_CRASH("Invalid PC offset for IC entry.");
+}
+
+BaselineICEntry&
+BaselineScript::icEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry)
+{
+    // Do a linear forward search from the last queried PC offset, or fall
+    // back to a binary search if the last offset is too far away.
+ if (prevLookedUpEntry && pcOffset >= prevLookedUpEntry->pcOffset() &&
+ (pcOffset - prevLookedUpEntry->pcOffset()) <= 10)
+ {
+ BaselineICEntry* firstEntry = &icEntry(0);
+ BaselineICEntry* lastEntry = &icEntry(numICEntries() - 1);
+ BaselineICEntry* curEntry = prevLookedUpEntry;
+ while (curEntry >= firstEntry && curEntry <= lastEntry) {
+ if (curEntry->pcOffset() == pcOffset && curEntry->isForOp())
+ break;
+ curEntry++;
+ }
+ MOZ_ASSERT(curEntry->pcOffset() == pcOffset && curEntry->isForOp());
+ return *curEntry;
+ }
+
+ return icEntryFromPCOffset(pcOffset);
+}
+
+BaselineICEntry&
+BaselineScript::callVMEntryFromPCOffset(uint32_t pcOffset)
+{
+ // Like icEntryFromPCOffset, but only looks for the fake ICEntries
+ // inserted by VM calls.
+ size_t mid = ComputeBinarySearchMid(this, pcOffset);
+
+ for (size_t i = mid; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i--) {
+ if (icEntry(i).kind() == ICEntry::Kind_CallVM)
+ return icEntry(i);
+ }
+ for (size_t i = mid+1; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i++) {
+ if (icEntry(i).kind() == ICEntry::Kind_CallVM)
+ return icEntry(i);
+ }
+ MOZ_CRASH("Invalid PC offset for callVM entry.");
+}
+
+BaselineICEntry&
+BaselineScript::stackCheckICEntry(bool earlyCheck)
+{
+ // The stack check will always be at offset 0, so just do a linear search
+    // from the beginning. This is only needed for debug mode OSR, when
+    // patching a frame that invoked a Debugger hook from the interrupt
+    // handler reached through the stack check, which is part of the prologue.
+ ICEntry::Kind kind = earlyCheck ? ICEntry::Kind_EarlyStackCheck : ICEntry::Kind_StackCheck;
+ for (size_t i = 0; i < numICEntries() && icEntry(i).pcOffset() == 0; i++) {
+ if (icEntry(i).kind() == kind)
+ return icEntry(i);
+ }
+ MOZ_CRASH("No stack check ICEntry found.");
+}
+
+BaselineICEntry&
+BaselineScript::warmupCountICEntry()
+{
+    // The warmup counter entry, like the stack check, is at a very low
+    // offset, so just do a linear search from the beginning.
+ for (size_t i = 0; i < numICEntries() && icEntry(i).pcOffset() == 0; i++) {
+ if (icEntry(i).kind() == ICEntry::Kind_WarmupCounter)
+ return icEntry(i);
+ }
+ MOZ_CRASH("No warmup count ICEntry found.");
+}
+
+BaselineICEntry&
+BaselineScript::icEntryFromReturnAddress(uint8_t* returnAddr)
+{
+ MOZ_ASSERT(returnAddr > method_->raw());
+ MOZ_ASSERT(returnAddr < method_->raw() + method_->instructionsSize());
+ CodeOffset offset(returnAddr - method_->raw());
+ return icEntryFromReturnOffset(offset);
+}
+
+void
+BaselineScript::copyYieldEntries(JSScript* script, Vector<uint32_t>& yieldOffsets)
+{
+ uint8_t** entries = yieldEntryList();
+
+ for (size_t i = 0; i < yieldOffsets.length(); i++) {
+ uint32_t offset = yieldOffsets[i];
+ entries[i] = nativeCodeForPC(script, script->offsetToPC(offset));
+ }
+}
+
+void
+BaselineScript::copyICEntries(JSScript* script, const BaselineICEntry* entries, MacroAssembler& masm)
+{
+ // Fix up the return offset in the IC entries and copy them in.
+ // Also write out the IC entry ptrs in any fallback stubs that were added.
+ for (uint32_t i = 0; i < numICEntries(); i++) {
+ BaselineICEntry& realEntry = icEntry(i);
+ realEntry = entries[i];
+
+ if (!realEntry.hasStub()) {
+ // VM call without any stubs.
+ continue;
+ }
+
+ // If the attached stub is a fallback stub, then fix it up with
+ // a pointer to the (now available) realEntry.
+ if (realEntry.firstStub()->isFallback())
+ realEntry.firstStub()->toFallbackStub()->fixupICEntry(&realEntry);
+
+ if (realEntry.firstStub()->isTypeMonitor_Fallback()) {
+ ICTypeMonitor_Fallback* stub = realEntry.firstStub()->toTypeMonitor_Fallback();
+ stub->fixupICEntry(&realEntry);
+ }
+
+ if (realEntry.firstStub()->isTableSwitch()) {
+ ICTableSwitch* stub = realEntry.firstStub()->toTableSwitch();
+ stub->fixupJumpTable(script, this);
+ }
+ }
+}
+
+void
+BaselineScript::adoptFallbackStubs(FallbackICStubSpace* stubSpace)
+{
+ fallbackStubSpace_.adoptFrom(stubSpace);
+}
+
+void
+BaselineScript::copyPCMappingEntries(const CompactBufferWriter& entries)
+{
+ MOZ_ASSERT(entries.length() > 0);
+ MOZ_ASSERT(entries.length() == pcMappingSize_);
+
+ memcpy(pcMappingData(), entries.buffer(), entries.length());
+}
+
+void
+BaselineScript::copyPCMappingIndexEntries(const PCMappingIndexEntry* entries)
+{
+ for (uint32_t i = 0; i < numPCMappingIndexEntries(); i++)
+ pcMappingIndexEntry(i) = entries[i];
+}
+
+uint8_t*
+BaselineScript::nativeCodeForPC(JSScript* script, jsbytecode* pc, PCMappingSlotInfo* slotInfo)
+{
+ MOZ_ASSERT_IF(script->hasBaselineScript(), script->baselineScript() == this);
+
+ uint32_t pcOffset = script->pcToOffset(pc);
+
+ // Look for the first PCMappingIndexEntry with pc > the pc we are
+ // interested in.
+ uint32_t i = 1;
+ for (; i < numPCMappingIndexEntries(); i++) {
+ if (pcMappingIndexEntry(i).pcOffset > pcOffset)
+ break;
+ }
+
+ // The previous entry contains the current pc.
+ MOZ_ASSERT(i > 0);
+ i--;
+
+ PCMappingIndexEntry& entry = pcMappingIndexEntry(i);
+ MOZ_ASSERT(pcOffset >= entry.pcOffset);
+
+ CompactBufferReader reader(pcMappingReader(i));
+ jsbytecode* curPC = script->offsetToPC(entry.pcOffset);
+ uint32_t nativeOffset = entry.nativeOffset;
+
+ MOZ_ASSERT(script->containsPC(curPC));
+ MOZ_ASSERT(curPC <= pc);
+
+ while (reader.more()) {
+        // If the high bit is set, the native offset delta relative to the
+        // previous pc is nonzero and is encoded next.
+ uint8_t b = reader.readByte();
+ if (b & 0x80)
+ nativeOffset += reader.readUnsigned();
+
+ if (curPC == pc) {
+ if (slotInfo)
+ *slotInfo = PCMappingSlotInfo(b & ~0x80);
+ return method_->raw() + nativeOffset;
+ }
+
+ curPC += GetBytecodeLength(curPC);
+ }
+
+ MOZ_CRASH("No native code for this pc");
+}
+
+jsbytecode*
+BaselineScript::approximatePcForNativeAddress(JSScript* script, uint8_t* nativeAddress)
+{
+ MOZ_ASSERT(script->baselineScript() == this);
+ MOZ_ASSERT(nativeAddress >= method_->raw());
+ MOZ_ASSERT(nativeAddress < method_->raw() + method_->instructionsSize());
+
+ uint32_t nativeOffset = nativeAddress - method_->raw();
+ MOZ_ASSERT(nativeOffset < method_->instructionsSize());
+
+ // Look for the first PCMappingIndexEntry with native offset > the native offset we are
+ // interested in.
+ uint32_t i = 1;
+ for (; i < numPCMappingIndexEntries(); i++) {
+ if (pcMappingIndexEntry(i).nativeOffset > nativeOffset)
+ break;
+ }
+
+ // Go back an entry to search forward from.
+ MOZ_ASSERT(i > 0);
+ i--;
+
+ PCMappingIndexEntry& entry = pcMappingIndexEntry(i);
+
+ CompactBufferReader reader(pcMappingReader(i));
+ jsbytecode* curPC = script->offsetToPC(entry.pcOffset);
+ uint32_t curNativeOffset = entry.nativeOffset;
+
+ MOZ_ASSERT(script->containsPC(curPC));
+
+ // The native code address can occur before the start of ops.
+ // Associate those with bytecode offset 0.
+ if (curNativeOffset > nativeOffset)
+ return script->code();
+
+ jsbytecode* lastPC = curPC;
+ while (true) {
+        // If the high bit is set, the native offset delta relative to the
+        // previous pc is nonzero and is encoded next.
+ uint8_t b = reader.readByte();
+ if (b & 0x80)
+ curNativeOffset += reader.readUnsigned();
+
+        // Return the last PC that matched nativeOffset. Some bytecode ops
+        // generate no native code (e.g., constant-pushing ops like
+        // JSOP_INT8), so their entries share the same nativeOffset as the
+        // next op that does generate code.
+ if (curNativeOffset > nativeOffset)
+ return lastPC;
+
+ // The native address may lie in-between the last delta-entry in
+ // a pcMappingIndexEntry, and the next pcMappingIndexEntry.
+ if (!reader.more())
+ return curPC;
+
+ lastPC = curPC;
+ curPC += GetBytecodeLength(curPC);
+ }
+}
+
+void
+BaselineScript::toggleDebugTraps(JSScript* script, jsbytecode* pc)
+{
+ MOZ_ASSERT(script->baselineScript() == this);
+
+ // Only scripts compiled for debug mode have toggled calls.
+ if (!hasDebugInstrumentation())
+ return;
+
+ SrcNoteLineScanner scanner(script->notes(), script->lineno());
+
+ AutoWritableJitCode awjc(method());
+
+ for (uint32_t i = 0; i < numPCMappingIndexEntries(); i++) {
+ PCMappingIndexEntry& entry = pcMappingIndexEntry(i);
+
+ CompactBufferReader reader(pcMappingReader(i));
+ jsbytecode* curPC = script->offsetToPC(entry.pcOffset);
+ uint32_t nativeOffset = entry.nativeOffset;
+
+ MOZ_ASSERT(script->containsPC(curPC));
+
+ while (reader.more()) {
+ uint8_t b = reader.readByte();
+ if (b & 0x80)
+ nativeOffset += reader.readUnsigned();
+
+ scanner.advanceTo(script->pcToOffset(curPC));
+
+ if (!pc || pc == curPC) {
+ bool enabled = (script->stepModeEnabled() && scanner.isLineHeader()) ||
+ script->hasBreakpointsAt(curPC);
+
+ // Patch the trap.
+ CodeLocationLabel label(method(), CodeOffset(nativeOffset));
+ Assembler::ToggleCall(label, enabled);
+ }
+
+ curPC += GetBytecodeLength(curPC);
+ }
+ }
+}
+
+#ifdef JS_TRACE_LOGGING
+void
+BaselineScript::initTraceLogger(JSRuntime* runtime, JSScript* script,
+ const Vector<CodeOffset>& offsets)
+{
+#ifdef DEBUG
+ traceLoggerScriptsEnabled_ = TraceLogTextIdEnabled(TraceLogger_Scripts);
+ traceLoggerEngineEnabled_ = TraceLogTextIdEnabled(TraceLogger_Engine);
+#endif
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(runtime);
+
+ MOZ_ASSERT(offsets.length() == numTraceLoggerToggleOffsets_);
+ for (size_t i = 0; i < offsets.length(); i++)
+ traceLoggerToggleOffsets()[i] = offsets[i].offset();
+
+ if (TraceLogTextIdEnabled(TraceLogger_Engine) || TraceLogTextIdEnabled(TraceLogger_Scripts)) {
+ traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
+ for (size_t i = 0; i < numTraceLoggerToggleOffsets_; i++) {
+ CodeLocationLabel label(method_, CodeOffset(traceLoggerToggleOffsets()[i]));
+ Assembler::ToggleToCmp(label);
+ }
+ }
+}
+
+void
+BaselineScript::toggleTraceLoggerScripts(JSRuntime* runtime, JSScript* script, bool enable)
+{
+ DebugOnly<bool> engineEnabled = TraceLogTextIdEnabled(TraceLogger_Engine);
+ MOZ_ASSERT(enable == !traceLoggerScriptsEnabled_);
+ MOZ_ASSERT(engineEnabled == traceLoggerEngineEnabled_);
+
+    // Patch the logging script textId so that, when logging, the
+    // script-specific textId is used instead of the global Scripts textId.
+ TraceLoggerThread* logger = TraceLoggerForMainThread(runtime);
+ if (enable && !traceLoggerScriptEvent_.hasPayload())
+ traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
+
+ AutoWritableJitCode awjc(method());
+
+ // Enable/Disable the traceLogger.
+ for (size_t i = 0; i < numTraceLoggerToggleOffsets_; i++) {
+ CodeLocationLabel label(method_, CodeOffset(traceLoggerToggleOffsets()[i]));
+ if (enable)
+ Assembler::ToggleToCmp(label);
+ else
+ Assembler::ToggleToJmp(label);
+ }
+
+#ifdef DEBUG
+ traceLoggerScriptsEnabled_ = enable;
+#endif
+}
+
+void
+BaselineScript::toggleTraceLoggerEngine(bool enable)
+{
+ DebugOnly<bool> scriptsEnabled = TraceLogTextIdEnabled(TraceLogger_Scripts);
+ MOZ_ASSERT(enable == !traceLoggerEngineEnabled_);
+ MOZ_ASSERT(scriptsEnabled == traceLoggerScriptsEnabled_);
+
+ AutoWritableJitCode awjc(method());
+
+ // Enable/Disable the traceLogger prologue and epilogue.
+ for (size_t i = 0; i < numTraceLoggerToggleOffsets_; i++) {
+ CodeLocationLabel label(method_, CodeOffset(traceLoggerToggleOffsets()[i]));
+ if (enable)
+ Assembler::ToggleToCmp(label);
+ else
+ Assembler::ToggleToJmp(label);
+ }
+
+#ifdef DEBUG
+ traceLoggerEngineEnabled_ = enable;
+#endif
+}
+#endif
+
+void
+BaselineScript::toggleProfilerInstrumentation(bool enable)
+{
+ if (enable == isProfilerInstrumentationOn())
+ return;
+
+ JitSpew(JitSpew_BaselineIC, " toggling profiling %s for BaselineScript %p",
+ enable ? "on" : "off", this);
+
+ // Toggle the jump
+ CodeLocationLabel enterToggleLocation(method_, CodeOffset(profilerEnterToggleOffset_));
+ CodeLocationLabel exitToggleLocation(method_, CodeOffset(profilerExitToggleOffset_));
+ if (enable) {
+ Assembler::ToggleToCmp(enterToggleLocation);
+ Assembler::ToggleToCmp(exitToggleLocation);
+ flags_ |= uint32_t(PROFILER_INSTRUMENTATION_ON);
+ } else {
+ Assembler::ToggleToJmp(enterToggleLocation);
+ Assembler::ToggleToJmp(exitToggleLocation);
+ flags_ &= ~uint32_t(PROFILER_INSTRUMENTATION_ON);
+ }
+}
+
+void
+BaselineScript::purgeOptimizedStubs(Zone* zone)
+{
+ JitSpew(JitSpew_BaselineIC, "Purging optimized stubs");
+
+ for (size_t i = 0; i < numICEntries(); i++) {
+ BaselineICEntry& entry = icEntry(i);
+ if (!entry.hasStub())
+ continue;
+
+ ICStub* lastStub = entry.firstStub();
+ while (lastStub->next())
+ lastStub = lastStub->next();
+
+ if (lastStub->isFallback()) {
+ // Unlink all stubs allocated in the optimized space.
+ ICStub* stub = entry.firstStub();
+ ICStub* prev = nullptr;
+
+ while (stub->next()) {
+ if (!stub->allocatedInFallbackSpace()) {
+ lastStub->toFallbackStub()->unlinkStub(zone, prev, stub);
+ stub = stub->next();
+ continue;
+ }
+
+ prev = stub;
+ stub = stub->next();
+ }
+
+ if (lastStub->isMonitoredFallback()) {
+ // Monitor stubs can't make calls, so are always in the
+ // optimized stub space.
+ ICTypeMonitor_Fallback* lastMonStub =
+ lastStub->toMonitoredFallbackStub()->fallbackMonitorStub();
+ lastMonStub->resetMonitorStubChain(zone);
+ }
+ } else if (lastStub->isTypeMonitor_Fallback()) {
+ lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone);
+ } else {
+ MOZ_ASSERT(lastStub->isTableSwitch());
+ }
+ }
+
+#ifdef DEBUG
+ // All remaining stubs must be allocated in the fallback space.
+ for (size_t i = 0; i < numICEntries(); i++) {
+ BaselineICEntry& entry = icEntry(i);
+ if (!entry.hasStub())
+ continue;
+
+ ICStub* stub = entry.firstStub();
+ while (stub->next()) {
+ MOZ_ASSERT(stub->allocatedInFallbackSpace());
+ stub = stub->next();
+ }
+ }
+#endif
+}
+
+void
+jit::FinishDiscardBaselineScript(FreeOp* fop, JSScript* script)
+{
+ if (!script->hasBaselineScript())
+ return;
+
+ if (script->baselineScript()->active()) {
+ // Script is live on the stack. Keep the BaselineScript, but destroy
+ // stubs allocated in the optimized stub space.
+ script->baselineScript()->purgeOptimizedStubs(script->zone());
+
+ // Reset |active| flag so that we don't need a separate script
+ // iteration to unmark them.
+ script->baselineScript()->resetActive();
+
+ // The baseline caches have been wiped out, so the script will need to
+ // warm back up before it can be inlined during Ion compilation.
+ script->baselineScript()->clearIonCompiledOrInlined();
+ return;
+ }
+
+ BaselineScript* baseline = script->baselineScript();
+ script->setBaselineScript(nullptr, nullptr);
+ BaselineScript::Destroy(fop, baseline);
+}
+
+void
+jit::AddSizeOfBaselineData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf, size_t* data,
+ size_t* fallbackStubs)
+{
+ if (script->hasBaselineScript())
+ script->baselineScript()->addSizeOfIncludingThis(mallocSizeOf, data, fallbackStubs);
+}
+
+void
+jit::ToggleBaselineProfiling(JSRuntime* runtime, bool enable)
+{
+ JitRuntime* jrt = runtime->jitRuntime();
+ if (!jrt)
+ return;
+
+ for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+ if (!script->hasBaselineScript())
+ continue;
+ AutoWritableJitCode awjc(script->baselineScript()->method());
+ script->baselineScript()->toggleProfilerInstrumentation(enable);
+ }
+ }
+}
+
+#ifdef JS_TRACE_LOGGING
+void
+jit::ToggleBaselineTraceLoggerScripts(JSRuntime* runtime, bool enable)
+{
+ for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+ if (!script->hasBaselineScript())
+ continue;
+ script->baselineScript()->toggleTraceLoggerScripts(runtime, script, enable);
+ }
+ }
+}
+
+void
+jit::ToggleBaselineTraceLoggerEngine(JSRuntime* runtime, bool enable)
+{
+ for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+ if (!script->hasBaselineScript())
+ continue;
+ script->baselineScript()->toggleTraceLoggerEngine(enable);
+ }
+ }
+}
+#endif
+
+static void
+MarkActiveBaselineScripts(JSRuntime* rt, const JitActivationIterator& activation)
+{
+ for (jit::JitFrameIterator iter(activation); !iter.done(); ++iter) {
+ switch (iter.type()) {
+ case JitFrame_BaselineJS:
+ iter.script()->baselineScript()->setActive();
+ break;
+ case JitFrame_Exit:
+ if (iter.exitFrame()->is<LazyLinkExitFrameLayout>()) {
+ LazyLinkExitFrameLayout* ll = iter.exitFrame()->as<LazyLinkExitFrameLayout>();
+ ScriptFromCalleeToken(ll->jsFrame()->calleeToken())->baselineScript()->setActive();
+ }
+ break;
+ case JitFrame_Bailout:
+ case JitFrame_IonJS: {
+ // Keep the baseline script around, since bailouts from the ion
+ // jitcode might need to re-enter into the baseline jitcode.
+ iter.script()->baselineScript()->setActive();
+ for (InlineFrameIterator inlineIter(rt, &iter); inlineIter.more(); ++inlineIter)
+ inlineIter.script()->baselineScript()->setActive();
+ break;
+ }
+ default:;
+ }
+ }
+}
+
+void
+jit::MarkActiveBaselineScripts(Zone* zone)
+{
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ for (JitActivationIterator iter(rt); !iter.done(); ++iter) {
+ if (iter->compartment()->zone() == zone)
+ MarkActiveBaselineScripts(rt, iter);
+ }
+}
diff --git a/js/src/jit/BaselineJIT.h b/js/src/jit/BaselineJIT.h
new file mode 100644
index 000000000..5e7775a61
--- /dev/null
+++ b/js/src/jit/BaselineJIT.h
@@ -0,0 +1,635 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineJIT_h
+#define jit_BaselineJIT_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "ds/LifoAlloc.h"
+#include "jit/Bailouts.h"
+#include "jit/IonCode.h"
+#include "jit/MacroAssembler.h"
+#include "vm/TraceLogging.h"
+
+namespace js {
+namespace jit {
+
+class StackValue;
+class BaselineICEntry;
+class ICStub;
+
+class PCMappingSlotInfo
+{
+ uint8_t slotInfo_;
+
+ public:
+ // SlotInfo encoding:
+ // Bits 0 & 1: number of slots at top of stack which are unsynced.
+ // Bits 2 & 3: SlotLocation of top slot value (only relevant if numUnsynced > 0).
+    // Bits 4 & 5: SlotLocation of next slot value (only relevant if numUnsynced > 1).
+ enum SlotLocation { SlotInR0 = 0, SlotInR1 = 1, SlotIgnore = 3 };
+
+ PCMappingSlotInfo()
+ : slotInfo_(0)
+ { }
+
+ explicit PCMappingSlotInfo(uint8_t slotInfo)
+ : slotInfo_(slotInfo)
+ { }
+
+ static inline bool ValidSlotLocation(SlotLocation loc) {
+ return (loc == SlotInR0) || (loc == SlotInR1) || (loc == SlotIgnore);
+ }
+
+ static SlotLocation ToSlotLocation(const StackValue* stackVal);
+
+ inline static PCMappingSlotInfo MakeSlotInfo() { return PCMappingSlotInfo(0); }
+
+ inline static PCMappingSlotInfo MakeSlotInfo(SlotLocation topSlotLoc) {
+ MOZ_ASSERT(ValidSlotLocation(topSlotLoc));
+ return PCMappingSlotInfo(1 | (topSlotLoc << 2));
+ }
+
+ inline static PCMappingSlotInfo MakeSlotInfo(SlotLocation topSlotLoc, SlotLocation nextSlotLoc) {
+ MOZ_ASSERT(ValidSlotLocation(topSlotLoc));
+ MOZ_ASSERT(ValidSlotLocation(nextSlotLoc));
+ return PCMappingSlotInfo(2 | (topSlotLoc << 2) | (nextSlotLoc) << 4);
+ }
+
+ inline unsigned numUnsynced() const {
+ return slotInfo_ & 0x3;
+ }
+ inline SlotLocation topSlotLocation() const {
+ return static_cast<SlotLocation>((slotInfo_ >> 2) & 0x3);
+ }
+ inline SlotLocation nextSlotLocation() const {
+ return static_cast<SlotLocation>((slotInfo_ >> 4) & 0x3);
+ }
+ inline uint8_t toByte() const {
+ return slotInfo_;
+ }
+};
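+
+// Worked example (for illustration): MakeSlotInfo(SlotInR0, SlotInR1) yields
+// 2 | (SlotInR0 << 2) | (SlotInR1 << 4) == 0x12, so numUnsynced() == 2,
+// topSlotLocation() == SlotInR0 and nextSlotLocation() == SlotInR1, i.e. the
+// top stack value lives in R0 and the next one in R1.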
+
+// A CompactBuffer is used to store native code offsets (relative to the
+// previous pc) and PCMappingSlotInfo bytes. To allow binary search into this
+// table, we maintain a second table of "index" entries. Every X ops, the
+// compiler will add an index entry, so that from the index entry to the
+// actual native code offset, we have to iterate at most X times.
+struct PCMappingIndexEntry
+{
+ // jsbytecode offset.
+ uint32_t pcOffset;
+
+ // Native code offset.
+ uint32_t nativeOffset;
+
+ // Offset in the CompactBuffer where data for pcOffset starts.
+ uint32_t bufferOffset;
+};
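+
+// Worked example (for illustration): given an index entry
+// { pcOffset: 10, nativeOffset: 0x40, bufferOffset: B }, the compact buffer
+// at (hypothetical) offset B holds one byte per bytecode op starting at pc
+// offset 10. The low seven bits of each byte are a PCMappingSlotInfo, and if
+// the high bit is set a variable-length native-code delta from the previous
+// op follows. nativeCodeForPC() walks these bytes, accumulating the deltas
+// onto 0x40, until it reaches the requested pc.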
+
+// Describes a single wasm::ImportExit which jumps (via an import with
+// the given index) directly to a BaselineScript or IonScript.
+struct DependentWasmImport
+{
+ wasm::Instance* instance;
+ size_t importIndex;
+
+ DependentWasmImport(wasm::Instance& instance, size_t importIndex)
+ : instance(&instance),
+ importIndex(importIndex)
+ { }
+};
+
+struct BaselineScript
+{
+ public:
+ // Largest script that the baseline compiler will attempt to compile.
+#if defined(JS_CODEGEN_ARM)
+ // ARM branches can only reach 32MB, and the macroassembler doesn't mitigate
+ // that limitation. Use a stricter limit on the acceptable script size to
+ // avoid crashing when branches go out of range.
+ static const uint32_t MAX_JSSCRIPT_LENGTH = 1000000u;
+#else
+ static const uint32_t MAX_JSSCRIPT_LENGTH = 0x0fffffffu;
+#endif
+
+ // Limit the locals on a given script so that stack check on baseline frames
+ // doesn't overflow a uint32_t value.
+ // (MAX_JSSCRIPT_SLOTS * sizeof(Value)) must fit within a uint32_t.
+ static const uint32_t MAX_JSSCRIPT_SLOTS = 0xffffu;
+
+ private:
+ // Code pointer containing the actual method.
+ HeapPtr<JitCode*> method_;
+
+ // For functions with a call object, template objects to use for the call
+ // object and decl env object (linked via the call object's enclosing
+ // scope).
+ HeapPtr<EnvironmentObject*> templateEnv_;
+
+ // Allocated space for fallback stubs.
+ FallbackICStubSpace fallbackStubSpace_;
+
+    // If non-null, the list of wasm::Instances that contain an optimized call
+ // directly to this script.
+ Vector<DependentWasmImport>* dependentWasmImports_;
+
+ // Native code offset right before the scope chain is initialized.
+ uint32_t prologueOffset_;
+
+ // Native code offset right before the frame is popped and the method
+ // returned from.
+ uint32_t epilogueOffset_;
+
+ // The offsets for the toggledJump instructions for profiler instrumentation.
+ uint32_t profilerEnterToggleOffset_;
+ uint32_t profilerExitToggleOffset_;
+
+ // The offsets and event used for Tracelogger toggling.
+#ifdef JS_TRACE_LOGGING
+# ifdef DEBUG
+ bool traceLoggerScriptsEnabled_;
+ bool traceLoggerEngineEnabled_;
+# endif
+ TraceLoggerEvent traceLoggerScriptEvent_;
+#endif
+
+    // Native code offset right after the debug prologue VM call returns, or
+ // would have returned. This offset is recorded even when debug mode is
+ // off to aid on-stack debug mode recompilation.
+ //
+ // We don't need one for the debug epilogue because that always happens
+ // right before the epilogue, so we just use the epilogue offset.
+ uint32_t postDebugPrologueOffset_;
+
+ public:
+ enum Flag {
+ // Flag set by JSScript::argumentsOptimizationFailed. Similar to
+ // JSScript::needsArgsObj_, but can be read from JIT code.
+ NEEDS_ARGS_OBJ = 1 << 0,
+
+ // Flag set when discarding JIT code, to indicate this script is
+ // on the stack and should not be discarded.
+ ACTIVE = 1 << 1,
+
+ // Flag set when the script contains any writes to its on-stack
+ // (rather than call object stored) arguments.
+ MODIFIES_ARGUMENTS = 1 << 2,
+
+ // Flag set when compiled for use with Debugger. Handles various
+ // Debugger hooks and compiles toggled calls for traps.
+ HAS_DEBUG_INSTRUMENTATION = 1 << 3,
+
+ // Flag set if this script has ever been Ion compiled, either directly
+ // or inlined into another script. This is cleared when the script's
+ // type information or caches are cleared.
+ ION_COMPILED_OR_INLINED = 1 << 4,
+
+ // Flag is set if this script has profiling instrumentation turned on.
+ PROFILER_INSTRUMENTATION_ON = 1 << 5
+ };
+
+ private:
+ uint32_t flags_;
+
+ private:
+ void trace(JSTracer* trc);
+
+ uint32_t icEntriesOffset_;
+ uint32_t icEntries_;
+
+ uint32_t pcMappingIndexOffset_;
+ uint32_t pcMappingIndexEntries_;
+
+ uint32_t pcMappingOffset_;
+ uint32_t pcMappingSize_;
+
+ // List mapping indexes of bytecode type sets to the offset of the opcode
+ // they correspond to, for use by TypeScript::BytecodeTypes.
+ uint32_t bytecodeTypeMapOffset_;
+
+ // For generator scripts, we store the native code address for each yield
+ // instruction.
+ uint32_t yieldEntriesOffset_;
+
+    // The tracelogger is disabled by default, so the logging code is emitted
+    // in a disabled state. We store the offsets that must be patched to
+    // enable the logging.
+ uint32_t traceLoggerToggleOffsetsOffset_;
+ uint32_t numTraceLoggerToggleOffsets_;
+
+ // The total bytecode length of all scripts we inlined when we Ion-compiled
+ // this script. 0 if Ion did not compile this script or if we didn't inline
+ // anything.
+ uint16_t inlinedBytecodeLength_;
+
+ // The max inlining depth where we can still inline all functions we inlined
+ // when we Ion-compiled this script. This starts as UINT8_MAX, since we have
+ // no data yet, and won't affect inlining heuristics in that case. The value
+ // is updated when we Ion-compile this script. See makeInliningDecision for
+ // more info.
+ uint8_t maxInliningDepth_;
+
+ // An ion compilation that is ready, but isn't linked yet.
+    IonBuilder* pendingBuilder_;
+
+ public:
+ // Do not call directly, use BaselineScript::New. This is public for cx->new_.
+ BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ uint32_t postDebugPrologueOffset);
+
+ ~BaselineScript() {
+ // The contents of the fallback stub space are removed and freed
+ // separately after the next minor GC. See BaselineScript::Destroy.
+ MOZ_ASSERT(fallbackStubSpace_.isEmpty());
+ }
+
+ static BaselineScript* New(JSScript* jsscript,
+ uint32_t prologueOffset, uint32_t epilogueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ uint32_t postDebugPrologueOffset,
+ size_t icEntries,
+ size_t pcMappingIndexEntries, size_t pcMappingSize,
+ size_t bytecodeTypeMapEntries,
+ size_t yieldEntries,
+ size_t traceLoggerToggleOffsetEntries);
+
+ static void Trace(JSTracer* trc, BaselineScript* script);
+ static void Destroy(FreeOp* fop, BaselineScript* script);
+
+ void purgeOptimizedStubs(Zone* zone);
+
+ static inline size_t offsetOfMethod() {
+ return offsetof(BaselineScript, method_);
+ }
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t* data,
+ size_t* fallbackStubs) const {
+ *data += mallocSizeOf(this);
+
+ // |data| already includes the ICStubSpace itself, so use
+ // sizeOfExcludingThis.
+ *fallbackStubs += fallbackStubSpace_.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ bool active() const {
+ return flags_ & ACTIVE;
+ }
+ void setActive() {
+ flags_ |= ACTIVE;
+ }
+ void resetActive() {
+ flags_ &= ~ACTIVE;
+ }
+
+ void setNeedsArgsObj() {
+ flags_ |= NEEDS_ARGS_OBJ;
+ }
+
+ void setModifiesArguments() {
+ flags_ |= MODIFIES_ARGUMENTS;
+ }
+ bool modifiesArguments() {
+ return flags_ & MODIFIES_ARGUMENTS;
+ }
+
+ void setHasDebugInstrumentation() {
+ flags_ |= HAS_DEBUG_INSTRUMENTATION;
+ }
+ bool hasDebugInstrumentation() const {
+ return flags_ & HAS_DEBUG_INSTRUMENTATION;
+ }
+
+ void setIonCompiledOrInlined() {
+ flags_ |= ION_COMPILED_OR_INLINED;
+ }
+ void clearIonCompiledOrInlined() {
+ flags_ &= ~ION_COMPILED_OR_INLINED;
+ }
+ bool ionCompiledOrInlined() const {
+ return flags_ & ION_COMPILED_OR_INLINED;
+ }
+
+ uint32_t prologueOffset() const {
+ return prologueOffset_;
+ }
+ uint8_t* prologueEntryAddr() const {
+ return method_->raw() + prologueOffset_;
+ }
+
+ uint32_t epilogueOffset() const {
+ return epilogueOffset_;
+ }
+ uint8_t* epilogueEntryAddr() const {
+ return method_->raw() + epilogueOffset_;
+ }
+
+ uint32_t postDebugPrologueOffset() const {
+ return postDebugPrologueOffset_;
+ }
+ uint8_t* postDebugPrologueAddr() const {
+ return method_->raw() + postDebugPrologueOffset_;
+ }
+
+ BaselineICEntry* icEntryList() {
+ return (BaselineICEntry*)(reinterpret_cast<uint8_t*>(this) + icEntriesOffset_);
+ }
+ uint8_t** yieldEntryList() {
+ return (uint8_t**)(reinterpret_cast<uint8_t*>(this) + yieldEntriesOffset_);
+ }
+ PCMappingIndexEntry* pcMappingIndexEntryList() {
+ return (PCMappingIndexEntry*)(reinterpret_cast<uint8_t*>(this) + pcMappingIndexOffset_);
+ }
+ uint8_t* pcMappingData() {
+ return reinterpret_cast<uint8_t*>(this) + pcMappingOffset_;
+ }
+ FallbackICStubSpace* fallbackStubSpace() {
+ return &fallbackStubSpace_;
+ }
+
+ JitCode* method() const {
+ return method_;
+ }
+ void setMethod(JitCode* code) {
+ MOZ_ASSERT(!method_);
+ method_ = code;
+ }
+
+ EnvironmentObject* templateEnvironment() const {
+ return templateEnv_;
+ }
+ void setTemplateEnvironment(EnvironmentObject* templateEnv) {
+ MOZ_ASSERT(!templateEnv_);
+ templateEnv_ = templateEnv;
+ }
+
+ void toggleBarriers(bool enabled, ReprotectCode reprotect = Reprotect) {
+ method()->togglePreBarriers(enabled, reprotect);
+ }
+
+ bool containsCodeAddress(uint8_t* addr) const {
+ return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
+ }
+
+ BaselineICEntry& icEntry(size_t index);
+ BaselineICEntry& icEntryFromReturnOffset(CodeOffset returnOffset);
+ BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset);
+ BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry);
+ BaselineICEntry& callVMEntryFromPCOffset(uint32_t pcOffset);
+ BaselineICEntry& stackCheckICEntry(bool earlyCheck);
+ BaselineICEntry& warmupCountICEntry();
+ BaselineICEntry& icEntryFromReturnAddress(uint8_t* returnAddr);
+ uint8_t* returnAddressForIC(const BaselineICEntry& ent);
+
+ size_t numICEntries() const {
+ return icEntries_;
+ }
+
+ void copyICEntries(JSScript* script, const BaselineICEntry* entries, MacroAssembler& masm);
+ void adoptFallbackStubs(FallbackICStubSpace* stubSpace);
+
+ void copyYieldEntries(JSScript* script, Vector<uint32_t>& yieldOffsets);
+
+ PCMappingIndexEntry& pcMappingIndexEntry(size_t index);
+ CompactBufferReader pcMappingReader(size_t indexEntry);
+
+ size_t numPCMappingIndexEntries() const {
+ return pcMappingIndexEntries_;
+ }
+
+ void copyPCMappingIndexEntries(const PCMappingIndexEntry* entries);
+ void copyPCMappingEntries(const CompactBufferWriter& entries);
+
+ uint8_t* nativeCodeForPC(JSScript* script, jsbytecode* pc,
+ PCMappingSlotInfo* slotInfo = nullptr);
+
+ // Return the bytecode offset for a given native code address. Be careful
+ // when using this method: we don't emit code for some bytecode ops, so
+ // the result may not be accurate.
+ jsbytecode* approximatePcForNativeAddress(JSScript* script, uint8_t* nativeAddress);
+
+ MOZ_MUST_USE bool addDependentWasmImport(JSContext* cx, wasm::Instance& instance, uint32_t idx);
+ void removeDependentWasmImport(wasm::Instance& instance, uint32_t idx);
+ void unlinkDependentWasmImports(FreeOp* fop);
+ void clearDependentWasmImports();
+
+ // Toggle debug traps (used for breakpoints and step mode) in the script.
+ // If |pc| is nullptr, toggle traps for all ops in the script. Else, only
+ // toggle traps at |pc|.
+ void toggleDebugTraps(JSScript* script, jsbytecode* pc);
+
+ void toggleProfilerInstrumentation(bool enable);
+ bool isProfilerInstrumentationOn() const {
+ return flags_ & PROFILER_INSTRUMENTATION_ON;
+ }
+
+#ifdef JS_TRACE_LOGGING
+ void initTraceLogger(JSRuntime* runtime, JSScript* script, const Vector<CodeOffset>& offsets);
+ void toggleTraceLoggerScripts(JSRuntime* runtime, JSScript* script, bool enable);
+ void toggleTraceLoggerEngine(bool enable);
+
+ static size_t offsetOfTraceLoggerScriptEvent() {
+ return offsetof(BaselineScript, traceLoggerScriptEvent_);
+ }
+
+ uint32_t* traceLoggerToggleOffsets() {
+ MOZ_ASSERT(traceLoggerToggleOffsetsOffset_);
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) +
+ traceLoggerToggleOffsetsOffset_);
+ }
+#endif
+
+ void noteAccessedGetter(uint32_t pcOffset);
+ void noteArrayWriteHole(uint32_t pcOffset);
+
+ static size_t offsetOfFlags() {
+ return offsetof(BaselineScript, flags_);
+ }
+ static size_t offsetOfYieldEntriesOffset() {
+ return offsetof(BaselineScript, yieldEntriesOffset_);
+ }
+
+ static void writeBarrierPre(Zone* zone, BaselineScript* script);
+
+ uint32_t* bytecodeTypeMap() {
+ MOZ_ASSERT(bytecodeTypeMapOffset_);
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bytecodeTypeMapOffset_);
+ }
+
+ uint8_t maxInliningDepth() const {
+ return maxInliningDepth_;
+ }
+ void setMaxInliningDepth(uint32_t depth) {
+ MOZ_ASSERT(depth <= UINT8_MAX);
+ maxInliningDepth_ = depth;
+ }
+ void resetMaxInliningDepth() {
+ maxInliningDepth_ = UINT8_MAX;
+ }
+
+ uint16_t inlinedBytecodeLength() const {
+ return inlinedBytecodeLength_;
+ }
+ void setInlinedBytecodeLength(uint32_t len) {
+ if (len > UINT16_MAX)
+ len = UINT16_MAX;
+ inlinedBytecodeLength_ = len;
+ }
+
+ bool hasPendingIonBuilder() const {
+ return !!pendingBuilder_;
+ }
+
+ js::jit::IonBuilder* pendingIonBuilder() {
+ MOZ_ASSERT(hasPendingIonBuilder());
+ return pendingBuilder_;
+ }
+ void setPendingIonBuilder(JSRuntime* maybeRuntime, JSScript* script, js::jit::IonBuilder* builder) {
+ MOZ_ASSERT(script->baselineScript() == this);
+ MOZ_ASSERT(!builder || !hasPendingIonBuilder());
+
+ if (script->isIonCompilingOffThread())
+ script->setIonScript(maybeRuntime, ION_PENDING_SCRIPT);
+
+ pendingBuilder_ = builder;
+
+        // Lazy linking cannot happen during asm.js/wasm calls into Ion.
+ clearDependentWasmImports();
+
+ script->updateBaselineOrIonRaw(maybeRuntime);
+ }
+ void removePendingIonBuilder(JSScript* script) {
+ setPendingIonBuilder(nullptr, script, nullptr);
+ if (script->maybeIonScript() == ION_PENDING_SCRIPT)
+ script->setIonScript(nullptr, nullptr);
+ }
+
+};
+static_assert(sizeof(BaselineScript) % sizeof(uintptr_t) == 0,
+ "The data attached to the script must be aligned for fast JIT access.");
+
+inline bool
+IsBaselineEnabled(JSContext* cx)
+{
+#ifdef JS_CODEGEN_NONE
+ return false;
+#else
+ return cx->options().baseline();
+#endif
+}
+
+MethodStatus
+CanEnterBaselineMethod(JSContext* cx, RunState& state);
+
+MethodStatus
+CanEnterBaselineAtBranch(JSContext* cx, InterpreterFrame* fp, bool newType);
+
+JitExecStatus
+EnterBaselineMethod(JSContext* cx, RunState& state);
+
+JitExecStatus
+EnterBaselineAtBranch(JSContext* cx, InterpreterFrame* fp, jsbytecode* pc);
+
+void
+FinishDiscardBaselineScript(FreeOp* fop, JSScript* script);
+
+void
+AddSizeOfBaselineData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf, size_t* data,
+ size_t* fallbackStubs);
+
+void
+ToggleBaselineProfiling(JSRuntime* runtime, bool enable);
+
+void
+ToggleBaselineTraceLoggerScripts(JSRuntime* runtime, bool enable);
+void
+ToggleBaselineTraceLoggerEngine(JSRuntime* runtime, bool enable);
+
+struct BaselineBailoutInfo
+{
+ // Pointer into the current C stack, where overwriting will start.
+ uint8_t* incomingStack;
+
+ // The top and bottom heapspace addresses of the reconstructed stack
+ // which will be copied to the bottom.
+ uint8_t* copyStackTop;
+ uint8_t* copyStackBottom;
+
+ // Fields to store the top-of-stack baseline values that are held
+ // in registers. The setR0 and setR1 fields are flags indicating
+ // whether each one is initialized.
+ uint32_t setR0;
+ Value valueR0;
+ uint32_t setR1;
+ Value valueR1;
+
+ // The value of the frame pointer register on resume.
+ void* resumeFramePtr;
+
+ // The native code address to resume into.
+ void* resumeAddr;
+
+ // The bytecode pc where we will resume.
+ jsbytecode* resumePC;
+
+ // If resuming into a TypeMonitor IC chain, this field holds the
+ // address of the first stub in that chain. If this field is
+ // set, then the actual jitcode resumed into is the jitcode for
+ // the first stub, not the resumeAddr above. The resumeAddr
+ // above, in this case, is pushed onto the stack so that the
+ // TypeMonitor chain can tail-return into the main jitcode when done.
+ ICStub* monitorStub;
+
+ // Number of baseline frames to push on the stack.
+ uint32_t numFrames;
+
+    // Set if Ion bailed out on a global script before it could perform the
+    // global declaration conflicts check. In such cases the baseline script
+    // is resumed at the first pc instead of the prologue, so an extra flag
+    // is needed to perform the check.
+ bool checkGlobalDeclarationConflicts;
+
+ // The bailout kind.
+ BailoutKind bailoutKind;
+};
+
+uint32_t
+BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIterator& iter,
+ bool invalidate, BaselineBailoutInfo** bailoutInfo,
+ const ExceptionBailoutInfo* exceptionInfo);
+
+// Mark baseline scripts on the stack as active, so that they are not discarded
+// during GC.
+void
+MarkActiveBaselineScripts(Zone* zone);
+
+MethodStatus
+BaselineCompile(JSContext* cx, JSScript* script, bool forceDebugInstrumentation = false);
+
+} // namespace jit
+} // namespace js
+
+namespace JS {
+
+template <>
+struct DeletePolicy<js::jit::BaselineScript>
+{
+ explicit DeletePolicy(JSRuntime* rt) : rt_(rt) {}
+ void operator()(const js::jit::BaselineScript* script);
+
+ private:
+ JSRuntime* rt_;
+};
+
+} // namespace JS
+
+#endif /* jit_BaselineJIT_h */
diff --git a/js/src/jit/BitSet.cpp b/js/src/jit/BitSet.cpp
new file mode 100644
index 000000000..6171a1b17
--- /dev/null
+++ b/js/src/jit/BitSet.cpp
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BitSet.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool
+BitSet::init(TempAllocator& alloc)
+{
+ size_t sizeRequired = numWords() * sizeof(*bits_);
+
+ bits_ = (uint32_t*)alloc.allocate(sizeRequired);
+ if (!bits_)
+ return false;
+
+ memset(bits_, 0, sizeRequired);
+
+ return true;
+}
+
+bool
+BitSet::empty() const
+{
+ MOZ_ASSERT(bits_);
+ const uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ if (bits[i])
+ return false;
+ }
+ return true;
+}
+
+void
+BitSet::insertAll(const BitSet& other)
+{
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++)
+ bits[i] |= otherBits[i];
+}
+
+void
+BitSet::removeAll(const BitSet& other)
+{
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++)
+ bits[i] &= ~otherBits[i];
+}
+
+void
+BitSet::intersect(const BitSet& other)
+{
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++)
+ bits[i] &= otherBits[i];
+}
+
+// Returns true if the intersection caused the contents of the set to change.
+bool
+BitSet::fixedPointIntersect(const BitSet& other)
+{
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ bool changed = false;
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ uint32_t old = bits[i];
+ bits[i] &= otherBits[i];
+
+ if (!changed && old != bits[i])
+ changed = true;
+ }
+ return changed;
+}
+
+void
+BitSet::complement()
+{
+ MOZ_ASSERT(bits_);
+ uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++)
+ bits[i] = ~bits[i];
+}
+
+void
+BitSet::clear()
+{
+ MOZ_ASSERT(bits_);
+ uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++)
+ bits[i] = 0;
+}
diff --git a/js/src/jit/BitSet.h b/js/src/jit/BitSet.h
new file mode 100644
index 000000000..99541edb8
--- /dev/null
+++ b/js/src/jit/BitSet.h
@@ -0,0 +1,182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BitSet_h
+#define jit_BitSet_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/JitAllocPolicy.h"
+
+namespace js {
+namespace jit {
+
+// Provides constant time set insertion and removal, and fast linear
+// set operations such as intersection, difference, and union.
+// N.B. All set operations must be performed on sets with the same number
+// of bits.
+class BitSet
+{
+ public:
+ static const size_t BitsPerWord = 8 * sizeof(uint32_t);
+
+ static size_t RawLengthForBits(size_t bits) {
+ return (bits + BitsPerWord - 1) / BitsPerWord;
+ }
+
+ private:
+ uint32_t* bits_;
+ const unsigned int numBits_;
+
+ static inline uint32_t bitForValue(unsigned int value) {
+ return 1l << uint32_t(value % BitsPerWord);
+ }
+
+ static inline unsigned int wordForValue(unsigned int value) {
+ return value / BitsPerWord;
+ }
+
+ inline unsigned int numWords() const {
+ return RawLengthForBits(numBits_);
+ }
+
+ BitSet(const BitSet&) = delete;
+ void operator=(const BitSet&) = delete;
+
+ public:
+ class Iterator;
+
+ explicit BitSet(unsigned int numBits) :
+ bits_(nullptr),
+ numBits_(numBits) {}
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+
+ unsigned int getNumBits() const {
+ return numBits_;
+ }
+
+ // O(1): Check if this set contains the given value.
+ bool contains(unsigned int value) const {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ return !!(bits_[wordForValue(value)] & bitForValue(value));
+ }
+
+    // O(numBits): Check whether this set is empty.
+ bool empty() const;
+
+ // O(1): Insert the given value into this set.
+ void insert(unsigned int value) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ bits_[wordForValue(value)] |= bitForValue(value);
+ }
+
+ // O(numBits): Insert every element of the given set into this set.
+ void insertAll(const BitSet& other);
+
+ // O(1): Remove the given value from this set.
+ void remove(unsigned int value) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ bits_[wordForValue(value)] &= ~bitForValue(value);
+ }
+
+    // O(numBits): Remove every element of the given set from this set.
+ void removeAll(const BitSet& other);
+
+ // O(numBits): Intersect this set with the given set.
+ void intersect(const BitSet& other);
+
+ // O(numBits): Intersect this set with the given set; return whether the
+ // intersection caused the set to change.
+ bool fixedPointIntersect(const BitSet& other);
+
+    // O(numBits): Complement the set in place.
+ void complement();
+
+ // O(numBits): Clear this set.
+ void clear();
+
+ uint32_t* raw() const {
+ return bits_;
+ }
+ size_t rawLength() const {
+ return numWords();
+ }
+};
+
+class BitSet::Iterator
+{
+ private:
+ BitSet& set_;
+ unsigned index_;
+ unsigned word_;
+ uint32_t value_;
+
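+    // Invariant maintained by skipEmpty(): when more() is true, value_ equals
+    // bits_[word_] shifted right so that its bit 0 corresponds to index_, and
+    // that bit is set.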
+ void skipEmpty() {
+ // Skip words containing only zeros.
+ unsigned numWords = set_.numWords();
+ const uint32_t* bits = set_.bits_;
+ while (value_ == 0) {
+ word_++;
+ if (word_ == numWords)
+ return;
+
+ index_ = word_ * BitSet::BitsPerWord;
+ value_ = bits[word_];
+ }
+
+ // Be careful: the result of CountTrailingZeroes32 is undefined if the
+ // input is 0.
+ int numZeros = mozilla::CountTrailingZeroes32(value_);
+ index_ += numZeros;
+ value_ >>= numZeros;
+
+ MOZ_ASSERT_IF(index_ < set_.numBits_, set_.contains(index_));
+ }
+
+ public:
+ explicit Iterator(BitSet& set) :
+ set_(set),
+ index_(0),
+ word_(0),
+ value_(set.bits_[0])
+ {
+ skipEmpty();
+ }
+
+ inline bool more() const {
+ return word_ < set_.numWords();
+ }
+ explicit operator bool() const {
+ return more();
+ }
+
+ inline void operator++() {
+ MOZ_ASSERT(more());
+ MOZ_ASSERT(index_ < set_.numBits_);
+
+ index_++;
+ value_ >>= 1;
+
+ skipEmpty();
+ }
+
+ unsigned int operator*() {
+ MOZ_ASSERT(index_ < set_.numBits_);
+ return index_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BitSet_h */
diff --git a/js/src/jit/BytecodeAnalysis.cpp b/js/src/jit/BytecodeAnalysis.cpp
new file mode 100644
index 000000000..f15420a7d
--- /dev/null
+++ b/js/src/jit/BytecodeAnalysis.cpp
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BytecodeAnalysis.h"
+
+#include "jsopcode.h"
+#include "jit/JitSpewer.h"
+#include "jsopcodeinlines.h"
+#include "jsscriptinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+BytecodeAnalysis::BytecodeAnalysis(TempAllocator& alloc, JSScript* script)
+ : script_(script),
+ infos_(alloc),
+ usesEnvironmentChain_(false),
+ hasTryFinally_(false),
+ hasSetArg_(false)
+{
+}
+
+// Bytecode range containing only catch or finally code.
+struct CatchFinallyRange
+{
+ uint32_t start; // Inclusive.
+ uint32_t end; // Exclusive.
+
+ CatchFinallyRange(uint32_t start, uint32_t end)
+ : start(start), end(end)
+ {
+ MOZ_ASSERT(end > start);
+ }
+
+ bool contains(uint32_t offset) const {
+ return start <= offset && offset < end;
+ }
+};
+
+bool
+BytecodeAnalysis::init(TempAllocator& alloc, GSNCache& gsn)
+{
+ if (!infos_.growByUninitialized(script_->length()))
+ return false;
+
+ // Initialize the env chain slot if either the function needs some
+ // EnvironmentObject (like a CallObject) or the script uses the env
+ // chain. The latter case is handled below.
+ usesEnvironmentChain_ = script_->module() || script_->initialEnvironmentShape() ||
+ (script_->functionDelazifying() &&
+ script_->functionDelazifying()->needsSomeEnvironmentObject());
+
+ jsbytecode* end = script_->codeEnd();
+
+ // Clear all BytecodeInfo.
+ mozilla::PodZero(infos_.begin(), infos_.length());
+ infos_[0].init(/*stackDepth=*/0);
+
+ Vector<CatchFinallyRange, 0, JitAllocPolicy> catchFinallyRanges(alloc);
+
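+    // Walk the bytecode linearly, propagating stack depth to every reachable
+    // opcode and marking jump targets. Opcodes whose BytecodeInfo was never
+    // initialized by a predecessor are unreachable and are skipped.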
+ jsbytecode* nextpc;
+ for (jsbytecode* pc = script_->code(); pc < end; pc = nextpc) {
+ JSOp op = JSOp(*pc);
+ nextpc = pc + GetBytecodeLength(pc);
+ unsigned offset = script_->pcToOffset(pc);
+
+ JitSpew(JitSpew_BaselineOp, "Analyzing op @ %d (end=%d): %s",
+ int(script_->pcToOffset(pc)), int(script_->length()), CodeName[op]);
+
+ // If this bytecode info has not yet been initialized, it's not reachable.
+ if (!infos_[offset].initialized)
+ continue;
+
+ unsigned stackDepth = infos_[offset].stackDepth;
+#ifdef DEBUG
+ for (jsbytecode* chkpc = pc + 1; chkpc < (pc + GetBytecodeLength(pc)); chkpc++)
+ MOZ_ASSERT(!infos_[script_->pcToOffset(chkpc)].initialized);
+#endif
+
+ unsigned nuses = GetUseCount(script_, offset);
+ unsigned ndefs = GetDefCount(script_, offset);
+
+ MOZ_ASSERT(stackDepth >= nuses);
+ stackDepth -= nuses;
+ stackDepth += ndefs;
+
+ // If stack depth exceeds max allowed by analysis, fail fast.
+ MOZ_ASSERT(stackDepth <= BytecodeInfo::MAX_STACK_DEPTH);
+
+ switch (op) {
+ case JSOP_TABLESWITCH: {
+ unsigned defaultOffset = offset + GET_JUMP_OFFSET(pc);
+ jsbytecode* pc2 = pc + JUMP_OFFSET_LEN;
+ int32_t low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ int32_t high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+
+ infos_[defaultOffset].init(stackDepth);
+ infos_[defaultOffset].jumpTarget = true;
+
+ for (int32_t i = low; i <= high; i++) {
+ unsigned targetOffset = offset + GET_JUMP_OFFSET(pc2);
+ if (targetOffset != offset) {
+ infos_[targetOffset].init(stackDepth);
+ infos_[targetOffset].jumpTarget = true;
+ }
+ pc2 += JUMP_OFFSET_LEN;
+ }
+ break;
+ }
+
+ case JSOP_TRY: {
+ JSTryNote* tn = script_->trynotes()->vector;
+ JSTryNote* tnlimit = tn + script_->trynotes()->length;
+ for (; tn < tnlimit; tn++) {
+ unsigned startOffset = script_->mainOffset() + tn->start;
+ if (startOffset == offset + 1) {
+ unsigned catchOffset = startOffset + tn->length;
+
+ if (tn->kind != JSTRY_FOR_IN) {
+ infos_[catchOffset].init(stackDepth);
+ infos_[catchOffset].jumpTarget = true;
+ }
+ }
+ }
+
+ // Get the pc of the last instruction in the try block. It's a JSOP_GOTO to
+ // jump over the catch/finally blocks.
+ jssrcnote* sn = GetSrcNote(gsn, script_, pc);
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_TRY);
+
+ jsbytecode* endOfTry = pc + GetSrcNoteOffset(sn, 0);
+ MOZ_ASSERT(JSOp(*endOfTry) == JSOP_GOTO);
+
+ jsbytecode* afterTry = endOfTry + GET_JUMP_OFFSET(endOfTry);
+ MOZ_ASSERT(afterTry > endOfTry);
+
+ // Pop CatchFinallyRanges that are no longer needed.
+ while (!catchFinallyRanges.empty() && catchFinallyRanges.back().end <= offset)
+ catchFinallyRanges.popBack();
+
+ CatchFinallyRange range(script_->pcToOffset(endOfTry), script_->pcToOffset(afterTry));
+ if (!catchFinallyRanges.append(range))
+ return false;
+ break;
+ }
+
+ case JSOP_LOOPENTRY:
+ for (size_t i = 0; i < catchFinallyRanges.length(); i++) {
+ if (catchFinallyRanges[i].contains(offset))
+ infos_[offset].loopEntryInCatchOrFinally = true;
+ }
+ break;
+
+ case JSOP_GETNAME:
+ case JSOP_BINDNAME:
+ case JSOP_BINDVAR:
+ case JSOP_SETNAME:
+ case JSOP_STRICTSETNAME:
+ case JSOP_DELNAME:
+ case JSOP_GETALIASEDVAR:
+ case JSOP_SETALIASEDVAR:
+ case JSOP_LAMBDA:
+ case JSOP_LAMBDA_ARROW:
+ case JSOP_DEFFUN:
+ case JSOP_DEFVAR:
+ usesEnvironmentChain_ = true;
+ break;
+
+ case JSOP_GETGNAME:
+ case JSOP_SETGNAME:
+ case JSOP_STRICTSETGNAME:
+ if (script_->hasNonSyntacticScope())
+ usesEnvironmentChain_ = true;
+ break;
+
+ case JSOP_FINALLY:
+ hasTryFinally_ = true;
+ break;
+
+ case JSOP_SETARG:
+ hasSetArg_ = true;
+ break;
+
+ default:
+ break;
+ }
+
+ bool jump = IsJumpOpcode(op);
+ if (jump) {
+ // Case instructions do not push the lvalue back when branching.
+ unsigned newStackDepth = stackDepth;
+ if (op == JSOP_CASE)
+ newStackDepth--;
+
+ unsigned targetOffset = offset + GET_JUMP_OFFSET(pc);
+
+            // If this is a backedge to an un-analyzed segment, analyze from there.
+ bool jumpBack = (targetOffset < offset) && !infos_[targetOffset].initialized;
+
+ infos_[targetOffset].init(newStackDepth);
+ infos_[targetOffset].jumpTarget = true;
+
+ if (jumpBack)
+ nextpc = script_->offsetToPC(targetOffset);
+ }
+
+ // Handle any fallthrough from this opcode.
+ if (BytecodeFallsThrough(op)) {
+ jsbytecode* fallthrough = pc + GetBytecodeLength(pc);
+ MOZ_ASSERT(fallthrough < end);
+ unsigned fallthroughOffset = script_->pcToOffset(fallthrough);
+
+ infos_[fallthroughOffset].init(stackDepth);
+
+ // Treat the fallthrough of a branch instruction as a jump target.
+ if (jump)
+ infos_[fallthroughOffset].jumpTarget = true;
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/BytecodeAnalysis.h b/js/src/jit/BytecodeAnalysis.h
new file mode 100644
index 000000000..c040ca278
--- /dev/null
+++ b/js/src/jit/BytecodeAnalysis.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BytecodeAnalysis_h
+#define jit_BytecodeAnalysis_h
+
+#include "jsscript.h"
+#include "jit/JitAllocPolicy.h"
+#include "js/Vector.h"
+
+namespace js {
+namespace jit {
+
+// Basic information about bytecodes in the script. Used to help baseline compilation.
+struct BytecodeInfo
+{
+ static const uint16_t MAX_STACK_DEPTH = 0xffffU;
+ uint16_t stackDepth;
+ bool initialized : 1;
+ bool jumpTarget : 1;
+
+ // If true, this is a JSOP_LOOPENTRY op inside a catch or finally block.
+ bool loopEntryInCatchOrFinally : 1;
+
+ void init(unsigned depth) {
+ MOZ_ASSERT(depth <= MAX_STACK_DEPTH);
+ MOZ_ASSERT_IF(initialized, stackDepth == depth);
+ initialized = true;
+ stackDepth = depth;
+ }
+};
+
+class BytecodeAnalysis
+{
+ JSScript* script_;
+ Vector<BytecodeInfo, 0, JitAllocPolicy> infos_;
+
+ bool usesEnvironmentChain_;
+ bool hasTryFinally_;
+ bool hasSetArg_;
+
+ public:
+ explicit BytecodeAnalysis(TempAllocator& alloc, JSScript* script);
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc, GSNCache& gsn);
+
+ BytecodeInfo& info(jsbytecode* pc) {
+ MOZ_ASSERT(infos_[script_->pcToOffset(pc)].initialized);
+ return infos_[script_->pcToOffset(pc)];
+ }
+
+ BytecodeInfo* maybeInfo(jsbytecode* pc) {
+ if (infos_[script_->pcToOffset(pc)].initialized)
+ return &infos_[script_->pcToOffset(pc)];
+ return nullptr;
+ }
+
+ bool usesEnvironmentChain() const {
+ return usesEnvironmentChain_;
+ }
+
+ bool hasTryFinally() const {
+ return hasTryFinally_;
+ }
+
+ bool hasSetArg() const {
+ return hasSetArg_;
+ }
+};
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BytecodeAnalysis_h */
diff --git a/js/src/jit/C1Spewer.cpp b/js/src/jit/C1Spewer.cpp
new file mode 100644
index 000000000..f334345ba
--- /dev/null
+++ b/js/src/jit/C1Spewer.cpp
@@ -0,0 +1,194 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_JITSPEW
+
+#include "jit/C1Spewer.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include <time.h>
+
+#include "jit/BacktrackingAllocator.h"
+#include "jit/LIR.h"
+#include "jit/MIRGraph.h"
+
+#include "vm/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+C1Spewer::beginFunction(MIRGraph* graph, JSScript* script)
+{
+ this->graph = graph;
+
+ out_.printf("begin_compilation\n");
+ if (script) {
+ out_.printf(" name \"%s:%" PRIuSIZE "\"\n", script->filename(), script->lineno());
+ out_.printf(" method \"%s:%" PRIuSIZE "\"\n", script->filename(), script->lineno());
+ } else {
+ out_.printf(" name \"wasm compilation\"\n");
+ out_.printf(" method \"wasm compilation\"\n");
+ }
+ out_.printf(" date %d\n", (int)time(nullptr));
+ out_.printf("end_compilation\n");
+}
+
+void
+C1Spewer::spewPass(const char* pass)
+{
+ out_.printf("begin_cfg\n");
+ out_.printf(" name \"%s\"\n", pass);
+
+ for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
+ spewPass(out_, *block);
+
+ out_.printf("end_cfg\n");
+}
+
+void
+C1Spewer::spewRanges(const char* pass, BacktrackingAllocator* regalloc)
+{
+ out_.printf("begin_ranges\n");
+ out_.printf(" name \"%s\"\n", pass);
+
+ for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
+ spewRanges(out_, *block, regalloc);
+
+ out_.printf("end_ranges\n");
+}
+
+void
+C1Spewer::endFunction()
+{
+}
+
+static void
+DumpDefinition(GenericPrinter& out, MDefinition* def)
+{
+ out.printf(" ");
+ out.printf("%u %u ", def->id(), unsigned(def->useCount()));
+ def->printName(out);
+ out.printf(" ");
+ def->printOpcode(out);
+ out.printf(" <|@\n");
+}
+
+static void
+DumpLIR(GenericPrinter& out, LNode* ins)
+{
+ out.printf(" ");
+ out.printf("%d ", ins->id());
+ ins->dump(out);
+ out.printf(" <|@\n");
+}
+
+void
+C1Spewer::spewRanges(GenericPrinter& out, BacktrackingAllocator* regalloc, LNode* ins)
+{
+ for (size_t k = 0; k < ins->numDefs(); k++) {
+ uint32_t id = ins->getDef(k)->virtualRegister();
+ VirtualRegister* vreg = &regalloc->vregs[id];
+
+ for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ out.printf("%d object \"", id);
+ out.printf("%s", range->bundle()->allocation().toString().get());
+ out.printf("\" %d -1", id);
+ out.printf(" [%u, %u[", range->from().bits(), range->to().bits());
+ for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++)
+ out.printf(" %u M", usePos->pos.bits());
+ out.printf(" \"\"\n");
+ }
+ }
+}
+
+void
+C1Spewer::spewRanges(GenericPrinter& out, MBasicBlock* block, BacktrackingAllocator* regalloc)
+{
+ LBlock* lir = block->lir();
+ if (!lir)
+ return;
+
+ for (size_t i = 0; i < lir->numPhis(); i++)
+ spewRanges(out, regalloc, lir->getPhi(i));
+
+ for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
+ spewRanges(out, regalloc, *ins);
+}
+
+void
+C1Spewer::spewPass(GenericPrinter& out, MBasicBlock* block)
+{
+ out.printf(" begin_block\n");
+ out.printf(" name \"B%d\"\n", block->id());
+ out.printf(" from_bci -1\n");
+ out.printf(" to_bci -1\n");
+
+ out.printf(" predecessors");
+ for (uint32_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ out.printf(" \"B%d\"", pred->id());
+ }
+ out.printf("\n");
+
+ out.printf(" successors");
+ for (uint32_t i = 0; i < block->numSuccessors(); i++) {
+ MBasicBlock* successor = block->getSuccessor(i);
+ out.printf(" \"B%d\"", successor->id());
+ }
+ out.printf("\n");
+
+ out.printf(" xhandlers\n");
+ out.printf(" flags\n");
+
+ if (block->lir() && block->lir()->begin() != block->lir()->end()) {
+ out.printf(" first_lir_id %d\n", block->lir()->firstId());
+ out.printf(" last_lir_id %d\n", block->lir()->lastId());
+ }
+
+ out.printf(" begin_states\n");
+
+ if (block->entryResumePoint()) {
+ out.printf(" begin_locals\n");
+ out.printf(" size %d\n", (int)block->numEntrySlots());
+ out.printf(" method \"None\"\n");
+ for (uint32_t i = 0; i < block->numEntrySlots(); i++) {
+ MDefinition* ins = block->getEntrySlot(i);
+ out.printf(" ");
+ out.printf("%d ", i);
+ if (ins->isUnused())
+ out.printf("unused");
+ else
+ ins->printName(out);
+ out.printf("\n");
+ }
+ out.printf(" end_locals\n");
+ }
+ out.printf(" end_states\n");
+
+ out.printf(" begin_HIR\n");
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++)
+ DumpDefinition(out, *phi);
+ for (MInstructionIterator i(block->begin()); i != block->end(); i++)
+ DumpDefinition(out, *i);
+ out.printf(" end_HIR\n");
+
+ if (block->lir()) {
+ out.printf(" begin_LIR\n");
+ for (size_t i = 0; i < block->lir()->numPhis(); i++)
+ DumpLIR(out, block->lir()->getPhi(i));
+ for (LInstructionIterator i(block->lir()->begin()); i != block->lir()->end(); i++)
+ DumpLIR(out, *i);
+ out.printf(" end_LIR\n");
+ }
+
+ out.printf(" end_block\n");
+}
+
+#endif /* JS_JITSPEW */
+
diff --git a/js/src/jit/C1Spewer.h b/js/src/jit/C1Spewer.h
new file mode 100644
index 000000000..002125e1d
--- /dev/null
+++ b/js/src/jit/C1Spewer.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_C1Spewer_h
+#define jit_C1Spewer_h
+
+#ifdef JS_JITSPEW
+
+#include "NamespaceImports.h"
+
+#include "js/RootingAPI.h"
+#include "vm/Printer.h"
+
+namespace js {
+namespace jit {
+
+class BacktrackingAllocator;
+class MBasicBlock;
+class MIRGraph;
+class LNode;
+
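+// Spews the MIR and LIR graphs (and, when requested, the register allocator's
+// live ranges) as text consumable by the C1Visualizer tool.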
+class C1Spewer
+{
+ MIRGraph* graph;
+ GenericPrinter& out_;
+
+ public:
+ explicit C1Spewer(GenericPrinter& out)
+ : graph(nullptr), out_(out)
+ { }
+
+ void beginFunction(MIRGraph* graph, JSScript* script);
+ void spewPass(const char* pass);
+ void spewRanges(const char* pass, BacktrackingAllocator* regalloc);
+ void endFunction();
+
+ private:
+ void spewPass(GenericPrinter& out, MBasicBlock* block);
+ void spewRanges(GenericPrinter& out, BacktrackingAllocator* regalloc, LNode* ins);
+ void spewRanges(GenericPrinter& out, MBasicBlock* block, BacktrackingAllocator* regalloc);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_JITSPEW */
+
+#endif /* jit_C1Spewer_h */
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
new file mode 100644
index 000000000..f1061af70
--- /dev/null
+++ b/js/src/jit/CacheIR.cpp
@@ -0,0 +1,473 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CacheIR.h"
+
+#include "jit/BaselineIC.h"
+#include "jit/IonCaches.h"
+
+#include "jsobjinlines.h"
+
+#include "vm/UnboxedObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
+ MutableHandleValue res)
+ : cx_(cx),
+ pc_(pc),
+ val_(val),
+ name_(name),
+ res_(res),
+ emitted_(false),
+ preliminaryObjectAction_(PreliminaryObjectAction::None)
+{}
+
+static void
+EmitLoadSlotResult(CacheIRWriter& writer, ObjOperandId holderOp, NativeObject* holder,
+ Shape* shape)
+{
+ if (holder->isFixedSlot(shape->slot())) {
+ writer.loadFixedSlotResult(holderOp, NativeObject::getFixedSlotOffset(shape->slot()));
+ } else {
+ size_t dynamicSlotOffset = holder->dynamicSlotIndex(shape->slot()) * sizeof(Value);
+ writer.loadDynamicSlotResult(holderOp, dynamicSlotOffset);
+ }
+}
+
+bool
+GetPropIRGenerator::tryAttachStub(Maybe<CacheIRWriter>& writer)
+{
+ AutoAssertNoPendingException aanpe(cx_);
+ JS::AutoCheckCannotGC nogc;
+
+ MOZ_ASSERT(!emitted_);
+
+ writer.emplace();
+ ValOperandId valId(writer->setInputOperandId(0));
+
+ if (val_.isObject()) {
+ RootedObject obj(cx_, &val_.toObject());
+ ObjOperandId objId = writer->guardIsObject(valId);
+
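+        // Try each kind of stub in turn; the first helper that attaches a
+        // stub sets emitted_ and the remaining attempts are skipped. Returning
+        // false here propagates a hard failure (e.g. OOM), not a cache miss.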
+ if (!emitted_ && !tryAttachObjectLength(*writer, obj, objId))
+ return false;
+ if (!emitted_ && !tryAttachNative(*writer, obj, objId))
+ return false;
+ if (!emitted_ && !tryAttachUnboxed(*writer, obj, objId))
+ return false;
+ if (!emitted_ && !tryAttachUnboxedExpando(*writer, obj, objId))
+ return false;
+ if (!emitted_ && !tryAttachTypedObject(*writer, obj, objId))
+ return false;
+ if (!emitted_ && !tryAttachModuleNamespace(*writer, obj, objId))
+ return false;
+ return true;
+ }
+
+ if (!emitted_ && !tryAttachPrimitive(*writer, valId))
+ return false;
+
+ return true;
+}
+
+static bool
+IsCacheableNoProperty(JSContext* cx, JSObject* obj, JSObject* holder, Shape* shape, jsid id,
+ jsbytecode* pc)
+{
+ if (shape)
+ return false;
+
+ MOZ_ASSERT(!holder);
+
+ // If we're doing a name lookup, we have to throw a ReferenceError.
+ if (*pc == JSOP_GETXPROP)
+ return false;
+
+ return CheckHasNoSuchProperty(cx, obj, JSID_TO_ATOM(id)->asPropertyName());
+}
+
+enum NativeGetPropCacheability {
+ CanAttachNone,
+ CanAttachReadSlot,
+};
+
+static NativeGetPropCacheability
+CanAttachNativeGetProp(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleNativeObject holder, MutableHandleShape shape,
+ jsbytecode* pc, bool skipArrayLen = false)
+{
+ MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
+
+ // The lookup needs to be universally pure, otherwise we risk calling hooks out
+ // of turn. We don't mind doing this even when purity isn't required, because we
+    // only miss out on shape hashification, which is a temporary perf cost.
+    // The limits were arbitrarily set, anyway.
+ JSObject* baseHolder = nullptr;
+ if (!LookupPropertyPure(cx, obj, id, &baseHolder, shape.address()))
+ return CanAttachNone;
+
+ MOZ_ASSERT(!holder);
+ if (baseHolder) {
+ if (!baseHolder->isNative())
+ return CanAttachNone;
+ holder.set(&baseHolder->as<NativeObject>());
+ }
+
+ if (IsCacheableGetPropReadSlotForIonOrCacheIR(obj, holder, shape) ||
+ IsCacheableNoProperty(cx, obj, holder, shape, id, pc))
+ {
+ return CanAttachReadSlot;
+ }
+
+ return CanAttachNone;
+}
+
+static void
+GeneratePrototypeGuards(CacheIRWriter& writer, JSObject* obj, JSObject* holder, ObjOperandId objId)
+{
+    // The guards here protect against the effects of JSObject::swap(). If the
+    // prototype chain is directly altered, TI will discard the jitcode, so we
+    // don't have to worry about that case. Any other change to the holder, or
+    // the addition of a shadowing property, reshapes the holder and thus makes
+    // the shape guard fail.
+ MOZ_ASSERT(obj != holder);
+
+ if (obj->hasUncacheableProto()) {
+ // If the shape does not imply the proto, emit an explicit proto guard.
+ writer.guardProto(objId, obj->staticPrototype());
+ }
+
+ JSObject* pobj = obj->staticPrototype();
+ if (!pobj)
+ return;
+
+ while (pobj != holder) {
+ if (pobj->hasUncacheableProto()) {
+ ObjOperandId protoId = writer.loadObject(pobj);
+ if (pobj->isSingleton()) {
+ // Singletons can have their group's |proto| mutated directly.
+ writer.guardProto(protoId, pobj->staticPrototype());
+ } else {
+ writer.guardGroup(protoId, pobj->group());
+ }
+ }
+ pobj = pobj->staticPrototype();
+ }
+}
+
+static void
+TestMatchingReceiver(CacheIRWriter& writer, JSObject* obj, Shape* shape, ObjOperandId objId,
+ Maybe<ObjOperandId>* expandoId)
+{
+ if (obj->is<UnboxedPlainObject>()) {
+ writer.guardGroup(objId, obj->group());
+
+ if (UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando()) {
+ expandoId->emplace(writer.guardAndLoadUnboxedExpando(objId));
+ writer.guardShape(expandoId->ref(), expando->lastProperty());
+ } else {
+ writer.guardNoUnboxedExpando(objId);
+ }
+ } else if (obj->is<UnboxedArrayObject>() || obj->is<TypedObject>()) {
+ writer.guardGroup(objId, obj->group());
+ } else {
+ Shape* shape = obj->maybeShape();
+ MOZ_ASSERT(shape);
+ writer.guardShape(objId, shape);
+ }
+}
+
+static void
+EmitReadSlotResult(CacheIRWriter& writer, JSObject* obj, JSObject* holder,
+ Shape* shape, ObjOperandId objId)
+{
+ Maybe<ObjOperandId> expandoId;
+ TestMatchingReceiver(writer, obj, shape, objId, &expandoId);
+
+ ObjOperandId holderId;
+ if (obj != holder) {
+ GeneratePrototypeGuards(writer, obj, holder, objId);
+
+ if (holder) {
+ // Guard on the holder's shape.
+ holderId = writer.loadObject(holder);
+ writer.guardShape(holderId, holder->as<NativeObject>().lastProperty());
+ } else {
+ // The property does not exist. Guard on everything in the prototype
+ // chain. This is guaranteed to see only Native objects because of
+ // CanAttachNativeGetProp().
+ JSObject* proto = obj->taggedProto().toObjectOrNull();
+ ObjOperandId lastObjId = objId;
+ while (proto) {
+ ObjOperandId protoId = writer.loadProto(lastObjId);
+ writer.guardShape(protoId, proto->as<NativeObject>().lastProperty());
+ proto = proto->staticPrototype();
+ lastObjId = protoId;
+ }
+ }
+ } else if (obj->is<UnboxedPlainObject>()) {
+ holder = obj->as<UnboxedPlainObject>().maybeExpando();
+ holderId = *expandoId;
+ } else {
+ holderId = objId;
+ }
+
+ // Slot access.
+ if (holder) {
+ MOZ_ASSERT(holderId.valid());
+ EmitLoadSlotResult(writer, holderId, &holder->as<NativeObject>(), shape);
+ } else {
+ MOZ_ASSERT(!holderId.valid());
+ writer.loadUndefinedResult();
+ }
+}
+
+bool
+GetPropIRGenerator::tryAttachNative(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ RootedShape shape(cx_);
+ RootedNativeObject holder(cx_);
+
+ RootedId id(cx_, NameToId(name_));
+ NativeGetPropCacheability type = CanAttachNativeGetProp(cx_, obj, id, &holder, &shape, pc_);
+ if (type == CanAttachNone)
+ return true;
+
+ emitted_ = true;
+
+ switch (type) {
+ case CanAttachReadSlot:
+ if (holder) {
+ EnsureTrackPropertyTypes(cx_, holder, NameToId(name_));
+ if (obj == holder) {
+ // See the comment in StripPreliminaryObjectStubs.
+ if (IsPreliminaryObject(obj))
+ preliminaryObjectAction_ = PreliminaryObjectAction::NotePreliminary;
+ else
+ preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;
+ }
+ }
+ EmitReadSlotResult(writer, obj, holder, shape, objId);
+ break;
+ default:
+ MOZ_CRASH("Bad NativeGetPropCacheability");
+ }
+
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachUnboxed(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ if (!obj->is<UnboxedPlainObject>())
+ return true;
+
+ const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(name_);
+ if (!property)
+ return true;
+
+ if (!cx_->runtime()->jitSupportsFloatingPoint)
+ return true;
+
+ writer.guardGroup(objId, obj->group());
+ writer.loadUnboxedPropertyResult(objId, property->type,
+ UnboxedPlainObject::offsetOfData() + property->offset);
+ emitted_ = true;
+ preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachUnboxedExpando(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ if (!obj->is<UnboxedPlainObject>())
+ return true;
+
+ UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
+ if (!expando)
+ return true;
+
+ Shape* shape = expando->lookup(cx_, NameToId(name_));
+ if (!shape || !shape->hasDefaultGetter() || !shape->hasSlot())
+ return true;
+
+ emitted_ = true;
+
+ EmitReadSlotResult(writer, obj, obj, shape, objId);
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachTypedObject(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ if (!obj->is<TypedObject>() ||
+ !cx_->runtime()->jitSupportsFloatingPoint ||
+ cx_->compartment()->detachedTypedObjects)
+ {
+ return true;
+ }
+
+ TypedObject* typedObj = &obj->as<TypedObject>();
+ if (!typedObj->typeDescr().is<StructTypeDescr>())
+ return true;
+
+ StructTypeDescr* structDescr = &typedObj->typeDescr().as<StructTypeDescr>();
+ size_t fieldIndex;
+ if (!structDescr->fieldIndex(NameToId(name_), &fieldIndex))
+ return true;
+
+ TypeDescr* fieldDescr = &structDescr->fieldDescr(fieldIndex);
+ if (!fieldDescr->is<SimpleTypeDescr>())
+ return true;
+
+ Shape* shape = typedObj->maybeShape();
+ TypedThingLayout layout = GetTypedThingLayout(shape->getObjectClass());
+
+ uint32_t fieldOffset = structDescr->fieldOffset(fieldIndex);
+ uint32_t typeDescr = SimpleTypeDescrKey(&fieldDescr->as<SimpleTypeDescr>());
+
+ writer.guardNoDetachedTypedObjects();
+ writer.guardShape(objId, shape);
+ writer.loadTypedObjectResult(objId, fieldOffset, layout, typeDescr);
+ emitted_ = true;
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachObjectLength(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ if (name_ != cx_->names().length)
+ return true;
+
+ if (obj->is<ArrayObject>()) {
+ // Make sure int32 is added to the TypeSet before we attach a stub, so
+ // the stub can return int32 values without monitoring the result.
+ if (obj->as<ArrayObject>().length() > INT32_MAX)
+ return true;
+
+ writer.guardClass(objId, GuardClassKind::Array);
+ writer.loadInt32ArrayLengthResult(objId);
+ emitted_ = true;
+ return true;
+ }
+
+ if (obj->is<UnboxedArrayObject>()) {
+ writer.guardClass(objId, GuardClassKind::UnboxedArray);
+ writer.loadUnboxedArrayLengthResult(objId);
+ emitted_ = true;
+ return true;
+ }
+
+ if (obj->is<ArgumentsObject>() && !obj->as<ArgumentsObject>().hasOverriddenLength()) {
+ if (obj->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(obj->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+ writer.loadArgumentsObjectLengthResult(objId);
+ emitted_ = true;
+ return true;
+ }
+
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachModuleNamespace(CacheIRWriter& writer, HandleObject obj,
+ ObjOperandId objId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ if (!obj->is<ModuleNamespaceObject>())
+ return true;
+
+ Rooted<ModuleNamespaceObject*> ns(cx_, &obj->as<ModuleNamespaceObject>());
+ RootedModuleEnvironmentObject env(cx_);
+ RootedShape shape(cx_);
+ if (!ns->bindings().lookup(NameToId(name_), env.address(), shape.address()))
+ return true;
+
+ // Don't emit a stub until the target binding has been initialized.
+ if (env->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL))
+ return true;
+
+ if (IsIonEnabled(cx_))
+ EnsureTrackPropertyTypes(cx_, env, shape->propid());
+
+ emitted_ = true;
+
+ // Check for the specific namespace object.
+ writer.guardSpecificObject(objId, ns);
+
+ ObjOperandId envId = writer.loadObject(env);
+ EmitLoadSlotResult(writer, envId, env, shape);
+ return true;
+}
+
+bool
+GetPropIRGenerator::tryAttachPrimitive(CacheIRWriter& writer, ValOperandId valId)
+{
+ MOZ_ASSERT(!emitted_);
+
+ JSValueType primitiveType;
+ RootedNativeObject proto(cx_);
+ if (val_.isString()) {
+ if (name_ == cx_->names().length) {
+ // String length is special-cased, see js::GetProperty.
+ return true;
+ }
+ primitiveType = JSVAL_TYPE_STRING;
+ proto = MaybeNativeObject(GetBuiltinPrototypePure(cx_->global(), JSProto_String));
+ } else if (val_.isNumber()) {
+ primitiveType = JSVAL_TYPE_DOUBLE;
+ proto = MaybeNativeObject(GetBuiltinPrototypePure(cx_->global(), JSProto_Number));
+ } else if (val_.isBoolean()) {
+ primitiveType = JSVAL_TYPE_BOOLEAN;
+ proto = MaybeNativeObject(GetBuiltinPrototypePure(cx_->global(), JSProto_Boolean));
+ } else if (val_.isSymbol()) {
+ primitiveType = JSVAL_TYPE_SYMBOL;
+ proto = MaybeNativeObject(GetBuiltinPrototypePure(cx_->global(), JSProto_Symbol));
+ } else {
+ MOZ_ASSERT(val_.isNullOrUndefined() || val_.isMagic());
+ return true;
+ }
+ if (!proto)
+ return true;
+
+ // Instantiate this property, for use during Ion compilation.
+ RootedId id(cx_, NameToId(name_));
+ if (IsIonEnabled(cx_))
+ EnsureTrackPropertyTypes(cx_, proto, id);
+
+ // For now, only look for properties directly set on the prototype.
+ Shape* shape = proto->lookup(cx_, id);
+ if (!shape || !shape->hasSlot() || !shape->hasDefaultGetter())
+ return true;
+
+ writer.guardType(valId, primitiveType);
+
+ ObjOperandId protoId = writer.loadObject(proto);
+ writer.guardShape(protoId, proto->lastProperty());
+ EmitLoadSlotResult(writer, protoId, proto, shape);
+
+ emitted_ = true;
+ return true;
+}
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
new file mode 100644
index 000000000..51e55f48b
--- /dev/null
+++ b/js/src/jit/CacheIR.h
@@ -0,0 +1,453 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIR_h
+#define jit_CacheIR_h
+
+#include "mozilla/Maybe.h"
+
+#include "NamespaceImports.h"
+
+#include "gc/Rooting.h"
+#include "jit/CompactBuffer.h"
+#include "jit/SharedIC.h"
+
+namespace js {
+namespace jit {
+
+// CacheIR is an (extremely simple) linear IR language for inline caches.
+// From this IR, we can generate machine code for Baseline or Ion IC stubs.
+//
+// IRWriter
+// --------
+// CacheIR bytecode is written using IRWriter. This class also records some
+// metadata that's used by the Baseline and Ion code generators to generate
+// (efficient) machine code.
+//
+// Sharing Baseline stub code
+// --------------------------
+// Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
+// structure, instead of embedding them directly in the JitCode. This makes
+// Baseline IC code slightly slower, but allows us to share IC code between
+// caches. CacheIR makes it easy to share code between stubs: stubs that have
+// the same CacheIR (and CacheKind), will have the same Baseline stub code.
+//
+// Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
+// This class stores the CacheIR and the location of GC things stored in the
+// stub, for the GC.
+//
+// JitCompartment has a CacheIRStubInfo* -> JitCode* weak map that's used to
+// share both the IR and JitCode between CacheIR stubs. This HashMap owns the
+// stubInfo (it uses UniquePtr), so once there are no references left to the
+// shared stub code, we can also free the CacheIRStubInfo.
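+//
+// As a rough example (a sketch, not authoritative output), a GetProp on a
+// plain native object whose property lives in a fixed slot could be written
+// as:
+//
+//   ObjOperandId objId = writer.guardIsObject(valId);
+//   writer.guardShape(objId, obj->lastProperty());
+//   writer.loadFixedSlotResult(objId, fixedSlotOffset);
+//
+// where fixedSlotOffset is an illustrative placeholder for the slot's offset.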
+
+// An OperandId represents either a cache input or a value returned by a
+// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
+// classes below. The ObjOperandId class represents an operand that's known to
+// be an object.
+class OperandId
+{
+ protected:
+ static const uint16_t InvalidId = UINT16_MAX;
+ uint16_t id_;
+
+ OperandId() : id_(InvalidId) {}
+ explicit OperandId(uint16_t id) : id_(id) {}
+
+ public:
+ uint16_t id() const { return id_; }
+ bool valid() const { return id_ != InvalidId; }
+};
+
+class ValOperandId : public OperandId
+{
+ public:
+ explicit ValOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class ObjOperandId : public OperandId
+{
+ public:
+ ObjOperandId() = default;
+ explicit ObjOperandId(uint16_t id) : OperandId(id) {}
+
+ bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
+ bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
+};
+
+#define CACHE_IR_OPS(_) \
+ _(GuardIsObject) \
+ _(GuardType) \
+ _(GuardShape) \
+ _(GuardGroup) \
+ _(GuardProto) \
+ _(GuardClass) \
+ _(GuardSpecificObject) \
+ _(GuardNoDetachedTypedObjects) \
+ _(GuardNoUnboxedExpando) \
+ _(GuardAndLoadUnboxedExpando) \
+ _(LoadObject) \
+ _(LoadProto) \
+ _(LoadFixedSlotResult) \
+ _(LoadDynamicSlotResult) \
+ _(LoadUnboxedPropertyResult) \
+ _(LoadTypedObjectResult) \
+ _(LoadInt32ArrayLengthResult) \
+ _(LoadUnboxedArrayLengthResult) \
+ _(LoadArgumentsObjectLengthResult) \
+ _(LoadUndefinedResult)
+
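+// Each entry in CACHE_IR_OPS expands, X-macro style, into one CacheOp
+// enumerator below.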
+enum class CacheOp {
+#define DEFINE_OP(op) op,
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+};
+
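+// A single word of data stored in the IC stub, tagged with the kind of GC
+// thing it holds (if any) so that pointers embedded in the stub can be traced.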
+struct StubField {
+ enum class GCType {
+ NoGCThing,
+ Shape,
+ ObjectGroup,
+ JSObject,
+ Limit
+ };
+
+ uintptr_t word;
+ GCType gcType;
+
+ StubField(uintptr_t word, GCType gcType)
+ : word(word), gcType(gcType)
+ {}
+};
+
+// We use this enum as GuardClass operand, instead of storing Class* pointers
+// in the IR, to keep the IR compact and the same size on all platforms.
+enum class GuardClassKind
+{
+ Array,
+ UnboxedArray,
+ MappedArguments,
+ UnmappedArguments,
+};
+
+// Class to record CacheIR + some additional metadata for code generation.
+class MOZ_RAII CacheIRWriter
+{
+ CompactBufferWriter buffer_;
+
+ uint32_t nextOperandId_;
+ uint32_t nextInstructionId_;
+ uint32_t numInputOperands_;
+
+ // The data (shapes, slot offsets, etc.) that will be stored in the ICStub.
+ Vector<StubField, 8, SystemAllocPolicy> stubFields_;
+
+ // For each operand id, record which instruction accessed it last. This
+ // information greatly improves register allocation.
+ Vector<uint32_t, 8, SystemAllocPolicy> operandLastUsed_;
+
+ // OperandId and stub offsets are stored in a single byte, so make sure
+ // this doesn't overflow. We use a very conservative limit for now.
+ static const size_t MaxOperandIds = 20;
+ static const size_t MaxStubFields = 20;
+ bool tooLarge_;
+
+ // stubFields_ contains unrooted pointers, so ensure we cannot GC in
+ // our scope.
+ JS::AutoCheckCannotGC nogc;
+
+ void writeOp(CacheOp op) {
+ MOZ_ASSERT(uint32_t(op) <= UINT8_MAX);
+ buffer_.writeByte(uint32_t(op));
+ nextInstructionId_++;
+ }
+
+ void writeOperandId(OperandId opId) {
+ if (opId.id() < MaxOperandIds) {
+ static_assert(MaxOperandIds <= UINT8_MAX, "operand id must fit in a single byte");
+ buffer_.writeByte(opId.id());
+ } else {
+ tooLarge_ = true;
+ return;
+ }
+ if (opId.id() >= operandLastUsed_.length()) {
+ buffer_.propagateOOM(operandLastUsed_.resize(opId.id() + 1));
+ if (buffer_.oom())
+ return;
+ }
+ MOZ_ASSERT(nextInstructionId_ > 0);
+ operandLastUsed_[opId.id()] = nextInstructionId_ - 1;
+ }
+
+ void writeOpWithOperandId(CacheOp op, OperandId opId) {
+ writeOp(op);
+ writeOperandId(opId);
+ }
+
+ void addStubWord(uintptr_t word, StubField::GCType gcType) {
+ uint32_t pos = stubFields_.length();
+ buffer_.propagateOOM(stubFields_.append(StubField(word, gcType)));
+ if (pos < MaxStubFields)
+ buffer_.writeByte(pos);
+ else
+ tooLarge_ = true;
+ }
+
+ CacheIRWriter(const CacheIRWriter&) = delete;
+ CacheIRWriter& operator=(const CacheIRWriter&) = delete;
+
+ public:
+ CacheIRWriter()
+ : nextOperandId_(0),
+ nextInstructionId_(0),
+ numInputOperands_(0),
+ tooLarge_(false)
+ {}
+
+ bool failed() const { return buffer_.oom() || tooLarge_; }
+
+ uint32_t numInputOperands() const { return numInputOperands_; }
+ uint32_t numOperandIds() const { return nextOperandId_; }
+ uint32_t numInstructions() const { return nextInstructionId_; }
+
+ size_t numStubFields() const { return stubFields_.length(); }
+ StubField::GCType stubFieldGCType(uint32_t i) const { return stubFields_[i].gcType; }
+
+ uint32_t setInputOperandId(uint32_t op) {
+ MOZ_ASSERT(op == nextOperandId_);
+ nextOperandId_++;
+ numInputOperands_++;
+ return op;
+ }
+
+ size_t stubDataSize() const {
+ return stubFields_.length() * sizeof(uintptr_t);
+ }
+ void copyStubData(uint8_t* dest) const;
+
+ bool operandIsDead(uint32_t operandId, uint32_t currentInstruction) const {
+ if (operandId >= operandLastUsed_.length())
+ return false;
+ return currentInstruction > operandLastUsed_[operandId];
+ }
+ const uint8_t* codeStart() const {
+ return buffer_.buffer();
+ }
+ const uint8_t* codeEnd() const {
+ return buffer_.buffer() + buffer_.length();
+ }
+ uint32_t codeLength() const {
+ return buffer_.length();
+ }
+
+ ObjOperandId guardIsObject(ValOperandId val) {
+ writeOpWithOperandId(CacheOp::GuardIsObject, val);
+ return ObjOperandId(val.id());
+ }
+ void guardType(ValOperandId val, JSValueType type) {
+ writeOpWithOperandId(CacheOp::GuardType, val);
+ static_assert(sizeof(type) == sizeof(uint8_t), "JSValueType should fit in a byte");
+ buffer_.writeByte(uint32_t(type));
+ }
+ void guardShape(ObjOperandId obj, Shape* shape) {
+ writeOpWithOperandId(CacheOp::GuardShape, obj);
+ addStubWord(uintptr_t(shape), StubField::GCType::Shape);
+ }
+ void guardGroup(ObjOperandId obj, ObjectGroup* group) {
+ writeOpWithOperandId(CacheOp::GuardGroup, obj);
+ addStubWord(uintptr_t(group), StubField::GCType::ObjectGroup);
+ }
+ void guardProto(ObjOperandId obj, JSObject* proto) {
+ writeOpWithOperandId(CacheOp::GuardProto, obj);
+ addStubWord(uintptr_t(proto), StubField::GCType::JSObject);
+ }
+ void guardClass(ObjOperandId obj, GuardClassKind kind) {
+ MOZ_ASSERT(uint32_t(kind) <= UINT8_MAX);
+ writeOpWithOperandId(CacheOp::GuardClass, obj);
+ buffer_.writeByte(uint32_t(kind));
+ }
+ void guardSpecificObject(ObjOperandId obj, JSObject* expected) {
+ writeOpWithOperandId(CacheOp::GuardSpecificObject, obj);
+ addStubWord(uintptr_t(expected), StubField::GCType::JSObject);
+ }
+ void guardNoDetachedTypedObjects() {
+ writeOp(CacheOp::GuardNoDetachedTypedObjects);
+ }
+ void guardNoUnboxedExpando(ObjOperandId obj) {
+ writeOpWithOperandId(CacheOp::GuardNoUnboxedExpando, obj);
+ }
+ ObjOperandId guardAndLoadUnboxedExpando(ObjOperandId obj) {
+ ObjOperandId res(nextOperandId_++);
+ writeOpWithOperandId(CacheOp::GuardAndLoadUnboxedExpando, obj);
+ writeOperandId(res);
+ return res;
+ }
+
+ ObjOperandId loadObject(JSObject* obj) {
+ ObjOperandId res(nextOperandId_++);
+ writeOpWithOperandId(CacheOp::LoadObject, res);
+ addStubWord(uintptr_t(obj), StubField::GCType::JSObject);
+ return res;
+ }
+ ObjOperandId loadProto(ObjOperandId obj) {
+ ObjOperandId res(nextOperandId_++);
+ writeOpWithOperandId(CacheOp::LoadProto, obj);
+ writeOperandId(res);
+ return res;
+ }
+
+ void loadUndefinedResult() {
+ writeOp(CacheOp::LoadUndefinedResult);
+ }
+ void loadFixedSlotResult(ObjOperandId obj, size_t offset) {
+ writeOpWithOperandId(CacheOp::LoadFixedSlotResult, obj);
+ addStubWord(offset, StubField::GCType::NoGCThing);
+ }
+ void loadDynamicSlotResult(ObjOperandId obj, size_t offset) {
+ writeOpWithOperandId(CacheOp::LoadDynamicSlotResult, obj);
+ addStubWord(offset, StubField::GCType::NoGCThing);
+ }
+ void loadUnboxedPropertyResult(ObjOperandId obj, JSValueType type, size_t offset) {
+ writeOpWithOperandId(CacheOp::LoadUnboxedPropertyResult, obj);
+ buffer_.writeByte(uint32_t(type));
+ addStubWord(offset, StubField::GCType::NoGCThing);
+ }
+ void loadTypedObjectResult(ObjOperandId obj, uint32_t offset, TypedThingLayout layout,
+ uint32_t typeDescr) {
+ MOZ_ASSERT(uint32_t(layout) <= UINT8_MAX);
+ MOZ_ASSERT(typeDescr <= UINT8_MAX);
+ writeOpWithOperandId(CacheOp::LoadTypedObjectResult, obj);
+ buffer_.writeByte(uint32_t(layout));
+ buffer_.writeByte(typeDescr);
+ addStubWord(offset, StubField::GCType::NoGCThing);
+ }
+ void loadInt32ArrayLengthResult(ObjOperandId obj) {
+ writeOpWithOperandId(CacheOp::LoadInt32ArrayLengthResult, obj);
+ }
+ void loadUnboxedArrayLengthResult(ObjOperandId obj) {
+ writeOpWithOperandId(CacheOp::LoadUnboxedArrayLengthResult, obj);
+ }
+ void loadArgumentsObjectLengthResult(ObjOperandId obj) {
+ writeOpWithOperandId(CacheOp::LoadArgumentsObjectLengthResult, obj);
+ }
+};
+
+class CacheIRStubInfo;
+
+// Helper class for reading CacheIR bytecode.
+class MOZ_RAII CacheIRReader
+{
+ CompactBufferReader buffer_;
+
+ CacheIRReader(const CacheIRReader&) = delete;
+ CacheIRReader& operator=(const CacheIRReader&) = delete;
+
+ public:
+ CacheIRReader(const uint8_t* start, const uint8_t* end)
+ : buffer_(start, end)
+ {}
+ explicit CacheIRReader(const CacheIRWriter& writer)
+ : CacheIRReader(writer.codeStart(), writer.codeEnd())
+ {}
+ explicit CacheIRReader(const CacheIRStubInfo* stubInfo);
+
+ bool more() const { return buffer_.more(); }
+
+ CacheOp readOp() {
+ return CacheOp(buffer_.readByte());
+ }
+
+ ValOperandId valOperandId() {
+ return ValOperandId(buffer_.readByte());
+ }
+ ObjOperandId objOperandId() {
+ return ObjOperandId(buffer_.readByte());
+ }
+
+ uint32_t stubOffset() { return buffer_.readByte(); }
+ GuardClassKind guardClassKind() { return GuardClassKind(buffer_.readByte()); }
+ JSValueType valueType() { return JSValueType(buffer_.readByte()); }
+ TypedThingLayout typedThingLayout() { return TypedThingLayout(buffer_.readByte()); }
+ uint32_t typeDescrKey() { return buffer_.readByte(); }
+
+ bool matchOp(CacheOp op) {
+ const uint8_t* pos = buffer_.currentPosition();
+ if (readOp() == op)
+ return true;
+ buffer_.seek(pos, 0);
+ return false;
+ }
+ bool matchOp(CacheOp op, OperandId id) {
+ const uint8_t* pos = buffer_.currentPosition();
+ if (readOp() == op && buffer_.readByte() == id.id())
+ return true;
+ buffer_.seek(pos, 0);
+ return false;
+ }
+ bool matchOpEither(CacheOp op1, CacheOp op2) {
+ const uint8_t* pos = buffer_.currentPosition();
+ CacheOp op = readOp();
+ if (op == op1 || op == op2)
+ return true;
+ buffer_.seek(pos, 0);
+ return false;
+ }
+};
+
+// GetPropIRGenerator generates CacheIR for a GetProp IC.
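+//
+// Expected use (a sketch; caller-side handling is illustrative):
+//
+//   GetPropIRGenerator gen(cx, pc, val, name, res);
+//   mozilla::Maybe<CacheIRWriter> writer;
+//   if (!gen.tryAttachStub(writer))
+//       return false;                       // Hard failure (e.g. OOM).
+//   if (gen.emitted() && !writer->failed()) {
+//       // Compile the recorded CacheIR into a Baseline or Ion stub.
+//   }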
+class MOZ_RAII GetPropIRGenerator
+{
+ JSContext* cx_;
+ jsbytecode* pc_;
+ HandleValue val_;
+ HandlePropertyName name_;
+ MutableHandleValue res_;
+ bool emitted_;
+
+ enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
+ PreliminaryObjectAction preliminaryObjectAction_;
+
+ MOZ_MUST_USE bool tryAttachNative(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId);
+ MOZ_MUST_USE bool tryAttachUnboxed(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId);
+ MOZ_MUST_USE bool tryAttachUnboxedExpando(CacheIRWriter& writer, HandleObject obj,
+ ObjOperandId objId);
+ MOZ_MUST_USE bool tryAttachTypedObject(CacheIRWriter& writer, HandleObject obj,
+ ObjOperandId objId);
+ MOZ_MUST_USE bool tryAttachObjectLength(CacheIRWriter& writer, HandleObject obj,
+ ObjOperandId objId);
+ MOZ_MUST_USE bool tryAttachModuleNamespace(CacheIRWriter& writer, HandleObject obj,
+ ObjOperandId objId);
+
+ MOZ_MUST_USE bool tryAttachPrimitive(CacheIRWriter& writer, ValOperandId valId);
+
+ GetPropIRGenerator(const GetPropIRGenerator&) = delete;
+ GetPropIRGenerator& operator=(const GetPropIRGenerator&) = delete;
+
+ public:
+ GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
+ MutableHandleValue res);
+
+ bool emitted() const { return emitted_; }
+
+ MOZ_MUST_USE bool tryAttachStub(mozilla::Maybe<CacheIRWriter>& writer);
+
+ bool shouldUnlinkPreliminaryObjectStubs() const {
+ return preliminaryObjectAction_ == PreliminaryObjectAction::Unlink;
+ }
+ bool shouldNotePreliminaryObjectStub() const {
+ return preliminaryObjectAction_ == PreliminaryObjectAction::NotePreliminary;
+ }
+};
+
+enum class CacheKind
+{
+ GetProp
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIR_h */
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
new file mode 100644
index 000000000..ce97363be
--- /dev/null
+++ b/js/src/jit/CodeGenerator.cpp
@@ -0,0 +1,12098 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CodeGenerator.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jslibmath.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "jsprf.h"
+#include "jsstr.h"
+
+#include "builtin/Eval.h"
+#include "builtin/TypedObject.h"
+#include "gc/Nursery.h"
+#include "irregexp/NativeRegExpMacroAssembler.h"
+#include "jit/AtomicOperations.h"
+#include "jit/BaselineCompiler.h"
+#include "jit/IonBuilder.h"
+#include "jit/IonCaches.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/Lowering.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MoveEmitter.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/SharedICHelpers.h"
+#include "vm/AsyncFunction.h"
+#include "vm/MatchPairs.h"
+#include "vm/RegExpObject.h"
+#include "vm/RegExpStatics.h"
+#include "vm/TraceLogging.h"
+#include "vm/Unicode.h"
+
+#include "jsboolinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/Interpreter-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::AssertedCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::Maybe;
+using mozilla::NegativeInfinity;
+using mozilla::PositiveInfinity;
+using JS::GenericNaN;
+
+namespace js {
+namespace jit {
+
+// This out-of-line cache is used to do a double dispatch including itself and
+// the wrapped IonCache.
+class OutOfLineUpdateCache :
+ public OutOfLineCodeBase<CodeGenerator>,
+ public IonCacheVisitor
+{
+ private:
+ LInstruction* lir_;
+ size_t cacheIndex_;
+ RepatchLabel entry_;
+
+ public:
+ OutOfLineUpdateCache(LInstruction* lir, size_t cacheIndex)
+ : lir_(lir),
+ cacheIndex_(cacheIndex)
+ { }
+
+ void bind(MacroAssembler* masm) {
+ // The binding of the initial jump is done in
+ // CodeGenerator::visitOutOfLineCache.
+ }
+
+ size_t getCacheIndex() const {
+ return cacheIndex_;
+ }
+ LInstruction* lir() const {
+ return lir_;
+ }
+ RepatchLabel& entry() {
+ return entry_;
+ }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineCache(this);
+ }
+
+    // ICs' visit functions delegate the work to the CodeGen visit functions.
+#define VISIT_CACHE_FUNCTION(op) \
+ void visit##op##IC(CodeGenerator* codegen) { \
+ CodeGenerator::DataPtr<op##IC> ic(codegen, getCacheIndex()); \
+ codegen->visit##op##IC(this, ic); \
+ }
+
+ IONCACHE_KIND_LIST(VISIT_CACHE_FUNCTION)
+#undef VISIT_CACHE_FUNCTION
+};
+
+// This function is declared here because it needs to instantiate an
+// OutOfLineUpdateCache, but we want to keep it visible inside
+// CodeGeneratorShared so that we can specialize inline caches depending on
+// the architecture.
+void
+CodeGeneratorShared::addCache(LInstruction* lir, size_t cacheIndex)
+{
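+    // A cacheIndex of SIZE_MAX signals that allocating the cache failed
+    // earlier; record the OOM and bail out.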
+ if (cacheIndex == SIZE_MAX) {
+ masm.setOOM();
+ return;
+ }
+
+ DataPtr<IonCache> cache(this, cacheIndex);
+ MInstruction* mir = lir->mirRaw()->toInstruction();
+ if (mir->resumePoint())
+ cache->setScriptedLocation(mir->block()->info().script(),
+ mir->resumePoint()->pc());
+ else
+ cache->setIdempotent();
+
+ OutOfLineUpdateCache* ool = new(alloc()) OutOfLineUpdateCache(lir, cacheIndex);
+ addOutOfLineCode(ool, mir);
+
+ cache->emitInitialJump(masm, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache* ool)
+{
+ DataPtr<IonCache> cache(this, ool->getCacheIndex());
+
+ // Register the location of the OOL path in the IC.
+ cache->setFallbackLabel(masm.labelForPatch());
+ masm.bind(&ool->entry());
+
+ // Dispatch to ICs' accept functions.
+ cache->accept(this, ool);
+}
+
+StringObject*
+MNewStringObject::templateObj() const {
+ return &templateObj_->as<StringObject>();
+}
+
+CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorSpecific(gen, graph, masm)
+ , ionScriptLabels_(gen->alloc())
+ , scriptCounts_(nullptr)
+ , simdRefreshTemplatesDuringLink_(0)
+{
+}
+
+CodeGenerator::~CodeGenerator()
+{
+ MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numSymbolicAccesses() == 0);
+ js_delete(scriptCounts_);
+}
+
+typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
+static const VMFunction StringToNumberInfo =
+ FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");
+
+void
+CodeGenerator::visitValueToInt32(LValueToInt32* lir)
+{
+ ValueOperand operand = ToValue(lir, LValueToInt32::Input);
+ Register output = ToRegister(lir->output());
+ FloatRegister temp = ToFloatRegister(lir->tempFloat());
+
+ MDefinition* input;
+ if (lir->mode() == LValueToInt32::NORMAL)
+ input = lir->mirNormal()->input();
+ else
+ input = lir->mirTruncate()->input();
+
+ Label fails;
+ if (lir->mode() == LValueToInt32::TRUNCATE) {
+ OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
+
+ // We can only handle strings in truncation contexts, like bitwise
+ // operations.
+ Label* stringEntry;
+ Label* stringRejoin;
+ Register stringReg;
+ if (input->mightBeType(MIRType::String)) {
+ stringReg = ToRegister(lir->temp());
+ OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg),
+ StoreFloatRegisterTo(temp));
+ stringEntry = oolString->entry();
+ stringRejoin = oolString->rejoin();
+ } else {
+ stringReg = InvalidReg;
+ stringEntry = nullptr;
+ stringRejoin = nullptr;
+ }
+
+ masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin, oolDouble->entry(),
+ stringReg, temp, output, &fails);
+ masm.bind(oolDouble->rejoin());
+ } else {
+ masm.convertValueToInt32(operand, input, temp, output, &fails,
+ lir->mirNormal()->canBeNegativeZero(),
+ lir->mirNormal()->conversion());
+ }
+
+ bailoutFrom(&fails, lir->snapshot());
+}
+
+void
+CodeGenerator::visitValueToDouble(LValueToDouble* lir)
+{
+ MToDouble* mir = lir->mir();
+ ValueOperand operand = ToValue(lir, LValueToDouble::Input);
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ Register tag = masm.splitTagForTest(operand);
+
+ Label isDouble, isInt32, isBool, isNull, isUndefined, done;
+ bool hasBoolean = false, hasNull = false, hasUndefined = false;
+
+ masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+ masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
+
+ if (mir->conversion() != MToFPInstruction::NumbersOnly) {
+ masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
+ masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
+ hasBoolean = true;
+ hasUndefined = true;
+ if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
+ masm.branchTestNull(Assembler::Equal, tag, &isNull);
+ hasNull = true;
+ }
+ }
+
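+    // Any value type not branched away above is unsupported here and bails.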
+ bailout(lir->snapshot());
+
+ if (hasNull) {
+ masm.bind(&isNull);
+ masm.loadConstantDouble(0.0, output);
+ masm.jump(&done);
+ }
+
+ if (hasUndefined) {
+ masm.bind(&isUndefined);
+ masm.loadConstantDouble(GenericNaN(), output);
+ masm.jump(&done);
+ }
+
+ if (hasBoolean) {
+ masm.bind(&isBool);
+ masm.boolValueToDouble(operand, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&isInt32);
+ masm.int32ValueToDouble(operand, output);
+ masm.jump(&done);
+
+ masm.bind(&isDouble);
+ masm.unboxDouble(operand, output);
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitValueToFloat32(LValueToFloat32* lir)
+{
+ MToFloat32* mir = lir->mir();
+ ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ Register tag = masm.splitTagForTest(operand);
+
+ Label isDouble, isInt32, isBool, isNull, isUndefined, done;
+ bool hasBoolean = false, hasNull = false, hasUndefined = false;
+
+ masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+ masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
+
+ if (mir->conversion() != MToFPInstruction::NumbersOnly) {
+ masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
+ masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
+ hasBoolean = true;
+ hasUndefined = true;
+ if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
+ masm.branchTestNull(Assembler::Equal, tag, &isNull);
+ hasNull = true;
+ }
+ }
+
+ bailout(lir->snapshot());
+
+ if (hasNull) {
+ masm.bind(&isNull);
+ masm.loadConstantFloat32(0.0f, output);
+ masm.jump(&done);
+ }
+
+ if (hasUndefined) {
+ masm.bind(&isUndefined);
+ masm.loadConstantFloat32(float(GenericNaN()), output);
+ masm.jump(&done);
+ }
+
+ if (hasBoolean) {
+ masm.bind(&isBool);
+ masm.boolValueToFloat32(operand, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&isInt32);
+ masm.int32ValueToFloat32(operand, output);
+ masm.jump(&done);
+
+ masm.bind(&isDouble);
+ // ARM and MIPS may not have a double register available if we've
+ // allocated output as a float32.
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ masm.unboxDouble(operand, ScratchDoubleReg);
+ masm.convertDoubleToFloat32(ScratchDoubleReg, output);
+#else
+ masm.unboxDouble(operand, output);
+ masm.convertDoubleToFloat32(output, output);
+#endif
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir)
+{
+ masm.convertInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir)
+{
+ masm.convertFloat32ToDouble(ToFloatRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir)
+{
+ masm.convertDoubleToFloat32(ToFloatRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir)
+{
+ masm.convertInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir)
+{
+ Label fail;
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.convertDoubleToInt32(input, output, &fail, lir->mir()->canBeNegativeZero());
+ bailoutFrom(&fail, lir->snapshot());
+}
+
+void
+CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir)
+{
+ Label fail;
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.convertFloat32ToInt32(input, output, &fail, lir->mir()->canBeNegativeZero());
+ bailoutFrom(&fail, lir->snapshot());
+}
+
+void
+CodeGenerator::emitOOLTestObject(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch)
+{
+ saveVolatile(scratch);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(objreg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
+ masm.storeCallBoolResult(scratch);
+ restoreVolatile(scratch);
+
+ masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
+ masm.jump(ifDoesntEmulateUndefined);
+}
+
+// Base out-of-line code generator for all tests of the truthiness of an
+// object, where the object might not be truthy. (Recall that per spec all
+// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
+// flag to permit objects to look like |undefined| in certain contexts,
+// including in object truthiness testing.) We check truthiness inline except
+// when we're testing it on a proxy (or if TI guarantees us that the specified
+// object will never emulate |undefined|), in which case out-of-line code will
+// call EmulatesUndefined for a conclusive answer.
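+// (The canonical web-visible example of an object with this flag is
+// |document.all|.)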
+class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator>
+{
+ Register objreg_;
+ Register scratch_;
+
+ Label* ifEmulatesUndefined_;
+ Label* ifDoesntEmulateUndefined_;
+
+#ifdef DEBUG
+ bool initialized() { return ifEmulatesUndefined_ != nullptr; }
+#endif
+
+ public:
+ OutOfLineTestObject()
+#ifdef DEBUG
+ : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr)
+#endif
+ { }
+
+ void accept(CodeGenerator* codegen) final override {
+ MOZ_ASSERT(initialized());
+ codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_, ifDoesntEmulateUndefined_,
+ scratch_);
+ }
+
+ // Specify the register where the object to be tested is found, labels to
+ // jump to if the object is truthy or falsy, and a scratch register for
+ // use in the out-of-line path.
+ void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined,
+ Register scratch)
+ {
+ MOZ_ASSERT(!initialized());
+ MOZ_ASSERT(ifEmulatesUndefined);
+ objreg_ = objreg;
+ scratch_ = scratch;
+ ifEmulatesUndefined_ = ifEmulatesUndefined;
+ ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
+ }
+};
+
+// A subclass of OutOfLineTestObject containing two extra labels, for use when
+// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
+// code. The user should bind these labels in inline code, and specify them as
+// targets via setInputAndTargets, as appropriate.
+class OutOfLineTestObjectWithLabels : public OutOfLineTestObject
+{
+ Label label1_;
+ Label label2_;
+
+ public:
+ OutOfLineTestObjectWithLabels() { }
+
+ Label* label1() { return &label1_; }
+ Label* label2() { return &label2_; }
+};
+
+void
+CodeGenerator::testObjectEmulatesUndefinedKernel(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool)
+{
+ ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch);
+
+ // Perform a fast-path check of the object's class flags if the object's
+ // not a proxy. Let out-of-line code handle the slow cases that require
+ // saving registers, making a function call, and restoring registers.
+ masm.branchTestObjectTruthy(false, objreg, scratch, ool->entry(), ifEmulatesUndefined);
+}
+
+void
+CodeGenerator::branchTestObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool)
+{
+ MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
+ "ifDoesntEmulateUndefined will be bound to the fallthrough path");
+
+ testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
+ scratch, ool);
+ masm.bind(ifDoesntEmulateUndefined);
+}
+
+void
+CodeGenerator::testObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool)
+{
+ testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
+ scratch, ool);
+ masm.jump(ifDoesntEmulateUndefined);
+}
+
+void
+CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
+ const LDefinition* scratch1, const LDefinition* scratch2,
+ FloatRegister fr,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool,
+ MDefinition* valueMIR)
+{
+ // Count the number of possible type tags we might have, so we'll know when
+ // we've checked them all and hence can avoid emitting a tag check for the
+ // last one. In particular, whenever tagCount is 1 that means we've tried
+ // all but one of them already so we know exactly what's left based on the
+ // mightBe* booleans.
+ bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined);
+ bool mightBeNull = valueMIR->mightBeType(MIRType::Null);
+ bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean);
+ bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32);
+ bool mightBeObject = valueMIR->mightBeType(MIRType::Object);
+ bool mightBeString = valueMIR->mightBeType(MIRType::String);
+ bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol);
+ bool mightBeDouble = valueMIR->mightBeType(MIRType::Double);
+ int tagCount = int(mightBeUndefined) + int(mightBeNull) +
+ int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
+ int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble);
+
+ MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0);
+
+ // If we know we're null or undefined, we're definitely falsy, no
+ // need to even check the tag.
+ if (int(mightBeNull) + int(mightBeUndefined) == tagCount) {
+ masm.jump(ifFalsy);
+ return;
+ }
+
+ Register tag = masm.splitTagForTest(value);
+
+ if (mightBeUndefined) {
+ MOZ_ASSERT(tagCount > 1);
+ masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
+ --tagCount;
+ }
+
+ if (mightBeNull) {
+ MOZ_ASSERT(tagCount > 1);
+ masm.branchTestNull(Assembler::Equal, tag, ifFalsy);
+ --tagCount;
+ }
+
+ if (mightBeBoolean) {
+ MOZ_ASSERT(tagCount != 0);
+ Label notBoolean;
+ if (tagCount != 1)
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ masm.branchTestBooleanTruthy(false, value, ifFalsy);
+ if (tagCount != 1)
+ masm.jump(ifTruthy);
+ // Else just fall through to truthiness.
+ masm.bind(&notBoolean);
+ --tagCount;
+ }
+
+ if (mightBeInt32) {
+ MOZ_ASSERT(tagCount != 0);
+ Label notInt32;
+ if (tagCount != 1)
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
+ masm.branchTestInt32Truthy(false, value, ifFalsy);
+ if (tagCount != 1)
+ masm.jump(ifTruthy);
+ // Else just fall through to truthiness.
+ masm.bind(&notInt32);
+ --tagCount;
+ }
+
+ if (mightBeObject) {
+ MOZ_ASSERT(tagCount != 0);
+ if (ool) {
+ Label notObject;
+
+ if (tagCount != 1)
+ masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
+
+ Register objreg = masm.extractObject(value, ToRegister(scratch1));
+ testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool);
+
+ masm.bind(&notObject);
+ } else {
+ if (tagCount != 1)
+ masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
+ // Else just fall through to truthiness.
+ }
+ --tagCount;
+ } else {
+ MOZ_ASSERT(!ool,
+ "We better not have an unused OOL path, since the code generator will try to "
+ "generate code for it but we never set up its labels, which will cause null "
+ "derefs of those labels.");
+ }
+
+ if (mightBeString) {
+ // Test if a string is non-empty.
+ MOZ_ASSERT(tagCount != 0);
+ Label notString;
+ if (tagCount != 1)
+ masm.branchTestString(Assembler::NotEqual, tag, &notString);
+ masm.branchTestStringTruthy(false, value, ifFalsy);
+ if (tagCount != 1)
+ masm.jump(ifTruthy);
+ // Else just fall through to truthiness.
+ masm.bind(&notString);
+ --tagCount;
+ }
+
+ if (mightBeSymbol) {
+ // All symbols are truthy.
+ MOZ_ASSERT(tagCount != 0);
+ if (tagCount != 1)
+ masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
+ // Else fall through to ifTruthy.
+ --tagCount;
+ }
+
+ if (mightBeDouble) {
+ MOZ_ASSERT(tagCount == 1);
+ // If we reach here the value is a double.
+ masm.unboxDouble(value, fr);
+ masm.branchTestDoubleTruthy(false, fr, ifFalsy);
+ --tagCount;
+ }
+
+ MOZ_ASSERT(tagCount == 0);
+
+ // Fall through for truthy.
+}
+
+void
+CodeGenerator::testValueTruthy(const ValueOperand& value,
+ const LDefinition* scratch1, const LDefinition* scratch2,
+ FloatRegister fr,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool,
+ MDefinition* valueMIR)
+{
+ testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool, valueMIR);
+ masm.jump(ifTruthy);
+}
+
+void
+CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
+{
+ MIRType inputType = lir->mir()->input()->type();
+ MOZ_ASSERT(inputType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
+ "If the object couldn't emulate undefined, this should have been folded.");
+
+ Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
+ Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
+ Register input = ToRegister(lir->input());
+
+ if (lir->mir()->operandMightEmulateUndefined()) {
+ if (inputType == MIRType::ObjectOrNull)
+ masm.branchTestPtr(Assembler::Zero, input, input, falsy);
+
+ OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->mir());
+
+ testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()), ool);
+ } else {
+ MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
+ testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(), lir->ifFalsy());
+ }
+}
+
+void
+CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir)
+{
+ OutOfLineTestObject* ool = nullptr;
+ MDefinition* input = lir->mir()->input();
+ // Unfortunately, it's possible that someone (e.g. phi elimination) switched
+ // out our input after we did cacheOperandMightEmulateUndefined. So we
+ // might think it can emulate undefined _and_ know that it can't be an
+ // object.
+ if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType::Object)) {
+ ool = new(alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->mir());
+ }
+
+ Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
+ Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
+
+ testValueTruthy(ToValue(lir, LTestVAndBranch::Input),
+ lir->temp1(), lir->temp2(),
+ ToFloatRegister(lir->tempFloat()),
+ truthy, falsy, ool, input);
+}
+
+void
+CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir)
+{
+ MFunctionDispatch* mir = lir->mir();
+ Register input = ToRegister(lir->input());
+ Label* lastLabel;
+ size_t casesWithFallback;
+
+ // Determine whether the last case is the fallback or an ordinary case.
+ if (!mir->hasFallback()) {
+ MOZ_ASSERT(mir->numCases() > 0);
+ casesWithFallback = mir->numCases();
+ lastLabel = skipTrivialBlocks(mir->getCaseBlock(mir->numCases() - 1))->lir()->label();
+ } else {
+ casesWithFallback = mir->numCases() + 1;
+ lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
+ }
+
+ // Compare function pointers or object groups, except for the last case.
+ for (size_t i = 0; i < casesWithFallback - 1; i++) {
+ MOZ_ASSERT(i < mir->numCases());
+ LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
+ if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
+ masm.branchPtr(Assembler::Equal, Address(input, JSObject::offsetOfGroup()),
+ ImmGCPtr(funcGroup), target->label());
+ } else {
+ JSFunction* func = mir->getCase(i);
+ masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
+ }
+ }
+
+ // Jump to the last case.
+ masm.jump(lastLabel);
+}
+
+void
+CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir)
+{
+ MObjectGroupDispatch* mir = lir->mir();
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+
+ // Load the incoming ObjectGroup in temp.
+ masm.loadPtr(Address(input, JSObject::offsetOfGroup()), temp);
+
+ // Compare ObjectGroups.
+ MacroAssembler::BranchGCPtr lastBranch;
+ LBlock* lastBlock = nullptr;
+ InlinePropertyTable* propTable = mir->propTable();
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ JSFunction* func = mir->getCase(i);
+ LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
+
+ DebugOnly<bool> found = false;
+ for (size_t j = 0; j < propTable->numEntries(); j++) {
+ if (propTable->getFunction(j) != func)
+ continue;
+
+ if (lastBranch.isInitialized())
+ lastBranch.emit(masm);
+
+ ObjectGroup* group = propTable->getObjectGroup(j);
+ lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(group),
+ target->label());
+ lastBlock = target;
+ found = true;
+ }
+ MOZ_ASSERT(found);
+ }
+
+ // Jump to fallback block if we have an unknown ObjectGroup. If there's no
+ // fallback block, we should have handled all cases.
+
+ if (!mir->hasFallback()) {
+ MOZ_ASSERT(lastBranch.isInitialized());
+#ifdef DEBUG
+ Label ok;
+ lastBranch.relink(&ok);
+ lastBranch.emit(masm);
+ masm.assumeUnreachable("Unexpected ObjectGroup");
+ masm.bind(&ok);
+#endif
+ if (!isNextBlock(lastBlock))
+ masm.jump(lastBlock->label());
+ return;
+ }
+
+ LBlock* fallback = skipTrivialBlocks(mir->getFallback())->lir();
+ if (!lastBranch.isInitialized()) {
+ if (!isNextBlock(fallback))
+ masm.jump(fallback->label());
+ return;
+ }
+
+ lastBranch.invertCondition();
+ lastBranch.relink(fallback->label());
+ lastBranch.emit(masm);
+
+ if (!isNextBlock(lastBlock))
+ masm.jump(lastBlock->label());
+}
+
+void
+CodeGenerator::visitBooleanToString(LBooleanToString* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ Label true_, done;
+
+ masm.branchTest32(Assembler::NonZero, input, input, &true_);
+ masm.movePtr(ImmGCPtr(names.false_), output);
+ masm.jump(&done);
+
+ masm.bind(&true_);
+ masm.movePtr(ImmGCPtr(names.true_), output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::emitIntToString(Register input, Register output, Label* ool)
+{
+ masm.branch32(Assembler::AboveOrEqual, input, Imm32(StaticStrings::INT_STATIC_LIMIT), ool);
+
+ // Fast path for small integers.
+ masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().intStaticTable), output);
+ masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
+}
+
+typedef JSFlatString* (*IntToStringFn)(ExclusiveContext*, int);
+static const VMFunction IntToStringInfo =
+ FunctionInfo<IntToStringFn>(Int32ToString<CanGC>, "Int32ToString");
+
+void
+CodeGenerator::visitIntToString(LIntToString* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, ArgList(input),
+ StoreRegisterTo(output));
+
+ emitIntToString(input, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSString* (*DoubleToStringFn)(ExclusiveContext*, double);
+static const VMFunction DoubleToStringInfo =
+ FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>, "NumberToString");
+
+void
+CodeGenerator::visitDoubleToString(LDoubleToString* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register temp = ToRegister(lir->tempInt());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, ArgList(input),
+ StoreRegisterTo(output));
+
+ // Try double to integer conversion and run integer to string code.
+ masm.convertDoubleToInt32(input, temp, ool->entry(), true);
+ emitIntToString(temp, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSString* (*PrimitiveToStringFn)(JSContext*, HandleValue);
+static const VMFunction PrimitiveToStringInfo =
+ FunctionInfo<PrimitiveToStringFn>(ToStringSlow, "ToStringSlow");
+
+void
+CodeGenerator::visitValueToString(LValueToString* lir)
+{
+ ValueOperand input = ToValue(lir, LValueToString::Input);
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, ArgList(input),
+ StoreRegisterTo(output));
+
+ Label done;
+ Register tag = masm.splitTagForTest(input);
+ const JSAtomState& names = GetJitContext()->runtime->names();
+
+ // String
+ if (lir->mir()->input()->mightBeType(MIRType::String)) {
+ Label notString;
+ masm.branchTestString(Assembler::NotEqual, tag, &notString);
+ masm.unboxString(input, output);
+ masm.jump(&done);
+ masm.bind(&notString);
+ }
+
+ // Integer
+ if (lir->mir()->input()->mightBeType(MIRType::Int32)) {
+ Label notInteger;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
+ Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
+ unboxed = masm.extractInt32(input, unboxed);
+ emitIntToString(unboxed, output, ool->entry());
+ masm.jump(&done);
+ masm.bind(&notInteger);
+ }
+
+ // Double
+ if (lir->mir()->input()->mightBeType(MIRType::Double)) {
+ // Note: there is no fast path here; it would need two extra registers and
+ // could only convert doubles that fit in an int32 and are smaller than
+ // StaticStrings::INT_STATIC_LIMIT.
+ masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
+ }
+
+ // Undefined
+ if (lir->mir()->input()->mightBeType(MIRType::Undefined)) {
+ Label notUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ masm.movePtr(ImmGCPtr(names.undefined), output);
+ masm.jump(&done);
+ masm.bind(&notUndefined);
+ }
+
+ // Null
+ if (lir->mir()->input()->mightBeType(MIRType::Null)) {
+ Label notNull;
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ masm.movePtr(ImmGCPtr(names.null), output);
+ masm.jump(&done);
+ masm.bind(&notNull);
+ }
+
+ // Boolean
+ if (lir->mir()->input()->mightBeType(MIRType::Boolean)) {
+ Label notBoolean, true_;
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ masm.branchTestBooleanTruthy(true, input, &true_);
+ masm.movePtr(ImmGCPtr(names.false_), output);
+ masm.jump(&done);
+ masm.bind(&true_);
+ masm.movePtr(ImmGCPtr(names.true_), output);
+ masm.jump(&done);
+ masm.bind(&notBoolean);
+ }
+
+ // Object
+ if (lir->mir()->input()->mightBeType(MIRType::Object)) {
+ // Bail.
+ MOZ_ASSERT(lir->mir()->fallible());
+ Label bail;
+ masm.branchTestObject(Assembler::Equal, tag, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+
+ // Symbol
+ if (lir->mir()->input()->mightBeType(MIRType::Symbol))
+ masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
+
+#ifdef DEBUG
+ masm.assumeUnreachable("Unexpected type for MValueToString.");
+#endif
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*ToObjectFn)(JSContext*, HandleValue, bool);
+static const VMFunction ToObjectInfo =
+ FunctionInfo<ToObjectFn>(ToObjectSlow, "ToObjectSlow");
+
+void
+CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir)
+{
+ ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input);
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)),
+ StoreRegisterTo(output));
+
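+ // Objects and null both fall through to |done| and are unboxed below (null
+ // unboxes to a null pointer); any other value is converted by the
+ // ToObjectSlow VM call.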
+ Label done;
+ masm.branchTestObject(Assembler::Equal, input, &done);
+ masm.branchTestNull(Assembler::NotEqual, input, ool->entry());
+
+ masm.bind(&done);
+ masm.unboxNonDouble(input, output);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, JSObject*);
+static const VMFunction CloneRegExpObjectInfo =
+ FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject, "CloneRegExpObject");
+
+void
+CodeGenerator::visitRegExp(LRegExp* lir)
+{
+ pushArg(ImmGCPtr(lir->mir()->source()));
+ callVM(CloneRegExpObjectInfo, lir);
+}
+
+// Amount of space to reserve on the stack when executing RegExps inline.
+static const size_t RegExpReservedStack = sizeof(irregexp::InputOutputData)
+ + sizeof(MatchPairs)
+ + RegExpObject::MaxPairCount * sizeof(MatchPair);
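+// The reserved area is laid out as an InputOutputData, followed by a
+// MatchPairs header, followed by the vector of MatchPair entries.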
+
+static size_t
+RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset)
+{
+ return inputOutputDataStartOffset + sizeof(irregexp::InputOutputData) + sizeof(MatchPairs);
+}
+
+static Address
+RegExpPairCountAddress(MacroAssembler& masm, size_t inputOutputDataStartOffset)
+{
+ return Address(masm.getStackPointer(), inputOutputDataStartOffset
+ + sizeof(irregexp::InputOutputData)
+ + MatchPairs::offsetOfPairCount());
+}
+
+// Prepare an InputOutputData and an optional MatchPairs, for which space has
+// been allocated on the stack, and try to execute a RegExp on a string input.
+// If the RegExp was successfully executed and matched the input, fall through;
+// otherwise jump to notFound or failure.
+static bool
+PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Register input,
+ Register lastIndex,
+ Register temp1, Register temp2, Register temp3,
+ size_t inputOutputDataStartOffset,
+ RegExpShared::CompilationMode mode,
+ Label* notFound, Label* failure)
+{
+ size_t matchPairsStartOffset = inputOutputDataStartOffset + sizeof(irregexp::InputOutputData);
+ size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+
+ Address inputStartAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputStart));
+ Address inputEndAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputEnd));
+ Address matchesPointerAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, matches));
+ Address startIndexAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, startIndex));
+ Address endIndexAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, endIndex));
+ Address matchResultAddress(masm.getStackPointer(),
+ inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, result));
+
+ Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);
+ Address pairsPointerAddress(masm.getStackPointer(),
+ matchPairsStartOffset + MatchPairs::offsetOfPairs());
+
+ Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
+
+ RegExpStatics* res = cx->global()->getRegExpStatics(cx);
+ if (!res)
+ return false;
+#ifdef JS_USE_LINK_REGISTER
+ if (mode != RegExpShared::MatchOnly)
+ masm.pushReturnAddress();
+#endif
+ if (mode == RegExpShared::Normal) {
+ // First, fill in a skeletal MatchPairs instance on the stack. This will be
+ // passed to the OOL stub in the caller if we aren't able to execute the
+ // RegExp inline, and that stub needs to be able to determine whether the
+ // execution finished successfully.
+ masm.store32(Imm32(1), pairCountAddress);
+ masm.store32(Imm32(-1), pairsVectorAddress);
+ masm.computeEffectiveAddress(pairsVectorAddress, temp1);
+ masm.storePtr(temp1, pairsPointerAddress);
+ }
+
+ // Check for a linear input string.
+ masm.branchIfRopeOrExternal(input, temp1, failure);
+
+ // Get the RegExpShared for the RegExp.
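+ // A null PRIVATE_SLOT means no RegExpShared has been created for this
+ // RegExp yet; jump to the failure path so the caller can fall back.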
+ masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp1);
+ masm.branchPtr(Assembler::Equal, temp1, ImmWord(0), failure);
+
+ // ES6 21.2.2.2 step 2.
+ // See RegExp.cpp ExecuteRegExp for more detail.
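+ // If the unicode flag is set and lastIndex points at a trail surrogate
+ // whose preceding code unit is a lead surrogate, move lastIndex back by one
+ // so the match starts at the beginning of the code point.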
+ {
+ Label done;
+
+ masm.branchTest32(Assembler::Zero, Address(temp1, RegExpShared::offsetOfFlags()),
+ Imm32(UnicodeFlag), &done);
+
+ // If the input is Latin-1, it cannot contain surrogate pairs.
+ masm.branchLatin1String(input, &done);
+
+ // Check if |lastIndex > 0 && lastIndex < input->length()|.
+ // lastIndex is already known to be non-negative here.
+ masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
+ masm.loadStringLength(input, temp2);
+ masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done);
+
+ // Check if input[lastIndex] is a trail surrogate.
+ masm.loadStringChars(input, temp2);
+ masm.computeEffectiveAddress(BaseIndex(temp2, lastIndex, TimesTwo), temp3);
+ masm.load16ZeroExtend(Address(temp3, 0), temp3);
+
+ masm.branch32(Assembler::Below, temp3, Imm32(unicode::TrailSurrogateMin), &done);
+ masm.branch32(Assembler::Above, temp3, Imm32(unicode::TrailSurrogateMax), &done);
+
+ // Check if input[lastIndex-1] is a lead surrogate.
+ masm.move32(lastIndex, temp3);
+ masm.sub32(Imm32(1), temp3);
+ masm.computeEffectiveAddress(BaseIndex(temp2, temp3, TimesTwo), temp3);
+ masm.load16ZeroExtend(Address(temp3, 0), temp3);
+
+ masm.branch32(Assembler::Below, temp3, Imm32(unicode::LeadSurrogateMin), &done);
+ masm.branch32(Assembler::Above, temp3, Imm32(unicode::LeadSurrogateMax), &done);
+
+ // Move lastIndex to lead surrogate.
+ masm.subPtr(Imm32(1), lastIndex);
+
+ masm.bind(&done);
+ }
+
+ if (mode == RegExpShared::Normal) {
+ // Don't handle RegExps with excessive parens.
+ masm.load32(Address(temp1, RegExpShared::offsetOfParenCount()), temp2);
+ masm.branch32(Assembler::AboveOrEqual, temp2, Imm32(RegExpObject::MaxPairCount), failure);
+
+ // Fill in the paren count in the MatchPairs on the stack.
+ masm.add32(Imm32(1), temp2);
+ masm.store32(temp2, pairCountAddress);
+ }
+
+ // Load the code pointer for the type of input string we have, and compute
+ // the input start/end pointers in the InputOutputData.
+ Register codePointer = temp1;
+ {
+ masm.loadStringChars(input, temp2);
+ masm.storePtr(temp2, inputStartAddress);
+ masm.loadStringLength(input, temp3);
+
+ Label isLatin1, done;
+ masm.branchLatin1String(input, &isLatin1);
+ {
+ masm.lshiftPtr(Imm32(1), temp3);
+ masm.loadPtr(Address(temp1, RegExpShared::offsetOfTwoByteJitCode(mode)),
+ codePointer);
+ }
+ masm.jump(&done);
+ {
+ masm.bind(&isLatin1);
+ masm.loadPtr(Address(temp1, RegExpShared::offsetOfLatin1JitCode(mode)),
+ codePointer);
+ }
+ masm.bind(&done);
+
+ masm.addPtr(temp3, temp2);
+ masm.storePtr(temp2, inputEndAddress);
+ }
+
+ // Check that the RegExpShared has been compiled for this type of input.
+ masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
+ masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
+
+ // Finish filling in the InputOutputData instance on the stack.
+ if (mode == RegExpShared::Normal) {
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(), matchPairsStartOffset), temp2);
+ masm.storePtr(temp2, matchesPointerAddress);
+ } else {
+ // Use InputOutputData.endIndex itself for output.
+ masm.computeEffectiveAddress(endIndexAddress, temp2);
+ masm.storePtr(temp2, endIndexAddress);
+ }
+ masm.storePtr(lastIndex, startIndexAddress);
+ masm.store32(Imm32(0), matchResultAddress);
+
+ // Save any volatile inputs.
+ LiveGeneralRegisterSet volatileRegs;
+ if (lastIndex.volatile_())
+ volatileRegs.add(lastIndex);
+ if (input.volatile_())
+ volatileRegs.add(input);
+ if (regexp.volatile_())
+ volatileRegs.add(regexp);
+
+ // Execute the RegExp.
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(), inputOutputDataStartOffset), temp2);
+ masm.PushRegsInMask(volatileRegs);
+ masm.setupUnalignedABICall(temp3);
+ masm.passABIArg(temp2);
+ masm.callWithABI(codePointer);
+ masm.PopRegsInMask(volatileRegs);
+
+ Label success;
+ masm.branch32(Assembler::Equal, matchResultAddress,
+ Imm32(RegExpRunStatus_Success_NotFound), notFound);
+ masm.branch32(Assembler::Equal, matchResultAddress,
+ Imm32(RegExpRunStatus_Error), failure);
+
+ // Lazily update the RegExpStatics.
+ masm.movePtr(ImmPtr(res), temp1);
+
+ Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput());
+ Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput());
+ Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
+ Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex());
+
+ masm.patchableCallPreBarrier(pendingInputAddress, MIRType::String);
+ masm.patchableCallPreBarrier(matchesInputAddress, MIRType::String);
+ masm.patchableCallPreBarrier(lazySourceAddress, MIRType::String);
+
+ masm.storePtr(input, pendingInputAddress);
+ masm.storePtr(input, matchesInputAddress);
+ masm.storePtr(lastIndex, Address(temp1, RegExpStatics::offsetOfLazyIndex()));
+ masm.store32(Imm32(1), Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation()));
+
+ masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp2);
+ masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3);
+ masm.storePtr(temp3, lazySourceAddress);
+ masm.load32(Address(temp2, RegExpShared::offsetOfFlags()), temp3);
+ masm.store32(temp3, Address(temp1, RegExpStatics::offsetOfLazyFlags()));
+
+ if (mode == RegExpShared::MatchOnly) {
+ // endIndex is passed via temp3.
+ masm.load32(endIndexAddress, temp3);
+ }
+
+ return true;
+}
+
+static void
+CopyStringChars(MacroAssembler& masm, Register to, Register from, Register len,
+ Register byteOpScratch, size_t fromWidth, size_t toWidth);
+
+class CreateDependentString
+{
+ Register string_;
+ Register temp_;
+ Label* failure_;
+ enum class FallbackKind : uint8_t {
+ InlineString,
+ FatInlineString,
+ NotInlineString,
+ Count
+ };
+ mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_, joins_;
+
+public:
+ // Generate code that creates a DependentString.
+ // The caller should call generateFallback after masm.ret(), to generate the
+ // fallback path.
+ void generate(MacroAssembler& masm, const JSAtomState& names,
+ bool latin1, Register string,
+ Register base, Register temp1, Register temp2,
+ BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
+ Label* failure);
+
+ // Generate the fallback path for creating a DependentString.
+ void generateFallback(MacroAssembler& masm, LiveRegisterSet regsToSave);
+};
+
+void
+CreateDependentString::generate(MacroAssembler& masm, const JSAtomState& names,
+ bool latin1, Register string,
+ Register base, Register temp1, Register temp2,
+ BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
+ Label* failure)
+{
+ string_ = string;
+ temp_ = temp2;
+ failure_ = failure;
+
+ // Compute the string length.
+ masm.load32(startIndexAddress, temp2);
+ masm.load32(limitIndexAddress, temp1);
+ masm.sub32(temp2, temp1);
+
+ Label done, nonEmpty;
+
+ // Zero length matches use the empty string.
+ masm.branchTest32(Assembler::NonZero, temp1, temp1, &nonEmpty);
+ masm.movePtr(ImmGCPtr(names.empty), string);
+ masm.jump(&done);
+
+ masm.bind(&nonEmpty);
+
+ Label notInline;
+
+ int32_t maxInlineLength = latin1
+ ? (int32_t) JSFatInlineString::MAX_LENGTH_LATIN1
+ : (int32_t) JSFatInlineString::MAX_LENGTH_TWO_BYTE;
+ masm.branch32(Assembler::Above, temp1, Imm32(maxInlineLength), &notInline);
+
+ {
+ // Make a thin or fat inline string.
+ Label stringAllocated, fatInline;
+
+ int32_t maxThinInlineLength = latin1
+ ? (int32_t) JSThinInlineString::MAX_LENGTH_LATIN1
+ : (int32_t) JSThinInlineString::MAX_LENGTH_TWO_BYTE;
+ masm.branch32(Assembler::Above, temp1, Imm32(maxThinInlineLength), &fatInline);
+
+ int32_t thinFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_THIN_INLINE_FLAGS;
+ masm.newGCString(string, temp2, &fallbacks_[FallbackKind::InlineString]);
+ masm.bind(&joins_[FallbackKind::InlineString]);
+ masm.store32(Imm32(thinFlags), Address(string, JSString::offsetOfFlags()));
+ masm.jump(&stringAllocated);
+
+ masm.bind(&fatInline);
+
+ int32_t fatFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_FAT_INLINE_FLAGS;
+ masm.newGCFatInlineString(string, temp2, &fallbacks_[FallbackKind::FatInlineString]);
+ masm.bind(&joins_[FallbackKind::FatInlineString]);
+ masm.store32(Imm32(fatFlags), Address(string, JSString::offsetOfFlags()));
+
+ masm.bind(&stringAllocated);
+ masm.store32(temp1, Address(string, JSString::offsetOfLength()));
+
+ masm.push(string);
+ masm.push(base);
+
+ // Adjust the start index address for the above pushes.
+ MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
+ BaseIndex newStartIndexAddress = startIndexAddress;
+ newStartIndexAddress.offset += 2 * sizeof(void*);
+
+ // Load chars pointer for the new string.
+ masm.addPtr(ImmWord(JSInlineString::offsetOfInlineStorage()), string);
+
+ // Load the source characters pointer.
+ masm.loadStringChars(base, base);
+ masm.load32(newStartIndexAddress, temp2);
+ if (latin1)
+ masm.addPtr(temp2, base);
+ else
+ masm.computeEffectiveAddress(BaseIndex(base, temp2, TimesTwo), base);
+
+ CopyStringChars(masm, string, base, temp1, temp2, latin1 ? 1 : 2, latin1 ? 1 : 2);
+
+ // Null-terminate.
+ if (latin1)
+ masm.store8(Imm32(0), Address(string, 0));
+ else
+ masm.store16(Imm32(0), Address(string, 0));
+
+ masm.pop(base);
+ masm.pop(string);
+ }
+
+ masm.jump(&done);
+ masm.bind(&notInline);
+
+ {
+ // Make a dependent string.
+ int32_t flags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::DEPENDENT_FLAGS;
+
+ masm.newGCString(string, temp2, &fallbacks_[FallbackKind::NotInlineString]);
+ masm.bind(&joins_[FallbackKind::NotInlineString]);
+ masm.store32(Imm32(flags), Address(string, JSString::offsetOfFlags()));
+ masm.store32(temp1, Address(string, JSString::offsetOfLength()));
+
+ masm.loadPtr(Address(base, JSString::offsetOfNonInlineChars()), temp1);
+ masm.load32(startIndexAddress, temp2);
+ if (latin1)
+ masm.addPtr(temp2, temp1);
+ else
+ masm.computeEffectiveAddress(BaseIndex(temp1, temp2, TimesTwo), temp1);
+ masm.storePtr(temp1, Address(string, JSString::offsetOfNonInlineChars()));
+ masm.storePtr(base, Address(string, JSDependentString::offsetOfBase()));
+
+ // Follow any base pointer if the input is itself a dependent string.
+ // Watch for undepended strings, which have a base pointer but don't
+ // actually share their characters with it.
+ Label noBase;
+ masm.branchTest32(Assembler::Zero, Address(base, JSString::offsetOfFlags()),
+ Imm32(JSString::HAS_BASE_BIT), &noBase);
+ masm.branchTest32(Assembler::NonZero, Address(base, JSString::offsetOfFlags()),
+ Imm32(JSString::FLAT_BIT), &noBase);
+ masm.loadPtr(Address(base, JSDependentString::offsetOfBase()), temp1);
+ masm.storePtr(temp1, Address(string, JSDependentString::offsetOfBase()));
+ masm.bind(&noBase);
+ }
+
+ masm.bind(&done);
+}
+
+static void*
+AllocateString(JSContext* cx)
+{
+ return js::Allocate<JSString, NoGC>(cx);
+}
+
+static void*
+AllocateFatInlineString(JSContext* cx)
+{
+ return js::Allocate<JSFatInlineString, NoGC>(cx);
+}
+
+void
+CreateDependentString::generateFallback(MacroAssembler& masm, LiveRegisterSet regsToSave)
+{
+ regsToSave.take(string_);
+ regsToSave.take(temp_);
+ for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
+ masm.bind(&fallbacks_[kind]);
+
+ masm.PushRegsInMask(regsToSave);
+
+ masm.setupUnalignedABICall(string_);
+ masm.loadJSContext(string_);
+ masm.passABIArg(string_);
+ masm.callWithABI(kind == FallbackKind::FatInlineString
+ ? JS_FUNC_TO_DATA_PTR(void*, AllocateFatInlineString)
+ : JS_FUNC_TO_DATA_PTR(void*, AllocateString));
+ masm.storeCallPointerResult(string_);
+
+ masm.PopRegsInMask(regsToSave);
+
+ masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
+
+ masm.jump(&joins_[kind]);
+ }
+}
+
+static void*
+CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots)
+{
+ return js::Allocate<JSObject, NoGC>(cx, kind, nDynamicSlots, gc::DefaultHeap,
+ &ArrayObject::class_);
+}
+
+static void
+CreateMatchResultFallback(MacroAssembler& masm, LiveRegisterSet regsToSave,
+ Register object, Register temp2, Register temp5,
+ ArrayObject* templateObj, Label* fail)
+{
+ MOZ_ASSERT(templateObj->group()->clasp() == &ArrayObject::class_);
+
+ regsToSave.take(object);
+ regsToSave.take(temp2);
+ regsToSave.take(temp5);
+ masm.PushRegsInMask(regsToSave);
+
+ masm.setupUnalignedABICall(object);
+
+ masm.loadJSContext(object);
+ masm.passABIArg(object);
+ masm.move32(Imm32(int32_t(templateObj->asTenured().getAllocKind())), temp2);
+ masm.passABIArg(temp2);
+ masm.move32(Imm32(int32_t(templateObj->as<NativeObject>().numDynamicSlots())), temp5);
+ masm.passABIArg(temp5);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, CreateMatchResultFallbackFunc));
+ masm.storeCallPointerResult(object);
+
+ masm.PopRegsInMask(regsToSave);
+
+ masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail);
+
+ masm.initGCThing(object, temp2, templateObj, true, false);
+}
+
+JitCode*
+JitCompartment::generateRegExpMatcherStub(JSContext* cx)
+{
+ Register regexp = RegExpMatcherRegExpReg;
+ Register input = RegExpMatcherStringReg;
+ Register lastIndex = RegExpMatcherLastIndexReg;
+ ValueOperand result = JSReturnOperand;
+
+ // We are free to clobber all registers, as LRegExpMatcher is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ regs.take(lastIndex);
+
+ // temp5 is used in single-byte instructions when creating dependent
+ // strings; on some platforms only certain registers may be used in such
+ // instructions.
+ Register temp5;
+ {
+ AllocatableGeneralRegisterSet oregs = regs;
+ do {
+ temp5 = oregs.takeAny();
+ } while (!MacroAssembler::canUseInSingleByteInstruction(temp5));
+ regs.take(temp5);
+ }
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+
+ Register maybeTemp4 = InvalidReg;
+ if (!regs.empty()) {
+ // There are not enough registers on x86.
+ maybeTemp4 = regs.takeAny();
+ }
+
+ ArrayObject* templateObject = cx->compartment()->regExps.getOrCreateMatchResultTemplateObject(cx);
+ if (!templateObject)
+ return nullptr;
+
+ // The template object should have enough space for the maximum number of
+ // pairs this stub can handle.
+ MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
+ gc::GetGCKindSlots(templateObject->asTenured().getAllocKind()));
+
+ MacroAssembler masm(cx);
+
+ // The InputOutputData is placed above the return address on the stack.
+ size_t inputOutputDataStartOffset = sizeof(void*);
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex,
+ temp1, temp2, temp5, inputOutputDataStartOffset,
+ RegExpShared::Normal, &notFound, &oolEntry))
+ {
+ return nullptr;
+ }
+
+ // Construct the result.
+ Register object = temp1;
+ Label matchResultFallback, matchResultJoin;
+ masm.createGCObject(object, temp2, templateObject, gc::DefaultHeap, &matchResultFallback);
+ masm.bind(&matchResultJoin);
+
+ // Initialize slots of result object.
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+ masm.storeValue(templateObject->getSlot(0), Address(temp2, 0));
+ masm.storeValue(templateObject->getSlot(1), Address(temp2, sizeof(Value)));
+
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+
+#ifdef DEBUG
+ // Assert the initial value of initializedLength and length to make sure
+ // restoration on failure case works.
+ {
+ Label initLengthOK, lengthOK;
+ masm.branch32(Assembler::Equal,
+ Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()),
+ Imm32(templateObject->getDenseInitializedLength()),
+ &initLengthOK);
+ masm.assumeUnreachable("Initial value of the match object's initializedLength does not match to restoration.");
+ masm.bind(&initLengthOK);
+
+ masm.branch32(Assembler::Equal,
+ Address(object, elementsOffset + ObjectElements::offsetOfLength()),
+ Imm32(templateObject->length()),
+ &lengthOK);
+ masm.assumeUnreachable("Initial value of The match object's length does not match to restoration.");
+ masm.bind(&lengthOK);
+ }
+#endif
+
+ Register matchIndex = temp2;
+ masm.move32(Imm32(0), matchIndex);
+
+ size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+ Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
+ Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);
+
+ BaseIndex stringAddress(object, matchIndex, TimesEight, elementsOffset);
+
+ JS_STATIC_ASSERT(sizeof(MatchPair) == 8);
+ BaseIndex stringIndexAddress(masm.getStackPointer(), matchIndex, TimesEight,
+ pairsVectorStartOffset + offsetof(MatchPair, start));
+ BaseIndex stringLimitAddress(masm.getStackPointer(), matchIndex, TimesEight,
+ pairsVectorStartOffset + offsetof(MatchPair, limit));
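+ // matchIndex scales by eight both for the result object's elements (each
+ // Value is 8 bytes) and for the MatchPair vector (see the static assert
+ // above).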
+
+ // Loop to construct the match strings. There are two different loops,
+ // depending on whether the input is latin1.
+ CreateDependentString depStr[2];
+ {
+ Label isLatin1, done;
+ masm.branchLatin1String(input, &isLatin1);
+
+ Label* failure = &oolEntry;
+ Register temp4 = (maybeTemp4 == InvalidReg) ? lastIndex : maybeTemp4;
+
+ Label failureRestore;
+ if (maybeTemp4 == InvalidReg) {
+ failure = &failureRestore;
+
+ // Save lastIndex value to temporary space.
+ masm.store32(lastIndex, Address(object, elementsOffset + ObjectElements::offsetOfLength()));
+ }
+
+ for (int isLatin = 0; isLatin <= 1; isLatin++) {
+ if (isLatin)
+ masm.bind(&isLatin1);
+
+ Label matchLoop;
+ masm.bind(&matchLoop);
+
+ Label isUndefined, storeDone;
+ masm.branch32(Assembler::LessThan, stringIndexAddress, Imm32(0), &isUndefined);
+
+ depStr[isLatin].generate(masm, cx->names(), isLatin, temp3, input, temp4, temp5,
+ stringIndexAddress, stringLimitAddress, failure);
+
+ masm.storeValue(JSVAL_TYPE_STRING, temp3, stringAddress);
+
+ masm.jump(&storeDone);
+ masm.bind(&isUndefined);
+
+ masm.storeValue(UndefinedValue(), stringAddress);
+ masm.bind(&storeDone);
+
+ masm.add32(Imm32(1), matchIndex);
+ masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex, &done);
+ masm.jump(&matchLoop);
+ }
+
+ if (maybeTemp4 == InvalidReg) {
+ // Restore lastIndex value from temporary space, both for success
+ // and failure cases.
+
+ masm.load32(Address(object, elementsOffset + ObjectElements::offsetOfLength()), lastIndex);
+ masm.jump(&done);
+
+ masm.bind(&failureRestore);
+ masm.load32(Address(object, elementsOffset + ObjectElements::offsetOfLength()), lastIndex);
+
+ // Restore the match object for the failure case.
+ masm.store32(Imm32(templateObject->getDenseInitializedLength()),
+ Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()));
+ masm.store32(Imm32(templateObject->length()),
+ Address(object, elementsOffset + ObjectElements::offsetOfLength()));
+ masm.jump(&oolEntry);
+ }
+
+ masm.bind(&done);
+ }
+
+ // Fill in the rest of the output object.
+ masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()));
+ masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfLength()));
+
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+
+ MOZ_ASSERT(templateObject->numFixedSlots() == 0);
+ MOZ_ASSERT(templateObject->lookupPure(cx->names().index)->slot() == 0);
+ MOZ_ASSERT(templateObject->lookupPure(cx->names().input)->slot() == 1);
+
+ masm.load32(pairsVectorAddress, temp3);
+ masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
+ masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
+
+ // All done!
+ masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
+ masm.ret();
+
+ masm.bind(&notFound);
+ masm.moveValue(NullValue(), result);
+ masm.ret();
+
+ // Fallback paths for CreateDependentString and createGCObject.
+ // Need to save all registers in use when they were called.
+ LiveRegisterSet regsToSave(RegisterSet::Volatile());
+ regsToSave.addUnchecked(regexp);
+ regsToSave.addUnchecked(input);
+ regsToSave.addUnchecked(lastIndex);
+ regsToSave.addUnchecked(temp1);
+ regsToSave.addUnchecked(temp2);
+ regsToSave.addUnchecked(temp3);
+ if (maybeTemp4 != InvalidReg)
+ regsToSave.addUnchecked(maybeTemp4);
+ regsToSave.addUnchecked(temp5);
+
+ for (int isLatin = 0; isLatin <= 1; isLatin++)
+ depStr[isLatin].generateFallback(masm, regsToSave);
+
+ masm.bind(&matchResultFallback);
+ CreateMatchResultFallback(masm, regsToSave, object, temp2, temp5, templateObject, &oolEntry);
+ masm.jump(&matchResultJoin);
+
+ // Use an undefined value to signal to the caller that the OOL stub needs to be called.
+ masm.bind(&oolEntry);
+ masm.moveValue(UndefinedValue(), result);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("RegExpMatcherStub");
+ JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
+ if (!code)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
+#endif
+
+ if (cx->zone()->needsIncrementalBarrier())
+ code->togglePreBarriers(true, DontReprotect);
+
+ return code;
+}
+
+class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator>
+{
+ LRegExpMatcher* lir_;
+
+ public:
+ explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineRegExpMatcher(this);
+ }
+
+ LRegExpMatcher* lir() const {
+ return lir_;
+ }
+};
+
+typedef bool (*RegExpMatcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input,
+ int32_t lastIndex,
+ MatchPairs* pairs, MutableHandleValue output);
+static const VMFunction RegExpMatcherRawInfo =
+ FunctionInfo<RegExpMatcherRawFn>(RegExpMatcherRaw, "RegExpMatcherRaw");
+
+void
+CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool)
+{
+ LRegExpMatcher* lir = ool->lir();
+ Register lastIndex = ToRegister(lir->lastIndex());
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lastIndex);
+ regs.take(input);
+ regs.take(regexp);
+ Register temp = regs.takeAny();
+
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(),
+ sizeof(irregexp::InputOutputData)), temp);
+
+ pushArg(temp);
+ pushArg(lastIndex);
+ pushArg(input);
+ pushArg(regexp);
+
+ // We are not using oolCallVM because we are in a call instruction, and the
+ // live registers are already saved by the register allocator.
+ callVM(RegExpMatcherRawInfo, lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir)
+{
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
+ MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
+ MOZ_ASSERT(GetValueOutput(lir) == JSReturnOperand);
+
+#if defined(JS_NUNBOX32)
+ MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Type);
+ MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Data);
+ MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Type);
+ MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Data);
+ MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Type);
+ MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Data);
+#elif defined(JS_PUNBOX64)
+ MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg);
+ MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg);
+ MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg);
+#endif
+
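+ // Reserve stack space for the InputOutputData, MatchPairs, and pairs vector
+ // that the stub fills in.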
+ masm.reserveStack(RegExpReservedStack);
+
+ OutOfLineRegExpMatcher* ool = new(alloc()) OutOfLineRegExpMatcher(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ JitCode* regExpMatcherStub = gen->compartment->jitCompartment()->regExpMatcherStubNoBarrier();
+ masm.call(regExpMatcherStub);
+ masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
+ masm.bind(ool->rejoin());
+
+ masm.freeStack(RegExpReservedStack);
+}
+
+static const int32_t RegExpSearcherResultNotFound = -1;
+static const int32_t RegExpSearcherResultFailed = -2;
+
+JitCode*
+JitCompartment::generateRegExpSearcherStub(JSContext* cx)
+{
+ Register regexp = RegExpTesterRegExpReg;
+ Register input = RegExpTesterStringReg;
+ Register lastIndex = RegExpTesterLastIndexReg;
+ Register result = ReturnReg;
+
+ // We are free to clobber all registers, as LRegExpSearcher is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ regs.take(lastIndex);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+
+ MacroAssembler masm(cx);
+
+ // The InputOutputData is placed above the return address on the stack.
+ size_t inputOutputDataStartOffset = sizeof(void*);
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex,
+ temp1, temp2, temp3, inputOutputDataStartOffset,
+ RegExpShared::Normal, &notFound, &oolEntry))
+ {
+ return nullptr;
+ }
+
+ size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+ Address stringIndexAddress(masm.getStackPointer(),
+ pairsVectorStartOffset + offsetof(MatchPair, start));
+ Address stringLimitAddress(masm.getStackPointer(),
+ pairsVectorStartOffset + offsetof(MatchPair, limit));
+
+ masm.load32(stringIndexAddress, result);
+ masm.load32(stringLimitAddress, input);
+ masm.lshiftPtr(Imm32(15), input);
+ masm.or32(input, result);
+ masm.ret();
+
+ masm.bind(&notFound);
+ masm.move32(Imm32(RegExpSearcherResultNotFound), result);
+ masm.ret();
+
+ masm.bind(&oolEntry);
+ masm.move32(Imm32(RegExpSearcherResultFailed), result);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("RegExpSearcherStub");
+ JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
+ if (!code)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
+#endif
+
+ if (cx->zone()->needsIncrementalBarrier())
+ code->togglePreBarriers(true, DontReprotect);
+
+ return code;
+}
+
+class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator>
+{
+ LRegExpSearcher* lir_;
+
+ public:
+ explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineRegExpSearcher(this);
+ }
+
+ LRegExpSearcher* lir() const {
+ return lir_;
+ }
+};
+
+typedef bool (*RegExpSearcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input,
+ int32_t lastIndex,
+ MatchPairs* pairs, int32_t* result);
+static const VMFunction RegExpSearcherRawInfo =
+ FunctionInfo<RegExpSearcherRawFn>(RegExpSearcherRaw, "RegExpSearcherRaw");
+
+void
+CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool)
+{
+ LRegExpSearcher* lir = ool->lir();
+ Register lastIndex = ToRegister(lir->lastIndex());
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lastIndex);
+ regs.take(input);
+ regs.take(regexp);
+ Register temp = regs.takeAny();
+
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(),
+ sizeof(irregexp::InputOutputData)), temp);
+
+ pushArg(temp);
+ pushArg(lastIndex);
+ pushArg(input);
+ pushArg(regexp);
+
+ // We are not using oolCallVM because we are in a call instruction, and the
+ // live registers are already saved by the register allocator.
+ callVM(RegExpSearcherRawInfo, lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir)
+{
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
+ MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
+ MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
+
+ MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg);
+ MOZ_ASSERT(RegExpTesterStringReg != ReturnReg);
+ MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg);
+
+ masm.reserveStack(RegExpReservedStack);
+
+ OutOfLineRegExpSearcher* ool = new(alloc()) OutOfLineRegExpSearcher(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ JitCode* regExpSearcherStub = gen->compartment->jitCompartment()->regExpSearcherStubNoBarrier();
+ masm.call(regExpSearcherStub);
+ masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed), ool->entry());
+ masm.bind(ool->rejoin());
+
+ masm.freeStack(RegExpReservedStack);
+}
+
+static const int32_t RegExpTesterResultNotFound = -1;
+static const int32_t RegExpTesterResultFailed = -2;
+
+JitCode*
+JitCompartment::generateRegExpTesterStub(JSContext* cx)
+{
+ Register regexp = RegExpTesterRegExpReg;
+ Register input = RegExpTesterStringReg;
+ Register lastIndex = RegExpTesterLastIndexReg;
+ Register result = ReturnReg;
+
+ MacroAssembler masm(cx);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // We are free to clobber all registers, as LRegExpTester is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ regs.take(lastIndex);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+
+ masm.reserveStack(sizeof(irregexp::InputOutputData));
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex,
+ temp1, temp2, temp3, 0,
+ RegExpShared::MatchOnly, &notFound, &oolEntry))
+ {
+ return nullptr;
+ }
+
+ Label done;
+
+ // temp3 contains endIndex.
+ masm.move32(temp3, result);
+ masm.jump(&done);
+
+ masm.bind(&notFound);
+ masm.move32(Imm32(RegExpTesterResultNotFound), result);
+ masm.jump(&done);
+
+ masm.bind(&oolEntry);
+ masm.move32(Imm32(RegExpTesterResultFailed), result);
+
+ masm.bind(&done);
+ masm.freeStack(sizeof(irregexp::InputOutputData));
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("RegExpTesterStub");
+ JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
+ if (!code)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "RegExpTesterStub");
+#endif
+
+ if (cx->zone()->needsIncrementalBarrier())
+ code->togglePreBarriers(true, DontReprotect);
+
+ return code;
+}
+
+class OutOfLineRegExpTester : public OutOfLineCodeBase<CodeGenerator>
+{
+ LRegExpTester* lir_;
+
+ public:
+ explicit OutOfLineRegExpTester(LRegExpTester* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineRegExpTester(this);
+ }
+
+ LRegExpTester* lir() const {
+ return lir_;
+ }
+};
+
+typedef bool (*RegExpTesterRawFn)(JSContext* cx, HandleObject regexp, HandleString input,
+ int32_t lastIndex, int32_t* result);
+static const VMFunction RegExpTesterRawInfo =
+ FunctionInfo<RegExpTesterRawFn>(RegExpTesterRaw, "RegExpTesterRaw");
+
+void
+CodeGenerator::visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool)
+{
+ LRegExpTester* lir = ool->lir();
+ Register lastIndex = ToRegister(lir->lastIndex());
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ pushArg(lastIndex);
+ pushArg(input);
+ pushArg(regexp);
+
+ // We are not using oolCallVM because we are in a call instruction, and the
+ // live registers are already saved by the register allocator.
+ callVM(RegExpTesterRawInfo, lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitRegExpTester(LRegExpTester* lir)
+{
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
+ MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
+ MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
+
+ MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg);
+ MOZ_ASSERT(RegExpTesterStringReg != ReturnReg);
+ MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg);
+
+ OutOfLineRegExpTester* ool = new(alloc()) OutOfLineRegExpTester(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ JitCode* regExpTesterStub = gen->compartment->jitCompartment()->regExpTesterStubNoBarrier();
+ masm.call(regExpTesterStub);
+
+ masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTesterResultFailed), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+class OutOfLineRegExpPrototypeOptimizable : public OutOfLineCodeBase<CodeGenerator>
+{
+ LRegExpPrototypeOptimizable* ins_;
+
+ public:
+ explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
+ }
+ LRegExpPrototypeOptimizable* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGenerator::visitRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
+{
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp());
+
+ OutOfLineRegExpPrototypeOptimizable* ool = new(alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
+ addOutOfLineCode(ool, ins->mir());
+
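+ // Fast path: compare the object's shape against the compartment's cached
+ // optimizable RegExp prototype shape, and take the out-of-line VM call on
+ // a mismatch.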
+ masm.loadJSContext(temp);
+ masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), temp);
+ size_t offset = JSCompartment::offsetOfRegExps() +
+ RegExpCompartment::offsetOfOptimizableRegExpPrototypeShape();
+ masm.loadPtr(Address(temp, offset), temp);
+
+ masm.loadPtr(Address(object, ShapedObject::offsetOfShape()), output);
+ masm.branchPtr(Assembler::NotEqual, output, temp, ool->entry());
+ masm.move32(Imm32(0x1), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(OutOfLineRegExpPrototypeOptimizable* ool)
+{
+ LRegExpPrototypeOptimizable* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+
+ masm.setupUnalignedABICall(output);
+ masm.loadJSContext(output);
+ masm.passABIArg(output);
+ masm.passABIArg(object);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpPrototypeOptimizableRaw));
+ masm.storeCallBoolResult(output);
+
+ restoreVolatile(output);
+
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineRegExpInstanceOptimizable : public OutOfLineCodeBase<CodeGenerator>
+{
+ LRegExpInstanceOptimizable* ins_;
+
+ public:
+ explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineRegExpInstanceOptimizable(this);
+ }
+ LRegExpInstanceOptimizable* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGenerator::visitRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
+{
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp());
+
+ OutOfLineRegExpInstanceOptimizable* ool = new(alloc()) OutOfLineRegExpInstanceOptimizable(ins);
+ addOutOfLineCode(ool, ins->mir());
+
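+ // Same fast path as for the prototype above, but comparing against the
+ // cached optimizable RegExp instance shape.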
+ masm.loadJSContext(temp);
+ masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), temp);
+ size_t offset = JSCompartment::offsetOfRegExps() +
+ RegExpCompartment::offsetOfOptimizableRegExpInstanceShape();
+ masm.loadPtr(Address(temp, offset), temp);
+
+ masm.loadPtr(Address(object, ShapedObject::offsetOfShape()), output);
+ masm.branchPtr(Assembler::NotEqual, output, temp, ool->entry());
+ masm.move32(Imm32(0x1), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(OutOfLineRegExpInstanceOptimizable* ool)
+{
+ LRegExpInstanceOptimizable* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register proto = ToRegister(ins->proto());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+
+ masm.setupUnalignedABICall(output);
+ masm.loadJSContext(output);
+ masm.passABIArg(output);
+ masm.passABIArg(object);
+ masm.passABIArg(proto);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpInstanceOptimizableRaw));
+ masm.storeCallBoolResult(output);
+
+ restoreVolatile(output);
+
+ masm.jump(ool->rejoin());
+}
+
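+// Linearly scan the string's characters for the first '$', leaving its index
+// in |output|, or -1 if no '$' is found.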
+static void
+FindFirstDollarIndex(MacroAssembler& masm, Register str, Register len, Register chars,
+ Register temp, Register output, bool isLatin1)
+{
+ masm.loadStringChars(str, chars);
+
+ masm.move32(Imm32(0), output);
+
+ Label start, done;
+ masm.bind(&start);
+ if (isLatin1)
+ masm.load8ZeroExtend(BaseIndex(chars, output, TimesOne), temp);
+ else
+ masm.load16ZeroExtend(BaseIndex(chars, output, TimesTwo), temp);
+
+ masm.branch32(Assembler::Equal, temp, Imm32('$'), &done);
+
+ masm.add32(Imm32(1), output);
+ masm.branch32(Assembler::NotEqual, output, len, &start);
+
+ masm.move32(Imm32(-1), output);
+
+ masm.bind(&done);
+}
+
+typedef bool (*GetFirstDollarIndexRawFn)(JSContext*, HandleString, int32_t*);
+static const VMFunction GetFirstDollarIndexRawInfo =
+ FunctionInfo<GetFirstDollarIndexRawFn>(GetFirstDollarIndexRaw, "GetFirstDollarIndexRaw");
+
+void
+CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins)
+{
+ Register str = ToRegister(ins->str());
+ Register output = ToRegister(ins->output());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register len = ToRegister(ins->temp2());
+
+ OutOfLineCode* ool = oolCallVM(GetFirstDollarIndexRawInfo, ins, ArgList(str),
+ StoreRegisterTo(output));
+
+ masm.branchIfRope(str, ool->entry());
+ masm.loadStringLength(str, len);
+
+ Label isLatin1, done;
+ masm.branchLatin1String(str, &isLatin1);
+ {
+ FindFirstDollarIndex(masm, str, len, temp0, temp1, output, /* isLatin1 = */ false);
+ }
+ masm.jump(&done);
+ {
+ masm.bind(&isLatin1);
+ FindFirstDollarIndex(masm, str, len, temp0, temp1, output, /* isLatin1 = */ true);
+ }
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+typedef JSString* (*StringReplaceFn)(JSContext*, HandleString, HandleString, HandleString);
+static const VMFunction StringFlatReplaceInfo =
+ FunctionInfo<StringReplaceFn>(js::str_flat_replace_string, "str_flat_replace_string");
+static const VMFunction StringReplaceInfo =
+ FunctionInfo<StringReplaceFn>(StringReplace, "StringReplace");
+
+void
+CodeGenerator::visitStringReplace(LStringReplace* lir)
+{
+ if (lir->replacement()->isConstant())
+ pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
+ else
+ pushArg(ToRegister(lir->replacement()));
+
+ if (lir->pattern()->isConstant())
+ pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
+ else
+ pushArg(ToRegister(lir->pattern()));
+
+ if (lir->string()->isConstant())
+ pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
+ else
+ pushArg(ToRegister(lir->string()));
+
+ if (lir->mir()->isFlatReplacement())
+ callVM(StringFlatReplaceInfo, lir);
+ else
+ callVM(StringReplaceInfo, lir);
+}
+
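+// Emit a call to a shared (Baseline-style) fallback IC stub from Ion code and
+// record the IC entry so it can be linked once compilation finishes.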
+void
+CodeGenerator::emitSharedStub(ICStub::Kind kind, LInstruction* lir)
+{
+ JSScript* script = lir->mirRaw()->block()->info().script();
+ jsbytecode* pc = lir->mirRaw()->toInstruction()->resumePoint()->pc();
+
+#ifdef JS_USE_LINK_REGISTER
+ // Some architectures don't push the return address on the stack but
+ // use the link register. In that case the stack isn't aligned. Push
+ // to make sure we are aligned.
+ masm.Push(Imm32(0));
+#endif
+
+ // Create descriptor signifying end of Ion frame.
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
+ JitStubFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+
+ // Call into the stubcode.
+ CodeOffset patchOffset;
+ IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script);
+ EmitCallIC(&patchOffset, masm);
+ entry.setReturnOffset(CodeOffset(masm.currentOffset()));
+
+ SharedStub sharedStub(kind, entry, patchOffset);
+ masm.propagateOOM(sharedStubs_.append(sharedStub));
+
+ // Fix up upon return.
+ uint32_t callOffset = masm.currentOffset();
+#ifdef JS_USE_LINK_REGISTER
+ masm.freeStack(sizeof(intptr_t) * 2);
+#else
+ masm.freeStack(sizeof(intptr_t));
+#endif
+ markSafepointAt(callOffset, lir);
+}
+
+void
+CodeGenerator::visitBinarySharedStub(LBinarySharedStub* lir)
+{
+ JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
+ switch (jsop) {
+ case JSOP_ADD:
+ case JSOP_SUB:
+ case JSOP_MUL:
+ case JSOP_DIV:
+ case JSOP_MOD:
+ case JSOP_POW:
+ emitSharedStub(ICStub::Kind::BinaryArith_Fallback, lir);
+ break;
+ case JSOP_LT:
+ case JSOP_LE:
+ case JSOP_GT:
+ case JSOP_GE:
+ case JSOP_EQ:
+ case JSOP_NE:
+ case JSOP_STRICTEQ:
+ case JSOP_STRICTNE:
+ emitSharedStub(ICStub::Kind::Compare_Fallback, lir);
+ break;
+ default:
+ MOZ_CRASH("Unsupported jsop in shared stubs.");
+ }
+}
+
+void
+CodeGenerator::visitUnarySharedStub(LUnarySharedStub* lir)
+{
+ JSOp jsop = JSOp(*lir->mir()->resumePoint()->pc());
+ switch (jsop) {
+ case JSOP_BITNOT:
+ case JSOP_NEG:
+ emitSharedStub(ICStub::Kind::UnaryArith_Fallback, lir);
+ break;
+ case JSOP_CALLPROP:
+ case JSOP_GETPROP:
+ case JSOP_LENGTH:
+ emitSharedStub(ICStub::Kind::GetProp_Fallback, lir);
+ break;
+ default:
+ MOZ_CRASH("Unsupported jsop in shared stubs.");
+ }
+}
+
+void
+CodeGenerator::visitNullarySharedStub(LNullarySharedStub* lir)
+{
+ jsbytecode* pc = lir->mir()->resumePoint()->pc();
+ JSOp jsop = JSOp(*pc);
+ switch (jsop) {
+ case JSOP_NEWARRAY: {
+ uint32_t length = GET_UINT32(pc);
+ MOZ_ASSERT(length <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce JSOP_NEWARRAY with a length exceeding int32_t range");
+
+ // Pass length in R0.
+ masm.move32(Imm32(AssertedCast<int32_t>(length)), R0.scratchReg());
+ emitSharedStub(ICStub::Kind::NewArray_Fallback, lir);
+ break;
+ }
+ case JSOP_NEWOBJECT:
+ emitSharedStub(ICStub::Kind::NewObject_Fallback, lir);
+ break;
+ case JSOP_NEWINIT: {
+ JSProtoKey key = JSProtoKey(GET_UINT8(pc));
+ if (key == JSProto_Array) {
+ masm.move32(Imm32(0), R0.scratchReg());
+ emitSharedStub(ICStub::Kind::NewArray_Fallback, lir);
+ } else {
+ emitSharedStub(ICStub::Kind::NewObject_Fallback, lir);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported jsop in shared stubs.");
+ }
+}
+
+typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject);
+static const VMFunction LambdaInfo = FunctionInfo<LambdaFn>(js::Lambda, "Lambda");
+
+void
+CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton* lir)
+{
+ pushArg(ToRegister(lir->environmentChain()));
+ pushArg(ImmGCPtr(lir->mir()->info().fun));
+ callVM(LambdaInfo, lir);
+}
+
+void
+CodeGenerator::visitLambda(LLambda* lir)
+{
+ Register envChain = ToRegister(lir->environmentChain());
+ Register output = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ const LambdaFunctionInfo& info = lir->mir()->info();
+
+ OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, ArgList(ImmGCPtr(info.fun), envChain),
+ StoreRegisterTo(output));
+
+ MOZ_ASSERT(!info.singletonType);
+
+ masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry());
+
+ emitLambdaInit(output, envChain, info);
+
+ if (info.flags & JSFunction::EXTENDED) {
+ MOZ_ASSERT(info.fun->allowSuperProperty() || info.fun->isSelfHostedBuiltin() ||
+ info.fun->isAsync());
+ static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized");
+ masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
+ masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
+ }
+
+ masm.bind(ool->rejoin());
+}
+
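+// Out-of-line path for LLambdaArrow: calls js::LambdaArrow in the VM when
+// inline allocation fails or the function has a singleton type.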
+class OutOfLineLambdaArrow : public OutOfLineCodeBase<CodeGenerator>
+{
+ public:
+ LLambdaArrow* lir;
+ Label entryNoPop_;
+
+ explicit OutOfLineLambdaArrow(LLambdaArrow* lir)
+ : lir(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineLambdaArrow(this);
+ }
+
+ Label* entryNoPop() {
+ return &entryNoPop_;
+ }
+};
+
+typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
+static const VMFunction LambdaArrowInfo =
+ FunctionInfo<LambdaArrowFn>(js::LambdaArrow, "LambdaArrow");
+
+void
+CodeGenerator::visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool)
+{
+ Register envChain = ToRegister(ool->lir->environmentChain());
+ ValueOperand newTarget = ToValue(ool->lir, LLambdaArrow::NewTargetValue);
+ Register output = ToRegister(ool->lir->output());
+ const LambdaFunctionInfo& info = ool->lir->mir()->info();
+
+ // When we get here, we may need to restore part of the newTarget,
+ // which has been conscripted into service as a temp register.
+ masm.pop(newTarget.scratchReg());
+
+ masm.bind(ool->entryNoPop());
+
+ saveLive(ool->lir);
+
+ pushArg(newTarget);
+ pushArg(envChain);
+ pushArg(ImmGCPtr(info.fun));
+
+ callVM(LambdaArrowInfo, ool->lir);
+ StoreRegisterTo(output).generate(this);
+
+ restoreLiveIgnore(ool->lir, StoreRegisterTo(output).clobbered());
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitLambdaArrow(LLambdaArrow* lir)
+{
+ Register envChain = ToRegister(lir->environmentChain());
+ ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue);
+ Register output = ToRegister(lir->output());
+ const LambdaFunctionInfo& info = lir->mir()->info();
+
+ OutOfLineLambdaArrow* ool = new (alloc()) OutOfLineLambdaArrow(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ MOZ_ASSERT(!info.useSingletonForClone);
+
+ if (info.singletonType) {
+ // If the function has a singleton type, this instruction will only be
+ // executed once so we don't bother inlining it.
+ masm.jump(ool->entryNoPop());
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ // There are not enough registers on x86 with the profiler enabled to
+ // request a temp. Instead, spill part of one of the values and be prepared
+ // to restore it if necessary on the out-of-line path.
+ Register tempReg = newTarget.scratchReg();
+ masm.push(newTarget.scratchReg());
+
+ masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry());
+
+ masm.pop(newTarget.scratchReg());
+
+ emitLambdaInit(output, envChain, info);
+
+ // Initialize extended slots. Lexical |this| is stored in the first one.
+ MOZ_ASSERT(info.flags & JSFunction::EXTENDED);
+ static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized");
+ static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0,
+ "|new.target| must be stored in first slot");
+ masm.storeValue(newTarget, Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
+ masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::emitLambdaInit(Register output, Register envChain,
+ const LambdaFunctionInfo& info)
+{
+ // Initialize nargs and flags. We do this with a single uint32 to avoid
+ // 16-bit writes.
+ union {
+ struct S {
+ uint16_t nargs;
+ uint16_t flags;
+ } s;
+ uint32_t word;
+ } u;
+ u.s.nargs = info.nargs;
+ u.s.flags = info.flags;
+
+ MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+ masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs()));
+ masm.storePtr(ImmGCPtr(info.scriptOrLazyScript),
+ Address(output, JSFunction::offsetOfNativeOrScript()));
+ masm.storePtr(envChain, Address(output, JSFunction::offsetOfEnvironment()));
+ masm.storePtr(ImmGCPtr(info.fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
+}
+
+void
+CodeGenerator::visitOsiPoint(LOsiPoint* lir)
+{
+ // Note: markOsiPoint ensures enough space exists between the last
+ // LOsiPoint and this one to patch adjacent call instructions.
+
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ uint32_t osiCallPointOffset = markOsiPoint(lir);
+
+ LSafepoint* safepoint = lir->associatedSafepoint();
+ MOZ_ASSERT(!safepoint->osiCallPointOffset());
+ safepoint->setOsiCallPointOffset(osiCallPointOffset);
+
+#ifdef DEBUG
+ // There should be no movegroups or other instructions between
+ // an instruction and its OsiPoint. This is necessary because
+ // we use the OsiPoint's snapshot from within VM calls.
+ for (LInstructionReverseIterator iter(current->rbegin(lir)); iter != current->rend(); iter++) {
+ if (*iter == lir)
+ continue;
+ MOZ_ASSERT(!iter->isMoveGroup());
+ MOZ_ASSERT(iter->safepoint() == safepoint);
+ break;
+ }
+#endif
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (shouldVerifyOsiPointRegs(safepoint))
+ verifyOsiPointRegs(safepoint);
+#endif
+}
+
+void
+CodeGenerator::visitGoto(LGoto* lir)
+{
+ jumpToBlock(lir->target());
+}
+
+// Out-of-line path to execute any move groups between the start of a loop
+// header and its interrupt check, then invoke the interrupt handler.
+class OutOfLineInterruptCheckImplicit : public OutOfLineCodeBase<CodeGenerator>
+{
+ public:
+ LBlock* block;
+ LInterruptCheck* lir;
+
+ OutOfLineInterruptCheckImplicit(LBlock* block, LInterruptCheck* lir)
+ : block(block), lir(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineInterruptCheckImplicit(this);
+ }
+};
+
+typedef bool (*InterruptCheckFn)(JSContext*);
+static const VMFunction InterruptCheckInfo =
+ FunctionInfo<InterruptCheckFn>(InterruptCheck, "InterruptCheck");
+
+void
+CodeGenerator::visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ool)
+{
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // This path is entered from the patched back-edge of the loop. This
+ // means that the JitActivation flags used for checking the validity of the
+ // OSI points are not reset by the path generated by generateBody, so we
+ // have to reset them here.
+ resetOsiPointRegs(ool->lir->safepoint());
+#endif
+
+ LInstructionIterator iter = ool->block->begin();
+ for (; iter != ool->block->end(); iter++) {
+ if (iter->isMoveGroup()) {
+ // Replay this move group that precedes the interrupt check at the
+ // start of the loop header. Any incoming jumps here will be from
+ // the backedge and will skip over the move group emitted inline.
+ visitMoveGroup(iter->toMoveGroup());
+ } else {
+ break;
+ }
+ }
+ MOZ_ASSERT(*iter == ool->lir);
+
+ saveLive(ool->lir);
+ callVM(InterruptCheckInfo, ool->lir);
+ restoreLive(ool->lir);
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitTableSwitch(LTableSwitch* ins)
+{
+ MTableSwitch* mir = ins->mir();
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+ const LAllocation* temp;
+
+ if (mir->getOperand(0)->type() != MIRType::Int32) {
+ temp = ins->tempInt()->output();
+
+ // The input is a double, so try to convert it to an integer.
+ // If it does not fit in an integer, take the default case.
+ masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp), defaultcase, false);
+ } else {
+ temp = ins->index();
+ }
+
+ emitTableSwitchDispatch(mir, ToRegister(temp), ToRegisterOrInvalid(ins->tempPointer()));
+}
+
+void
+CodeGenerator::visitTableSwitchV(LTableSwitchV* ins)
+{
+ MTableSwitch* mir = ins->mir();
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
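+ // Unbox the input: use int32 values directly, convert doubles to int32
+ // (taking the default case if the conversion is lossy), and send any
+ // non-number to the default case.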
+ Register index = ToRegister(ins->tempInt());
+ ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
+ Register tag = masm.extractTag(value, index);
+ masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
+
+ Label unboxInt, isInt;
+ masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
+ {
+ FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
+ masm.unboxDouble(value, floatIndex);
+ masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
+ masm.jump(&isInt);
+ }
+
+ masm.bind(&unboxInt);
+ masm.unboxInt32(value, index);
+
+ masm.bind(&isInt);
+
+ emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
+}
+
+typedef JSObject* (*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind);
+static const VMFunction DeepCloneObjectLiteralInfo =
+ FunctionInfo<DeepCloneObjectLiteralFn>(DeepCloneObjectLiteral, "DeepCloneObjectLiteral");
+
+void
+CodeGenerator::visitCloneLiteral(LCloneLiteral* lir)
+{
+ pushArg(ImmWord(TenuredObject));
+ pushArg(ToRegister(lir->getObjectLiteral()));
+ callVM(DeepCloneObjectLiteralInfo, lir);
+}
+
+void
+CodeGenerator::visitParameter(LParameter* lir)
+{
+}
+
+void
+CodeGenerator::visitCallee(LCallee* lir)
+{
+ Register callee = ToRegister(lir->output());
+ Address ptr(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken());
+
+ masm.loadFunctionFromCalleeToken(ptr, callee);
+}
+
+void
+CodeGenerator::visitIsConstructing(LIsConstructing* lir)
+{
+ Register output = ToRegister(lir->output());
+ Address calleeToken(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken());
+ masm.loadPtr(calleeToken, output);
+
+ // We must be inside a function.
+ MOZ_ASSERT(current->mir()->info().script()->functionNonDelazifying());
+
+ // The low bit indicates whether this call is constructing; just clear the
+ // other bits.
+ static_assert(CalleeToken_Function == 0x0, "CalleeTokenTag value should match");
+ static_assert(CalleeToken_FunctionConstructing == 0x1, "CalleeTokenTag value should match");
+ masm.andPtr(Imm32(0x1), output);
+}
+
+void
+CodeGenerator::visitStart(LStart* lir)
+{
+}
+
+void
+CodeGenerator::visitReturn(LReturn* lir)
+{
+#if defined(JS_NUNBOX32)
+ DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
+ DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
+ MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
+ MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
+#elif defined(JS_PUNBOX64)
+ DebugOnly<LAllocation*> result = lir->getOperand(0);
+ MOZ_ASSERT(ToRegister(result) == JSReturnReg);
+#endif
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin())
+ masm.jump(&returnLabel_);
+}
+
+void
+CodeGenerator::visitOsrEntry(LOsrEntry* lir)
+{
+ Register temp = ToRegister(lir->temp());
+
+ // Remember the OSR entry offset into the code buffer.
+ masm.flushBuffer();
+ setOsrEntryOffset(masm.size());
+
+#ifdef JS_TRACE_LOGGING
+ emitTracelogStopEvent(TraceLogger_Baseline);
+ emitTracelogStartEvent(TraceLogger_IonMonkey);
+#endif
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled())
+ masm.profilerEnterFrame(masm.getStackPointer(), temp);
+
+ // Allocate the full frame for this function.
+ // Note that we have a new entry here, so we reset
+ // MacroAssembler::framePushed() to 0 before reserving the stack.
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+ masm.setFramePushed(0);
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+
+ masm.reserveStack(frameSize());
+}
+
+void
+CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir)
+{
+ const LAllocation* frame = lir->getOperand(0);
+ const LDefinition* object = lir->getDef(0);
+
+ const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfEnvironmentChain();
+
+ masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
+}
+
+void
+CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir)
+{
+ const LAllocation* frame = lir->getOperand(0);
+ const LDefinition* object = lir->getDef(0);
+
+ const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
+
+ masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
+}
+
+void
+CodeGenerator::visitOsrValue(LOsrValue* value)
+{
+ const LAllocation* frame = value->getOperand(0);
+ const ValueOperand out = ToOutValue(value);
+
+ const ptrdiff_t frameOffset = value->mir()->frameOffset();
+
+ masm.loadValue(Address(ToRegister(frame), frameOffset), out);
+}
+
+void
+CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir)
+{
+ const LAllocation* frame = lir->getOperand(0);
+ const ValueOperand out = ToOutValue(lir);
+
+ Address flags = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
+ Address retval = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
+
+ masm.moveValue(UndefinedValue(), out);
+
+ Label done;
+ masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done);
+ masm.loadValue(retval, out);
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitStackArgT(LStackArgT* lir)
+{
+ const LAllocation* arg = lir->getArgument();
+ MIRType argType = lir->type();
+ uint32_t argslot = lir->argslot();
+ MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
+
+ int32_t stack_offset = StackOffsetOfPassedArg(argslot);
+ Address dest(masm.getStackPointer(), stack_offset);
+
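+ // Store the argument into its stack slot as a double, a typed value, or a
+ // constant Value, depending on the allocation kind.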
+ if (arg->isFloatReg())
+ masm.storeDouble(ToFloatRegister(arg), dest);
+ else if (arg->isRegister())
+ masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
+ else
+ masm.storeValue(arg->toConstant()->toJSValue(), dest);
+}
+
+void
+CodeGenerator::visitStackArgV(LStackArgV* lir)
+{
+ ValueOperand val = ToValue(lir, 0);
+ uint32_t argslot = lir->argslot();
+ MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
+
+ int32_t stack_offset = StackOffsetOfPassedArg(argslot);
+
+ masm.storeValue(val, Address(masm.getStackPointer(), stack_offset));
+}
+
+void
+CodeGenerator::visitMoveGroup(LMoveGroup* group)
+{
+ if (!group->numMoves())
+ return;
+
+ MoveResolver& resolver = masm.moveResolver();
+
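+ // Add every move in the group to the resolver, translating each
+ // LDefinition type into the corresponding MoveOp type.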
+ for (size_t i = 0; i < group->numMoves(); i++) {
+ const LMove& move = group->getMove(i);
+
+ LAllocation from = move.from();
+ LAllocation to = move.to();
+ LDefinition::Type type = move.type();
+
+ // No bogus moves.
+ MOZ_ASSERT(from != to);
+ MOZ_ASSERT(!from.isConstant());
+ MoveOp::Type moveType;
+ switch (type) {
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ case LDefinition::PAYLOAD:
+#else
+ case LDefinition::BOX:
+#endif
+ case LDefinition::GENERAL: moveType = MoveOp::GENERAL; break;
+ case LDefinition::INT32: moveType = MoveOp::INT32; break;
+ case LDefinition::FLOAT32: moveType = MoveOp::FLOAT32; break;
+ case LDefinition::DOUBLE: moveType = MoveOp::DOUBLE; break;
+ case LDefinition::SIMD128INT: moveType = MoveOp::SIMD128INT; break;
+ case LDefinition::SIMD128FLOAT: moveType = MoveOp::SIMD128FLOAT; break;
+ default: MOZ_CRASH("Unexpected move type");
+ }
+
+ masm.propagateOOM(resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
+ }
+
+ masm.propagateOOM(resolver.resolve());
+ if (masm.oom())
+ return;
+
+ MoveEmitter emitter(masm);
+
+#ifdef JS_CODEGEN_X86
+ if (group->maybeScratchRegister().isGeneralReg())
+ emitter.setScratchRegister(group->maybeScratchRegister().toGeneralReg()->reg());
+ else
+ resolver.sortMemoryToMemoryMoves();
+#endif
+
+ emitter.emit(resolver);
+ emitter.finish();
+}
+
+void
+CodeGenerator::visitInteger(LInteger* lir)
+{
+ masm.move32(Imm32(lir->getValue()), ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitInteger64(LInteger64* lir)
+{
+ masm.move64(Imm64(lir->getValue()), ToOutRegister64(lir));
+}
+
+void
+CodeGenerator::visitPointer(LPointer* lir)
+{
+ if (lir->kind() == LPointer::GC_THING)
+ masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
+ else
+ masm.movePtr(ImmPtr(lir->ptr()), ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir)
+{
+ // No-op.
+}
+
+void
+CodeGenerator::visitSlots(LSlots* lir)
+{
+ Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
+ masm.loadPtr(slots, ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitLoadSlotT(LLoadSlotT* lir)
+{
+ Register base = ToRegister(lir->slots());
+ int32_t offset = lir->mir()->slot() * sizeof(js::Value);
+ AnyRegister result = ToAnyRegister(lir->output());
+
+ masm.loadUnboxedValue(Address(base, offset), lir->mir()->type(), result);
+}
+
+void
+CodeGenerator::visitLoadSlotV(LLoadSlotV* lir)
+{
+ ValueOperand dest = ToOutValue(lir);
+ Register base = ToRegister(lir->input());
+ int32_t offset = lir->mir()->slot() * sizeof(js::Value);
+
+ masm.loadValue(Address(base, offset), dest);
+}
+
+void
+CodeGenerator::visitStoreSlotT(LStoreSlotT* lir)
+{
+ Register base = ToRegister(lir->slots());
+ int32_t offset = lir->mir()->slot() * sizeof(js::Value);
+ Address dest(base, offset);
+
+ if (lir->mir()->needsBarrier())
+ emitPreBarrier(dest);
+
+ MIRType valueType = lir->mir()->value()->type();
+
+ if (valueType == MIRType::ObjectOrNull) {
+ masm.storeObjectOrNull(ToRegister(lir->value()), dest);
+ } else {
+ ConstantOrRegister value;
+ if (lir->value()->isConstant())
+ value = ConstantOrRegister(lir->value()->toConstant()->toJSValue());
+ else
+ value = TypedOrValueRegister(valueType, ToAnyRegister(lir->value()));
+ masm.storeUnboxedValue(value, valueType, dest, lir->mir()->slotType());
+ }
+}
+
+void
+CodeGenerator::visitStoreSlotV(LStoreSlotV* lir)
+{
+ Register base = ToRegister(lir->slots());
+ int32_t offset = lir->mir()->slot() * sizeof(Value);
+
+ const ValueOperand value = ToValue(lir, LStoreSlotV::Value);
+
+ if (lir->mir()->needsBarrier())
+ emitPreBarrier(Address(base, offset));
+
+ masm.storeValue(value, Address(base, offset));
+}
+
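+// Guard that |obj| matches |guard|: either its group (and, for unboxed
+// objects with an expando, the expando's shape) or its own shape. Jumps to
+// |miss| on mismatch.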
+static void
+GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard,
+ Register obj, Register scratch, Label* miss, bool checkNullExpando)
+{
+ if (guard.group) {
+ masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, miss);
+
+ Address expandoAddress(obj, UnboxedPlainObject::offsetOfExpando());
+ if (guard.shape) {
+ masm.loadPtr(expandoAddress, scratch);
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(0), miss);
+ masm.branchTestObjShape(Assembler::NotEqual, scratch, guard.shape, miss);
+ } else if (checkNullExpando) {
+ masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), miss);
+ }
+ } else {
+ masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, miss);
+ }
+}
+
+void
+CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
+ const TypedOrValueRegister& output)
+{
+ MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic();
+
+ Label done;
+
+ for (size_t i = 0; i < mir->numReceivers(); i++) {
+ ReceiverGuard receiver = mir->receiver(i);
+
+ Label next;
+ masm.comment("GuardReceiver");
+ GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);
+
+ if (receiver.shape) {
+ masm.comment("loadTypedOrValue");
+ // If this is an unboxed expando access, GuardReceiver loaded the
+ // expando object into scratch.
+ Register target = receiver.group ? scratch : obj;
+
+ Shape* shape = mir->shape(i);
+ if (shape->slot() < shape->numFixedSlots()) {
+ // Fixed slot.
+ masm.loadTypedOrValue(Address(target, NativeObject::getFixedSlotOffset(shape->slot())),
+ output);
+ } else {
+ // Dynamic slot.
+ uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
+ masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
+ masm.loadTypedOrValue(Address(scratch, offset), output);
+ }
+ } else {
+ masm.comment("loadUnboxedProperty");
+ const UnboxedLayout::Property* property =
+ receiver.group->unboxedLayout().lookup(mir->name());
+ Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);
+
+ masm.loadUnboxedProperty(propertyAddr, property->type, output);
+ }
+
+ if (i == mir->numReceivers() - 1) {
+ bailoutFrom(&next, ins->snapshot());
+ } else {
+ masm.jump(&done);
+ masm.bind(&next);
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins)
+{
+ Register obj = ToRegister(ins->obj());
+ ValueOperand output = GetValueOutput(ins);
+ emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), output);
+}
+
+void
+CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins)
+{
+ Register obj = ToRegister(ins->obj());
+ TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
+ Register temp = (output.type() == MIRType::Double)
+ ? ToRegister(ins->temp())
+ : output.typedReg().gpr();
+ emitGetPropertyPolymorphic(ins, obj, temp, output);
+}
+
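+// Emit a patchable pre-barrier for an unboxed property whose type can hold a
+// GC thing (object or string); other types need no barrier.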
+template <typename T>
+static void
+EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type)
+{
+ if (type == JSVAL_TYPE_OBJECT)
+ masm.patchableCallPreBarrier(address, MIRType::Object);
+ else if (type == JSVAL_TYPE_STRING)
+ masm.patchableCallPreBarrier(address, MIRType::String);
+ else
+ MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
+}
+
+void
+CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
+ const ConstantOrRegister& value)
+{
+ MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic();
+
+ Label done;
+ for (size_t i = 0; i < mir->numReceivers(); i++) {
+ ReceiverGuard receiver = mir->receiver(i);
+
+ Label next;
+ GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);
+
+ if (receiver.shape) {
+ // If this is an unboxed expando access, GuardReceiver loaded the
+ // expando object into scratch.
+ Register target = receiver.group ? scratch : obj;
+
+ Shape* shape = mir->shape(i);
+ if (shape->slot() < shape->numFixedSlots()) {
+ // Fixed slot.
+ Address addr(target, NativeObject::getFixedSlotOffset(shape->slot()));
+ if (mir->needsBarrier())
+ emitPreBarrier(addr);
+ masm.storeConstantOrRegister(value, addr);
+ } else {
+ // Dynamic slot.
+ masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
+ Address addr(scratch, (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value));
+ if (mir->needsBarrier())
+ emitPreBarrier(addr);
+ masm.storeConstantOrRegister(value, addr);
+ }
+ } else {
+ const UnboxedLayout::Property* property =
+ receiver.group->unboxedLayout().lookup(mir->name());
+ Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);
+
+ EmitUnboxedPreBarrier(masm, propertyAddr, property->type);
+ masm.storeUnboxedProperty(propertyAddr, property->type, value, nullptr);
+ }
+
+ if (i == mir->numReceivers() - 1) {
+ bailoutFrom(&next, ins->snapshot());
+ } else {
+ masm.jump(&done);
+ masm.bind(&next);
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins)
+{
+ Register obj = ToRegister(ins->obj());
+ Register temp = ToRegister(ins->temp());
+ ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value);
+ emitSetPropertyPolymorphic(ins, obj, temp, TypedOrValueRegister(value));
+}
+
+void
+CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins)
+{
+ Register obj = ToRegister(ins->obj());
+ Register temp = ToRegister(ins->temp());
+
+ ConstantOrRegister value;
+ if (ins->mir()->value()->isConstant())
+ value = ConstantOrRegister(ins->mir()->value()->toConstant()->toJSValue());
+ else
+ value = TypedOrValueRegister(ins->mir()->value()->type(), ToAnyRegister(ins->value()));
+
+ emitSetPropertyPolymorphic(ins, obj, temp, value);
+}
+
+void
+CodeGenerator::visitElements(LElements* lir)
+{
+ Address elements(ToRegister(lir->object()),
+ lir->mir()->unboxed() ? UnboxedArrayObject::offsetOfElements()
+ : NativeObject::offsetOfElements());
+ masm.loadPtr(elements, ToRegister(lir->output()));
+}
+
+typedef bool (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
+static const VMFunction ConvertElementsToDoublesInfo =
+ FunctionInfo<ConvertElementsToDoublesFn>(ObjectElements::ConvertElementsToDoubles,
+ "ObjectElements::ConvertElementsToDoubles");
+
+void
+CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles* lir)
+{
+ Register elements = ToRegister(lir->elements());
+
+ OutOfLineCode* ool = oolCallVM(ConvertElementsToDoublesInfo, lir,
+ ArgList(elements), StoreNothing());
+
+ Address convertedAddress(elements, ObjectElements::offsetOfFlags());
+ Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS);
+ masm.branchTest32(Assembler::Zero, convertedAddress, bit, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitMaybeToDoubleElement(LMaybeToDoubleElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ ValueOperand out = ToOutValue(lir);
+
+ FloatRegister temp = ToFloatRegister(lir->tempFloat());
+ Label convert, done;
+
+ // If the CONVERT_DOUBLE_ELEMENTS flag is set, convert the int32
+ // value to double. Else, just box it.
+ masm.branchTest32(Assembler::NonZero,
+ Address(elements, ObjectElements::offsetOfFlags()),
+ Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+ &convert);
+
+ masm.tagValue(JSVAL_TYPE_INT32, value, out);
+ masm.jump(&done);
+
+ masm.bind(&convert);
+ masm.convertInt32ToDouble(value, temp);
+ masm.boxDouble(temp, out);
+
+ masm.bind(&done);
+}
+
+typedef bool (*CopyElementsForWriteFn)(ExclusiveContext*, NativeObject*);
+static const VMFunction CopyElementsForWriteInfo =
+ FunctionInfo<CopyElementsForWriteFn>(NativeObject::CopyElementsForWrite,
+ "NativeObject::CopyElementsForWrite");
+
+void
+CodeGenerator::visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir)
+{
+ Register object = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp());
+
+ OutOfLineCode* ool = oolCallVM(CopyElementsForWriteInfo, lir,
+ ArgList(object), StoreNothing());
+
+ if (lir->mir()->checkNative()) {
+ masm.loadObjClass(object, temp);
+ masm.branchTest32(Assembler::NonZero, Address(temp, Class::offsetOfFlags()),
+ Imm32(Class::NON_NATIVE), ool->rejoin());
+ }
+
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
+ masm.branchTest32(Assembler::NonZero,
+ Address(temp, ObjectElements::offsetOfFlags()),
+ Imm32(ObjectElements::COPY_ON_WRITE),
+ ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir)
+{
+ Address environment(ToRegister(lir->function()), JSFunction::offsetOfEnvironment());
+ masm.loadPtr(environment, ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard)
+{
+ Register input = ToRegister(guard->input());
+ Register expected = ToRegister(guard->expected());
+
+ Assembler::Condition cond =
+ guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
+ bailoutCmpPtr(cond, input, expected, guard->snapshot());
+}
+
+void
+CodeGenerator::visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir)
+{
+ const MGuardReceiverPolymorphic* mir = lir->mir();
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp());
+
+ Label done;
+
+ for (size_t i = 0; i < mir->numReceivers(); i++) {
+ const ReceiverGuard& receiver = mir->receiver(i);
+
+ Label next;
+ GuardReceiver(masm, receiver, obj, temp, &next, /* checkNullExpando = */ true);
+
+ if (i == mir->numReceivers() - 1) {
+ bailoutFrom(&next, lir->snapshot());
+ } else {
+ masm.jump(&done);
+ masm.bind(&next);
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitGuardUnboxedExpando(LGuardUnboxedExpando* lir)
+{
+ Label miss;
+
+ Register obj = ToRegister(lir->object());
+ masm.branchPtr(lir->mir()->requireExpando() ? Assembler::Equal : Assembler::NotEqual,
+ Address(obj, UnboxedPlainObject::offsetOfExpando()), ImmWord(0), &miss);
+
+ bailoutFrom(&miss, lir->snapshot());
+}
+
+void
+CodeGenerator::visitLoadUnboxedExpando(LLoadUnboxedExpando* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register result = ToRegister(lir->getDef(0));
+
+ masm.loadPtr(Address(obj, UnboxedPlainObject::offsetOfExpando()), result);
+}
+
+void
+CodeGenerator::visitTypeBarrierV(LTypeBarrierV* lir)
+{
+ ValueOperand operand = ToValue(lir, LTypeBarrierV::Input);
+ Register scratch = ToTempRegisterOrInvalid(lir->temp());
+
+ Label miss;
+ masm.guardTypeSet(operand, lir->mir()->resultTypeSet(), lir->mir()->barrierKind(), scratch, &miss);
+ bailoutFrom(&miss, lir->snapshot());
+}
+
+void
+CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register scratch = ToTempRegisterOrInvalid(lir->temp());
+ Label miss, ok;
+
+ if (lir->mir()->type() == MIRType::ObjectOrNull) {
+ masm.comment("Object or Null");
+ Label* nullTarget = lir->mir()->resultTypeSet()->mightBeMIRType(MIRType::Null) ? &ok : &miss;
+ masm.branchTestPtr(Assembler::Zero, obj, obj, nullTarget);
+ } else {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Object);
+ MOZ_ASSERT(lir->mir()->barrierKind() != BarrierKind::TypeTagOnly);
+ }
+
+ if (lir->mir()->barrierKind() != BarrierKind::TypeTagOnly) {
+ masm.comment("Type tag only");
+ masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch, &miss);
+ }
+
+ bailoutFrom(&miss, lir->snapshot());
+ masm.bind(&ok);
+}
+
+void
+CodeGenerator::visitMonitorTypes(LMonitorTypes* lir)
+{
+ ValueOperand operand = ToValue(lir, LMonitorTypes::Input);
+ Register scratch = ToTempUnboxRegister(lir->temp());
+
+ Label matched, miss;
+ masm.guardTypeSet(operand, lir->mir()->typeSet(), lir->mir()->barrierKind(), scratch, &miss);
+ bailoutFrom(&miss, lir->snapshot());
+}
+
+// Out-of-line path to update the store buffer.
+class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator>
+{
+ LInstruction* lir_;
+ const LAllocation* object_;
+
+ public:
+ OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
+ : lir_(lir), object_(object)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineCallPostWriteBarrier(this);
+ }
+
+ LInstruction* lir() const {
+ return lir_;
+ }
+ const LAllocation* object() const {
+ return object_;
+ }
+};
+
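+// Fast path for post-barriering a store to a constant tenured object: check
+// the arena's buffered-cells bitmap, add the cell if it is missing, and only
+// call the VM when the sentinel cell set is installed for this arena.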
+static void
+EmitStoreBufferCheckForConstant(MacroAssembler& masm, JSObject* object,
+ AllocatableGeneralRegisterSet& regs, Label* exit, Label* callVM)
+{
+ Register temp = regs.takeAny();
+
+ const gc::TenuredCell* cell = &object->asTenured();
+ gc::Arena* arena = cell->arena();
+
+ Register cells = temp;
+ masm.loadPtr(AbsoluteAddress(&arena->bufferedCells), cells);
+
+ size_t index = gc::ArenaCellSet::getCellIndex(cell);
+ size_t word;
+ uint32_t mask;
+ gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
+ size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
+
+ masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask), exit);
+
+ // Check whether this is the sentinel set and if so call the VM to allocate
+ // one for this arena.
+ masm.branchPtr(Assembler::Equal, Address(cells, gc::ArenaCellSet::offsetOfArena()),
+ ImmPtr(nullptr), callVM);
+
+ // Add the cell to the set.
+ masm.or32(Imm32(mask), Address(cells, offset));
+ masm.jump(exit);
+
+ regs.add(temp);
+}
+
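+// Shared tail of the post-write barrier: optionally take the constant-object
+// fast path above, then call PostWriteBarrier (or PostGlobalWriteBarrier for
+// the global object) through the ABI.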
+static void
+EmitPostWriteBarrier(MacroAssembler& masm, Register objreg, JSObject* maybeConstant, bool isGlobal,
+ AllocatableGeneralRegisterSet& regs)
+{
+ MOZ_ASSERT_IF(isGlobal, maybeConstant);
+
+ Label callVM;
+ Label exit;
+
+ // We already have a fast path to check whether a global is in the store
+ // buffer.
+ if (!isGlobal && maybeConstant)
+ EmitStoreBufferCheckForConstant(masm, maybeConstant, regs, &exit, &callVM);
+
+ // Call into the VM to barrier the write.
+ masm.bind(&callVM);
+
+ Register runtimereg = regs.takeAny();
+ masm.mov(ImmPtr(GetJitContext()->runtime), runtimereg);
+
+ void (*fun)(JSRuntime*, JSObject*) = isGlobal ? PostGlobalWriteBarrier : PostWriteBarrier;
+ masm.setupUnalignedABICall(regs.takeAny());
+ masm.passABIArg(runtimereg);
+ masm.passABIArg(objreg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, fun));
+
+ masm.bind(&exit);
+}
+
+void
+CodeGenerator::emitPostWriteBarrier(const LAllocation* obj)
+{
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+
+ Register objreg;
+ JSObject* object = nullptr;
+ bool isGlobal = false;
+ if (obj->isConstant()) {
+ object = &obj->toConstant()->toObject();
+ isGlobal = isGlobalObject(object);
+ objreg = regs.takeAny();
+ masm.movePtr(ImmGCPtr(object), objreg);
+ } else {
+ objreg = ToRegister(obj);
+ regs.takeUnchecked(objreg);
+ }
+
+ EmitPostWriteBarrier(masm, objreg, object, isGlobal, regs);
+}
+
+void
+CodeGenerator::emitPostWriteBarrier(Register objreg)
+{
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(objreg);
+ EmitPostWriteBarrier(masm, objreg, nullptr, false, regs);
+}
+
+void
+CodeGenerator::visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool)
+{
+ saveLiveVolatile(ool->lir());
+ const LAllocation* obj = ool->object();
+ emitPostWriteBarrier(obj);
+ restoreLiveVolatile(ool->lir());
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal, OutOfLineCode* ool)
+{
+ // Check whether an object is a global that we have already barriered before
+ // calling into the VM.
+
+ if (!maybeGlobal->isConstant())
+ return;
+
+ JSObject* obj = &maybeGlobal->toConstant()->toObject();
+ if (!isGlobalObject(obj))
+ return;
+
+ JSCompartment* comp = obj->compartment();
+ auto addr = AbsoluteAddress(&comp->globalWriteBarriered);
+ masm.branch32(Assembler::NotEqual, addr, Imm32(0), ool->rejoin());
+}
+
+template <class LPostBarrierType>
+void
+CodeGenerator::visitPostWriteBarrierCommonO(LPostBarrierType* lir, OutOfLineCode* ool)
+{
+ addOutOfLineCode(ool, lir->mir());
+
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ if (lir->object()->isConstant()) {
+ // Constant nursery objects cannot appear here, see LIRGenerator::visitPostWriteElementBarrier.
+ MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp,
+ ool->rejoin());
+ }
+
+ maybeEmitGlobalBarrierCheck(lir->object(), ool);
+
+ Register valueObj = ToRegister(lir->value());
+ masm.branchTestPtr(Assembler::Zero, valueObj, valueObj, ool->rejoin());
+ masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->value()), temp, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+template <class LPostBarrierType>
+void
+CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir, OutOfLineCode* ool)
+{
+ addOutOfLineCode(ool, lir->mir());
+
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ if (lir->object()->isConstant()) {
+ // Constant nursery objects cannot appear here, see LIRGenerator::visitPostWriteElementBarrier.
+ MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp,
+ ool->rejoin());
+ }
+
+ maybeEmitGlobalBarrierCheck(lir->object(), ool);
+
+ ValueOperand value = ToValue(lir, LPostBarrierType::Input);
+ masm.branchValueIsNurseryObject(Assembler::Equal, value, temp, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir)
+{
+ auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommonO(lir, ool);
+}
+
+void
+CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir)
+{
+ auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommonV(lir, ool);
+}
+
+// Out-of-line path to update the store buffer.
+class OutOfLineCallPostWriteElementBarrier : public OutOfLineCodeBase<CodeGenerator>
+{
+ LInstruction* lir_;
+ const LAllocation* object_;
+ const LAllocation* index_;
+
+ public:
+ OutOfLineCallPostWriteElementBarrier(LInstruction* lir, const LAllocation* object,
+ const LAllocation* index)
+ : lir_(lir),
+ object_(object),
+ index_(index)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineCallPostWriteElementBarrier(this);
+ }
+
+ LInstruction* lir() const {
+ return lir_;
+ }
+
+ const LAllocation* object() const {
+ return object_;
+ }
+
+ const LAllocation* index() const {
+ return index_;
+ }
+};
+
+void
+CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(OutOfLineCallPostWriteElementBarrier* ool)
+{
+ saveLiveVolatile(ool->lir());
+
+ const LAllocation* obj = ool->object();
+ const LAllocation* index = ool->index();
+
+ Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
+ Register indexreg = ToRegister(index);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(indexreg);
+
+ if (obj->isConstant()) {
+ objreg = regs.takeAny();
+ masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
+ } else {
+ regs.takeUnchecked(objreg);
+ }
+
+ Register runtimereg = regs.takeAny();
+ masm.setupUnalignedABICall(runtimereg);
+ masm.mov(ImmPtr(GetJitContext()->runtime), runtimereg);
+ masm.passABIArg(runtimereg);
+ masm.passABIArg(objreg);
+ masm.passABIArg(indexreg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteElementBarrier));
+
+ restoreLiveVolatile(ool->lir());
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitPostWriteElementBarrierO(LPostWriteElementBarrierO* lir)
+{
+ auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommonO(lir, ool);
+}
+
+void
+CodeGenerator::visitPostWriteElementBarrierV(LPostWriteElementBarrierV* lir)
+{
+ auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommonV(lir, ool);
+}
+
+void
+CodeGenerator::visitCallNative(LCallNative* call)
+{
+ WrappedFunction* target = call->getSingleTarget();
+ MOZ_ASSERT(target);
+ MOZ_ASSERT(target->isNative());
+
+ int callargslot = call->argslot();
+ int unusedStack = StackOffsetOfPassedArg(callargslot);
+
+ // Registers used for callWithABI() argument-passing.
+ const Register argContextReg = ToRegister(call->getArgContextReg());
+ const Register argUintNReg = ToRegister(call->getArgUintNReg());
+ const Register argVpReg = ToRegister(call->getArgVpReg());
+
+ // Misc. temporary registers.
+ const Register tempReg = ToRegister(call->getTempReg());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Native functions have the signature:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
+
+ // Allocate space for the outparam, moving the StackPointer to what will be &vp[1].
+ masm.adjustStack(unusedStack);
+
+ // Push a Value containing the callee object: natives are allowed to access
+ // their callee before setting the return value. The StackPointer is moved
+ // to &vp[0].
+ masm.Push(ObjectValue(*target->rawJSFunction()));
+
+ // Preload arguments into registers.
+ masm.loadJSContext(argContextReg);
+ masm.move32(Imm32(call->numActualArgs()), argUintNReg);
+ masm.moveStackPtrTo(argVpReg);
+
+ masm.Push(argUintNReg);
+
+ // Construct native exit frame.
+ uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
+ masm.enterFakeExitFrameForNative(call->mir()->isConstructing());
+
+ markSafepointAt(safepointOffset, call);
+
+ emitTracelogStartEvent(TraceLogger_Call);
+
+ // Construct and execute call.
+ masm.setupUnalignedABICall(tempReg);
+ masm.passABIArg(argContextReg);
+ masm.passABIArg(argUintNReg);
+ masm.passABIArg(argVpReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()));
+
+ emitTracelogStopEvent(TraceLogger_Call);
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ masm.loadValue(Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()), JSReturnOperand);
+
+ // The next instruction removes the footer of the exit frame, so there
+ // is no need for leaveFakeExitFrame.
+
+ // Move the StackPointer back to its original location, unwinding the native exit frame.
+ masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+static void
+LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv)
+{
+ // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
+ // will be in the first slot but may be fixed or non-fixed.
+ MOZ_ASSERT(obj != priv);
+
+ // Check shape->numFixedSlots != 0.
+ masm.loadPtr(Address(obj, ShapedObject::offsetOfShape()), priv);
+
+ Label hasFixedSlots, done;
+ masm.branchTest32(Assembler::NonZero,
+ Address(priv, Shape::offsetOfSlotInfo()),
+ Imm32(Shape::fixedSlotsMask()),
+ &hasFixedSlots);
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), priv);
+ masm.loadPrivate(Address(priv, 0), priv);
+
+ masm.jump(&done);
+ masm.bind(&hasFixedSlots);
+
+ masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitCallDOMNative(LCallDOMNative* call)
+{
+ WrappedFunction* target = call->getSingleTarget();
+ MOZ_ASSERT(target);
+ MOZ_ASSERT(target->isNative());
+ MOZ_ASSERT(target->jitInfo());
+ MOZ_ASSERT(call->mir()->isCallDOMNative());
+
+ int callargslot = call->argslot();
+ int unusedStack = StackOffsetOfPassedArg(callargslot);
+
+ // Registers used for callWithABI() argument-passing.
+ const Register argJSContext = ToRegister(call->getArgJSContext());
+ const Register argObj = ToRegister(call->getArgObj());
+ const Register argPrivate = ToRegister(call->getArgPrivate());
+ const Register argArgs = ToRegister(call->getArgArgs());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // DOM methods have the signature:
+ // bool (*)(JSContext*, HandleObject, void* private, const JSJitMethodCallArgs& args)
+ // Where args is initialized from an argc and a vp, vp[0] is space for an
+ // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
+ // function arguments. Note that args stores the argv, not the vp, and
+ // argv == vp + 2.
+
+ // Nestle the stack up against the pushed arguments, leaving the
+ // StackPointer at &vp[1].
+ masm.adjustStack(unusedStack);
+ // argObj is filled with the extracted object, then returned.
+ Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
+ MOZ_ASSERT(obj == argObj);
+
+ // Push a Value containing the callee object: natives are allowed to access
+ // their callee before setting the return value. After this the StackPointer
+ // points to &vp[0].
+ masm.Push(ObjectValue(*target->rawJSFunction()));
+
+ // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
+ // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
+ // StackPointer.
+ JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
+ JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgc ==
+ IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
+
+ LoadDOMPrivate(masm, obj, argPrivate);
+
+ // Push argc from the call instruction into what will become the IonExitFrame.
+ masm.Push(Imm32(call->numActualArgs()));
+
+ // Push our argv onto the stack
+ masm.Push(argArgs);
+ // And store our JSJitMethodCallArgs* in argArgs.
+ masm.moveStackPtrTo(argArgs);
+
+ // Push |this| object for passing HandleObject. We push after argc to
+ // maintain the same sp-relative location of the object pointer with other
+ // DOMExitFrames.
+ masm.Push(argObj);
+ masm.moveStackPtrTo(argObj);
+
+ // Construct native exit frame.
+ uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
+ masm.enterFakeExitFrame(IonDOMMethodExitFrameLayoutToken);
+
+ markSafepointAt(safepointOffset, call);
+
+ // Construct and execute call.
+ masm.setupUnalignedABICall(argJSContext);
+
+ masm.loadJSContext(argJSContext);
+
+ masm.passABIArg(argJSContext);
+ masm.passABIArg(argObj);
+ masm.passABIArg(argPrivate);
+ masm.passABIArg(argArgs);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->jitInfo()->method));
+
+ if (target->jitInfo()->isInfallible) {
+ masm.loadValue(Address(masm.getStackPointer(), IonDOMMethodExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ } else {
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ masm.loadValue(Address(masm.getStackPointer(), IonDOMMethodExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ }
+
+ // The next instruction removes the footer of the exit frame, so there
+ // is no need for leaveFakeExitFrame.
+
+ // Move the StackPointer back to its original location, unwinding the native exit frame.
+ masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+typedef bool (*GetIntrinsicValueFn)(JSContext* cx, HandlePropertyName, MutableHandleValue);
+static const VMFunction GetIntrinsicValueInfo =
+ FunctionInfo<GetIntrinsicValueFn>(GetIntrinsicValue, "GetIntrinsicValue");
+
+void
+CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir)
+{
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ callVM(GetIntrinsicValueInfo, lir);
+}
+
+typedef bool (*InvokeFunctionFn)(JSContext*, HandleObject, bool, uint32_t, Value*, MutableHandleValue);
+static const VMFunction InvokeFunctionInfo =
+ FunctionInfo<InvokeFunctionFn>(InvokeFunction, "InvokeFunction");
+
+void
+CodeGenerator::emitCallInvokeFunction(LInstruction* call, Register calleereg,
+ bool constructing, uint32_t argc, uint32_t unusedStack)
+{
+ // Nestle %esp up to the argument vector.
+ // Each path must account for framePushed_ separately, for callVM to be valid.
+ masm.freeStack(unusedStack);
+
+ pushArg(masm.getStackPointer()); // argv.
+ pushArg(Imm32(argc)); // argc.
+ pushArg(Imm32(constructing)); // constructing.
+ pushArg(calleereg); // JSFunction*.
+
+ callVM(InvokeFunctionInfo, call);
+
+ // Un-nestle %esp from the argument vector. No prefix was pushed.
+ masm.reserveStack(unusedStack);
+}
+
+void
+CodeGenerator::visitCallGeneric(LCallGeneric* call)
+{
+ Register calleereg = ToRegister(call->getFunction());
+ Register objreg = ToRegister(call->getTempObject());
+ Register nargsreg = ToRegister(call->getNargsReg());
+ uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
+ Label invoke, thunk, makeCall, end;
+
+ // Known-target case is handled by LCallKnown.
+ MOZ_ASSERT(!call->hasSingleTarget());
+
+ // Generate an ArgumentsRectifier.
+ JitCode* argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
+
+ masm.checkStackAlignment();
+
+ // Guard that calleereg is actually a function object.
+ masm.loadObjClass(calleereg, nargsreg);
+ masm.branchPtr(Assembler::NotEqual, nargsreg, ImmPtr(&JSFunction::class_), &invoke);
+
+ // Guard that calleereg is an interpreted function with a JSScript.
+ // If we are constructing, also ensure the callee is a constructor.
+ if (call->mir()->isConstructing()) {
+ masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke);
+ } else {
+ masm.branchIfFunctionHasNoScript(calleereg, &invoke);
+ masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, calleereg, objreg, &invoke);
+ }
+
+ // Knowing that calleereg is a non-native function, load the JSScript.
+ masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
+
+ // Load script jitcode.
+ masm.loadBaselineOrIonRaw(objreg, objreg, &invoke);
+
+ // Nestle the StackPointer up to the argument vector.
+ masm.freeStack(unusedStack);
+
+ // Construct the IonFramePrefix.
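+ // (The frame descriptor packs the caller's frame size together with the
+ // frame type and header size, so the callee and stack walkers can recover
+ // the caller frame.)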
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
+ JitFrameLayout::Size());
+ masm.Push(Imm32(call->numActualArgs()));
+ masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
+ masm.Push(Imm32(descriptor));
+
+ // Check whether the provided arguments satisfy target argc.
+ // We cannot have lowered to LCallGeneric with a known target. Assert that we didn't
+ // add any undefineds in IonBuilder. NB: MCall::numStackArgs includes |this|.
+ DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
+ MOZ_ASSERT(call->numActualArgs() == call->mir()->numStackArgs() - numNonArgsOnStack);
+ masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nargsreg);
+ masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()), &thunk);
+ masm.jump(&makeCall);
+
+ // Argument fixup needed. Load the ArgumentsRectifier.
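+ // (The rectifier pads the missing formal arguments with |undefined| before
+ // calling into the target's JIT code.)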
+ masm.bind(&thunk);
+ {
+ MOZ_ASSERT(ArgumentsRectifierReg != objreg);
+ masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
+ masm.loadPtr(Address(objreg, JitCode::offsetOfCode()), objreg);
+ masm.move32(Imm32(call->numActualArgs()), ArgumentsRectifierReg);
+ }
+
+ // Finally call the function in objreg.
+ masm.bind(&makeCall);
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, call);
+
+ // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
+ // The return address has already been removed from the Ion frame.
+ int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
+ masm.adjustStack(prefixGarbage - unusedStack);
+ masm.jump(&end);
+
+ // Handle uncompiled or native functions.
+ masm.bind(&invoke);
+ emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->numActualArgs(),
+ unusedStack);
+
+ masm.bind(&end);
+
+ // If the return value of the constructing function is Primitive,
+ // replace the return value with the Object from CreateThis.
+ if (call->mir()->isConstructing()) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
+ masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand);
+ masm.bind(&notPrimitive);
+ }
+}
+
+typedef bool (*InvokeFunctionShuffleFn)(JSContext*, HandleObject, uint32_t, uint32_t, Value*,
+ MutableHandleValue);
+static const VMFunction InvokeFunctionShuffleInfo =
+ FunctionInfo<InvokeFunctionShuffleFn>(InvokeFunctionShuffleNewTarget,
+ "InvokeFunctionShuffleNewTarget");
+void
+CodeGenerator::emitCallInvokeFunctionShuffleNewTarget(LCallKnown* call, Register calleeReg,
+ uint32_t numFormals, uint32_t unusedStack)
+{
+ masm.freeStack(unusedStack);
+
+ pushArg(masm.getStackPointer());
+ pushArg(Imm32(numFormals));
+ pushArg(Imm32(call->numActualArgs()));
+ pushArg(calleeReg);
+
+ callVM(InvokeFunctionShuffleInfo, call);
+
+ masm.reserveStack(unusedStack);
+}
+
+void
+CodeGenerator::visitCallKnown(LCallKnown* call)
+{
+ Register calleereg = ToRegister(call->getFunction());
+ Register objreg = ToRegister(call->getTempObject());
+ uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
+ WrappedFunction* target = call->getSingleTarget();
+ Label end, uncompiled;
+
+ // Native single targets are handled by LCallNative.
+ MOZ_ASSERT(!target->isNative());
+ // Missing arguments must have been explicitly appended by the IonBuilder.
+ DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
+ MOZ_ASSERT(target->nargs() <= call->mir()->numStackArgs() - numNonArgsOnStack);
+
+ MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
+
+ masm.checkStackAlignment();
+
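+ // A class constructor called without |new| must throw, so route that case
+ // through the VM's InvokeFunction path, which performs the check.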
+ if (target->isClassConstructor() && !call->isConstructing()) {
+ emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->numActualArgs(), unusedStack);
+ return;
+ }
+
+ MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
+
+ // The callee in calleereg is known to be a non-native function, but it
+ // might have a LazyScript instead of a JSScript.
+ masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);
+
+ // Knowing that calleereg is a non-native function, load the JSScript.
+ masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
+
+ // Load script jitcode.
+ if (call->mir()->needsArgCheck())
+ masm.loadBaselineOrIonRaw(objreg, objreg, &uncompiled);
+ else
+ masm.loadBaselineOrIonNoArgCheck(objreg, objreg, &uncompiled);
+
+ // Nestle the StackPointer up to the argument vector.
+ masm.freeStack(unusedStack);
+
+ // Construct the IonFramePrefix.
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
+ JitFrameLayout::Size());
+ masm.Push(Imm32(call->numActualArgs()));
+ masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
+ masm.Push(Imm32(descriptor));
+
+ // Finally call the function in objreg.
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, call);
+
+ // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
+ // The return address has already been removed from the Ion frame.
+ int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
+ masm.adjustStack(prefixGarbage - unusedStack);
+ masm.jump(&end);
+
+ // Handle uncompiled functions.
+ masm.bind(&uncompiled);
+ if (call->isConstructing() && target->nargs() > call->numActualArgs())
+ emitCallInvokeFunctionShuffleNewTarget(call, calleereg, target->nargs(), unusedStack);
+ else
+ emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->numActualArgs(), unusedStack);
+
+ masm.bind(&end);
+
+ // If the return value of the constructing function is Primitive,
+ // replace the return value with the Object from CreateThis.
+ if (call->mir()->isConstructing()) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
+ masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand);
+ masm.bind(&notPrimitive);
+ }
+}
+
+template<typename T>
+void
+CodeGenerator::emitCallInvokeFunction(T* apply, Register extraStackSize)
+{
+ Register objreg = ToRegister(apply->getTempObject());
+ MOZ_ASSERT(objreg != extraStackSize);
+
+ // Push the space used by the arguments.
+ masm.moveStackPtrTo(objreg);
+ masm.Push(extraStackSize);
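+ // objreg was captured before the push above, so it still points at the
+ // argument vector laid out by emitPushArguments.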
+
+ pushArg(objreg); // argv.
+ pushArg(ToRegister(apply->getArgc())); // argc.
+ pushArg(Imm32(false)); // isConstructing.
+ pushArg(ToRegister(apply->getFunction())); // JSFunction*.
+
+ // This specialization of callVM restores the extraStackSize after the call.
+ callVM(InvokeFunctionInfo, apply, &extraStackSize);
+
+ masm.Pop(extraStackSize);
+}
+
+// Do not bail out after the execution of this function, since the stack no
+// longer corresponds to what is expected by the snapshots.
+void
+CodeGenerator::emitAllocateSpaceForApply(Register argcreg, Register extraStackSpace, Label* end)
+{
+ // Initialize the loop counter AND compute the stack usage (if argc == 0).
+ masm.movePtr(argcreg, extraStackSpace);
+
+ // Align the JitFrameLayout on the JitStackAlignment.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
+ "Stack padding assumes that the frameSize is correct");
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+ Label noPaddingNeeded;
+ // If the number of arguments is odd, then we do not need any padding.
+ masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
+ masm.addPtr(Imm32(1), extraStackSpace);
+ masm.bind(&noPaddingNeeded);
+ }
+
+ // Reserve space for copying the arguments.
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
+ masm.subFromStackPtr(extraStackSpace);
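+ // extraStackSpace now holds the reserved size in bytes: (argc, plus one
+ // slot of padding when alignment requires it) * sizeof(Value).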
+
+#ifdef DEBUG
+ // Put a magic value in the space reserved for padding. Note, this code
+ // cannot be merged with the previous test, as not all architectures can
+ // write below their stack pointers.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+ Label noPaddingNeeded;
+ // If the number of arguments is odd, then we do not need any padding.
+ masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
+ BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
+ masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
+ masm.bind(&noPaddingNeeded);
+ }
+#endif
+
+ // Skip the copy of arguments if there are none.
+ masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, end);
+}
+
+// Destroys argvIndex and copyreg.
+void
+CodeGenerator::emitCopyValuesForApply(Register argvSrcBase, Register argvIndex, Register copyreg,
+ size_t argvSrcOffset, size_t argvDstOffset)
+{
+ Label loop;
+ masm.bind(&loop);
+
+ // As argvIndex is off by 1, and we use the decBranchPtr instruction
+ // to loop back, we have to subtract the size of the word which is
+ // copied.
+ BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void*));
+ BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex, argvDstOffset - sizeof(void*));
+ masm.loadPtr(srcPtr, copyreg);
+ masm.storePtr(copyreg, dstPtr);
+
+ // Handle 32-bit architectures.
+ if (sizeof(Value) == 2 * sizeof(void*)) {
+ BaseValueIndex srcPtrLow(argvSrcBase, argvIndex, argvSrcOffset - 2 * sizeof(void*));
+ BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex, argvDstOffset - 2 * sizeof(void*));
+ masm.loadPtr(srcPtrLow, copyreg);
+ masm.storePtr(copyreg, dstPtrLow);
+ }
+
+ masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
+}
+
+void
+CodeGenerator::emitPopArguments(Register extraStackSpace)
+{
+ // Pop |this| and Arguments.
+ masm.freeStack(extraStackSpace);
+}
+
+void
+CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply, Register extraStackSpace)
+{
+ // Holds the function nargs. Initially the number of args to the caller.
+ Register argcreg = ToRegister(apply->getArgc());
+ Register copyreg = ToRegister(apply->getTempObject());
+
+ Label end;
+ emitAllocateSpaceForApply(argcreg, extraStackSpace, &end);
+
+ // We are making a copy of the arguments which are above the JitFrameLayout
+ // of the current Ion frame.
+ //
+ // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
+
+ // Compute the source and destination offsets into the stack.
+ size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
+ size_t argvDstOffset = 0;
+
+ // Save the extra stack space, and re-use the register as a base.
+ masm.push(extraStackSpace);
+ Register argvSrcBase = extraStackSpace;
+ argvSrcOffset += sizeof(void*);
+ argvDstOffset += sizeof(void*);
+
+ // Save the register holding the actual number of arguments, and re-use it as an index register.
+ masm.push(argcreg);
+ Register argvIndex = argcreg;
+ argvSrcOffset += sizeof(void*);
+ argvDstOffset += sizeof(void*);
+
+ // srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset
+ // dstPtr = (StackPointer ) + argvDstOffset
+ masm.addStackPtrTo(argvSrcBase);
+
+ // Copy arguments.
+ emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset, argvDstOffset);
+
+ // Restore argcreg and the extra stack space counter.
+ masm.pop(argcreg);
+ masm.pop(extraStackSpace);
+
+ // Join with all arguments copied and the extra stack usage computed.
+ masm.bind(&end);
+
+ // Push |this|.
+ masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
+ masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
+}
+
+void
+CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply, Register extraStackSpace)
+{
+ Label noCopy, epilogue;
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register elementsAndArgc = ToRegister(apply->getElements());
+
+ // Invariants guarded in the caller:
+ // - the array is not too long
+ // - the array length equals its initialized length
+
+ // The array length is our argc for the purposes of allocating space.
+ Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength());
+ masm.load32(length, tmpArgc);
+
+ // Allocate space for the values.
+ emitAllocateSpaceForApply(tmpArgc, extraStackSpace, &noCopy);
+
+ // Copy the values. This code is skipped entirely if there are
+ // no values.
+ size_t argvDstOffset = 0;
+
+ Register argvSrcBase = elementsAndArgc; // Elements value
+
+ masm.push(extraStackSpace);
+ Register copyreg = extraStackSpace;
+ argvDstOffset += sizeof(void*);
+
+ masm.push(tmpArgc);
+ Register argvIndex = tmpArgc;
+ argvDstOffset += sizeof(void*);
+
+ // Copy
+ emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, 0, argvDstOffset);
+
+ // Restore.
+ masm.pop(elementsAndArgc);
+ masm.pop(extraStackSpace);
+ masm.jump(&epilogue);
+
+ // Clear argc if we skipped the copy step.
+ masm.bind(&noCopy);
+ masm.movePtr(ImmPtr(0), elementsAndArgc);
+
+ // Join with all arguments copied and the extra stack usage computed.
+ // Note, "elements" has become "argc".
+ masm.bind(&epilogue);
+
+ // Push |this|.
+ masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
+ masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
+}
+
+template<typename T>
+void
+CodeGenerator::emitApplyGeneric(T* apply)
+{
+ // Holds the function object.
+ Register calleereg = ToRegister(apply->getFunction());
+
+ // Temporary register for modifying the function object.
+ Register objreg = ToRegister(apply->getTempObject());
+ Register extraStackSpace = ToRegister(apply->getTempStackCounter());
+
+ // Holds the function nargs, computed in the invoker or (for
+ // ApplyArray) in the argument pusher.
+ Register argcreg = ToRegister(apply->getArgc());
+
+ // Unless already known, guard that calleereg is actually a function object.
+ if (!apply->hasSingleTarget()) {
+ masm.loadObjClass(calleereg, objreg);
+
+ ImmPtr ptr = ImmPtr(&JSFunction::class_);
+ bailoutCmpPtr(Assembler::NotEqual, objreg, ptr, apply->snapshot());
+ }
+
+ // Copy the arguments of the current function.
+ //
+ // In the case of ApplyArray, also compute argc: the argc register
+ // and the elements register are the same; argc must not be
+ // referenced before the call to emitPushArguments() and elements
+ // must not be referenced after it returns.
+ //
+ // objreg is dead across this call.
+ //
+ // extraStackSpace is garbage on entry and defined on exit.
+ emitPushArguments(apply, extraStackSpace);
+
+ masm.checkStackAlignment();
+
+ // If the function is native, only emit the call to InvokeFunction.
+ if (apply->hasSingleTarget() && apply->getSingleTarget()->isNative()) {
+ emitCallInvokeFunction(apply, extraStackSpace);
+ emitPopArguments(extraStackSpace);
+ return;
+ }
+
+ Label end, invoke;
+
+ // Guard that calleereg is an interpreted function with a JSScript.
+ masm.branchIfFunctionHasNoScript(calleereg, &invoke);
+
+ // Knowing that calleereg is a non-native function, load the JSScript.
+ masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
+
+ // Load script jitcode.
+ masm.loadBaselineOrIonRaw(objreg, objreg, &invoke);
+
+ // Call with an Ion frame or a rectifier frame.
+ {
+ // Create the frame descriptor.
+ unsigned pushed = masm.framePushed();
+ Register stackSpace = extraStackSpace;
+ masm.addPtr(Imm32(pushed), stackSpace);
+ masm.makeFrameDescriptor(stackSpace, JitFrame_IonJS, JitFrameLayout::Size());
+
+ masm.Push(argcreg);
+ masm.Push(calleereg);
+ masm.Push(stackSpace); // descriptor
+
+ Label underflow, rejoin;
+
+ // Check whether the provided arguments satisfy target argc.
+ if (!apply->hasSingleTarget()) {
+ Register nformals = extraStackSpace;
+ masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nformals);
+ masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
+ } else {
+ masm.branch32(Assembler::Below, argcreg, Imm32(apply->getSingleTarget()->nargs()),
+ &underflow);
+ }
+
+ // Skip the construction of the rectifier frame because we have no
+ // underflow.
+ masm.jump(&rejoin);
+
+ // Argument fixup needed. Get ready to call the argumentsRectifier.
+ {
+ masm.bind(&underflow);
+
+ // Hardcode the address of the argumentsRectifier code.
+ JitCode* argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
+
+ MOZ_ASSERT(ArgumentsRectifierReg != objreg);
+ masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
+ masm.loadPtr(Address(objreg, JitCode::offsetOfCode()), objreg);
+ masm.movePtr(argcreg, ArgumentsRectifierReg);
+ }
+
+ masm.bind(&rejoin);
+
+ // Finally call the function in objreg, as assigned by one of the paths above.
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, apply);
+
+ // Recover the number of arguments from the frame descriptor.
+ masm.loadPtr(Address(masm.getStackPointer(), 0), stackSpace);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), stackSpace);
+ masm.subPtr(Imm32(pushed), stackSpace);
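+ // The size field of the descriptor was (pushed + extraStackSpace), so after
+ // the shift and subtraction stackSpace again holds the byte size of the
+ // pushed argument vector.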
+
+ // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
+ // The return address has already been removed from the Ion frame.
+ int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
+ masm.adjustStack(prefixGarbage);
+ masm.jump(&end);
+ }
+
+ // Handle uncompiled or native functions.
+ {
+ masm.bind(&invoke);
+ emitCallInvokeFunction(apply, extraStackSpace);
+ }
+
+ // Pop arguments and continue.
+ masm.bind(&end);
+ emitPopArguments(extraStackSpace);
+}
+
+void
+CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply)
+{
+ // Limit the number of parameters we can handle to a number that does not risk
+ // us allocating too much stack, notably on Windows where there is a 4K guard page
+ // that has to be touched to extend the stack. See bug 1351278. The value "3000"
+ // is the size of the guard page minus an arbitrary, but large, safety margin.
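+ // With 8-byte Values this caps Ion-compiled fun.apply at 375 arguments;
+ // larger argument counts bail out and take the non-Ion path instead.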
+
+ LSnapshot* snapshot = apply->snapshot();
+ Register argcreg = ToRegister(apply->getArgc());
+
+ uint32_t limit = 3000 / sizeof(Value);
+ bailoutCmp32(Assembler::Above, argcreg, Imm32(limit), snapshot);
+
+ emitApplyGeneric(apply);
+}
+
+void
+CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply)
+{
+ LSnapshot* snapshot = apply->snapshot();
+ Register tmp = ToRegister(apply->getTempObject());
+
+ Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength());
+ masm.load32(length, tmp);
+
+ // See comment in visitApplyArgsGeneric, above.
+
+ uint32_t limit = 3000 / sizeof(Value);
+ bailoutCmp32(Assembler::Above, tmp, Imm32(limit), snapshot);
+
+ // Ensure that the array does not contain an uninitialized tail.
+
+ Address initializedLength(ToRegister(apply->getElements()),
+ ObjectElements::offsetOfInitializedLength());
+ masm.sub32(initializedLength, tmp);
+ bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+
+ emitApplyGeneric(apply);
+}
+
+typedef bool (*ArraySpliceDenseFn)(JSContext*, HandleObject, uint32_t, uint32_t);
+static const VMFunction ArraySpliceDenseInfo =
+ FunctionInfo<ArraySpliceDenseFn>(ArraySpliceDense, "ArraySpliceDense");
+
+void
+CodeGenerator::visitArraySplice(LArraySplice* lir)
+{
+ pushArg(ToRegister(lir->getDeleteCount()));
+ pushArg(ToRegister(lir->getStart()));
+ pushArg(ToRegister(lir->getObject()));
+ callVM(ArraySpliceDenseInfo, lir);
+}
+
+void
+CodeGenerator::visitBail(LBail* lir)
+{
+ bailout(lir->snapshot());
+}
+
+void
+CodeGenerator::visitUnreachable(LUnreachable* lir)
+{
+ masm.assumeUnreachable("end-of-block assumed unreachable");
+}
+
+void
+CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir)
+{
+ encode(lir->snapshot());
+}
+
+void
+CodeGenerator::visitGetDynamicName(LGetDynamicName* lir)
+{
+ Register envChain = ToRegister(lir->getEnvironmentChain());
+ Register name = ToRegister(lir->getName());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register temp3 = ToRegister(lir->temp3());
+
+ masm.loadJSContext(temp3);
+
+ /* Make space for the outparam. */
+ masm.adjustStack(-int32_t(sizeof(Value)));
+ masm.moveStackPtrTo(temp2);
+
+ masm.setupUnalignedABICall(temp1);
+ masm.passABIArg(temp3);
+ masm.passABIArg(envChain);
+ masm.passABIArg(name);
+ masm.passABIArg(temp2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GetDynamicName));
+
+ const ValueOperand out = ToOutValue(lir);
+
+ masm.loadValue(Address(masm.getStackPointer(), 0), out);
+ masm.adjustStack(sizeof(Value));
+
+ Label undefined;
+ masm.branchTestUndefined(Assembler::Equal, out, &undefined);
+ bailoutFrom(&undefined, lir->snapshot());
+}
+
+typedef bool (*DirectEvalSFn)(JSContext*, HandleObject, HandleScript, HandleValue,
+ HandleString, jsbytecode*, MutableHandleValue);
+static const VMFunction DirectEvalStringInfo =
+ FunctionInfo<DirectEvalSFn>(DirectEvalStringFromIon, "DirectEvalStringFromIon");
+
+void
+CodeGenerator::visitCallDirectEval(LCallDirectEval* lir)
+{
+ Register envChain = ToRegister(lir->getEnvironmentChain());
+ Register string = ToRegister(lir->getString());
+
+ pushArg(ImmPtr(lir->mir()->pc()));
+ pushArg(string);
+ pushArg(ToValue(lir, LCallDirectEval::NewTarget));
+ pushArg(ImmGCPtr(current->mir()->info().script()));
+ pushArg(envChain);
+
+ callVM(DirectEvalStringInfo, lir);
+}
+
+void
+CodeGenerator::generateArgumentsChecks(bool bailout)
+{
+ // Registers safe for use before generatePrologue().
+ static const uint32_t EntryTempMask = Registers::TempMask & ~(1 << OsrFrameReg.code());
+
+ // This function can be used in the normal way to check the argument types,
+ // before entering the function, and to bail out when the arguments don't
+ // match. For debugging purposes, it can also be used to force/check that
+ // the arguments are correct. Upon failure it will hit a breakpoint.
+
+ MIRGraph& mir = gen->graph();
+ MResumePoint* rp = mir.entryResumePoint();
+
+ // No registers are allocated yet, so it's safe to grab anything.
+ Register temp = GeneralRegisterSet(EntryTempMask).getAny();
+
+ const CompileInfo& info = gen->info();
+
+ Label miss;
+ for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
+ // All initial parameters are guaranteed to be MParameters.
+ MParameter* param = rp->getOperand(i)->toParameter();
+ const TypeSet* types = param->resultTypeSet();
+ if (!types || types->unknown())
+ continue;
+
+ // Calculate the offset on the stack of the argument.
+ // (i - info.startArgSlot()) - Compute index of arg within arg vector.
+ // ... * sizeof(Value) - Scale by value size.
+ // ArgToStackOffset(...) - Compute displacement within arg vector.
+ int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value));
+ masm.guardTypeSet(Address(masm.getStackPointer(), offset), types, BarrierKind::TypeSet, temp, &miss);
+ }
+
+ if (miss.used()) {
+ if (bailout) {
+ bailoutFrom(&miss, graph.entrySnapshot());
+ } else {
+ Label success;
+ masm.jump(&success);
+ masm.bind(&miss);
+
+ // Check for cases where the type set guard might have missed due to
+ // changing object groups.
+ for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
+ MParameter* param = rp->getOperand(i)->toParameter();
+ const TemporaryTypeSet* types = param->resultTypeSet();
+ if (!types || types->unknown())
+ continue;
+
+ Label skip;
+ Address addr(masm.getStackPointer(), ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value)));
+ masm.branchTestObject(Assembler::NotEqual, addr, &skip);
+ Register obj = masm.extractObject(addr, temp);
+ masm.guardTypeSetMightBeIncomplete(types, obj, temp, &success);
+ masm.bind(&skip);
+ }
+
+ masm.assumeUnreachable("Argument check fail.");
+ masm.bind(&success);
+ }
+ }
+}
+
+// Out-of-line path to report over-recursed error and fail.
+class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator>
+{
+ LInstruction* lir_;
+
+ public:
+ explicit CheckOverRecursedFailure(LInstruction* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitCheckOverRecursedFailure(this);
+ }
+
+ LInstruction* lir() const {
+ return lir_;
+ }
+};
+
+void
+CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir)
+{
+ // If we don't push anything on the stack, skip the check.
+ if (omitOverRecursedCheck())
+ return;
+
+ // Ensure that this frame will not cross the stack limit.
+ // This is a weak check, justified by Ion using the C stack: we must always
+ // be some distance away from the actual limit, since if the limit is
+ // crossed, an error must be thrown, which requires more frames.
+ //
+ // It must always be possible to trespass past the stack limit.
+ // Ion may legally place frames very close to the limit. Calling additional
+ // C functions may then violate the limit without any checking.
+
+ // Since Ion frames exist on the C stack, the stack limit may be
+ // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
+ const void* limitAddr = GetJitContext()->runtime->addressOfJitStackLimit();
+
+ CheckOverRecursedFailure* ool = new(alloc()) CheckOverRecursedFailure(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ // Conditional forward (unlikely) branch to failure.
+ masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+typedef bool (*DefVarFn)(JSContext*, HandlePropertyName, unsigned, HandleObject);
+static const VMFunction DefVarInfo = FunctionInfo<DefVarFn>(DefVar, "DefVar");
+
+void
+CodeGenerator::visitDefVar(LDefVar* lir)
+{
+ Register envChain = ToRegister(lir->environmentChain());
+
+ pushArg(envChain); // JSObject*
+ pushArg(Imm32(lir->mir()->attrs())); // unsigned
+ pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName*
+
+ callVM(DefVarInfo, lir);
+}
+
+typedef bool (*DefLexicalFn)(JSContext*, HandlePropertyName, unsigned);
+static const VMFunction DefLexicalInfo =
+ FunctionInfo<DefLexicalFn>(DefGlobalLexical, "DefGlobalLexical");
+
+void
+CodeGenerator::visitDefLexical(LDefLexical* lir)
+{
+ pushArg(Imm32(lir->mir()->attrs())); // unsigned
+ pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName*
+
+ callVM(DefLexicalInfo, lir);
+}
+
+typedef bool (*DefFunOperationFn)(JSContext*, HandleScript, HandleObject, HandleFunction);
+static const VMFunction DefFunOperationInfo =
+ FunctionInfo<DefFunOperationFn>(DefFunOperation, "DefFunOperation");
+
+void
+CodeGenerator::visitDefFun(LDefFun* lir)
+{
+ Register envChain = ToRegister(lir->environmentChain());
+
+ Register fun = ToRegister(lir->fun());
+ pushArg(fun);
+ pushArg(envChain);
+ pushArg(ImmGCPtr(current->mir()->info().script()));
+
+ callVM(DefFunOperationInfo, lir);
+}
+
+typedef bool (*CheckOverRecursedFn)(JSContext*);
+static const VMFunction CheckOverRecursedInfo =
+ FunctionInfo<CheckOverRecursedFn>(CheckOverRecursed, "CheckOverRecursed");
+
+void
+CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool)
+{
+ // The OOL path is hit if the recursion depth has been exceeded.
+ // Throw an InternalError for over-recursion.
+
+ // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
+ // to save all live registers to avoid crashes if CheckOverRecursed triggers
+ // a GC.
+ saveLive(ool->lir());
+
+ callVM(CheckOverRecursedInfo, ool->lir());
+
+ restoreLive(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+IonScriptCounts*
+CodeGenerator::maybeCreateScriptCounts()
+{
+ // If scripts are being profiled, create a new IonScriptCounts for the
+ // profiling data, which will be attached to the associated JSScript or
+ // wasm module after code generation finishes.
+ if (!GetJitContext()->hasProfilingScripts())
+ return nullptr;
+
+ // This test inhibits IonScriptCounts creation for wasm code: script counts
+ // are currently incompatible with wasm codegen for two reasons: (1) wasm
+ // code must be serializable, and script count codegen bakes in absolute
+ // addresses; (2) wasm code does not have a JSScript with which to associate
+ // code coverage data.
+ JSScript* script = gen->info().script();
+ if (!script)
+ return nullptr;
+
+ UniquePtr<IonScriptCounts> counts(js_new<IonScriptCounts>());
+ if (!counts || !counts->init(graph.numBlocks()))
+ return nullptr;
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ MBasicBlock* block = graph.getBlock(i)->mir();
+
+ uint32_t offset = 0;
+ char* description = nullptr;
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ // Find a PC offset in the outermost script to use. If this
+ // block is from an inlined script, find a location in the
+ // outer script to associate information about the inlining
+ // with.
+ while (resume->caller())
+ resume = resume->caller();
+ offset = script->pcToOffset(resume->pc());
+
+ if (block->entryResumePoint()->caller()) {
+ // Get the filename and line number of the inner script.
+ JSScript* innerScript = block->info().script();
+ description = (char*) js_calloc(200);
+ if (description) {
+ snprintf(description, 200, "%s:%" PRIuSIZE,
+ innerScript->filename(), innerScript->lineno());
+ }
+ }
+ }
+
+ if (!counts->block(i).init(block->id(), offset, description, block->numSuccessors()))
+ return nullptr;
+
+ for (size_t j = 0; j < block->numSuccessors(); j++)
+ counts->block(i).setSuccessor(j, skipTrivialBlocks(block->getSuccessor(j))->id());
+ }
+
+ scriptCounts_ = counts.release();
+ return scriptCounts_;
+}
+
+// Structure for managing the state tracked for a block by script counters.
+struct ScriptCountBlockState
+{
+ IonBlockCounts& block;
+ MacroAssembler& masm;
+
+ Sprinter printer;
+
+ public:
+ ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
+ : block(*block), masm(*masm), printer(GetJitContext()->cx, false)
+ {
+ }
+
+ bool init()
+ {
+ if (!printer.init())
+ return false;
+
+ // Bump the hit count for the block at the start. This code is not
+ // included in either the text for the block or the instruction byte
+ // counts.
+ masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
+
+ // Collect human readable assembly for the code generated in the block.
+ masm.setPrinter(&printer);
+
+ return true;
+ }
+
+ void visitInstruction(LInstruction* ins)
+ {
+ // Prefix stream of assembly instructions with their LIR instruction
+ // name and any associated high level info.
+ if (const char* extra = ins->extraName())
+ printer.printf("[%s:%s]\n", ins->opName(), extra);
+ else
+ printer.printf("[%s]\n", ins->opName());
+ }
+
+ ~ScriptCountBlockState()
+ {
+ masm.setPrinter(nullptr);
+
+ if (!printer.hadOutOfMemory())
+ block.setCode(printer.string());
+ }
+};
+
+void
+CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated)
+{
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
+ masm.propagateOOM(ionScriptLabels_.append(label));
+
+ // If IonScript::invalidationCount_ != 0, the script has been invalidated.
+ masm.branch32(Assembler::NotEqual,
+ Address(temp, IonScript::offsetOfInvalidationCount()),
+ Imm32(0),
+ invalidated);
+}
+
+void
+CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset)
+{
+ MOZ_ASSERT(type == MIRType::Object || type == MIRType::ObjectOrNull ||
+ type == MIRType::String || type == MIRType::Symbol);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+
+ Register temp = regs.takeAny();
+ masm.push(temp);
+
+ // Don't check if the script has been invalidated. In that case invalid
+ // types are expected (until we reach the OsiPoint and bail out).
+ Label done;
+ branchIfInvalidated(temp, &done);
+
+ if ((type == MIRType::Object || type == MIRType::ObjectOrNull) &&
+ typeset && !typeset->unknownObject())
+ {
+ // We have a result TypeSet, assert this object is in it.
+ Label miss, ok;
+ if (type == MIRType::ObjectOrNull)
+ masm.branchPtr(Assembler::Equal, input, ImmWord(0), &ok);
+ if (typeset->getObjectCount() > 0)
+ masm.guardObjectType(input, typeset, temp, &miss);
+ else
+ masm.jump(&miss);
+ masm.jump(&ok);
+
+ masm.bind(&miss);
+ masm.guardTypeSetMightBeIncomplete(typeset, input, temp, &ok);
+
+ masm.assumeUnreachable("MIR instruction returned object with unexpected type");
+
+ masm.bind(&ok);
+ }
+
+ // Check that we have a valid GC pointer.
+ saveVolatile();
+ masm.setupUnalignedABICall(temp);
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(input);
+
+ void* callee;
+ switch (type) {
+ case MIRType::Object:
+ callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectPtr);
+ break;
+ case MIRType::ObjectOrNull:
+ callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectOrNullPtr);
+ break;
+ case MIRType::String:
+ callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidStringPtr);
+ break;
+ case MIRType::Symbol:
+ callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidSymbolPtr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.callWithABI(callee);
+ restoreVolatile();
+
+ masm.bind(&done);
+ masm.pop(temp);
+}
+
+void
+CodeGenerator::emitAssertResultV(const ValueOperand input, const TemporaryTypeSet* typeset)
+{
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ masm.push(temp1);
+ masm.push(temp2);
+
+ // Don't check if the script has been invalidated. In that case invalid
+ // types are expected (until we reach the OsiPoint and bail out).
+ Label done;
+ branchIfInvalidated(temp1, &done);
+
+ if (typeset && !typeset->unknown()) {
+ // We have a result TypeSet, assert this value is in it.
+ Label miss, ok;
+ masm.guardTypeSet(input, typeset, BarrierKind::TypeSet, temp1, &miss);
+ masm.jump(&ok);
+
+ masm.bind(&miss);
+
+ // Check for cases where the type set guard might have missed due to
+ // changing object groups.
+ Label realMiss;
+ masm.branchTestObject(Assembler::NotEqual, input, &realMiss);
+ Register payload = masm.extractObject(input, temp1);
+ masm.guardTypeSetMightBeIncomplete(typeset, payload, temp1, &ok);
+ masm.bind(&realMiss);
+
+ masm.assumeUnreachable("MIR instruction returned value with unexpected type");
+
+ masm.bind(&ok);
+ }
+
+ // Check that we have a valid GC pointer.
+ saveVolatile();
+
+ masm.pushValue(input);
+ masm.moveStackPtrTo(temp1);
+
+ masm.setupUnalignedABICall(temp2);
+ masm.loadJSContext(temp2);
+ masm.passABIArg(temp2);
+ masm.passABIArg(temp1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssertValidValue));
+ masm.popValue(input);
+ restoreVolatile();
+
+ masm.bind(&done);
+ masm.pop(temp2);
+ masm.pop(temp1);
+}
+
+#ifdef DEBUG
+void
+CodeGenerator::emitObjectOrStringResultChecks(LInstruction* lir, MDefinition* mir)
+{
+ if (lir->numDefs() == 0)
+ return;
+
+ MOZ_ASSERT(lir->numDefs() == 1);
+ Register output = ToRegister(lir->getDef(0));
+
+ emitAssertObjectOrStringResult(output, mir->type(), mir->resultTypeSet());
+}
+
+void
+CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir)
+{
+ if (lir->numDefs() == 0)
+ return;
+
+ MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
+ if (!lir->getDef(0)->output()->isRegister())
+ return;
+
+ ValueOperand output = ToOutValue(lir);
+
+ emitAssertResultV(output, mir->resultTypeSet());
+}
+
+void
+CodeGenerator::emitDebugResultChecks(LInstruction* ins)
+{
+ // In debug builds, check that LIR instructions return valid values.
+
+ MDefinition* mir = ins->mirRaw();
+ if (!mir)
+ return;
+
+ switch (mir->type()) {
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ case MIRType::String:
+ case MIRType::Symbol:
+ emitObjectOrStringResultChecks(ins, mir);
+ break;
+ case MIRType::Value:
+ emitValueResultChecks(ins, mir);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+CodeGenerator::emitDebugForceBailing(LInstruction* lir)
+{
+ if (!lir->snapshot())
+ return;
+ if (lir->isStart())
+ return;
+ if (lir->isOsiPoint())
+ return;
+
+ masm.comment("emitDebugForceBailing");
+ const void* bailAfterAddr = GetJitContext()->runtime->addressOfIonBailAfter();
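+ // The countdown stored at this address is typically armed by a testing
+ // hook; each LIR instruction with a snapshot decrements it, and we force a
+ // bailout when it reaches zero.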
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+
+ Label done, notBail, bail;
+ masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterAddr), Imm32(0), &done);
+ {
+ Register temp = regs.takeAny();
+
+ masm.push(temp);
+ masm.load32(AbsoluteAddress(bailAfterAddr), temp);
+ masm.sub32(Imm32(1), temp);
+ masm.store32(temp, AbsoluteAddress(bailAfterAddr));
+
+ masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
+ {
+ masm.pop(temp);
+ masm.jump(&bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+ masm.bind(&notBail);
+ masm.pop(temp);
+ }
+ masm.bind(&done);
+}
+#endif
+
+bool
+CodeGenerator::generateBody()
+{
+ IonScriptCounts* counts = maybeCreateScriptCounts();
+
+#if defined(JS_ION_PERF)
+ PerfSpewer* perfSpewer = &perfSpewer_;
+ if (gen->compilingWasm())
+ perfSpewer = &gen->perfSpewer();
+#endif
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ current = graph.getBlock(i);
+
+ // Don't emit any code for trivial blocks, containing just a goto. Such
+ // blocks are created to split critical edges, and if we didn't end up
+ // putting any instructions in them, we can skip them.
+ if (current->isTrivial())
+ continue;
+
+#ifdef JS_JITSPEW
+ const char* filename = nullptr;
+ size_t lineNumber = 0;
+ unsigned columnNumber = 0;
+ if (current->mir()->info().script()) {
+ filename = current->mir()->info().script()->filename();
+ if (current->mir()->pc())
+ lineNumber = PCToLineNumber(current->mir()->info().script(), current->mir()->pc(),
+ &columnNumber);
+ } else {
+#ifdef DEBUG
+ lineNumber = current->mir()->lineno();
+ columnNumber = current->mir()->columnIndex();
+#endif
+ }
+ JitSpew(JitSpew_Codegen, "# block%" PRIuSIZE " %s:%" PRIuSIZE ":%u%s:",
+ i, filename ? filename : "?", lineNumber, columnNumber,
+ current->mir()->isLoopHeader() ? " (loop header)" : "");
+#endif
+
+ masm.bind(current->label());
+
+ mozilla::Maybe<ScriptCountBlockState> blockCounts;
+ if (counts) {
+ blockCounts.emplace(&counts->block(i), &masm);
+ if (!blockCounts->init())
+ return false;
+ }
+
+#if defined(JS_ION_PERF)
+ perfSpewer->startBasicBlock(current->mir(), masm);
+#endif
+
+ for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) {
+ if (!alloc().ensureBallast())
+ return false;
+
+#ifdef JS_JITSPEW
+ JitSpewStart(JitSpew_Codegen, "instruction %s", iter->opName());
+ if (const char* extra = iter->extraName())
+ JitSpewCont(JitSpew_Codegen, ":%s", extra);
+ JitSpewFin(JitSpew_Codegen);
+#endif
+
+ if (counts)
+ blockCounts->visitInstruction(*iter);
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (iter->safepoint())
+ resetOsiPointRegs(iter->safepoint());
+#endif
+
+ if (iter->mirRaw()) {
+ // Only add instructions that have a tracked inline script tree.
+ if (iter->mirRaw()->trackedTree()) {
+ if (!addNativeToBytecodeEntry(iter->mirRaw()->trackedSite()))
+ return false;
+ }
+
+ // Track the start native offset of optimizations.
+ if (iter->mirRaw()->trackedOptimizations()) {
+ if (!addTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations()))
+ return false;
+ }
+ }
+
+#ifdef DEBUG
+ setElement(*iter); // needed to encode correct snapshot location.
+ emitDebugForceBailing(*iter);
+#endif
+
+ iter->accept(this);
+
+ // Track the end native offset of optimizations.
+ if (iter->mirRaw() && iter->mirRaw()->trackedOptimizations())
+ extendTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations());
+
+#ifdef DEBUG
+ if (!counts)
+ emitDebugResultChecks(*iter);
+#endif
+ }
+ if (masm.oom())
+ return false;
+
+#if defined(JS_ION_PERF)
+ perfSpewer->endBasicBlock(masm);
+#endif
+ }
+
+ return true;
+}
+
+// Out-of-line object allocation for LNewArray.
+class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator>
+{
+ LNewArray* lir_;
+
+ public:
+ explicit OutOfLineNewArray(LNewArray* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineNewArray(this);
+ }
+
+ LNewArray* lir() const {
+ return lir_;
+ }
+};
+
+typedef JSObject* (*NewArrayOperationFn)(JSContext*, HandleScript, jsbytecode*, uint32_t,
+ NewObjectKind);
+static const VMFunction NewArrayOperationInfo =
+ FunctionInfo<NewArrayOperationFn>(NewArrayOperation, "NewArrayOperation");
+
+static JSObject*
+NewArrayWithGroup(JSContext* cx, uint32_t length, HandleObjectGroup group,
+ bool convertDoubleElements)
+{
+ JSObject* res = NewFullyAllocatedArrayTryUseGroup(cx, group, length);
+ if (!res)
+ return nullptr;
+ if (convertDoubleElements)
+ res->as<ArrayObject>().setShouldConvertDoubleElements();
+ return res;
+}
+
+typedef JSObject* (*NewArrayWithGroupFn)(JSContext*, uint32_t, HandleObjectGroup, bool);
+static const VMFunction NewArrayWithGroupInfo =
+ FunctionInfo<NewArrayWithGroupFn>(NewArrayWithGroup, "NewArrayWithGroup");
+
+void
+CodeGenerator::visitNewArrayCallVM(LNewArray* lir)
+{
+ Register objReg = ToRegister(lir->output());
+
+ MOZ_ASSERT(!lir->isCall());
+ saveLive(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+
+ if (templateObject) {
+ pushArg(Imm32(lir->mir()->convertDoubleElements()));
+ pushArg(ImmGCPtr(templateObject->group()));
+ pushArg(Imm32(lir->mir()->length()));
+
+ callVM(NewArrayWithGroupInfo, lir);
+ } else {
+ pushArg(Imm32(GenericObject));
+ pushArg(Imm32(lir->mir()->length()));
+ pushArg(ImmPtr(lir->mir()->pc()));
+ pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
+
+ callVM(NewArrayOperationInfo, lir);
+ }
+
+ if (ReturnReg != objReg)
+ masm.movePtr(ReturnReg, objReg);
+
+ restoreLive(lir);
+}
+
+typedef JSObject* (*NewDerivedTypedObjectFn)(JSContext*,
+ HandleObject type,
+ HandleObject owner,
+ int32_t offset);
+static const VMFunction CreateDerivedTypedObjInfo =
+ FunctionInfo<NewDerivedTypedObjectFn>(CreateDerivedTypedObj, "CreateDerivedTypedObj");
+
+void
+CodeGenerator::visitNewDerivedTypedObject(LNewDerivedTypedObject* lir)
+{
+ pushArg(ToRegister(lir->offset()));
+ pushArg(ToRegister(lir->owner()));
+ pushArg(ToRegister(lir->type()));
+ callVM(CreateDerivedTypedObjInfo, lir);
+}
+
+void
+CodeGenerator::visitAtan2D(LAtan2D* lir)
+{
+ Register temp = ToRegister(lir->temp());
+ FloatRegister y = ToFloatRegister(lir->y());
+ FloatRegister x = ToFloatRegister(lir->x());
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(y, MoveOp::DOUBLE);
+ masm.passABIArg(x, MoveOp::DOUBLE);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaAtan2), MoveOp::DOUBLE);
+
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
+}
+
+void
+CodeGenerator::visitHypot(LHypot* lir)
+{
+ Register temp = ToRegister(lir->temp());
+ uint32_t numArgs = lir->numArgs();
+ masm.setupUnalignedABICall(temp);
+
+ for (uint32_t i = 0 ; i < numArgs; ++i)
+ masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE);
+
+ switch(numArgs) {
+ case 2:
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaHypot), MoveOp::DOUBLE);
+ break;
+ case 3:
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot3), MoveOp::DOUBLE);
+ break;
+ case 4:
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot4), MoveOp::DOUBLE);
+ break;
+ default:
+ MOZ_CRASH("Unexpected number of arguments to hypot function.");
+ }
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
+}
+
+void
+CodeGenerator::visitNewArray(LNewArray* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ JSObject* templateObject = lir->mir()->templateObject();
+ DebugOnly<uint32_t> length = lir->mir()->length();
+
+ MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
+
+ if (lir->mir()->isVMCall()) {
+ visitNewArrayCallVM(lir);
+ return;
+ }
+
+ OutOfLineNewArray* ool = new(alloc()) OutOfLineNewArray(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(),
+ ool->entry(), /* initContents = */ true,
+ lir->mir()->convertDoubleElements());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool)
+{
+ visitNewArrayCallVM(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitNewArrayCopyOnWrite(LNewArrayCopyOnWrite* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ ArrayObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+
+ // If we have a template object, we can inline the object creation.
+ OutOfLineCode* ool = oolCallVM(NewArrayCopyOnWriteInfo, lir,
+ ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
+ StoreRegisterTo(objReg));
+
+ masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*ArrayConstructorOneArgFn)(JSContext*, HandleObjectGroup, int32_t length);
+static const VMFunction ArrayConstructorOneArgInfo =
+ FunctionInfo<ArrayConstructorOneArgFn>(ArrayConstructorOneArg, "ArrayConstructorOneArg");
+
+void
+CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir)
+{
+ Register lengthReg = ToRegister(lir->length());
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+
+ OutOfLineCode* ool = oolCallVM(ArrayConstructorOneArgInfo, lir,
+ ArgList(ImmGCPtr(templateObject->group()), lengthReg),
+ StoreRegisterTo(objReg));
+
+ bool canInline = true;
+ size_t inlineLength = 0;
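+ // inlineLength is the number of elements that fit in the template object's
+ // inline (fixed) element storage; lengths above it take the VM path below.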
+ if (templateObject->is<ArrayObject>()) {
+ if (templateObject->as<ArrayObject>().hasFixedElements()) {
+ size_t numSlots = gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
+ inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
+ } else {
+ canInline = false;
+ }
+ } else {
+ if (templateObject->as<UnboxedArrayObject>().hasInlineElements()) {
+ size_t nbytes =
+ templateObject->tenuredSizeOfThis() - UnboxedArrayObject::offsetOfInlineElements();
+ inlineLength = nbytes / templateObject->as<UnboxedArrayObject>().elementSize();
+ } else {
+ canInline = false;
+ }
+ }
+
+ if (canInline) {
+ // Try to do the allocation inline if the template object is big enough
+ // for the length in lengthReg. If the length is bigger we could still
+ // use the template object and not allocate the elements, but it's more
+ // efficient to do a single big allocation than to (repeatedly)
+ // reallocate the array later on when filling it.
+ masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength), ool->entry());
+
+ masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry());
+
+ size_t lengthOffset = NativeObject::offsetOfFixedElements() + ObjectElements::offsetOfLength();
+ masm.store32(lengthReg, Address(objReg, lengthOffset));
+ } else {
+ masm.jump(ool->entry());
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+typedef TypedArrayObject* (*TypedArrayConstructorOneArgFn)(JSContext*, HandleObject, int32_t length);
+static const VMFunction TypedArrayConstructorOneArgInfo =
+ FunctionInfo<TypedArrayConstructorOneArgFn>(TypedArrayCreateWithTemplate,
+ "TypedArrayCreateWithTemplate");
+
+void
+CodeGenerator::visitNewTypedArray(LNewTypedArray* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp1());
+ Register lengthReg = ToRegister(lir->temp2());
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+
+ TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
+ uint32_t n = ttemplate->length();
+
+ OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir,
+ ArgList(ImmGCPtr(templateObject), Imm32(n)),
+ StoreRegisterTo(objReg));
+
+ masm.createGCObject(objReg, tempReg, templateObject, initialHeap,
+ ool->entry(), /*initContents*/true, /*convertDoubleElements*/false);
+
+ masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
+ ttemplate, TypedArrayLength::Fixed);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitNewTypedArrayDynamicLength(LNewTypedArrayDynamicLength* lir)
+{
+ Register lengthReg = ToRegister(lir->length());
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+
+ TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
+
+ OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir,
+ ArgList(ImmGCPtr(templateObject), lengthReg),
+ StoreRegisterTo(objReg));
+
+ masm.createGCObject(objReg, tempReg, templateObject, initialHeap,
+ ool->entry(), /*initContents*/true, /*convertDoubleElements*/false);
+
+ masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
+ ttemplate, TypedArrayLength::Dynamic);
+
+ masm.bind(ool->rejoin());
+}
+
+// Out-of-line object allocation for JSOP_NEWOBJECT.
+class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator>
+{
+ LNewObject* lir_;
+
+ public:
+ explicit OutOfLineNewObject(LNewObject* lir)
+ : lir_(lir)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineNewObject(this);
+ }
+
+ LNewObject* lir() const {
+ return lir_;
+ }
+};
+
+typedef JSObject* (*NewInitObjectWithTemplateFn)(JSContext*, HandleObject);
+static const VMFunction NewInitObjectWithTemplateInfo =
+ FunctionInfo<NewInitObjectWithTemplateFn>(NewObjectOperationWithTemplate,
+ "NewObjectOperationWithTemplate");
+
+typedef JSObject* (*NewInitObjectFn)(JSContext*, HandleScript, jsbytecode* pc, NewObjectKind);
+static const VMFunction NewInitObjectInfo =
+ FunctionInfo<NewInitObjectFn>(NewObjectOperation, "NewObjectOperation");
+
+typedef PlainObject* (*ObjectCreateWithTemplateFn)(JSContext*, HandlePlainObject);
+static const VMFunction ObjectCreateWithTemplateInfo =
+ FunctionInfo<ObjectCreateWithTemplateFn>(ObjectCreateWithTemplate, "ObjectCreateWithTemplate");
+
+void
+CodeGenerator::visitNewObjectVMCall(LNewObject* lir)
+{
+ Register objReg = ToRegister(lir->output());
+
+ MOZ_ASSERT(!lir->isCall());
+ saveLive(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+
+ // If we're making a new object with a class prototype (that is, an object
+ // that derives its class from its prototype instead of being
+ // PlainObject::class_'d) from self-hosted code, we need a different init
+ // function.
+ switch (lir->mir()->mode()) {
+ case MNewObject::ObjectLiteral:
+ if (templateObject) {
+ pushArg(ImmGCPtr(templateObject));
+ callVM(NewInitObjectWithTemplateInfo, lir);
+ } else {
+ pushArg(Imm32(GenericObject));
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+ pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
+ callVM(NewInitObjectInfo, lir);
+ }
+ break;
+ case MNewObject::ObjectCreate:
+ pushArg(ImmGCPtr(templateObject));
+ callVM(ObjectCreateWithTemplateInfo, lir);
+ break;
+ }
+
+ if (ReturnReg != objReg)
+ masm.movePtr(ReturnReg, objReg);
+
+ restoreLive(lir);
+}
+
+static bool
+ShouldInitFixedSlots(LInstruction* lir, JSObject* obj)
+{
+ if (!obj->isNative())
+ return true;
+ NativeObject* templateObj = &obj->as<NativeObject>();
+
+ // Look for StoreFixedSlot instructions following an object allocation
+ // that write to this object before a GC is triggered or this object is
+ // passed to a VM call. If all fixed slots will be initialized, the
+ // allocation code doesn't need to set the slots to |undefined|.
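+ // Returning true means the allocation code must still clear the fixed
+ // slots (the conservative default); returning false means every fixed slot
+ // is provably written before anything can observe the object.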
+
+ uint32_t nfixed = templateObj->numUsedFixedSlots();
+ if (nfixed == 0)
+ return false;
+
+ // Only optimize if all fixed slots are initially |undefined|, so that we
+ // can assume incremental pre-barriers are not necessary. See also the
+ // comment below.
+ for (uint32_t slot = 0; slot < nfixed; slot++) {
+ if (!templateObj->getSlot(slot).isUndefined())
+ return true;
+ }
+
+ // Keep track of the fixed slots that are initialized. initializedSlots is
+ // a bit mask with a bit for each slot.
+ MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
+ static_assert(NativeObject::MAX_FIXED_SLOTS <= 32, "Slot bits must fit in 32 bits");
+ uint32_t initializedSlots = 0;
+ uint32_t numInitialized = 0;
+
+ MInstruction* allocMir = lir->mirRaw()->toInstruction();
+ MBasicBlock* block = allocMir->block();
+
+ // Skip the allocation instruction.
+ MInstructionIterator iter = block->begin(allocMir);
+ MOZ_ASSERT(*iter == allocMir);
+ iter++;
+
+ while (true) {
+ for (; iter != block->end(); iter++) {
+ if (iter->isNop() || iter->isConstant() || iter->isPostWriteBarrier()) {
+ // These instructions won't trigger a GC or read object slots.
+ continue;
+ }
+
+ if (iter->isStoreFixedSlot()) {
+ MStoreFixedSlot* store = iter->toStoreFixedSlot();
+ if (store->object() != allocMir)
+ return true;
+
+ // We may not initialize this object slot on allocation, so the
+ // pre-barrier could read uninitialized memory. Simply disable
+ // the barrier for this store: the object was just initialized
+ // so the barrier is not necessary.
+ store->setNeedsBarrier(false);
+
+ uint32_t slot = store->slot();
+ MOZ_ASSERT(slot < nfixed);
+ if ((initializedSlots & (1 << slot)) == 0) {
+ numInitialized++;
+ initializedSlots |= (1 << slot);
+
+ if (numInitialized == nfixed) {
+ // All fixed slots will be initialized.
+ MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
+ return false;
+ }
+ }
+ continue;
+ }
+
+ if (iter->isGoto()) {
+ block = iter->toGoto()->target();
+ if (block->numPredecessors() != 1)
+ return true;
+ break;
+ }
+
+ // Unhandled instruction, assume it bails or reads object slots.
+ return true;
+ }
+ iter = block->begin();
+ }
+
+ MOZ_CRASH("Shouldn't get here");
+}
+
+void
+CodeGenerator::visitNewObject(LNewObject* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ JSObject* templateObject = lir->mir()->templateObject();
+
+ if (lir->mir()->isVMCall()) {
+ visitNewObjectVMCall(lir);
+ return;
+ }
+
+ OutOfLineNewObject* ool = new(alloc()) OutOfLineNewObject(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ bool initContents = ShouldInitFixedSlots(lir, templateObject);
+ masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(),
+ initContents);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool)
+{
+ visitNewObjectVMCall(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+typedef InlineTypedObject* (*NewTypedObjectFn)(JSContext*, Handle<InlineTypedObject*>, gc::InitialHeap);
+static const VMFunction NewTypedObjectInfo =
+ FunctionInfo<NewTypedObjectFn>(InlineTypedObject::createCopy, "InlineTypedObject::createCopy");
+
+void
+CodeGenerator::visitNewTypedObject(LNewTypedObject* lir)
+{
+ Register object = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp());
+ InlineTypedObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+
+ OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir,
+ ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
+ StoreRegisterTo(object));
+
+ masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitSimdBox(LSimdBox* lir)
+{
+ FloatRegister in = ToFloatRegister(lir->input());
+ Register object = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp());
+ InlineTypedObject* templateObject = lir->mir()->templateObject();
+ gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+ MIRType type = lir->mir()->input()->type();
+
+ registerSimdTemplate(lir->mir()->simdType());
+
+ MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across oolCallVM");
+ OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir,
+ ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
+ StoreRegisterTo(object));
+
+ masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry());
+ masm.bind(ool->rejoin());
+
+ Address objectData(object, InlineTypedObject::offsetOfDataStart());
+ switch (type) {
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ masm.storeUnalignedSimd128Int(in, objectData);
+ break;
+ case MIRType::Float32x4:
+ masm.storeUnalignedSimd128Float(in, objectData);
+ break;
+ default:
+ MOZ_CRASH("Unknown SIMD kind when generating code for SimdBox.");
+ }
+}
+
+void
+CodeGenerator::registerSimdTemplate(SimdType simdType)
+{
+ simdRefreshTemplatesDuringLink_ |= 1 << uint32_t(simdType);
+}
+
+void
+CodeGenerator::captureSimdTemplate(JSContext* cx)
+{
+ JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
+ while (simdRefreshTemplatesDuringLink_) {
+ uint32_t typeIndex = mozilla::CountTrailingZeroes32(simdRefreshTemplatesDuringLink_);
+ simdRefreshTemplatesDuringLink_ ^= 1 << typeIndex;
+ SimdType type = SimdType(typeIndex);
+
+        // Note: the weak reference to the template object should not have been
+        // garbage collected. It is either registered by IonBuilder, or verified
+        // before use in the EagerSimdUnbox phase.
+ jitCompartment->registerSimdTemplateObjectFor(type);
+ }
+}
+
+void
+CodeGenerator::visitSimdUnbox(LSimdUnbox* lir)
+{
+ Register object = ToRegister(lir->input());
+ FloatRegister simd = ToFloatRegister(lir->output());
+ Register temp = ToRegister(lir->temp());
+ Label bail;
+
+ // obj->group()
+ masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp);
+
+ // Guard that the object has the same representation as the one produced for
+ // SIMD value-type.
+ Address clasp(temp, ObjectGroup::offsetOfClasp());
+ static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
+ masm.branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
+ &bail);
+
+ // obj->type()->typeDescr()
+ // The previous class pointer comparison implies that the addendumKind is
+ // Addendum_TypeDescr.
+ masm.loadPtr(Address(temp, ObjectGroup::offsetOfAddendum()), temp);
+
+    // Check the /Kind/ reserved slot of the TypeDescr. This is an Int32 Value;
+    // checking it is equivalent to the object class check above.
+ static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
+ Address typeDescrKind(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
+ masm.assertTestInt32(Assembler::Equal, typeDescrKind,
+ "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
+ masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrKind), Imm32(js::type::Simd), &bail);
+
+ SimdType type = lir->mir()->simdType();
+
+    // Check if the SimdTypeDescr /Type/ matches the specialization of this
+    // MSimdUnbox instruction.
+ static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
+ Address typeDescrType(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE));
+ masm.assertTestInt32(Assembler::Equal, typeDescrType,
+ "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())");
+ masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrType), Imm32(int32_t(type)), &bail);
+
+ // Load the value from the data of the InlineTypedObject.
+ Address objectData(object, InlineTypedObject::offsetOfDataStart());
+ switch (lir->mir()->type()) {
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ masm.loadUnalignedSimd128Int(objectData, simd);
+ break;
+ case MIRType::Float32x4:
+ masm.loadUnalignedSimd128Float(objectData, simd);
+ break;
+ default:
+ MOZ_CRASH("The impossible happened!");
+ }
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap);
+static const VMFunction NewNamedLambdaObjectInfo =
+ FunctionInfo<NewNamedLambdaObjectFn>(NamedLambdaObject::createTemplateObject,
+ "NamedLambdaObject::createTemplateObject");
+
+void
+CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ EnvironmentObject* templateObj = lir->mir()->templateObj();
+ const CompileInfo& info = lir->mir()->block()->info();
+
+    // If we have a template object, we can inline the named lambda object creation.
+ OutOfLineCode* ool = oolCallVM(NewNamedLambdaObjectInfo, lir,
+ ArgList(ImmGCPtr(info.funMaybeLazy()), Imm32(gc::DefaultHeap)),
+ StoreRegisterTo(objReg));
+
+ bool initContents = ShouldInitFixedSlots(lir, templateObj);
+ masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(),
+ initContents);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*NewCallObjectFn)(JSContext*, HandleShape, HandleObjectGroup);
+static const VMFunction NewCallObjectInfo =
+ FunctionInfo<NewCallObjectFn>(NewCallObject, "NewCallObject");
+
+void
+CodeGenerator::visitNewCallObject(LNewCallObject* lir)
+{
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+
+ CallObject* templateObj = lir->mir()->templateObject();
+
+ OutOfLineCode* ool = oolCallVM(NewCallObjectInfo, lir,
+ ArgList(ImmGCPtr(templateObj->lastProperty()),
+ ImmGCPtr(templateObj->group())),
+ StoreRegisterTo(objReg));
+
+ // Inline call object creation, using the OOL path only for tricky cases.
+ bool initContents = ShouldInitFixedSlots(lir, templateObj);
+ masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(),
+ initContents);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*NewSingletonCallObjectFn)(JSContext*, HandleShape);
+static const VMFunction NewSingletonCallObjectInfo =
+ FunctionInfo<NewSingletonCallObjectFn>(NewSingletonCallObject, "NewSingletonCallObject");
+
+void
+CodeGenerator::visitNewSingletonCallObject(LNewSingletonCallObject* lir)
+{
+ Register objReg = ToRegister(lir->output());
+
+ JSObject* templateObj = lir->mir()->templateObject();
+
+ OutOfLineCode* ool;
+ ool = oolCallVM(NewSingletonCallObjectInfo, lir,
+ ArgList(ImmGCPtr(templateObj->as<CallObject>().lastProperty())),
+ StoreRegisterTo(objReg));
+
+ // Objects can only be given singleton types in VM calls. We make the call
+ // out of line to not bloat inline code, even if (naively) this seems like
+ // extra work.
+ masm.jump(ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*NewStringObjectFn)(JSContext*, HandleString);
+static const VMFunction NewStringObjectInfo =
+ FunctionInfo<NewStringObjectFn>(NewStringObject, "NewStringObject");
+
+void
+CodeGenerator::visitNewStringObject(LNewStringObject* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp());
+
+ StringObject* templateObj = lir->mir()->templateObj();
+
+ OutOfLineCode* ool = oolCallVM(NewStringObjectInfo, lir, ArgList(input),
+ StoreRegisterTo(output));
+
+ masm.createGCObject(output, temp, templateObj, gc::DefaultHeap, ool->entry());
+
+ masm.loadStringLength(input, temp);
+
+ masm.storeValue(JSVAL_TYPE_STRING, input, Address(output, StringObject::offsetOfPrimitiveValue()));
+ masm.storeValue(JSVAL_TYPE_INT32, temp, Address(output, StringObject::offsetOfLength()));
+
+ masm.bind(ool->rejoin());
+}
+
+typedef bool(*InitElemFn)(JSContext* cx, jsbytecode* pc, HandleObject obj,
+ HandleValue id, HandleValue value);
+static const VMFunction InitElemInfo =
+ FunctionInfo<InitElemFn>(InitElemOperation, "InitElemOperation");
+
+void
+CodeGenerator::visitInitElem(LInitElem* lir)
+{
+ Register objReg = ToRegister(lir->getObject());
+
+ pushArg(ToValue(lir, LInitElem::ValueIndex));
+ pushArg(ToValue(lir, LInitElem::IdIndex));
+ pushArg(objReg);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+
+ callVM(InitElemInfo, lir);
+}
+
+typedef bool (*InitElemGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandleValue,
+ HandleObject);
+static const VMFunction InitElemGetterSetterInfo =
+ FunctionInfo<InitElemGetterSetterFn>(InitGetterSetterOperation, "InitGetterSetterOperation");
+
+void
+CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register value = ToRegister(lir->value());
+
+ pushArg(value);
+ pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
+ pushArg(obj);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+
+ callVM(InitElemGetterSetterInfo, lir);
+}
+
+typedef bool (*MutatePrototypeFn)(JSContext* cx, HandlePlainObject obj, HandleValue value);
+static const VMFunction MutatePrototypeInfo =
+ FunctionInfo<MutatePrototypeFn>(MutatePrototype, "MutatePrototype");
+
+void
+CodeGenerator::visitMutateProto(LMutateProto* lir)
+{
+ Register objReg = ToRegister(lir->getObject());
+
+ pushArg(ToValue(lir, LMutateProto::ValueIndex));
+ pushArg(objReg);
+
+ callVM(MutatePrototypeInfo, lir);
+}
+
+typedef bool (*InitPropFn)(JSContext*, HandleObject, HandlePropertyName, HandleValue, jsbytecode* pc);
+static const VMFunction InitPropInfo = FunctionInfo<InitPropFn>(InitProp, "InitProp");
+
+void
+CodeGenerator::visitInitProp(LInitProp* lir)
+{
+ Register objReg = ToRegister(lir->getObject());
+
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+ pushArg(ToValue(lir, LInitProp::ValueIndex));
+ pushArg(ImmGCPtr(lir->mir()->propertyName()));
+ pushArg(objReg);
+
+ callVM(InitPropInfo, lir);
+}
+
+typedef bool (*InitPropGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandlePropertyName,
+                                        HandleObject);
+static const VMFunction InitPropGetterSetterInfo =
+ FunctionInfo<InitPropGetterSetterFn>(InitGetterSetterOperation, "InitGetterSetterOperation");
+
+void
+CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register value = ToRegister(lir->value());
+
+ pushArg(value);
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(obj);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+
+ callVM(InitPropGetterSetterInfo, lir);
+}
+
+typedef bool (*CreateThisFn)(JSContext* cx, HandleObject callee, HandleObject newTarget, MutableHandleValue rval);
+static const VMFunction CreateThisInfoCodeGen = FunctionInfo<CreateThisFn>(CreateThis, "CreateThis");
+
+void
+CodeGenerator::visitCreateThis(LCreateThis* lir)
+{
+ const LAllocation* callee = lir->getCallee();
+ const LAllocation* newTarget = lir->getNewTarget();
+
+ if (newTarget->isConstant())
+ pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
+ else
+ pushArg(ToRegister(newTarget));
+
+ if (callee->isConstant())
+ pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
+ else
+ pushArg(ToRegister(callee));
+
+ callVM(CreateThisInfoCodeGen, lir);
+}
+
+static JSObject*
+CreateThisForFunctionWithProtoWrapper(JSContext* cx, HandleObject callee, HandleObject newTarget,
+ HandleObject proto)
+{
+ return CreateThisForFunctionWithProto(cx, callee, newTarget, proto);
+}
+
+typedef JSObject* (*CreateThisWithProtoFn)(JSContext* cx, HandleObject callee,
+ HandleObject newTarget, HandleObject proto);
+static const VMFunction CreateThisWithProtoInfo =
+ FunctionInfo<CreateThisWithProtoFn>(CreateThisForFunctionWithProtoWrapper,
+ "CreateThisForFunctionWithProtoWrapper");
+
+void
+CodeGenerator::visitCreateThisWithProto(LCreateThisWithProto* lir)
+{
+ const LAllocation* callee = lir->getCallee();
+ const LAllocation* newTarget = lir->getNewTarget();
+ const LAllocation* proto = lir->getPrototype();
+
+ if (proto->isConstant())
+ pushArg(ImmGCPtr(&proto->toConstant()->toObject()));
+ else
+ pushArg(ToRegister(proto));
+
+ if (newTarget->isConstant())
+ pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
+ else
+ pushArg(ToRegister(newTarget));
+
+ if (callee->isConstant())
+ pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
+ else
+ pushArg(ToRegister(callee));
+
+ callVM(CreateThisWithProtoInfo, lir);
+}
+
+void
+CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate* lir)
+{
+ JSObject* templateObject = lir->mir()->templateObject();
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+
+ OutOfLineCode* ool = oolCallVM(NewInitObjectWithTemplateInfo, lir,
+ ArgList(ImmGCPtr(templateObject)),
+ StoreRegisterTo(objReg));
+
+    // Allocate. If the FreeList is empty, call into the VM, which may GC.
+ bool initContents = !templateObject->is<PlainObject>() ||
+ ShouldInitFixedSlots(lir, &templateObject->as<PlainObject>());
+ masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(),
+ initContents);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSObject* (*NewIonArgumentsObjectFn)(JSContext* cx, JitFrameLayout* frame, HandleObject);
+static const VMFunction NewIonArgumentsObjectInfo =
+ FunctionInfo<NewIonArgumentsObjectFn>((NewIonArgumentsObjectFn) ArgumentsObject::createForIon,
+ "ArgumentsObject::createForIon");
+
+void
+CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir)
+{
+    // This should only be constructed in the first block, not in any OSR entry block.
+ MOZ_ASSERT(lir->mir()->block()->id() == 0);
+
+ Register callObj = ToRegister(lir->getCallObject());
+ Register temp = ToRegister(lir->temp0());
+ Label done;
+
+ if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
+ Register objTemp = ToRegister(lir->temp1());
+ Register cxTemp = ToRegister(lir->temp2());
+
+ masm.Push(callObj);
+
+ // Try to allocate an arguments object. This will leave the reserved
+ // slots uninitialized, so it's important we don't GC until we
+ // initialize these slots in ArgumentsObject::finishForIon.
+ Label failure;
+ masm.createGCObject(objTemp, temp, templateObj, gc::DefaultHeap, &failure,
+ /* initContents = */ false);
+
+ masm.moveStackPtrTo(temp);
+ masm.addPtr(Imm32(masm.framePushed()), temp);
+
+ masm.setupUnalignedABICall(cxTemp);
+ masm.loadJSContext(cxTemp);
+ masm.passABIArg(cxTemp);
+ masm.passABIArg(temp);
+ masm.passABIArg(callObj);
+ masm.passABIArg(objTemp);
+
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ArgumentsObject::finishForIon));
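+        // A zero return value from finishForIon signals failure; in that case
+        // fall back to the VM call below.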
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
+
+ // Discard saved callObj on the stack.
+ masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
+ masm.jump(&done);
+
+ masm.bind(&failure);
+ masm.Pop(callObj);
+ }
+
+ masm.moveStackPtrTo(temp);
+ masm.addPtr(Imm32(frameSize()), temp);
+
+ pushArg(callObj);
+ pushArg(temp);
+ callVM(NewIonArgumentsObjectInfo, lir);
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir)
+{
+ Register temp = ToRegister(lir->getTemp(0));
+ Register argsObj = ToRegister(lir->getArgsObject());
+ ValueOperand out = ToOutValue(lir);
+
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp);
+ Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value));
+ masm.loadValue(argAddr, out);
+#ifdef DEBUG
+ Label success;
+ masm.branchTestMagic(Assembler::NotEqual, out, &success);
+ masm.assumeUnreachable("Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
+ masm.bind(&success);
+#endif
+}
+
+void
+CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir)
+{
+ Register temp = ToRegister(lir->getTemp(0));
+ Register argsObj = ToRegister(lir->getArgsObject());
+ ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
+
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp);
+ Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value));
+ emitPreBarrier(argAddr);
+#ifdef DEBUG
+ Label success;
+ masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
+ masm.assumeUnreachable("Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
+ masm.bind(&success);
+#endif
+ masm.storeValue(value, argAddr);
+}
+
+void
+CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir)
+{
+ ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
+ Register obj = ToRegister(lir->getObject());
+ Register output = ToRegister(lir->output());
+
+ Label valueIsObject, end;
+
+ masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
+
+    // Value is not an object. Return the |this| object (obj) instead.
+ masm.movePtr(obj, output);
+ masm.jump(&end);
+
+ // Value is an object. Return unbox(Value).
+ masm.bind(&valueIsObject);
+ Register payload = masm.extractObject(value, output);
+ if (payload != output)
+ masm.movePtr(payload, output);
+
+ masm.bind(&end);
+}
+
+typedef bool (*BoxNonStrictThisFn)(JSContext*, HandleValue, MutableHandleValue);
+static const VMFunction BoxNonStrictThisInfo =
+ FunctionInfo<BoxNonStrictThisFn>(BoxNonStrictThis, "BoxNonStrictThis");
+
+void
+CodeGenerator::visitComputeThis(LComputeThis* lir)
+{
+ ValueOperand value = ToValue(lir, LComputeThis::ValueIndex);
+ ValueOperand output = ToOutValue(lir);
+
+ OutOfLineCode* ool = oolCallVM(BoxNonStrictThisInfo, lir, ArgList(value), StoreValueTo(output));
+
+ masm.branchTestObject(Assembler::NotEqual, value, ool->entry());
+ masm.moveValue(value, output);
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitArrowNewTarget(LArrowNewTarget* lir)
+{
+ Register callee = ToRegister(lir->callee());
+ ValueOperand output = ToOutValue(lir);
+ masm.loadValue(Address(callee, FunctionExtended::offsetOfArrowNewTargetSlot()), output);
+}
+
+void
+CodeGenerator::visitArrayLength(LArrayLength* lir)
+{
+ Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
+ masm.load32(length, ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitSetArrayLength(LSetArrayLength* lir)
+{
+ Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
+ RegisterOrInt32Constant newLength = ToRegisterOrInt32Constant(lir->index());
+
+ masm.inc32(&newLength);
+ masm.store32(newLength, length);
+    // Restore the register's value in case it is used/captured afterwards.
+ masm.dec32(&newLength);
+}
+
+template <class OrderedHashTable>
+static void
+RangeFront(MacroAssembler&, Register, Register, Register);
+
+template <>
+void
+RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i, Register front)
+{
+ masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
+ masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
+
+ static_assert(ValueMap::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
+ static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
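+    // Compute front = data + i * sizeof(Data), using i * 24 == (i * 3) << 3.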
+ masm.mulBy3(i, i);
+ masm.lshiftPtr(Imm32(3), i);
+ masm.addPtr(i, front);
+}
+
+template <>
+void
+RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i, Register front)
+{
+ masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
+ masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
+
+ static_assert(ValueSet::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
+ static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
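+    // Compute front = data + i * sizeof(Data), using i * 16 == i << 4.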
+ masm.lshiftPtr(Imm32(4), i);
+ masm.addPtr(i, front);
+}
+
+template <class OrderedHashTable>
+static void
+RangePopFront(MacroAssembler& masm, Register range, Register front, Register dataLength,
+ Register temp)
+{
+ Register i = temp;
+
+ masm.add32(Imm32(1), Address(range, OrderedHashTable::Range::offsetOfCount()));
+
+ masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
+ masm.add32(Imm32(1), i);
+
+ Label done, seek;
+ masm.bind(&seek);
+ masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
+
+ // We can add sizeof(Data) to |front| to select the next element, because
+ // |front| and |range.ht.data[i]| point to the same location.
+ static_assert(OrderedHashTable::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
+ masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
+
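+    // Keep advancing until the entry's key is no longer the empty-key magic,
+    // i.e. until we find an entry that has not been removed.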
+ masm.branchTestMagic(Assembler::NotEqual, Address(front, OrderedHashTable::offsetOfEntryKey()),
+ JS_HASH_KEY_EMPTY, &done);
+
+ masm.add32(Imm32(1), i);
+ masm.jump(&seek);
+
+ masm.bind(&done);
+ masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
+}
+
+template <class OrderedHashTable>
+static inline void
+RangeDestruct(MacroAssembler& masm, Register range, Register temp0, Register temp1)
+{
+ Register next = temp0;
+ Register prevp = temp1;
+
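+    // Unlink this Range from its doubly-linked list: *prevp = next, and if
+    // next is non-null, next->prevp = prevp.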
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
+ masm.storePtr(next, Address(prevp, 0));
+
+ Label hasNoNext;
+ masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
+
+ masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
+
+ masm.bind(&hasNoNext);
+
+ masm.callFreeStub(range);
+}
+
+template <>
+void
+CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result, Register temp, Register front)
+{
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+
+ Address keyAddress(front, ValueMap::Entry::offsetOfKey());
+ Address valueAddress(front, ValueMap::Entry::offsetOfValue());
+ Address keyElemAddress(result, elementsOffset);
+ Address valueElemAddress(result, elementsOffset + sizeof(Value));
+ masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value);
+ masm.patchableCallPreBarrier(valueElemAddress, MIRType::Value);
+ masm.storeValue(keyAddress, keyElemAddress, temp);
+ masm.storeValue(valueAddress, valueElemAddress, temp);
+
+ Label keyIsNotObject, valueIsNotNurseryObject, emitBarrier;
+ masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject);
+ masm.branchValueIsNurseryObject(Assembler::Equal, keyAddress, temp, &emitBarrier);
+ masm.bind(&keyIsNotObject);
+ masm.branchTestObject(Assembler::NotEqual, valueAddress, &valueIsNotNurseryObject);
+ masm.branchValueIsNurseryObject(Assembler::NotEqual, valueAddress, temp,
+ &valueIsNotNurseryObject);
+ {
+ masm.bind(&emitBarrier);
+ saveVolatile(temp);
+ emitPostWriteBarrier(result);
+ restoreVolatile(temp);
+ }
+ masm.bind(&valueIsNotNurseryObject);
+}
+
+template <>
+void
+CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result, Register temp, Register front)
+{
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+
+ Address keyAddress(front, ValueSet::offsetOfEntryKey());
+ Address keyElemAddress(result, elementsOffset);
+ masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value);
+ masm.storeValue(keyAddress, keyElemAddress, temp);
+
+ Label keyIsNotObject;
+ masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject);
+ masm.branchValueIsNurseryObject(Assembler::NotEqual, keyAddress, temp, &keyIsNotObject);
+ {
+ saveVolatile(temp);
+ emitPostWriteBarrier(result);
+ restoreVolatile(temp);
+ }
+ masm.bind(&keyIsNotObject);
+}
+
+template <class IteratorObject, class OrderedHashTable>
+void
+CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir)
+{
+ Register iter = ToRegister(lir->iter());
+ Register result = ToRegister(lir->result());
+ Register temp = ToRegister(lir->temp0());
+ Register dataLength = ToRegister(lir->temp1());
+ Register range = ToRegister(lir->temp2());
+ Register output = ToRegister(lir->output());
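+    // |output| receives the iterator's "done" flag: 0 if an entry was copied
+    // into |result|, 1 if the range is exhausted.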
+
+ masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)),
+ range);
+
+ Label iterAlreadyDone, iterDone, done;
+ masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
+
+ masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()), dataLength);
+ masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()), dataLength);
+ masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
+ {
+ masm.push(iter);
+
+ Register front = iter;
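+        // |iter| was pushed above, so its register can be reused to hold the
+        // front entry pointer until it is popped again.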
+ RangeFront<OrderedHashTable>(masm, range, temp, front);
+
+ emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
+
+ RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
+
+ masm.pop(iter);
+ masm.move32(Imm32(0), output);
+ }
+ masm.jump(&done);
+ {
+ masm.bind(&iterDone);
+
+ RangeDestruct<OrderedHashTable>(masm, range, temp, dataLength);
+
+ masm.storeValue(PrivateValue(nullptr),
+ Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)));
+
+ masm.bind(&iterAlreadyDone);
+
+ masm.move32(Imm32(1), output);
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitGetNextEntryForIterator(LGetNextEntryForIterator* lir)
+{
+ if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
+ emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
+ } else {
+ MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
+ emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
+ }
+}
+
+void
+CodeGenerator::visitTypedArrayLength(LTypedArrayLength* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), out);
+}
+
+void
+CodeGenerator::visitTypedArrayElements(LTypedArrayElements* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), out);
+}
+
+void
+CodeGenerator::visitSetDisjointTypedElements(LSetDisjointTypedElements* lir)
+{
+ Register target = ToRegister(lir->target());
+ Register targetOffset = ToRegister(lir->targetOffset());
+ Register source = ToRegister(lir->source());
+
+ Register temp = ToRegister(lir->temp());
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(target);
+ masm.passABIArg(targetOffset);
+ masm.passABIArg(source);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::SetDisjointTypedElements));
+}
+
+void
+CodeGenerator::visitTypedObjectDescr(LTypedObjectDescr* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), out);
+ masm.loadPtr(Address(out, ObjectGroup::offsetOfAddendum()), out);
+}
+
+void
+CodeGenerator::visitTypedObjectElements(LTypedObjectElements* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+
+ if (lir->mir()->definitelyOutline()) {
+ masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), out);
+ } else {
+ Label inlineObject, done;
+ masm.loadObjClass(obj, out);
+ masm.branchPtr(Assembler::Equal, out, ImmPtr(&InlineOpaqueTypedObject::class_), &inlineObject);
+ masm.branchPtr(Assembler::Equal, out, ImmPtr(&InlineTransparentTypedObject::class_), &inlineObject);
+
+ masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), out);
+ masm.jump(&done);
+
+ masm.bind(&inlineObject);
+ masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), out);
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGenerator::visitSetTypedObjectOffset(LSetTypedObjectOffset* lir)
+{
+ Register object = ToRegister(lir->object());
+ Register offset = ToRegister(lir->offset());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ // Compute the base pointer for the typed object's owner.
+ masm.loadPtr(Address(object, OutlineTypedObject::offsetOfOwner()), temp0);
+
+ Label inlineObject, done;
+ masm.loadObjClass(temp0, temp1);
+ masm.branchPtr(Assembler::Equal, temp1, ImmPtr(&InlineOpaqueTypedObject::class_), &inlineObject);
+ masm.branchPtr(Assembler::Equal, temp1, ImmPtr(&InlineTransparentTypedObject::class_), &inlineObject);
+
+ masm.loadPrivate(Address(temp0, ArrayBufferObject::offsetOfDataSlot()), temp0);
+ masm.jump(&done);
+
+ masm.bind(&inlineObject);
+ masm.addPtr(ImmWord(InlineTypedObject::offsetOfDataStart()), temp0);
+
+ masm.bind(&done);
+
+ // Compute the new data pointer and set it in the object.
+ masm.addPtr(offset, temp0);
+ masm.storePtr(temp0, Address(object, OutlineTypedObject::offsetOfData()));
+}
+
+void
+CodeGenerator::visitStringLength(LStringLength* lir)
+{
+ Register input = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+
+ masm.loadStringLength(input, output);
+}
+
+void
+CodeGenerator::visitMinMaxI(LMinMaxI* ins)
+{
+ Register first = ToRegister(ins->first());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT(first == output);
+
+ Label done;
+ Assembler::Condition cond = ins->mir()->isMax()
+ ? Assembler::GreaterThan
+ : Assembler::LessThan;
+
+ if (ins->second()->isConstant()) {
+ masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
+ masm.move32(Imm32(ToInt32(ins->second())), output);
+ } else {
+ masm.branch32(cond, first, ToRegister(ins->second()), &done);
+ masm.move32(ToRegister(ins->second()), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitAbsI(LAbsI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Label positive;
+
+ MOZ_ASSERT(input == ToRegister(ins->output()));
+ masm.branchTest32(Assembler::NotSigned, input, input, &positive);
+ masm.neg32(input);
+ LSnapshot* snapshot = ins->snapshot();
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (snapshot)
+ bailoutCmp32(Assembler::Equal, input, Imm32(INT32_MIN), snapshot);
+#else
+ if (snapshot)
+ bailoutIf(Assembler::Overflow, snapshot);
+#endif
+ masm.bind(&positive);
+}
+
+void
+CodeGenerator::visitPowI(LPowI* ins)
+{
+ FloatRegister value = ToFloatRegister(ins->value());
+ Register power = ToRegister(ins->power());
+ Register temp = ToRegister(ins->temp());
+
+ MOZ_ASSERT(power != temp);
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(value, MoveOp::DOUBLE);
+ masm.passABIArg(power);
+
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::powi), MoveOp::DOUBLE);
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+}
+
+void
+CodeGenerator::visitPowD(LPowD* ins)
+{
+ FloatRegister value = ToFloatRegister(ins->value());
+ FloatRegister power = ToFloatRegister(ins->power());
+ Register temp = ToRegister(ins->temp());
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(value, MoveOp::DOUBLE);
+ masm.passABIArg(power, MoveOp::DOUBLE);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaPow), MoveOp::DOUBLE);
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+}
+
+void
+CodeGenerator::visitMathFunctionD(LMathFunctionD* ins)
+{
+ Register temp = ToRegister(ins->temp());
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+
+ masm.setupUnalignedABICall(temp);
+
+ const MathCache* mathCache = ins->mir()->cache();
+ if (mathCache) {
+ masm.movePtr(ImmPtr(mathCache), temp);
+ masm.passABIArg(temp);
+ }
+ masm.passABIArg(input, MoveOp::DOUBLE);
+
+# define MAYBE_CACHED(fcn) (mathCache ? (void*)fcn ## _impl : (void*)fcn ## _uncached)
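+    // MAYBE_CACHED selects the MathCache-backed _impl variant when a cache was
+    // passed as the first ABI argument, and the _uncached fallback otherwise.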
+
+ void* funptr = nullptr;
+ switch (ins->mir()->function()) {
+ case MMathFunction::Log:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log));
+ break;
+ case MMathFunction::Sin:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sin));
+ break;
+ case MMathFunction::Cos:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cos));
+ break;
+ case MMathFunction::Exp:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_exp));
+ break;
+ case MMathFunction::Tan:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_tan));
+ break;
+ case MMathFunction::ATan:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_atan));
+ break;
+ case MMathFunction::ASin:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_asin));
+ break;
+ case MMathFunction::ACos:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_acos));
+ break;
+ case MMathFunction::Log10:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log10));
+ break;
+ case MMathFunction::Log2:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log2));
+ break;
+ case MMathFunction::Log1P:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log1p));
+ break;
+ case MMathFunction::ExpM1:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_expm1));
+ break;
+ case MMathFunction::CosH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cosh));
+ break;
+ case MMathFunction::SinH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sinh));
+ break;
+ case MMathFunction::TanH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_tanh));
+ break;
+ case MMathFunction::ACosH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_acosh));
+ break;
+ case MMathFunction::ASinH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_asinh));
+ break;
+ case MMathFunction::ATanH:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_atanh));
+ break;
+ case MMathFunction::Sign:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sign));
+ break;
+ case MMathFunction::Trunc:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_trunc));
+ break;
+ case MMathFunction::Cbrt:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cbrt));
+ break;
+ case MMathFunction::Floor:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_floor_impl);
+ break;
+ case MMathFunction::Ceil:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_ceil_impl);
+ break;
+ case MMathFunction::Round:
+ funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_round_impl);
+ break;
+ default:
+ MOZ_CRASH("Unknown math function");
+ }
+
+# undef MAYBE_CACHED
+
+ masm.callWithABI(funptr, MoveOp::DOUBLE);
+}
+
+void
+CodeGenerator::visitMathFunctionF(LMathFunctionF* ins)
+{
+ Register temp = ToRegister(ins->temp());
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(input, MoveOp::FLOAT32);
+
+ void* funptr = nullptr;
+ switch (ins->mir()->function()) {
+ case MMathFunction::Floor: funptr = JS_FUNC_TO_DATA_PTR(void*, floorf); break;
+ case MMathFunction::Round: funptr = JS_FUNC_TO_DATA_PTR(void*, math_roundf_impl); break;
+ case MMathFunction::Ceil: funptr = JS_FUNC_TO_DATA_PTR(void*, ceilf); break;
+ default:
+ MOZ_CRASH("Unknown or unsupported float32 math function");
+ }
+
+ masm.callWithABI(funptr, MoveOp::FLOAT32);
+}
+
+void
+CodeGenerator::visitModD(LModD* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ Register temp = ToRegister(ins->temp());
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs, MoveOp::DOUBLE);
+ masm.passABIArg(rhs, MoveOp::DOUBLE);
+
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::ModD, MoveOp::DOUBLE);
+ else
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
+}
+
+typedef bool (*BinaryFn)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue);
+
+static const VMFunction AddInfo = FunctionInfo<BinaryFn>(js::AddValues, "AddValues");
+static const VMFunction SubInfo = FunctionInfo<BinaryFn>(js::SubValues, "SubValues");
+static const VMFunction MulInfo = FunctionInfo<BinaryFn>(js::MulValues, "MulValues");
+static const VMFunction DivInfo = FunctionInfo<BinaryFn>(js::DivValues, "DivValues");
+static const VMFunction ModInfo = FunctionInfo<BinaryFn>(js::ModValues, "ModValues");
+static const VMFunction UrshInfo = FunctionInfo<BinaryFn>(js::UrshValues, "UrshValues");
+
+void
+CodeGenerator::visitBinaryV(LBinaryV* lir)
+{
+ pushArg(ToValue(lir, LBinaryV::RhsInput));
+ pushArg(ToValue(lir, LBinaryV::LhsInput));
+
+ switch (lir->jsop()) {
+ case JSOP_ADD:
+ callVM(AddInfo, lir);
+ break;
+
+ case JSOP_SUB:
+ callVM(SubInfo, lir);
+ break;
+
+ case JSOP_MUL:
+ callVM(MulInfo, lir);
+ break;
+
+ case JSOP_DIV:
+ callVM(DivInfo, lir);
+ break;
+
+ case JSOP_MOD:
+ callVM(ModInfo, lir);
+ break;
+
+ case JSOP_URSH:
+ callVM(UrshInfo, lir);
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected binary op");
+ }
+}
+
+typedef bool (*StringCompareFn)(JSContext*, HandleString, HandleString, bool*);
+static const VMFunction StringsEqualInfo =
+ FunctionInfo<StringCompareFn>(jit::StringsEqual<true>, "StringsEqual");
+static const VMFunction StringsNotEqualInfo =
+ FunctionInfo<StringCompareFn>(jit::StringsEqual<false>, "StringsEqual");
+
+void
+CodeGenerator::emitCompareS(LInstruction* lir, JSOp op, Register left, Register right,
+ Register output)
+{
+ MOZ_ASSERT(lir->isCompareS() || lir->isCompareStrictS());
+
+ OutOfLineCode* ool = nullptr;
+
+ if (op == JSOP_EQ || op == JSOP_STRICTEQ) {
+ ool = oolCallVM(StringsEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output));
+ } else {
+ MOZ_ASSERT(op == JSOP_NE || op == JSOP_STRICTNE);
+ ool = oolCallVM(StringsNotEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output));
+ }
+
+ masm.compareStrings(op, left, right, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitCompareStrictS(LCompareStrictS* lir)
+{
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
+
+ const ValueOperand leftV = ToValue(lir, LCompareStrictS::Lhs);
+ Register right = ToRegister(lir->right());
+ Register output = ToRegister(lir->output());
+ Register tempToUnbox = ToTempUnboxRegister(lir->tempToUnbox());
+
+ Label string, done;
+
+ masm.branchTestString(Assembler::Equal, leftV, &string);
+ masm.move32(Imm32(op == JSOP_STRICTNE), output);
+ masm.jump(&done);
+
+ masm.bind(&string);
+ Register left = masm.extractString(leftV, tempToUnbox);
+ emitCompareS(lir, op, left, right, output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitCompareS(LCompareS* lir)
+{
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ Register right = ToRegister(lir->right());
+ Register output = ToRegister(lir->output());
+
+ emitCompareS(lir, op, left, right, output);
+}
+
+typedef bool (*CompareFn)(JSContext*, MutableHandleValue, MutableHandleValue, bool*);
+static const VMFunction EqInfo =
+ FunctionInfo<CompareFn>(jit::LooselyEqual<true>, "LooselyEqual");
+static const VMFunction NeInfo =
+ FunctionInfo<CompareFn>(jit::LooselyEqual<false>, "LooselyEqual");
+static const VMFunction StrictEqInfo =
+ FunctionInfo<CompareFn>(jit::StrictlyEqual<true>, "StrictlyEqual");
+static const VMFunction StrictNeInfo =
+ FunctionInfo<CompareFn>(jit::StrictlyEqual<false>, "StrictlyEqual");
+static const VMFunction LtInfo =
+ FunctionInfo<CompareFn>(jit::LessThan, "LessThan");
+static const VMFunction LeInfo =
+ FunctionInfo<CompareFn>(jit::LessThanOrEqual, "LessThanOrEqual");
+static const VMFunction GtInfo =
+ FunctionInfo<CompareFn>(jit::GreaterThan, "GreaterThan");
+static const VMFunction GeInfo =
+ FunctionInfo<CompareFn>(jit::GreaterThanOrEqual, "GreaterThanOrEqual");
+
+void
+CodeGenerator::visitCompareVM(LCompareVM* lir)
+{
+ pushArg(ToValue(lir, LBinaryV::RhsInput));
+ pushArg(ToValue(lir, LBinaryV::LhsInput));
+
+ switch (lir->mir()->jsop()) {
+ case JSOP_EQ:
+ callVM(EqInfo, lir);
+ break;
+
+ case JSOP_NE:
+ callVM(NeInfo, lir);
+ break;
+
+ case JSOP_STRICTEQ:
+ callVM(StrictEqInfo, lir);
+ break;
+
+ case JSOP_STRICTNE:
+ callVM(StrictNeInfo, lir);
+ break;
+
+ case JSOP_LT:
+ callVM(LtInfo, lir);
+ break;
+
+ case JSOP_LE:
+ callVM(LeInfo, lir);
+ break;
+
+ case JSOP_GT:
+ callVM(GtInfo, lir);
+ break;
+
+ case JSOP_GE:
+ callVM(GeInfo, lir);
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected compare op");
+ }
+}
+
+void
+CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
+{
+ JSOp op = lir->mir()->jsop();
+ MCompare::CompareType compareType = lir->mir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
+ compareType == MCompare::Compare_Null);
+
+ const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::Value);
+ Register output = ToRegister(lir->output());
+
+ if (op == JSOP_EQ || op == JSOP_NE) {
+ MOZ_ASSERT(lir->mir()->lhs()->type() != MIRType::Object ||
+ lir->mir()->operandMightEmulateUndefined(),
+ "Operands which can't emulate undefined should have been folded");
+
+ OutOfLineTestObjectWithLabels* ool = nullptr;
+ Maybe<Label> label1, label2;
+ Label* nullOrLikeUndefined;
+ Label* notNullOrLikeUndefined;
+ if (lir->mir()->operandMightEmulateUndefined()) {
+ ool = new(alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+ nullOrLikeUndefined = ool->label1();
+ notNullOrLikeUndefined = ool->label2();
+ } else {
+ label1.emplace();
+ label2.emplace();
+ nullOrLikeUndefined = label1.ptr();
+ notNullOrLikeUndefined = label2.ptr();
+ }
+
+ Register tag = masm.splitTagForTest(value);
+ MDefinition* input = lir->mir()->lhs();
+ if (input->mightBeType(MIRType::Null))
+ masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
+ if (input->mightBeType(MIRType::Undefined))
+ masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
+
+ if (ool) {
+ // Check whether it's a truthy object or a falsy object that emulates
+ // undefined.
+ masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
+
+ Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
+ branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined, notNullOrLikeUndefined,
+ ToRegister(lir->temp()), ool);
+ // fall through
+ }
+
+ Label done;
+
+ // It's not null or undefined, and if it's an object it doesn't
+ // emulate undefined, so it's not like undefined.
+ masm.move32(Imm32(op == JSOP_NE), output);
+ masm.jump(&done);
+
+ masm.bind(nullOrLikeUndefined);
+ masm.move32(Imm32(op == JSOP_EQ), output);
+
+ // Both branches meet here.
+ masm.bind(&done);
+ return;
+ }
+
+ MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ if (compareType == MCompare::Compare_Null)
+ masm.testNullSet(cond, value, output);
+ else
+ masm.testUndefinedSet(cond, value, output);
+}
+
+void
+CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(LIsNullOrLikeUndefinedAndBranchV* lir)
+{
+ JSOp op = lir->cmpMir()->jsop();
+ MCompare::CompareType compareType = lir->cmpMir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
+ compareType == MCompare::Compare_Null);
+
+ const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
+
+ if (op == JSOP_EQ || op == JSOP_NE) {
+ MBasicBlock* ifTrue;
+ MBasicBlock* ifFalse;
+
+ if (op == JSOP_EQ) {
+ ifTrue = lir->ifTrue();
+ ifFalse = lir->ifFalse();
+ } else {
+ // Swap branches.
+ ifTrue = lir->ifFalse();
+ ifFalse = lir->ifTrue();
+ op = JSOP_EQ;
+ }
+
+ MOZ_ASSERT(lir->cmpMir()->lhs()->type() != MIRType::Object ||
+ lir->cmpMir()->operandMightEmulateUndefined(),
+ "Operands which can't emulate undefined should have been folded");
+
+ OutOfLineTestObject* ool = nullptr;
+ if (lir->cmpMir()->operandMightEmulateUndefined()) {
+ ool = new(alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->cmpMir());
+ }
+
+ Register tag = masm.splitTagForTest(value);
+
+ Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
+ Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
+
+ MDefinition* input = lir->cmpMir()->lhs();
+ if (input->mightBeType(MIRType::Null))
+ masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
+ if (input->mightBeType(MIRType::Undefined))
+ masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
+
+ if (ool) {
+ masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
+
+ // Objects that emulate undefined are loosely equal to null/undefined.
+ Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
+ Register scratch = ToRegister(lir->temp());
+ testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
+ } else {
+ masm.jump(ifFalseLabel);
+ }
+ return;
+ }
+
+ MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ if (compareType == MCompare::Compare_Null)
+ testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
+ else
+ testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir)
+{
+ MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
+ lir->mir()->compareType() == MCompare::Compare_Null);
+
+ MIRType lhsType = lir->mir()->lhs()->type();
+ MOZ_ASSERT(lhsType == MIRType::Object || lhsType == MIRType::ObjectOrNull);
+
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
+ "Strict equality should have been folded");
+
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
+ "If the object couldn't emulate undefined, this should have been folded.");
+
+ Register objreg = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ if ((op == JSOP_EQ || op == JSOP_NE) && lir->mir()->operandMightEmulateUndefined()) {
+ OutOfLineTestObjectWithLabels* ool = new(alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* emulatesUndefined = ool->label1();
+ Label* doesntEmulateUndefined = ool->label2();
+
+ if (lhsType == MIRType::ObjectOrNull)
+ masm.branchTestPtr(Assembler::Zero, objreg, objreg, emulatesUndefined);
+
+ branchTestObjectEmulatesUndefined(objreg, emulatesUndefined, doesntEmulateUndefined,
+ output, ool);
+
+ Label done;
+
+ masm.move32(Imm32(op == JSOP_NE), output);
+ masm.jump(&done);
+
+ masm.bind(emulatesUndefined);
+ masm.move32(Imm32(op == JSOP_EQ), output);
+ masm.bind(&done);
+ } else {
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull);
+
+ Label isNull, done;
+
+ masm.branchTestPtr(Assembler::Zero, objreg, objreg, &isNull);
+
+ masm.move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), output);
+ masm.jump(&done);
+
+ masm.bind(&isNull);
+ masm.move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), output);
+
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(LIsNullOrLikeUndefinedAndBranchT* lir)
+{
+ DebugOnly<MCompare::CompareType> compareType = lir->cmpMir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
+ compareType == MCompare::Compare_Null);
+
+ MIRType lhsType = lir->cmpMir()->lhs()->type();
+ MOZ_ASSERT(lhsType == MIRType::Object || lhsType == MIRType::ObjectOrNull);
+
+ JSOp op = lir->cmpMir()->jsop();
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
+ "Strict equality should have been folded");
+
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || lir->cmpMir()->operandMightEmulateUndefined(),
+ "If the object couldn't emulate undefined, this should have been folded.");
+
+ MBasicBlock* ifTrue;
+ MBasicBlock* ifFalse;
+
+ if (op == JSOP_EQ || op == JSOP_STRICTEQ) {
+ ifTrue = lir->ifTrue();
+ ifFalse = lir->ifFalse();
+ } else {
+ // Swap branches.
+ ifTrue = lir->ifFalse();
+ ifFalse = lir->ifTrue();
+ }
+
+ Register input = ToRegister(lir->getOperand(0));
+
+ if ((op == JSOP_EQ || op == JSOP_NE) && lir->cmpMir()->operandMightEmulateUndefined()) {
+ OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->cmpMir());
+
+ Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
+ Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
+
+ if (lhsType == MIRType::ObjectOrNull)
+ masm.branchTestPtr(Assembler::Zero, input, input, ifTrueLabel);
+
+ // Objects that emulate undefined are loosely equal to null/undefined.
+ Register scratch = ToRegister(lir->temp());
+ testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
+ } else {
+ MOZ_ASSERT(lhsType == MIRType::ObjectOrNull);
+ testZeroEmitBranch(Assembler::Equal, input, ifTrue, ifFalse);
+ }
+}
+
+typedef JSString* (*ConcatStringsFn)(ExclusiveContext*, HandleString, HandleString);
+static const VMFunction ConcatStringsInfo =
+ FunctionInfo<ConcatStringsFn>(ConcatStrings<CanGC>, "ConcatStrings");
+
+void
+CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs, Register output)
+{
+ OutOfLineCode* ool = oolCallVM(ConcatStringsInfo, lir, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ JitCode* stringConcatStub = gen->compartment->jitCompartment()->stringConcatStubNoBarrier();
+ masm.call(stringConcatStub);
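+    // The stub returns nullptr on failure (e.g. allocation failure); fall back
+    // to the ConcatStrings VM call in that case.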
+ masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitConcat(LConcat* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(lhs == CallTempReg0);
+ MOZ_ASSERT(rhs == CallTempReg1);
+ MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg0);
+ MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg1);
+ MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg2);
+ MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg3);
+ MOZ_ASSERT(ToRegister(lir->temp5()) == CallTempReg4);
+ MOZ_ASSERT(output == CallTempReg5);
+
+ emitConcat(lir, lhs, rhs, output);
+}
+
+static void
+CopyStringChars(MacroAssembler& masm, Register to, Register from, Register len,
+ Register byteOpScratch, size_t fromWidth, size_t toWidth)
+{
+    // Copy |len| code units of width |fromWidth| from |from| to |to|, widening
+    // them to |toWidth| if necessary. Assumes len > 0 (checked below in debug
+    // builds); when done, |to| points at the next available code unit.
+
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
+ masm.assumeUnreachable("Length should be greater than 0.");
+ masm.bind(&ok);
+#endif
+
+ MOZ_ASSERT(fromWidth == 1 || fromWidth == 2);
+ MOZ_ASSERT(toWidth == 1 || toWidth == 2);
+ MOZ_ASSERT_IF(toWidth == 1, fromWidth == 1);
+
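+    // Copy one code unit per iteration, zero-extending when inflating Latin1
+    // (width 1) input into TwoByte (width 2) output.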
+ Label start;
+ masm.bind(&start);
+ if (fromWidth == 2)
+ masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
+ else
+ masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
+ if (toWidth == 2)
+ masm.store16(byteOpScratch, Address(to, 0));
+ else
+ masm.store8(byteOpScratch, Address(to, 0));
+ masm.addPtr(Imm32(fromWidth), from);
+ masm.addPtr(Imm32(toWidth), to);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
+}
+
+static void
+CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input, Register destChars,
+ Register temp1, Register temp2)
+{
+ // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
+ // have to inflate.
+
+ Label isLatin1, done;
+ masm.loadStringLength(input, temp1);
+ masm.branchLatin1String(input, &isLatin1);
+ {
+ masm.loadStringChars(input, input);
+ CopyStringChars(masm, destChars, input, temp1, temp2, sizeof(char16_t), sizeof(char16_t));
+ masm.jump(&done);
+ }
+ masm.bind(&isLatin1);
+ {
+ masm.loadStringChars(input, input);
+ CopyStringChars(masm, destChars, input, temp1, temp2, sizeof(char), sizeof(char16_t));
+ }
+ masm.bind(&done);
+}
+
+static void
+ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs, Register output,
+ Register temp1, Register temp2, Register temp3,
+ Label* failure, Label* failurePopTemps, bool isTwoByte)
+{
+ // State: result length in temp2.
+
+ // Ensure both strings are linear.
+ masm.branchIfRope(lhs, failure);
+ masm.branchIfRope(rhs, failure);
+
+ // Allocate a JSThinInlineString or JSFatInlineString.
+ size_t maxThinInlineLength;
+ if (isTwoByte)
+ maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
+ else
+ maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
+
+ Label isFat, allocDone;
+ masm.branch32(Assembler::Above, temp2, Imm32(maxThinInlineLength), &isFat);
+ {
+ uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
+ if (!isTwoByte)
+ flags |= JSString::LATIN1_CHARS_BIT;
+ masm.newGCString(output, temp1, failure);
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ masm.jump(&allocDone);
+ }
+ masm.bind(&isFat);
+ {
+ uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
+ if (!isTwoByte)
+ flags |= JSString::LATIN1_CHARS_BIT;
+ masm.newGCFatInlineString(output, temp1, failure);
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ }
+ masm.bind(&allocDone);
+
+ // Store length.
+ masm.store32(temp2, Address(output, JSString::offsetOfLength()));
+
+ // Load chars pointer in temp2.
+ masm.computeEffectiveAddress(Address(output, JSInlineString::offsetOfInlineStorage()), temp2);
+
+ {
+ // Copy lhs chars. Note that this advances temp2 to point to the next
+ // char. This also clobbers the lhs register.
+ if (isTwoByte) {
+ CopyStringCharsMaybeInflate(masm, lhs, temp2, temp1, temp3);
+ } else {
+ masm.loadStringLength(lhs, temp3);
+ masm.loadStringChars(lhs, lhs);
+ CopyStringChars(masm, temp2, lhs, temp3, temp1, sizeof(char), sizeof(char));
+ }
+
+ // Copy rhs chars. Clobbers the rhs register.
+ if (isTwoByte) {
+ CopyStringCharsMaybeInflate(masm, rhs, temp2, temp1, temp3);
+ } else {
+ masm.loadStringLength(rhs, temp3);
+ masm.loadStringChars(rhs, rhs);
+ CopyStringChars(masm, temp2, rhs, temp3, temp1, sizeof(char), sizeof(char));
+ }
+
+ // Null-terminate.
+ if (isTwoByte)
+ masm.store16(Imm32(0), Address(temp2, 0));
+ else
+ masm.store8(Imm32(0), Address(temp2, 0));
+ }
+
+ masm.ret();
+}
+
+typedef JSString* (*SubstringKernelFn)(JSContext* cx, HandleString str, int32_t begin, int32_t len);
+static const VMFunction SubstringKernelInfo =
+ FunctionInfo<SubstringKernelFn>(SubstringKernel, "SubstringKernel");
+
+void
+CodeGenerator::visitSubstr(LSubstr* lir)
+{
+ Register string = ToRegister(lir->string());
+ Register begin = ToRegister(lir->begin());
+ Register length = ToRegister(lir->length());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp());
+ Register temp3 = ToRegister(lir->temp3());
+
+    // On x86 there are not enough registers. In that case reuse the string
+    // register as a temporary.
+ Register temp2 = lir->temp2()->isBogusTemp() ? string : ToRegister(lir->temp2());
+
+ Address stringFlags(string, JSString::offsetOfFlags());
+
+ Label isLatin1, notInline, nonZero, isInlinedLatin1;
+
+    // For every edge case use the C++ variant.
+    // Note: we also use this upon allocation failure in newGCString and
+    // newGCFatInlineString. To squeeze out even more performance, those
+    // failures could be handled by allocating in OOL code and returning to JIT
+    // code to fill in all data.
+ OutOfLineCode* ool = oolCallVM(SubstringKernelInfo, lir,
+ ArgList(string, begin, length),
+ StoreRegisterTo(output));
+ Label* slowPath = ool->entry();
+ Label* done = ool->rejoin();
+
+    // Zero length: return the empty string.
+ masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ masm.movePtr(ImmGCPtr(names.empty), output);
+ masm.jump(done);
+
+ // Use slow path for ropes.
+ masm.bind(&nonZero);
+ masm.branchIfRopeOrExternal(string, temp, slowPath);
+
+    // Handle inline strings by creating a FatInlineString.
+ masm.branchTest32(Assembler::Zero, stringFlags, Imm32(JSString::INLINE_CHARS_BIT), &notInline);
+ masm.newGCFatInlineString(output, temp, slowPath);
+ masm.store32(length, Address(output, JSString::offsetOfLength()));
+ Address stringStorage(string, JSInlineString::offsetOfInlineStorage());
+ Address outputStorage(output, JSInlineString::offsetOfInlineStorage());
+
+ masm.branchLatin1String(string, &isInlinedLatin1);
+ {
+ masm.store32(Imm32(JSString::INIT_FAT_INLINE_FLAGS),
+ Address(output, JSString::offsetOfFlags()));
+ masm.computeEffectiveAddress(stringStorage, temp);
+ if (temp2 == string)
+ masm.push(string);
+ BaseIndex chars(temp, begin, ScaleFromElemWidth(sizeof(char16_t)));
+ masm.computeEffectiveAddress(chars, temp2);
+ masm.computeEffectiveAddress(outputStorage, temp);
+ CopyStringChars(masm, temp, temp2, length, temp3, sizeof(char16_t), sizeof(char16_t));
+ masm.load32(Address(output, JSString::offsetOfLength()), length);
+ masm.store16(Imm32(0), Address(temp, 0));
+ if (temp2 == string)
+ masm.pop(string);
+ masm.jump(done);
+ }
+ masm.bind(&isInlinedLatin1);
+ {
+ masm.store32(Imm32(JSString::INIT_FAT_INLINE_FLAGS | JSString::LATIN1_CHARS_BIT),
+ Address(output, JSString::offsetOfFlags()));
+ if (temp2 == string)
+ masm.push(string);
+ masm.computeEffectiveAddress(stringStorage, temp2);
+ static_assert(sizeof(char) == 1, "begin index shouldn't need scaling");
+ masm.addPtr(begin, temp2);
+ masm.computeEffectiveAddress(outputStorage, temp);
+ CopyStringChars(masm, temp, temp2, length, temp3, sizeof(char), sizeof(char));
+ masm.load32(Address(output, JSString::offsetOfLength()), length);
+ masm.store8(Imm32(0), Address(temp, 0));
+ if (temp2 == string)
+ masm.pop(string);
+ masm.jump(done);
+ }
+
+ // Handle other cases with a DependentString.
+ masm.bind(&notInline);
+ masm.newGCString(output, temp, slowPath);
+ masm.store32(length, Address(output, JSString::offsetOfLength()));
+ masm.storePtr(string, Address(output, JSDependentString::offsetOfBase()));
+
+ masm.branchLatin1String(string, &isLatin1);
+ {
+ masm.store32(Imm32(JSString::DEPENDENT_FLAGS), Address(output, JSString::offsetOfFlags()));
+ masm.loadPtr(Address(string, JSString::offsetOfNonInlineChars()), temp);
+ BaseIndex chars(temp, begin, ScaleFromElemWidth(sizeof(char16_t)));
+ masm.computeEffectiveAddress(chars, temp);
+ masm.storePtr(temp, Address(output, JSString::offsetOfNonInlineChars()));
+ masm.jump(done);
+ }
+ masm.bind(&isLatin1);
+ {
+ masm.store32(Imm32(JSString::DEPENDENT_FLAGS | JSString::LATIN1_CHARS_BIT),
+ Address(output, JSString::offsetOfFlags()));
+ masm.loadPtr(Address(string, JSString::offsetOfNonInlineChars()), temp);
+ static_assert(sizeof(char) == 1, "begin index shouldn't need scaling");
+ masm.addPtr(begin, temp);
+ masm.storePtr(temp, Address(output, JSString::offsetOfNonInlineChars()));
+ masm.jump(done);
+ }
+
+ masm.bind(done);
+}
+
+JitCode*
+JitCompartment::generateStringConcatStub(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ Register lhs = CallTempReg0;
+ Register rhs = CallTempReg1;
+ Register temp1 = CallTempReg2;
+ Register temp2 = CallTempReg3;
+ Register temp3 = CallTempReg4;
+ Register output = CallTempReg5;
+
+ Label failure, failurePopTemps;
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ // If lhs is empty, return rhs.
+ Label leftEmpty;
+ masm.loadStringLength(lhs, temp1);
+ masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
+
+ // If rhs is empty, return lhs.
+ Label rightEmpty;
+ masm.loadStringLength(rhs, temp2);
+ masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
+
+ masm.add32(temp1, temp2);
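+ // temp2 now holds the total length of the concatenated result.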
+
+ // Check if we can use a JSFatInlineString. The result is a Latin1 string if
+ // lhs and rhs are both Latin1, so we AND the flags.
+ Label isFatInlineTwoByte, isFatInlineLatin1;
+ masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
+ masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
+
+ Label isLatin1, notInline;
+ masm.branchTest32(Assembler::NonZero, temp1, Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
+ {
+ masm.branch32(Assembler::BelowOrEqual, temp2, Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
+ &isFatInlineTwoByte);
+ masm.jump(&notInline);
+ }
+ masm.bind(&isLatin1);
+ {
+ masm.branch32(Assembler::BelowOrEqual, temp2, Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
+ &isFatInlineLatin1);
+ }
+ masm.bind(&notInline);
+
+ // Keep AND'ed flags in temp1.
+
+ // Ensure result length <= JSString::MAX_LENGTH.
+ masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
+
+ // Allocate a new rope.
+ masm.newGCString(output, temp3, &failure);
+
+ // Store rope length and flags. temp1 still holds the result of AND'ing the
+ // lhs and rhs flags, so we just have to clear the other flags to get our
+ // rope flags (Latin1 if both lhs and rhs are Latin1).
+ static_assert(JSString::ROPE_FLAGS == 0, "Rope flags must be 0");
+ masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
+ masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
+ masm.store32(temp2, Address(output, JSString::offsetOfLength()));
+
+ // Store left and right nodes.
+ masm.storePtr(lhs, Address(output, JSRope::offsetOfLeft()));
+ masm.storePtr(rhs, Address(output, JSRope::offsetOfRight()));
+ masm.ret();
+
+ masm.bind(&leftEmpty);
+ masm.mov(rhs, output);
+ masm.ret();
+
+ masm.bind(&rightEmpty);
+ masm.mov(lhs, output);
+ masm.ret();
+
+ masm.bind(&isFatInlineTwoByte);
+ ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+ &failure, &failurePopTemps, true);
+
+ masm.bind(&isFatInlineLatin1);
+ ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+ &failure, &failurePopTemps, false);
+
+ masm.bind(&failurePopTemps);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ masm.bind(&failure);
+ masm.movePtr(ImmPtr(nullptr), output);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("StringConcatStub");
+ JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "StringConcatStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateMallocStub(JSContext* cx)
+{
+ const Register regReturn = CallTempReg0;
+ const Register regNBytes = CallTempReg0;
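+ // The byte-count argument and the returned pointer share CallTempReg0.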
+
+ MacroAssembler masm(cx);
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ regs.takeUnchecked(regNBytes);
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ const Register regTemp = regs.takeAnyGeneral();
+ const Register regRuntime = regTemp;
+ MOZ_ASSERT(regTemp != regNBytes);
+
+ masm.setupUnalignedABICall(regTemp);
+ masm.movePtr(ImmPtr(cx->runtime()), regRuntime);
+ masm.passABIArg(regRuntime);
+ masm.passABIArg(regNBytes);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, MallocWrapper));
+ masm.storeCallPointerResult(regReturn);
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("MallocStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "MallocStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateFreeStub(JSContext* cx)
+{
+ const Register regSlots = CallTempReg0;
+
+ MacroAssembler masm(cx);
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(regSlots);
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ const Register regTemp = regs.takeAnyGeneral();
+ MOZ_ASSERT(regTemp != regSlots);
+
+ masm.setupUnalignedABICall(regTemp);
+ masm.passABIArg(regSlots);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js_free));
+
+ masm.PopRegsInMask(save);
+
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("FreeStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "FreeStub");
+#endif
+
+ return code;
+}
+
+
+JitCode*
+JitRuntime::generateLazyLinkStub(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ Register temp0 = regs.takeAny();
+
+ masm.enterFakeExitFrame(LazyLinkExitFrameLayoutToken);
+ masm.PushStubCode();
+
+ masm.setupUnalignedABICall(temp0);
+ masm.loadJSContext(temp0);
+ masm.passABIArg(temp0);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, LazyLinkTopActivation));
+
+ masm.leaveExitFrame(/* stub code */ sizeof(JitCode*));
+
+#ifdef JS_USE_LINK_REGISTER
+ // Restore the return address such that the emitPrologue function of the
+ // CodeGenerator can push it back on the stack with pushReturnAddress.
+ masm.popReturnAddress();
+#endif
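+ // The ABI call leaves the entry point of the lazily linked code in
+ // ReturnReg; jump straight into it.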
+ masm.jump(ReturnReg);
+
+ Linker linker(masm);
+ AutoFlushICache afc("LazyLinkStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "LazyLinkStub");
+#endif
+ return code;
+}
+
+bool
+JitRuntime::generateTLEventVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f,
+ bool enter)
+{
+#ifdef JS_TRACE_LOGGING
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+
+ bool vmEventEnabled = TraceLogTextIdEnabled(TraceLogger_VM);
+ bool vmSpecificEventEnabled = TraceLogTextIdEnabled(TraceLogger_VMSpecific);
+
+ if (vmEventEnabled || vmSpecificEventEnabled) {
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+ masm.Push(loggerReg);
+ masm.movePtr(ImmPtr(logger), loggerReg);
+
+ if (vmEventEnabled) {
+ if (enter)
+ masm.tracelogStartId(loggerReg, TraceLogger_VM, /* force = */ true);
+ else
+ masm.tracelogStopId(loggerReg, TraceLogger_VM, /* force = */ true);
+ }
+ if (vmSpecificEventEnabled) {
+ TraceLoggerEvent event(logger, f.name());
+ if (!event.hasPayload())
+ return false;
+
+ if (enter)
+ masm.tracelogStartId(loggerReg, event.payload()->textId(), /* force = */ true);
+ else
+ masm.tracelogStopId(loggerReg, event.payload()->textId(), /* force = */ true);
+ }
+
+ masm.Pop(loggerReg);
+ }
+#endif
+
+ return true;
+}
+
+typedef bool (*CharCodeAtFn)(JSContext*, HandleString, int32_t, uint32_t*);
+static const VMFunction CharCodeAtInfo =
+ FunctionInfo<CharCodeAtFn>(jit::CharCodeAt, "CharCodeAt");
+
+void
+CodeGenerator::visitCharCodeAt(LCharCodeAt* lir)
+{
+ Register str = ToRegister(lir->str());
+ Register index = ToRegister(lir->index());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(CharCodeAtInfo, lir, ArgList(str, index), StoreRegisterTo(output));
+
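+ // Ropes are not handled inline; take the VM call for them.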
+ masm.branchIfRope(str, ool->entry());
+ masm.loadStringChar(str, index, output);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSFlatString* (*StringFromCharCodeFn)(JSContext*, int32_t);
+static const VMFunction StringFromCharCodeInfo =
+ FunctionInfo<StringFromCharCodeFn>(jit::StringFromCharCode, "StringFromCharCode");
+
+void
+CodeGenerator::visitFromCharCode(LFromCharCode* lir)
+{
+ Register code = ToRegister(lir->code());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = oolCallVM(StringFromCharCodeInfo, lir, ArgList(code), StoreRegisterTo(output));
+
+ // OOL path if code >= UNIT_STATIC_LIMIT.
+ masm.branch32(Assembler::AboveOrEqual, code, Imm32(StaticStrings::UNIT_STATIC_LIMIT),
+ ool->entry());
+
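+ // Otherwise load the result from the static unit-string table.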
+ masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().unitStaticTable), output);
+ masm.loadPtr(BaseIndex(output, code, ScalePointer), output);
+
+ masm.bind(ool->rejoin());
+}
+
+typedef JSString* (*StringFromCodePointFn)(JSContext*, int32_t);
+static const VMFunction StringFromCodePointInfo =
+ FunctionInfo<StringFromCodePointFn>(jit::StringFromCodePoint, "StringFromCodePoint");
+
+void
+CodeGenerator::visitFromCodePoint(LFromCodePoint* lir)
+{
+ Register codePoint = ToRegister(lir->codePoint());
+ Register output = ToRegister(lir->output());
+ LSnapshot* snapshot = lir->snapshot();
+
+ OutOfLineCode* ool = oolCallVM(StringFromCodePointInfo, lir, ArgList(codePoint),
+ StoreRegisterTo(output));
+
+ // Use a bailout if the input is not a valid code point, because
+ // MFromCodePoint is movable and it'd be observable when a moved
+ // fromCodePoint throws an exception before its actual call site.
+ bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax), snapshot);
+
+ // OOL path if code point >= UNIT_STATIC_LIMIT.
+ masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(StaticStrings::UNIT_STATIC_LIMIT),
+ ool->entry());
+
+ masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().unitStaticTable), output);
+ masm.loadPtr(BaseIndex(output, codePoint, ScalePointer), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitSinCos(LSinCos* lir)
+{
+ Register temp = ToRegister(lir->temp());
+ Register params = ToRegister(lir->temp2());
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister outputSin = ToFloatRegister(lir->outputSin());
+ FloatRegister outputCos = ToFloatRegister(lir->outputCos());
+
+ masm.reserveStack(sizeof(double) * 2);
+ masm.movePtr(masm.getStackPointer(), params);
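+ // params points at two reserved stack slots: offset 0 receives cos and
+ // offset sizeof(double) receives sin (see the loads below).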
+
+ const MathCache* mathCache = lir->mir()->cache();
+
+ masm.setupUnalignedABICall(temp);
+ if (mathCache) {
+ masm.movePtr(ImmPtr(mathCache), temp);
+ masm.passABIArg(temp);
+ }
+
+#define MAYBE_CACHED_(fcn) (mathCache ? (void*)fcn ## _impl : (void*)fcn ## _uncached)
+
+ masm.passABIArg(input, MoveOp::DOUBLE);
+ masm.passABIArg(MoveOperand(params, sizeof(double), MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ masm.passABIArg(MoveOperand(params, 0, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED_(js::math_sincos)));
+#undef MAYBE_CACHED_
+
+ masm.loadDouble(Address(masm.getStackPointer(), 0), outputCos);
+ masm.loadDouble(Address(masm.getStackPointer(), sizeof(double)), outputSin);
+ masm.freeStack(sizeof(double) * 2);
+}
+
+typedef JSObject* (*StringSplitFn)(JSContext*, HandleObjectGroup, HandleString, HandleString, uint32_t);
+static const VMFunction StringSplitInfo =
+ FunctionInfo<StringSplitFn>(js::str_split_string, "str_split_string");
+
+void
+CodeGenerator::visitStringSplit(LStringSplit* lir)
+{
+ pushArg(Imm32(INT32_MAX));
+ pushArg(ToRegister(lir->separator()));
+ pushArg(ToRegister(lir->string()));
+ pushArg(ImmGCPtr(lir->mir()->group()));
+
+ callVM(StringSplitInfo, lir);
+}
+
+void
+CodeGenerator::visitInitializedLength(LInitializedLength* lir)
+{
+ Address initLength(ToRegister(lir->elements()), ObjectElements::offsetOfInitializedLength());
+ masm.load32(initLength, ToRegister(lir->output()));
+}
+
+void
+CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir)
+{
+ Address initLength(ToRegister(lir->elements()), ObjectElements::offsetOfInitializedLength());
+ RegisterOrInt32Constant index = ToRegisterOrInt32Constant(lir->index());
+
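+ // Store index + 1 as the new initialized length.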
+ masm.inc32(&index);
+ masm.store32(index, initLength);
+ // Restore the register value in case it is used/captured afterwards.
+ masm.dec32(&index);
+}
+
+void
+CodeGenerator::visitUnboxedArrayLength(LUnboxedArrayLength* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register result = ToRegister(lir->output());
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfLength()), result);
+}
+
+void
+CodeGenerator::visitUnboxedArrayInitializedLength(LUnboxedArrayInitializedLength* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register result = ToRegister(lir->output());
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()), result);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), result);
+}
+
+void
+CodeGenerator::visitIncrementUnboxedArrayInitializedLength(LIncrementUnboxedArrayInitializedLength* lir)
+{
+ Register obj = ToRegister(lir->object());
+ masm.add32(Imm32(1), Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
+}
+
+void
+CodeGenerator::visitSetUnboxedArrayInitializedLength(LSetUnboxedArrayInitializedLength* lir)
+{
+ Register obj = ToRegister(lir->object());
+ RegisterOrInt32Constant key = ToRegisterOrInt32Constant(lir->length());
+ Register temp = ToRegister(lir->temp());
+
+ Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLengthAddr, temp);
+ masm.and32(Imm32(UnboxedArrayObject::CapacityMask), temp);
+
+ if (key.isRegister())
+ masm.or32(key.reg(), temp);
+ else
+ masm.or32(Imm32(key.constant()), temp);
+
+ masm.store32(temp, initLengthAddr);
+}
+
+void
+CodeGenerator::visitNotO(LNotO* lir)
+{
+ MOZ_ASSERT(lir->mir()->operandMightEmulateUndefined(),
+ "This should be constant-folded if the object can't emulate undefined.");
+
+ OutOfLineTestObjectWithLabels* ool = new(alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* ifEmulatesUndefined = ool->label1();
+ Label* ifDoesntEmulateUndefined = ool->label2();
+
+ Register objreg = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
+ output, ool);
+ // fall through
+
+ Label join;
+
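+ // The object does not emulate undefined here, so !obj evaluates to false.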
+ masm.move32(Imm32(0), output);
+ masm.jump(&join);
+
+ masm.bind(ifEmulatesUndefined);
+ masm.move32(Imm32(1), output);
+
+ masm.bind(&join);
+}
+
+void
+CodeGenerator::visitNotV(LNotV* lir)
+{
+ Maybe<Label> ifTruthyLabel, ifFalsyLabel;
+ Label* ifTruthy;
+ Label* ifFalsy;
+
+ OutOfLineTestObjectWithLabels* ool = nullptr;
+ MDefinition* operand = lir->mir()->input();
+ // Unfortunately, it's possible that someone (e.g. phi elimination) switched
+ // out our operand after we did cacheOperandMightEmulateUndefined. So we
+ // might think it can emulate undefined _and_ know that it can't be an
+ // object.
+ if (lir->mir()->operandMightEmulateUndefined() && operand->mightBeType(MIRType::Object)) {
+ ool = new(alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+ ifTruthy = ool->label1();
+ ifFalsy = ool->label2();
+ } else {
+ ifTruthyLabel.emplace();
+ ifFalsyLabel.emplace();
+ ifTruthy = ifTruthyLabel.ptr();
+ ifFalsy = ifFalsyLabel.ptr();
+ }
+
+ testValueTruthyKernel(ToValue(lir, LNotV::Input), lir->temp1(), lir->temp2(),
+ ToFloatRegister(lir->tempFloat()),
+ ifTruthy, ifFalsy, ool, operand);
+
+ Label join;
+ Register output = ToRegister(lir->output());
+
+ // Note that the testValueTruthyKernel call above may choose to fall through
+ // to ifTruthy instead of branching there.
+ masm.bind(ifTruthy);
+ masm.move32(Imm32(0), output);
+ masm.jump(&join);
+
+ masm.bind(ifFalsy);
+ masm.move32(Imm32(1), output);
+
+ // Both branches meet here.
+ masm.bind(&join);
+}
+
+void
+CodeGenerator::visitBoundsCheck(LBoundsCheck* lir)
+{
+ const LAllocation* index = lir->index();
+ const LAllocation* length = lir->length();
+ LSnapshot* snapshot = lir->snapshot();
+
+ if (index->isConstant()) {
+ // Use uint32 so that the comparison is unsigned.
+ uint32_t idx = ToInt32(index);
+ if (length->isConstant()) {
+ uint32_t len = ToInt32(lir->length());
+ if (idx < len)
+ return;
+ bailout(snapshot);
+ return;
+ }
+
+ if (length->isRegister())
+ bailoutCmp32(Assembler::BelowOrEqual, ToRegister(length), Imm32(idx), snapshot);
+ else
+ bailoutCmp32(Assembler::BelowOrEqual, ToAddress(length), Imm32(idx), snapshot);
+ return;
+ }
+
+ Register indexReg = ToRegister(index);
+ if (length->isConstant())
+ bailoutCmp32(Assembler::AboveOrEqual, indexReg, Imm32(ToInt32(length)), snapshot);
+ else if (length->isRegister())
+ bailoutCmp32(Assembler::BelowOrEqual, ToRegister(length), indexReg, snapshot);
+ else
+ bailoutCmp32(Assembler::BelowOrEqual, ToAddress(length), indexReg, snapshot);
+}
+
+void
+CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir)
+{
+ int32_t min = lir->mir()->minimum();
+ int32_t max = lir->mir()->maximum();
+ MOZ_ASSERT(max >= min);
+
+ const LAllocation* length = lir->length();
+ LSnapshot* snapshot = lir->snapshot();
+ Register temp = ToRegister(lir->getTemp(0));
+ if (lir->index()->isConstant()) {
+ int32_t nmin, nmax;
+ int32_t index = ToInt32(lir->index());
+ if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
+ if (length->isRegister())
+ bailoutCmp32(Assembler::BelowOrEqual, ToRegister(length), Imm32(nmax), snapshot);
+ else
+ bailoutCmp32(Assembler::BelowOrEqual, ToAddress(length), Imm32(nmax), snapshot);
+ return;
+ }
+ masm.mov(ImmWord(index), temp);
+ } else {
+ masm.mov(ToRegister(lir->index()), temp);
+ }
+
+ // If the minimum and maximum differ then do an underflow check first.
+ // If the two are the same then doing an unsigned comparison on the
+ // length will also catch a negative index.
+ if (min != max) {
+ if (min != 0) {
+ Label bail;
+ masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ bailoutCmp32(Assembler::LessThan, temp, Imm32(0), snapshot);
+
+ if (min != 0) {
+ int32_t diff;
+ if (SafeSub(max, min, &diff))
+ max = diff;
+ else
+ masm.sub32(Imm32(min), temp);
+ }
+ }
+
+ // Compute the maximum possible index. No overflow check is needed when
+ // max > 0. We can only wrap around to a negative number, which will test as
+ // larger than all nonnegative numbers in the unsigned comparison, and the
+ // length is required to be nonnegative (else testing a negative length
+ // would succeed on any nonnegative index).
+ if (max != 0) {
+ if (max < 0) {
+ Label bail;
+ masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
+ bailoutFrom(&bail, snapshot);
+ } else {
+ masm.add32(Imm32(max), temp);
+ }
+ }
+
+ if (length->isRegister())
+ bailoutCmp32(Assembler::BelowOrEqual, ToRegister(length), temp, snapshot);
+ else
+ bailoutCmp32(Assembler::BelowOrEqual, ToAddress(length), temp, snapshot);
+}
+
+void
+CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir)
+{
+ int32_t min = lir->mir()->minimum();
+ bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
+ lir->snapshot());
+}
+
+class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator>
+{
+ LInstruction* ins_;
+ Label rejoinStore_;
+
+ public:
+ explicit OutOfLineStoreElementHole(LInstruction* ins)
+ : ins_(ins)
+ {
+ MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT() ||
+ ins->isFallibleStoreElementV() || ins->isFallibleStoreElementT());
+ }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineStoreElementHole(this);
+ }
+ LInstruction* ins() const {
+ return ins_;
+ }
+ Label* rejoinStore() {
+ return &rejoinStore_;
+ }
+};
+
+void
+CodeGenerator::emitStoreHoleCheck(Register elements, const LAllocation* index,
+ int32_t offsetAdjustment, LSnapshot* snapshot)
+{
+ Label bail;
+ if (index->isConstant()) {
+ Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
+ masm.branchTestMagic(Assembler::Equal, dest, &bail);
+ } else {
+ BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
+ masm.branchTestMagic(Assembler::Equal, dest, &bail);
+ }
+ bailoutFrom(&bail, snapshot);
+}
+
+static ConstantOrRegister
+ToConstantOrRegister(const LAllocation* value, MIRType valueType)
+{
+ if (value->isConstant())
+ return ConstantOrRegister(value->toConstant()->toJSValue());
+ return TypedOrValueRegister(valueType, ToAnyRegister(value));
+}
+
+void
+CodeGenerator::emitStoreElementTyped(const LAllocation* value,
+ MIRType valueType, MIRType elementType,
+ Register elements, const LAllocation* index,
+ int32_t offsetAdjustment)
+{
+ ConstantOrRegister v = ToConstantOrRegister(value, valueType);
+ if (index->isConstant()) {
+ Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
+ masm.storeUnboxedValue(v, valueType, dest, elementType);
+ } else {
+ BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
+ masm.storeUnboxedValue(v, valueType, dest, elementType);
+ }
+}
+
+void
+CodeGenerator::visitStoreElementT(LStoreElementT* store)
+{
+ Register elements = ToRegister(store->elements());
+ const LAllocation* index = store->index();
+
+ if (store->mir()->needsBarrier())
+ emitPreBarrier(elements, index, store->mir()->offsetAdjustment());
+
+ if (store->mir()->needsHoleCheck())
+ emitStoreHoleCheck(elements, index, store->mir()->offsetAdjustment(), store->snapshot());
+
+ emitStoreElementTyped(store->value(),
+ store->mir()->value()->type(), store->mir()->elementType(),
+ elements, index, store->mir()->offsetAdjustment());
+}
+
+void
+CodeGenerator::visitStoreElementV(LStoreElementV* lir)
+{
+ const ValueOperand value = ToValue(lir, LStoreElementV::Value);
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+
+ if (lir->mir()->needsBarrier())
+ emitPreBarrier(elements, index, lir->mir()->offsetAdjustment());
+
+ if (lir->mir()->needsHoleCheck())
+ emitStoreHoleCheck(elements, index, lir->mir()->offsetAdjustment(), lir->snapshot());
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements,
+ ToInt32(lir->index()) * sizeof(js::Value) + lir->mir()->offsetAdjustment());
+ masm.storeValue(value, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), TimesEight,
+ lir->mir()->offsetAdjustment());
+ masm.storeValue(value, dest);
+ }
+}
+
+template <typename T> void
+CodeGenerator::emitStoreElementHoleT(T* lir)
+{
+ static_assert(std::is_same<T, LStoreElementHoleT>::value || std::is_same<T, LFallibleStoreElementT>::value,
+ "emitStoreElementHoleT called with unexpected argument type");
+
+ OutOfLineStoreElementHole* ool = new(alloc()) OutOfLineStoreElementHole(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+ RegisterOrInt32Constant key = ToRegisterOrInt32Constant(index);
+
+ JSValueType unboxedType = lir->mir()->unboxedType();
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
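+ // JSVAL_TYPE_MAGIC means the object stores its elements as normal (boxed)
+ // native elements.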
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, key, ool->entry());
+
+ if (lir->mir()->needsBarrier())
+ emitPreBarrier(elements, index, 0);
+
+ masm.bind(ool->rejoinStore());
+ emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), lir->mir()->elementType(),
+ elements, index, 0);
+ } else {
+ Register temp = ToRegister(lir->getTemp(0));
+ Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLength, temp);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), temp);
+ masm.branch32(Assembler::BelowOrEqual, temp, key, ool->entry());
+
+ ConstantOrRegister v = ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
+
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * UnboxedTypeSize(unboxedType));
+ EmitUnboxedPreBarrier(masm, address, unboxedType);
+
+ masm.bind(ool->rejoinStore());
+ masm.storeUnboxedProperty(address, unboxedType, v, nullptr);
+ } else {
+ BaseIndex address(elements, ToRegister(index),
+ ScaleFromElemWidth(UnboxedTypeSize(unboxedType)));
+ EmitUnboxedPreBarrier(masm, address, unboxedType);
+
+ masm.bind(ool->rejoinStore());
+ masm.storeUnboxedProperty(address, unboxedType, v, nullptr);
+ }
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir)
+{
+ emitStoreElementHoleT(lir);
+}
+
+template <typename T> void
+CodeGenerator::emitStoreElementHoleV(T* lir)
+{
+ static_assert(std::is_same<T, LStoreElementHoleV>::value || std::is_same<T, LFallibleStoreElementV>::value,
+ "emitStoreElementHoleV called with unexpected parameter type");
+
+ OutOfLineStoreElementHole* ool = new(alloc()) OutOfLineStoreElementHole(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+ const ValueOperand value = ToValue(lir, T::Value);
+ RegisterOrInt32Constant key = ToRegisterOrInt32Constant(index);
+
+ JSValueType unboxedType = lir->mir()->unboxedType();
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, key, ool->entry());
+
+ if (lir->mir()->needsBarrier())
+ emitPreBarrier(elements, index, 0);
+
+ masm.bind(ool->rejoinStore());
+ if (index->isConstant())
+ masm.storeValue(value, Address(elements, ToInt32(index) * sizeof(js::Value)));
+ else
+ masm.storeValue(value, BaseIndex(elements, ToRegister(index), TimesEight));
+ } else {
+ Register temp = ToRegister(lir->getTemp(0));
+ Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLength, temp);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), temp);
+ masm.branch32(Assembler::BelowOrEqual, temp, key, ool->entry());
+
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * UnboxedTypeSize(unboxedType));
+ EmitUnboxedPreBarrier(masm, address, unboxedType);
+
+ masm.bind(ool->rejoinStore());
+ masm.storeUnboxedProperty(address, unboxedType, ConstantOrRegister(value), nullptr);
+ } else {
+ BaseIndex address(elements, ToRegister(index),
+ ScaleFromElemWidth(UnboxedTypeSize(unboxedType)));
+ EmitUnboxedPreBarrier(masm, address, unboxedType);
+
+ masm.bind(ool->rejoinStore());
+ masm.storeUnboxedProperty(address, unboxedType, ConstantOrRegister(value), nullptr);
+ }
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir)
+{
+ emitStoreElementHoleV(lir);
+}
+
+typedef bool (*ThrowReadOnlyFn)(JSContext*, int32_t);
+static const VMFunction ThrowReadOnlyInfo =
+ FunctionInfo<ThrowReadOnlyFn>(ThrowReadOnlyError, "ThrowReadOnlyError");
+
+void
+CodeGenerator::visitFallibleStoreElementT(LFallibleStoreElementT* lir)
+{
+ Register elements = ToRegister(lir->elements());
+
+ // Handle frozen objects
+ Label isFrozen;
+ Address flags(elements, ObjectElements::offsetOfFlags());
+ if (!lir->mir()->strict()) {
+ masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN), &isFrozen);
+ } else {
+ const LAllocation* index = lir->index();
+ OutOfLineCode* ool;
+ if (index->isConstant())
+ ool = oolCallVM(ThrowReadOnlyInfo, lir, ArgList(Imm32(ToInt32(index))), StoreNothing());
+ else
+ ool = oolCallVM(ThrowReadOnlyInfo, lir, ArgList(ToRegister(index)), StoreNothing());
+ masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN), ool->entry());
+ // This OOL code should have thrown an exception, so it will never return.
+ // Therefore we do not bind ool->rejoin() anywhere, so that it implicitly
+ // (and without the cost of a jump) does a masm.assumeUnreachable().
+ }
+
+ emitStoreElementHoleT(lir);
+
+ masm.bind(&isFrozen);
+}
+
+void
+CodeGenerator::visitFallibleStoreElementV(LFallibleStoreElementV* lir)
+{
+ Register elements = ToRegister(lir->elements());
+
+ // Handle frozen objects
+ Label isFrozen;
+ Address flags(elements, ObjectElements::offsetOfFlags());
+ if (!lir->mir()->strict()) {
+ masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN), &isFrozen);
+ } else {
+ const LAllocation* index = lir->index();
+ OutOfLineCode* ool;
+ if (index->isConstant())
+ ool = oolCallVM(ThrowReadOnlyInfo, lir, ArgList(Imm32(ToInt32(index))), StoreNothing());
+ else
+ ool = oolCallVM(ThrowReadOnlyInfo, lir, ArgList(ToRegister(index)), StoreNothing());
+ masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN), ool->entry());
+ // This OOL code should have thrown an exception, so it will never return.
+ // Therefore we do not bind ool->rejoin() anywhere, so that it implicitly
+ // (and without the cost of a jump) does a masm.assumeUnreachable().
+ }
+
+ emitStoreElementHoleV(lir);
+
+ masm.bind(&isFrozen);
+}
+
+typedef bool (*SetDenseOrUnboxedArrayElementFn)(JSContext*, HandleObject, int32_t,
+ HandleValue, bool strict);
+static const VMFunction SetDenseOrUnboxedArrayElementInfo =
+ FunctionInfo<SetDenseOrUnboxedArrayElementFn>(SetDenseOrUnboxedArrayElement,
+ "SetDenseOrUnboxedArrayElement");
+
+void
+CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole* ool)
+{
+ Register object, elements;
+ LInstruction* ins = ool->ins();
+ const LAllocation* index;
+ MIRType valueType;
+ ConstantOrRegister value;
+ JSValueType unboxedType;
+ LDefinition* temp = nullptr;
+
+ if (ins->isStoreElementHoleV()) {
+ LStoreElementHoleV* store = ins->toStoreElementHoleV();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = store->index();
+ valueType = store->mir()->value()->type();
+ value = TypedOrValueRegister(ToValue(store, LStoreElementHoleV::Value));
+ unboxedType = store->mir()->unboxedType();
+ temp = store->getTemp(0);
+ } else if (ins->isFallibleStoreElementV()) {
+ LFallibleStoreElementV* store = ins->toFallibleStoreElementV();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = store->index();
+ valueType = store->mir()->value()->type();
+ value = TypedOrValueRegister(ToValue(store, LFallibleStoreElementV::Value));
+ unboxedType = store->mir()->unboxedType();
+ temp = store->getTemp(0);
+ } else if (ins->isStoreElementHoleT()) {
+ LStoreElementHoleT* store = ins->toStoreElementHoleT();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = store->index();
+ valueType = store->mir()->value()->type();
+ if (store->value()->isConstant())
+ value = ConstantOrRegister(store->value()->toConstant()->toJSValue());
+ else
+ value = TypedOrValueRegister(valueType, ToAnyRegister(store->value()));
+ unboxedType = store->mir()->unboxedType();
+ temp = store->getTemp(0);
+ } else { // ins->isFallibleStoreElementT()
+ LFallibleStoreElementT* store = ins->toFallibleStoreElementT();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = store->index();
+ valueType = store->mir()->value()->type();
+ if (store->value()->isConstant())
+ value = ConstantOrRegister(store->value()->toConstant()->toJSValue());
+ else
+ value = TypedOrValueRegister(valueType, ToAnyRegister(store->value()));
+ unboxedType = store->mir()->unboxedType();
+ temp = store->getTemp(0);
+ }
+
+ RegisterOrInt32Constant key = ToRegisterOrInt32Constant(index);
+
+ // If index == initializedLength, try to bump the initialized length inline.
+ // If index > initializedLength, call a stub. Note that this relies on the
+ // condition flags sticking from the incoming branch.
+ Label callStub;
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ // Reimplemented for MIPS because it has no condition flags.
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, key, &callStub);
+ } else {
+ Address initLength(object, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ masm.load32(initLength, ToRegister(temp));
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), ToRegister(temp));
+ masm.branch32(Assembler::NotEqual, ToRegister(temp), key, &callStub);
+ }
+#else
+ masm.j(Assembler::NotEqual, &callStub);
+#endif
+
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ // Check array capacity.
+ masm.branch32(Assembler::BelowOrEqual, Address(elements, ObjectElements::offsetOfCapacity()),
+ key, &callStub);
+
+ // Update initialized length. The capacity guard above ensures this won't overflow,
+ // due to MAX_DENSE_ELEMENTS_COUNT.
+ masm.inc32(&key);
+ masm.store32(key, Address(elements, ObjectElements::offsetOfInitializedLength()));
+
+ // Update length if length < initializedLength.
+ Label dontUpdate;
+ masm.branch32(Assembler::AboveOrEqual, Address(elements, ObjectElements::offsetOfLength()),
+ key, &dontUpdate);
+ masm.store32(key, Address(elements, ObjectElements::offsetOfLength()));
+ masm.bind(&dontUpdate);
+
+ masm.dec32(&key);
+ } else {
+ // Check array capacity.
+ masm.checkUnboxedArrayCapacity(object, key, ToRegister(temp), &callStub);
+
+ // Update initialized length.
+ masm.add32(Imm32(1), Address(object, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
+
+ // Update length if length < initializedLength.
+ Address lengthAddr(object, UnboxedArrayObject::offsetOfLength());
+ Label dontUpdate;
+ masm.branch32(Assembler::Above, lengthAddr, key, &dontUpdate);
+ masm.add32(Imm32(1), lengthAddr);
+ masm.bind(&dontUpdate);
+ }
+
+ if ((ins->isStoreElementHoleT() || ins->isFallibleStoreElementT()) &&
+ unboxedType == JSVAL_TYPE_MAGIC && valueType != MIRType::Double)
+ {
+ // The inline path for StoreElementHoleT and FallibleStoreElementT does not always store
+ // the type tag, so we do the store on the OOL path. We use MIRType::None for the element
+ // type so that storeElementTyped will always store the type tag.
+ if (ins->isStoreElementHoleT()) {
+ emitStoreElementTyped(ins->toStoreElementHoleT()->value(), valueType, MIRType::None,
+ elements, index, 0);
+ masm.jump(ool->rejoin());
+ } else if (ins->isFallibleStoreElementT()) {
+ emitStoreElementTyped(ins->toFallibleStoreElementT()->value(), valueType,
+ MIRType::None, elements, index, 0);
+ masm.jump(ool->rejoin());
+ }
+ } else {
+ // Jump to the inline path where we will store the value.
+ masm.jump(ool->rejoinStore());
+ }
+
+ masm.bind(&callStub);
+ saveLive(ins);
+
+ pushArg(Imm32(current->mir()->strict()));
+ pushArg(value);
+ if (index->isConstant())
+ pushArg(Imm32(ToInt32(index)));
+ else
+ pushArg(ToRegister(index));
+ pushArg(object);
+ callVM(SetDenseOrUnboxedArrayElementInfo, ins);
+
+ restoreLive(ins);
+ masm.jump(ool->rejoin());
+}
+
+template <typename T>
+static void
+StoreUnboxedPointer(MacroAssembler& masm, T address, MIRType type, const LAllocation* value,
+ bool preBarrier)
+{
+ if (preBarrier)
+ masm.patchableCallPreBarrier(address, type);
+ if (value->isConstant()) {
+ Value v = value->toConstant()->toJSValue();
+ if (v.isMarkable()) {
+ masm.storePtr(ImmGCPtr(v.toMarkablePointer()), address);
+ } else {
+ MOZ_ASSERT(v.isNull());
+ masm.storePtr(ImmWord(0), address);
+ }
+ } else {
+ masm.storePtr(ToRegister(value), address);
+ }
+}
+
+void
+CodeGenerator::visitStoreUnboxedPointer(LStoreUnboxedPointer* lir)
+{
+ MIRType type;
+ int32_t offsetAdjustment;
+ bool preBarrier;
+ if (lir->mir()->isStoreUnboxedObjectOrNull()) {
+ type = MIRType::Object;
+ offsetAdjustment = lir->mir()->toStoreUnboxedObjectOrNull()->offsetAdjustment();
+ preBarrier = lir->mir()->toStoreUnboxedObjectOrNull()->preBarrier();
+ } else if (lir->mir()->isStoreUnboxedString()) {
+ type = MIRType::String;
+ offsetAdjustment = lir->mir()->toStoreUnboxedString()->offsetAdjustment();
+ preBarrier = lir->mir()->toStoreUnboxedString()->preBarrier();
+ } else {
+ MOZ_CRASH();
+ }
+
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+ const LAllocation* value = lir->value();
+
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * sizeof(uintptr_t) + offsetAdjustment);
+ StoreUnboxedPointer(masm, address, type, value, preBarrier);
+ } else {
+ BaseIndex address(elements, ToRegister(index), ScalePointer, offsetAdjustment);
+ StoreUnboxedPointer(masm, address, type, value, preBarrier);
+ }
+}
+
+typedef bool (*ConvertUnboxedObjectToNativeFn)(JSContext*, JSObject*);
+static const VMFunction ConvertUnboxedPlainObjectToNativeInfo =
+ FunctionInfo<ConvertUnboxedObjectToNativeFn>(UnboxedPlainObject::convertToNative,
+ "UnboxedPlainObject::convertToNative");
+static const VMFunction ConvertUnboxedArrayObjectToNativeInfo =
+ FunctionInfo<ConvertUnboxedObjectToNativeFn>(UnboxedArrayObject::convertToNative,
+ "UnboxedArrayObject::convertToNative");
+
+void
+CodeGenerator::visitConvertUnboxedObjectToNative(LConvertUnboxedObjectToNative* lir)
+{
+ Register object = ToRegister(lir->getOperand(0));
+
+ OutOfLineCode* ool = oolCallVM(lir->mir()->group()->unboxedLayoutDontCheckGeneration().isArray()
+ ? ConvertUnboxedArrayObjectToNativeInfo
+ : ConvertUnboxedPlainObjectToNativeInfo,
+ lir, ArgList(object), StoreNothing());
+
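+ // Call the VM only if the object still has its original unboxed group,
+ // i.e. it has not been converted yet.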
+ masm.branchPtr(Assembler::Equal, Address(object, JSObject::offsetOfGroup()),
+ ImmGCPtr(lir->mir()->group()), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+typedef bool (*ArrayPopShiftFn)(JSContext*, HandleObject, MutableHandleValue);
+static const VMFunction ArrayPopDenseInfo =
+ FunctionInfo<ArrayPopShiftFn>(jit::ArrayPopDense, "ArrayPopDense");
+static const VMFunction ArrayShiftDenseInfo =
+ FunctionInfo<ArrayPopShiftFn>(jit::ArrayShiftDense, "ArrayShiftDense");
+
+void
+CodeGenerator::emitArrayPopShift(LInstruction* lir, const MArrayPopShift* mir, Register obj,
+ Register elementsTemp, Register lengthTemp, TypedOrValueRegister out)
+{
+ OutOfLineCode* ool;
+
+ if (mir->mode() == MArrayPopShift::Pop) {
+ ool = oolCallVM(ArrayPopDenseInfo, lir, ArgList(obj), StoreValueTo(out));
+ } else {
+ MOZ_ASSERT(mir->mode() == MArrayPopShift::Shift);
+ ool = oolCallVM(ArrayShiftDenseInfo, lir, ArgList(obj), StoreValueTo(out));
+ }
+
+ // VM call if a write barrier is necessary.
+ masm.branchTestNeedsIncrementalBarrier(Assembler::NonZero, ool->entry());
+
+ // Load elements and length, and VM call if length != initializedLength.
+ RegisterOrInt32Constant key = RegisterOrInt32Constant(lengthTemp);
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
+ masm.load32(Address(elementsTemp, ObjectElements::offsetOfLength()), lengthTemp);
+
+ Address initLength(elementsTemp, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, key, ool->entry());
+ } else {
+ masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), elementsTemp);
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()), lengthTemp);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), lengthTemp);
+
+ Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
+ masm.branch32(Assembler::NotEqual, lengthAddr, key, ool->entry());
+ }
+
+ // Test for length != 0. On zero length either take a VM call or generate
+ // an undefined value, depending on whether the call is known to produce
+ // undefined.
+ Label done;
+ if (mir->maybeUndefined()) {
+ Label notEmpty;
+ masm.branchTest32(Assembler::NonZero, lengthTemp, lengthTemp, &notEmpty);
+
+ // According to the spec we need to set the length to 0 (which is already 0).
+ // This is observable when the array length is made non-writable.
+ // Handle this case in the OOL path. When freezing an unboxed array it is
+ // converted to a normal array.
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ Address elementFlags(elementsTemp, ObjectElements::offsetOfFlags());
+ Imm32 bit(ObjectElements::NONWRITABLE_ARRAY_LENGTH);
+ masm.branchTest32(Assembler::NonZero, elementFlags, bit, ool->entry());
+ }
+
+ masm.moveValue(UndefinedValue(), out.valueReg());
+ masm.jump(&done);
+ masm.bind(&notEmpty);
+ } else {
+ masm.branchTest32(Assembler::Zero, lengthTemp, lengthTemp, ool->entry());
+ }
+
+ masm.dec32(&key);
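+ // lengthTemp now holds the new length (old length - 1).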
+
+ if (mir->mode() == MArrayPopShift::Pop) {
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ BaseIndex addr(elementsTemp, lengthTemp, TimesEight);
+ masm.loadElementTypedOrValue(addr, out, mir->needsHoleCheck(), ool->entry());
+ } else {
+ size_t elemSize = UnboxedTypeSize(mir->unboxedType());
+ BaseIndex addr(elementsTemp, lengthTemp, ScaleFromElemWidth(elemSize));
+ masm.loadUnboxedProperty(addr, mir->unboxedType(), out);
+ }
+ } else {
+ MOZ_ASSERT(mir->mode() == MArrayPopShift::Shift);
+ Address addr(elementsTemp, 0);
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC)
+ masm.loadElementTypedOrValue(addr, out, mir->needsHoleCheck(), ool->entry());
+ else
+ masm.loadUnboxedProperty(addr, mir->unboxedType(), out);
+ }
+
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ // Handle the failure case when the array length is non-writable in the
+ // OOL path. (Unlike in the adding-an-element cases, we can't rely on the
+ // capacity <= length invariant for such arrays to avoid an explicit
+ // check.)
+ Address elementFlags(elementsTemp, ObjectElements::offsetOfFlags());
+ Imm32 bit(ObjectElements::NONWRITABLE_ARRAY_LENGTH);
+ masm.branchTest32(Assembler::NonZero, elementFlags, bit, ool->entry());
+
+ // Now adjust length and initializedLength.
+ masm.store32(lengthTemp, Address(elementsTemp, ObjectElements::offsetOfLength()));
+ masm.store32(lengthTemp, Address(elementsTemp, ObjectElements::offsetOfInitializedLength()));
+ } else {
+ // Unboxed arrays always have writable lengths. Adjust length and
+ // initializedLength.
+ masm.store32(lengthTemp, Address(obj, UnboxedArrayObject::offsetOfLength()));
+ masm.add32(Imm32(-1), Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
+ }
+
+ if (mir->mode() == MArrayPopShift::Shift) {
+ // Don't save the temp registers.
+ LiveRegisterSet temps;
+ temps.add(elementsTemp);
+ temps.add(lengthTemp);
+
+ saveVolatile(temps);
+ masm.setupUnalignedABICall(lengthTemp);
+ masm.passABIArg(obj);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::ArrayShiftMoveElements));
+ restoreVolatile(temps);
+ }
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitArrayPopShiftV(LArrayPopShiftV* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->temp0());
+ Register length = ToRegister(lir->temp1());
+ TypedOrValueRegister out(ToOutValue(lir));
+ emitArrayPopShift(lir, lir->mir(), obj, elements, length, out);
+}
+
+void
+CodeGenerator::visitArrayPopShiftT(LArrayPopShiftT* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->temp0());
+ Register length = ToRegister(lir->temp1());
+ TypedOrValueRegister out(lir->mir()->type(), ToAnyRegister(lir->output()));
+ emitArrayPopShift(lir, lir->mir(), obj, elements, length, out);
+}
+
+typedef bool (*ArrayPushDenseFn)(JSContext*, HandleObject, HandleValue, uint32_t*);
+static const VMFunction ArrayPushDenseInfo =
+ FunctionInfo<ArrayPushDenseFn>(jit::ArrayPushDense, "ArrayPushDense");
+
+void
+CodeGenerator::emitArrayPush(LInstruction* lir, const MArrayPush* mir, Register obj,
+ const ConstantOrRegister& value, Register elementsTemp, Register length)
+{
+ OutOfLineCode* ool = oolCallVM(ArrayPushDenseInfo, lir, ArgList(obj, value), StoreRegisterTo(length));
+
+ RegisterOrInt32Constant key = RegisterOrInt32Constant(length);
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ // Load elements and length.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
+ masm.load32(Address(elementsTemp, ObjectElements::offsetOfLength()), length);
+
+ // Guard length == initializedLength.
+ Address initLength(elementsTemp, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, key, ool->entry());
+
+ // Guard length < capacity.
+ Address capacity(elementsTemp, ObjectElements::offsetOfCapacity());
+ masm.branch32(Assembler::BelowOrEqual, capacity, key, ool->entry());
+
+ // Do the store.
+ masm.storeConstantOrRegister(value, BaseIndex(elementsTemp, length, TimesEight));
+ } else {
+ // Load initialized length.
+ masm.load32(Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()), length);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), length);
+
+ // Guard length == initializedLength.
+ Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
+ masm.branch32(Assembler::NotEqual, lengthAddr, key, ool->entry());
+
+ // Guard length < capacity.
+ masm.checkUnboxedArrayCapacity(obj, key, elementsTemp, ool->entry());
+
+ // Load elements and do the store.
+ masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), elementsTemp);
+ size_t elemSize = UnboxedTypeSize(mir->unboxedType());
+ BaseIndex addr(elementsTemp, length, ScaleFromElemWidth(elemSize));
+ masm.storeUnboxedProperty(addr, mir->unboxedType(), value, nullptr);
+ }
+
+ masm.inc32(&key);
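+ // length now holds the new array length.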
+
+ // Update length and initialized length.
+ if (mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
+ masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfInitializedLength()));
+ } else {
+ masm.store32(length, Address(obj, UnboxedArrayObject::offsetOfLength()));
+ masm.add32(Imm32(1), Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitArrayPushV(LArrayPushV* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register elementsTemp = ToRegister(lir->temp());
+ Register length = ToRegister(lir->output());
+ ConstantOrRegister value = TypedOrValueRegister(ToValue(lir, LArrayPushV::Value));
+ emitArrayPush(lir, lir->mir(), obj, value, elementsTemp, length);
+}
+
+void
+CodeGenerator::visitArrayPushT(LArrayPushT* lir)
+{
+ Register obj = ToRegister(lir->object());
+ Register elementsTemp = ToRegister(lir->temp());
+ Register length = ToRegister(lir->output());
+ ConstantOrRegister value;
+ if (lir->value()->isConstant())
+ value = ConstantOrRegister(lir->value()->toConstant()->toJSValue());
+ else
+ value = TypedOrValueRegister(lir->mir()->value()->type(), ToAnyRegister(lir->value()));
+ emitArrayPush(lir, lir->mir(), obj, value, elementsTemp, length);
+}
+
+typedef JSObject* (*ArraySliceDenseFn)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+static const VMFunction ArraySliceDenseInfo =
+ FunctionInfo<ArraySliceDenseFn>(array_slice_dense, "array_slice_dense");
+
+void
+CodeGenerator::visitArraySlice(LArraySlice* lir)
+{
+ Register object = ToRegister(lir->object());
+ Register begin = ToRegister(lir->begin());
+ Register end = ToRegister(lir->end());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+
+ Label call, fail;
+
+ // Try to allocate an object.
+ masm.createGCObject(temp1, temp2, lir->mir()->templateObj(), lir->mir()->initialHeap(), &fail);
+
+ // Fix up the group of the result in case it doesn't match the template object.
+ masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp2);
+ masm.storePtr(temp2, Address(temp1, JSObject::offsetOfGroup()));
+
+ masm.jump(&call);
+ {
+ masm.bind(&fail);
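+ // If inline allocation fails, pass nullptr and let the VM call allocate
+ // the result object.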
+ masm.movePtr(ImmPtr(nullptr), temp1);
+ }
+ masm.bind(&call);
+
+ pushArg(temp1);
+ pushArg(end);
+ pushArg(begin);
+ pushArg(object);
+ callVM(ArraySliceDenseInfo, lir);
+}
+
+typedef JSString* (*ArrayJoinFn)(JSContext*, HandleObject, HandleString);
+static const VMFunction ArrayJoinInfo = FunctionInfo<ArrayJoinFn>(jit::ArrayJoin, "ArrayJoin");
+
+void
+CodeGenerator::visitArrayJoin(LArrayJoin* lir)
+{
+ pushArg(ToRegister(lir->separator()));
+ pushArg(ToRegister(lir->array()));
+
+ callVM(ArrayJoinInfo, lir);
+}
+
+typedef JSObject* (*ValueToIteratorFn)(JSContext*, uint32_t, HandleValue);
+static const VMFunction ValueToIteratorInfo =
+ FunctionInfo<ValueToIteratorFn>(ValueToIterator, "ValueToIterator");
+
+void
+CodeGenerator::visitCallIteratorStartV(LCallIteratorStartV* lir)
+{
+ pushArg(ToValue(lir, LCallIteratorStartV::Value));
+ pushArg(Imm32(lir->mir()->flags()));
+ callVM(ValueToIteratorInfo, lir);
+}
+
+typedef JSObject* (*GetIteratorObjectFn)(JSContext*, HandleObject, uint32_t);
+static const VMFunction GetIteratorObjectInfo =
+ FunctionInfo<GetIteratorObjectFn>(GetIteratorObject, "GetIteratorObject");
+
+void
+CodeGenerator::visitCallIteratorStartO(LCallIteratorStartO* lir)
+{
+ pushArg(Imm32(lir->mir()->flags()));
+ pushArg(ToRegister(lir->object()));
+ callVM(GetIteratorObjectInfo, lir);
+}
+
+void
+CodeGenerator::branchIfNotEmptyObjectElements(Register obj, Label* target)
+{
+ Label emptyObj;
+ masm.branchPtr(Assembler::Equal,
+ Address(obj, NativeObject::offsetOfElements()),
+ ImmPtr(js::emptyObjectElements),
+ &emptyObj);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(obj, NativeObject::offsetOfElements()),
+ ImmPtr(js::emptyObjectElementsShared),
+ target);
+ masm.bind(&emptyObj);
+}
+
+void
+CodeGenerator::visitIteratorStartO(LIteratorStartO* lir)
+{
+ const Register obj = ToRegister(lir->object());
+ const Register output = ToRegister(lir->output());
+
+ uint32_t flags = lir->mir()->flags();
+
+ OutOfLineCode* ool = oolCallVM(GetIteratorObjectInfo, lir,
+ ArgList(obj, Imm32(flags)), StoreRegisterTo(output));
+
+ const Register temp1 = ToRegister(lir->temp1());
+ const Register temp2 = ToRegister(lir->temp2());
+ const Register niTemp = ToRegister(lir->temp3()); // Holds the NativeIterator object.
+
+ // Iterators other than for-in should use LCallIteratorStart.
+ MOZ_ASSERT(flags == JSITER_ENUMERATE);
+
+ // Fetch the most recent iterator and ensure it's not nullptr.
+ masm.loadPtr(AbsoluteAddress(gen->compartment->addressOfLastCachedNativeIterator()), output);
+ masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
+
+ // Load NativeIterator.
+ masm.loadObjPrivate(output, JSObject::ITER_CLASS_NFIXED_SLOTS, niTemp);
+
+ // Ensure the |active| and |unreusable| bits are not set.
+ masm.branchTest32(Assembler::NonZero, Address(niTemp, offsetof(NativeIterator, flags)),
+ Imm32(JSITER_ACTIVE|JSITER_UNREUSABLE), ool->entry());
+
+ // Load the iterator's receiver guard array.
+ masm.loadPtr(Address(niTemp, offsetof(NativeIterator, guard_array)), temp2);
+
+ // Compare object with the first receiver guard. The last iterator can only
+ // match for native objects and unboxed objects.
+ {
+ Address groupAddr(temp2, offsetof(ReceiverGuard, group));
+ Address shapeAddr(temp2, offsetof(ReceiverGuard, shape));
+ Label guardDone, shapeMismatch, noExpando;
+ masm.loadObjShape(obj, temp1);
+ masm.branchPtr(Assembler::NotEqual, shapeAddr, temp1, &shapeMismatch);
+
+ // Ensure the object does not have any elements. The presence of dense
+ // elements is not captured by the shape tests above.
+ branchIfNotEmptyObjectElements(obj, ool->entry());
+ masm.jump(&guardDone);
+
+ masm.bind(&shapeMismatch);
+ masm.loadObjGroup(obj, temp1);
+ masm.branchPtr(Assembler::NotEqual, groupAddr, temp1, ool->entry());
+ masm.loadPtr(Address(obj, UnboxedPlainObject::offsetOfExpando()), temp1);
+ masm.branchTestPtr(Assembler::Zero, temp1, temp1, &noExpando);
+ branchIfNotEmptyObjectElements(temp1, ool->entry());
+ masm.loadObjShape(temp1, temp1);
+ masm.bind(&noExpando);
+ masm.branchPtr(Assembler::NotEqual, shapeAddr, temp1, ool->entry());
+ masm.bind(&guardDone);
+ }
+
+ // Compare shape of object's prototype with the second shape. The prototype
+ // must be native, as unboxed objects cannot be prototypes (they cannot
+ // have the delegate flag set). Also check for the absence of dense elements.
+ Address prototypeShapeAddr(temp2, sizeof(ReceiverGuard) + offsetof(ReceiverGuard, shape));
+ masm.loadObjProto(obj, temp1);
+ branchIfNotEmptyObjectElements(temp1, ool->entry());
+ masm.loadObjShape(temp1, temp1);
+ masm.branchPtr(Assembler::NotEqual, prototypeShapeAddr, temp1, ool->entry());
+
+ // Ensure the object's prototype's prototype is nullptr. The last native
+ // iterator will always have a prototype chain length of one (i.e. it must
+ // be a plain object), so we do not need to generate a loop here.
+ masm.loadObjProto(obj, temp1);
+ masm.loadObjProto(temp1, temp1);
+ masm.branchTestPtr(Assembler::NonZero, temp1, temp1, ool->entry());
+
+ // Write barrier for stores to the iterator. We only need to take a write
+ // barrier if NativeIterator::obj is actually going to change.
+ {
+ // Bug 867815: Unconditionally take this out-of-line so that we do not
+ // have to post-barrier the store to NativeIter::obj. This just needs
+ // JIT support for the Cell* buffer.
+ Address objAddr(niTemp, offsetof(NativeIterator, obj));
+ masm.branchPtr(Assembler::NotEqual, objAddr, obj, ool->entry());
+ }
+
+ // Mark iterator as active.
+ masm.storePtr(obj, Address(niTemp, offsetof(NativeIterator, obj)));
+ masm.or32(Imm32(JSITER_ACTIVE), Address(niTemp, offsetof(NativeIterator, flags)));
+
+ // Chain onto the active iterator stack.
+ masm.loadPtr(AbsoluteAddress(gen->compartment->addressOfEnumerators()), temp1);
+
+ // ni->next = list
+ masm.storePtr(temp1, Address(niTemp, NativeIterator::offsetOfNext()));
+
+ // ni->prev = list->prev
+ masm.loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), temp2);
+ masm.storePtr(temp2, Address(niTemp, NativeIterator::offsetOfPrev()));
+
+ // list->prev->next = ni
+ masm.storePtr(niTemp, Address(temp2, NativeIterator::offsetOfNext()));
+
+ // list->prev = ni
+ masm.storePtr(niTemp, Address(temp1, NativeIterator::offsetOfPrev()));
+
+ masm.bind(ool->rejoin());
+}
+
+static void
+LoadNativeIterator(MacroAssembler& masm, Register obj, Register dest, Label* failures)
+{
+ MOZ_ASSERT(obj != dest);
+
+ // Test class.
+ masm.branchTestObjClass(Assembler::NotEqual, obj, dest, &PropertyIteratorObject::class_, failures);
+
+ // Load NativeIterator object.
+ masm.loadObjPrivate(obj, JSObject::ITER_CLASS_NFIXED_SLOTS, dest);
+}
+
+typedef bool (*IteratorMoreFn)(JSContext*, HandleObject, MutableHandleValue);
+static const VMFunction IteratorMoreInfo =
+ FunctionInfo<IteratorMoreFn>(IteratorMore, "IteratorMore");
+
+void
+CodeGenerator::visitIteratorMore(LIteratorMore* lir)
+{
+ const Register obj = ToRegister(lir->object());
+ const ValueOperand output = ToOutValue(lir);
+ const Register temp = ToRegister(lir->temp());
+
+ OutOfLineCode* ool = oolCallVM(IteratorMoreInfo, lir, ArgList(obj), StoreValueTo(output));
+
+ Register outputScratch = output.scratchReg();
+ LoadNativeIterator(masm, obj, outputScratch, ool->entry());
+
+ masm.branchTest32(Assembler::NonZero, Address(outputScratch, offsetof(NativeIterator, flags)),
+ Imm32(JSITER_FOREACH), ool->entry());
+
+ // If props_cursor < props_end, load the next string and advance the cursor.
+ // Else, return MagicValue(JS_NO_ITER_VALUE).
+ Label iterDone;
+ Address cursorAddr(outputScratch, offsetof(NativeIterator, props_cursor));
+ Address cursorEndAddr(outputScratch, offsetof(NativeIterator, props_end));
+ masm.loadPtr(cursorAddr, temp);
+ masm.branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);
+
+ // Get next string.
+ masm.loadPtr(Address(temp, 0), temp);
+
+ // Advance the cursor.
+ masm.addPtr(Imm32(sizeof(JSString*)), cursorAddr);
+
+ masm.tagValue(JSVAL_TYPE_STRING, temp, output);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&iterDone);
+ masm.moveValue(MagicValue(JS_NO_ITER_VALUE), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir)
+{
+ ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
+ Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
+ Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
+
+ masm.branchTestMagic(Assembler::Equal, input, ifTrue);
+
+ if (!isNextBlock(lir->ifFalse()->lir()))
+ masm.jump(ifFalse);
+}
+
+typedef bool (*CloseIteratorFn)(JSContext*, HandleObject);
+static const VMFunction CloseIteratorInfo =
+ FunctionInfo<CloseIteratorFn>(CloseIterator, "CloseIterator");
+
+void
+CodeGenerator::visitIteratorEnd(LIteratorEnd* lir)
+{
+ const Register obj = ToRegister(lir->object());
+ const Register temp1 = ToRegister(lir->temp1());
+ const Register temp2 = ToRegister(lir->temp2());
+ const Register temp3 = ToRegister(lir->temp3());
+
+ OutOfLineCode* ool = oolCallVM(CloseIteratorInfo, lir, ArgList(obj), StoreNothing());
+
+ LoadNativeIterator(masm, obj, temp1, ool->entry());
+
+ masm.branchTest32(Assembler::Zero, Address(temp1, offsetof(NativeIterator, flags)),
+ Imm32(JSITER_ENUMERATE), ool->entry());
+
+ // Clear active bit.
+ masm.and32(Imm32(~JSITER_ACTIVE), Address(temp1, offsetof(NativeIterator, flags)));
+
+ // Reset property cursor.
+ masm.loadPtr(Address(temp1, offsetof(NativeIterator, props_array)), temp2);
+ masm.storePtr(temp2, Address(temp1, offsetof(NativeIterator, props_cursor)));
+
+ // Unlink from the iterator list.
+ const Register next = temp2;
+ const Register prev = temp3;
+ masm.loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
+ masm.loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
+ masm.storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
+ masm.storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
+#ifdef DEBUG
+ masm.storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
+ masm.storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
+#endif
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitArgumentsLength(LArgumentsLength* lir)
+{
+ // Read the number of actual arguments from the JS frame.
+ Register argc = ToRegister(lir->output());
+ Address ptr(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfNumActualArgs());
+
+ masm.loadPtr(ptr, argc);
+}
+
+void
+CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir)
+{
+ ValueOperand result = GetValueOutput(lir);
+ const LAllocation* index = lir->index();
+ size_t argvOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
+
+ if (index->isConstant()) {
+ int32_t i = index->toConstant()->toInt32();
+ Address argPtr(masm.getStackPointer(), sizeof(Value) * i + argvOffset);
+ masm.loadValue(argPtr, result);
+ } else {
+ Register i = ToRegister(index);
+ BaseValueIndex argPtr(masm.getStackPointer(), i, argvOffset);
+ masm.loadValue(argPtr, result);
+ }
+}
+
+void
+CodeGenerator::visitSetFrameArgumentT(LSetFrameArgumentT* lir)
+{
+ size_t argOffset = frameSize() + JitFrameLayout::offsetOfActualArgs() +
+ (sizeof(Value) * lir->mir()->argno());
+
+ MIRType type = lir->mir()->value()->type();
+
+ if (type == MIRType::Double) {
+ // Store doubles directly.
+ FloatRegister input = ToFloatRegister(lir->input());
+ masm.storeDouble(input, Address(masm.getStackPointer(), argOffset));
+
+ } else {
+ Register input = ToRegister(lir->input());
+ masm.storeValue(ValueTypeFromMIRType(type), input, Address(masm.getStackPointer(), argOffset));
+ }
+}
+
+void
+CodeGenerator::visitSetFrameArgumentC(LSetFrameArgumentC* lir)
+{
+ size_t argOffset = frameSize() + JitFrameLayout::offsetOfActualArgs() +
+ (sizeof(Value) * lir->mir()->argno());
+ masm.storeValue(lir->val(), Address(masm.getStackPointer(), argOffset));
+}
+
+void
+CodeGenerator::visitSetFrameArgumentV(LSetFrameArgumentV* lir)
+{
+ const ValueOperand val = ToValue(lir, LSetFrameArgumentV::Input);
+ size_t argOffset = frameSize() + JitFrameLayout::offsetOfActualArgs() +
+ (sizeof(Value) * lir->mir()->argno());
+ masm.storeValue(val, Address(masm.getStackPointer(), argOffset));
+}
+
+typedef bool (*RunOnceScriptPrologueFn)(JSContext*, HandleScript);
+static const VMFunction RunOnceScriptPrologueInfo =
+ FunctionInfo<RunOnceScriptPrologueFn>(js::RunOnceScriptPrologue, "RunOnceScriptPrologue");
+
+void
+CodeGenerator::visitRunOncePrologue(LRunOncePrologue* lir)
+{
+ pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
+ callVM(RunOnceScriptPrologueInfo, lir);
+}
+
+typedef JSObject* (*InitRestParameterFn)(JSContext*, uint32_t, Value*, HandleObject,
+ HandleObject);
+static const VMFunction InitRestParameterInfo =
+ FunctionInfo<InitRestParameterFn>(InitRestParameter, "InitRestParameter");
+
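+// Shared helper for rest-parameter creation: compute a pointer to the first
+// rest actual and the rest length (clamped to zero), then call the
+// InitRestParameter VM function with the preallocated array, which may be
+// null if allocation failed.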
+void
+CodeGenerator::emitRest(LInstruction* lir, Register array, Register numActuals,
+ Register temp0, Register temp1, unsigned numFormals,
+ JSObject* templateObject, bool saveAndRestore, Register resultreg)
+{
+ // Compute actuals() + numFormals.
+ size_t actualsOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
+ masm.moveStackPtrTo(temp1);
+ masm.addPtr(Imm32(sizeof(Value) * numFormals + actualsOffset), temp1);
+
+ // Compute numActuals - numFormals.
+ Label emptyLength, joinLength;
+ masm.movePtr(numActuals, temp0);
+ masm.branch32(Assembler::LessThanOrEqual, temp0, Imm32(numFormals), &emptyLength);
+ masm.sub32(Imm32(numFormals), temp0);
+ masm.jump(&joinLength);
+ {
+ masm.bind(&emptyLength);
+ masm.move32(Imm32(0), temp0);
+ }
+ masm.bind(&joinLength);
+
+ if (saveAndRestore)
+ saveLive(lir);
+
+ pushArg(array);
+ pushArg(ImmGCPtr(templateObject));
+ pushArg(temp1);
+ pushArg(temp0);
+
+ callVM(InitRestParameterInfo, lir);
+
+ if (saveAndRestore) {
+ storePointerResultTo(resultreg);
+ restoreLive(lir);
+ }
+}
+
+void
+CodeGenerator::visitRest(LRest* lir)
+{
+ Register numActuals = ToRegister(lir->numActuals());
+ Register temp0 = ToRegister(lir->getTemp(0));
+ Register temp1 = ToRegister(lir->getTemp(1));
+ Register temp2 = ToRegister(lir->getTemp(2));
+ unsigned numFormals = lir->mir()->numFormals();
+ ArrayObject* templateObject = lir->mir()->templateObject();
+
+ Label joinAlloc, failAlloc;
+ masm.createGCObject(temp2, temp0, templateObject, gc::DefaultHeap, &failAlloc);
+ masm.jump(&joinAlloc);
+ {
+ masm.bind(&failAlloc);
+ masm.movePtr(ImmPtr(nullptr), temp2);
+ }
+ masm.bind(&joinAlloc);
+
+ emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
+}
+
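+// Code generation entry point for wasm functions: emit the prologue, the
+// function body and epilogue, then the out-of-line paths and trap exits,
+// routing stack-overflow checks through the wasm trap machinery.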
+bool
+CodeGenerator::generateWasm(wasm::SigIdDesc sigId, wasm::TrapOffset trapOffset,
+ wasm::FuncOffsets* offsets)
+{
+ JitSpew(JitSpew_Codegen, "# Emitting wasm code");
+
+ wasm::GenerateFunctionPrologue(masm, frameSize(), sigId, offsets);
+
+ // Overflow checks are omitted by CodeGenerator in some cases (leaf
+ // functions with small framePushed). Perform overflow-checking after
+ // pushing framePushed to catch cases with really large frames.
+ Label onOverflow;
+ if (!omitOverRecursedCheck()) {
+ masm.branchPtr(Assembler::AboveOrEqual,
+ Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
+ masm.getStackPointer(),
+ &onOverflow);
+ }
+
+ if (!generateBody())
+ return false;
+
+ masm.bind(&returnLabel_);
+ wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
+
+ if (!omitOverRecursedCheck()) {
+ // Since we just overflowed the stack, to be on the safe side, pop the
+ // stack so that, when the trap exit stub executes, it is a safe
+ // distance away from the end of the native stack.
+ wasm::TrapDesc trap(trapOffset, wasm::Trap::StackOverflow, /* framePushed = */ 0);
+ if (frameSize() > 0) {
+ masm.bind(&onOverflow);
+ masm.addToStackPtr(Imm32(frameSize()));
+ masm.jump(trap);
+ } else {
+ masm.bindLater(&onOverflow, trap);
+ }
+ }
+
+#if defined(JS_ION_PERF)
+ // Note the end of the inline code and start of the OOL code.
+ gen->perfSpewer().noteEndInlineCode(masm);
+#endif
+
+ if (!generateOutOfLineCode())
+ return false;
+
+ masm.wasmEmitTrapOutOfLineCode();
+
+ masm.flush();
+ if (masm.oom())
+ return false;
+
+ offsets->end = masm.currentOffset();
+
+ MOZ_ASSERT(!masm.failureLabel()->used());
+ MOZ_ASSERT(snapshots_.listSize() == 0);
+ MOZ_ASSERT(snapshots_.RVATableSize() == 0);
+ MOZ_ASSERT(recovers_.size() == 0);
+ MOZ_ASSERT(bailouts_.empty());
+ MOZ_ASSERT(graph.numConstants() == 0);
+ MOZ_ASSERT(safepointIndices_.empty());
+ MOZ_ASSERT(osiIndices_.empty());
+ MOZ_ASSERT(cacheList_.empty());
+ MOZ_ASSERT(safepoints_.size() == 0);
+ MOZ_ASSERT(!scriptCounts_);
+ return true;
+}
+
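+// Code generation entry point for Ion-compiled scripts: emit both entry
+// prologues (with and without argument checks), the body, the epilogues and
+// out-of-line code, recording native => bytecode map entries along the way.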
+bool
+CodeGenerator::generate()
+{
+ JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%" PRIuSIZE,
+ gen->info().script()->filename(),
+ gen->info().script()->lineno());
+
+ // Initialize the native code table with an entry for the start of the
+ // top-level script.
+ InlineScriptTree* tree = gen->info().inlineScriptTree();
+ jsbytecode* startPC = tree->script()->code();
+ BytecodeSite* startSite = new(gen->alloc()) BytecodeSite(tree, startPC);
+ if (!addNativeToBytecodeEntry(startSite))
+ return false;
+
+ if (!snapshots_.init())
+ return false;
+
+ if (!safepoints_.init(gen->alloc()))
+ return false;
+
+ if (!generatePrologue())
+ return false;
+
+ // Before generating any code, we generate type checks for all parameters.
+ // This comes before deoptTable_, because we can't use deopt tables without
+ // creating the actual frame.
+ generateArgumentsChecks();
+
+ if (frameClass_ != FrameSizeClass::None()) {
+ deoptTable_ = gen->jitRuntime()->getBailoutTable(frameClass_);
+ if (!deoptTable_)
+ return false;
+ }
+
+ // Skip over the alternative entry to IonScript code.
+ Label skipPrologue;
+ masm.jump(&skipPrologue);
+
+ // An alternative entry to the IonScript code, which doesn't test the
+ // arguments.
+ masm.flushBuffer();
+ setSkipArgCheckEntryOffset(masm.size());
+ masm.setFramePushed(0);
+ if (!generatePrologue())
+ return false;
+
+ masm.bind(&skipPrologue);
+
+#ifdef DEBUG
+ // Assert that the argument types are correct.
+ generateArgumentsChecks(/* bailout = */ false);
+#endif
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite))
+ return false;
+
+ if (!generateBody())
+ return false;
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite))
+ return false;
+
+ if (!generateEpilogue())
+ return false;
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite))
+ return false;
+
+ generateInvalidateEpilogue();
+#if defined(JS_ION_PERF)
+ // Note the end of the inline code and start of the OOL code.
+ perfSpewer_.noteEndInlineCode(masm);
+#endif
+
+ // Native => bytecode entries for OOL code will be added by
+ // CodeGeneratorShared::generateOutOfLineCode.
+ if (!generateOutOfLineCode())
+ return false;
+
+ // Add terminal entry.
+ if (!addNativeToBytecodeEntry(startSite))
+ return false;
+
+ // Dump native => bytecode entries to spew.
+ dumpNativeToBytecodeEntries();
+
+ return !masm.oom();
+}
+
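+// Instantiate the fallback stubs recorded during compilation and attach each
+// one to its shared-stub IC entry.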
+bool
+CodeGenerator::linkSharedStubs(JSContext* cx)
+{
+ for (uint32_t i = 0; i < sharedStubs_.length(); i++) {
+ ICStub *stub = nullptr;
+
+ switch (sharedStubs_[i].kind) {
+ case ICStub::Kind::BinaryArith_Fallback: {
+ ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ case ICStub::Kind::UnaryArith_Fallback: {
+ ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ case ICStub::Kind::Compare_Fallback: {
+ ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ case ICStub::Kind::GetProp_Fallback: {
+ ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ case ICStub::Kind::NewArray_Fallback: {
+ JSScript* script = sharedStubs_[i].entry.script();
+ jsbytecode* pc = sharedStubs_[i].entry.pc(script);
+ ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array);
+ if (!group)
+ return false;
+
+ ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ case ICStub::Kind::NewObject_Fallback: {
+ ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
+ stub = stubCompiler.getStub(&stubSpace_);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported shared stub.");
+ }
+
+ if (!stub)
+ return false;
+
+ sharedStubs_[i].entry.setFirstStub(stub);
+ }
+ return true;
+}
+
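+// Link the generated code into a new IonScript: record type constraints,
+// allocate the IonScript and JitCode, register profiling and trace-logger
+// metadata, patch IC and invalidation labels, and copy the side tables
+// (safepoints, snapshots, bailouts, constants) into the IonScript.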
+bool
+CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
+{
+ RootedScript script(cx, gen->info().script());
+ OptimizationLevel optimizationLevel = gen->optimizationInfo().level();
+
+ // Capture the SIMD template objects used during the compilation. This
+ // iterates over the template objects, using read barriers to let the GC
+ // know that the generated code relies on them.
+ captureSimdTemplate(cx);
+
+ // We finished the new IonScript. Invalidate the currently active IonScript,
+ // so we can replace it with this new (probably more highly optimized) version.
+ if (script->hasIonScript()) {
+ MOZ_ASSERT(script->ionScript()->isRecompiling());
+ // Do a normal invalidate, except don't cancel offThread compilations,
+ // since that will cancel this compilation too.
+ Invalidate(cx, script, /* resetUses */ false, /* cancelOffThread*/ false);
+ }
+
+ if (scriptCounts_ && !script->hasScriptCounts() && !script->initScriptCounts(cx))
+ return false;
+
+ if (!linkSharedStubs(cx))
+ return false;
+
+ // Check to make sure we didn't have a mid-build invalidation. If so, we
+ // will trickle to jit::Compile() and return Method_Skipped.
+ uint32_t warmUpCount = script->getWarmUpCount();
+
+ // Record constraints. If an error occurred, return false and potentially
+ // prevent future compilations. Otherwise, if an invalidation occurred,
+ // skip the current compilation.
+ RecompileInfo recompileInfo;
+ bool validRecompiledInfo = false;
+ if (!FinishCompilation(cx, script, constraints, &recompileInfo, &validRecompiledInfo))
+ return false;
+ if (!validRecompiledInfo)
+ return true;
+ auto guardRecordedConstraints = mozilla::MakeScopeExit([&] {
+ // In case of error, invalidate the current recompileInfo.
+ recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
+ });
+
+ // IonMonkey could have inferred better type information during
+ // compilation. Since adding the new information to the actual type
+ // information can reset the usecount, increase it back to what it was
+ // before.
+ if (warmUpCount > script->getWarmUpCount())
+ script->incWarmUpCounter(warmUpCount - script->getWarmUpCount());
+
+ uint32_t argumentSlots = (gen->info().nargs() + 1) * sizeof(Value);
+ uint32_t scriptFrameSize = frameClass_ == FrameSizeClass::None()
+ ? frameDepth_
+ : FrameSizeClass::FromDepth(frameDepth_).frameSize();
+
+ // We encode safepoints after the OSI-point offsets have been determined.
+ if (!encodeSafepoints())
+ return false;
+
+ IonScript* ionScript =
+ IonScript::New(cx, recompileInfo,
+ graph.totalSlotCount(), argumentSlots, scriptFrameSize,
+ snapshots_.listSize(), snapshots_.RVATableSize(),
+ recovers_.size(), bailouts_.length(), graph.numConstants(),
+ safepointIndices_.length(), osiIndices_.length(),
+ cacheList_.length(), runtimeData_.length(),
+ safepoints_.size(), patchableBackedges_.length(),
+ sharedStubs_.length(), optimizationLevel);
+ if (!ionScript)
+ return false;
+ auto guardIonScript = mozilla::MakeScopeExit([&ionScript] {
+ // Use js_free instead of IonScript::Destroy: the cache list and
+ // backedge list are still uninitialized.
+ js_free(ionScript);
+ });
+
+ // Note that creating the code here during an incremental GC will trace the
+ // code and mark all GC things it refers to. This captures any read barriers
+ // which were skipped while compiling the script off thread.
+ Linker linker(masm);
+ AutoFlushICache afc("IonLink");
+ JitCode* code = linker.newCode<CanGC>(cx, ION_CODE, !patchableBackedges_.empty());
+ if (!code)
+ return false;
+
+ // Encode native to bytecode map if profiling is enabled.
+ if (isProfilerInstrumentationEnabled()) {
+ // Generate native-to-bytecode main table.
+ if (!generateCompactNativeToBytecodeMap(cx, code))
+ return false;
+
+ uint8_t* ionTableAddr = ((uint8_t*) nativeToBytecodeMap_) + nativeToBytecodeTableOffset_;
+ JitcodeIonTable* ionTable = (JitcodeIonTable*) ionTableAddr;
+
+ // Construct the IonEntry that will go into the global table.
+ JitcodeGlobalEntry::IonEntry entry;
+ if (!ionTable->makeIonEntry(cx, code, nativeToBytecodeScriptListLength_,
+ nativeToBytecodeScriptList_, entry))
+ {
+ js_free(nativeToBytecodeScriptList_);
+ js_free(nativeToBytecodeMap_);
+ return false;
+ }
+
+ // nativeToBytecodeScriptList_ is no longer needed.
+ js_free(nativeToBytecodeScriptList_);
+
+ // Generate the tracked optimizations map.
+ if (isOptimizationTrackingEnabled()) {
+ // Treat OOMs and failures as if optimization tracking were turned off.
+ IonTrackedTypeVector* allTypes = cx->new_<IonTrackedTypeVector>();
+ if (allTypes && generateCompactTrackedOptimizationsMap(cx, code, allTypes)) {
+ const uint8_t* optsRegionTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsRegionTableOffset_;
+ const IonTrackedOptimizationsRegionTable* optsRegionTable =
+ (const IonTrackedOptimizationsRegionTable*) optsRegionTableAddr;
+ const uint8_t* optsTypesTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsTypesTableOffset_;
+ const IonTrackedOptimizationsTypesTable* optsTypesTable =
+ (const IonTrackedOptimizationsTypesTable*) optsTypesTableAddr;
+ const uint8_t* optsAttemptsTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsAttemptsTableOffset_;
+ const IonTrackedOptimizationsAttemptsTable* optsAttemptsTable =
+ (const IonTrackedOptimizationsAttemptsTable*) optsAttemptsTableAddr;
+ entry.initTrackedOptimizations(optsRegionTable, optsTypesTable, optsAttemptsTable,
+ allTypes);
+ } else {
+ cx->recoverFromOutOfMemory();
+ }
+ }
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(entry, cx->runtime())) {
+ // Memory may have been allocated for the entry.
+ entry.destroy();
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ } else {
+ // Add a dummy JitcodeGlobalTable entry.
+ JitcodeGlobalEntry::DummyEntry entry;
+ entry.init(code, code->raw(), code->rawEnd());
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(entry, cx->runtime())) {
+ // Memory may have been allocated for the entry.
+ entry.destroy();
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ }
+
+ ionScript->setMethod(code);
+ ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
+
+ // If SPS is enabled, mark the IonScript as having been instrumented with SPS.
+ if (isProfilerInstrumentationEnabled())
+ ionScript->setHasProfilingInstrumentation();
+
+ script->setIonScript(cx->runtime(), ionScript);
+
+ // Adopt fallback shared stubs from the compiler into the ion script.
+ ionScript->adoptFallbackStubs(&stubSpace_);
+
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
+ ImmPtr(ionScript),
+ ImmPtr((void*)-1));
+
+ for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
+ ImmPtr(ionScript),
+ ImmPtr((void*)-1));
+ }
+
+#ifdef JS_TRACE_LOGGING
+ bool TLFailed = false;
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+
+ for (uint32_t i = 0; i < patchableTLEvents_.length(); i++) {
+ // Create an event on the main thread.
+ TraceLoggerEvent event(logger, patchableTLEvents_[i].event);
+ if (!event.hasPayload() || !ionScript->addTraceLoggerEvent(event)) {
+ TLFailed = true;
+ break;
+ }
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLEvents_[i].offset),
+ ImmPtr((void*) uintptr_t(event.payload()->textId())),
+ ImmPtr((void*)0));
+ }
+
+ if (!TLFailed && patchableTLScripts_.length() > 0) {
+ MOZ_ASSERT(TraceLogTextIdEnabled(TraceLogger_Scripts));
+ TraceLoggerEvent event(logger, TraceLogger_Scripts, script);
+ if (!event.hasPayload() || !ionScript->addTraceLoggerEvent(event))
+ TLFailed = true;
+ if (!TLFailed) {
+ uint32_t textId = event.payload()->textId();
+ for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
+ ImmPtr((void*) uintptr_t(textId)),
+ ImmPtr((void*)0));
+ }
+ }
+ }
+
+ if (!TLFailed) {
+ for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
+ ImmPtr(logger),
+ ImmPtr(nullptr));
+ }
+ }
+#endif
+
+ // Patch shared stub IC loads using the IC entries.
+ for (size_t i = 0; i < sharedStubs_.length(); i++) {
+ CodeOffset label = sharedStubs_[i].label;
+
+ IonICEntry& entry = ionScript->sharedStubList()[i];
+ entry = sharedStubs_[i].entry;
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
+ ImmPtr(&entry),
+ ImmPtr((void*)-1));
+
+ MOZ_ASSERT(entry.hasStub());
+ MOZ_ASSERT(entry.firstStub()->isFallback());
+
+ entry.firstStub()->toFallbackStub()->fixupICEntry(&entry);
+ }
+
+ // For generating inline caches during execution.
+ if (runtimeData_.length())
+ ionScript->copyRuntimeData(&runtimeData_[0]);
+ if (cacheList_.length())
+ ionScript->copyCacheEntries(&cacheList_[0], masm);
+
+ JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)",
+ (void*) ionScript, (void*) code->raw());
+
+ ionScript->setInvalidationEpilogueDataOffset(invalidateEpilogueData_.offset());
+ ionScript->setOsrPc(gen->info().osrPc());
+ ionScript->setOsrEntryOffset(getOsrEntryOffset());
+ ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
+
+ ionScript->setDeoptTable(deoptTable_);
+
+#if defined(JS_ION_PERF)
+ if (PerfEnabled())
+ perfSpewer_.writeProfile(script, code, masm);
+#endif
+
+ // For marking during GC.
+ if (safepointIndices_.length())
+ ionScript->copySafepointIndices(&safepointIndices_[0], masm);
+ if (safepoints_.size())
+ ionScript->copySafepoints(&safepoints_);
+
+ // For recovering from an Ion frame.
+ if (bailouts_.length())
+ ionScript->copyBailoutTable(&bailouts_[0]);
+ if (osiIndices_.length())
+ ionScript->copyOsiIndices(&osiIndices_[0], masm);
+ if (snapshots_.listSize())
+ ionScript->copySnapshots(&snapshots_);
+ MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
+ if (recovers_.size())
+ ionScript->copyRecovers(&recovers_);
+ if (graph.numConstants()) {
+ const Value* vp = graph.constantPool();
+ ionScript->copyConstants(vp);
+ for (size_t i = 0; i < graph.numConstants(); i++) {
+ const Value& v = vp[i];
+ if (v.isObject() && IsInsideNursery(&v.toObject())) {
+ cx->runtime()->gc.storeBuffer.putWholeCell(script);
+ break;
+ }
+ }
+ }
+ if (patchableBackedges_.length() > 0)
+ ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin(), masm);
+
+ // The correct state for prebarriers is unknown until the end of compilation,
+ // since a GC can occur during code generation. All barriers are emitted
+ // off-by-default, and are toggled on here if necessary.
+ if (cx->zone()->needsIncrementalBarrier())
+ ionScript->toggleBarriers(true, DontReprotect);
+
+ // Attach any generated script counts to the script.
+ if (IonScriptCounts* counts = extractScriptCounts())
+ script->addIonCounts(counts);
+
+ guardIonScript.release();
+ guardRecordedConstraints.release();
+ return true;
+}
+
+// An out-of-line path to convert a boxed int32 to either a float or double.
+class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator>
+{
+ LUnboxFloatingPoint* unboxFloatingPoint_;
+
+ public:
+ explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
+ : unboxFloatingPoint_(unboxFloatingPoint)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineUnboxFloatingPoint(this);
+ }
+
+ LUnboxFloatingPoint* unboxFloatingPoint() const {
+ return unboxFloatingPoint_;
+ }
+};
+
+void
+CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir)
+{
+ const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
+ const LDefinition* result = lir->output();
+
+ // Out-of-line path to convert an int32 to a double, or to bail out
+ // if this instruction is fallible.
+ OutOfLineUnboxFloatingPoint* ool = new(alloc()) OutOfLineUnboxFloatingPoint(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ FloatRegister resultReg = ToFloatRegister(result);
+ masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
+ masm.unboxDouble(box, resultReg);
+ if (lir->type() == MIRType::Float32)
+ masm.convertDoubleToFloat32(resultReg, resultReg);
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineUnboxFloatingPoint(OutOfLineUnboxFloatingPoint* ool)
+{
+ LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
+ const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
+
+ if (ins->mir()->fallible()) {
+ Label bail;
+ masm.branchTestInt32(Assembler::NotEqual, value, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ }
+ masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()), ins->type());
+ masm.jump(ool->rejoin());
+}
+
+typedef JSObject* (*BindVarFn)(JSContext*, HandleObject);
+static const VMFunction BindVarInfo = FunctionInfo<BindVarFn>(jit::BindVar, "BindVar");
+
+void
+CodeGenerator::visitCallBindVar(LCallBindVar* lir)
+{
+ pushArg(ToRegister(lir->environmentChain()));
+ callVM(BindVarInfo, lir);
+}
+
+typedef bool (*GetPropertyFn)(JSContext*, HandleValue, HandlePropertyName, MutableHandleValue);
+static const VMFunction GetPropertyInfo = FunctionInfo<GetPropertyFn>(GetProperty, "GetProperty");
+
+void
+CodeGenerator::visitCallGetProperty(LCallGetProperty* lir)
+{
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(ToValue(lir, LCallGetProperty::Value));
+
+ callVM(GetPropertyInfo, lir);
+}
+
+typedef bool (*GetOrCallElementFn)(JSContext*, MutableHandleValue, HandleValue, MutableHandleValue);
+static const VMFunction GetElementInfo =
+ FunctionInfo<GetOrCallElementFn>(js::GetElement, "GetElement");
+static const VMFunction CallElementInfo =
+ FunctionInfo<GetOrCallElementFn>(js::CallElement, "CallElement");
+
+void
+CodeGenerator::visitCallGetElement(LCallGetElement* lir)
+{
+ pushArg(ToValue(lir, LCallGetElement::RhsInput));
+ pushArg(ToValue(lir, LCallGetElement::LhsInput));
+
+ JSOp op = JSOp(*lir->mir()->resumePoint()->pc());
+
+ if (op == JSOP_GETELEM) {
+ callVM(GetElementInfo, lir);
+ } else {
+ MOZ_ASSERT(op == JSOP_CALLELEM);
+ callVM(CallElementInfo, lir);
+ }
+}
+
+typedef bool (*SetObjectElementFn)(JSContext*, HandleObject, HandleValue, HandleValue,
+ bool strict);
+static const VMFunction SetObjectElementInfo =
+ FunctionInfo<SetObjectElementFn>(SetObjectElement, "SetObjectElement");
+
+void
+CodeGenerator::visitCallSetElement(LCallSetElement* lir)
+{
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(ToValue(lir, LCallSetElement::Value));
+ pushArg(ToValue(lir, LCallSetElement::Index));
+ pushArg(ToRegister(lir->getOperand(0)));
+ callVM(SetObjectElementInfo, lir);
+}
+
+typedef bool (*InitElementArrayFn)(JSContext*, jsbytecode*, HandleObject, uint32_t, HandleValue);
+static const VMFunction InitElementArrayInfo =
+ FunctionInfo<InitElementArrayFn>(js::InitElementArray, "InitElementArray");
+
+void
+CodeGenerator::visitCallInitElementArray(LCallInitElementArray* lir)
+{
+ pushArg(ToValue(lir, LCallInitElementArray::Value));
+ pushArg(Imm32(lir->mir()->index()));
+ pushArg(ToRegister(lir->getOperand(0)));
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+ callVM(InitElementArrayInfo, lir);
+}
+
+void
+CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins)
+{
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+ ValueOperand result = GetValueOutput(ins);
+
+ masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
+}
+
+void
+CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins)
+{
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+ AnyRegister result = ToAnyRegister(ins->getDef(0));
+ MIRType type = ins->mir()->type();
+
+ masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), type, result);
+}
+
+void
+CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins)
+{
+ const MLoadFixedSlotAndUnbox* mir = ins->mir();
+ MIRType type = mir->type();
+ const Register input = ToRegister(ins->getOperand(0));
+ AnyRegister result = ToAnyRegister(ins->output());
+ size_t slot = mir->slot();
+
+ Address address(input, NativeObject::getFixedSlotOffset(slot));
+ Label bail;
+ if (type == MIRType::Double) {
+ MOZ_ASSERT(result.isFloat());
+ masm.ensureDouble(address, result.fpu(), &bail);
+ if (mir->fallible())
+ bailoutFrom(&bail, ins->snapshot());
+ return;
+ }
+ if (mir->fallible()) {
+ switch (type) {
+ case MIRType::Int32:
+ masm.branchTestInt32(Assembler::NotEqual, address, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.branchTestBoolean(Assembler::NotEqual, address, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, ins->snapshot());
+ }
+ masm.loadUnboxedValue(address, type, result);
+}
+
+void
+CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins)
+{
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+
+ const ValueOperand value = ToValue(ins, LStoreFixedSlotV::Value);
+
+ Address address(obj, NativeObject::getFixedSlotOffset(slot));
+ if (ins->mir()->needsBarrier())
+ emitPreBarrier(address);
+
+ masm.storeValue(value, address);
+}
+
+void
+CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins)
+{
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+
+ const LAllocation* value = ins->value();
+ MIRType valueType = ins->mir()->value()->type();
+
+ Address address(obj, NativeObject::getFixedSlotOffset(slot));
+ if (ins->mir()->needsBarrier())
+ emitPreBarrier(address);
+
+ if (valueType == MIRType::ObjectOrNull) {
+ Register nvalue = ToRegister(value);
+ masm.storeObjectOrNull(nvalue, address);
+ } else {
+ ConstantOrRegister nvalue = value->isConstant()
+ ? ConstantOrRegister(value->toConstant()->toJSValue())
+ : TypedOrValueRegister(valueType, ToAnyRegister(value));
+ masm.storeConstantOrRegister(nvalue, address);
+ }
+}
+
+void
+CodeGenerator::visitGetNameCache(LGetNameCache* ins)
+{
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register envChain = ToRegister(ins->envObj());
+ TypedOrValueRegister output(GetValueOutput(ins));
+ bool isTypeOf = ins->mir()->accessKind() != MGetNameCache::NAME;
+
+ NameIC cache(liveRegs, isTypeOf, envChain, ins->mir()->name(), output);
+ cache.setProfilerLeavePC(ins->mir()->profilerLeavePc());
+ addCache(ins, allocateCache(cache));
+}
+
+typedef bool (*NameICFn)(JSContext*, HandleScript, size_t, HandleObject, MutableHandleValue);
+const VMFunction NameIC::UpdateInfo = FunctionInfo<NameICFn>(NameIC::update, "NameIC::update");
+
+void
+CodeGenerator::visitNameIC(OutOfLineUpdateCache* ool, DataPtr<NameIC>& ic)
+{
+ LInstruction* lir = ool->lir();
+ saveLive(lir);
+
+ pushArg(ic->environmentChainReg());
+ pushArg(Imm32(ool->getCacheIndex()));
+ pushArg(ImmGCPtr(gen->info().script()));
+ callVM(NameIC::UpdateInfo, lir);
+ StoreValueTo(ic->outputReg()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(ic->outputReg()).clobbered());
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::addGetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
+ const ConstantOrRegister& id, TypedOrValueRegister output,
+ bool monitoredResult, bool allowDoubleResult,
+ jsbytecode* profilerLeavePc)
+{
+ GetPropertyIC cache(liveRegs, objReg, id, output, monitoredResult, allowDoubleResult);
+ cache.setProfilerLeavePC(profilerLeavePc);
+ addCache(ins, allocateCache(cache));
+}
+
+void
+CodeGenerator::addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
+ Register temp, Register tempUnbox, FloatRegister tempDouble,
+ FloatRegister tempF32, const ConstantOrRegister& id,
+ const ConstantOrRegister& value,
+ bool strict, bool needsTypeBarrier, bool guardHoles,
+ jsbytecode* profilerLeavePc)
+{
+ SetPropertyIC cache(liveRegs, objReg, temp, tempUnbox, tempDouble, tempF32, id, value, strict,
+ needsTypeBarrier, guardHoles);
+ cache.setProfilerLeavePC(profilerLeavePc);
+ addCache(ins, allocateCache(cache));
+}
+
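+// Convert operand |n| of |lir| into a ConstantOrRegister of the given MIRType.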
+ConstantOrRegister
+CodeGenerator::toConstantOrRegister(LInstruction* lir, size_t n, MIRType type)
+{
+ if (type == MIRType::Value)
+ return TypedOrValueRegister(ToValue(lir, n));
+
+ const LAllocation* value = lir->getOperand(n);
+ if (value->isConstant())
+ return ConstantOrRegister(value->toConstant()->toJSValue());
+
+ return TypedOrValueRegister(type, ToAnyRegister(value));
+}
+
+void
+CodeGenerator::visitGetPropertyCacheV(LGetPropertyCacheV* ins)
+{
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register objReg = ToRegister(ins->getOperand(0));
+ ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCacheV::Id, ins->mir()->idval()->type());
+ bool monitoredResult = ins->mir()->monitoredResult();
+ TypedOrValueRegister output = TypedOrValueRegister(GetValueOutput(ins));
+
+ addGetPropertyCache(ins, liveRegs, objReg, id, output, monitoredResult,
+ ins->mir()->allowDoubleResult(), ins->mir()->profilerLeavePc());
+}
+
+void
+CodeGenerator::visitGetPropertyCacheT(LGetPropertyCacheT* ins)
+{
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register objReg = ToRegister(ins->getOperand(0));
+ ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCacheT::Id, ins->mir()->idval()->type());
+ bool monitoredResult = ins->mir()->monitoredResult();
+ TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->getDef(0)));
+
+ addGetPropertyCache(ins, liveRegs, objReg, id, output, monitoredResult,
+ ins->mir()->allowDoubleResult(), ins->mir()->profilerLeavePc());
+}
+
+typedef bool (*GetPropertyICFn)(JSContext*, HandleScript, size_t, HandleObject, HandleValue,
+ MutableHandleValue);
+const VMFunction GetPropertyIC::UpdateInfo =
+ FunctionInfo<GetPropertyICFn>(GetPropertyIC::update, "GetPropertyIC::update");
+
+void
+CodeGenerator::visitGetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<GetPropertyIC>& ic)
+{
+ LInstruction* lir = ool->lir();
+
+ if (ic->idempotent()) {
+ size_t numLocs;
+ CacheLocationList& cacheLocs = lir->mirRaw()->toGetPropertyCache()->location();
+ size_t locationBase;
+ if (!addCacheLocations(cacheLocs, &numLocs, &locationBase))
+ return;
+ ic->setLocationInfo(locationBase, numLocs);
+ }
+
+ saveLive(lir);
+
+ pushArg(ic->id());
+ pushArg(ic->object());
+ pushArg(Imm32(ool->getCacheIndex()));
+ pushArg(ImmGCPtr(gen->info().script()));
+ callVM(GetPropertyIC::UpdateInfo, lir);
+ StoreValueTo(ic->output()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitBindNameCache(LBindNameCache* ins)
+{
+ Register envChain = ToRegister(ins->environmentChain());
+ Register output = ToRegister(ins->output());
+ BindNameIC cache(envChain, ins->mir()->name(), output);
+ cache.setProfilerLeavePC(ins->mir()->profilerLeavePc());
+
+ addCache(ins, allocateCache(cache));
+}
+
+typedef JSObject* (*BindNameICFn)(JSContext*, HandleScript, size_t, HandleObject);
+const VMFunction BindNameIC::UpdateInfo =
+ FunctionInfo<BindNameICFn>(BindNameIC::update, "BindNameIC::update");
+
+void
+CodeGenerator::visitBindNameIC(OutOfLineUpdateCache* ool, DataPtr<BindNameIC>& ic)
+{
+ LInstruction* lir = ool->lir();
+ saveLive(lir);
+
+ pushArg(ic->environmentChainReg());
+ pushArg(Imm32(ool->getCacheIndex()));
+ pushArg(ImmGCPtr(gen->info().script()));
+ callVM(BindNameIC::UpdateInfo, lir);
+ StoreRegisterTo(ic->outputReg()).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(ic->outputReg()).clobbered());
+
+ masm.jump(ool->rejoin());
+}
+
+typedef bool (*SetPropertyFn)(JSContext*, HandleObject,
+ HandlePropertyName, const HandleValue, bool, jsbytecode*);
+static const VMFunction SetPropertyInfo = FunctionInfo<SetPropertyFn>(SetProperty, "SetProperty");
+
+void
+CodeGenerator::visitCallSetProperty(LCallSetProperty* ins)
+{
+ ConstantOrRegister value = TypedOrValueRegister(ToValue(ins, LCallSetProperty::Value));
+
+ const Register objReg = ToRegister(ins->getOperand(0));
+
+ pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
+ pushArg(Imm32(ins->mir()->strict()));
+
+ pushArg(value);
+ pushArg(ImmGCPtr(ins->mir()->name()));
+ pushArg(objReg);
+
+ callVM(SetPropertyInfo, ins);
+}
+
+typedef bool (*DeletePropertyFn)(JSContext*, HandleValue, HandlePropertyName, bool*);
+static const VMFunction DeletePropertyStrictInfo =
+ FunctionInfo<DeletePropertyFn>(DeletePropertyJit<true>, "DeletePropertyStrictJit");
+static const VMFunction DeletePropertyNonStrictInfo =
+ FunctionInfo<DeletePropertyFn>(DeletePropertyJit<false>, "DeletePropertyNonStrictJit");
+
+void
+CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir)
+{
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(ToValue(lir, LCallDeleteProperty::Value));
+
+ if (lir->mir()->strict())
+ callVM(DeletePropertyStrictInfo, lir);
+ else
+ callVM(DeletePropertyNonStrictInfo, lir);
+}
+
+typedef bool (*DeleteElementFn)(JSContext*, HandleValue, HandleValue, bool*);
+static const VMFunction DeleteElementStrictInfo =
+ FunctionInfo<DeleteElementFn>(DeleteElementJit<true>, "DeleteElementStrictJit");
+static const VMFunction DeleteElementNonStrictInfo =
+ FunctionInfo<DeleteElementFn>(DeleteElementJit<false>, "DeleteElementNonStrictJit");
+
+void
+CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir)
+{
+ pushArg(ToValue(lir, LCallDeleteElement::Index));
+ pushArg(ToValue(lir, LCallDeleteElement::Value));
+
+ if (lir->mir()->strict())
+ callVM(DeleteElementStrictInfo, lir);
+ else
+ callVM(DeleteElementNonStrictInfo, lir);
+}
+
+void
+CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins)
+{
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register objReg = ToRegister(ins->getOperand(0));
+ Register temp = ToRegister(ins->temp());
+ Register tempUnbox = ToTempUnboxRegister(ins->tempToUnboxIndex());
+ FloatRegister tempDouble = ToTempFloatRegisterOrInvalid(ins->tempDouble());
+ FloatRegister tempF32 = ToTempFloatRegisterOrInvalid(ins->tempFloat32());
+
+ ConstantOrRegister id =
+ toConstantOrRegister(ins, LSetPropertyCache::Id, ins->mir()->idval()->type());
+ ConstantOrRegister value =
+ toConstantOrRegister(ins, LSetPropertyCache::Value, ins->mir()->value()->type());
+
+ addSetPropertyCache(ins, liveRegs, objReg, temp, tempUnbox, tempDouble, tempF32,
+ id, value, ins->mir()->strict(), ins->mir()->needsTypeBarrier(),
+ ins->mir()->guardHoles(), ins->mir()->profilerLeavePc());
+}
+
+typedef bool (*SetPropertyICFn)(JSContext*, HandleScript, size_t, HandleObject, HandleValue,
+ HandleValue);
+const VMFunction SetPropertyIC::UpdateInfo =
+ FunctionInfo<SetPropertyICFn>(SetPropertyIC::update, "SetPropertyIC::update");
+
+void
+CodeGenerator::visitSetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<SetPropertyIC>& ic)
+{
+ LInstruction* lir = ool->lir();
+ saveLive(lir);
+
+ pushArg(ic->value());
+ pushArg(ic->id());
+ pushArg(ic->object());
+ pushArg(Imm32(ool->getCacheIndex()));
+ pushArg(ImmGCPtr(gen->info().script()));
+ callVM(SetPropertyIC::UpdateInfo, lir);
+ restoreLive(lir);
+
+ masm.jump(ool->rejoin());
+}
+
+typedef bool (*ThrowFn)(JSContext*, HandleValue);
+static const VMFunction ThrowInfoCodeGen = FunctionInfo<ThrowFn>(js::Throw, "Throw");
+
+void
+CodeGenerator::visitThrow(LThrow* lir)
+{
+ pushArg(ToValue(lir, LThrow::Value));
+ callVM(ThrowInfoCodeGen, lir);
+}
+
+typedef bool (*BitNotFn)(JSContext*, HandleValue, int* p);
+static const VMFunction BitNotInfo = FunctionInfo<BitNotFn>(BitNot, "BitNot");
+
+void
+CodeGenerator::visitBitNotV(LBitNotV* lir)
+{
+ pushArg(ToValue(lir, LBitNotV::Input));
+ callVM(BitNotInfo, lir);
+}
+
+typedef bool (*BitopFn)(JSContext*, HandleValue, HandleValue, int* p);
+static const VMFunction BitAndInfo = FunctionInfo<BitopFn>(BitAnd, "BitAnd");
+static const VMFunction BitOrInfo = FunctionInfo<BitopFn>(BitOr, "BitOr");
+static const VMFunction BitXorInfo = FunctionInfo<BitopFn>(BitXor, "BitXor");
+static const VMFunction BitLhsInfo = FunctionInfo<BitopFn>(BitLsh, "BitLsh");
+static const VMFunction BitRhsInfo = FunctionInfo<BitopFn>(BitRsh, "BitRsh");
+
+void
+CodeGenerator::visitBitOpV(LBitOpV* lir)
+{
+ pushArg(ToValue(lir, LBitOpV::RhsInput));
+ pushArg(ToValue(lir, LBitOpV::LhsInput));
+
+ switch (lir->jsop()) {
+ case JSOP_BITAND:
+ callVM(BitAndInfo, lir);
+ break;
+ case JSOP_BITOR:
+ callVM(BitOrInfo, lir);
+ break;
+ case JSOP_BITXOR:
+ callVM(BitXorInfo, lir);
+ break;
+ case JSOP_LSH:
+ callVM(BitLhsInfo, lir);
+ break;
+ case JSOP_RSH:
+ callVM(BitRhsInfo, lir);
+ break;
+ default:
+ MOZ_CRASH("unexpected bitop");
+ }
+}
+
+class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator>
+{
+ LTypeOfV* ins_;
+
+ public:
+ explicit OutOfLineTypeOfV(LTypeOfV* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineTypeOfV(this);
+ }
+ LTypeOfV* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGenerator::visitTypeOfV(LTypeOfV* lir)
+{
+ const ValueOperand value = ToValue(lir, LTypeOfV::Input);
+ Register output = ToRegister(lir->output());
+ Register tag = masm.splitTagForTest(value);
+
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ Label done;
+
+ MDefinition* input = lir->mir()->input();
+
+ bool testObject = input->mightBeType(MIRType::Object);
+ bool testNumber = input->mightBeType(MIRType::Int32) || input->mightBeType(MIRType::Double);
+ bool testBoolean = input->mightBeType(MIRType::Boolean);
+ bool testUndefined = input->mightBeType(MIRType::Undefined);
+ bool testNull = input->mightBeType(MIRType::Null);
+ bool testString = input->mightBeType(MIRType::String);
+ bool testSymbol = input->mightBeType(MIRType::Symbol);
+
+ unsigned numTests = unsigned(testObject) + unsigned(testNumber) + unsigned(testBoolean) +
+ unsigned(testUndefined) + unsigned(testNull) + unsigned(testString) + unsigned(testSymbol);
+
+ MOZ_ASSERT_IF(!input->emptyResultTypeSet(), numTests > 0);
+
+ OutOfLineTypeOfV* ool = nullptr;
+ if (testObject) {
+ if (lir->mir()->inputMaybeCallableOrEmulatesUndefined()) {
+ // The input may be a callable object (result is "function") or may
+ // emulate undefined (result is "undefined"). Use an OOL path.
+ ool = new(alloc()) OutOfLineTypeOfV(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ if (numTests > 1)
+ masm.branchTestObject(Assembler::Equal, tag, ool->entry());
+ else
+ masm.jump(ool->entry());
+ } else {
+ // Input is not callable and does not emulate undefined, so if
+ // it's an object the result is always "object".
+ Label notObject;
+ if (numTests > 1)
+ masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
+ masm.movePtr(ImmGCPtr(names.object), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notObject);
+ }
+ numTests--;
+ }
+
+ if (testNumber) {
+ Label notNumber;
+ if (numTests > 1)
+ masm.branchTestNumber(Assembler::NotEqual, tag, &notNumber);
+ masm.movePtr(ImmGCPtr(names.number), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notNumber);
+ numTests--;
+ }
+
+ if (testUndefined) {
+ Label notUndefined;
+ if (numTests > 1)
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ masm.movePtr(ImmGCPtr(names.undefined), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notUndefined);
+ numTests--;
+ }
+
+ if (testNull) {
+ Label notNull;
+ if (numTests > 1)
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ masm.movePtr(ImmGCPtr(names.object), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notNull);
+ numTests--;
+ }
+
+ if (testBoolean) {
+ Label notBoolean;
+ if (numTests > 1)
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ masm.movePtr(ImmGCPtr(names.boolean), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notBoolean);
+ numTests--;
+ }
+
+ if (testString) {
+ Label notString;
+ if (numTests > 1)
+ masm.branchTestString(Assembler::NotEqual, tag, &notString);
+ masm.movePtr(ImmGCPtr(names.string), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notString);
+ numTests--;
+ }
+
+ if (testSymbol) {
+ Label notSymbol;
+ if (numTests > 1)
+ masm.branchTestSymbol(Assembler::NotEqual, tag, &notSymbol);
+ masm.movePtr(ImmGCPtr(names.symbol), output);
+ if (numTests > 1)
+ masm.jump(&done);
+ masm.bind(&notSymbol);
+ numTests--;
+ }
+
+ MOZ_ASSERT(numTests == 0);
+
+ masm.bind(&done);
+ if (ool)
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool)
+{
+ LTypeOfV* ins = ool->ins();
+
+ ValueOperand input = ToValue(ins, LTypeOfV::Input);
+ Register temp = ToTempUnboxRegister(ins->tempToUnbox());
+ Register output = ToRegister(ins->output());
+
+ Register obj = masm.extractObject(input, temp);
+
+ saveVolatile(output);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(obj);
+ masm.movePtr(ImmPtr(GetJitContext()->runtime), output);
+ masm.passABIArg(output);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::TypeOfObjectOperation));
+ masm.storeCallPointerResult(output);
+ restoreVolatile(output);
+
+ masm.jump(ool->rejoin());
+}
+
+typedef JSObject* (*ToAsyncFn)(JSContext*, HandleFunction);
+static const VMFunction ToAsyncInfo = FunctionInfo<ToAsyncFn>(js::WrapAsyncFunction, "ToAsync");
+
+void
+CodeGenerator::visitToAsync(LToAsync* lir)
+{
+ pushArg(ToRegister(lir->unwrapped()));
+ callVM(ToAsyncInfo, lir);
+}
+
+typedef bool (*ToIdFn)(JSContext*, HandleScript, jsbytecode*, HandleValue,
+ MutableHandleValue);
+static const VMFunction ToIdInfo = FunctionInfo<ToIdFn>(ToIdOperation, "ToIdOperation");
+
+void
+CodeGenerator::visitToIdV(LToIdV* lir)
+{
+ Label notInt32;
+ FloatRegister temp = ToFloatRegister(lir->tempFloat());
+ const ValueOperand out = ToOutValue(lir);
+ ValueOperand input = ToValue(lir, LToIdV::Input);
+
+ OutOfLineCode* ool = oolCallVM(ToIdInfo, lir,
+ ArgList(ImmGCPtr(current->mir()->info().script()),
+ ImmPtr(lir->mir()->resumePoint()->pc()),
+ ToValue(lir, LToIdV::Input)),
+ StoreValueTo(out));
+
+ Register tag = masm.splitTagForTest(input);
+
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
+ masm.moveValue(input, out);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&notInt32);
+ masm.branchTestDouble(Assembler::NotEqual, tag, ool->entry());
+ masm.unboxDouble(input, temp);
+ masm.convertDoubleToInt32(temp, out.scratchReg(), ool->entry(), true);
+ masm.tagValue(JSVAL_TYPE_INT32, out.scratchReg(), out);
+
+ masm.bind(ool->rejoin());
+}
+
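+// Shared helper for typed element loads: optionally bail out on holes (magic
+// values), then load either a raw double or an unboxed value of the MIR type.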
+template<typename T>
+void
+CodeGenerator::emitLoadElementT(LLoadElementT* lir, const T& source)
+{
+ if (LIRGenerator::allowTypedElementHoleCheck()) {
+ if (lir->mir()->needsHoleCheck()) {
+ Label bail;
+ masm.branchTestMagic(Assembler::Equal, source, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+ } else {
+ MOZ_ASSERT(!lir->mir()->needsHoleCheck());
+ }
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ if (lir->mir()->loadDoubles())
+ masm.loadDouble(source, output.fpu());
+ else
+ masm.loadUnboxedValue(source, lir->mir()->type(), output);
+}
+
+void
+CodeGenerator::visitLoadElementT(LLoadElementT* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+ if (index->isConstant()) {
+ int32_t offset = ToInt32(index) * sizeof(js::Value) + lir->mir()->offsetAdjustment();
+ emitLoadElementT(lir, Address(elements, offset));
+ } else {
+ emitLoadElementT(lir, BaseIndex(elements, ToRegister(index), TimesEight,
+ lir->mir()->offsetAdjustment()));
+ }
+}
+
+void
+CodeGenerator::visitLoadElementV(LLoadElementV* load)
+{
+ Register elements = ToRegister(load->elements());
+ const ValueOperand out = ToOutValue(load);
+
+ if (load->index()->isConstant()) {
+ NativeObject::elementsSizeMustNotOverflow();
+ int32_t offset = ToInt32(load->index()) * sizeof(Value) + load->mir()->offsetAdjustment();
+ masm.loadValue(Address(elements, offset), out);
+ } else {
+ masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index()),
+ load->mir()->offsetAdjustment()), out);
+ }
+
+ if (load->mir()->needsHoleCheck()) {
+ Label testMagic;
+ masm.branchTestMagic(Assembler::Equal, out, &testMagic);
+ bailoutFrom(&testMagic, load->snapshot());
+ }
+}
+
+void
+CodeGenerator::visitLoadElementHole(LLoadElementHole* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ Register initLength = ToRegister(lir->initLength());
+ const ValueOperand out = ToOutValue(lir);
+
+ const MLoadElementHole* mir = lir->mir();
+
+ // If the index is out of bounds, load |undefined|. Otherwise, load the
+ // value.
+ Label undefined, done;
+ if (lir->index()->isConstant())
+ masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(ToInt32(lir->index())), &undefined);
+ else
+ masm.branch32(Assembler::BelowOrEqual, initLength, ToRegister(lir->index()), &undefined);
+
+ if (mir->unboxedType() != JSVAL_TYPE_MAGIC) {
+ size_t width = UnboxedTypeSize(mir->unboxedType());
+ if (lir->index()->isConstant()) {
+ Address addr(elements, ToInt32(lir->index()) * width);
+ masm.loadUnboxedProperty(addr, mir->unboxedType(), out);
+ } else {
+ BaseIndex addr(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.loadUnboxedProperty(addr, mir->unboxedType(), out);
+ }
+ } else {
+ if (lir->index()->isConstant()) {
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.loadValue(Address(elements, ToInt32(lir->index()) * sizeof(Value)), out);
+ } else {
+ masm.loadValue(BaseObjectElementIndex(elements, ToRegister(lir->index())), out);
+ }
+ }
+
+ // If a hole check is needed, and the value wasn't a hole, we're done.
+ // Otherwise, we'll load undefined.
+ if (lir->mir()->needsHoleCheck())
+ masm.branchTestMagic(Assembler::NotEqual, out, &done);
+ else
+ masm.jump(&done);
+
+ masm.bind(&undefined);
+
+ if (mir->needsNegativeIntCheck()) {
+ if (lir->index()->isConstant()) {
+ if (ToInt32(lir->index()) < 0)
+ bailout(lir->snapshot());
+ } else {
+ Label negative;
+ masm.branch32(Assembler::LessThan, ToRegister(lir->index()), Imm32(0), &negative);
+ bailoutFrom(&negative, lir->snapshot());
+ }
+ }
+
+ masm.moveValue(UndefinedValue(), out);
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitLoadUnboxedPointerV(LLoadUnboxedPointerV* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ const ValueOperand out = ToOutValue(lir);
+
+ if (lir->index()->isConstant()) {
+ int32_t offset = ToInt32(lir->index()) * sizeof(uintptr_t) + lir->mir()->offsetAdjustment();
+ masm.loadPtr(Address(elements, offset), out.scratchReg());
+ } else {
+ masm.loadPtr(BaseIndex(elements, ToRegister(lir->index()), ScalePointer,
+ lir->mir()->offsetAdjustment()), out.scratchReg());
+ }
+
+ Label notNull, done;
+ masm.branchPtr(Assembler::NotEqual, out.scratchReg(), ImmWord(0), &notNull);
+
+ masm.moveValue(NullValue(), out);
+ masm.jump(&done);
+
+ masm.bind(&notNull);
+ masm.tagValue(JSVAL_TYPE_OBJECT, out.scratchReg(), out);
+
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitLoadUnboxedPointerT(LLoadUnboxedPointerT* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+ Register out = ToRegister(lir->output());
+
+ bool bailOnNull;
+ int32_t offsetAdjustment;
+ if (lir->mir()->isLoadUnboxedObjectOrNull()) {
+ bailOnNull = lir->mir()->toLoadUnboxedObjectOrNull()->nullBehavior() ==
+ MLoadUnboxedObjectOrNull::BailOnNull;
+ offsetAdjustment = lir->mir()->toLoadUnboxedObjectOrNull()->offsetAdjustment();
+ } else if (lir->mir()->isLoadUnboxedString()) {
+ bailOnNull = false;
+ offsetAdjustment = lir->mir()->toLoadUnboxedString()->offsetAdjustment();
+ } else {
+ MOZ_CRASH();
+ }
+
+ if (index->isConstant()) {
+ Address source(elements, ToInt32(index) * sizeof(uintptr_t) + offsetAdjustment);
+ masm.loadPtr(source, out);
+ } else {
+ BaseIndex source(elements, ToRegister(index), ScalePointer, offsetAdjustment);
+ masm.loadPtr(source, out);
+ }
+
+ if (bailOnNull) {
+ Label bail;
+ masm.branchTestPtr(Assembler::Zero, out, out, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+}
+
+void
+CodeGenerator::visitUnboxObjectOrNull(LUnboxObjectOrNull* lir)
+{
+ Register obj = ToRegister(lir->input());
+
+ if (lir->mir()->fallible()) {
+ Label bail;
+ masm.branchTestPtr(Assembler::Zero, obj, obj, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+}
+
+void
+CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+ AnyRegister out = ToAnyRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type readType = mir->readType();
+ unsigned numElems = mir->numElems();
+
+ int width = Scalar::byteSize(mir->storageType());
+ bool canonicalizeDouble = mir->canonicalizeDoubles();
+
+ Label fail;
+ if (lir->index()->isConstant()) {
+ Address source(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
+ masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
+ mir->offsetAdjustment());
+ masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
+ }
+
+ if (fail.used())
+ bailoutFrom(&fail, lir->snapshot());
+}
+
+void
+CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
+{
+ Register object = ToRegister(lir->object());
+ const ValueOperand out = ToOutValue(lir);
+
+ // Load the length.
+ Register scratch = out.scratchReg();
+ RegisterOrInt32Constant key = ToRegisterOrInt32Constant(lir->index());
+ masm.unboxInt32(Address(object, TypedArrayObject::lengthOffset()), scratch);
+
+ // Load undefined unless length > key.
+ Label inbounds, done;
+ masm.branch32(Assembler::Above, scratch, key, &inbounds);
+ masm.moveValue(UndefinedValue(), out);
+ masm.jump(&done);
+
+ // Load the elements vector.
+ masm.bind(&inbounds);
+ masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), scratch);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ Label fail;
+ if (key.isConstant()) {
+ Address source(scratch, key.constant() * width);
+ masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
+ out.scratchReg(), &fail);
+ } else {
+ BaseIndex source(scratch, key.reg(), ScaleFromElemWidth(width));
+ masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
+ out.scratchReg(), &fail);
+ }
+
+ if (fail.used())
+ bailoutFrom(&fail, lir->snapshot());
+
+ masm.bind(&done);
+}
+
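+// Store a value to a typed-array element, dispatching on whether the element
+// type is floating point (or SIMD) or integral.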
+template <typename T>
+static inline void
+StoreToTypedArray(MacroAssembler& masm, Scalar::Type writeType, const LAllocation* value,
+ const T& dest, unsigned numElems = 0)
+{
+ if (Scalar::isSimdType(writeType) ||
+ writeType == Scalar::Float32 ||
+ writeType == Scalar::Float64)
+ {
+ masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, numElems);
+ } else {
+ if (value->isConstant())
+ masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
+ else
+ masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
+ }
+}
+
+void
+CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+
+ const MStoreUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type writeType = mir->writeType();
+ unsigned numElems = mir->numElems();
+
+ int width = Scalar::byteSize(mir->storageType());
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
+ StoreToTypedArray(masm, writeType, value, dest, numElems);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
+ mir->offsetAdjustment());
+ StoreToTypedArray(masm, writeType, value, dest, numElems);
+ }
+}
+
+void
+CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ const LAllocation* index = lir->index();
+ const LAllocation* length = lir->length();
+
+ bool guardLength = true;
+ if (index->isConstant() && length->isConstant()) {
+ uint32_t idx = ToInt32(index);
+ uint32_t len = ToInt32(length);
+ if (idx >= len)
+ return;
+ guardLength = false;
+ }
+ Label skip;
+ if (index->isConstant()) {
+ uint32_t idx = ToInt32(index);
+ if (guardLength) {
+ if (length->isRegister())
+ masm.branch32(Assembler::BelowOrEqual, ToRegister(length), Imm32(idx), &skip);
+ else
+ masm.branch32(Assembler::BelowOrEqual, ToAddress(length), Imm32(idx), &skip);
+ }
+ Address dest(elements, idx * width);
+ StoreToTypedArray(masm, arrayType, value, dest);
+ } else {
+ Register idxReg = ToRegister(index);
+ MOZ_ASSERT(guardLength);
+ if (length->isConstant())
+ masm.branch32(Assembler::AboveOrEqual, idxReg, Imm32(ToInt32(length)), &skip);
+ else if (length->isRegister())
+ masm.branch32(Assembler::BelowOrEqual, ToRegister(length), idxReg, &skip);
+ else
+ masm.branch32(Assembler::BelowOrEqual, ToAddress(length), idxReg, &skip);
+ BaseIndex dest(elements, ToRegister(index), ScaleFromElemWidth(width));
+ StoreToTypedArray(masm, arrayType, value, dest);
+ }
+ if (guardLength)
+ masm.bind(&skip);
+}
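+
+ // Companion sketch (not compiled) for the store-with-hole path above: an
+ // out-of-bounds index skips the store silently instead of throwing, and a
+ // constant index known to be out of bounds emits no code at all. The raw
+ // pointer form is illustrative, not the real element layout.
+ #if 0
+ static void StoreInt32ElementHoleSketch(int32_t* data, uint32_t length,
+ uint32_t index, int32_t value)
+ {
+ if (index >= length) // the BelowOrEqual guard jumps to &skip
+ return; // store is silently dropped
+ data[index] = value; // StoreToTypedArray on the guarded path
+ }
+ #endif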
+
+void
+CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir)
+{
+ Register value = ToRegister(lir->value());
+ Register output = ToRegister(lir->output());
+
+ // Keep this in sync with isLockfree() in jit/AtomicOperations.h.
+ MOZ_ASSERT(AtomicOperations::isLockfree(1)); // Implementation artifact
+ MOZ_ASSERT(AtomicOperations::isLockfree(2)); // Implementation artifact
+ MOZ_ASSERT(AtomicOperations::isLockfree(4)); // Spec requirement
+ MOZ_ASSERT(!AtomicOperations::isLockfree(8)); // Implementation invariant, for now
+
+ Label Ldone, Lfailed;
+ masm.move32(Imm32(1), output);
+ masm.branch32(Assembler::Equal, value, Imm32(4), &Ldone);
+ masm.branch32(Assembler::Equal, value, Imm32(2), &Ldone);
+ masm.branch32(Assembler::Equal, value, Imm32(1), &Ldone);
+ masm.move32(Imm32(0), output);
+ masm.bind(&Ldone);
+}
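+
+ // The generated code computes the same predicate as this one-line sketch,
+ // matching the asserts above: byte sizes 1, 2 and 4 are reported lock-free,
+ // while 8 (and everything else) is not, for now.
+ #if 0
+ static bool AtomicIsLockFreeSketch(int32_t size)
+ {
+ return size == 1 || size == 2 || size == 4;
+ }
+ #endif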
+
+void
+CodeGenerator::visitGuardSharedTypedArray(LGuardSharedTypedArray* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ // The shared-memory flag is a bit in the ObjectElements header
+ // that is set if the TypedArray is mapping a SharedArrayBuffer.
+ // The flag is set at construction and does not change subsequently.
+ masm.loadPtr(Address(obj, TypedArrayObject::offsetOfElements()), tmp);
+ masm.load32(Address(tmp, ObjectElements::offsetOfFlags()), tmp);
+ bailoutTest32(Assembler::Zero, tmp, Imm32(ObjectElements::SHARED_MEMORY), guard->snapshot());
+}
+
+void
+CodeGenerator::visitClampIToUint8(LClampIToUint8* lir)
+{
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(output == ToRegister(lir->input()));
+ masm.clampIntToUint8(output);
+}
+
+void
+CodeGenerator::visitClampDToUint8(LClampDToUint8* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.clampDoubleToUint8(input, output);
+}
+
+void
+CodeGenerator::visitClampVToUint8(LClampVToUint8* lir)
+{
+ ValueOperand operand = ToValue(lir, LClampVToUint8::Input);
+ FloatRegister tempFloat = ToFloatRegister(lir->tempFloat());
+ Register output = ToRegister(lir->output());
+ MDefinition* input = lir->mir()->input();
+
+ Label* stringEntry;
+ Label* stringRejoin;
+ if (input->mightBeType(MIRType::String)) {
+ OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(output),
+ StoreFloatRegisterTo(tempFloat));
+ stringEntry = oolString->entry();
+ stringRejoin = oolString->rejoin();
+ } else {
+ stringEntry = nullptr;
+ stringRejoin = nullptr;
+ }
+
+ Label fails;
+ masm.clampValueToUint8(operand, input,
+ stringEntry, stringRejoin,
+ output, tempFloat, output, &fails);
+
+ bailoutFrom(&fails, lir->snapshot());
+}
+
+typedef bool (*OperatorInFn)(JSContext*, HandleValue, HandleObject, bool*);
+static const VMFunction OperatorInInfo = FunctionInfo<OperatorInFn>(OperatorIn, "OperatorIn");
+
+void
+CodeGenerator::visitIn(LIn* ins)
+{
+ pushArg(ToRegister(ins->rhs()));
+ pushArg(ToValue(ins, LIn::LHS));
+
+ callVM(OperatorInInfo, ins);
+}
+
+typedef bool (*OperatorInIFn)(JSContext*, uint32_t, HandleObject, bool*);
+static const VMFunction OperatorInIInfo = FunctionInfo<OperatorInIFn>(OperatorInI, "OperatorInI");
+
+void
+CodeGenerator::visitInArray(LInArray* lir)
+{
+ const MInArray* mir = lir->mir();
+ Register elements = ToRegister(lir->elements());
+ Register initLength = ToRegister(lir->initLength());
+ Register output = ToRegister(lir->output());
+
+ // When the array is not packed we need to do a hole check in addition to the bounds check.
+ Label falseBranch, done, trueBranch;
+
+ OutOfLineCode* ool = nullptr;
+ Label* failedInitLength = &falseBranch;
+
+ if (lir->index()->isConstant()) {
+ int32_t index = ToInt32(lir->index());
+
+ MOZ_ASSERT_IF(index < 0, mir->needsNegativeIntCheck());
+ if (mir->needsNegativeIntCheck()) {
+ ool = oolCallVM(OperatorInIInfo, lir,
+ ArgList(Imm32(index), ToRegister(lir->object())),
+ StoreRegisterTo(output));
+ failedInitLength = ool->entry();
+ }
+
+ masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index), failedInitLength);
+ if (mir->needsHoleCheck() && mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ NativeObject::elementsSizeMustNotOverflow();
+ Address address = Address(elements, index * sizeof(Value));
+ masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
+ }
+ } else {
+ Label negativeIntCheck;
+ Register index = ToRegister(lir->index());
+
+ if (mir->needsNegativeIntCheck())
+ failedInitLength = &negativeIntCheck;
+
+ masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
+ if (mir->needsHoleCheck() && mir->unboxedType() == JSVAL_TYPE_MAGIC) {
+ BaseIndex address = BaseIndex(elements, ToRegister(lir->index()), TimesEight);
+ masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
+ }
+ masm.jump(&trueBranch);
+
+ if (mir->needsNegativeIntCheck()) {
+ masm.bind(&negativeIntCheck);
+ ool = oolCallVM(OperatorInIInfo, lir,
+ ArgList(index, ToRegister(lir->object())),
+ StoreRegisterTo(output));
+
+ masm.branch32(Assembler::LessThan, index, Imm32(0), ool->entry());
+ masm.jump(&falseBranch);
+ }
+ }
+
+ masm.bind(&trueBranch);
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+
+ masm.bind(&falseBranch);
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+
+ if (ool)
+ masm.bind(ool->rejoin());
+}
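+
+ // A sketch (not compiled) of the decision the inline path above makes for
+ // |index in denseArray|: indices below the initialized length answer true
+ // unless they hit a hole; negative indices, when they can occur, defer to
+ // the OperatorInI VM call. The parameters are stand-ins for the values the
+ // generated code reads from registers.
+ #if 0
+ static bool InDenseArraySketch(const Value* elements, uint32_t initLength,
+ int32_t index, bool needsHoleCheck)
+ {
+ if (index < 0 || uint32_t(index) >= initLength)
+ return false; // or the OperatorInI VM call for negative indices
+ if (needsHoleCheck && elements[index].isMagic())
+ return false; // a hole answers false for |in|
+ return true;
+ }
+ #endif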
+
+void
+CodeGenerator::visitInstanceOfO(LInstanceOfO* ins)
+{
+ emitInstanceOf(ins, ins->mir()->prototypeObject());
+}
+
+void
+CodeGenerator::visitInstanceOfV(LInstanceOfV* ins)
+{
+ emitInstanceOf(ins, ins->mir()->prototypeObject());
+}
+
+// Wrap IsDelegateOfObject, which takes a JSObject*, not a HandleObject
+static bool
+IsDelegateObject(JSContext* cx, HandleObject protoObj, HandleObject obj, bool* res)
+{
+ return IsDelegateOfObject(cx, protoObj, obj, res);
+}
+
+typedef bool (*IsDelegateObjectFn)(JSContext*, HandleObject, HandleObject, bool*);
+static const VMFunction IsDelegateObjectInfo =
+ FunctionInfo<IsDelegateObjectFn>(IsDelegateObject, "IsDelegateObject");
+
+void
+CodeGenerator::emitInstanceOf(LInstruction* ins, JSObject* prototypeObject)
+{
+ // This path implements fun_hasInstance when the function's prototype is
+ // known to be prototypeObject.
+
+ Label done;
+ Register output = ToRegister(ins->getDef(0));
+
+ // If the lhs is a primitive, the result is false.
+ Register objReg;
+ if (ins->isInstanceOfV()) {
+ Label isObject;
+ ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LHS);
+ masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
+ masm.mov(ImmWord(0), output);
+ masm.jump(&done);
+ masm.bind(&isObject);
+ objReg = masm.extractObject(lhsValue, output);
+ } else {
+ objReg = ToRegister(ins->toInstanceOfO()->lhs());
+ }
+
+ // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
+ // This follows the main loop of js::IsDelegate, though additionally breaks
+ // out of the loop on Proxy::LazyProto.
+
+ // Load the lhs's prototype.
+ masm.loadObjProto(objReg, output);
+
+ Label testLazy;
+ {
+ Label loopPrototypeChain;
+ masm.bind(&loopPrototypeChain);
+
+ // Test for the target prototype object.
+ Label notPrototypeObject;
+ masm.branchPtr(Assembler::NotEqual, output, ImmGCPtr(prototypeObject), &notPrototypeObject);
+ masm.mov(ImmWord(1), output);
+ masm.jump(&done);
+ masm.bind(&notPrototypeObject);
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ // Test for nullptr or Proxy::LazyProto
+ masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
+
+ // Load the current object's prototype.
+ masm.loadObjProto(output, output);
+
+ masm.jump(&loopPrototypeChain);
+ }
+
+ // Make a VM call if an object with a lazy proto was found on the prototype
+ // chain. This currently occurs only for cross compartment wrappers, which
+ // we do not expect to be compared with non-wrapper functions from this
+ // compartment. Otherwise, we stopped on a nullptr prototype and the output
+ // register is already correct.
+
+ OutOfLineCode* ool = oolCallVM(IsDelegateObjectInfo, ins,
+ ArgList(ImmGCPtr(prototypeObject), objReg),
+ StoreRegisterTo(output));
+
+ // Regenerate the original lhs object for the VM call.
+ Label regenerate, *lazyEntry;
+ if (objReg != output) {
+ lazyEntry = ool->entry();
+ } else {
+ masm.bind(&regenerate);
+ lazyEntry = &regenerate;
+ if (ins->isInstanceOfV()) {
+ ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LHS);
+ objReg = masm.extractObject(lhsValue, output);
+ } else {
+ objReg = ToRegister(ins->toInstanceOfO()->lhs());
+ }
+ MOZ_ASSERT(objReg == output);
+ masm.jump(ool->entry());
+ }
+
+ masm.bind(&testLazy);
+ masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
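+
+ // The loop above is a register-level version of this sketch (not compiled).
+ // 'ProtoNode' and 'vmIsDelegate' are hypothetical stand-ins: the first
+ // models the proto pointers loadObjProto walks, the second the
+ // IsDelegateObject VM call taken when a lazy proto (tagged value 1) is
+ // found on the chain.
+ #if 0
+ struct ProtoNode { ProtoNode* proto; };
+
+ static bool InstanceOfSketch(ProtoNode* lhs, ProtoNode* prototypeObject,
+ bool (*vmIsDelegate)(ProtoNode* proto, ProtoNode* obj))
+ {
+ ProtoNode* proto = lhs->proto; // masm.loadObjProto
+ while (true) {
+ if (proto == prototypeObject)
+ return true; // found the target prototype
+ if (uintptr_t(proto) <= 1) { // nullptr or LazyProto (== 1)
+ if (!proto)
+ return false; // end of the chain
+ return vmIsDelegate(prototypeObject, lhs); // lazy proto: ask the VM
+ }
+ proto = proto->proto; // keep walking the chain
+ }
+ }
+ #endif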
+
+typedef bool (*HasInstanceFn)(JSContext*, HandleObject, HandleValue, bool*);
+static const VMFunction HasInstanceInfo = FunctionInfo<HasInstanceFn>(js::HasInstance, "HasInstance");
+
+void
+CodeGenerator::visitCallInstanceOf(LCallInstanceOf* ins)
+{
+ ValueOperand lhs = ToValue(ins, LCallInstanceOf::LHS);
+ Register rhs = ToRegister(ins->rhs());
+ MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
+
+ pushArg(lhs);
+ pushArg(rhs);
+ callVM(HasInstanceInfo, ins);
+}
+
+void
+CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins)
+{
+ const Register JSContextReg = ToRegister(ins->getJSContextReg());
+ const Register ObjectReg = ToRegister(ins->getObjectReg());
+ const Register PrivateReg = ToRegister(ins->getPrivReg());
+ const Register ValueReg = ToRegister(ins->getValueReg());
+
+ Label haveValue;
+ if (ins->mir()->valueMayBeInSlot()) {
+ size_t slot = ins->mir()->domMemberSlotIndex();
+ // It's a bit annoying to redo these slot calculations, which duplicate
+ // LSlots and a few other things like that, but I'm not sure there's a
+ // way to reuse those here.
+ if (slot < NativeObject::MAX_FIXED_SLOTS) {
+ masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
+ JSReturnOperand);
+ } else {
+ // It's a dynamic slot.
+ slot -= NativeObject::MAX_FIXED_SLOTS;
+ // Use PrivateReg as a scratch register for the slots pointer.
+ masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
+ PrivateReg);
+ masm.loadValue(Address(PrivateReg, slot*sizeof(js::Value)),
+ JSReturnOperand);
+ }
+ masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
+ }
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Make space for the outparam. Pre-initialize it to UndefinedValue so we
+ // can trace it at GC time.
+ masm.Push(UndefinedValue());
+ // We pass the pointer to our out param as an instance of
+ // JSJitGetterCallArgs, since on the binary level it's the same thing.
+ JS_STATIC_ASSERT(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
+ masm.moveStackPtrTo(ValueReg);
+
+ masm.Push(ObjectReg);
+
+ LoadDOMPrivate(masm, ObjectReg, PrivateReg);
+
+ // Rooting will happen at GC time.
+ masm.moveStackPtrTo(ObjectReg);
+
+ uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
+ masm.enterFakeExitFrame(IonDOMExitFrameLayoutGetterToken);
+
+ markSafepointAt(safepointOffset, ins);
+
+ masm.setupUnalignedABICall(JSContextReg);
+
+ masm.loadJSContext(JSContextReg);
+
+ masm.passABIArg(JSContextReg);
+ masm.passABIArg(ObjectReg);
+ masm.passABIArg(PrivateReg);
+ masm.passABIArg(ValueReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ins->mir()->fun()));
+
+ if (ins->mir()->isInfallible()) {
+ masm.loadValue(Address(masm.getStackPointer(), IonDOMExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ } else {
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ masm.loadValue(Address(masm.getStackPointer(), IonDOMExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ }
+ masm.adjustStack(IonDOMExitFrameLayout::Size());
+
+ masm.bind(&haveValue);
+
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
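+
+ // Shape of the ABI call made above, as a sketch (not compiled). In
+ // passABIArg order, the getter receives the JSContext, a pointer to the
+ // pushed object (acting as its handle), the DOM private, and a pointer to
+ // the pre-pushed out Value; a false return propagates the pending
+ // exception. The typedef name and wrapper are illustrative only.
+ #if 0
+ typedef bool (*DOMGetterSketch)(JSContext* cx, void* obj, void* priv, Value* vp);
+
+ static bool CallDOMGetterSketch(DOMGetterSketch fun, JSContext* cx,
+ void* obj, void* priv, Value* vp)
+ {
+ *vp = UndefinedValue(); // pre-initialized so it can be traced at GC time
+ return fun(cx, obj, priv, vp); // false => jump to the exception label
+ }
+ #endif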
+
+void
+CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins)
+{
+ // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
+ // use an LLoadFixedSlotV or some subclass of it for this case: that would
+ // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
+ // we'd have to duplicate a bunch of stuff we now get for free from
+ // MGetDOMProperty.
+ Register object = ToRegister(ins->object());
+ size_t slot = ins->mir()->domMemberSlotIndex();
+ ValueOperand result = GetValueOutput(ins);
+
+ masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)), result);
+}
+
+void
+CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins)
+{
+ // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
+ // use an LLoadFixedSlotT or some subclass of it for this case: that would
+ // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
+ // we'd have to duplicate a bunch of stuff we now get for free from
+ // MGetDOMProperty.
+ Register object = ToRegister(ins->object());
+ size_t slot = ins->mir()->domMemberSlotIndex();
+ AnyRegister result = ToAnyRegister(ins->getDef(0));
+ MIRType type = ins->mir()->type();
+
+ masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)), type, result);
+}
+
+void
+CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins)
+{
+ const Register JSContextReg = ToRegister(ins->getJSContextReg());
+ const Register ObjectReg = ToRegister(ins->getObjectReg());
+ const Register PrivateReg = ToRegister(ins->getPrivReg());
+ const Register ValueReg = ToRegister(ins->getValueReg());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Push the argument. Rooting will happen at GC time.
+ ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
+ masm.Push(argVal);
+ // We pass the pointer to the pushed argument as an instance of
+ // JSJitSetterCallArgs, since on the binary level it's the same thing.
+ JS_STATIC_ASSERT(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
+ masm.moveStackPtrTo(ValueReg);
+
+ masm.Push(ObjectReg);
+
+ LoadDOMPrivate(masm, ObjectReg, PrivateReg);
+
+ // Rooting will happen at GC time.
+ masm.moveStackPtrTo(ObjectReg);
+
+ uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
+ masm.enterFakeExitFrame(IonDOMExitFrameLayoutSetterToken);
+
+ markSafepointAt(safepointOffset, ins);
+
+ masm.setupUnalignedABICall(JSContextReg);
+
+ masm.loadJSContext(JSContextReg);
+
+ masm.passABIArg(JSContextReg);
+ masm.passABIArg(ObjectReg);
+ masm.passABIArg(PrivateReg);
+ masm.passABIArg(ValueReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ins->mir()->fun()));
+
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ masm.adjustStack(IonDOMExitFrameLayout::Size());
+
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator>
+{
+ LIsCallable* ins_;
+
+ public:
+ explicit OutOfLineIsCallable(LIsCallable* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineIsCallable(this);
+ }
+ LIsCallable* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGenerator::visitIsCallable(LIsCallable* ins)
+{
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineIsCallable* ool = new(alloc()) OutOfLineIsCallable(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ Label notFunction, hasCOps, done;
+ masm.loadObjClass(object, output);
+
+ // Just skim proxies off. Their notion of isCallable() is more complicated.
+ masm.branchTestClassIsProxy(true, output, ool->entry());
+
+ // An object is callable iff:
+ // is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
+ masm.branchPtr(Assembler::NotEqual, output, ImmPtr(&JSFunction::class_), &notFunction);
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+
+ masm.bind(&notFunction);
+ masm.branchPtr(Assembler::NonZero, Address(output, offsetof(js::Class, cOps)),
+ ImmPtr(nullptr), &hasCOps);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+
+ masm.bind(&hasCOps);
+ masm.loadPtr(Address(output, offsetof(js::Class, cOps)), output);
+ masm.cmpPtrSet(Assembler::NonZero, Address(output, offsetof(js::ClassOps, call)),
+ ImmPtr(nullptr), output);
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
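+
+ // Equivalent predicate as a sketch (not compiled); proxies never reach the
+ // inline path because they are diverted to the out-of-line ObjectIsCallable
+ // call first. 'clasp' stands for the class pointer loadObjClass puts in
+ // 'output'.
+ #if 0
+ static bool IsCallableSketch(const js::Class* clasp)
+ {
+ if (clasp == &JSFunction::class_)
+ return true; // plain JS functions are always callable
+ return clasp->cOps && clasp->cOps->call; // native classes need a call hook
+ }
+ #endif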
+
+void
+CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool)
+{
+ LIsCallable* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(object);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectIsCallable));
+ masm.storeCallBoolResult(output);
+ restoreVolatile(output);
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator>
+{
+ LIsConstructor* ins_;
+
+ public:
+ explicit OutOfLineIsConstructor(LIsConstructor* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineIsConstructor(this);
+ }
+ LIsConstructor* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGenerator::visitIsConstructor(LIsConstructor* ins)
+{
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineIsConstructor* ool = new(alloc()) OutOfLineIsConstructor(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ Label notFunction, notConstructor, hasCOps, done;
+ masm.loadObjClass(object, output);
+
+ // Just skim proxies off. Their notion of isConstructor() is more complicated.
+ masm.branchTestClassIsProxy(true, output, ool->entry());
+
+ // An object is a constructor iff
+ // ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
+ // (getClass()->cOps && getClass()->cOps->construct)).
+ masm.branchPtr(Assembler::NotEqual, output, ImmPtr(&JSFunction::class_), &notFunction);
+ masm.load16ZeroExtend(Address(object, JSFunction::offsetOfFlags()), output);
+ masm.and32(Imm32(JSFunction::CONSTRUCTOR), output);
+ masm.branchTest32(Assembler::Zero, output, output, &notConstructor);
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+ masm.bind(&notConstructor);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+
+ masm.bind(&notFunction);
+ masm.branchPtr(Assembler::NonZero, Address(output, offsetof(js::Class, cOps)),
+ ImmPtr(nullptr), &hasCOps);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+
+ masm.bind(&hasCOps);
+ masm.loadPtr(Address(output, offsetof(js::Class, cOps)), output);
+ masm.cmpPtrSet(Assembler::NonZero, Address(output, offsetof(js::ClassOps, construct)),
+ ImmPtr(nullptr), output);
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool)
+{
+ LIsConstructor* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(object);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectIsConstructor));
+ masm.storeCallBoolResult(output);
+ restoreVolatile(output);
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitIsObject(LIsObject* ins)
+{
+ Register output = ToRegister(ins->output());
+ ValueOperand value = ToValue(ins, LIsObject::Input);
+ masm.testObjectSet(Assembler::Equal, value, output);
+}
+
+void
+CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins)
+{
+ ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
+ testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
+}
+
+void
+CodeGenerator::loadOutermostJSScript(Register reg)
+{
+ // The "outermost" JSScript is the script we are compiling at the top
+ // level; it is not always the script associated with the current basic
+ // block, which might belong to an inlined script.
+
+ MIRGraph& graph = current->mir()->graph();
+ MBasicBlock* entryBlock = graph.entryBlock();
+ masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
+}
+
+void
+CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg)
+{
+ // The current JSScript means the script for the current
+ // basic block. This may be an inlined script.
+
+ JSScript* script = block->info().script();
+ masm.movePtr(ImmGCPtr(script), reg);
+}
+
+void
+CodeGenerator::visitHasClass(LHasClass* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register output = ToRegister(ins->output());
+
+ masm.loadObjClass(lhs, output);
+ masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()), output);
+}
+
+void
+CodeGenerator::visitWasmParameter(LWasmParameter* lir)
+{
+}
+
+void
+CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir)
+{
+}
+
+void
+CodeGenerator::visitWasmReturn(LWasmReturn* lir)
+{
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin())
+ masm.jump(&returnLabel_);
+}
+
+void
+CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir)
+{
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin())
+ masm.jump(&returnLabel_);
+}
+
+void
+CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir)
+{
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin())
+ masm.jump(&returnLabel_);
+}
+
+void
+CodeGenerator::emitAssertRangeI(const Range* r, Register input)
+{
+ // Check the lower bound.
+ if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
+ Label success;
+ masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()), &success);
+ masm.assumeUnreachable("Integer input should be greater than or equal to the lower bound.");
+ masm.bind(&success);
+ }
+
+ // Check the upper bound.
+ if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
+ Label success;
+ masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()), &success);
+ masm.assumeUnreachable("Integer input should be less than or equal to the upper bound.");
+ masm.bind(&success);
+ }
+
+ // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
+ // r->exponent(), there's nothing to check, because if we ended up in the
+ // integer range checking code, the value is already in an integer register
+ // in the integer range.
+}
+
+void
+CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input, FloatRegister temp)
+{
+ // Check the lower bound.
+ if (r->hasInt32LowerBound()) {
+ Label success;
+ masm.loadConstantDouble(r->lower(), temp);
+ if (r->canBeNaN())
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp, &success);
+ masm.assumeUnreachable("Double input should be greater than or equal to the lower bound.");
+ masm.bind(&success);
+ }
+ // Check the upper bound.
+ if (r->hasInt32UpperBound()) {
+ Label success;
+ masm.loadConstantDouble(r->upper(), temp);
+ if (r->canBeNaN())
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
+ masm.assumeUnreachable("Double input should be less than or equal to the upper bound.");
+ masm.bind(&success);
+ }
+
+ // This code does not yet check r->canHaveFractionalPart(). This would require new
+ // assembler interfaces to make rounding instructions available.
+
+ if (!r->canBeNegativeZero()) {
+ Label success;
+
+ // First, test for being equal to 0.0, which also includes -0.0.
+ masm.loadConstantDouble(0.0, temp);
+ masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp, &success);
+
+ // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
+ // -Infinity instead of Infinity.
+ masm.loadConstantDouble(1.0, temp);
+ masm.divDouble(input, temp);
+ masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
+
+ masm.assumeUnreachable("Input shouldn't be negative zero.");
+
+ masm.bind(&success);
+ }
+
+ if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
+ r->exponent() < FloatingPoint<double>::kExponentBias)
+ {
+ // Check the bounds implied by the maximum exponent.
+ Label exponentLoOk;
+ masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &exponentLoOk);
+ masm.assumeUnreachable("Check for exponent failed.");
+ masm.bind(&exponentLoOk);
+
+ Label exponentHiOk;
+ masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp, &exponentHiOk);
+ masm.assumeUnreachable("Check for exponent failed.");
+ masm.bind(&exponentHiOk);
+ } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
+ // If we think the value can't be NaN, check that it isn't.
+ Label notnan;
+ masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
+ masm.assumeUnreachable("Input shouldn't be NaN.");
+ masm.bind(&notnan);
+
+ // If we think the value also can't be an infinity, check that it isn't.
+ if (!r->canBeInfiniteOrNaN()) {
+ Label notposinf;
+ masm.loadConstantDouble(PositiveInfinity<double>(), temp);
+ masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
+ masm.assumeUnreachable("Input shouldn't be +Inf.");
+ masm.bind(&notposinf);
+
+ Label notneginf;
+ masm.loadConstantDouble(NegativeInfinity<double>(), temp);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
+ masm.assumeUnreachable("Input shouldn't be -Inf.");
+ masm.bind(&notneginf);
+ }
+ }
+}
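+
+ // The negative-zero test above exploits the fact that 1.0 / -0.0 is
+ // -Infinity while 1.0 / +0.0 is +Infinity; the same check in scalar form
+ // (not compiled), for reference:
+ #if 0
+ static bool IsNegativeZeroSketch(double d)
+ {
+ if (d != 0.0) // also rejects NaN via the unordered comparison
+ return false;
+ return (1.0 / d) < 0.0; // -0.0 => -Infinity => true
+ }
+ #endif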
+
+void
+CodeGenerator::visitAssertResultV(LAssertResultV* ins)
+{
+ const ValueOperand value = ToValue(ins, LAssertResultV::Input);
+ emitAssertResultV(value, ins->mirRaw()->resultTypeSet());
+}
+
+void
+CodeGenerator::visitAssertResultT(LAssertResultT* ins)
+{
+ Register input = ToRegister(ins->input());
+ MDefinition* mir = ins->mirRaw();
+
+ emitAssertObjectOrStringResult(input, mir->type(), mir->resultTypeSet());
+}
+
+void
+CodeGenerator::visitAssertRangeI(LAssertRangeI* ins)
+{
+ Register input = ToRegister(ins->input());
+ const Range* r = ins->range();
+
+ emitAssertRangeI(r, input);
+}
+
+void
+CodeGenerator::visitAssertRangeD(LAssertRangeD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ const Range* r = ins->range();
+
+ emitAssertRangeD(r, input, temp);
+}
+
+void
+CodeGenerator::visitAssertRangeF(LAssertRangeF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ FloatRegister temp2 = ToFloatRegister(ins->temp2());
+
+ const Range* r = ins->range();
+
+ masm.convertFloat32ToDouble(input, temp);
+ emitAssertRangeD(r, temp, temp2);
+}
+
+void
+CodeGenerator::visitAssertRangeV(LAssertRangeV* ins)
+{
+ const Range* r = ins->range();
+ const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
+ Register tag = masm.splitTagForTest(value);
+ Label done;
+
+ {
+ Label isNotInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
+ Register unboxInt32 = ToTempUnboxRegister(ins->temp());
+ Register input = masm.extractInt32(value, unboxInt32);
+ emitAssertRangeI(r, input);
+ masm.jump(&done);
+ masm.bind(&isNotInt32);
+ }
+
+ {
+ Label isNotDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
+ FloatRegister input = ToFloatRegister(ins->floatTemp1());
+ FloatRegister temp = ToFloatRegister(ins->floatTemp2());
+ masm.unboxDouble(value, input);
+ emitAssertRangeD(r, input, temp);
+ masm.jump(&done);
+ masm.bind(&isNotDouble);
+ }
+
+ masm.assumeUnreachable("Incorrect range for Value.");
+ masm.bind(&done);
+}
+
+void
+CodeGenerator::visitInterruptCheck(LInterruptCheck* lir)
+{
+ if (lir->implicit()) {
+ OutOfLineInterruptCheckImplicit* ool = new(alloc()) OutOfLineInterruptCheckImplicit(current, lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ lir->setOolEntry(ool->entry());
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ OutOfLineCode* ool = oolCallVM(InterruptCheckInfo, lir, ArgList(), StoreNothing());
+
+ AbsoluteAddress interruptAddr(GetJitContext()->runtime->addressOfInterruptUint32());
+ masm.branch32(Assembler::NotEqual, interruptAddr, Imm32(0), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGenerator::visitWasmTrap(LWasmTrap* lir)
+{
+ MOZ_ASSERT(gen->compilingWasm());
+ const MWasmTrap* mir = lir->mir();
+
+ masm.jump(trap(mir, mir->trap()));
+}
+
+void
+CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
+{
+ const MWasmBoundsCheck* mir = ins->mir();
+ Register ptr = ToRegister(ins->ptr());
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, trap(mir, wasm::Trap::OutOfBounds));
+}
+
+typedef bool (*RecompileFn)(JSContext*);
+static const VMFunction RecompileFnInfo = FunctionInfo<RecompileFn>(Recompile, "Recompile");
+
+typedef bool (*ForcedRecompileFn)(JSContext*);
+static const VMFunction ForcedRecompileFnInfo =
+ FunctionInfo<ForcedRecompileFn>(ForcedRecompile, "ForcedRecompile");
+
+void
+CodeGenerator::visitRecompileCheck(LRecompileCheck* ins)
+{
+ Label done;
+ Register tmp = ToRegister(ins->scratch());
+ OutOfLineCode* ool;
+ if (ins->mir()->forceRecompilation())
+ ool = oolCallVM(ForcedRecompileFnInfo, ins, ArgList(), StoreRegisterTo(tmp));
+ else
+ ool = oolCallVM(RecompileFnInfo, ins, ArgList(), StoreRegisterTo(tmp));
+
+ // Check if warm-up counter is high enough.
+ AbsoluteAddress warmUpCount = AbsoluteAddress(ins->mir()->script()->addressOfWarmUpCounter());
+ if (ins->mir()->increaseWarmUpCounter()) {
+ masm.load32(warmUpCount, tmp);
+ masm.add32(Imm32(1), tmp);
+ masm.store32(tmp, warmUpCount);
+ masm.branch32(Assembler::BelowOrEqual, tmp, Imm32(ins->mir()->recompileThreshold()), &done);
+ } else {
+ masm.branch32(Assembler::BelowOrEqual, warmUpCount, Imm32(ins->mir()->recompileThreshold()),
+ &done);
+ }
+
+ // Check if not yet recompiling.
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), tmp);
+ masm.propagateOOM(ionScriptLabels_.append(label));
+ masm.branch32(Assembler::Equal,
+ Address(tmp, IonScript::offsetOfRecompiling()),
+ Imm32(0),
+ ool->entry());
+ masm.bind(ool->rejoin());
+ masm.bind(&done);
+}
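+
+ // Decision logic of the check above as a sketch (not compiled): optionally
+ // bump the script's warm-up counter, and call Recompile/ForcedRecompile
+ // only once the counter exceeds the threshold and no recompilation is
+ // already in flight. The parameters stand for the memory locations the
+ // generated code reads and writes.
+ #if 0
+ static void RecompileCheckSketch(uint32_t* warmUpCount, uint32_t threshold,
+ bool increase, bool alreadyRecompiling, void (*recompileVMCall)())
+ {
+ if (increase)
+ ++*warmUpCount; // load32 / add32 / store32 above
+ if (*warmUpCount <= threshold)
+ return; // branch to &done
+ if (!alreadyRecompiling)
+ recompileVMCall(); // oolCallVM(Recompile or ForcedRecompile)
+ }
+ #endif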
+
+void
+CodeGenerator::visitLexicalCheck(LLexicalCheck* ins)
+{
+ ValueOperand inputValue = ToValue(ins, LLexicalCheck::Input);
+ Label bail;
+ masm.branchTestMagicValue(Assembler::Equal, inputValue, JS_UNINITIALIZED_LEXICAL, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+typedef bool (*ThrowRuntimeLexicalErrorFn)(JSContext*, unsigned);
+static const VMFunction ThrowRuntimeLexicalErrorInfo =
+ FunctionInfo<ThrowRuntimeLexicalErrorFn>(ThrowRuntimeLexicalError, "ThrowRuntimeLexicalError");
+
+void
+CodeGenerator::visitThrowRuntimeLexicalError(LThrowRuntimeLexicalError* ins)
+{
+ pushArg(Imm32(ins->mir()->errorNumber()));
+ callVM(ThrowRuntimeLexicalErrorInfo, ins);
+}
+
+typedef bool (*GlobalNameConflictsCheckFromIonFn)(JSContext*, HandleScript);
+static const VMFunction GlobalNameConflictsCheckFromIonInfo =
+ FunctionInfo<GlobalNameConflictsCheckFromIonFn>(GlobalNameConflictsCheckFromIon,
+ "GlobalNameConflictsCheckFromIon");
+
+void
+CodeGenerator::visitGlobalNameConflictsCheck(LGlobalNameConflictsCheck* ins)
+{
+ pushArg(ImmGCPtr(ins->mirRaw()->block()->info().script()));
+ callVM(GlobalNameConflictsCheckFromIonInfo, ins);
+}
+
+void
+CodeGenerator::visitDebugger(LDebugger* ins)
+{
+ Register cx = ToRegister(ins->getTemp(0));
+ Register temp = ToRegister(ins->getTemp(1));
+
+ masm.loadJSContext(cx);
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(cx);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GlobalHasLiveOnDebuggerStatement));
+
+ Label bail;
+ masm.branchIfTrueBool(ReturnReg, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void
+CodeGenerator::visitNewTarget(LNewTarget *ins)
+{
+ ValueOperand output = GetValueOutput(ins);
+
+ // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
+ Label notConstructing, done;
+ Address calleeToken(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken());
+ masm.branchTestPtr(Assembler::Zero, calleeToken,
+ Imm32(CalleeToken_FunctionConstructing), &notConstructing);
+
+ Register argvLen = output.scratchReg();
+
+ Address actualArgsPtr(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfNumActualArgs());
+ masm.loadPtr(actualArgsPtr, argvLen);
+
+ Label useNFormals;
+
+ size_t numFormalArgs = ins->mirRaw()->block()->info().funMaybeLazy()->nargs();
+ masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs),
+ &useNFormals);
+
+ size_t argsOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
+ {
+ BaseValueIndex newTarget(masm.getStackPointer(), argvLen, argsOffset);
+ masm.loadValue(newTarget, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&useNFormals);
+
+ {
+ Address newTarget(masm.getStackPointer(), argsOffset + (numFormalArgs * sizeof(Value)));
+ masm.loadValue(newTarget, output);
+ masm.jump(&done);
+ }
+
+ // else output = undefined
+ masm.bind(&notConstructing);
+ masm.moveValue(UndefinedValue(), output);
+ masm.bind(&done);
+}
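+
+ // Selection logic as a sketch (not compiled): when constructing, new.target
+ // lives in the argument vector at index max(numActualArgs, numFormalArgs);
+ // otherwise the result is undefined. 'argv' stands for the actual-argument
+ // area addressed through argsOffset above.
+ #if 0
+ static Value NewTargetSketch(bool isConstructing, const Value* argv,
+ uint32_t numActualArgs, uint32_t numFormalArgs)
+ {
+ if (!isConstructing)
+ return UndefinedValue();
+ uint32_t slot = numActualArgs >= numFormalArgs ? numActualArgs : numFormalArgs;
+ return argv[slot];
+ }
+ #endif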
+
+void
+CodeGenerator::visitCheckReturn(LCheckReturn* ins)
+{
+ ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValue);
+ ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValue);
+ Label bail, noChecks;
+ masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
+ masm.branchTestUndefined(Assembler::NotEqual, returnValue, &bail);
+ masm.branchTestMagicValue(Assembler::Equal, thisValue, JS_UNINITIALIZED_LEXICAL, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ masm.bind(&noChecks);
+}
+
+typedef bool (*ThrowCheckIsObjectFn)(JSContext*, CheckIsObjectKind);
+static const VMFunction ThrowCheckIsObjectInfo =
+ FunctionInfo<ThrowCheckIsObjectFn>(ThrowCheckIsObject, "ThrowCheckIsObject");
+
+void
+CodeGenerator::visitCheckIsObj(LCheckIsObj* ins)
+{
+ ValueOperand checkValue = ToValue(ins, LCheckIsObj::CheckValue);
+
+ OutOfLineCode* ool = oolCallVM(ThrowCheckIsObjectInfo, ins,
+ ArgList(Imm32(ins->mir()->checkKind())),
+ StoreNothing());
+ masm.branchTestObject(Assembler::NotEqual, checkValue, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+typedef bool (*ThrowObjCoercibleFn)(JSContext*, HandleValue);
+static const VMFunction ThrowObjectCoercibleInfo =
+ FunctionInfo<ThrowObjCoercibleFn>(ThrowObjectCoercible, "ThrowObjectCoercible");
+
+void
+CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins)
+{
+ ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::CheckValue);
+ Label fail, done;
+ masm.branchTestNull(Assembler::Equal, checkValue, &fail);
+ masm.branchTestUndefined(Assembler::NotEqual, checkValue, &done);
+ masm.bind(&fail);
+ pushArg(checkValue);
+ callVM(ThrowObjectCoercibleInfo, ins);
+ masm.bind(&done);
+}
+
+typedef bool (*CheckSelfHostedFn)(JSContext*, HandleValue);
+static const VMFunction CheckSelfHostedInfo =
+ FunctionInfo<CheckSelfHostedFn>(js::Debug_CheckSelfHosted, "Debug_CheckSelfHosted");
+
+void
+CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins)
+{
+ ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::CheckValue);
+ pushArg(checkValue);
+ callVM(CheckSelfHostedInfo, ins);
+}
+
+void
+CodeGenerator::visitRandom(LRandom* ins)
+{
+ using mozilla::non_crypto::XorShift128PlusRNG;
+
+ FloatRegister output = ToFloatRegister(ins->output());
+ Register tempReg = ToRegister(ins->temp0());
+
+#ifdef JS_PUNBOX64
+ Register64 s0Reg(ToRegister(ins->temp1()));
+ Register64 s1Reg(ToRegister(ins->temp2()));
+#else
+ Register64 s0Reg(ToRegister(ins->temp1()), ToRegister(ins->temp2()));
+ Register64 s1Reg(ToRegister(ins->temp3()), ToRegister(ins->temp4()));
+#endif
+
+ const void* rng = gen->compartment->addressOfRandomNumberGenerator();
+ masm.movePtr(ImmPtr(rng), tempReg);
+
+ static_assert(sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
+ "Code below assumes XorShift128PlusRNG contains two uint64_t values");
+
+ Address state0Addr(tempReg, XorShift128PlusRNG::offsetOfState0());
+ Address state1Addr(tempReg, XorShift128PlusRNG::offsetOfState1());
+
+ // uint64_t s1 = mState[0];
+ masm.load64(state0Addr, s1Reg);
+
+ // s1 ^= s1 << 23;
+ masm.move64(s1Reg, s0Reg);
+ masm.lshift64(Imm32(23), s1Reg);
+ masm.xor64(s0Reg, s1Reg);
+
+ // s1 ^= s1 >> 17
+ masm.move64(s1Reg, s0Reg);
+ masm.rshift64(Imm32(17), s1Reg);
+ masm.xor64(s0Reg, s1Reg);
+
+ // const uint64_t s0 = mState[1];
+ masm.load64(state1Addr, s0Reg);
+
+ // mState[0] = s0;
+ masm.store64(s0Reg, state0Addr);
+
+ // s1 ^= s0
+ masm.xor64(s0Reg, s1Reg);
+
+ // s1 ^= s0 >> 26
+ masm.rshift64(Imm32(26), s0Reg);
+ masm.xor64(s0Reg, s1Reg);
+
+ // mState[1] = s1
+ masm.store64(s1Reg, state1Addr);
+
+ // s1 += mState[0]
+ masm.load64(state0Addr, s0Reg);
+ masm.add64(s0Reg, s1Reg);
+
+ // See comment in XorShift128PlusRNG::nextDouble().
+ static const int MantissaBits = FloatingPoint<double>::kExponentShift + 1;
+ static const double ScaleInv = double(1) / (1ULL << MantissaBits);
+
+ masm.and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);
+
+ if (masm.convertUInt64ToDoubleNeedsTemp())
+ masm.convertUInt64ToDouble(s1Reg, output, tempReg);
+ else
+ masm.convertUInt64ToDouble(s1Reg, output, Register::Invalid());
+
+ // output *= ScaleInv
+ masm.mulDoublePtr(ImmPtr(&ScaleInv), tempReg, output);
+}
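+
+ // The masm sequence above is a register-level transcription of XorShift128+
+ // followed by the mantissa masking and scaling into [0, 1); the same
+ // algorithm in scalar form (not compiled), where 'state' stands for the two
+ // uint64_t words of the per-compartment XorShift128PlusRNG:
+ #if 0
+ static double RandomSketch(uint64_t state[2])
+ {
+ uint64_t s1 = state[0];
+ s1 ^= s1 << 23;
+ s1 ^= s1 >> 17;
+ uint64_t s0 = state[1];
+ state[0] = s0;
+ s1 ^= s0;
+ s1 ^= s0 >> 26;
+ state[1] = s1;
+ uint64_t result = s1 + s0; // s1 += mState[0]
+
+ const int MantissaBits = 53; // kExponentShift + 1
+ result &= (uint64_t(1) << MantissaBits) - 1;
+ return double(result) / double(uint64_t(1) << MantissaBits);
+ }
+ #endif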
+
+void
+CodeGenerator::visitSignExtend(LSignExtend* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ switch (ins->mode()) {
+ case MSignExtend::Byte:
+ masm.move8SignExtend(input, output);
+ break;
+ case MSignExtend::Half:
+ masm.move16SignExtend(input, output);
+ break;
+ }
+}
+
+void
+CodeGenerator::visitRotate(LRotate* ins)
+{
+ MRotate* mir = ins->mir();
+ Register input = ToRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ const LAllocation* count = ins->count();
+ if (count->isConstant()) {
+ int32_t c = ToInt32(count) & 0x1F;
+ if (mir->isLeftRotate())
+ masm.rotateLeft(Imm32(c), input, dest);
+ else
+ masm.rotateRight(Imm32(c), input, dest);
+ } else {
+ Register creg = ToRegister(count);
+ if (mir->isLeftRotate())
+ masm.rotateLeft(creg, input, dest);
+ else
+ masm.rotateRight(creg, input, dest);
+ }
+}
+
+class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator>
+{
+ LNaNToZero* lir_;
+
+ public:
+ explicit OutOfLineNaNToZero(LNaNToZero* lir)
+ : lir_(lir)
+ {}
+
+ void accept(CodeGenerator* codegen) {
+ codegen->visitOutOfLineNaNToZero(this);
+ }
+ LNaNToZero* lir() const {
+ return lir_;
+ }
+};
+
+void
+CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool)
+{
+ FloatRegister output = ToFloatRegister(ool->lir()->output());
+ masm.loadConstantDouble(0.0, output);
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGenerator::visitNaNToZero(LNaNToZero* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+
+ OutOfLineNaNToZero* ool = new(alloc()) OutOfLineNaNToZero(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ if (lir->mir()->operandIsNeverNegativeZero()){
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
+ } else {
+ FloatRegister scratch = ToFloatRegister(lir->tempDouble());
+ masm.loadConstantDouble(0.0, scratch);
+ masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, ool->entry());
+ }
+ masm.bind(ool->rejoin());
+}
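+
+ // The lowering above maps NaN (and, unless the operand is proven never to
+ // be negative zero, -0.0) to +0.0 and leaves every other double untouched;
+ // a scalar sketch (not compiled):
+ #if 0
+ static double NaNToZeroSketch(double d, bool operandNeverNegativeZero)
+ {
+ if (d != d) // NaN
+ return 0.0;
+ if (!operandNeverNegativeZero && d == 0.0) // catches -0.0 as well
+ return 0.0;
+ return d;
+ }
+ #endif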
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
new file mode 100644
index 000000000..8f4bcc813
--- /dev/null
+++ b/js/src/jit/CodeGenerator.h
@@ -0,0 +1,593 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CodeGenerator_h
+#define jit_CodeGenerator_h
+
+#include "jit/IonCaches.h"
+#if defined(JS_ION_PERF)
+# include "jit/PerfSpewer.h"
+#endif
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/CodeGenerator-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/CodeGenerator-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/CodeGenerator-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/CodeGenerator-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/CodeGenerator-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/CodeGenerator-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/CodeGenerator-none.h"
+#else
+#error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+class OutOfLineTestObject;
+class OutOfLineNewArray;
+class OutOfLineNewObject;
+class CheckOverRecursedFailure;
+class OutOfLineInterruptCheckImplicit;
+class OutOfLineUnboxFloatingPoint;
+class OutOfLineStoreElementHole;
+class OutOfLineTypeOfV;
+class OutOfLineUpdateCache;
+class OutOfLineCallPostWriteBarrier;
+class OutOfLineCallPostWriteElementBarrier;
+class OutOfLineIsCallable;
+class OutOfLineIsConstructor;
+class OutOfLineRegExpMatcher;
+class OutOfLineRegExpSearcher;
+class OutOfLineRegExpTester;
+class OutOfLineRegExpPrototypeOptimizable;
+class OutOfLineRegExpInstanceOptimizable;
+class OutOfLineLambdaArrow;
+class OutOfLineNaNToZero;
+
+class CodeGenerator final : public CodeGeneratorSpecific
+{
+ void generateArgumentsChecks(bool bailout = true);
+ MOZ_MUST_USE bool generateBody();
+
+ ConstantOrRegister toConstantOrRegister(LInstruction* lir, size_t n, MIRType type);
+
+ public:
+ CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm = nullptr);
+ ~CodeGenerator();
+
+ public:
+ MOZ_MUST_USE bool generate();
+ MOZ_MUST_USE bool generateWasm(wasm::SigIdDesc sigId, wasm::TrapOffset trapOffset,
+ wasm::FuncOffsets *offsets);
+ MOZ_MUST_USE bool link(JSContext* cx, CompilerConstraintList* constraints);
+ MOZ_MUST_USE bool linkSharedStubs(JSContext* cx);
+
+ void visitOsiPoint(LOsiPoint* lir);
+ void visitGoto(LGoto* lir);
+ void visitTableSwitch(LTableSwitch* ins);
+ void visitTableSwitchV(LTableSwitchV* ins);
+ void visitCloneLiteral(LCloneLiteral* lir);
+ void visitParameter(LParameter* lir);
+ void visitCallee(LCallee* lir);
+ void visitIsConstructing(LIsConstructing* lir);
+ void visitStart(LStart* lir);
+ void visitReturn(LReturn* ret);
+ void visitDefVar(LDefVar* lir);
+ void visitDefLexical(LDefLexical* lir);
+ void visitDefFun(LDefFun* lir);
+ void visitOsrEntry(LOsrEntry* lir);
+ void visitOsrEnvironmentChain(LOsrEnvironmentChain* lir);
+ void visitOsrValue(LOsrValue* lir);
+ void visitOsrReturnValue(LOsrReturnValue* lir);
+ void visitOsrArgumentsObject(LOsrArgumentsObject* lir);
+ void visitStackArgT(LStackArgT* lir);
+ void visitStackArgV(LStackArgV* lir);
+ void visitMoveGroup(LMoveGroup* group);
+ void visitValueToInt32(LValueToInt32* lir);
+ void visitValueToDouble(LValueToDouble* lir);
+ void visitValueToFloat32(LValueToFloat32* lir);
+ void visitFloat32ToDouble(LFloat32ToDouble* lir);
+ void visitDoubleToFloat32(LDoubleToFloat32* lir);
+ void visitInt32ToFloat32(LInt32ToFloat32* lir);
+ void visitInt32ToDouble(LInt32ToDouble* lir);
+ void emitOOLTestObject(Register objreg, Label* ifTruthy, Label* ifFalsy, Register scratch);
+ void visitTestOAndBranch(LTestOAndBranch* lir);
+ void visitTestVAndBranch(LTestVAndBranch* lir);
+ void visitFunctionDispatch(LFunctionDispatch* lir);
+ void visitObjectGroupDispatch(LObjectGroupDispatch* lir);
+ void visitBooleanToString(LBooleanToString* lir);
+ void emitIntToString(Register input, Register output, Label* ool);
+ void visitIntToString(LIntToString* lir);
+ void visitDoubleToString(LDoubleToString* lir);
+ void visitValueToString(LValueToString* lir);
+ void visitValueToObjectOrNull(LValueToObjectOrNull* lir);
+ void visitInteger(LInteger* lir);
+ void visitInteger64(LInteger64* lir);
+ void visitRegExp(LRegExp* lir);
+ void visitRegExpMatcher(LRegExpMatcher* lir);
+ void visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool);
+ void visitRegExpSearcher(LRegExpSearcher* lir);
+ void visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool);
+ void visitRegExpTester(LRegExpTester* lir);
+ void visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool);
+ void visitRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* lir);
+ void visitOutOfLineRegExpPrototypeOptimizable(OutOfLineRegExpPrototypeOptimizable* ool);
+ void visitRegExpInstanceOptimizable(LRegExpInstanceOptimizable* lir);
+ void visitOutOfLineRegExpInstanceOptimizable(OutOfLineRegExpInstanceOptimizable* ool);
+ void visitGetFirstDollarIndex(LGetFirstDollarIndex* lir);
+ void visitStringReplace(LStringReplace* lir);
+ void emitSharedStub(ICStub::Kind kind, LInstruction* lir);
+ void visitBinarySharedStub(LBinarySharedStub* lir);
+ void visitUnarySharedStub(LUnarySharedStub* lir);
+ void visitNullarySharedStub(LNullarySharedStub* lir);
+ void visitLambda(LLambda* lir);
+ void visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool);
+ void visitLambdaArrow(LLambdaArrow* lir);
+ void visitLambdaForSingleton(LLambdaForSingleton* lir);
+ void visitPointer(LPointer* lir);
+ void visitKeepAliveObject(LKeepAliveObject* lir);
+ void visitSlots(LSlots* lir);
+ void visitLoadSlotT(LLoadSlotT* lir);
+ void visitLoadSlotV(LLoadSlotV* lir);
+ void visitStoreSlotT(LStoreSlotT* lir);
+ void visitStoreSlotV(LStoreSlotV* lir);
+ void visitElements(LElements* lir);
+ void visitConvertElementsToDoubles(LConvertElementsToDoubles* lir);
+ void visitMaybeToDoubleElement(LMaybeToDoubleElement* lir);
+ void visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir);
+ void visitGuardObjectIdentity(LGuardObjectIdentity* guard);
+ void visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir);
+ void visitGuardUnboxedExpando(LGuardUnboxedExpando* lir);
+ void visitLoadUnboxedExpando(LLoadUnboxedExpando* lir);
+ void visitTypeBarrierV(LTypeBarrierV* lir);
+ void visitTypeBarrierO(LTypeBarrierO* lir);
+ void visitMonitorTypes(LMonitorTypes* lir);
+ void emitPostWriteBarrier(const LAllocation* obj);
+ void emitPostWriteBarrier(Register objreg);
+ template <class LPostBarrierType>
+ void visitPostWriteBarrierCommonO(LPostBarrierType* lir, OutOfLineCode* ool);
+ template <class LPostBarrierType>
+ void visitPostWriteBarrierCommonV(LPostBarrierType* lir, OutOfLineCode* ool);
+ void visitPostWriteBarrierO(LPostWriteBarrierO* lir);
+ void visitPostWriteElementBarrierO(LPostWriteElementBarrierO* lir);
+ void visitPostWriteBarrierV(LPostWriteBarrierV* lir);
+ void visitPostWriteElementBarrierV(LPostWriteElementBarrierV* lir);
+ void visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool);
+ void visitOutOfLineCallPostWriteElementBarrier(OutOfLineCallPostWriteElementBarrier* ool);
+ void visitCallNative(LCallNative* call);
+ void emitCallInvokeFunction(LInstruction* call, Register callereg,
+ bool isConstructing, uint32_t argc,
+ uint32_t unusedStack);
+ void visitCallGeneric(LCallGeneric* call);
+ void emitCallInvokeFunctionShuffleNewTarget(LCallKnown *call,
+ Register calleeReg,
+ uint32_t numFormals,
+ uint32_t unusedStack);
+ void visitCallKnown(LCallKnown* call);
+ template<typename T> void emitApplyGeneric(T* apply);
+ template<typename T> void emitCallInvokeFunction(T* apply, Register extraStackSize);
+ void emitAllocateSpaceForApply(Register argcreg, Register extraStackSpace, Label* end);
+ void emitCopyValuesForApply(Register argvSrcBase, Register argvIndex, Register copyreg,
+ size_t argvSrcOffset, size_t argvDstOffset);
+ void emitPopArguments(Register extraStackSize);
+ void emitPushArguments(LApplyArgsGeneric* apply, Register extraStackSpace);
+ void visitApplyArgsGeneric(LApplyArgsGeneric* apply);
+ void emitPushArguments(LApplyArrayGeneric* apply, Register extraStackSpace);
+ void visitApplyArrayGeneric(LApplyArrayGeneric* apply);
+ void visitBail(LBail* lir);
+ void visitUnreachable(LUnreachable* unreachable);
+ void visitEncodeSnapshot(LEncodeSnapshot* lir);
+ void visitGetDynamicName(LGetDynamicName* lir);
+ void visitCallDirectEval(LCallDirectEval* lir);
+ void visitDoubleToInt32(LDoubleToInt32* lir);
+ void visitFloat32ToInt32(LFloat32ToInt32* lir);
+ void visitNewArrayCallVM(LNewArray* lir);
+ void visitNewArray(LNewArray* lir);
+ void visitOutOfLineNewArray(OutOfLineNewArray* ool);
+ void visitNewArrayCopyOnWrite(LNewArrayCopyOnWrite* lir);
+ void visitNewArrayDynamicLength(LNewArrayDynamicLength* lir);
+ void visitNewTypedArray(LNewTypedArray* lir);
+ void visitNewTypedArrayDynamicLength(LNewTypedArrayDynamicLength* lir);
+ void visitNewObjectVMCall(LNewObject* lir);
+ void visitNewObject(LNewObject* lir);
+ void visitOutOfLineNewObject(OutOfLineNewObject* ool);
+ void visitNewTypedObject(LNewTypedObject* lir);
+ void visitSimdBox(LSimdBox* lir);
+ void visitSimdUnbox(LSimdUnbox* lir);
+ void visitNewNamedLambdaObject(LNewNamedLambdaObject* lir);
+ void visitNewCallObject(LNewCallObject* lir);
+ void visitNewSingletonCallObject(LNewSingletonCallObject* lir);
+ void visitNewStringObject(LNewStringObject* lir);
+ void visitNewDerivedTypedObject(LNewDerivedTypedObject* lir);
+ void visitInitElem(LInitElem* lir);
+ void visitInitElemGetterSetter(LInitElemGetterSetter* lir);
+ void visitMutateProto(LMutateProto* lir);
+ void visitInitProp(LInitProp* lir);
+ void visitInitPropGetterSetter(LInitPropGetterSetter* lir);
+ void visitCreateThis(LCreateThis* lir);
+ void visitCreateThisWithProto(LCreateThisWithProto* lir);
+ void visitCreateThisWithTemplate(LCreateThisWithTemplate* lir);
+ void visitCreateArgumentsObject(LCreateArgumentsObject* lir);
+ void visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir);
+ void visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir);
+ void visitReturnFromCtor(LReturnFromCtor* lir);
+ void visitComputeThis(LComputeThis* lir);
+ void visitArrayLength(LArrayLength* lir);
+ void visitSetArrayLength(LSetArrayLength* lir);
+ void visitGetNextEntryForIterator(LGetNextEntryForIterator* lir);
+ void visitTypedArrayLength(LTypedArrayLength* lir);
+ void visitTypedArrayElements(LTypedArrayElements* lir);
+ void visitSetDisjointTypedElements(LSetDisjointTypedElements* lir);
+ void visitTypedObjectElements(LTypedObjectElements* lir);
+ void visitSetTypedObjectOffset(LSetTypedObjectOffset* lir);
+ void visitTypedObjectDescr(LTypedObjectDescr* ins);
+ void visitStringLength(LStringLength* lir);
+ void visitSubstr(LSubstr* lir);
+ void visitInitializedLength(LInitializedLength* lir);
+ void visitSetInitializedLength(LSetInitializedLength* lir);
+ void visitUnboxedArrayLength(LUnboxedArrayLength* lir);
+ void visitUnboxedArrayInitializedLength(LUnboxedArrayInitializedLength* lir);
+ void visitIncrementUnboxedArrayInitializedLength(LIncrementUnboxedArrayInitializedLength* lir);
+ void visitSetUnboxedArrayInitializedLength(LSetUnboxedArrayInitializedLength* lir);
+ void visitNotO(LNotO* ins);
+ void visitNotV(LNotV* ins);
+ void visitBoundsCheck(LBoundsCheck* lir);
+ void visitBoundsCheckRange(LBoundsCheckRange* lir);
+ void visitBoundsCheckLower(LBoundsCheckLower* lir);
+ void visitLoadFixedSlotV(LLoadFixedSlotV* ins);
+ void visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* lir);
+ void visitLoadFixedSlotT(LLoadFixedSlotT* ins);
+ void visitStoreFixedSlotV(LStoreFixedSlotV* ins);
+ void visitStoreFixedSlotT(LStoreFixedSlotT* ins);
+ void emitGetPropertyPolymorphic(LInstruction* lir, Register obj,
+ Register scratch, const TypedOrValueRegister& output);
+ void visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins);
+ void visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins);
+ void emitSetPropertyPolymorphic(LInstruction* lir, Register obj,
+ Register scratch, const ConstantOrRegister& value);
+ void visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins);
+ void visitArraySplice(LArraySplice* splice);
+ void visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins);
+ void visitAbsI(LAbsI* lir);
+ void visitAtan2D(LAtan2D* lir);
+ void visitHypot(LHypot* lir);
+ void visitPowI(LPowI* lir);
+ void visitPowD(LPowD* lir);
+ void visitMathFunctionD(LMathFunctionD* ins);
+ void visitMathFunctionF(LMathFunctionF* ins);
+ void visitModD(LModD* ins);
+ void visitMinMaxI(LMinMaxI* lir);
+ void visitBinaryV(LBinaryV* lir);
+ void emitCompareS(LInstruction* lir, JSOp op, Register left, Register right, Register output);
+ void visitCompareS(LCompareS* lir);
+ void visitCompareStrictS(LCompareStrictS* lir);
+ void visitCompareVM(LCompareVM* lir);
+ void visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir);
+ void visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir);
+ void visitIsNullOrLikeUndefinedAndBranchV(LIsNullOrLikeUndefinedAndBranchV* lir);
+ void visitIsNullOrLikeUndefinedAndBranchT(LIsNullOrLikeUndefinedAndBranchT* lir);
+ void emitConcat(LInstruction* lir, Register lhs, Register rhs, Register output);
+ void visitConcat(LConcat* lir);
+ void visitCharCodeAt(LCharCodeAt* lir);
+ void visitFromCharCode(LFromCharCode* lir);
+ void visitFromCodePoint(LFromCodePoint* lir);
+ void visitSinCos(LSinCos *lir);
+ void visitStringSplit(LStringSplit* lir);
+ void visitFunctionEnvironment(LFunctionEnvironment* lir);
+ void visitCallGetProperty(LCallGetProperty* lir);
+ void visitCallGetElement(LCallGetElement* lir);
+ void visitCallSetElement(LCallSetElement* lir);
+ void visitCallInitElementArray(LCallInitElementArray* lir);
+ void visitThrow(LThrow* lir);
+ void visitTypeOfV(LTypeOfV* lir);
+ void visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool);
+ void visitToAsync(LToAsync* lir);
+ void visitToIdV(LToIdV* lir);
+ template<typename T> void emitLoadElementT(LLoadElementT* lir, const T& source);
+ void visitLoadElementT(LLoadElementT* lir);
+ void visitLoadElementV(LLoadElementV* load);
+ void visitLoadElementHole(LLoadElementHole* lir);
+ void visitLoadUnboxedPointerV(LLoadUnboxedPointerV* lir);
+ void visitLoadUnboxedPointerT(LLoadUnboxedPointerT* lir);
+ void visitUnboxObjectOrNull(LUnboxObjectOrNull* lir);
+ void visitStoreElementT(LStoreElementT* lir);
+ void visitStoreElementV(LStoreElementV* lir);
+ template <typename T> void emitStoreElementHoleT(T* lir);
+ template <typename T> void emitStoreElementHoleV(T* lir);
+ void visitStoreElementHoleT(LStoreElementHoleT* lir);
+ void visitStoreElementHoleV(LStoreElementHoleV* lir);
+ void visitFallibleStoreElementV(LFallibleStoreElementV* lir);
+ void visitFallibleStoreElementT(LFallibleStoreElementT* lir);
+ void visitStoreUnboxedPointer(LStoreUnboxedPointer* lir);
+ void visitConvertUnboxedObjectToNative(LConvertUnboxedObjectToNative* lir);
+ void emitArrayPopShift(LInstruction* lir, const MArrayPopShift* mir, Register obj,
+ Register elementsTemp, Register lengthTemp, TypedOrValueRegister out);
+ void visitArrayPopShiftV(LArrayPopShiftV* lir);
+ void visitArrayPopShiftT(LArrayPopShiftT* lir);
+ void emitArrayPush(LInstruction* lir, const MArrayPush* mir, Register obj,
+ const ConstantOrRegister& value, Register elementsTemp, Register length);
+ void visitArrayPushV(LArrayPushV* lir);
+ void visitArrayPushT(LArrayPushT* lir);
+ void visitArraySlice(LArraySlice* lir);
+ void visitArrayJoin(LArrayJoin* lir);
+ void visitLoadUnboxedScalar(LLoadUnboxedScalar* lir);
+ void visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir);
+ void visitStoreUnboxedScalar(LStoreUnboxedScalar* lir);
+ void visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir);
+ void visitAtomicIsLockFree(LAtomicIsLockFree* lir);
+ void visitGuardSharedTypedArray(LGuardSharedTypedArray* lir);
+ void visitClampIToUint8(LClampIToUint8* lir);
+ void visitClampDToUint8(LClampDToUint8* lir);
+ void visitClampVToUint8(LClampVToUint8* lir);
+ void visitCallIteratorStartV(LCallIteratorStartV* lir);
+ void visitCallIteratorStartO(LCallIteratorStartO* lir);
+ void visitIteratorStartO(LIteratorStartO* lir);
+ void visitIteratorMore(LIteratorMore* lir);
+ void visitIsNoIterAndBranch(LIsNoIterAndBranch* lir);
+ void visitIteratorEnd(LIteratorEnd* lir);
+ void visitArgumentsLength(LArgumentsLength* lir);
+ void visitGetFrameArgument(LGetFrameArgument* lir);
+ void visitSetFrameArgumentT(LSetFrameArgumentT* lir);
+ void visitSetFrameArgumentC(LSetFrameArgumentC* lir);
+ void visitSetFrameArgumentV(LSetFrameArgumentV* lir);
+ void visitRunOncePrologue(LRunOncePrologue* lir);
+ void emitRest(LInstruction* lir, Register array, Register numActuals,
+ Register temp0, Register temp1, unsigned numFormals,
+ JSObject* templateObject, bool saveAndRestore, Register resultreg);
+ void visitRest(LRest* lir);
+ void visitCallSetProperty(LCallSetProperty* ins);
+ void visitCallDeleteProperty(LCallDeleteProperty* lir);
+ void visitCallDeleteElement(LCallDeleteElement* lir);
+ void visitBitNotV(LBitNotV* lir);
+ void visitBitOpV(LBitOpV* lir);
+ void emitInstanceOf(LInstruction* ins, JSObject* prototypeObject);
+ void visitIn(LIn* ins);
+ void visitInArray(LInArray* ins);
+ void visitInstanceOfO(LInstanceOfO* ins);
+ void visitInstanceOfV(LInstanceOfV* ins);
+ void visitCallInstanceOf(LCallInstanceOf* ins);
+ void visitGetDOMProperty(LGetDOMProperty* lir);
+ void visitGetDOMMemberV(LGetDOMMemberV* lir);
+ void visitGetDOMMemberT(LGetDOMMemberT* lir);
+ void visitSetDOMProperty(LSetDOMProperty* lir);
+ void visitCallDOMNative(LCallDOMNative* lir);
+ void visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir);
+ void visitCallBindVar(LCallBindVar* lir);
+ void visitIsCallable(LIsCallable* lir);
+ void visitOutOfLineIsCallable(OutOfLineIsCallable* ool);
+ void visitIsConstructor(LIsConstructor* lir);
+ void visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool);
+ void visitIsObject(LIsObject* lir);
+ void visitIsObjectAndBranch(LIsObjectAndBranch* lir);
+ void visitHasClass(LHasClass* lir);
+ void visitWasmParameter(LWasmParameter* lir);
+ void visitWasmParameterI64(LWasmParameterI64* lir);
+ void visitWasmReturn(LWasmReturn* ret);
+ void visitWasmReturnI64(LWasmReturnI64* ret);
+ void visitWasmReturnVoid(LWasmReturnVoid* ret);
+ void visitLexicalCheck(LLexicalCheck* ins);
+ void visitThrowRuntimeLexicalError(LThrowRuntimeLexicalError* ins);
+ void visitGlobalNameConflictsCheck(LGlobalNameConflictsCheck* ins);
+ void visitDebugger(LDebugger* ins);
+ void visitNewTarget(LNewTarget* ins);
+ void visitArrowNewTarget(LArrowNewTarget* ins);
+ void visitCheckReturn(LCheckReturn* ins);
+ void visitCheckIsObj(LCheckIsObj* ins);
+ void visitCheckObjCoercible(LCheckObjCoercible* ins);
+ void visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins);
+ void visitNaNToZero(LNaNToZero* ins);
+ void visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool);
+
+ void visitCheckOverRecursed(LCheckOverRecursed* lir);
+ void visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool);
+
+ void visitUnboxFloatingPoint(LUnboxFloatingPoint* lir);
+ void visitOutOfLineUnboxFloatingPoint(OutOfLineUnboxFloatingPoint* ool);
+ void visitOutOfLineStoreElementHole(OutOfLineStoreElementHole* ool);
+
+ void loadJSScriptForBlock(MBasicBlock* block, Register reg);
+ void loadOutermostJSScript(Register reg);
+
+ // Inline caches visitors.
+ void visitOutOfLineCache(OutOfLineUpdateCache* ool);
+
+ void visitGetPropertyCacheV(LGetPropertyCacheV* ins);
+ void visitGetPropertyCacheT(LGetPropertyCacheT* ins);
+ void visitBindNameCache(LBindNameCache* ins);
+ void visitCallSetProperty(LInstruction* ins);
+ void visitSetPropertyCache(LSetPropertyCache* ins);
+ void visitGetNameCache(LGetNameCache* ins);
+
+ void visitGetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<GetPropertyIC>& ic);
+ void visitSetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<SetPropertyIC>& ic);
+ void visitBindNameIC(OutOfLineUpdateCache* ool, DataPtr<BindNameIC>& ic);
+ void visitNameIC(OutOfLineUpdateCache* ool, DataPtr<NameIC>& ic);
+
+ void visitAssertRangeI(LAssertRangeI* ins);
+ void visitAssertRangeD(LAssertRangeD* ins);
+ void visitAssertRangeF(LAssertRangeF* ins);
+ void visitAssertRangeV(LAssertRangeV* ins);
+
+ void visitAssertResultV(LAssertResultV* ins);
+ void visitAssertResultT(LAssertResultT* ins);
+ void emitAssertResultV(const ValueOperand output, const TemporaryTypeSet* typeset);
+ void emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset);
+
+ void visitInterruptCheck(LInterruptCheck* lir);
+ void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins);
+ void visitWasmTrap(LWasmTrap* lir);
+ void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
+ void visitRecompileCheck(LRecompileCheck* ins);
+ void visitRotate(LRotate* ins);
+
+ void visitRandom(LRandom* ins);
+ void visitSignExtend(LSignExtend* ins);
+
+#ifdef DEBUG
+ void emitDebugForceBailing(LInstruction* lir);
+#endif
+
+ IonScriptCounts* extractScriptCounts() {
+ IonScriptCounts* counts = scriptCounts_;
+ scriptCounts_ = nullptr; // prevent delete in dtor
+ return counts;
+ }
+
+ private:
+ void addGetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
+ const ConstantOrRegister& id, TypedOrValueRegister output,
+ bool monitoredResult, bool allowDoubleResult,
+ jsbytecode* profilerLeavePc);
+ void addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
+ Register temp, Register tempUnbox, FloatRegister tempDouble,
+ FloatRegister tempF32, const ConstantOrRegister& id,
+ const ConstantOrRegister& value,
+ bool strict, bool needsTypeBarrier, bool guardHoles,
+ jsbytecode* profilerLeavePc);
+
+ MOZ_MUST_USE bool generateBranchV(const ValueOperand& value, Label* ifTrue, Label* ifFalse,
+ FloatRegister fr);
+
+ void emitLambdaInit(Register resultReg, Register envChainReg,
+ const LambdaFunctionInfo& info);
+
+ void emitFilterArgumentsOrEval(LInstruction* lir, Register string, Register temp1,
+ Register temp2);
+
+ template <class IteratorObject, class OrderedHashTable>
+ void emitGetNextEntryForIterator(LGetNextEntryForIterator* lir);
+
+ template <class OrderedHashTable>
+ void emitLoadIteratorValues(Register result, Register temp, Register front);
+
+ IonScriptCounts* maybeCreateScriptCounts();
+
+ // This function behaves like testValueTruthy with the exception that it can
+ // choose to let control flow fall through when the object is truthy, as
+ // an optimization. Use testValueTruthy when it's required to branch to one
+ // of the two labels.
+ void testValueTruthyKernel(const ValueOperand& value,
+ const LDefinition* scratch1, const LDefinition* scratch2,
+ FloatRegister fr,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool,
+ MDefinition* valueMIR);
+
+ // Test whether value is truthy or not and jump to the corresponding label.
+ // If the value can be an object that emulates |undefined|, |ool| must be
+ // non-null; otherwise it may be null (and the scratch definitions should
+ // be bogus), in which case an object encountered here will always be
+ // truthy.
+ void testValueTruthy(const ValueOperand& value,
+ const LDefinition* scratch1, const LDefinition* scratch2,
+ FloatRegister fr,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool,
+ MDefinition* valueMIR);
+
+ // This function behaves like testObjectEmulatesUndefined with the exception
+ // that it can choose to let control flow fall through when the object
+ // doesn't emulate undefined, as an optimization. Use the regular
+ // testObjectEmulatesUndefined when it's required to branch to one of the
+ // two labels.
+ void testObjectEmulatesUndefinedKernel(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool);
+
+ // Test whether an object emulates |undefined|. If it does, jump to
+ // |ifEmulatesUndefined|; the caller is responsible for binding this label.
+ // If it doesn't, fall through; the label |ifDoesntEmulateUndefined| (which
+ // must be initially unbound) will be bound at this point.
+ void branchTestObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool);
+
+ // Test whether an object emulates |undefined|, and jump to the
+ // corresponding label.
+ //
+ // This method should be used when subsequent code can't be laid out in a
+ // straight line; if it can, branchTest* should be used instead.
+ void testObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool);
+
+ // Branch to target unless obj has an emptyObjectElements or emptyObjectElementsShared
+ // elements pointer.
+ void branchIfNotEmptyObjectElements(Register obj, Label* target);
+
+ void emitStoreElementTyped(const LAllocation* value, MIRType valueType, MIRType elementType,
+ Register elements, const LAllocation* index,
+ int32_t offsetAdjustment);
+
+ // Bailout if an element about to be written to is a hole.
+ void emitStoreHoleCheck(Register elements, const LAllocation* index, int32_t offsetAdjustment,
+ LSnapshot* snapshot);
+
+ void emitAssertRangeI(const Range* r, Register input);
+ void emitAssertRangeD(const Range* r, FloatRegister input, FloatRegister temp);
+
+ void maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal, OutOfLineCode* ool);
+
+ Vector<CodeOffset, 0, JitAllocPolicy> ionScriptLabels_;
+
+ struct SharedStub {
+ ICStub::Kind kind;
+ IonICEntry entry;
+ CodeOffset label;
+
+ SharedStub(ICStub::Kind kind, IonICEntry entry, CodeOffset label)
+ : kind(kind), entry(entry), label(label)
+ {}
+ };
+
+ Vector<SharedStub, 0, SystemAllocPolicy> sharedStubs_;
+
+ void branchIfInvalidated(Register temp, Label* invalidated);
+
+#ifdef DEBUG
+ void emitDebugResultChecks(LInstruction* ins);
+ void emitObjectOrStringResultChecks(LInstruction* lir, MDefinition* mir);
+ void emitValueResultChecks(LInstruction* lir, MDefinition* mir);
+#endif
+
+ // Script counts created during code generation.
+ IonScriptCounts* scriptCounts_;
+
+#if defined(JS_ION_PERF)
+ PerfSpewer perfSpewer_;
+#endif
+
+    // This integer is a bit mask of all SimdTypeDescr::Type indexes. When an
+    // MSimdBox instruction is encoded, it might have been created either by
+    // IonBuilder or by the Eager Simd Unbox phase.
+    //
+    // As the template objects are weak references, the JitCompartment uses
+    // read barriers, but such barriers cannot be used during compilation. To
+    // work around this issue, the barriers are captured during
+    // CodeGenerator::link.
+    //
+    // Instead of saving the pointers, we just save the indexes of the
+    // read-barriered objects in a bit mask.
+ uint32_t simdRefreshTemplatesDuringLink_;
+
+ void registerSimdTemplate(SimdType simdType);
+ void captureSimdTemplate(JSContext* cx);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CodeGenerator_h */
diff --git a/js/src/jit/CompactBuffer.h b/js/src/jit/CompactBuffer.h
new file mode 100644
index 000000000..2477549b3
--- /dev/null
+++ b/js/src/jit/CompactBuffer.h
@@ -0,0 +1,206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Compactbuffer_h
+#define jit_Compactbuffer_h
+
+#include "jsalloc.h"
+
+#include "jit/IonTypes.h"
+#include "js/Vector.h"
+
+namespace js {
+namespace jit {
+
+class CompactBufferWriter;
+
+// CompactBuffers are byte streams designed for compressible integers. They
+// have helper functions for writing bytes, fixed-size integers, and
+// variable-sized integers. Variable-sized integers are encoded in 1-5 bytes,
+// each byte containing 7 bits of the integer and a bit which specifies
+// whether the next byte is also part of the integer.
+//
+// Fixed-width integers are also available, in case the actual value will not
+// be known until later.
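+//
+// For example, writeUnsigned(0x90) emits the two bytes 0x21 0x02: the low
+// seven bits (0x10) occupy bits 1-7 of the first byte with the continuation
+// bit (bit 0) set, and the remaining bits (0x01) occupy the second byte with
+// the continuation bit clear.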
+
+class CompactBufferReader
+{
+ const uint8_t* buffer_;
+ const uint8_t* end_;
+
+ uint32_t readVariableLength() {
+ uint32_t val = 0;
+ uint32_t shift = 0;
+ uint8_t byte;
+ while (true) {
+ MOZ_ASSERT(shift < 32);
+ byte = readByte();
+ val |= (uint32_t(byte) >> 1) << shift;
+ shift += 7;
+ if (!(byte & 1))
+ return val;
+ }
+ }
+
+ public:
+ CompactBufferReader(const uint8_t* start, const uint8_t* end)
+ : buffer_(start),
+ end_(end)
+ { }
+ inline explicit CompactBufferReader(const CompactBufferWriter& writer);
+ uint8_t readByte() {
+ MOZ_ASSERT(buffer_ < end_);
+ return *buffer_++;
+ }
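+    // Fixed-width integers are stored least-significant byte first,
+    // regardless of the host byte order (cf. readNativeEndianUint32_t below).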
+ uint32_t readFixedUint32_t() {
+ uint32_t b0 = readByte();
+ uint32_t b1 = readByte();
+ uint32_t b2 = readByte();
+ uint32_t b3 = readByte();
+ return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+ }
+ uint16_t readFixedUint16_t() {
+ uint32_t b0 = readByte();
+ uint32_t b1 = readByte();
+ return b0 | (b1 << 8);
+ }
+ uint32_t readNativeEndianUint32_t() {
+ // Must be at 4-byte boundary
+ MOZ_ASSERT(uintptr_t(buffer_) % sizeof(uint32_t) == 0);
+ return *reinterpret_cast<const uint32_t*>(buffer_);
+ }
+ uint32_t readUnsigned() {
+ return readVariableLength();
+ }
+ int32_t readSigned() {
+ uint8_t b = readByte();
+ bool isNegative = !!(b & (1 << 0));
+ bool more = !!(b & (1 << 1));
+ int32_t result = b >> 2;
+ if (more)
+ result |= readUnsigned() << 6;
+ if (isNegative)
+ return -result;
+ return result;
+ }
+
+ bool more() const {
+ MOZ_ASSERT(buffer_ <= end_);
+ return buffer_ < end_;
+ }
+
+ void seek(const uint8_t* start, uint32_t offset) {
+ buffer_ = start + offset;
+ MOZ_ASSERT(start < end_);
+ MOZ_ASSERT(buffer_ < end_);
+ }
+
+ const uint8_t* currentPosition() const {
+ return buffer_;
+ }
+};
+
+class CompactBufferWriter
+{
+ js::Vector<uint8_t, 32, SystemAllocPolicy> buffer_;
+ bool enoughMemory_;
+
+ public:
+ CompactBufferWriter()
+ : enoughMemory_(true)
+ { }
+
+ void setOOM() {
+ enoughMemory_ = false;
+ }
+
+ // Note: writeByte() takes uint32 to catch implicit casts with a runtime
+ // assert.
+ void writeByte(uint32_t byte) {
+ MOZ_ASSERT(byte <= 0xFF);
+ enoughMemory_ &= buffer_.append(byte);
+ }
+ void writeByteAt(uint32_t pos, uint32_t byte) {
+ MOZ_ASSERT(byte <= 0xFF);
+ if (!oom())
+ buffer_[pos] = byte;
+ }
+ void writeUnsigned(uint32_t value) {
+ do {
+ uint8_t byte = ((value & 0x7F) << 1) | (value > 0x7F);
+ writeByte(byte);
+ value >>= 7;
+ } while (value);
+ }
+ void writeUnsignedAt(uint32_t pos, uint32_t value, uint32_t original) {
+ MOZ_ASSERT(value <= original);
+ do {
+ uint8_t byte = ((value & 0x7F) << 1) | (original > 0x7F);
+ writeByteAt(pos++, byte);
+ value >>= 7;
+ original >>= 7;
+ } while (original);
+ }
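+    // Signed values store the sign in bit 0, a "more bytes follow" flag in
+    // bit 1, and the low six magnitude bits in bits 2-7 of the first byte;
+    // for example, writeSigned(-5) emits the single byte 0x15.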
+ void writeSigned(int32_t v) {
+ bool isNegative = v < 0;
+ uint32_t value = isNegative ? -v : v;
+ uint8_t byte = ((value & 0x3F) << 2) | ((value > 0x3F) << 1) | uint32_t(isNegative);
+ writeByte(byte);
+
+ // Write out the rest of the bytes, if needed.
+ value >>= 6;
+ if (value == 0)
+ return;
+ writeUnsigned(value);
+ }
+ void writeFixedUint32_t(uint32_t value) {
+ writeByte(value & 0xFF);
+ writeByte((value >> 8) & 0xFF);
+ writeByte((value >> 16) & 0xFF);
+ writeByte((value >> 24) & 0xFF);
+ }
+ void writeFixedUint16_t(uint16_t value) {
+ writeByte(value & 0xFF);
+ writeByte(value >> 8);
+ }
+ void writeNativeEndianUint32_t(uint32_t value) {
+ // Must be at 4-byte boundary
+ MOZ_ASSERT_IF(!oom(), length() % sizeof(uint32_t) == 0);
+ writeFixedUint32_t(0);
+ if (oom())
+ return;
+ uint8_t* endPtr = buffer() + length();
+ reinterpret_cast<uint32_t*>(endPtr)[-1] = value;
+ }
+ size_t length() const {
+ return buffer_.length();
+ }
+ uint8_t* buffer() {
+ MOZ_ASSERT(!oom());
+ return &buffer_[0];
+ }
+ const uint8_t* buffer() const {
+ MOZ_ASSERT(!oom());
+ return &buffer_[0];
+ }
+ bool oom() const {
+ return !enoughMemory_;
+ }
+ void propagateOOM(bool success) {
+ enoughMemory_ &= success;
+ }
+};
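+
+// A writer and a reader are typically used as a pair. A minimal sketch, with
+// someLength and someDelta standing in for arbitrary values:
+//
+//   CompactBufferWriter writer;
+//   writer.writeUnsigned(someLength);
+//   writer.writeSigned(someDelta);
+//   if (writer.oom())
+//       return false;
+//   CompactBufferReader reader(writer);
+//   uint32_t length = reader.readUnsigned();
+//   int32_t delta = reader.readSigned();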
+
+CompactBufferReader::CompactBufferReader(const CompactBufferWriter& writer)
+ : buffer_(writer.buffer()),
+ end_(writer.buffer() + writer.length())
+{
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Compactbuffer_h */
diff --git a/js/src/jit/CompileInfo-inl.h b/js/src/jit/CompileInfo-inl.h
new file mode 100644
index 000000000..57d6d12ef
--- /dev/null
+++ b/js/src/jit/CompileInfo-inl.h
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CompileInfo_inl_h
+#define jit_CompileInfo_inl_h
+
+#include "jit/CompileInfo.h"
+#include "jit/JitAllocPolicy.h"
+
+#include "jsscriptinlines.h"
+
+namespace js {
+namespace jit {
+
+inline RegExpObject*
+CompileInfo::getRegExp(jsbytecode* pc) const
+{
+ return script_->getRegExp(pc);
+}
+
+inline JSFunction*
+CompileInfo::getFunction(jsbytecode* pc) const
+{
+ return script_->getFunction(GET_UINT32_INDEX(pc));
+}
+
+InlineScriptTree*
+InlineScriptTree::New(TempAllocator* allocator, InlineScriptTree* callerTree,
+ jsbytecode* callerPc, JSScript* script)
+{
+ MOZ_ASSERT_IF(!callerTree, !callerPc);
+ MOZ_ASSERT_IF(callerTree, callerTree->script()->containsPC(callerPc));
+
+ // Allocate a new InlineScriptTree
+ void* treeMem = allocator->allocate(sizeof(InlineScriptTree));
+ if (!treeMem)
+ return nullptr;
+
+ // Initialize it.
+ return new (treeMem) InlineScriptTree(callerTree, callerPc, script);
+}
+
+InlineScriptTree*
+InlineScriptTree::addCallee(TempAllocator* allocator, jsbytecode* callerPc,
+ JSScript* calleeScript)
+{
+ MOZ_ASSERT(script_ && script_->containsPC(callerPc));
+ InlineScriptTree* calleeTree = New(allocator, this, callerPc, calleeScript);
+ if (!calleeTree)
+ return nullptr;
+
+ calleeTree->nextCallee_ = children_;
+ children_ = calleeTree;
+ return calleeTree;
+}
+
+static inline const char*
+AnalysisModeString(AnalysisMode mode)
+{
+ switch (mode) {
+ case Analysis_None:
+ return "Analysis_None";
+ case Analysis_DefiniteProperties:
+ return "Analysis_DefiniteProperties";
+ case Analysis_ArgumentsUsage:
+ return "Analysis_ArgumentsUsage";
+ default:
+ MOZ_CRASH("Invalid AnalysisMode");
+ }
+}
+
+static inline bool
+CanIonCompile(JSScript* script, AnalysisMode mode)
+{
+ switch (mode) {
+ case Analysis_None: return script->canIonCompile();
+ case Analysis_DefiniteProperties: return true;
+ case Analysis_ArgumentsUsage: return true;
+ default:;
+ }
+ MOZ_CRASH("Invalid AnalysisMode");
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CompileInfo_inl_h */
diff --git a/js/src/jit/CompileInfo.h b/js/src/jit/CompileInfo.h
new file mode 100644
index 000000000..44848890c
--- /dev/null
+++ b/js/src/jit/CompileInfo.h
@@ -0,0 +1,560 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CompileInfo_h
+#define jit_CompileInfo_h
+
+#include "mozilla/Maybe.h"
+
+#include "jsfun.h"
+
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitFrames.h"
+#include "jit/Registers.h"
+#include "vm/EnvironmentObject.h"
+
+namespace js {
+namespace jit {
+
+class TrackedOptimizations;
+
+inline unsigned
+StartArgSlot(JSScript* script)
+{
+ // Reserved slots:
+ // Slot 0: Environment chain.
+ // Slot 1: Return value.
+
+ // When needed:
+    // Slot 2: Arguments object.
+
+ // Note: when updating this, please also update the assert in SnapshotWriter::startFrame
+ return 2 + (script->argumentsHasVarBinding() ? 1 : 0);
+}
+
+inline unsigned
+CountArgSlots(JSScript* script, JSFunction* fun)
+{
+ // Slot x + 0: This value.
+ // Slot x + 1: Argument 1.
+ // ...
+ // Slot x + n: Argument n.
+
+ // Note: when updating this, please also update the assert in SnapshotWriter::startFrame
+ return StartArgSlot(script) + (fun ? fun->nargs() + 1 : 0);
+}
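+
+// For example, for a function with two formal arguments and no 'arguments'
+// binding, the slots are: 0 = env chain, 1 = return value, 2 = |this|,
+// 3 and 4 = the formals, so CountArgSlots returns 5.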
+
+
+// The compiler at various points needs to be able to store references to the
+// current inline path (the sequence of scripts and call-pcs that lead to the
+// current function being inlined).
+//
+// To support this, the top-level IonBuilder keeps a tree that records the
+// inlinings done during compilation.
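+//
+// For example, if the outermost script inlines two different callees, its
+// tree node has two children, linked through their nextCallee_ pointers, and
+// each child records the bytecode pc of the call site it was inlined at.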
+class InlineScriptTree {
+ // InlineScriptTree for the caller
+ InlineScriptTree* caller_;
+
+ // PC in the caller corresponding to this script.
+ jsbytecode* callerPc_;
+
+ // Script for this entry.
+ JSScript* script_;
+
+ // Child entries (linked together by nextCallee pointer)
+ InlineScriptTree* children_;
+ InlineScriptTree* nextCallee_;
+
+ public:
+ InlineScriptTree(InlineScriptTree* caller, jsbytecode* callerPc, JSScript* script)
+ : caller_(caller), callerPc_(callerPc), script_(script),
+ children_(nullptr), nextCallee_(nullptr)
+ {}
+
+ static InlineScriptTree* New(TempAllocator* allocator, InlineScriptTree* caller,
+ jsbytecode* callerPc, JSScript* script);
+
+ InlineScriptTree* addCallee(TempAllocator* allocator, jsbytecode* callerPc,
+ JSScript* calleeScript);
+
+ InlineScriptTree* caller() const {
+ return caller_;
+ }
+
+ bool isOutermostCaller() const {
+ return caller_ == nullptr;
+ }
+ bool hasCaller() const {
+ return caller_ != nullptr;
+ }
+ InlineScriptTree* outermostCaller() {
+ if (isOutermostCaller())
+ return this;
+ return caller_->outermostCaller();
+ }
+
+ jsbytecode* callerPc() const {
+ return callerPc_;
+ }
+
+ JSScript* script() const {
+ return script_;
+ }
+
+ bool hasChildren() const {
+ return children_ != nullptr;
+ }
+ InlineScriptTree* firstChild() const {
+ MOZ_ASSERT(hasChildren());
+ return children_;
+ }
+
+ bool hasNextCallee() const {
+ return nextCallee_ != nullptr;
+ }
+ InlineScriptTree* nextCallee() const {
+ MOZ_ASSERT(hasNextCallee());
+ return nextCallee_;
+ }
+
+ unsigned depth() const {
+ if (isOutermostCaller())
+ return 1;
+ return 1 + caller_->depth();
+ }
+};
+
+class BytecodeSite : public TempObject
+{
+ // InlineScriptTree identifying innermost active function at site.
+ InlineScriptTree* tree_;
+
+ // Bytecode address within innermost active function.
+ jsbytecode* pc_;
+
+ // Optimization information at the pc.
+ TrackedOptimizations* optimizations_;
+
+ public:
+ BytecodeSite()
+ : tree_(nullptr), pc_(nullptr), optimizations_(nullptr)
+ {}
+
+ BytecodeSite(InlineScriptTree* tree, jsbytecode* pc)
+ : tree_(tree), pc_(pc), optimizations_(nullptr)
+ {
+ MOZ_ASSERT(tree_ != nullptr);
+ MOZ_ASSERT(pc_ != nullptr);
+ }
+
+ InlineScriptTree* tree() const {
+ return tree_;
+ }
+
+ jsbytecode* pc() const {
+ return pc_;
+ }
+
+ JSScript* script() const {
+ return tree_ ? tree_->script() : nullptr;
+ }
+
+ bool hasOptimizations() const {
+ return !!optimizations_;
+ }
+
+ TrackedOptimizations* optimizations() const {
+ MOZ_ASSERT(hasOptimizations());
+ return optimizations_;
+ }
+
+ void setOptimizations(TrackedOptimizations* optimizations) {
+ optimizations_ = optimizations;
+ }
+};
+
+enum AnalysisMode {
+ /* JavaScript execution, not analysis. */
+ Analysis_None,
+
+ /*
+ * MIR analysis performed when invoking 'new' on a script, to determine
+ * definite properties. Used by the optimizing JIT.
+ */
+ Analysis_DefiniteProperties,
+
+ /*
+ * MIR analysis performed when executing a script which uses its arguments,
+ * when it is not known whether a lazy arguments value can be used.
+ */
+ Analysis_ArgumentsUsage
+};
+
+// Contains information about the compilation source for IR being generated.
+class CompileInfo
+{
+ public:
+ CompileInfo(JSScript* script, JSFunction* fun, jsbytecode* osrPc,
+ AnalysisMode analysisMode, bool scriptNeedsArgsObj,
+ InlineScriptTree* inlineScriptTree)
+ : script_(script), fun_(fun), osrPc_(osrPc),
+ analysisMode_(analysisMode), scriptNeedsArgsObj_(scriptNeedsArgsObj),
+ hadOverflowBailout_(script->hadOverflowBailout()),
+ mayReadFrameArgsDirectly_(script->mayReadFrameArgsDirectly()),
+ inlineScriptTree_(inlineScriptTree)
+ {
+ MOZ_ASSERT_IF(osrPc, JSOp(*osrPc) == JSOP_LOOPENTRY);
+
+ // The function here can flow in from anywhere so look up the canonical
+ // function to ensure that we do not try to embed a nursery pointer in
+ // jit-code. Precisely because it can flow in from anywhere, it's not
+ // guaranteed to be non-lazy. Hence, don't access its script!
+ if (fun_) {
+ fun_ = fun_->nonLazyScript()->functionNonDelazifying();
+ MOZ_ASSERT(fun_->isTenured());
+ }
+
+ nimplicit_ = StartArgSlot(script) /* env chain and argument obj */
+ + (fun ? 1 : 0); /* this */
+ nargs_ = fun ? fun->nargs() : 0;
+ nlocals_ = script->nfixed();
+
+ // An extra slot is needed for global scopes because INITGLEXICAL (stack
+ // depth 1) is compiled as a SETPROP (stack depth 2) on the global lexical
+ // scope.
+ uint32_t extra = script->isGlobalCode() ? 1 : 0;
+ nstack_ = Max<unsigned>(script->nslots() - script->nfixed(), MinJITStackSize) + extra;
+ nslots_ = nimplicit_ + nargs_ + nlocals_ + nstack_;
+
+ // For derived class constructors, find and cache the frame slot for
+ // the .this binding. This slot is assumed to be always
+ // observable. See isObservableFrameSlot.
+ if (script->isDerivedClassConstructor()) {
+ MOZ_ASSERT(script->functionHasThisBinding());
+ CompileRuntime* runtime = GetJitContext()->runtime;
+ for (BindingIter bi(script); bi; bi++) {
+ if (bi.name() != runtime->names().dotThis)
+ continue;
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Frame) {
+ thisSlotForDerivedClassConstructor_ = mozilla::Some(localSlot(loc.slot()));
+ break;
+ }
+ }
+ }
+ }
+
+ explicit CompileInfo(unsigned nlocals)
+ : script_(nullptr), fun_(nullptr), osrPc_(nullptr),
+        analysisMode_(Analysis_None), scriptNeedsArgsObj_(false),
+        hadOverflowBailout_(false), mayReadFrameArgsDirectly_(false),
+        inlineScriptTree_(nullptr)
+ {
+ nimplicit_ = 0;
+ nargs_ = 0;
+ nlocals_ = nlocals;
+ nstack_ = 1; /* For FunctionCompiler::pushPhiInput/popPhiOutput */
+ nslots_ = nlocals_ + nstack_;
+ }
+
+ JSScript* script() const {
+ return script_;
+ }
+ bool compilingWasm() const {
+ return script() == nullptr;
+ }
+ JSFunction* funMaybeLazy() const {
+ return fun_;
+ }
+ ModuleObject* module() const {
+ return script_->module();
+ }
+ jsbytecode* osrPc() const {
+ return osrPc_;
+ }
+ InlineScriptTree* inlineScriptTree() const {
+ return inlineScriptTree_;
+ }
+
+ bool hasOsrAt(jsbytecode* pc) const {
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+ return pc == osrPc();
+ }
+
+ jsbytecode* startPC() const {
+ return script_->code();
+ }
+ jsbytecode* limitPC() const {
+ return script_->codeEnd();
+ }
+
+ const char* filename() const {
+ return script_->filename();
+ }
+
+ unsigned lineno() const {
+ return script_->lineno();
+ }
+ unsigned lineno(jsbytecode* pc) const {
+ return PCToLineNumber(script_, pc);
+ }
+
+ // Script accessors based on PC.
+
+ JSAtom* getAtom(jsbytecode* pc) const {
+ return script_->getAtom(GET_UINT32_INDEX(pc));
+ }
+
+ PropertyName* getName(jsbytecode* pc) const {
+ return script_->getName(GET_UINT32_INDEX(pc));
+ }
+
+ inline RegExpObject* getRegExp(jsbytecode* pc) const;
+
+ JSObject* getObject(jsbytecode* pc) const {
+ return script_->getObject(GET_UINT32_INDEX(pc));
+ }
+
+ inline JSFunction* getFunction(jsbytecode* pc) const;
+
+ const Value& getConst(jsbytecode* pc) const {
+ return script_->getConst(GET_UINT32_INDEX(pc));
+ }
+
+ jssrcnote* getNote(GSNCache& gsn, jsbytecode* pc) const {
+ return GetSrcNote(gsn, script(), pc);
+ }
+
+ // Total number of slots: args, locals, and stack.
+ unsigned nslots() const {
+ return nslots_;
+ }
+
+    // Number of slots needed for the env chain, the return value,
+    // possibly an arguments object, and the |this| value.
+ unsigned nimplicit() const {
+ return nimplicit_;
+ }
+ // Number of arguments (without counting this value).
+ unsigned nargs() const {
+ return nargs_;
+ }
+ // Number of slots needed for all local variables. This includes "fixed
+ // vars" (see above) and also block-scoped locals.
+ unsigned nlocals() const {
+ return nlocals_;
+ }
+ unsigned ninvoke() const {
+ return nslots_ - nstack_;
+ }
+
+ uint32_t environmentChainSlot() const {
+ MOZ_ASSERT(script());
+ return 0;
+ }
+ uint32_t returnValueSlot() const {
+ MOZ_ASSERT(script());
+ return 1;
+ }
+ uint32_t argsObjSlot() const {
+ MOZ_ASSERT(hasArguments());
+ return 2;
+ }
+ uint32_t thisSlot() const {
+ MOZ_ASSERT(funMaybeLazy());
+ MOZ_ASSERT(nimplicit_ > 0);
+ return nimplicit_ - 1;
+ }
+ uint32_t firstArgSlot() const {
+ return nimplicit_;
+ }
+ uint32_t argSlotUnchecked(uint32_t i) const {
+ // During initialization, some routines need to get at arg
+ // slots regardless of how regular argument access is done.
+ MOZ_ASSERT(i < nargs_);
+ return nimplicit_ + i;
+ }
+ uint32_t argSlot(uint32_t i) const {
+ // This should only be accessed when compiling functions for
+ // which argument accesses don't need to go through the
+ // argument object.
+ MOZ_ASSERT(!argsObjAliasesFormals());
+ return argSlotUnchecked(i);
+ }
+ uint32_t firstLocalSlot() const {
+ return nimplicit_ + nargs_;
+ }
+ uint32_t localSlot(uint32_t i) const {
+ return firstLocalSlot() + i;
+ }
+ uint32_t firstStackSlot() const {
+ return firstLocalSlot() + nlocals();
+ }
+ uint32_t stackSlot(uint32_t i) const {
+ return firstStackSlot() + i;
+ }
+
+ uint32_t startArgSlot() const {
+ MOZ_ASSERT(script());
+ return StartArgSlot(script());
+ }
+ uint32_t endArgSlot() const {
+ MOZ_ASSERT(script());
+ return CountArgSlots(script(), funMaybeLazy());
+ }
+
+ uint32_t totalSlots() const {
+ MOZ_ASSERT(script() && funMaybeLazy());
+ return nimplicit() + nargs() + nlocals();
+ }
+
+ bool isSlotAliased(uint32_t index) const {
+ MOZ_ASSERT(index >= startArgSlot());
+ uint32_t arg = index - firstArgSlot();
+ if (arg < nargs())
+ return script()->formalIsAliased(arg);
+ return false;
+ }
+
+ bool hasArguments() const {
+ return script()->argumentsHasVarBinding();
+ }
+ bool argumentsAliasesFormals() const {
+ return script()->argumentsAliasesFormals();
+ }
+ bool needsArgsObj() const {
+ return scriptNeedsArgsObj_;
+ }
+ bool argsObjAliasesFormals() const {
+ return scriptNeedsArgsObj_ && script()->hasMappedArgsObj();
+ }
+
+ AnalysisMode analysisMode() const {
+ return analysisMode_;
+ }
+
+ bool isAnalysis() const {
+ return analysisMode_ != Analysis_None;
+ }
+
+    // Returns true if a slot can be observed outside the current frame while
+ // the frame is active on the stack. This implies that these definitions
+ // would have to be executed and that they cannot be removed even if they
+ // are unused.
+ bool isObservableSlot(uint32_t slot) const {
+ if (isObservableFrameSlot(slot))
+ return true;
+
+ if (isObservableArgumentSlot(slot))
+ return true;
+
+ return false;
+ }
+
+ bool isObservableFrameSlot(uint32_t slot) const {
+ if (!funMaybeLazy())
+ return false;
+
+ // The |this| value must always be observable.
+ if (slot == thisSlot())
+ return true;
+
+ // The |this| frame slot in derived class constructors should never be
+ // optimized out, as a Debugger might need to perform TDZ checks on it
+ // via, e.g., an exceptionUnwind handler. The TDZ check is required
+ // for correctness if the handler decides to continue execution.
+ if (thisSlotForDerivedClassConstructor_ && *thisSlotForDerivedClassConstructor_ == slot)
+ return true;
+
+ if (funMaybeLazy()->needsSomeEnvironmentObject() && slot == environmentChainSlot())
+ return true;
+
+ // If the function may need an arguments object, then make sure to
+ // preserve the env chain, because it may be needed to construct the
+ // arguments object during bailout. If we've already created an
+ // arguments object (or got one via OSR), preserve that as well.
+ if (hasArguments() && (slot == environmentChainSlot() || slot == argsObjSlot()))
+ return true;
+
+ return false;
+ }
+
+ bool isObservableArgumentSlot(uint32_t slot) const {
+ if (!funMaybeLazy())
+ return false;
+
+ // Function.arguments can be used to access all arguments in non-strict
+ // scripts, so we can't optimize out any arguments.
+ if ((hasArguments() || !script()->strict()) &&
+ firstArgSlot() <= slot && slot - firstArgSlot() < nargs())
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+    // Returns true if a slot can be recovered before or during a bailout. A
+    // definition which can be observed and recovered can be optimized away as
+    // long as we can compute its value.
+ bool isRecoverableOperand(uint32_t slot) const {
+        // If this script is not a function, then none of the slots are
+        // observable. If this |slot| is not observable, we can always
+        // recover it.
+ if (!funMaybeLazy())
+ return true;
+
+ // The |this| and the |envChain| values can be recovered.
+ if (slot == thisSlot() || slot == environmentChainSlot())
+ return true;
+
+ if (isObservableFrameSlot(slot))
+ return false;
+
+ if (needsArgsObj() && isObservableArgumentSlot(slot))
+ return false;
+
+ return true;
+ }
+
+ // Check previous bailout states to prevent doing the same bailout in the
+ // next compilation.
+ bool hadOverflowBailout() const {
+ return hadOverflowBailout_;
+ }
+ bool mayReadFrameArgsDirectly() const {
+ return mayReadFrameArgsDirectly_;
+ }
+
+ private:
+ unsigned nimplicit_;
+ unsigned nargs_;
+ unsigned nlocals_;
+ unsigned nstack_;
+ unsigned nslots_;
+ mozilla::Maybe<unsigned> thisSlotForDerivedClassConstructor_;
+ JSScript* script_;
+ JSFunction* fun_;
+ jsbytecode* osrPc_;
+ AnalysisMode analysisMode_;
+
+ // Whether a script needs an arguments object is unstable over compilation
+ // since the arguments optimization could be marked as failed on the main
+ // thread, so cache a value here and use it throughout for consistency.
+ bool scriptNeedsArgsObj_;
+
+ // Record the state of previous bailouts in order to prevent compiling the
+ // same function identically the next time.
+ bool hadOverflowBailout_;
+
+ bool mayReadFrameArgsDirectly_;
+
+ InlineScriptTree* inlineScriptTree_;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CompileInfo_h */
diff --git a/js/src/jit/CompileWrappers.cpp b/js/src/jit/CompileWrappers.cpp
new file mode 100644
index 000000000..9402efcbd
--- /dev/null
+++ b/js/src/jit/CompileWrappers.cpp
@@ -0,0 +1,310 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Ion.h"
+
+#include "jscompartmentinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+JSRuntime*
+CompileRuntime::runtime()
+{
+ return reinterpret_cast<JSRuntime*>(this);
+}
+
+/* static */ CompileRuntime*
+CompileRuntime::get(JSRuntime* rt)
+{
+ return reinterpret_cast<CompileRuntime*>(rt);
+}
+
+bool
+CompileRuntime::onMainThread()
+{
+ return js::CurrentThreadCanAccessRuntime(runtime());
+}
+
+js::PerThreadData*
+CompileRuntime::mainThread()
+{
+ MOZ_ASSERT(onMainThread());
+ return &runtime()->mainThread;
+}
+
+const void*
+CompileRuntime::addressOfJitTop()
+{
+ return &runtime()->jitTop;
+}
+
+const void*
+CompileRuntime::addressOfJitActivation()
+{
+ return &runtime()->jitActivation;
+}
+
+const void*
+CompileRuntime::addressOfProfilingActivation()
+{
+ return (const void*) &runtime()->profilingActivation_;
+}
+
+const void*
+CompileRuntime::addressOfJitStackLimit()
+{
+ return runtime()->addressOfJitStackLimit();
+}
+
+#ifdef DEBUG
+const void*
+CompileRuntime::addressOfIonBailAfter()
+{
+ return runtime()->addressOfIonBailAfter();
+}
+#endif
+
+const void*
+CompileRuntime::addressOfActivation()
+{
+ return runtime()->addressOfActivation();
+}
+
+#ifdef JS_GC_ZEAL
+const void*
+CompileRuntime::addressOfGCZealModeBits()
+{
+ return runtime()->gc.addressOfZealModeBits();
+}
+#endif
+
+const void*
+CompileRuntime::addressOfInterruptUint32()
+{
+ return runtime()->addressOfInterruptUint32();
+}
+
+const void*
+CompileRuntime::getJSContext()
+{
+ return runtime()->unsafeContextFromAnyThread();
+}
+
+const JitRuntime*
+CompileRuntime::jitRuntime()
+{
+ return runtime()->jitRuntime();
+}
+
+SPSProfiler&
+CompileRuntime::spsProfiler()
+{
+ return runtime()->spsProfiler;
+}
+
+bool
+CompileRuntime::jitSupportsFloatingPoint()
+{
+ return runtime()->jitSupportsFloatingPoint;
+}
+
+bool
+CompileRuntime::hadOutOfMemory()
+{
+ return runtime()->hadOutOfMemory;
+}
+
+bool
+CompileRuntime::profilingScripts()
+{
+ return runtime()->profilingScripts;
+}
+
+const JSAtomState&
+CompileRuntime::names()
+{
+ return *runtime()->commonNames;
+}
+
+const PropertyName*
+CompileRuntime::emptyString()
+{
+ return runtime()->emptyString;
+}
+
+const StaticStrings&
+CompileRuntime::staticStrings()
+{
+ return *runtime()->staticStrings;
+}
+
+const Value&
+CompileRuntime::NaNValue()
+{
+ return runtime()->NaNValue;
+}
+
+const Value&
+CompileRuntime::positiveInfinityValue()
+{
+ return runtime()->positiveInfinityValue;
+}
+
+const WellKnownSymbols&
+CompileRuntime::wellKnownSymbols()
+{
+ MOZ_ASSERT(onMainThread());
+ return *runtime()->wellKnownSymbols;
+}
+
+#ifdef DEBUG
+bool
+CompileRuntime::isInsideNursery(gc::Cell* cell)
+{
+ return UninlinedIsInsideNursery(cell);
+}
+#endif
+
+const DOMCallbacks*
+CompileRuntime::DOMcallbacks()
+{
+ return runtime()->DOMcallbacks;
+}
+
+const Nursery&
+CompileRuntime::gcNursery()
+{
+ return runtime()->gc.nursery;
+}
+
+void
+CompileRuntime::setMinorGCShouldCancelIonCompilations()
+{
+ MOZ_ASSERT(onMainThread());
+ runtime()->gc.storeBuffer.setShouldCancelIonCompilations();
+}
+
+bool
+CompileRuntime::runtimeMatches(JSRuntime* rt)
+{
+ return rt == runtime();
+}
+
+Zone*
+CompileZone::zone()
+{
+ return reinterpret_cast<Zone*>(this);
+}
+
+/* static */ CompileZone*
+CompileZone::get(Zone* zone)
+{
+ return reinterpret_cast<CompileZone*>(zone);
+}
+
+const void*
+CompileZone::addressOfNeedsIncrementalBarrier()
+{
+ return zone()->addressOfNeedsIncrementalBarrier();
+}
+
+const void*
+CompileZone::addressOfFreeList(gc::AllocKind allocKind)
+{
+ return zone()->arenas.addressOfFreeList(allocKind);
+}
+
+JSCompartment*
+CompileCompartment::compartment()
+{
+ return reinterpret_cast<JSCompartment*>(this);
+}
+
+/* static */ CompileCompartment*
+CompileCompartment::get(JSCompartment* comp)
+{
+ return reinterpret_cast<CompileCompartment*>(comp);
+}
+
+CompileZone*
+CompileCompartment::zone()
+{
+ return CompileZone::get(compartment()->zone());
+}
+
+CompileRuntime*
+CompileCompartment::runtime()
+{
+ return CompileRuntime::get(compartment()->runtimeFromAnyThread());
+}
+
+const void*
+CompileCompartment::addressOfEnumerators()
+{
+ return &compartment()->enumerators;
+}
+
+const void*
+CompileCompartment::addressOfLastCachedNativeIterator()
+{
+ return &compartment()->lastCachedNativeIterator;
+}
+
+const void*
+CompileCompartment::addressOfRandomNumberGenerator()
+{
+ return compartment()->randomNumberGenerator.ptr();
+}
+
+const JitCompartment*
+CompileCompartment::jitCompartment()
+{
+ return compartment()->jitCompartment();
+}
+
+const GlobalObject*
+CompileCompartment::maybeGlobal()
+{
+ // This uses unsafeUnbarrieredMaybeGlobal() so as not to trigger the read
+ // barrier on the global from off the main thread. This is safe because we
+ // abort Ion compilation when we GC.
+ return compartment()->unsafeUnbarrieredMaybeGlobal();
+}
+
+bool
+CompileCompartment::hasAllocationMetadataBuilder()
+{
+ return compartment()->hasAllocationMetadataBuilder();
+}
+
+// Note: This function is thread-safe because setSingletonsAsValues sets a
+// boolean variable to false, and this boolean variable cannot be reset to
+// true. So even if there is a concurrent write, that write will always store
+// the same value. If there is a concurrent read, then we will clone a
+// singleton instead of using the value which is baked into the JSScript,
+// and this would be an unfortunate allocation, but it will not change the
+// semantics of the JavaScript code which is executed.
+void
+CompileCompartment::setSingletonsAsValues()
+{
+ compartment()->behaviors().setSingletonsAsValues();
+}
+
+JitCompileOptions::JitCompileOptions()
+ : cloneSingletons_(false),
+ spsSlowAssertionsEnabled_(false),
+ offThreadCompilationAvailable_(false)
+{
+}
+
+JitCompileOptions::JitCompileOptions(JSContext* cx)
+{
+ cloneSingletons_ = cx->compartment()->creationOptions().cloneSingletons();
+ spsSlowAssertionsEnabled_ = cx->runtime()->spsProfiler.enabled() &&
+ cx->runtime()->spsProfiler.slowAssertionsEnabled();
+ offThreadCompilationAvailable_ = OffThreadCompilationAvailable(cx);
+}
diff --git a/js/src/jit/CompileWrappers.h b/js/src/jit/CompileWrappers.h
new file mode 100644
index 000000000..bbec9ffa3
--- /dev/null
+++ b/js/src/jit/CompileWrappers.h
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CompileWrappers_h
+#define jit_CompileWrappers_h
+
+#include "jscntxt.h"
+
+namespace js {
+namespace jit {
+
+class JitRuntime;
+
+// During Ion compilation we need access to various bits of the current
+// compartment, runtime and so forth. However, since compilation can run off
+// thread while the main thread is actively mutating the VM, this access needs
+// to be restricted. The classes below give the compiler an interface to access
+// all necessary information in a threadsafe fashion.
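+//
+// Each wrapper is just a view over the underlying VM object: for example,
+// CompileRuntime::get() reinterpret_casts the JSRuntime* (see
+// CompileWrappers.cpp), so the wrappers add no state of their own and only
+// restrict which accessors compilation code may call.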
+
+class CompileRuntime
+{
+ JSRuntime* runtime();
+
+ public:
+ static CompileRuntime* get(JSRuntime* rt);
+
+ bool onMainThread();
+
+ js::PerThreadData* mainThread();
+
+ // &runtime()->jitTop
+ const void* addressOfJitTop();
+
+ // &runtime()->jitActivation
+ const void* addressOfJitActivation();
+
+ // &runtime()->profilingActivation
+ const void* addressOfProfilingActivation();
+
+ // rt->runtime()->jitStackLimit;
+ const void* addressOfJitStackLimit();
+
+#ifdef DEBUG
+ // rt->runtime()->addressOfIonBailAfter;
+ const void* addressOfIonBailAfter();
+#endif
+
+ // &runtime()->activation_
+ const void* addressOfActivation();
+
+#ifdef JS_GC_ZEAL
+ const void* addressOfGCZealModeBits();
+#endif
+
+ const void* addressOfInterruptUint32();
+
+ // We have to bake JSContext* into JIT code, but this pointer shouldn't be
+ // used/dereferenced on the background thread so we return it as void*.
+ const void* getJSContext();
+
+ const JitRuntime* jitRuntime();
+
+ // Compilation does not occur off thread when the SPS profiler is enabled.
+ SPSProfiler& spsProfiler();
+
+ bool jitSupportsFloatingPoint();
+ bool hadOutOfMemory();
+ bool profilingScripts();
+
+ const JSAtomState& names();
+ const PropertyName* emptyString();
+ const StaticStrings& staticStrings();
+ const Value& NaNValue();
+ const Value& positiveInfinityValue();
+ const WellKnownSymbols& wellKnownSymbols();
+
+#ifdef DEBUG
+ bool isInsideNursery(gc::Cell* cell);
+#endif
+
+ // DOM callbacks must be threadsafe (and will hopefully be removed soon).
+ const DOMCallbacks* DOMcallbacks();
+
+ const Nursery& gcNursery();
+ void setMinorGCShouldCancelIonCompilations();
+
+ bool runtimeMatches(JSRuntime* rt);
+};
+
+class CompileZone
+{
+ Zone* zone();
+
+ public:
+ static CompileZone* get(Zone* zone);
+
+ const void* addressOfNeedsIncrementalBarrier();
+
+ const void* addressOfFreeList(gc::AllocKind allocKind);
+};
+
+class JitCompartment;
+
+class CompileCompartment
+{
+ JSCompartment* compartment();
+
+ public:
+ static CompileCompartment* get(JSCompartment* comp);
+
+ CompileZone* zone();
+ CompileRuntime* runtime();
+
+ const void* addressOfEnumerators();
+ const void* addressOfRandomNumberGenerator();
+ const void* addressOfLastCachedNativeIterator();
+
+ const JitCompartment* jitCompartment();
+
+ const GlobalObject* maybeGlobal();
+
+ bool hasAllocationMetadataBuilder();
+
+ // Mirror CompartmentOptions.
+ void setSingletonsAsValues();
+};
+
+class JitCompileOptions
+{
+ public:
+ JitCompileOptions();
+ explicit JitCompileOptions(JSContext* cx);
+
+ bool cloneSingletons() const {
+ return cloneSingletons_;
+ }
+
+ bool spsSlowAssertionsEnabled() const {
+ return spsSlowAssertionsEnabled_;
+ }
+
+ bool offThreadCompilationAvailable() const {
+ return offThreadCompilationAvailable_;
+ }
+
+ private:
+ bool cloneSingletons_;
+ bool spsSlowAssertionsEnabled_;
+ bool offThreadCompilationAvailable_;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_CompileWrappers_h
diff --git a/js/src/jit/Disassembler.cpp b/js/src/jit/Disassembler.cpp
new file mode 100644
index 000000000..371e24205
--- /dev/null
+++ b/js/src/jit/Disassembler.cpp
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Disassembler.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::jit::Disassembler;
+
+#ifdef DEBUG
+bool
+Disassembler::ComplexAddress::operator==(const ComplexAddress& other) const
+{
+ return base_ == other.base_ &&
+ index_ == other.index_ &&
+ scale_ == other.scale_ &&
+ disp_ == other.disp_ &&
+ isPCRelative_ == other.isPCRelative_;
+}
+
+bool
+Disassembler::ComplexAddress::operator!=(const ComplexAddress& other) const
+{
+ return !operator==(other);
+}
+
+bool
+Disassembler::OtherOperand::operator==(const OtherOperand& other) const
+{
+ if (kind_ != other.kind_)
+ return false;
+ switch (kind_) {
+ case Imm: return u_.imm == other.u_.imm;
+ case GPR: return u_.gpr == other.u_.gpr;
+ case FPR: return u_.fpr == other.u_.fpr;
+ }
+ MOZ_CRASH("Unexpected OtherOperand kind");
+}
+
+bool
+Disassembler::OtherOperand::operator!=(const OtherOperand& other) const
+{
+ return !operator==(other);
+}
+
+bool
+Disassembler::HeapAccess::operator==(const HeapAccess& other) const
+{
+ return kind_ == other.kind_ &&
+ size_ == other.size_ &&
+ address_ == other.address_ &&
+ otherOperand_ == other.otherOperand_;
+}
+
+bool
+Disassembler::HeapAccess::operator!=(const HeapAccess& other) const
+{
+ return !operator==(other);
+}
+
+#endif
diff --git a/js/src/jit/Disassembler.h b/js/src/jit/Disassembler.h
new file mode 100644
index 000000000..101b78968
--- /dev/null
+++ b/js/src/jit/Disassembler.h
@@ -0,0 +1,278 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Disassembler_h
+#define jit_Disassembler_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+namespace Disassembler {
+
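+// A memory operand of the form disp + base + index * (1 << scale). Either of
+// base and index may be absent (Registers::Invalid), scale is stored as a
+// log2 value, and PC-relative addresses are flagged separately.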
+class ComplexAddress {
+ int32_t disp_;
+ Register::Encoding base_ : 8;
+ Register::Encoding index_ : 8;
+ int8_t scale_; // log2 encoding
+ bool isPCRelative_;
+
+ public:
+ ComplexAddress()
+ : disp_(0),
+ base_(Registers::Invalid),
+ index_(Registers::Invalid),
+ scale_(0),
+ isPCRelative_(false)
+ {
+ MOZ_ASSERT(*this == *this);
+ }
+
+ ComplexAddress(int32_t disp, Register::Encoding base)
+ : disp_(disp),
+ base_(base),
+ index_(Registers::Invalid),
+ scale_(0),
+ isPCRelative_(false)
+ {
+ MOZ_ASSERT(*this == *this);
+ MOZ_ASSERT(base != Registers::Invalid);
+ MOZ_ASSERT(base_ == base);
+ }
+
+ ComplexAddress(int32_t disp, Register::Encoding base, Register::Encoding index, int scale)
+ : disp_(disp),
+ base_(base),
+ index_(index),
+ scale_(scale),
+ isPCRelative_(false)
+ {
+ MOZ_ASSERT(scale >= 0 && scale < 4);
+ MOZ_ASSERT_IF(index == Registers::Invalid, scale == 0);
+ MOZ_ASSERT(*this == *this);
+ MOZ_ASSERT(base_ == base);
+ MOZ_ASSERT(index_ == index);
+ }
+
+ explicit ComplexAddress(const void* addr)
+ : disp_(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr))),
+ base_(Registers::Invalid),
+ index_(Registers::Invalid),
+ scale_(0),
+ isPCRelative_(false)
+ {
+ MOZ_ASSERT(*this == *this);
+ MOZ_ASSERT(reinterpret_cast<const void*>(uintptr_t(disp_)) == addr);
+ }
+
+ explicit ComplexAddress(const Operand& op) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ *this = ComplexAddress(op.disp(), op.base());
+ return;
+ case Operand::MEM_SCALE:
+ *this = ComplexAddress(op.disp(), op.base(), op.index(), op.scale());
+ return;
+ case Operand::MEM_ADDRESS32:
+ *this = ComplexAddress(op.address());
+ return;
+ default:
+ break;
+ }
+#endif
+ MOZ_CRASH("Unexpected Operand kind");
+ }
+
+ bool isPCRelative() const {
+ return isPCRelative_;
+ }
+
+ int32_t disp() const {
+ return disp_;
+ }
+
+ bool hasBase() const {
+ return base_ != Registers::Invalid;
+ }
+
+ Register::Encoding base() const {
+ MOZ_ASSERT(hasBase());
+ return base_;
+ }
+
+ bool hasIndex() const {
+ return index_ != Registers::Invalid;
+ }
+
+ Register::Encoding index() const {
+ MOZ_ASSERT(hasIndex());
+ return index_;
+ }
+
+ uint32_t scale() const {
+ return scale_;
+ }
+
+#ifdef DEBUG
+ bool operator==(const ComplexAddress& other) const;
+ bool operator!=(const ComplexAddress& other) const;
+#endif
+};
+
+// An operand other than a memory operand -- a register or an immediate.
+class OtherOperand {
+ public:
+ enum Kind {
+ Imm,
+ GPR,
+ FPR,
+ };
+
+ private:
+ Kind kind_;
+ union {
+ int32_t imm;
+ Register::Encoding gpr;
+ FloatRegister::Encoding fpr;
+ } u_;
+
+ public:
+ OtherOperand()
+ : kind_(Imm)
+ {
+ u_.imm = 0;
+ MOZ_ASSERT(*this == *this);
+ }
+
+ explicit OtherOperand(int32_t imm)
+ : kind_(Imm)
+ {
+ u_.imm = imm;
+ MOZ_ASSERT(*this == *this);
+ }
+
+ explicit OtherOperand(Register::Encoding gpr)
+ : kind_(GPR)
+ {
+ u_.gpr = gpr;
+ MOZ_ASSERT(*this == *this);
+ }
+
+ explicit OtherOperand(FloatRegister::Encoding fpr)
+ : kind_(FPR)
+ {
+ u_.fpr = fpr;
+ MOZ_ASSERT(*this == *this);
+ }
+
+ Kind kind() const {
+ return kind_;
+ }
+
+ int32_t imm() const {
+ MOZ_ASSERT(kind_ == Imm);
+ return u_.imm;
+ }
+
+ Register::Encoding gpr() const {
+ MOZ_ASSERT(kind_ == GPR);
+ return u_.gpr;
+ }
+
+ FloatRegister::Encoding fpr() const {
+ MOZ_ASSERT(kind_ == FPR);
+ return u_.fpr;
+ }
+
+#ifdef DEBUG
+ bool operator==(const OtherOperand& other) const;
+ bool operator!=(const OtherOperand& other) const;
+#endif
+};
+
+class HeapAccess {
+ public:
+ enum Kind {
+ Unknown,
+ Load, // any bits not covered by the load are zeroed
+ LoadSext32, // like Load, but sign-extend to 32 bits
+ LoadSext64, // like Load, but sign-extend to 64 bits
+ Store
+ };
+
+ private:
+ Kind kind_;
+ size_t size_; // The number of bytes of memory accessed
+ ComplexAddress address_;
+ OtherOperand otherOperand_;
+
+ public:
+ HeapAccess()
+ : kind_(Unknown),
+ size_(0)
+ {
+ MOZ_ASSERT(*this == *this);
+ }
+
+ HeapAccess(Kind kind, size_t size, const ComplexAddress& address, const OtherOperand& otherOperand)
+ : kind_(kind),
+ size_(size),
+ address_(address),
+ otherOperand_(otherOperand)
+ {
+ MOZ_ASSERT(kind != Unknown);
+ MOZ_ASSERT_IF(kind == LoadSext32, otherOperand.kind() != OtherOperand::FPR);
+ MOZ_ASSERT_IF(kind == Load || kind == LoadSext32, otherOperand.kind() != OtherOperand::Imm);
+ MOZ_ASSERT(*this == *this);
+ }
+
+ Kind kind() const {
+ return kind_;
+ }
+
+ size_t size() const {
+ MOZ_ASSERT(kind_ != Unknown);
+ return size_;
+ }
+
+ const ComplexAddress& address() const {
+ return address_;
+ }
+
+ const OtherOperand& otherOperand() const {
+ return otherOperand_;
+ }
+
+#ifdef DEBUG
+ bool operator==(const HeapAccess& other) const;
+ bool operator!=(const HeapAccess& other) const;
+#endif
+};
+
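+// Decode the heap load or store starting at |ptr|, fill in |*access|, and
+// return a pointer just past the decoded instruction (VerifyHeapAccess below
+// asserts on both the returned end and the decoded contents).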
+MOZ_COLD uint8_t* DisassembleHeapAccess(uint8_t* ptr, HeapAccess* access);
+
+#ifdef DEBUG
+void DumpHeapAccess(const HeapAccess& access);
+
+inline void
+VerifyHeapAccess(uint8_t* begin, uint8_t* end, const HeapAccess& expected)
+{
+ HeapAccess disassembled;
+ uint8_t* e = DisassembleHeapAccess(begin, &disassembled);
+ MOZ_ASSERT(e == end);
+ MOZ_ASSERT(disassembled == expected);
+}
+#endif
+
+} // namespace Disassembler
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Disassembler_h */
diff --git a/js/src/jit/EagerSimdUnbox.cpp b/js/src/jit/EagerSimdUnbox.cpp
new file mode 100644
index 000000000..0b93d145d
--- /dev/null
+++ b/js/src/jit/EagerSimdUnbox.cpp
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/EagerSimdUnbox.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+// Do not optimize any Phi instruction which has conflicting Unbox operations,
+// as this might imply some intended polymorphism.
+static bool
+CanUnboxSimdPhi(const JitCompartment* jitCompartment, MPhi* phi, SimdType unboxType)
+{
+ MOZ_ASSERT(phi->type() == MIRType::Object);
+
+    // If we are unboxing, we are more than likely to have boxed this SIMD type
+    // once in baseline; otherwise, we cannot create an MSimdBox as we have no
+    // template object to use.
+ if (!jitCompartment->maybeGetSimdTemplateObjectFor(unboxType))
+ return false;
+
+ MResumePoint* entry = phi->block()->entryResumePoint();
+ MIRType mirType = SimdTypeToMIRType(unboxType);
+ for (MUseIterator i(phi->usesBegin()), e(phi->usesEnd()); i != e; i++) {
+ // If we cannot recover the Simd object at the entry of the basic block,
+ // then we would have to box the content anyways.
+ if ((*i)->consumer() == entry && !entry->isRecoverableOperand(*i))
+ return false;
+
+ if (!(*i)->consumer()->isDefinition())
+ continue;
+
+ MDefinition* def = (*i)->consumer()->toDefinition();
+ if (def->isSimdUnbox() && def->toSimdUnbox()->type() != mirType)
+ return false;
+ }
+
+ return true;
+}
+
+static void
+UnboxSimdPhi(const JitCompartment* jitCompartment, MIRGraph& graph, MPhi* phi, SimdType unboxType)
+{
+ TempAllocator& alloc = graph.alloc();
+
+ // Unbox and replace all operands.
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* op = phi->getOperand(i);
+ MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
+ op->block()->insertAtEnd(unbox);
+ phi->replaceOperand(i, unbox);
+ }
+
+ // Change the MIRType of the Phi.
+ MIRType mirType = SimdTypeToMIRType(unboxType);
+ phi->setResultType(mirType);
+
+ MBasicBlock* phiBlock = phi->block();
+ MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
+ MInstruction* at = phiBlock->safeInsertTop(atRecover);
+
+ // Note, we capture the uses-list now, as new instructions are not visited.
+ MUseIterator i(phi->usesBegin()), e(phi->usesEnd());
+
+ // Add a MSimdBox, and replace all the Phi uses with it.
+ JSObject* templateObject = jitCompartment->maybeGetSimdTemplateObjectFor(unboxType);
+ InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
+ MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
+ recoverBox->setRecoveredOnBailout();
+ phiBlock->insertBefore(atRecover, recoverBox);
+
+ MSimdBox* box = nullptr;
+ while (i != e) {
+ MUse* use = *i++;
+ MNode* ins = use->consumer();
+
+ if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
+ (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
+ {
+ use->replaceProducer(recoverBox);
+ continue;
+ }
+
+ if (!box) {
+ box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
+ phiBlock->insertBefore(at, box);
+ }
+
+ use->replaceProducer(box);
+ }
+}
+
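+// Scan the graph for MSimdUnbox instructions whose input is a phi of boxed
+// SIMD objects and, when it is safe, retype the phi to the unboxed SIMD
+// type, unboxing its operands and re-boxing its remaining object uses.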
+bool
+EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
+{
+ const JitCompartment* jitCompartment = GetJitContext()->compartment->jitCompartment();
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Eager Simd Unbox"))
+ return false;
+
+ for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
+ if (!ins->isSimdUnbox())
+ continue;
+
+ MSimdUnbox* unbox = ins->toSimdUnbox();
+ if (!unbox->input()->isPhi())
+ continue;
+
+ MPhi* phi = unbox->input()->toPhi();
+ if (!CanUnboxSimdPhi(jitCompartment, phi, unbox->simdType()))
+ continue;
+
+ UnboxSimdPhi(jitCompartment, graph, phi, unbox->simdType());
+ }
+ }
+
+ return true;
+}
+
+} /* namespace jit */
+} /* namespace js */
diff --git a/js/src/jit/EagerSimdUnbox.h b/js/src/jit/EagerSimdUnbox.h
new file mode 100644
index 000000000..382662422
--- /dev/null
+++ b/js/src/jit/EagerSimdUnbox.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file declares eager SIMD unboxing.
+#ifndef jit_EagerSimdUnbox_h
+#define jit_EagerSimdUnbox_h
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+MOZ_MUST_USE bool
+EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_EagerSimdUnbox_h */
diff --git a/js/src/jit/EdgeCaseAnalysis.cpp b/js/src/jit/EdgeCaseAnalysis.cpp
new file mode 100644
index 000000000..b7d73973a
--- /dev/null
+++ b/js/src/jit/EdgeCaseAnalysis.cpp
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/EdgeCaseAnalysis.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+EdgeCaseAnalysis::EdgeCaseAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : mir(mir), graph(graph)
+{
+}
+
+bool
+EdgeCaseAnalysis::analyzeLate()
+{
+ // Renumber definitions for NeedNegativeZeroCheck under analyzeEdgeCasesBackward.
+ uint32_t nextId = 0;
+
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ if (mir->shouldCancel("Analyze Late (first loop)"))
+ return false;
+
+ iter->setId(nextId++);
+ iter->analyzeEdgeCasesForward();
+ }
+ block->lastIns()->setId(nextId++);
+ }
+
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
+ for (MInstructionReverseIterator riter(block->rbegin()); riter != block->rend(); riter++) {
+ if (mir->shouldCancel("Analyze Late (second loop)"))
+ return false;
+
+ riter->analyzeEdgeCasesBackward();
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/EdgeCaseAnalysis.h b/js/src/jit/EdgeCaseAnalysis.h
new file mode 100644
index 000000000..d35d95978
--- /dev/null
+++ b/js/src/jit/EdgeCaseAnalysis.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_EdgeCaseAnalysis_h
+#define jit_EdgeCaseAnalysis_h
+
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class EdgeCaseAnalysis
+{
+ MIRGenerator* mir;
+ MIRGraph& graph;
+
+ public:
+ EdgeCaseAnalysis(MIRGenerator* mir, MIRGraph& graph);
+ MOZ_MUST_USE bool analyzeLate();
+};
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_EdgeCaseAnalysis_h */
diff --git a/js/src/jit/EffectiveAddressAnalysis.cpp b/js/src/jit/EffectiveAddressAnalysis.cpp
new file mode 100644
index 000000000..a17da8747
--- /dev/null
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -0,0 +1,277 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/EffectiveAddressAnalysis.h"
+#include "jit/IonAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace jit;
+
+static void
+AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
+{
+ if (lsh->specialization() != MIRType::Int32)
+ return;
+
+ if (lsh->isRecoveredOnBailout())
+ return;
+
+ MDefinition* index = lsh->lhs();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
+ if (!shiftValue)
+ return;
+
+ if (shiftValue->type() != MIRType::Int32 || !IsShiftInScaleRange(shiftValue->toInt32()))
+ return;
+
+ Scale scale = ShiftToScale(shiftValue->toInt32());
+
+ int32_t displacement = 0;
+ MInstruction* last = lsh;
+ MDefinition* base = nullptr;
+ while (true) {
+ if (!last->hasOneUse())
+ break;
+
+ MUseIterator use = last->usesBegin();
+ if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
+ break;
+
+ MAdd* add = use->consumer()->toDefinition()->toAdd();
+ if (add->specialization() != MIRType::Int32 || !add->isTruncated())
+ break;
+
+ MDefinition* other = add->getOperand(1 - add->indexOf(*use));
+
+ if (MConstant* otherConst = other->maybeConstantValue()) {
+ displacement += otherConst->toInt32();
+ } else {
+ if (base)
+ break;
+ base = other;
+ }
+
+ last = add;
+ if (last->isRecoveredOnBailout())
+ return;
+ }
+
+ if (!base) {
+ uint32_t elemSize = 1 << ScaleToShift(scale);
+ if (displacement % elemSize != 0)
+ return;
+
+ if (!last->hasOneUse())
+ return;
+
+ MUseIterator use = last->usesBegin();
+ if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
+ return;
+
+ MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
+ if (bitAnd->isRecoveredOnBailout())
+ return;
+
+ MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
+ MConstant* otherConst = other->maybeConstantValue();
+ if (!otherConst || otherConst->type() != MIRType::Int32)
+ return;
+
+ uint32_t bitsClearedByShift = elemSize - 1;
+ uint32_t bitsClearedByMask = ~uint32_t(otherConst->toInt32());
+ if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
+ return;
+
+ bitAnd->replaceAllUsesWith(last);
+ return;
+ }
+
+ if (base->isRecoveredOnBailout())
+ return;
+
+ MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
+ last->replaceAllUsesWith(eaddr);
+ last->block()->insertAfter(last, eaddr);
+}
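+
+// Two small worked instances of the rewrites above (the concrete values are
+// illustrative and not taken from this patch):
+//  - lea formation: (y << 3) + x + 16 is the effective address x + y*8 + 16,
+//    i.e. base = x, index = y, scale = 8, displacement = 16;
+//  - redundant mask: with an 8-byte scale the shift already clears the low
+//    three bits, so masking with 0xFFFFFFF8 clears nothing new and the BitAnd
+//    can be replaced by its other operand.
+static_assert((7 << 3) + 100 + 16 == 100 + 7 * 8 + 16,
+              "(y << 3) + x + 16 maps onto a single addressing mode");
+static_assert(((8 - 1) & ~0xFFFFFFF8u) == ~0xFFFFFFF8u,
+              "bits cleared by the mask are already cleared by the shift");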
+
+// Transform:
+//
+// [AddI]
+// addl $9, %esi
+// [LoadUnboxedScalar]
+// movsd 0x0(%rbx,%rsi,8), %xmm4
+//
+// into:
+//
+// [LoadUnboxedScalar]
+// movsd 0x48(%rbx,%rsi,8), %xmm4
+//
+// This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
+static void
+AnalyzeLoadUnboxedScalar(TempAllocator& alloc, MLoadUnboxedScalar* load)
+{
+ if (load->isRecoveredOnBailout())
+ return;
+
+ if (!load->getOperand(1)->isAdd())
+ return;
+
+ JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id());
+
+ MAdd* add = load->getOperand(1)->toAdd();
+
+ if (add->specialization() != MIRType::Int32 || !add->hasUses() ||
+ add->truncateKind() != MDefinition::TruncateKind::Truncate)
+ {
+ return;
+ }
+
+ MDefinition* lhs = add->lhs();
+ MDefinition* rhs = add->rhs();
+ MDefinition* constant = nullptr;
+ MDefinition* node = nullptr;
+
+ if (lhs->isConstant()) {
+ constant = lhs;
+ node = rhs;
+ } else if (rhs->isConstant()) {
+ constant = rhs;
+ node = lhs;
+ } else
+ return;
+
+ MOZ_ASSERT(constant->type() == MIRType::Int32);
+
+ size_t storageSize = Scalar::byteSize(load->storageType());
+ int32_t c1 = load->offsetAdjustment();
+ int32_t c2 = 0;
+ if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2))
+ return;
+
+ int32_t offset = 0;
+ if (!SafeAdd(c1, c2, &offset))
+ return;
+
+ JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset,
+ load->opName(), load->id());
+ load->setOffsetAdjustment(offset);
+ load->replaceOperand(1, node);
+
+ if (!add->hasLiveDefUses() && DeadIfUnused(add) && add->canRecoverOnBailout()) {
+ JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u",
+ add->opName(), add->id());
+ add->setRecoveredOnBailoutUnchecked();
+ }
+}
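+
+// Worked instance of the transform documented above, using the values from
+// that comment: folding the addl $9 into an 8-byte (Float64) scalar access
+// with a zero offsetAdjustment gives 0 + 9 * 8 = 0x48, which is exactly the
+// displacement of the rewritten movsd. SafeMul/SafeAdd above merely guard
+// this arithmetic against int32 overflow.
+static_assert(0 + 9 * 8 == 0x48, "addl $9 on an 8-byte scalar folds to 0x48");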
+
+template<typename AsmJSMemoryAccess>
+bool
+EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o)
+{
+#ifdef WASM_HUGE_MEMORY
+ // Compute the new offset. Check for overflow.
+ uint32_t oldOffset = ins->offset();
+ uint32_t newOffset = oldOffset + o;
+ if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
+ return false;
+
+ // The offset must ultimately be written into the offset immediate of a load
+ // or store instruction, so don't allow folding if the new offset is bigger
+ // than the guard limit.
+ if (newOffset >= wasm::OffsetGuardLimit)
+ return false;
+
+ // Everything checks out. This is the new offset.
+ ins->setOffset(newOffset);
+ return true;
+#else
+ return false;
+#endif
+}
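+
+// The wraparound test above exploits unsigned arithmetic: a negative
+// displacement that would take the offset below zero wraps upward, and a
+// positive displacement that exceeds UINT32_MAX wraps downward. Two
+// illustrative cases (values chosen for this sketch, not from the tree):
+//   oldOffset = 4,          o = -8   -> newOffset = 0xFFFFFFFC >= oldOffset, rejected;
+//   oldOffset = 0xFFFFFFF0, o = 0x20 -> newOffset = 0x10 < oldOffset, rejected.
+static_assert(uint32_t(4) + uint32_t(int32_t(-8)) == 0xFFFFFFFC,
+              "an underflowing negative displacement wraps upward");
+static_assert(uint32_t(0xFFFFFFF0) + uint32_t(0x20) == 0x10,
+              "an overflowing positive displacement wraps downward");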
+
+template<typename AsmJSMemoryAccess>
+void
+EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
+{
+ MDefinition* base = ins->base();
+
+ if (base->isConstant()) {
+ // Look for heap[i] where i is a constant offset, and fold the offset.
+ // By doing the folding now, we simplify the task of codegen; the offset
+ // is always the address mode immediate. This also allows it to avoid
+ // a situation where the sum of a constant pointer value and a non-zero
+ // offset doesn't actually fit into the address mode immediate.
+ int32_t imm = base->toConstant()->toInt32();
+ if (imm != 0 && tryAddDisplacement(ins, imm)) {
+ MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
+ ins->block()->insertBefore(ins, zero);
+ ins->replaceBase(zero);
+ }
+
+ // If the index is within the minimum heap length, we can optimize
+ // away the bounds check.
+ if (imm >= 0) {
+ int32_t end = (uint32_t)imm + ins->byteSize();
+ if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength())
+ ins->removeBoundsCheck();
+ }
+ } else if (base->isAdd()) {
+ // Look for heap[a+i] where i is a constant offset, and fold the offset.
+ // Alignment masks have already been moved out of the way by the
+ // Alignment Mask Analysis pass.
+ MDefinition* op0 = base->toAdd()->getOperand(0);
+ MDefinition* op1 = base->toAdd()->getOperand(1);
+ if (op0->isConstant())
+ mozilla::Swap(op0, op1);
+ if (op1->isConstant()) {
+ int32_t imm = op1->toConstant()->toInt32();
+ if (tryAddDisplacement(ins, imm))
+ ins->replaceBase(op0);
+ }
+ }
+}
+
+// This analysis converts patterns of the form:
+// truncate(x + (y << {0,1,2,3}))
+// truncate(x + (y << {0,1,2,3}) + imm32)
+// into a single lea instruction, and patterns of the form:
+// asmload(x + imm32)
+// asmload(x << {0,1,2,3})
+// asmload((x << {0,1,2,3}) + imm32)
+// asmload((x << {0,1,2,3}) & mask) (where mask is redundant with shift)
+// asmload(((x << {0,1,2,3}) + imm32) & mask) (where mask is redundant with shift + imm32)
+// into a single asmload instruction (and for asmstore too).
+//
+// Additionally, we should consider the general forms:
+// truncate(x + y + imm32)
+// truncate((y << {0,1,2,3}) + imm32)
+bool
+EffectiveAddressAnalysis::analyze()
+{
+ for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph_.alloc().ensureBallast())
+ return false;
+
+ // Note that we don't check for MAsmJSCompareExchangeHeap
+ // or MAsmJSAtomicBinopHeap, because the backend and the OOB
+ // mechanism don't support non-zero offsets for them yet
+ // (TODO bug 1254935).
+ if (i->isLsh())
+ AnalyzeLsh(graph_.alloc(), i->toLsh());
+ else if (i->isLoadUnboxedScalar())
+ AnalyzeLoadUnboxedScalar(graph_.alloc(), i->toLoadUnboxedScalar());
+ else if (i->isAsmJSLoadHeap())
+ analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
+ else if (i->isAsmJSStoreHeap())
+ analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
+ }
+ }
+ return true;
+}
diff --git a/js/src/jit/EffectiveAddressAnalysis.h b/js/src/jit/EffectiveAddressAnalysis.h
new file mode 100644
index 000000000..fd53d090e
--- /dev/null
+++ b/js/src/jit/EffectiveAddressAnalysis.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_EffectiveAddressAnalysis_h
+#define jit_EffectiveAddressAnalysis_h
+
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class EffectiveAddressAnalysis
+{
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+
+ template <typename AsmJSMemoryAccess>
+ MOZ_MUST_USE bool tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o);
+
+ template <typename AsmJSMemoryAccess>
+ void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins);
+
+ public:
+ EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir), graph_(graph)
+ {}
+
+ MOZ_MUST_USE bool analyze();
+};
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_EffectiveAddressAnalysis_h */
diff --git a/js/src/jit/ExecutableAllocator.cpp b/js/src/jit/ExecutableAllocator.cpp
new file mode 100644
index 000000000..20ca74183
--- /dev/null
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -0,0 +1,390 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "jit/ExecutableAllocator.h"
+
+#include "jit/JitCompartment.h"
+#include "js/MemoryMetrics.h"
+
+using namespace js::jit;
+
+ExecutablePool::~ExecutablePool()
+{
+ MOZ_ASSERT(m_ionCodeBytes == 0);
+ MOZ_ASSERT(m_baselineCodeBytes == 0);
+ MOZ_ASSERT(m_regexpCodeBytes == 0);
+ MOZ_ASSERT(m_otherCodeBytes == 0);
+
+ MOZ_ASSERT(!isMarked());
+
+ m_allocator->releasePoolPages(this);
+}
+
+void
+ExecutablePool::release(bool willDestroy)
+{
+ MOZ_ASSERT(m_refCount != 0);
+ MOZ_ASSERT_IF(willDestroy, m_refCount == 1);
+ if (--m_refCount == 0)
+ js_delete(this);
+}
+
+void
+ExecutablePool::release(size_t n, CodeKind kind)
+{
+ switch (kind) {
+ case ION_CODE:
+ m_ionCodeBytes -= n;
+ MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
+ break;
+ case BASELINE_CODE:
+ m_baselineCodeBytes -= n;
+ MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
+ break;
+ case REGEXP_CODE:
+ m_regexpCodeBytes -= n;
+ MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
+ break;
+ case OTHER_CODE:
+ m_otherCodeBytes -= n;
+ MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
+ break;
+ default:
+ MOZ_CRASH("bad code kind");
+ }
+
+ release();
+}
+
+void
+ExecutablePool::addRef()
+{
+ // It should be impossible for us to roll over, because only small
+ // pools have multiple holders, and they have one holder per chunk
+ // of generated code, and they only hold 16KB or so of code.
+ MOZ_ASSERT(m_refCount);
+ ++m_refCount;
+ MOZ_ASSERT(m_refCount, "refcount overflow");
+}
+
+void*
+ExecutablePool::alloc(size_t n, CodeKind kind)
+{
+ MOZ_ASSERT(n <= available());
+ void* result = m_freePtr;
+ m_freePtr += n;
+
+ switch (kind) {
+ case ION_CODE: m_ionCodeBytes += n; break;
+ case BASELINE_CODE: m_baselineCodeBytes += n; break;
+ case REGEXP_CODE: m_regexpCodeBytes += n; break;
+ case OTHER_CODE: m_otherCodeBytes += n; break;
+ default: MOZ_CRASH("bad code kind");
+ }
+
+ return result;
+}
+
+size_t
+ExecutablePool::available() const
+{
+ MOZ_ASSERT(m_end >= m_freePtr);
+ return m_end - m_freePtr;
+}
+
+ExecutableAllocator::ExecutableAllocator(JSRuntime* rt)
+ : rt_(rt)
+{
+ MOZ_ASSERT(m_smallPools.empty());
+}
+
+ExecutableAllocator::~ExecutableAllocator()
+{
+ for (size_t i = 0; i < m_smallPools.length(); i++)
+ m_smallPools[i]->release(/* willDestroy = */true);
+
+ // If this asserts we have a pool leak.
+ MOZ_ASSERT_IF(m_pools.initialized(), m_pools.empty());
+}
+
+ExecutablePool*
+ExecutableAllocator::poolForSize(size_t n)
+{
+ // Try to fit in an existing small allocator. Use the pool with the
+ // least available space that is big enough (best-fit). This is the
+ // best strategy because (a) it maximizes the chance of the next
+ // allocation fitting in a small pool, and (b) it minimizes the
+ // potential waste when a small pool is next abandoned.
+ ExecutablePool* minPool = nullptr;
+ for (size_t i = 0; i < m_smallPools.length(); i++) {
+ ExecutablePool* pool = m_smallPools[i];
+ if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
+ minPool = pool;
+ }
+ if (minPool) {
+ minPool->addRef();
+ return minPool;
+ }
+
+ // If the request is large, we just provide an unshared allocator.
+ if (n > ExecutableCodePageSize)
+ return createPool(n);
+
+ // Create a new allocator
+ ExecutablePool* pool = createPool(ExecutableCodePageSize);
+ if (!pool)
+ return nullptr;
+ // At this point, local |pool| is the owner.
+
+ if (m_smallPools.length() < maxSmallPools) {
+ // We haven't hit the maximum number of live pools; add the new pool.
+ // If append() OOMs, we just return an unshared allocator.
+ if (m_smallPools.append(pool))
+ pool->addRef();
+ } else {
+ // Find the pool with the least space.
+ int iMin = 0;
+ for (size_t i = 1; i < m_smallPools.length(); i++) {
+ if (m_smallPools[i]->available() < m_smallPools[iMin]->available())
+ iMin = i;
+ }
+
+ // If the new allocator will result in more free space than the small
+ // pool with the least space, then we will use it instead
+ ExecutablePool* minPool = m_smallPools[iMin];
+ if ((pool->available() - n) > minPool->available()) {
+ minPool->release();
+ m_smallPools[iMin] = pool;
+ pool->addRef();
+ }
+ }
+
+ // Pass ownership to the caller.
+ return pool;
+}
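+
+// A minimal model of the best-fit choice above, over plain byte counts rather
+// than ExecutablePool objects (an illustrative sketch, not part of this
+// patch): pick the candidate with the least available space that still fits.
+static size_t
+BestFitIndex(const size_t* available, size_t count, size_t n, bool* found)
+{
+    size_t best = 0;
+    *found = false;
+    for (size_t i = 0; i < count; i++) {
+        if (available[i] >= n && (!*found || available[i] < available[best])) {
+            best = i;
+            *found = true;
+        }
+    }
+    return best;
+}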
+
+/* static */ size_t
+ExecutableAllocator::roundUpAllocationSize(size_t request, size_t granularity)
+{
+ // Something included via windows.h defines a macro with this name,
+ // which causes the function below to fail to compile.
+#ifdef _MSC_VER
+# undef max
+#endif
+
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ return OVERSIZE_ALLOCATION;
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ MOZ_ASSERT(size >= request);
+ return size;
+}
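+
+// The rounding above assumes a power-of-two granularity. For example, with
+// 4 KiB pages a 5000-byte request rounds up to 8192 bytes, while an exactly
+// page-sized request is returned unchanged (illustrative values only):
+static_assert(((size_t(5000) + 4095) & ~size_t(4095)) == 8192,
+              "a partial page rounds up to the next page boundary");
+static_assert(((size_t(4096) + 4095) & ~size_t(4095)) == 4096,
+              "an already page-aligned request is unchanged");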
+
+ExecutablePool*
+ExecutableAllocator::createPool(size_t n)
+{
+ MOZ_ASSERT(rt_->jitRuntime()->preventBackedgePatching());
+
+ size_t allocSize = roundUpAllocationSize(n, ExecutableCodePageSize);
+ if (allocSize == OVERSIZE_ALLOCATION)
+ return nullptr;
+
+ if (!m_pools.initialized() && !m_pools.init())
+ return nullptr;
+
+ ExecutablePool::Allocation a = systemAlloc(allocSize);
+ if (!a.pages)
+ return nullptr;
+
+ ExecutablePool* pool = js_new<ExecutablePool>(this, a);
+ if (!pool) {
+ systemRelease(a);
+ return nullptr;
+ }
+
+ if (!m_pools.put(pool)) {
+ // Note: this will call |systemRelease(a)|.
+ js_delete(pool);
+ return nullptr;
+ }
+
+ return pool;
+}
+
+void*
+ExecutableAllocator::alloc(size_t n, ExecutablePool** poolp, CodeKind type)
+{
+ // Don't race with reprotectAll called from the signal handler.
+ JitRuntime::AutoPreventBackedgePatching apbp(rt_);
+
+ // Caller must ensure 'n' is word-size aligned. If all allocations are
+ // of word sized quantities, then all subsequent allocations will be
+ // aligned.
+ MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);
+
+ if (n == OVERSIZE_ALLOCATION) {
+ *poolp = nullptr;
+ return nullptr;
+ }
+
+ *poolp = poolForSize(n);
+ if (!*poolp)
+ return nullptr;
+
+ // This alloc is infallible because poolForSize() just obtained
+ // (found, or created if necessary) a pool that had enough space.
+ void* result = (*poolp)->alloc(n, type);
+ MOZ_ASSERT(result);
+ return result;
+}
+
+void
+ExecutableAllocator::releasePoolPages(ExecutablePool* pool)
+{
+ // Don't race with reprotectAll called from the signal handler.
+ JitRuntime::AutoPreventBackedgePatching apbp(rt_);
+
+ MOZ_ASSERT(pool->m_allocation.pages);
+ systemRelease(pool->m_allocation);
+
+ MOZ_ASSERT(m_pools.initialized());
+
+ // Pool may not be present in m_pools if we hit OOM during creation.
+ if (auto ptr = m_pools.lookup(pool))
+ m_pools.remove(ptr);
+}
+
+void
+ExecutableAllocator::purge()
+{
+ // Don't race with reprotectAll called from the signal handler.
+ JitRuntime::AutoPreventBackedgePatching apbp(rt_);
+
+ for (size_t i = 0; i < m_smallPools.length(); i++)
+ m_smallPools[i]->release();
+ m_smallPools.clear();
+}
+
+void
+ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const
+{
+ if (m_pools.initialized()) {
+ for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
+ ExecutablePool* pool = r.front();
+ sizes->ion += pool->m_ionCodeBytes;
+ sizes->baseline += pool->m_baselineCodeBytes;
+ sizes->regexp += pool->m_regexpCodeBytes;
+ sizes->other += pool->m_otherCodeBytes;
+ sizes->unused += pool->m_allocation.size - pool->m_ionCodeBytes
+ - pool->m_baselineCodeBytes
+ - pool->m_regexpCodeBytes
+ - pool->m_otherCodeBytes;
+ }
+ }
+}
+
+void
+ExecutableAllocator::reprotectAll(ProtectionSetting protection)
+{
+ if (!m_pools.initialized())
+ return;
+
+ for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront())
+ reprotectPool(rt_, r.front(), protection);
+}
+
+/* static */ void
+ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool, ProtectionSetting protection)
+{
+ // Don't race with reprotectAll called from the signal handler.
+ MOZ_ASSERT(rt->jitRuntime()->preventBackedgePatching() || rt->handlingJitInterrupt());
+
+ char* start = pool->m_allocation.pages;
+ if (!ReprotectRegion(start, pool->m_freePtr - start, protection))
+ MOZ_CRASH();
+}
+
+/* static */ void
+ExecutableAllocator::poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges)
+{
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+ // Don't race with reprotectAll called from the signal handler.
+ JitRuntime::AutoPreventBackedgePatching apbp(rt);
+
+#ifdef DEBUG
+ // Make sure no pools have the mark bit set.
+ for (size_t i = 0; i < ranges.length(); i++)
+ MOZ_ASSERT(!ranges[i].pool->isMarked());
+#endif
+
+ for (size_t i = 0; i < ranges.length(); i++) {
+ ExecutablePool* pool = ranges[i].pool;
+ if (pool->m_refCount == 1) {
+ // This is the last reference so the release() call below will
+ // unmap the memory. Don't bother poisoning it.
+ continue;
+ }
+
+ MOZ_ASSERT(pool->m_refCount > 1);
+
+ // Use the pool's mark bit to indicate we made the pool writable.
+ // This avoids reprotecting a pool multiple times.
+ if (!pool->isMarked()) {
+ reprotectPool(rt, pool, ProtectionSetting::Writable);
+ pool->mark();
+ }
+
+ memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
+ }
+
+ // Make the pools executable again and drop references.
+ for (size_t i = 0; i < ranges.length(); i++) {
+ ExecutablePool* pool = ranges[i].pool;
+ if (pool->isMarked()) {
+ reprotectPool(rt, pool, ProtectionSetting::Executable);
+ pool->unmark();
+ }
+ pool->release();
+ }
+}
+
+ExecutablePool::Allocation
+ExecutableAllocator::systemAlloc(size_t n)
+{
+ void* allocation = AllocateExecutableMemory(n, ProtectionSetting::Executable);
+ ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
+ return alloc;
+}
+
+void
+ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ DeallocateExecutableMemory(alloc.pages, alloc.size);
+}
diff --git a/js/src/jit/ExecutableAllocator.h b/js/src/jit/ExecutableAllocator.h
new file mode 100644
index 000000000..30eccd12e
--- /dev/null
+++ b/js/src/jit/ExecutableAllocator.h
@@ -0,0 +1,330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef jit_ExecutableAllocator_h
+#define jit_ExecutableAllocator_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <limits>
+#include <stddef.h> // for ptrdiff_t
+
+#include "jsalloc.h"
+
+#ifdef JS_CODEGEN_ARM
+#include "jit/arm/Architecture-arm.h"
+#endif
+#include "jit/arm/Simulator-arm.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/ProcessExecutableMemory.h"
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+#include "js/Vector.h"
+
+#ifdef JS_CPU_SPARC
+#ifdef __linux__ // bugzilla 502369
+static void sync_instruction_memory(caddr_t v, u_int len)
+{
+ caddr_t end = v + len;
+ caddr_t p = v;
+ while (p < end) {
+ asm("flush %0" : : "r" (p));
+ p += 32;
+ }
+}
+#else
+extern "C" void sync_instruction_memory(caddr_t v, u_int len);
+#endif
+#endif
+
+#if defined(__linux__) && \
+ (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) && \
+ (!defined(JS_SIMULATOR_MIPS32) && !defined(JS_SIMULATOR_MIPS64))
+#include <sys/cachectl.h>
+#endif
+
+#if defined(JS_CODEGEN_ARM) && defined(XP_IOS)
+#include <libkern/OSCacheControl.h>
+#endif
+
+namespace JS {
+ struct CodeSizes;
+} // namespace JS
+
+namespace js {
+namespace jit {
+
+enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
+
+class ExecutableAllocator;
+class JitRuntime;
+
+// These are reference-counted. A new one starts with a count of 1.
+class ExecutablePool
+{
+ friend class ExecutableAllocator;
+
+ private:
+ struct Allocation {
+ char* pages;
+ size_t size;
+ };
+
+ ExecutableAllocator* m_allocator;
+ char* m_freePtr;
+ char* m_end;
+ Allocation m_allocation;
+
+ // Reference count for automatic reclamation.
+ unsigned m_refCount:31;
+
+ // Flag that can be used by algorithms operating on pools.
+ bool m_mark:1;
+
+ // Number of bytes currently used for each kind of JIT code.
+ size_t m_ionCodeBytes;
+ size_t m_baselineCodeBytes;
+ size_t m_regexpCodeBytes;
+ size_t m_otherCodeBytes;
+
+ public:
+ void release(bool willDestroy = false);
+ void release(size_t n, CodeKind kind);
+
+ void addRef();
+
+ ExecutablePool(ExecutableAllocator* allocator, Allocation a)
+ : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
+ m_refCount(1), m_mark(false), m_ionCodeBytes(0), m_baselineCodeBytes(0),
+ m_regexpCodeBytes(0), m_otherCodeBytes(0)
+ { }
+
+ ~ExecutablePool();
+
+ void mark() {
+ MOZ_ASSERT(!m_mark);
+ m_mark = true;
+ }
+ void unmark() {
+ MOZ_ASSERT(m_mark);
+ m_mark = false;
+ }
+ bool isMarked() const {
+ return m_mark;
+ }
+
+ private:
+ ExecutablePool(const ExecutablePool&) = delete;
+ void operator=(const ExecutablePool&) = delete;
+
+ void* alloc(size_t n, CodeKind kind);
+
+ size_t available() const;
+};
+
+struct JitPoisonRange
+{
+ jit::ExecutablePool* pool;
+ void* start;
+ size_t size;
+
+ JitPoisonRange(jit::ExecutablePool* pool, void* start, size_t size)
+ : pool(pool), start(start), size(size)
+ {}
+};
+
+typedef Vector<JitPoisonRange, 0, SystemAllocPolicy> JitPoisonRangeVector;
+
+class ExecutableAllocator
+{
+ JSRuntime* rt_;
+
+ public:
+ explicit ExecutableAllocator(JSRuntime* rt);
+ ~ExecutableAllocator();
+
+ void purge();
+
+ // alloc() returns a pointer to some memory, and also (by reference) a
+ // pointer to reference-counted pool. The caller owns a reference to the
+ // pool; i.e. alloc() increments the count before returning the object.
+ void* alloc(size_t n, ExecutablePool** poolp, CodeKind type);
+
+ void releasePoolPages(ExecutablePool* pool);
+
+ void addSizeOfCode(JS::CodeSizes* sizes) const;
+
+ private:
+ static const size_t OVERSIZE_ALLOCATION = size_t(-1);
+
+ static size_t roundUpAllocationSize(size_t request, size_t granularity);
+
+ // On OOM, this will return an Allocation where pages is nullptr.
+ ExecutablePool::Allocation systemAlloc(size_t n);
+ static void systemRelease(const ExecutablePool::Allocation& alloc);
+
+ ExecutablePool* createPool(size_t n);
+ ExecutablePool* poolForSize(size_t n);
+
+ static void reprotectPool(JSRuntime* rt, ExecutablePool* pool, ProtectionSetting protection);
+
+ public:
+ MOZ_MUST_USE
+ static bool makeWritable(void* start, size_t size)
+ {
+ return ReprotectRegion(start, size, ProtectionSetting::Writable);
+ }
+
+ MOZ_MUST_USE
+ static bool makeExecutable(void* start, size_t size)
+ {
+ return ReprotectRegion(start, size, ProtectionSetting::Executable);
+ }
+
+ void makeAllWritable() {
+ reprotectAll(ProtectionSetting::Writable);
+ }
+ void makeAllExecutable() {
+ reprotectAll(ProtectionSetting::Executable);
+ }
+
+ static void poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_SIMULATOR_ARM64)
+ static void cacheFlush(void*, size_t)
+ {
+ }
+#elif defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ static void cacheFlush(void* code, size_t size)
+ {
+ js::jit::Simulator::FlushICache(code, size);
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ static void cacheFlush(void* code, size_t size)
+ {
+#if defined(_MIPS_ARCH_LOONGSON3A)
+ // On Loongson3 CPUs the cache is flushed automatically by the hardware;
+ // we just need to execute an instruction hazard barrier.
+ uintptr_t tmp;
+ asm volatile (
+ ".set push \n"
+ ".set noreorder \n"
+ "move %[tmp], $ra \n"
+ "bal 1f \n"
+ "daddiu $ra, 8 \n"
+ "1: \n"
+ "jr.hb $ra \n"
+ "move $ra, %[tmp] \n"
+ ".set pop\n"
+ :[tmp]"=&r"(tmp)
+ );
+#elif defined(__GNUC__)
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+#elif defined(JS_CODEGEN_ARM) && (defined(__FreeBSD__) || defined(__NetBSD__))
+ static void cacheFlush(void* code, size_t size)
+ {
+ __clear_cache(code, reinterpret_cast<char*>(code) + size);
+ }
+#elif (defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)) && defined(XP_IOS)
+ static void cacheFlush(void* code, size_t size)
+ {
+ sys_icache_invalidate(code, size);
+ }
+#elif defined(JS_CODEGEN_ARM) && (defined(__linux__) || defined(ANDROID)) && defined(__GNUC__)
+ static void cacheFlush(void* code, size_t size)
+ {
+ void* end = (void*)(reinterpret_cast<char*>(code) + size);
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (end)
+ : "r0", "r1", "r2");
+ if (ForceDoubleCacheFlush()) {
+ void* start = (void*)((uintptr_t)code + 1);
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (start), "r" (end)
+ : "r0", "r1", "r2");
+ }
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ static void cacheFlush(void* code, size_t size)
+ {
+ __clear_cache(code, (void *)((size_t)code + size));
+ }
+#elif JS_CPU_SPARC
+ static void cacheFlush(void* code, size_t size)
+ {
+ sync_instruction_memory((caddr_t)code, size);
+ }
+#endif
+
+ private:
+ ExecutableAllocator(const ExecutableAllocator&) = delete;
+ void operator=(const ExecutableAllocator&) = delete;
+
+ void reprotectAll(ProtectionSetting);
+
+ // These are strong references; they keep pools alive.
+ static const size_t maxSmallPools = 4;
+ typedef js::Vector<ExecutablePool*, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
+ SmallExecPoolVector m_smallPools;
+
+ // All live pools are recorded here, just for stats purposes. These are
+ // weak references; they don't keep pools alive. When a pool is destroyed
+ // its reference is removed from m_pools.
+ typedef js::HashSet<ExecutablePool*, js::DefaultHasher<ExecutablePool*>, js::SystemAllocPolicy>
+ ExecPoolHashSet;
+ ExecPoolHashSet m_pools; // All pools, just for stats purposes.
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ExecutableAllocator_h */
diff --git a/js/src/jit/FixedList.h b/js/src/jit/FixedList.h
new file mode 100644
index 000000000..dd019a658
--- /dev/null
+++ b/js/src/jit/FixedList.h
@@ -0,0 +1,106 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_FixedList_h
+#define jit_FixedList_h
+
+#include <stddef.h>
+
+#include "jit/Ion.h"
+#include "jit/JitAllocPolicy.h"
+
+namespace js {
+namespace jit {
+
+// List of a fixed length, but the length is unknown until runtime.
+template <typename T>
+class FixedList
+{
+ T* list_;
+ size_t length_;
+
+ private:
+ FixedList(const FixedList&); // no copy definition.
+ void operator= (const FixedList*); // no assignment definition.
+
+ public:
+ FixedList()
+ : list_(nullptr), length_(0)
+ { }
+
+ // Dynamic memory allocation requires the ability to report failure.
+ MOZ_MUST_USE bool init(TempAllocator& alloc, size_t length) {
+ if (length == 0)
+ return true;
+
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(length, &bytes)))
+ return false;
+ list_ = (T*)alloc.allocate(bytes);
+ if (!list_)
+ return false;
+
+ length_ = length;
+ return true;
+ }
+
+ bool empty() const {
+ return length_ == 0;
+ }
+
+ size_t length() const {
+ return length_;
+ }
+
+ void shrink(size_t num) {
+ MOZ_ASSERT(num < length_);
+ length_ -= num;
+ }
+
+ MOZ_MUST_USE bool growBy(TempAllocator& alloc, size_t num) {
+ size_t newlength = length_ + num;
+ if (newlength < length_)
+ return false;
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(newlength, &bytes)))
+ return false;
+ T* list = (T*)alloc.allocate(bytes);
+ if (MOZ_UNLIKELY(!list))
+ return false;
+
+ for (size_t i = 0; i < length_; i++)
+ list[i] = list_[i];
+
+ length_ += num;
+ list_ = list;
+ return true;
+ }
+
+ T& operator[](size_t index) {
+ MOZ_ASSERT(index < length_);
+ return list_[index];
+ }
+ const T& operator [](size_t index) const {
+ MOZ_ASSERT(index < length_);
+ return list_[index];
+ }
+
+ T* data() {
+ return list_;
+ }
+
+ T* begin() {
+ return list_;
+ }
+ T* end() {
+ return list_ + length_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_FixedList_h */
diff --git a/js/src/jit/FlowAliasAnalysis.cpp b/js/src/jit/FlowAliasAnalysis.cpp
new file mode 100644
index 000000000..2b88e3678
--- /dev/null
+++ b/js/src/jit/FlowAliasAnalysis.cpp
@@ -0,0 +1,949 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/FlowAliasAnalysis.h"
+
+#include <stdio.h>
+
+#include "jit/AliasAnalysisShared.h"
+#include "jit/Ion.h"
+#include "jit/IonBuilder.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+#include "vm/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Array;
+
+namespace js {
+namespace jit {
+
+class LoopInfo : public TempObject
+{
+ private:
+ LoopInfo* outer_;
+ MBasicBlock* loopHeader_;
+ MDefinitionVector loopinvariant_;
+
+ public:
+ LoopInfo(TempAllocator& alloc, LoopInfo* outer, MBasicBlock* loopHeader)
+ : outer_(outer), loopHeader_(loopHeader), loopinvariant_(alloc)
+ { }
+
+ MBasicBlock* loopHeader() const {
+ return loopHeader_;
+ }
+ LoopInfo* outer() const {
+ return outer_;
+ }
+ MDefinitionVector& loopinvariant() {
+ return loopinvariant_;
+ }
+};
+
+static bool
+KeepBlock(MBasicBlock *block)
+{
+ // Any block that is a predecessor to a loop header needs to be kept.
+ // We need it to process possible loop invariant loads.
+ if (block->numSuccessors() == 1 && block->getSuccessor(0)->isLoopHeader())
+ return true;
+
+#ifdef DEBUG
+ // We assume a predecessor to a loopheader has one successor.
+ for (size_t i = 0; i < block->numSuccessors(); i++)
+ MOZ_ASSERT(!block->getSuccessor(i)->isLoopHeader());
+#endif
+
+ return false;
+}
+
+class GraphStoreInfo : public TempObject
+{
+ // The current BlockStoreInfo while iterating a block; once iteration is
+ // done, it contains the store info at the end of that block.
+ BlockStoreInfo* current_;
+
+ // Vector with a pointer to the BlockStoreInfo at the end of every block.
+ // The info is only kept during iteration while still needed; otherwise the
+ // entry is nullptr.
+ GraphStoreVector stores_;
+
+ // All BlockStoreInfos that aren't needed anymore and can be reused.
+ GraphStoreVector empty_;
+
+ public:
+ explicit GraphStoreInfo(TempAllocator& alloc)
+ : current_(nullptr),
+ stores_(alloc),
+ empty_(alloc)
+ { }
+
+ bool reserve(size_t num) {
+ return stores_.appendN(nullptr, num);
+ }
+
+ BlockStoreInfo& current() {
+ return *current_;
+ }
+
+ void unsetCurrent() {
+ current_ = nullptr;
+ }
+
+ BlockStoreInfo* newCurrent(TempAllocator& alloc, MBasicBlock* block) {
+ BlockStoreInfo *info = nullptr;
+ if (empty_.length() != 0) {
+ info = empty_.popCopy();
+ } else {
+ info = (BlockStoreInfo*) alloc.allocate(sizeof(BlockStoreInfo));
+ if (!info)
+ return nullptr;
+ new(info) BlockStoreInfo(alloc);
+ }
+
+ stores_[block->id()] = info;
+ current_ = info;
+ return current_;
+ }
+
+ void swap(MBasicBlock* block1, MBasicBlock* block2) {
+ BlockStoreInfo* info = stores_[block1->id()];
+ stores_[block1->id()] = stores_[block2->id()];
+ stores_[block2->id()] = info;
+ if (stores_[block1->id()] == current_)
+ current_ = stores_[block2->id()];
+ else if (stores_[block2->id()] == current_)
+ current_ = stores_[block1->id()];
+ }
+
+ bool maybeFreePredecessorBlocks(MBasicBlock* block) {
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ // For some blocks we cannot free the store info.
+ if (KeepBlock(block->getPredecessor(i)))
+ continue;
+
+ // Check that the given block is the last successor of this predecessor.
+ bool release = true;
+ for (size_t j = 0; j < block->getPredecessor(i)->numSuccessors(); j++) {
+ if (block->getPredecessor(i)->getSuccessor(j)->id() > block->id()) {
+ release = false;
+ break;
+ }
+ }
+ if (release) {
+ BlockStoreInfo *info = stores_[block->getPredecessor(i)->id()];
+ if (!empty_.append(info))
+ return false;
+ info->clear();
+
+ stores_[block->getPredecessor(i)->id()] = nullptr;
+ }
+ }
+ return true;
+ }
+
+ BlockStoreInfo& get(MBasicBlock* block) {
+ MOZ_ASSERT(stores_[block->id()] != current_);
+ return *stores_[block->id()];
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+
+FlowAliasAnalysis::FlowAliasAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : AliasAnalysisShared(mir, graph),
+ loop_(nullptr),
+ output_(graph_.alloc()),
+ worklist_(graph_.alloc())
+{
+ stores_ = new(graph_.alloc()) GraphStoreInfo(graph_.alloc());
+}
+
+template <typename T>
+static bool
+AppendToWorklist(MDefinitionVector& worklist, T& stores)
+{
+ if (!worklist.reserve(worklist.length() + stores.length()))
+ return false;
+
+ for (size_t j = 0; j < stores.length(); j++) {
+ MOZ_ASSERT(stores[j]);
+ if (stores[j]->isInWorklist())
+ continue;
+
+ worklist.infallibleAppend(stores[j]);
+ stores[j]->setInWorklist();
+ }
+ return true;
+}
+
+static void
+SetNotInWorkList(MDefinitionVector& worklist)
+{
+ for (size_t item = 0; item < worklist.length(); item++)
+ worklist[item]->setNotInWorklistUnchecked();
+}
+
+static bool
+LoadAliasesStore(MDefinition* load, MDefinition* store)
+{
+ // Always alias the first instruction of the graph.
+ if (store->id() == 0)
+ return true;
+
+ // Default to aliasing control instructions, which indicate loops.
+ // Control instructions are special, since we need to determine
+ // whether the load aliases anything in the full loop, which we do later on.
+ if (store->isControlInstruction())
+ return true;
+
+ // Check if the alias categories alias each other.
+ if ((load->getAliasSet() & store->getAliasSet()).isNone())
+ return false;
+
+ // On any operation that has a specific alias category we can use TI to know
+ // that the objects being operated on don't intersect.
+ MDefinition::AliasType mightAlias = AliasAnalysisShared::genericMightAlias(load, store);
+ if (mightAlias == MDefinition::AliasType::NoAlias)
+ return false;
+
+ // Check if the instructions might alias each other.
+ mightAlias = load->mightAlias(store);
+ if (mightAlias == MDefinition::AliasType::NoAlias)
+ return false;
+
+ return true;
+}
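+
+// The category check above amounts to a bitmask intersection: a store whose
+// alias set is disjoint from the load's cannot alias it; only when the sets
+// intersect do the TI-based and mightAlias checks still get a chance to rule
+// aliasing out. A minimal model with illustrative flag names (the real
+// AliasSet carries more categories plus a load/store bit):
+enum IllustrativeAliasFlags { IllustrativeFieldFlag = 1 << 0, IllustrativeElementFlag = 1 << 1 };
+static_assert((IllustrativeFieldFlag & IllustrativeElementFlag) == 0,
+              "disjoint categories intersect to none, so no aliasing is possible");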
+
+#ifdef JS_JITSPEW
+static void
+DumpAliasSet(AliasSet set)
+{
+ Fprinter &print = JitSpewPrinter();
+
+ if (set.flags() == AliasSet::Any) {
+ print.printf("Any");
+ return;
+ }
+
+ bool first = true;
+ for (AliasSetIterator iter(set); iter; iter++) {
+ if (!first)
+ print.printf(", ");
+ print.printf("%s", AliasSet::Name(*iter));
+ first = false;
+ }
+}
+#endif
+
+#ifdef JS_JITSPEW
+static void
+DumpStoreList(BlockStoreInfo& stores)
+{
+ Fprinter &print = JitSpewPrinter();
+ if (stores.length() == 0) {
+ print.printf("empty");
+ return;
+ }
+ bool first = true;
+ for (size_t i = 0; i < stores.length(); i++) {
+ if (!first)
+ print.printf(", ");
+ if (!stores[i]) {
+ print.printf("nullptr");
+ continue;
+ }
+ MOZ_ASSERT(stores[i]->isControlInstruction() ||
+ stores[i]->getAliasSet().isStore() ||
+ stores[i]->id() == 0);
+ MDefinition::PrintOpcodeName(print, stores[i]->op());
+ print.printf("%d", stores[i]->id());
+ first = false;
+ }
+}
+#endif
+
+static void
+DumpAnalyzeStart()
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias) || JitSpewEnabled(JitSpew_AliasSummaries)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpewEnabled(JitSpew_Alias) ? JitSpew_Alias : JitSpew_AliasSummaries);
+ print.printf("Running Alias Analysis on graph\n");
+ }
+#endif
+}
+
+static void
+DumpBlockStart(MBasicBlock* block)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias) || JitSpewEnabled(JitSpew_AliasSummaries)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpewEnabled(JitSpew_Alias) ? JitSpew_Alias : JitSpew_AliasSummaries);
+ if (block->isLoopHeader())
+ print.printf(" Visiting block %d (loopheader)\n", block->id());
+ else
+ print.printf(" Visiting block %d\n", block->id());
+ }
+#endif
+}
+
+static void
+DumpProcessingDeferredLoads(MBasicBlock* loopHeader)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ print.printf(" Process deferred loads of loop %d\n", loopHeader->id());
+ }
+#endif
+}
+
+static void
+DumpBlockSummary(MBasicBlock* block, BlockStoreInfo& blockInfo)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_AliasSummaries)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_AliasSummaries);
+ print.printf(" Store at end of block: ");
+ DumpStoreList(blockInfo);
+ print.printf("\n");
+ }
+#endif
+}
+
+static void
+DumpStore(MDefinition* store)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ print.printf(" Store ");
+ store->PrintOpcodeName(print, store->op());
+ print.printf("%d with flags (", store->id());
+ DumpAliasSet(store->getAliasSet());
+ print.printf(")\n");
+ }
+#endif
+}
+
+static void
+DumpLoad(MDefinition* load)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ print.printf(" Load ");
+ load->PrintOpcodeName(print, load->op());
+ print.printf("%d", load->id());
+ print.printf(" with flag (");
+ DumpAliasSet(load->getAliasSet());
+ print.printf(")\n");
+ }
+#endif
+}
+
+static void
+DumpLoadOutcome(MDefinition* load, MDefinitionVector& stores, bool defer)
+{
+#ifdef JS_JITSPEW
+ // Spew what we did.
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ print.printf(" Marked depending on ");
+ DumpStoreList(stores);
+ if (defer)
+ print.printf(" deferred");
+ print.printf("\n");
+ }
+#endif
+}
+
+static void
+DumpLoopInvariant(MDefinition* load, MBasicBlock* loopheader, bool loopinvariant,
+ MDefinitionVector& loopInvariantDependency)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ if (!loopinvariant) {
+ print.printf(" Determine not loop invariant to loop %d.\n", loopheader->id());
+ } else {
+ print.printf(" Determine loop invariant to loop %d. Dependendy is now: ", loopheader->id());
+ DumpStoreList(loopInvariantDependency);
+ print.printf("\n");
+ }
+ }
+#endif
+}
+
+static void
+DumpImprovement(MDefinition *load, MDefinitionVector& input, MDefinitionVector& output)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ Fprinter &print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_Alias);
+ print.printf(" Improve dependency from %d", load->id());
+ DumpStoreList(input);
+ print.printf(" to ");
+ DumpStoreList(output);
+ print.printf("\n");
+ }
+#endif
+}
+
+// Flow Sensitive Alias Analysis.
+//
+// This pass annotates every load instruction with the last store instruction
+// on which it depends, in its dependency_ field. For loop-variant instructions
+// this will be the control instruction of the specific loop out of which the
+// load cannot be hoisted (provided there is no store between the start of the
+// loop header and the instruction).
+//
+// We visit the graph in RPO and keep track of the last stores in each block.
+// Upon entering a block we merge the store information of its predecessors.
+// Only loop headers are handled differently, since we eagerly make loads
+// depend on the control instruction of the loop header.
+//
+// During the iteration of a block we keep a running store dependency list.
+// At the end of the iteration, this contains the last stores
+// (which we keep for the successors).
+//
+// When encountering a store or load we do the following:
+// - Store: we update the current block store info and attach a StoreDependency
+//   to create a store chain.
+//
+// - Load: we take the current block store dependency info and improve it by
+//   following the store chain whenever we encounter a non-aliasing store. If
+//   the load ends up depending solely on a control instruction (which
+//   indicates a loop), we defer it until that loop has been examined.
+//
+// The algorithm depends on the invariant that both control instructions and
+// effectful instructions (stores) are never hoisted.
+
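+// A small worked example of this scheme (illustrative; the instructions and
+// their alias behaviour are assumed, not taken from this patch). Suppose a
+// block contains, in order:
+//
+//   s1: store obj.field       -> StoreDependency: [first instruction]
+//   s2: store arr[i]          -> StoreDependency: [s1]
+//   l1: load  obj.otherField
+//
+// When l1 is visited the running block store list is [s2]. If the element
+// store s2 cannot alias a field load, the chain is followed to s2's
+// dependency [s1]; if the field store s1 might alias, l1's dependency_ is set
+// to s1. Later passes that consult dependency_ are then constrained only by
+// s1, not by the unrelated element store s2.
+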
+bool
+FlowAliasAnalysis::analyze()
+{
+ DumpAnalyzeStart();
+
+ // Type analysis may have inserted new instructions. Since this pass depends
+ // on the instruction number ordering, all instructions are renumbered.
+ uint32_t newId = 0;
+
+ if (!stores_->reserve(graph_.numBlocks()))
+ return false;
+
+ for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+ if (mir->shouldCancel("Alias Analysis (main loop)"))
+ return false;
+
+ DumpBlockStart(*block);
+
+ if (!computeBlockStores(*block))
+ return false;
+ if (!stores_->maybeFreePredecessorBlocks(*block))
+ return false;
+
+ if (block->isLoopHeader())
+ loop_ = new(alloc()) LoopInfo(alloc(), loop_, *block);
+
+ for (MPhiIterator def(block->phisBegin()), end(block->phisEnd()); def != end; ++def)
+ def->setId(newId++);
+
+ BlockStoreInfo& blockInfo = stores_->current();
+ for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
+ def != end;
+ ++def)
+ {
+ def->setId(newId++);
+
+ // For the purposes of alias analysis, all recoverable operations
+ // are treated as effect free as the memory represented by these
+ // operations cannot be aliased by others.
+ if (def->canRecoverOnBailout())
+ continue;
+
+ AliasSet set = def->getAliasSet();
+ if (set.isStore()) {
+ if (!processStore(blockInfo, *def))
+ return false;
+ } else if (set.isLoad()) {
+ if (!processLoad(blockInfo, *def))
+ return false;
+ }
+ }
+
+ block->lastIns()->setId(newId++);
+
+ if (block->isLoopBackedge()) {
+ stores_->unsetCurrent();
+
+ LoopInfo* info = loop_;
+ loop_ = loop_->outer();
+
+ if (!processDeferredLoads(info))
+ return false;
+ }
+
+ DumpBlockSummary(*block, blockInfo);
+ }
+
+ spewDependencyList();
+ return true;
+}
+
+bool
+FlowAliasAnalysis::processStore(BlockStoreInfo& blockInfo, MDefinition* store)
+{
+ // Compute and set dependency information.
+ if (!saveStoreDependency(store, blockInfo))
+ return false;
+
+ // Update the block store dependency vector.
+ blockInfo.clear();
+ if (!blockInfo.append(store))
+ return false;
+
+ // Spew what we did.
+ DumpStore(store);
+ return true;
+}
+
+bool
+FlowAliasAnalysis::processLoad(BlockStoreInfo& blockInfo, MDefinition* load)
+{
+ DumpLoad(load);
+
+ // Compute dependency information.
+ MDefinitionVector& dependencies = blockInfo;
+ if (!improveDependency(load, dependencies, output_))
+ return false;
+ saveLoadDependency(load, output_);
+
+ // If possible defer when better loop information is present.
+ if (deferImproveDependency(output_)) {
+ if (!loop_->loopinvariant().append(load))
+ return false;
+
+ DumpLoadOutcome(load, output_, /* defer = */ true);
+ return true;
+ }
+
+ DumpLoadOutcome(load, output_, /* defer = */ false);
+ return true;
+}
+
+bool
+FlowAliasAnalysis::processDeferredLoads(LoopInfo* info)
+{
+ DumpProcessingDeferredLoads(info->loopHeader());
+ MOZ_ASSERT(loopIsFinished(info->loopHeader()));
+
+ for (size_t i = 0; i < info->loopinvariant().length(); i++) {
+ MDefinition* load = info->loopinvariant()[i];
+
+ DumpLoad(load);
+
+ // Defer the load again when this loop isn't finished yet.
+ if (!loopIsFinished(load->dependency()->block())) {
+ MOZ_ASSERT(loop_);
+ if (!loop_->loopinvariant().append(load))
+ return false;
+
+ DumpLoadOutcome(load, output_, /* defer = */ true);
+ continue;
+ }
+
+ MOZ_ASSERT(load->dependency()->block() == info->loopHeader());
+ MDefinition* store = load->dependency();
+ load->setDependency(nullptr);
+
+ // Test if this load is loop invariant and if it is,
+ // take the dependencies of non-backedge predecessors of the loop header.
+ bool loopinvariant;
+ if (!isLoopInvariant(load, store, &loopinvariant))
+ return false;
+ MDefinitionVector &loopInvariantDependency =
+ stores_->get(store->block()->loopPredecessor());
+
+ DumpLoopInvariant(load, info->loopHeader(), /* loopinvariant = */ loopinvariant,
+ loopInvariantDependency);
+
+ if (loopinvariant) {
+ if (!improveDependency(load, loopInvariantDependency, output_))
+ return false;
+ saveLoadDependency(load, output_);
+
+ // If possible defer when better loop information is present.
+ if (deferImproveDependency(output_)) {
+ if (!loop_->loopinvariant().append(load))
+ return false;
+
+ DumpLoadOutcome(load, output_, /* defer = */ true);
+ } else {
+ DumpLoadOutcome(load, output_, /* defer = */ false);
+ }
+
+ } else {
+ load->setDependency(store);
+
+#ifdef JS_JITSPEW
+ output_.clear();
+ if (!output_.append(store))
+ return false;
+ DumpLoadOutcome(load, output_, /* defer = */ false);
+#endif
+ }
+
+ }
+ return true;
+}
+
+// Given a load instruction and an initial store dependency list,
+// find the most accurate store dependency list.
+bool
+FlowAliasAnalysis::improveDependency(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores)
+{
+ MOZ_ASSERT(inputStores.length() > 0);
+ outputStores.clear();
+ if (!outputStores.appendAll(inputStores))
+ return false;
+
+ bool improved = false;
+ bool adjusted = true;
+ while (adjusted) {
+ adjusted = false;
+ if (!improveNonAliasedStores(load, outputStores, outputStores, &improved))
+ return false;
+ MOZ_ASSERT(outputStores.length() != 0);
+ if (!improveStoresInFinishedLoops(load, outputStores, &adjusted))
+ return false;
+ if (adjusted)
+ improved = true;
+ }
+
+ if (improved)
+ DumpImprovement(load, inputStores, outputStores);
+
+ return true;
+}
+
+// For every real store dependency, follow the chain of stores to find the
+// unique set of 'might alias' store dependencies.
+bool
+FlowAliasAnalysis::improveNonAliasedStores(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores, bool* improved,
+ bool onlyControlInstructions)
+{
+ MOZ_ASSERT(worklist_.length() == 0);
+ if (!AppendToWorklist(worklist_, inputStores))
+ return false;
+ outputStores.clear();
+
+ for (size_t i = 0; i < worklist_.length(); i++) {
+ MOZ_ASSERT(worklist_[i]);
+
+ if (!LoadAliasesStore(load, worklist_[i])) {
+ StoreDependency* dep = worklist_[i]->storeDependency();
+ MOZ_ASSERT(dep);
+ MOZ_ASSERT(dep->get().length() > 0);
+
+ if (!AppendToWorklist(worklist_, dep->get()))
+ return false;
+
+ *improved = true;
+ continue;
+ }
+
+ if (onlyControlInstructions && !worklist_[i]->isControlInstruction()) {
+ outputStores.clear();
+ break;
+ }
+ if (!outputStores.append(worklist_[i]))
+ return false;
+ }
+
+ SetNotInWorkList(worklist_);
+ worklist_.clear();
+ return true;
+}
+
+// Given a load instruction and an initial store dependency list,
+// find the most accurate store dependency list containing only control
+// instructions. Returns an empty output list when there was a non-control
+// instruction that couldn't be improved to a control instruction.
+bool
+FlowAliasAnalysis::improveLoopDependency(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores)
+{
+ outputStores.clear();
+ if (!outputStores.appendAll(inputStores))
+ return false;
+
+ bool improved = false;
+ bool adjusted = true;
+ while (adjusted) {
+ adjusted = false;
+ if (!improveNonAliasedStores(load, outputStores, outputStores, &improved,
+ /* onlyControlInstructions = */ true))
+ {
+ return false;
+ }
+ if (outputStores.length() == 0)
+ return true;
+ if (!improveStoresInFinishedLoops(load, outputStores, &adjusted))
+ return false;
+ if (adjusted)
+ improved = true;
+ }
+
+ if (improved)
+ DumpImprovement(load, inputStores, outputStores);
+
+ return true;
+}
+
+// For every control instruction in the output we find out if the load is loop
+// invariant with respect to that loop. When it is, improve the output store
+// dependency by pointing it at the stores before the loop.
+bool
+FlowAliasAnalysis::improveStoresInFinishedLoops(MDefinition* load, MDefinitionVector& stores,
+ bool* improved)
+{
+ for (size_t i = 0; i < stores.length(); i++) {
+ if (!stores[i]->isControlInstruction())
+ continue;
+ if (!stores[i]->block()->isLoopHeader())
+ continue;
+
+ MOZ_ASSERT(!stores[i]->storeDependency());
+
+ if (!loopIsFinished(stores[i]->block()))
+ continue;
+
+ if (load->dependency() == stores[i])
+ continue;
+
+ bool loopinvariant;
+ if (!isLoopInvariant(load, stores[i], &loopinvariant))
+ return false;
+ if (!loopinvariant)
+ continue;
+
+ MBasicBlock* pred = stores[i]->block()->loopPredecessor();
+ BlockStoreInfo& predInfo = stores_->get(pred);
+
+ MOZ_ASSERT(predInfo.length() > 0);
+ stores[i] = predInfo[0];
+ for (size_t j = 1; j < predInfo.length(); j++) {
+ if (!stores.append(predInfo[j]))
+ return false;
+ }
+
+ *improved = true;
+ }
+
+ return true;
+}
+
+bool
+FlowAliasAnalysis::deferImproveDependency(MDefinitionVector& stores)
+{
+ // Check whether the load depends only on a single unfinished loop.
+ // In that case we defer until that loop has finished.
+ return loop_ && stores.length() == 1 &&
+ stores[0]->isControlInstruction() &&
+ stores[0]->block()->isLoopHeader() &&
+ !loopIsFinished(stores[0]->block());
+}
+
+void
+FlowAliasAnalysis::saveLoadDependency(MDefinition* load, MDefinitionVector& dependencies)
+{
+ MOZ_ASSERT(dependencies.length() > 0);
+
+ // For now we only save the last store before the load for other passes.
+ // That means the store with the maximum id.
+ MDefinition* max = dependencies[0];
+ MDefinition* maxNonControl = nullptr;
+ for (size_t i = 0; i < dependencies.length(); i++) {
+ MDefinition* ins = dependencies[i];
+ if (max->id() < ins->id())
+ max = ins;
+ if (!ins->isControlInstruction()) {
+ if (!maxNonControl || maxNonControl->id() < ins->id())
+ maxNonControl = ins;
+ }
+ }
+
+ // For loop-variant loads with no stores between the loop header and the
+ // load, the control instruction of the loop header is returned, and its id
+ // is higher than that of any store inside the loop header itself. This
+ // corrects the case where the load really depends on an instruction in the
+ // loop header that comes before the "test", which would otherwise look like
+ // a dependency on the loop itself.
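+ //
+ // Illustrative sketch (hypothetical ids): with dependencies
+ // {s3 (id 12), s7 (id 40), loopTest (id 55)}, where s7 and loopTest live in
+ // the same loop header block, max is loopTest but maxNonControl is s7, so
+ // the saved dependency becomes s7 rather than the loop's test.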
+ if (maxNonControl != max && maxNonControl) {
+ if (maxNonControl->block() == max->block())
+ max = maxNonControl;
+ }
+
+ load->setDependency(max);
+}
+
+bool
+FlowAliasAnalysis::saveStoreDependency(MDefinition* ins, BlockStoreInfo& prevStores)
+{
+ // To form a store dependency chain, we store the previous last dependencies
+ // in the current store.
+
+ StoreDependency* dependency = new(alloc()) StoreDependency(alloc());
+ if (!dependency)
+ return false;
+ if (!dependency->init(prevStores))
+ return false;
+
+ ins->setStoreDependency(dependency);
+ return true;
+}
+
+// Returns whether the loop has been processed and has complete backedge
+// store information.
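+//
+// In other words: while loop_ is the loop currently being processed, any loop
+// header whose backedge id is smaller than loop_'s backedge id has already
+// been fully traversed and therefore has complete backedge store information.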
+bool
+FlowAliasAnalysis::loopIsFinished(MBasicBlock* loopheader)
+{
+ MOZ_ASSERT(loopheader->isLoopHeader());
+
+ if (!loop_)
+ return true;
+
+ return loopheader->backedge()->id() <
+ loop_->loopHeader()->backedge()->id();
+}
+
+
+// Determines whether a load is loop invariant.
+//
+// Take the last store dependencies at the backedge of the loop and follow
+// the store chain until the aliased stores are found. Make sure the computed
+// aliased stores consist only of this loop's control instruction, or of
+// control instructions of loops the load is also invariant to. Only in that
+// case is the load definitely loop invariant.
+bool
+FlowAliasAnalysis::isLoopInvariant(MDefinition* load, MDefinition* store, bool* loopinvariant)
+{
+ MOZ_ASSERT(store->isControlInstruction());
+ MOZ_ASSERT(!store->storeDependency());
+ MOZ_ASSERT(store->block()->isLoopHeader());
+ MOZ_ASSERT(loopIsFinished(store->block()));
+
+ *loopinvariant = false;
+ MBasicBlock* backedge = store->block()->backedge();
+ MDefinitionVector output(alloc());
+
+ // To make sure the dependency improvement stops at this loop, temporarily
+ // set the loop's control instruction as the load's dependency.
+ MDefinition* olddep = load->dependency();
+ load->setDependency(store);
+ if (!improveLoopDependency(load, stores_->get(backedge), output))
+ return false;
+ load->setDependency(olddep);
+
+ if (output.length() == 0)
+ return true;
+
+ for (size_t i = 0; i < output.length(); i++) {
+ if (output[i]->storeDependency())
+ return true;
+
+ if (!output[i]->isControlInstruction())
+ return true;
+ if (!output[i]->block()->isLoopHeader())
+ return true;
+
+ if (output[i] == store)
+ continue;
+
+ return true;
+ }
+
+ *loopinvariant = true;
+ return true;
+}
+
+// Compute the store dependencies at the start of this MBasicBlock.
+bool
+FlowAliasAnalysis::computeBlockStores(MBasicBlock* block)
+{
+ BlockStoreInfo* blockInfo = stores_->newCurrent(alloc(), block);
+ if (!blockInfo)
+ return false;
+
+ // First block depends on the first instruction.
+ if (block->id() == 0) {
+ MDefinition* firstIns = *graph_.entryBlock()->begin();
+ if (!blockInfo->append(firstIns))
+ return false;
+ return true;
+ }
+
+ // For loop headers we take the loop header's control instruction,
+ // which is not movable and is easy to detect.
+ if (block->isLoopHeader()) {
+ if (!blockInfo->append(block->lastIns()))
+ return false;
+ return true;
+ }
+
+ // Optimization for consecutive blocks.
+ if (block->numPredecessors() == 1) {
+ MBasicBlock* pred = block->getPredecessor(0);
+ if (pred->numSuccessors() == 1) {
+ stores_->swap(block, pred);
+ return true;
+ }
+ MOZ_ASSERT(pred->numSuccessors() > 1);
+ BlockStoreInfo& predInfo = stores_->get(pred);
+ return blockInfo->appendAll(predInfo);
+ }
+
+ // Heuristic: in most cases having more than 5 predecessors increases the
+ // number of dependencies too much for an optimization to still be possible,
+ // so don't do the merge work. For simplicity we take a non-dominating,
+ // always-existing instruction; that way we cannot accidentally move
+ // instructions that depend on it.
+ if (block->numPredecessors() > 5) {
+ if (!blockInfo->append(block->getPredecessor(0)->lastIns()))
+ return false;
+ return true;
+ }
+
+ // Merging of multiple predecessors.
+ for (size_t pred = 0; pred < block->numPredecessors(); pred++) {
+ BlockStoreInfo& predInfo = stores_->get(block->getPredecessor(pred));
+ if (!AppendToWorklist(*blockInfo, predInfo))
+ return false;
+ }
+ SetNotInWorkList(*blockInfo);
+
+ return true;
+}
diff --git a/js/src/jit/FlowAliasAnalysis.h b/js/src/jit/FlowAliasAnalysis.h
new file mode 100644
index 000000000..d4c8c0f5f
--- /dev/null
+++ b/js/src/jit/FlowAliasAnalysis.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_FlowAliasAnalysis_h
+#define jit_FlowAliasAnalysis_h
+
+#include "jit/AliasAnalysisShared.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class LoopInfo;
+class MIRGraph;
+class GraphStoreInfo;
+
+typedef MDefinitionVector BlockStoreInfo;
+typedef Vector<BlockStoreInfo*, 50, JitAllocPolicy> GraphStoreVector;
+
+class FlowAliasAnalysis : public AliasAnalysisShared
+{
+ // Info on the graph.
+ LoopInfo* loop_;
+ GraphStoreInfo* stores_;
+
+ // Helper vectors, kept here so they do not have to be recreated every time.
+ MDefinitionVector output_;
+ MDefinitionVector worklist_;
+
+ public:
+ FlowAliasAnalysis(MIRGenerator* mir, MIRGraph& graph);
+ MOZ_MUST_USE bool analyze() override;
+
+ protected:
+ /* Process instructions. */
+ MOZ_MUST_USE bool processStore(BlockStoreInfo& stores, MDefinition* store);
+ MOZ_MUST_USE bool processLoad(BlockStoreInfo& stores, MDefinition* load);
+ MOZ_MUST_USE bool processDeferredLoads(LoopInfo* info);
+
+ /* Improve dependency and helpers. */
+ MOZ_MUST_USE bool improveDependency(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores);
+ MOZ_MUST_USE bool improveNonAliasedStores(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores, bool* improved,
+ bool onlyControlInstructions = false);
+ MOZ_MUST_USE bool improveStoresInFinishedLoops(MDefinition* load, MDefinitionVector& stores,
+ bool* improved);
+
+ MOZ_MUST_USE bool improveLoopDependency(MDefinition* load, MDefinitionVector& inputStores,
+ MDefinitionVector& outputStores);
+ MOZ_MUST_USE bool deferImproveDependency(MDefinitionVector& stores);
+
+ /* Save dependency info. */
+ void saveLoadDependency(MDefinition* load, MDefinitionVector& dependencies);
+ MOZ_MUST_USE bool saveStoreDependency(MDefinition* store, BlockStoreInfo& prevStores);
+
+ /* Helper functions. */
+ MOZ_MUST_USE bool computeBlockStores(MBasicBlock* block);
+ MOZ_MUST_USE bool isLoopInvariant(MDefinition* load, MDefinition* store, bool* loopinvariant);
+ bool loopIsFinished(MBasicBlock* loopheader);
+
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_FlowAliasAnalysis_h */
diff --git a/js/src/jit/FoldLinearArithConstants.cpp b/js/src/jit/FoldLinearArithConstants.cpp
new file mode 100644
index 000000000..b97720cec
--- /dev/null
+++ b/js/src/jit/FoldLinearArithConstants.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/FoldLinearArithConstants.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace jit;
+
+namespace js {
+namespace jit {
+
+// Mark this node and its children as RecoveredOnBailout when they are not used.
+// The marked nodes will be removed during DCE. Marking as RecoveredOnBailout is
+// necessary because the Sink pass is run before this pass.
+static void
+markNodesAsRecoveredOnBailout(MDefinition* def)
+{
+ if (def->hasLiveDefUses() || !DeadIfUnused(def) || !def->canRecoverOnBailout())
+ return;
+
+ JitSpew(JitSpew_FLAC, "mark as recovered on bailout: %s%u", def->opName(), def->id());
+ def->setRecoveredOnBailoutUnchecked();
+
+ // Recursively mark nodes that do not have multiple uses. This loop is
+ // necessary because a node could be an unused right shift zero or an
+ // unused add, and both need to be marked as RecoveredOnBailout.
+ for (size_t i = 0; i < def->numOperands(); i++)
+ markNodesAsRecoveredOnBailout(def->getOperand(i));
+}
+
+// Fold AddIs with one variable and two or more constants into one AddI.
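+//
+// Illustrative sketch (hypothetical MIR): for
+//
+//   t0 = add x, constant(1)
+//   t1 = add t0, constant(2)
+//
+// ExtractLinearSum(t1) yields {term: x, constant: 3}, so t1 is replaced by a
+// fresh constant(3) and a new add of x with it; the now-unused adds are then
+// marked RecoveredOnBailout so DCE can remove them.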
+static void
+AnalyzeAdd(TempAllocator& alloc, MAdd* add)
+{
+ if (add->specialization() != MIRType::Int32 || add->isRecoveredOnBailout())
+ return;
+
+ if (!add->hasUses())
+ return;
+
+ JitSpew(JitSpew_FLAC, "analyze add: %s%u", add->opName(), add->id());
+
+ SimpleLinearSum sum = ExtractLinearSum(add);
+ if (sum.constant == 0 || !sum.term)
+ return;
+
+ // Determine which operand is the constant.
+ int idx = add->getOperand(0)->isConstant() ? 0 : 1;
+ if (add->getOperand(idx)->isConstant()) {
+ // Do not replace an add where the outcome is the same add instruction.
+ MOZ_ASSERT(add->getOperand(idx)->toConstant()->type() == MIRType::Int32);
+ if (sum.term == add->getOperand(1 - idx) ||
+ sum.constant == add->getOperand(idx)->toConstant()->toInt32())
+ {
+ return;
+ }
+ }
+
+ MInstruction* rhs = MConstant::New(alloc, Int32Value(sum.constant));
+ add->block()->insertBefore(add, rhs);
+
+ MAdd* addNew = MAdd::New(alloc, sum.term, rhs, MIRType::Int32);
+
+ add->replaceAllLiveUsesWith(addNew);
+ add->block()->insertBefore(add, addNew);
+ JitSpew(JitSpew_FLAC, "replaced with: %s%u", addNew->opName(), addNew->id());
+ JitSpew(JitSpew_FLAC, "and constant: %s%u (%d)", rhs->opName(), rhs->id(), sum.constant);
+
+ // Mark the stale nodes as RecoveredOnBailout since the Sink pass has
+ // been run before this pass. DCE will then remove the unused nodes.
+ markNodesAsRecoveredOnBailout(add);
+}
+
+bool
+FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph)
+{
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)"))
+ return false;
+
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph.alloc().ensureBallast())
+ return false;
+
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)"))
+ return false;
+
+ if (i->isAdd())
+ AnalyzeAdd(graph.alloc(), i->toAdd());
+ }
+ }
+ return true;
+}
+
+} /* namespace jit */
+} /* namespace js */
diff --git a/js/src/jit/FoldLinearArithConstants.h b/js/src/jit/FoldLinearArithConstants.h
new file mode 100644
index 000000000..6512410c4
--- /dev/null
+++ b/js/src/jit/FoldLinearArithConstants.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_FoldLinearArithConstants_h
+#define jit_FoldLinearArithConstants_h
+
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+MOZ_MUST_USE bool
+FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph);
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_FoldLinearArithConstants_h */
diff --git a/js/src/jit/ICStubSpace.h b/js/src/jit/ICStubSpace.h
new file mode 100644
index 000000000..30b9fbb45
--- /dev/null
+++ b/js/src/jit/ICStubSpace.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ICStubSpace_h
+#define jit_ICStubSpace_h
+
+#include "ds/LifoAlloc.h"
+
+namespace js {
+namespace jit {
+
+// ICStubSpace is an abstraction for allocation policy and storage for stub data.
+// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
+// also includes stubs that can make non-tail calls that can GC).
+//
+// Optimized stubs are allocated per-compartment and are always purged when
+// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
+// are only destroyed when the BaselineScript is destroyed.
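+//
+// Illustrative usage (SomeICStub is a hypothetical stub type):
+//
+//   ICStubSpace* space = ...;
+//   SomeICStub* stub = space->allocate<SomeICStub>(args...);
+//   if (!stub)
+//       return nullptr;  // the underlying LifoAlloc allocation can fail
+//
+// Stubs allocated this way are never freed individually; the whole space is
+// purged or destroyed at once, as described above.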
+class ICStubSpace
+{
+ protected:
+ LifoAlloc allocator_;
+
+ explicit ICStubSpace(size_t chunkSize)
+ : allocator_(chunkSize)
+ {}
+
+ public:
+ inline void* alloc(size_t size) {
+ return allocator_.alloc(size);
+ }
+
+ JS_DECLARE_NEW_METHODS(allocate, alloc, inline)
+
+ void freeAllAfterMinorGC(JSRuntime* rt);
+
+#ifdef DEBUG
+ bool isEmpty() const {
+ return allocator_.isEmpty();
+ }
+#endif
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return allocator_.sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+// Space for optimized stubs. Every JitCompartment has a single
+// OptimizedICStubSpace.
+struct OptimizedICStubSpace : public ICStubSpace
+{
+ static const size_t STUB_DEFAULT_CHUNK_SIZE = 4096;
+
+ public:
+ OptimizedICStubSpace()
+ : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
+ {}
+};
+
+// Space for fallback stubs. Every BaselineScript has a
+// FallbackICStubSpace.
+struct FallbackICStubSpace : public ICStubSpace
+{
+ static const size_t STUB_DEFAULT_CHUNK_SIZE = 4096;
+
+ public:
+ FallbackICStubSpace()
+ : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
+ {}
+
+ inline void adoptFrom(FallbackICStubSpace* other) {
+ allocator_.steal(&(other->allocator_));
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ICStubSpace_h */
diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
new file mode 100644
index 000000000..89c2ff0a4
--- /dev/null
+++ b/js/src/jit/InlinableNatives.h
@@ -0,0 +1,166 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlinableNatives_h
+#define jit_InlinableNatives_h
+
+#define INLINABLE_NATIVE_LIST(_) \
+ _(Array) \
+ _(ArrayIsArray) \
+ _(ArrayJoin) \
+ _(ArrayPop) \
+ _(ArrayShift) \
+ _(ArrayPush) \
+ _(ArraySlice) \
+ _(ArraySplice) \
+ \
+ _(AtomicsCompareExchange) \
+ _(AtomicsExchange) \
+ _(AtomicsLoad) \
+ _(AtomicsStore) \
+ _(AtomicsAdd) \
+ _(AtomicsSub) \
+ _(AtomicsAnd) \
+ _(AtomicsOr) \
+ _(AtomicsXor) \
+ _(AtomicsIsLockFree) \
+ \
+ _(MathAbs) \
+ _(MathFloor) \
+ _(MathCeil) \
+ _(MathRound) \
+ _(MathClz32) \
+ _(MathSqrt) \
+ _(MathATan2) \
+ _(MathHypot) \
+ _(MathMax) \
+ _(MathMin) \
+ _(MathPow) \
+ _(MathRandom) \
+ _(MathImul) \
+ _(MathFRound) \
+ _(MathSin) \
+ _(MathTan) \
+ _(MathCos) \
+ _(MathExp) \
+ _(MathLog) \
+ _(MathASin) \
+ _(MathATan) \
+ _(MathACos) \
+ _(MathLog10) \
+ _(MathLog2) \
+ _(MathLog1P) \
+ _(MathExpM1) \
+ _(MathSinH) \
+ _(MathTanH) \
+ _(MathCosH) \
+ _(MathASinH) \
+ _(MathATanH) \
+ _(MathACosH) \
+ _(MathSign) \
+ _(MathTrunc) \
+ _(MathCbrt) \
+ \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(RegExpTester) \
+ _(IsRegExpObject) \
+ _(RegExpPrototypeOptimizable) \
+ _(RegExpInstanceOptimizable) \
+ _(GetFirstDollarIndex) \
+ \
+ _(String) \
+ _(StringCharCodeAt) \
+ _(StringFromCharCode) \
+ _(StringFromCodePoint) \
+ _(StringCharAt) \
+ \
+ _(IntrinsicStringReplaceString) \
+ _(IntrinsicStringSplitString) \
+ \
+ _(ObjectCreate) \
+ \
+ _(SimdInt32x4) \
+ _(SimdUint32x4) \
+ _(SimdInt16x8) \
+ _(SimdUint16x8) \
+ _(SimdInt8x16) \
+ _(SimdUint8x16) \
+ _(SimdFloat32x4) \
+ _(SimdBool32x4) \
+ _(SimdBool16x8) \
+ _(SimdBool8x16) \
+ \
+ _(TestBailout) \
+ _(TestAssertFloat32) \
+ _(TestAssertRecoveredOnBailout) \
+ \
+ _(IntrinsicUnsafeSetReservedSlot) \
+ _(IntrinsicUnsafeGetReservedSlot) \
+ _(IntrinsicUnsafeGetObjectFromReservedSlot) \
+ _(IntrinsicUnsafeGetInt32FromReservedSlot) \
+ _(IntrinsicUnsafeGetStringFromReservedSlot) \
+ _(IntrinsicUnsafeGetBooleanFromReservedSlot) \
+ \
+ _(IntrinsicIsCallable) \
+ _(IntrinsicIsConstructor) \
+ _(IntrinsicToObject) \
+ _(IntrinsicIsObject) \
+ _(IntrinsicIsWrappedArrayConstructor) \
+ _(IntrinsicToInteger) \
+ _(IntrinsicToString) \
+ _(IntrinsicIsConstructing) \
+ _(IntrinsicSubstringKernel) \
+ _(IntrinsicDefineDataProperty) \
+ _(IntrinsicObjectHasPrototype) \
+ \
+ _(IntrinsicIsArrayIterator) \
+ _(IntrinsicIsMapIterator) \
+ _(IntrinsicIsSetIterator) \
+ _(IntrinsicIsStringIterator) \
+ _(IntrinsicIsListIterator) \
+ \
+ _(IntrinsicGetNextMapEntryForIterator) \
+ \
+ _(IntrinsicGetNextSetEntryForIterator) \
+ \
+ _(IntrinsicArrayBufferByteLength) \
+ _(IntrinsicPossiblyWrappedArrayBufferByteLength) \
+ \
+ _(TypedArrayConstructor) \
+ _(IntrinsicIsTypedArray) \
+ _(IntrinsicIsPossiblyWrappedTypedArray) \
+ _(IntrinsicTypedArrayLength) \
+ _(IntrinsicPossiblyWrappedTypedArrayLength) \
+ _(IntrinsicSetDisjointTypedElements) \
+ \
+ _(IntrinsicObjectIsTypedObject) \
+ _(IntrinsicObjectIsTransparentTypedObject) \
+ _(IntrinsicObjectIsOpaqueTypedObject) \
+ _(IntrinsicObjectIsTypeDescr) \
+ _(IntrinsicTypeDescrIsSimpleType) \
+ _(IntrinsicTypeDescrIsArrayType) \
+ _(IntrinsicSetTypedObjectOffset)
+
+struct JSJitInfo;
+
+namespace js {
+namespace jit {
+
+enum class InlinableNative : uint16_t {
+#define ADD_NATIVE(native) native,
+ INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+};
+
+#define ADD_NATIVE(native) extern const JSJitInfo JitInfo_##native;
+ INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
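+
+// For example, the single list entry _(MathAbs) expands via ADD_NATIVE to the
+// enumerator InlinableNative::MathAbs and to the declaration
+// "extern const JSJitInfo JitInfo_MathAbs;" above.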
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_InlinableNatives_h */
diff --git a/js/src/jit/InlineList.h b/js/src/jit/InlineList.h
new file mode 100644
index 000000000..73537182e
--- /dev/null
+++ b/js/src/jit/InlineList.h
@@ -0,0 +1,671 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlineList_h
+#define jit_InlineList_h
+
+#include "jsutil.h"
+
+namespace js {
+
+template <typename T> class InlineForwardList;
+template <typename T> class InlineForwardListIterator;
+
+template <typename T>
+class InlineForwardListNode
+{
+ public:
+ InlineForwardListNode() : next(nullptr)
+ { }
+ explicit InlineForwardListNode(InlineForwardListNode<T>* n) : next(n)
+ { }
+
+ InlineForwardListNode(const InlineForwardListNode<T>&) = delete;
+
+ protected:
+ friend class InlineForwardList<T>;
+ friend class InlineForwardListIterator<T>;
+
+ InlineForwardListNode<T>* next;
+};
+
+template <typename T>
+class InlineForwardList : protected InlineForwardListNode<T>
+{
+ friend class InlineForwardListIterator<T>;
+
+ typedef InlineForwardListNode<T> Node;
+
+ Node* tail_;
+#ifdef DEBUG
+ int modifyCount_;
+#endif
+
+ InlineForwardList<T>* thisFromConstructor() {
+ return this;
+ }
+
+ public:
+ InlineForwardList()
+ : tail_(thisFromConstructor())
+ {
+#ifdef DEBUG
+ modifyCount_ = 0;
+#endif
+ }
+
+ public:
+ typedef InlineForwardListIterator<T> iterator;
+
+ public:
+ iterator begin() const {
+ return iterator(this);
+ }
+ iterator begin(Node* item) const {
+ return iterator(this, item);
+ }
+ iterator end() const {
+ return iterator(nullptr);
+ }
+ void removeAt(iterator where) {
+ removeAfter(where.prev, where.iter);
+ }
+ void pushFront(Node* t) {
+ insertAfter(this, t);
+ }
+ void pushBack(Node* t) {
+ MOZ_ASSERT(t->next == nullptr);
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ tail_->next = t;
+ tail_ = t;
+ }
+ T* popFront() {
+ MOZ_ASSERT(!empty());
+ T* result = static_cast<T*>(this->next);
+ removeAfter(this, result);
+ return result;
+ }
+ T* back() const {
+ MOZ_ASSERT(!empty());
+ return static_cast<T*>(tail_);
+ }
+ void insertAfter(Node* at, Node* item) {
+ MOZ_ASSERT(item->next == nullptr);
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ if (at == tail_)
+ tail_ = item;
+ item->next = at->next;
+ at->next = item;
+ }
+ void removeAfter(Node* at, Node* item) {
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ if (item == tail_)
+ tail_ = at;
+ MOZ_ASSERT(at->next == item);
+ at->next = item->next;
+ item->next = nullptr;
+ }
+ void removeAndIncrement(iterator &where) {
+ // Do not change modifyCount_ here. The iterator can still be used
+ // after calling this method, unlike the other methods that modify
+ // the list.
+ Node* item = where.iter;
+ where.iter = item->next;
+ if (item == tail_)
+ tail_ = where.prev;
+ MOZ_ASSERT(where.prev->next == item);
+ where.prev->next = where.iter;
+ item->next = nullptr;
+ }
+ void splitAfter(Node* at, InlineForwardList<T>* to) {
+ MOZ_ASSERT(to->empty());
+ if (!at)
+ at = this;
+ if (at == tail_)
+ return;
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ to->next = at->next;
+ to->tail_ = tail_;
+ tail_ = at;
+ at->next = nullptr;
+ }
+ bool empty() const {
+ return tail_ == this;
+ }
+ void clear() {
+ this->next = nullptr;
+ tail_ = this;
+#ifdef DEBUG
+ modifyCount_ = 0;
+#endif
+ }
+};
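+
+// Illustrative usage of InlineForwardList (Item and use() are hypothetical):
+//
+//   struct Item : public InlineForwardListNode<Item> { int value; };
+//
+//   InlineForwardList<Item> list;
+//   Item a, b;
+//   list.pushBack(&a);
+//   list.pushBack(&b);
+//   for (InlineForwardList<Item>::iterator it = list.begin(); it != list.end(); it++)
+//       use(it->value);
+//
+// The links are embedded in the elements themselves, so the list performs no
+// allocation and elements must outlive their membership in the list.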
+
+template <typename T>
+class InlineForwardListIterator
+{
+private:
+ friend class InlineForwardList<T>;
+
+ typedef InlineForwardListNode<T> Node;
+
+ explicit InlineForwardListIterator<T>(const InlineForwardList<T>* owner)
+ : prev(const_cast<Node*>(static_cast<const Node*>(owner))),
+ iter(owner ? owner->next : nullptr)
+#ifdef DEBUG
+ , owner_(owner),
+ modifyCount_(owner ? owner->modifyCount_ : 0)
+#endif
+ { }
+
+ InlineForwardListIterator<T>(const InlineForwardList<T>* owner, Node* node)
+ : prev(nullptr),
+ iter(node)
+#ifdef DEBUG
+ , owner_(owner),
+ modifyCount_(owner ? owner->modifyCount_ : 0)
+#endif
+ { }
+
+public:
+ InlineForwardListIterator<T> & operator ++() {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ prev = iter;
+ iter = iter->next;
+ return *this;
+ }
+ InlineForwardListIterator<T> operator ++(int) {
+ InlineForwardListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T * operator*() const {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ return static_cast<T*>(iter);
+ }
+ T * operator ->() const {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ return static_cast<T*>(iter);
+ }
+ bool operator !=(const InlineForwardListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator ==(const InlineForwardListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+ explicit operator bool() const {
+ return iter != nullptr;
+ }
+
+private:
+ Node* prev;
+ Node* iter;
+
+#ifdef DEBUG
+ const InlineForwardList<T>* owner_;
+ int modifyCount_;
+#endif
+};
+
+template <typename T> class InlineList;
+template <typename T> class InlineListIterator;
+template <typename T> class InlineListReverseIterator;
+
+template <typename T>
+class InlineListNode : public InlineForwardListNode<T>
+{
+ public:
+ InlineListNode() : InlineForwardListNode<T>(nullptr), prev(nullptr)
+ { }
+ InlineListNode(InlineListNode<T>* n, InlineListNode<T>* p)
+ : InlineForwardListNode<T>(n),
+ prev(p)
+ { }
+
+ // Move constructor. Nodes may be moved without being removed from their
+ // containing lists. For example, this allows list nodes to be safely
+ // stored in a resizable Vector -- when the Vector resizes, the new storage
+ // is initialized by this move constructor. |other| is a reference to the
+ // old node which the |this| node here is replacing.
+ InlineListNode(InlineListNode<T>&& other)
+ : InlineForwardListNode<T>(other.next)
+ {
+ InlineListNode<T>* newNext = static_cast<InlineListNode<T>*>(other.next);
+ InlineListNode<T>* newPrev = other.prev;
+ prev = newPrev;
+
+ // Update the pointers in the adjacent nodes to point to this node's new
+ // location.
+ newNext->prev = this;
+ newPrev->next = this;
+ }
+
+ InlineListNode(const InlineListNode<T>&) = delete;
+ void operator=(const InlineListNode<T>&) = delete;
+
+ protected:
+ friend class InlineList<T>;
+ friend class InlineListIterator<T>;
+ friend class InlineListReverseIterator<T>;
+
+ InlineListNode<T>* prev;
+};
+
+template <typename T>
+class InlineList : protected InlineListNode<T>
+{
+ typedef InlineListNode<T> Node;
+
+ public:
+ InlineList() : InlineListNode<T>(this, this)
+ { }
+
+ public:
+ typedef InlineListIterator<T> iterator;
+ typedef InlineListReverseIterator<T> reverse_iterator;
+
+ public:
+ iterator begin() const {
+ return iterator(static_cast<Node*>(this->next));
+ }
+ iterator begin(Node* t) const {
+ return iterator(t);
+ }
+ iterator end() const {
+ return iterator(this);
+ }
+ reverse_iterator rbegin() const {
+ return reverse_iterator(this->prev);
+ }
+ reverse_iterator rbegin(Node* t) const {
+ return reverse_iterator(t);
+ }
+ reverse_iterator rend() const {
+ return reverse_iterator(this);
+ }
+ void pushFront(Node* t) {
+ insertAfter(this, t);
+ }
+ void pushFrontUnchecked(Node* t) {
+ insertAfterUnchecked(this, t);
+ }
+ void pushBack(Node* t) {
+ insertBefore(this, t);
+ }
+ void pushBackUnchecked(Node* t) {
+ insertBeforeUnchecked(this, t);
+ }
+ T* popFront() {
+ MOZ_ASSERT(!empty());
+ T* t = static_cast<T*>(this->next);
+ remove(t);
+ return t;
+ }
+ T* popBack() {
+ MOZ_ASSERT(!empty());
+ T* t = static_cast<T*>(this->prev);
+ remove(t);
+ return t;
+ }
+ T* peekBack() const {
+ iterator iter = end();
+ iter--;
+ return *iter;
+ }
+ void insertBefore(Node* at, Node* item) {
+ MOZ_ASSERT(item->prev == nullptr);
+ MOZ_ASSERT(item->next == nullptr);
+ insertBeforeUnchecked(at, item);
+ }
+ void insertBeforeUnchecked(Node* at, Node* item) {
+ Node* atPrev = at->prev;
+ item->next = at;
+ item->prev = atPrev;
+ atPrev->next = item;
+ at->prev = item;
+ }
+ void insertAfter(Node* at, Node* item) {
+ MOZ_ASSERT(item->prev == nullptr);
+ MOZ_ASSERT(item->next == nullptr);
+ insertAfterUnchecked(at, item);
+ }
+ void insertAfterUnchecked(Node* at, Node* item) {
+ Node* atNext = static_cast<Node*>(at->next);
+ item->next = atNext;
+ item->prev = at;
+ atNext->prev = item;
+ at->next = item;
+ }
+ void remove(Node* t) {
+ Node* tNext = static_cast<Node*>(t->next);
+ Node* tPrev = t->prev;
+ tPrev->next = tNext;
+ tNext->prev = tPrev;
+ t->next = nullptr;
+ t->prev = nullptr;
+ }
+ // Remove |old| from the list and insert |now| in its place.
+ void replace(Node* old, Node* now) {
+ MOZ_ASSERT(now->next == nullptr && now->prev == nullptr);
+ Node* listNext = static_cast<Node*>(old->next);
+ Node* listPrev = old->prev;
+ listPrev->next = now;
+ listNext->prev = now;
+ now->next = listNext;
+ now->prev = listPrev;
+ old->next = nullptr;
+ old->prev = nullptr;
+ }
+ void clear() {
+ this->next = this->prev = this;
+ }
+ bool empty() const {
+ return begin() == end();
+ }
+ void takeElements(InlineList& l) {
+ MOZ_ASSERT(&l != this, "cannot takeElements from this");
+ Node* lprev = l.prev;
+ static_cast<Node*>(l.next)->prev = this;
+ lprev->next = this->next;
+ static_cast<Node*>(this->next)->prev = l.prev;
+ this->next = l.next;
+ l.clear();
+ }
+};
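+
+// Illustrative usage of InlineList (Job is a hypothetical element type):
+//
+//   struct Job : public InlineListNode<Job> { /* ... */ };
+//
+//   InlineList<Job> pending;
+//   pending.pushBack(job);   // job's prev/next links must be null
+//   ...
+//   pending.remove(job);     // O(1): each node carries its own prev/next
+//
+// Unlike InlineForwardList, removal needs no predecessor pointer because
+// every node stores a prev link.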
+
+template <typename T>
+class InlineListIterator
+{
+ private:
+ friend class InlineList<T>;
+
+ typedef InlineListNode<T> Node;
+
+ explicit InlineListIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter))
+ { }
+
+ public:
+ InlineListIterator<T> & operator ++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineListIterator<T> operator ++(int) {
+ InlineListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ InlineListIterator<T> & operator --() {
+ iter = iter->prev;
+ return *this;
+ }
+ InlineListIterator<T> operator --(int) {
+ InlineListIterator<T> old(*this);
+ operator--();
+ return old;
+ }
+ T * operator*() const {
+ return static_cast<T*>(iter);
+ }
+ T * operator ->() const {
+ return static_cast<T*>(iter);
+ }
+ bool operator !=(const InlineListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator ==(const InlineListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+template <typename T>
+class InlineListReverseIterator
+{
+ private:
+ friend class InlineList<T>;
+
+ typedef InlineListNode<T> Node;
+
+ explicit InlineListReverseIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter))
+ { }
+
+ public:
+ InlineListReverseIterator<T> & operator ++() {
+ iter = iter->prev;
+ return *this;
+ }
+ InlineListReverseIterator<T> operator ++(int) {
+ InlineListReverseIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ InlineListReverseIterator<T> & operator --() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineListReverseIterator<T> operator --(int) {
+ InlineListReverseIterator<T> old(*this);
+ operator--();
+ return old;
+ }
+ T * operator*() {
+ return static_cast<T*>(iter);
+ }
+ T * operator ->() {
+ return static_cast<T*>(iter);
+ }
+ bool operator !=(const InlineListReverseIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator ==(const InlineListReverseIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+/* This list type is more or less exactly an InlineForwardList without a
+ * sentinel node. It is useful for algorithms that merge many singleton lists,
+ * rather than algorithms that mostly deal with empty lists.
+ */
+template <typename T> class InlineConcatListIterator;
+template <typename T>
+class InlineConcatList
+{
+ private:
+ typedef InlineConcatList<T> Node;
+
+ InlineConcatList<T>* thisFromConstructor() {
+ return this;
+ }
+
+ public:
+ InlineConcatList() : next(nullptr), tail(thisFromConstructor())
+ { }
+
+ typedef InlineConcatListIterator<T> iterator;
+
+ iterator begin() const {
+ return iterator(this);
+ }
+
+ iterator end() const {
+ return iterator(nullptr);
+ }
+
+ void append(InlineConcatList<T>* adding)
+ {
+ MOZ_ASSERT(tail);
+ MOZ_ASSERT(!tail->next);
+ MOZ_ASSERT(adding->tail);
+ MOZ_ASSERT(!adding->tail->next);
+
+ tail->next = adding;
+ tail = adding->tail;
+ adding->tail = nullptr;
+ }
+
+ protected:
+ friend class InlineConcatListIterator<T>;
+ Node* next;
+ Node* tail;
+};
+
+template <typename T>
+class InlineConcatListIterator
+{
+ private:
+ friend class InlineConcatList<T>;
+
+ typedef InlineConcatList<T> Node;
+
+ explicit InlineConcatListIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter))
+ { }
+
+ public:
+ InlineConcatListIterator<T> & operator ++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineConcatListIterator<T> operator ++(int) {
+ InlineConcatListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T * operator*() const {
+ return static_cast<T*>(iter);
+ }
+ T * operator ->() const {
+ return static_cast<T*>(iter);
+ }
+ bool operator !=(const InlineConcatListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator ==(const InlineConcatListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+template <typename T> class InlineSpaghettiStack;
+template <typename T> class InlineSpaghettiStackNode;
+template <typename T> class InlineSpaghettiStackIterator;
+
+template <typename T>
+class InlineSpaghettiStackNode : public InlineForwardListNode<T>
+{
+ typedef InlineForwardListNode<T> Parent;
+
+ public:
+ InlineSpaghettiStackNode() : Parent()
+ { }
+
+ explicit InlineSpaghettiStackNode(InlineSpaghettiStackNode<T>* n)
+ : Parent(n)
+ { }
+
+ InlineSpaghettiStackNode(const InlineSpaghettiStackNode<T>&) = delete;
+
+ protected:
+ friend class InlineSpaghettiStack<T>;
+ friend class InlineSpaghettiStackIterator<T>;
+};
+
+template <typename T>
+class InlineSpaghettiStack : protected InlineSpaghettiStackNode<T>
+{
+ friend class InlineSpaghettiStackIterator<T>;
+
+ typedef InlineSpaghettiStackNode<T> Node;
+
+ public:
+ InlineSpaghettiStack()
+ { }
+
+ public:
+ typedef InlineSpaghettiStackIterator<T> iterator;
+
+ public:
+ iterator begin() const {
+ return iterator(this);
+ }
+ iterator end() const {
+ return iterator(nullptr);
+ }
+
+ void push(Node* t) {
+ MOZ_ASSERT(t->next == nullptr);
+ t->next = this->next;
+ this->next = t;
+ }
+
+ void copy(const InlineSpaghettiStack<T>& stack) {
+ this->next = stack.next;
+ }
+
+ bool empty() const {
+ return this->next == nullptr;
+ }
+};
+
+template <typename T>
+class InlineSpaghettiStackIterator
+{
+ private:
+ friend class InlineSpaghettiStack<T>;
+
+ typedef InlineSpaghettiStackNode<T> Node;
+
+ explicit InlineSpaghettiStackIterator<T>(const InlineSpaghettiStack<T>* owner)
+ : iter(owner ? static_cast<Node*>(owner->next) : nullptr)
+ { }
+
+ public:
+ InlineSpaghettiStackIterator<T> & operator ++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineSpaghettiStackIterator<T> operator ++(int) {
+ InlineSpaghettiStackIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T* operator*() const {
+ return static_cast<T*>(iter);
+ }
+ T* operator ->() const {
+ return static_cast<T*>(iter);
+ }
+ bool operator !=(const InlineSpaghettiStackIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator ==(const InlineSpaghettiStackIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+} // namespace js
+
+#endif /* jit_InlineList_h */
diff --git a/js/src/jit/InstructionReordering.cpp b/js/src/jit/InstructionReordering.cpp
new file mode 100644
index 000000000..48b619b7c
--- /dev/null
+++ b/js/src/jit/InstructionReordering.cpp
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/InstructionReordering.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+static void
+MoveBefore(MBasicBlock* block, MInstruction* at, MInstruction* ins)
+{
+ if (at == ins)
+ return;
+
+ // Update instruction numbers.
+ for (MInstructionIterator iter(block->begin(at)); *iter != ins; iter++) {
+ MOZ_ASSERT(iter->id() < ins->id());
+ iter->setId(iter->id() + 1);
+ }
+ ins->setId(at->id() - 1);
+ block->moveBefore(at, ins);
+}
+
+static bool
+IsLastUse(MDefinition* ins, MDefinition* input, MBasicBlock* loopHeader)
+{
+ // If we are in a loop, this cannot be the last use of any definitions from
+ // outside the loop, as those definitions can be used in future iterations.
+ if (loopHeader && input->block()->id() < loopHeader->id())
+ return false;
+ for (MUseDefIterator iter(input); iter; iter++) {
+ // Watch for uses defined in blocks which ReorderInstructions hasn't
+ // processed yet. These nodes have not had their ids set yet.
+ if (iter.def()->block()->id() > ins->block()->id())
+ return false;
+ if (iter.def()->id() > ins->id())
+ return false;
+ }
+ return true;
+}
+
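+// Illustrative sketch (hypothetical MIR): in
+//
+//   a = ...
+//   b = ...
+//   ... many unrelated instructions ...
+//   c = add a, b        // last use of both a and b
+//
+// c is the last use of at least two of its inputs, so it is moved up toward
+// its operands (stopping at one of its own operands, an interrupt check, or
+// an aliasing effectful instruction), shortening the live ranges of a and b.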
+bool
+jit::ReorderInstructions(MIRGenerator* mir, MIRGraph& graph)
+{
+ // Renumber all instructions in the graph as we go.
+ size_t nextId = 0;
+
+ // List of the headers of any loops we are in.
+ Vector<MBasicBlock*, 4, SystemAllocPolicy> loopHeaders;
+
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ // Renumber all definitions inside the basic blocks.
+ for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
+ iter->setId(nextId++);
+
+ for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
+ iter->setId(nextId++);
+
+ // Don't reorder instructions within entry blocks, which have special requirements.
+ if (*block == graph.entryBlock() || *block == graph.osrBlock())
+ continue;
+
+ if (block->isLoopHeader()) {
+ if (!loopHeaders.append(*block))
+ return false;
+ }
+
+ MBasicBlock* innerLoop = loopHeaders.empty() ? nullptr : loopHeaders.back();
+
+ MInstruction* top = block->safeInsertTop();
+ MInstructionReverseIterator rtop = ++block->rbegin(top);
+ for (MInstructionIterator iter(block->begin(top)); iter != block->end(); ) {
+ MInstruction* ins = *iter;
+
+ // Filter out some instructions which are never reordered.
+ if (ins->isEffectful() ||
+ !ins->isMovable() ||
+ ins->resumePoint() ||
+ ins == block->lastIns())
+ {
+ iter++;
+ continue;
+ }
+
+ // Move constants with a single use in the current block to the
+ // start of the block. Constants won't be reordered by the logic
+ // below, as they have no inputs. Moving them up as high as
+ // possible can allow their use to be moved up further, though,
+ // and has no cost if the constant is emitted at its use.
+ if (ins->isConstant() &&
+ ins->hasOneUse() &&
+ ins->usesBegin()->consumer()->block() == *block &&
+ !IsFloatingPointType(ins->type()))
+ {
+ iter++;
+ MInstructionIterator targetIter = block->begin();
+ while (targetIter->isConstant() || targetIter->isInterruptCheck()) {
+ if (*targetIter == ins)
+ break;
+ targetIter++;
+ }
+ MoveBefore(*block, *targetIter, ins);
+ continue;
+ }
+
+ // Look for inputs where this instruction is the last use of that
+ // input. If we move this instruction up, the input's lifetime will
+ // be shortened, modulo resume point uses (which don't need to be
+ // stored in a register, and can be handled by the register
+ // allocator by just spilling at some point with no reload).
+ Vector<MDefinition*, 4, SystemAllocPolicy> lastUsedInputs;
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ MDefinition* input = ins->getOperand(i);
+ if (!input->isConstant() && IsLastUse(ins, input, innerLoop)) {
+ if (!lastUsedInputs.append(input))
+ return false;
+ }
+ }
+
+ // Don't try to move instructions which aren't the last use of any
+ // of their inputs (we really ought to move these down instead).
+ if (lastUsedInputs.length() < 2) {
+ iter++;
+ continue;
+ }
+
+ MInstruction* target = ins;
+ for (MInstructionReverseIterator riter = ++block->rbegin(ins); riter != rtop; riter++) {
+ MInstruction* prev = *riter;
+ if (prev->isInterruptCheck())
+ break;
+
+ // The instruction can't be moved before any of its uses.
+ bool isUse = false;
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ if (ins->getOperand(i) == prev) {
+ isUse = true;
+ break;
+ }
+ }
+ if (isUse)
+ break;
+
+ // The instruction can't be moved before an instruction that
+ // stores to a location read by the instruction.
+ if (prev->isEffectful() &&
+ (ins->getAliasSet().flags() & prev->getAliasSet().flags()) &&
+ ins->mightAlias(prev) != MDefinition::AliasType::NoAlias)
+ {
+ break;
+ }
+
+ // Make sure the instruction will still be the last use of one
+ // of its inputs when moved up this far.
+ for (size_t i = 0; i < lastUsedInputs.length(); ) {
+ bool found = false;
+ for (size_t j = 0; j < prev->numOperands(); j++) {
+ if (prev->getOperand(j) == lastUsedInputs[i]) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ lastUsedInputs[i] = lastUsedInputs.back();
+ lastUsedInputs.popBack();
+ } else {
+ i++;
+ }
+ }
+ if (lastUsedInputs.length() < 2)
+ break;
+
+ // We can move the instruction before this one.
+ target = prev;
+ }
+
+ iter++;
+ MoveBefore(*block, target, ins);
+ }
+
+ if (block->isLoopBackedge())
+ loopHeaders.popBack();
+ }
+
+ return true;
+}
diff --git a/js/src/jit/InstructionReordering.h b/js/src/jit/InstructionReordering.h
new file mode 100644
index 000000000..ce9fa2665
--- /dev/null
+++ b/js/src/jit/InstructionReordering.h
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InstructionReordering_h
+#define jit_InstructionReordering_h
+
+#include "jit/IonAnalysis.h"
+
+namespace js {
+namespace jit {
+
+MOZ_MUST_USE bool
+ReorderInstructions(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_InstructionReordering_h
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
new file mode 100644
index 000000000..2a158ed7e
--- /dev/null
+++ b/js/src/jit/Ion.cpp
@@ -0,0 +1,3560 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Ion.h"
+
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/SizePrintfMacros.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "jscompartment.h"
+#include "jsgc.h"
+#include "jsprf.h"
+
+#include "gc/Marking.h"
+#include "jit/AliasAnalysis.h"
+#include "jit/AlignmentMaskAnalysis.h"
+#include "jit/BacktrackingAllocator.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineInspector.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CodeGenerator.h"
+#include "jit/EagerSimdUnbox.h"
+#include "jit/EdgeCaseAnalysis.h"
+#include "jit/EffectiveAddressAnalysis.h"
+#include "jit/FlowAliasAnalysis.h"
+#include "jit/FoldLinearArithConstants.h"
+#include "jit/InstructionReordering.h"
+#include "jit/IonAnalysis.h"
+#include "jit/IonBuilder.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitCommon.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitSpewer.h"
+#include "jit/LICM.h"
+#include "jit/LIR.h"
+#include "jit/LoopUnroller.h"
+#include "jit/Lowering.h"
+#include "jit/PerfSpewer.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/ScalarReplacement.h"
+#include "jit/Sink.h"
+#include "jit/StupidAllocator.h"
+#include "jit/ValueNumbering.h"
+#include "jit/WasmBCE.h"
+#include "vm/Debugger.h"
+#include "vm/HelperThreads.h"
+#include "vm/TraceLogging.h"
+
+#include "jscompartmentinlines.h"
+#include "jsobjinlines.h"
+#include "jsscriptinlines.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/Debugger-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Assert that JitCode is gc::Cell aligned.
+JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellSize == 0);
+
+static MOZ_THREAD_LOCAL(JitContext*) TlsJitContext;
+
+static JitContext*
+CurrentJitContext()
+{
+ if (!TlsJitContext.init())
+ return nullptr;
+ return TlsJitContext.get();
+}
+
+void
+jit::SetJitContext(JitContext* ctx)
+{
+ TlsJitContext.set(ctx);
+}
+
+JitContext*
+jit::GetJitContext()
+{
+ MOZ_ASSERT(CurrentJitContext());
+ return CurrentJitContext();
+}
+
+JitContext*
+jit::MaybeGetJitContext()
+{
+ return CurrentJitContext();
+}
+
+JitContext::JitContext(JSContext* cx, TempAllocator* temp)
+ : cx(cx),
+ temp(temp),
+ runtime(CompileRuntime::get(cx->runtime())),
+ compartment(CompileCompartment::get(cx->compartment())),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext(ExclusiveContext* cx, TempAllocator* temp)
+ : cx(nullptr),
+ temp(temp),
+ runtime(CompileRuntime::get(cx->runtime_)),
+ compartment(nullptr),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext(CompileRuntime* rt, CompileCompartment* comp, TempAllocator* temp)
+ : cx(nullptr),
+ temp(temp),
+ runtime(rt),
+ compartment(comp),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext(CompileRuntime* rt)
+ : cx(nullptr),
+ temp(nullptr),
+ runtime(rt),
+ compartment(nullptr),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext(TempAllocator* temp)
+ : cx(nullptr),
+ temp(temp),
+ runtime(nullptr),
+ compartment(nullptr),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext(CompileRuntime* rt, TempAllocator* temp)
+ : cx(nullptr),
+ temp(temp),
+ runtime(rt),
+ compartment(nullptr),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::JitContext()
+ : cx(nullptr),
+ temp(nullptr),
+ runtime(nullptr),
+ compartment(nullptr),
+ prev_(CurrentJitContext()),
+ assemblerCount_(0)
+{
+ SetJitContext(this);
+}
+
+JitContext::~JitContext()
+{
+ SetJitContext(prev_);
+}
+
+bool
+jit::InitializeIon()
+{
+ if (!TlsJitContext.init())
+ return false;
+ CheckLogging();
+#if defined(JS_CODEGEN_ARM)
+ InitARMFlags();
+#endif
+ CheckPerf();
+ return true;
+}
+
+JitRuntime::JitRuntime(JSRuntime* rt)
+ : execAlloc_(rt),
+ backedgeExecAlloc_(rt),
+ exceptionTail_(nullptr),
+ bailoutTail_(nullptr),
+ profilerExitFrameTail_(nullptr),
+ enterJIT_(nullptr),
+ bailoutHandler_(nullptr),
+ argumentsRectifier_(nullptr),
+ argumentsRectifierReturnAddr_(nullptr),
+ invalidator_(nullptr),
+ debugTrapHandler_(nullptr),
+ baselineDebugModeOSRHandler_(nullptr),
+ functionWrappers_(nullptr),
+ osrTempData_(nullptr),
+ preventBackedgePatching_(false),
+ backedgeTarget_(BackedgeLoopHeader),
+ ionReturnOverride_(MagicValue(JS_ARG_POISON)),
+ jitcodeGlobalTable_(nullptr)
+{
+}
+
+JitRuntime::~JitRuntime()
+{
+ js_delete(functionWrappers_);
+ freeOsrTempData();
+
+ // By this point, the jitcode global table should be empty.
+ MOZ_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
+ js_delete(jitcodeGlobalTable_);
+}
+
+bool
+JitRuntime::initialize(JSContext* cx, AutoLockForExclusiveAccess& lock)
+{
+ AutoCompartment ac(cx, cx->atomsCompartment(lock), &lock);
+
+ JitContext jctx(cx, nullptr);
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return false;
+
+ functionWrappers_ = cx->new_<VMWrapperMap>(cx);
+ if (!functionWrappers_ || !functionWrappers_->init())
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting profiler exit frame tail stub");
+ profilerExitFrameTail_ = generateProfilerExitFrameTailStub(cx);
+ if (!profilerExitFrameTail_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting exception tail stub");
+
+ void* handler = JS_FUNC_TO_DATA_PTR(void*, jit::HandleException);
+
+ exceptionTail_ = generateExceptionTailStub(cx, handler);
+ if (!exceptionTail_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting bailout tail stub");
+ bailoutTail_ = generateBailoutTailStub(cx);
+ if (!bailoutTail_)
+ return false;
+
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ JitSpew(JitSpew_Codegen, "# Emitting bailout tables");
+
+ // Initialize some Ion-only stubs that require floating-point support.
+ if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
+ return false;
+
+ for (uint32_t id = 0;; id++) {
+ FrameSizeClass class_ = FrameSizeClass::FromClass(id);
+ if (class_ == FrameSizeClass::ClassLimit())
+ break;
+ bailoutTables_.infallibleAppend((JitCode*)nullptr);
+ JitSpew(JitSpew_Codegen, "# Bailout table");
+ bailoutTables_[id] = generateBailoutTable(cx, id);
+ if (!bailoutTables_[id])
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting bailout handler");
+ bailoutHandler_ = generateBailoutHandler(cx);
+ if (!bailoutHandler_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting invalidator");
+ invalidator_ = generateInvalidator(cx);
+ if (!invalidator_)
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting sequential arguments rectifier");
+ argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_);
+ if (!argumentsRectifier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting EnterJIT sequence");
+ enterJIT_ = generateEnterJIT(cx, EnterJitOptimized);
+ if (!enterJIT_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting EnterBaselineJIT sequence");
+ enterBaselineJIT_ = generateEnterJIT(cx, EnterJitBaseline);
+ if (!enterBaselineJIT_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Value");
+ valuePreBarrier_ = generatePreBarrier(cx, MIRType::Value);
+ if (!valuePreBarrier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for String");
+ stringPreBarrier_ = generatePreBarrier(cx, MIRType::String);
+ if (!stringPreBarrier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Object");
+ objectPreBarrier_ = generatePreBarrier(cx, MIRType::Object);
+ if (!objectPreBarrier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Shape");
+ shapePreBarrier_ = generatePreBarrier(cx, MIRType::Shape);
+ if (!shapePreBarrier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for ObjectGroup");
+ objectGroupPreBarrier_ = generatePreBarrier(cx, MIRType::ObjectGroup);
+ if (!objectGroupPreBarrier_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting malloc stub");
+ mallocStub_ = generateMallocStub(cx);
+ if (!mallocStub_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting free stub");
+ freeStub_ = generateFreeStub(cx);
+ if (!freeStub_)
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting VM function wrappers");
+ for (VMFunction* fun = VMFunction::functions; fun; fun = fun->next) {
+ JitSpew(JitSpew_Codegen, "# VM function wrapper");
+ if (!generateVMWrapper(cx, *fun))
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting lazy link stub");
+ lazyLinkStub_ = generateLazyLinkStub(cx);
+ if (!lazyLinkStub_)
+ return false;
+
+ jitcodeGlobalTable_ = cx->new_<JitcodeGlobalTable>();
+ if (!jitcodeGlobalTable_)
+ return false;
+
+ return true;
+}
+
+JitCode*
+JitRuntime::debugTrapHandler(JSContext* cx)
+{
+ if (!debugTrapHandler_) {
+ // JitRuntime code stubs are shared across compartments and have to
+ // be allocated in the atoms compartment.
+ AutoLockForExclusiveAccess lock(cx);
+ AutoCompartment ac(cx, cx->runtime()->atomsCompartment(lock), &lock);
+ debugTrapHandler_ = generateDebugTrapHandler(cx);
+ }
+ return debugTrapHandler_;
+}
+
+uint8_t*
+JitRuntime::allocateOsrTempData(size_t size)
+{
+ osrTempData_ = (uint8_t*)js_realloc(osrTempData_, size);
+ return osrTempData_;
+}
+
+void
+JitRuntime::freeOsrTempData()
+{
+ js_free(osrTempData_);
+ osrTempData_ = nullptr;
+}
+
+void
+JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)
+{
+ if (target == BackedgeLoopHeader) {
+ // We must be on the main thread. The caller must use
+ // AutoPreventBackedgePatching to ensure we don't reenter.
+ MOZ_ASSERT(preventBackedgePatching_);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ } else {
+ // We must be called from InterruptRunningJitCode, or a signal handler
+ // triggered there. rt->handlingJitInterrupt() ensures we can't reenter
+ // this code.
+ MOZ_ASSERT(!preventBackedgePatching_);
+ MOZ_ASSERT(rt->handlingJitInterrupt());
+ }
+
+ // Do nothing if we know all backedges are already jumping to `target`.
+ if (backedgeTarget_ == target)
+ return;
+
+ backedgeTarget_ = target;
+
+ backedgeExecAlloc_.makeAllWritable();
+
+ // Patch all loop backedges in Ion code so that they either jump to the
+ // normal loop header or to an interrupt handler each time they run.
+ for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin());
+ iter != backedgeList_.end();
+ iter++)
+ {
+ PatchableBackedge* patchableBackedge = *iter;
+ if (target == BackedgeLoopHeader)
+ PatchBackedge(patchableBackedge->backedge, patchableBackedge->loopHeader, target);
+ else
+ PatchBackedge(patchableBackedge->backedge, patchableBackedge->interruptCheck, target);
+ }
+
+ backedgeExecAlloc_.makeAllExecutable();
+}
+
+JitCompartment::JitCompartment()
+ : stubCodes_(nullptr),
+ cacheIRStubCodes_(nullptr),
+ baselineGetPropReturnAddr_(nullptr),
+ baselineSetPropReturnAddr_(nullptr),
+ stringConcatStub_(nullptr),
+ regExpMatcherStub_(nullptr),
+ regExpSearcherStub_(nullptr),
+ regExpTesterStub_(nullptr)
+{
+ baselineCallReturnAddrs_[0] = baselineCallReturnAddrs_[1] = nullptr;
+}
+
+JitCompartment::~JitCompartment()
+{
+ js_delete(stubCodes_);
+ js_delete(cacheIRStubCodes_);
+}
+
+bool
+JitCompartment::initialize(JSContext* cx)
+{
+ stubCodes_ = cx->new_<ICStubCodeMap>(cx->runtime());
+ if (!stubCodes_)
+ return false;
+
+ if (!stubCodes_->init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ cacheIRStubCodes_ = cx->new_<CacheIRStubCodeMap>(cx->runtime());
+ if (!cacheIRStubCodes_)
+ return false;
+
+ if (!cacheIRStubCodes_->init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool
+JitCompartment::ensureIonStubsExist(JSContext* cx)
+{
+ if (!stringConcatStub_) {
+ stringConcatStub_ = generateStringConcatStub(cx);
+ if (!stringConcatStub_)
+ return false;
+ }
+
+ return true;
+}
+
+void
+jit::FinishOffThreadBuilder(JSRuntime* runtime, IonBuilder* builder,
+ const AutoLockHelperThreadState& locked)
+{
+ // Clean up the reference to the pending IonBuilder, if we just finished it.
+ if (builder->script()->baselineScript()->hasPendingIonBuilder() &&
+ builder->script()->baselineScript()->pendingIonBuilder() == builder)
+ {
+ builder->script()->baselineScript()->removePendingIonBuilder(builder->script());
+ }
+
+ // If the builder is still in one of the helper thread lists, remove it.
+ if (builder->isInList()) {
+ MOZ_ASSERT(runtime);
+ runtime->ionLazyLinkListRemove(builder);
+ }
+
+ // Clear the recompiling flag of the old ionScript, since we continue to
+ // use the old ionScript if recompiling fails.
+ if (builder->script()->hasIonScript())
+ builder->script()->ionScript()->clearRecompiling();
+
+ // Clean up if compilation did not succeed.
+ if (builder->script()->isIonCompilingOffThread()) {
+ IonScript* ion =
+ builder->abortReason() == AbortReason_Disable ? ION_DISABLED_SCRIPT : nullptr;
+ builder->script()->setIonScript(runtime, ion);
+ }
+
+ // The builder is allocated into its LifoAlloc, so destroying that will
+ // destroy the builder and all other data accumulated during compilation,
+ // except any final codegen (which includes an assembler and needs to be
+ // explicitly destroyed).
+ js_delete(builder->backgroundCodegen());
+ js_delete(builder->alloc().lifoAlloc());
+}
+
+static bool
+LinkCodeGen(JSContext* cx, IonBuilder* builder, CodeGenerator *codegen)
+{
+ RootedScript script(cx, builder->script());
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, script);
+ AutoTraceLog logScript(logger, event);
+ AutoTraceLog logLink(logger, TraceLogger_IonLinking);
+
+ if (!codegen->link(cx, builder->constraints()))
+ return false;
+
+ return true;
+}
+
+static bool
+LinkBackgroundCodeGen(JSContext* cx, IonBuilder* builder)
+{
+ CodeGenerator* codegen = builder->backgroundCodegen();
+ if (!codegen)
+ return false;
+
+ JitContext jctx(cx, &builder->alloc());
+
+ // Root the assembler until the builder is finished below. As it was
+ // constructed off thread, the assembler has not been rooted previously,
+ // though any GC activity would discard the builder.
+ MacroAssembler::AutoRooter masm(cx, &codegen->masm);
+
+ return LinkCodeGen(cx, builder, codegen);
+}
+
+void
+jit::LinkIonScript(JSContext* cx, HandleScript calleeScript)
+{
+ IonBuilder* builder;
+
+ {
+ AutoLockHelperThreadState lock;
+
+ // Get the pending builder from the Ion frame.
+ MOZ_ASSERT(calleeScript->hasBaselineScript());
+ builder = calleeScript->baselineScript()->pendingIonBuilder();
+ calleeScript->baselineScript()->removePendingIonBuilder(calleeScript);
+
+ // Remove from pending.
+ cx->runtime()->ionLazyLinkListRemove(builder);
+ }
+
+ {
+ AutoEnterAnalysis enterTypes(cx);
+ if (!LinkBackgroundCodeGen(cx, builder)) {
+ // Silently ignore OOM during code generation. The assembly code
+ // doesn't have a way to handle it once linking has happened, so it's
+ // not OK to throw a catchable exception from there.
+ cx->clearPendingException();
+
+ // Reset the TypeZone's compiler output for this script, if any.
+ InvalidateCompilerOutputsForScript(cx, calleeScript);
+ }
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ FinishOffThreadBuilder(cx->runtime(), builder, lock);
+ }
+}
+
+uint8_t*
+jit::LazyLinkTopActivation(JSContext* cx)
+{
+ // First frame should be an exit frame.
+ JitFrameIterator it(cx);
+ LazyLinkExitFrameLayout* ll = it.exitFrame()->as<LazyLinkExitFrameLayout>();
+ RootedScript calleeScript(cx, ScriptFromCalleeToken(ll->jsFrame()->calleeToken()));
+
+ LinkIonScript(cx, calleeScript);
+
+ MOZ_ASSERT(calleeScript->hasBaselineScript());
+ MOZ_ASSERT(calleeScript->baselineOrIonRawPointer());
+
+ return calleeScript->baselineOrIonRawPointer();
+}
+
+/* static */ void
+JitRuntime::Mark(JSTracer* trc, AutoLockForExclusiveAccess& lock)
+{
+ MOZ_ASSERT(!trc->runtime()->isHeapMinorCollecting());
+
+ // Shared stubs are allocated in the atoms compartment, so do not iterate
+ // the atoms heap after it has been "finished."
+ if (trc->runtime()->atomsAreFinished())
+ return;
+
+ Zone* zone = trc->runtime()->atomsCompartment(lock)->zone();
+ for (auto i = zone->cellIter<JitCode>(); !i.done(); i.next()) {
+ JitCode* code = i;
+ TraceRoot(trc, &code, "wrapper");
+ }
+}
+
+/* static */ void
+JitRuntime::MarkJitcodeGlobalTableUnconditionally(JSTracer* trc)
+{
+ if (trc->runtime()->spsProfiler.enabled() &&
+ trc->runtime()->hasJitRuntime() &&
+ trc->runtime()->jitRuntime()->hasJitcodeGlobalTable())
+ {
+ trc->runtime()->jitRuntime()->getJitcodeGlobalTable()->markUnconditionally(trc);
+ }
+}
+
+/* static */ bool
+JitRuntime::MarkJitcodeGlobalTableIteratively(JSTracer* trc)
+{
+ if (trc->runtime()->hasJitRuntime() &&
+ trc->runtime()->jitRuntime()->hasJitcodeGlobalTable())
+ {
+ return trc->runtime()->jitRuntime()->getJitcodeGlobalTable()->markIteratively(trc);
+ }
+ return false;
+}
+
+/* static */ void
+JitRuntime::SweepJitcodeGlobalTable(JSRuntime* rt)
+{
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasJitcodeGlobalTable())
+ rt->jitRuntime()->getJitcodeGlobalTable()->sweep(rt);
+}
+
+void
+JitCompartment::mark(JSTracer* trc, JSCompartment* compartment)
+{
+ // Free temporary OSR buffer.
+ trc->runtime()->jitRuntime()->freeOsrTempData();
+}
+
+void
+JitCompartment::sweep(FreeOp* fop, JSCompartment* compartment)
+{
+ // Any outstanding compilations should have been cancelled by the GC.
+ MOZ_ASSERT(!HasOffThreadIonCompile(compartment));
+
+ stubCodes_->sweep();
+ cacheIRStubCodes_->sweep();
+
+ // If the sweep removed the ICCall_Fallback stub, null out the corresponding baselineCallReturnAddrs_ entry.
+ if (!stubCodes_->lookup(ICCall_Fallback::Compiler::BASELINE_CALL_KEY))
+ baselineCallReturnAddrs_[0] = nullptr;
+ if (!stubCodes_->lookup(ICCall_Fallback::Compiler::BASELINE_CONSTRUCT_KEY))
+ baselineCallReturnAddrs_[1] = nullptr;
+
+ // Similarly for the ICGetProp_Fallback and ICSetProp_Fallback stubs.
+ if (!stubCodes_->lookup(ICGetProp_Fallback::Compiler::BASELINE_KEY))
+ baselineGetPropReturnAddr_ = nullptr;
+ if (!stubCodes_->lookup(ICSetProp_Fallback::Compiler::BASELINE_KEY))
+ baselineSetPropReturnAddr_ = nullptr;
+
+ JSRuntime* rt = fop->runtime();
+ if (stringConcatStub_ && !IsMarkedUnbarriered(rt, &stringConcatStub_))
+ stringConcatStub_ = nullptr;
+
+ if (regExpMatcherStub_ && !IsMarkedUnbarriered(rt, &regExpMatcherStub_))
+ regExpMatcherStub_ = nullptr;
+
+ if (regExpSearcherStub_ && !IsMarkedUnbarriered(rt, &regExpSearcherStub_))
+ regExpSearcherStub_ = nullptr;
+
+ if (regExpTesterStub_ && !IsMarkedUnbarriered(rt, &regExpTesterStub_))
+ regExpTesterStub_ = nullptr;
+
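+ // Sweep the SIMD template objects, clearing any that are about to be
+ // finalized.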
+ for (ReadBarrieredObject& obj : simdTemplateObjects_) {
+ if (obj && IsAboutToBeFinalized(&obj))
+ obj.set(nullptr);
+ }
+}
+
+void
+JitCompartment::toggleBarriers(bool enabled)
+{
+ // Toggle barriers in compartment-wide stubs that have patchable pre-barriers.
+ if (regExpMatcherStub_)
+ regExpMatcherStub_->togglePreBarriers(enabled, Reprotect);
+ if (regExpSearcherStub_)
+ regExpSearcherStub_->togglePreBarriers(enabled, Reprotect);
+ if (regExpTesterStub_)
+ regExpTesterStub_->togglePreBarriers(enabled, Reprotect);
+
+ // Toggle barriers in baseline IC stubs.
+ for (ICStubCodeMap::Enum e(*stubCodes_); !e.empty(); e.popFront()) {
+ JitCode* code = *e.front().value().unsafeGet();
+ code->togglePreBarriers(enabled, Reprotect);
+ }
+ for (CacheIRStubCodeMap::Enum e(*cacheIRStubCodes_); !e.empty(); e.popFront()) {
+ JitCode* code = *e.front().value().unsafeGet();
+ code->togglePreBarriers(enabled, Reprotect);
+ }
+}
+
+size_t
+JitCompartment::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+{
+ size_t n = mallocSizeOf(this);
+ if (stubCodes_)
+ n += stubCodes_->sizeOfIncludingThis(mallocSizeOf);
+ if (cacheIRStubCodes_)
+ n += cacheIRStubCodes_->sizeOfIncludingThis(mallocSizeOf);
+ return n;
+}
+
+JitCode*
+JitRuntime::getBailoutTable(const FrameSizeClass& frameClass) const
+{
+ MOZ_ASSERT(frameClass != FrameSizeClass::None());
+ return bailoutTables_[frameClass.classId()];
+}
+
+JitCode*
+JitRuntime::getVMWrapper(const VMFunction& f) const
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
+ MOZ_ASSERT(p);
+
+ return p->value();
+}
+
+template <AllowGC allowGC>
+JitCode*
+JitCode::New(JSContext* cx, uint8_t* code, uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind)
+{
+ JitCode* codeObj = Allocate<JitCode, allowGC>(cx);
+ if (!codeObj) {
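+ // The executable memory was already allocated by the caller; return it
+ // to the pool so it is not leaked on OOM.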
+ pool->release(headerSize + bufferSize, kind);
+ return nullptr;
+ }
+
+ new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
+ return codeObj;
+}
+
+template
+JitCode*
+JitCode::New<CanGC>(JSContext* cx, uint8_t* code, uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind);
+
+template
+JitCode*
+JitCode::New<NoGC>(JSContext* cx, uint8_t* code, uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind);
+
+void
+JitCode::copyFrom(MacroAssembler& masm)
+{
+ // Store the JitCode pointer right before the code buffer, so we can
+ // recover the gcthing from relocation tables.
+ *(JitCode**)(code_ - sizeof(JitCode*)) = this;
+ insnSize_ = masm.instructionsSize();
+ masm.executableCopy(code_);
+
+ jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
+ masm.copyJumpRelocationTable(code_ + jumpRelocTableOffset());
+
+ dataRelocTableBytes_ = masm.dataRelocationTableBytes();
+ masm.copyDataRelocationTable(code_ + dataRelocTableOffset());
+
+ preBarrierTableBytes_ = masm.preBarrierTableBytes();
+ masm.copyPreBarrierTable(code_ + preBarrierTableOffset());
+
+ masm.processCodeLabels(code_);
+}
+
+void
+JitCode::traceChildren(JSTracer* trc)
+{
+ // Note that we cannot mark invalidated scripts, since we've basically
+ // corrupted the code stream by injecting bailouts.
+ if (invalidated())
+ return;
+
+ if (jumpRelocTableBytes_) {
+ uint8_t* start = code_ + jumpRelocTableOffset();
+ CompactBufferReader reader(start, start + jumpRelocTableBytes_);
+ MacroAssembler::TraceJumpRelocations(trc, this, reader);
+ }
+ if (dataRelocTableBytes_) {
+ // If we're moving objects, we need writable JIT code.
+ bool movingObjects = trc->runtime()->isHeapMinorCollecting() || zone()->isGCCompacting();
+ MaybeAutoWritableJitCode awjc(this, movingObjects ? Reprotect : DontReprotect);
+
+ uint8_t* start = code_ + dataRelocTableOffset();
+ CompactBufferReader reader(start, start + dataRelocTableBytes_);
+ MacroAssembler::TraceDataRelocations(trc, this, reader);
+ }
+}
+
+void
+JitCode::finalize(FreeOp* fop)
+{
+ // If this jitcode had a bytecode map, it must have already been removed.
+#ifdef DEBUG
+ JSRuntime* rt = fop->runtime();
+ if (hasBytecodeMap_) {
+ MOZ_ASSERT(rt->jitRuntime()->hasJitcodeGlobalTable());
+ MOZ_ASSERT(!rt->jitRuntime()->getJitcodeGlobalTable()->lookup(raw()));
+ }
+#endif
+
+ MOZ_ASSERT(pool_);
+
+ // With W^X JIT code, reprotecting memory for each JitCode instance is
+ // slow, so we record the ranges and poison them later all at once. It's
+ // safe to ignore OOM here, it just means we won't poison the code.
+ if (fop->appendJitPoisonRange(JitPoisonRange(pool_, code_ - headerSize_,
+ headerSize_ + bufferSize_)))
+ {
+ pool_->addRef();
+ }
+ code_ = nullptr;
+
+ // Code buffers are stored inside ExecutablePools. Pools are refcounted.
+ // Releasing the pool may free it. Horrible hack: if we are using perf
+ // integration, we don't want to reuse code addresses, so we just leak the
+ // memory instead.
+ if (!PerfEnabled())
+ pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
+ pool_ = nullptr;
+}
+
+void
+JitCode::togglePreBarriers(bool enabled, ReprotectCode reprotect)
+{
+ uint8_t* start = code_ + preBarrierTableOffset();
+ CompactBufferReader reader(start, start + preBarrierTableBytes_);
+
+ if (!reader.more())
+ return;
+
+ MaybeAutoWritableJitCode awjc(this, reprotect);
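+ // When barriers are enabled, each patchable site is toggled to a cmp so
+ // execution falls through into the pre-barrier code; when disabled, it
+ // is toggled to a jmp that skips the barrier.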
+ do {
+ size_t offset = reader.readUnsigned();
+ CodeLocationLabel loc(this, CodeOffset(offset));
+ if (enabled)
+ Assembler::ToggleToCmp(loc);
+ else
+ Assembler::ToggleToJmp(loc);
+ } while (reader.more());
+}
+
+IonScript::IonScript()
+ : method_(nullptr),
+ deoptTable_(nullptr),
+ osrPc_(nullptr),
+ osrEntryOffset_(0),
+ skipArgCheckEntryOffset_(0),
+ invalidateEpilogueOffset_(0),
+ invalidateEpilogueDataOffset_(0),
+ numBailouts_(0),
+ hasProfilingInstrumentation_(false),
+ recompiling_(false),
+ runtimeData_(0),
+ runtimeSize_(0),
+ cacheIndex_(0),
+ cacheEntries_(0),
+ safepointIndexOffset_(0),
+ safepointIndexEntries_(0),
+ safepointsStart_(0),
+ safepointsSize_(0),
+ frameSlots_(0),
+ frameSize_(0),
+ bailoutTable_(0),
+ bailoutEntries_(0),
+ osiIndexOffset_(0),
+ osiIndexEntries_(0),
+ snapshots_(0),
+ snapshotsListSize_(0),
+ snapshotsRVATableSize_(0),
+ constantTable_(0),
+ constantEntries_(0),
+ backedgeList_(0),
+ backedgeEntries_(0),
+ invalidationCount_(0),
+ recompileInfo_(),
+ osrPcMismatchCounter_(0),
+ fallbackStubSpace_()
+{
+}
+
+IonScript*
+IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
+ uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize,
+ size_t snapshotsListSize, size_t snapshotsRVATableSize,
+ size_t recoversSize, size_t bailoutEntries,
+ size_t constants, size_t safepointIndices,
+ size_t osiIndices, size_t cacheEntries,
+ size_t runtimeSize, size_t safepointsSize,
+ size_t backedgeEntries, size_t sharedStubEntries,
+ OptimizationLevel optimizationLevel)
+{
+ constexpr size_t DataAlignment = sizeof(void*);
+
+ if (snapshotsListSize >= MAX_BUFFER_SIZE ||
+ (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
+ {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // This should not overflow on x86, because the memory is already allocated
+ // *somewhere* and if the total overflowed there would be no memory left
+ // at all.
+ size_t paddedSnapshotsSize = AlignBytes(snapshotsListSize + snapshotsRVATableSize, DataAlignment);
+ size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
+ size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
+ size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
+ size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
+ size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
+ size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(uint32_t), DataAlignment);
+ size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
+ size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
+ size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
+ size_t paddedSharedStubSize = AlignBytes(sharedStubEntries * sizeof(IonICEntry), DataAlignment);
+
+ size_t bytes = paddedSnapshotsSize +
+ paddedRecoversSize +
+ paddedBailoutSize +
+ paddedConstantsSize +
+ paddedSafepointIndicesSize +
+ paddedOsiIndicesSize +
+ paddedCacheEntriesSize +
+ paddedRuntimeSize +
+ paddedSafepointSize +
+ paddedBackedgeSize +
+ paddedSharedStubSize;
+ IonScript* script = cx->zone()->pod_malloc_with_extra<IonScript, uint8_t>(bytes);
+ if (!script)
+ return nullptr;
+ new (script) IonScript();
+
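+ // The variable-length tables live immediately after the IonScript header;
+ // record each section's offset as the cursor advances.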
+ uint32_t offsetCursor = sizeof(IonScript);
+
+ script->runtimeData_ = offsetCursor;
+ script->runtimeSize_ = runtimeSize;
+ offsetCursor += paddedRuntimeSize;
+
+ script->cacheIndex_ = offsetCursor;
+ script->cacheEntries_ = cacheEntries;
+ offsetCursor += paddedCacheEntriesSize;
+
+ script->safepointIndexOffset_ = offsetCursor;
+ script->safepointIndexEntries_ = safepointIndices;
+ offsetCursor += paddedSafepointIndicesSize;
+
+ script->safepointsStart_ = offsetCursor;
+ script->safepointsSize_ = safepointsSize;
+ offsetCursor += paddedSafepointSize;
+
+ script->bailoutTable_ = offsetCursor;
+ script->bailoutEntries_ = bailoutEntries;
+ offsetCursor += paddedBailoutSize;
+
+ script->osiIndexOffset_ = offsetCursor;
+ script->osiIndexEntries_ = osiIndices;
+ offsetCursor += paddedOsiIndicesSize;
+
+ script->snapshots_ = offsetCursor;
+ script->snapshotsListSize_ = snapshotsListSize;
+ script->snapshotsRVATableSize_ = snapshotsRVATableSize;
+ offsetCursor += paddedSnapshotsSize;
+
+ script->recovers_ = offsetCursor;
+ script->recoversSize_ = recoversSize;
+ offsetCursor += paddedRecoversSize;
+
+ script->constantTable_ = offsetCursor;
+ script->constantEntries_ = constants;
+ offsetCursor += paddedConstantsSize;
+
+ script->backedgeList_ = offsetCursor;
+ script->backedgeEntries_ = backedgeEntries;
+ offsetCursor += paddedBackedgeSize;
+
+ script->sharedStubList_ = offsetCursor;
+ script->sharedStubEntries_ = sharedStubEntries;
+ offsetCursor += paddedSharedStubSize;
+
+ script->frameSlots_ = frameSlots;
+ script->argumentSlots_ = argumentSlots;
+
+ script->frameSize_ = frameSize;
+
+ script->recompileInfo_ = recompileInfo;
+ script->optimizationLevel_ = optimizationLevel;
+
+ return script;
+}
+
+void
+IonScript::adoptFallbackStubs(FallbackICStubSpace* stubSpace)
+{
+ fallbackStubSpace()->adoptFrom(stubSpace);
+}
+
+void
+IonScript::trace(JSTracer* trc)
+{
+ if (method_)
+ TraceEdge(trc, &method_, "method");
+
+ if (deoptTable_)
+ TraceEdge(trc, &deoptTable_, "deoptimizationTable");
+
+ for (size_t i = 0; i < numConstants(); i++)
+ TraceEdge(trc, &getConstant(i), "constant");
+
+ // Mark all IC stub codes hanging off the IC stub entries.
+ for (size_t i = 0; i < numSharedStubs(); i++) {
+ IonICEntry& ent = sharedStubList()[i];
+ ent.trace(trc);
+ }
+
+ // Trace caches so that the JSScript pointer can be updated if moved.
+ for (size_t i = 0; i < numCaches(); i++)
+ getCacheFromIndex(i).trace(trc);
+}
+
+/* static */ void
+IonScript::writeBarrierPre(Zone* zone, IonScript* ionScript)
+{
+ if (zone->needsIncrementalBarrier())
+ ionScript->trace(zone->barrierTracer());
+}
+
+void
+IonScript::copySnapshots(const SnapshotWriter* writer)
+{
+ MOZ_ASSERT(writer->listSize() == snapshotsListSize_);
+ memcpy((uint8_t*)this + snapshots_,
+ writer->listBuffer(), snapshotsListSize_);
+
+ MOZ_ASSERT(snapshotsRVATableSize_);
+ MOZ_ASSERT(writer->RVATableSize() == snapshotsRVATableSize_);
+ memcpy((uint8_t*)this + snapshots_ + snapshotsListSize_,
+ writer->RVATableBuffer(), snapshotsRVATableSize_);
+}
+
+void
+IonScript::copyRecovers(const RecoverWriter* writer)
+{
+ MOZ_ASSERT(writer->size() == recoversSize_);
+ memcpy((uint8_t*)this + recovers_, writer->buffer(), recoversSize_);
+}
+
+void
+IonScript::copySafepoints(const SafepointWriter* writer)
+{
+ MOZ_ASSERT(writer->size() == safepointsSize_);
+ memcpy((uint8_t*)this + safepointsStart_, writer->buffer(), safepointsSize_);
+}
+
+void
+IonScript::copyBailoutTable(const SnapshotOffset* table)
+{
+ memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32_t));
+}
+
+void
+IonScript::copyConstants(const Value* vp)
+{
+ for (size_t i = 0; i < constantEntries_; i++)
+ constants()[i].init(vp[i]);
+}
+
+void
+IonScript::copyPatchableBackedges(JSContext* cx, JitCode* code,
+ PatchableBackedgeInfo* backedges,
+ MacroAssembler& masm)
+{
+ JitRuntime* jrt = cx->runtime()->jitRuntime();
+ JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime());
+
+ for (size_t i = 0; i < backedgeEntries_; i++) {
+ PatchableBackedgeInfo& info = backedges[i];
+ PatchableBackedge* patchableBackedge = &backedgeList()[i];
+
+ info.backedge.fixup(&masm);
+ CodeLocationJump backedge(code, info.backedge);
+ CodeLocationLabel loopHeader(code, CodeOffset(info.loopHeader->offset()));
+ CodeLocationLabel interruptCheck(code, CodeOffset(info.interruptCheck->offset()));
+ new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);
+
+ // Point the backedge to either of its possible targets, matching the
+ // other backedges in the runtime.
+ if (jrt->backedgeTarget() == JitRuntime::BackedgeInterruptCheck)
+ PatchBackedge(backedge, interruptCheck, JitRuntime::BackedgeInterruptCheck);
+ else
+ PatchBackedge(backedge, loopHeader, JitRuntime::BackedgeLoopHeader);
+
+ jrt->addPatchableBackedge(patchableBackedge);
+ }
+}
+
+void
+IonScript::copySafepointIndices(const SafepointIndex* si, MacroAssembler& masm)
+{
+ // Copy the safepoint index table verbatim; entries already hold
+ // displacements relative to the start of the compiled code.
+ SafepointIndex* table = safepointIndices();
+ memcpy(table, si, safepointIndexEntries_ * sizeof(SafepointIndex));
+}
+
+void
+IonScript::copyOsiIndices(const OsiIndex* oi, MacroAssembler& masm)
+{
+ memcpy(osiIndices(), oi, osiIndexEntries_ * sizeof(OsiIndex));
+}
+
+void
+IonScript::copyRuntimeData(const uint8_t* data)
+{
+ memcpy(runtimeData(), data, runtimeSize());
+}
+
+void
+IonScript::copyCacheEntries(const uint32_t* caches, MacroAssembler& masm)
+{
+ memcpy(cacheIndex(), caches, numCaches() * sizeof(uint32_t));
+
+ // Jumps in the caches reflect the offset of those jumps in the compiled
+ // code, not the absolute positions of the jumps. Update according to the
+ // final code address now.
+ for (size_t i = 0; i < numCaches(); i++)
+ getCacheFromIndex(i).updateBaseAddress(method_, masm);
+}
+
+const SafepointIndex*
+IonScript::getSafepointIndex(uint32_t disp) const
+{
+ MOZ_ASSERT(safepointIndexEntries_ > 0);
+
+ const SafepointIndex* table = safepointIndices();
+ if (safepointIndexEntries_ == 1) {
+ MOZ_ASSERT(disp == table[0].displacement());
+ return &table[0];
+ }
+
+ size_t minEntry = 0;
+ size_t maxEntry = safepointIndexEntries_ - 1;
+ uint32_t min = table[minEntry].displacement();
+ uint32_t max = table[maxEntry].displacement();
+
+ // Assert that the displacement is within the table's bounds.
+ MOZ_ASSERT(min <= disp && disp <= max);
+
+ // Use interpolation to approximate the location of the matching entry.
+ size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry;
+ uint32_t guessDisp = table[guess].displacement();
+
+ if (table[guess].displacement() == disp)
+ return &table[guess];
+
+ // A linear scan from the guess should be more efficient when the entries
+ // form small groups that are evenly distributed across the code,
+ // such as: <... ... ... ... . ... ...>
+ if (guessDisp > disp) {
+ while (--guess >= minEntry) {
+ guessDisp = table[guess].displacement();
+ MOZ_ASSERT(guessDisp >= disp);
+ if (guessDisp == disp)
+ return &table[guess];
+ }
+ } else {
+ while (++guess <= maxEntry) {
+ guessDisp = table[guess].displacement();
+ MOZ_ASSERT(guessDisp <= disp);
+ if (guessDisp == disp)
+ return &table[guess];
+ }
+ }
+
+ MOZ_CRASH("displacement not found.");
+}
+
+const OsiIndex*
+IonScript::getOsiIndex(uint32_t disp) const
+{
+ const OsiIndex* end = osiIndices() + osiIndexEntries_;
+ for (const OsiIndex* it = osiIndices(); it != end; ++it) {
+ if (it->returnPointDisplacement() == disp)
+ return it;
+ }
+
+ MOZ_CRASH("Failed to find OSI point return address");
+}
+
+const OsiIndex*
+IonScript::getOsiIndex(uint8_t* retAddr) const
+{
+ JitSpew(JitSpew_IonInvalidate, "IonScript %p has method %p raw %p", (void*) this, (void*)
+ method(), method()->raw());
+
+ MOZ_ASSERT(containsCodeAddress(retAddr));
+ uint32_t disp = retAddr - method()->raw();
+ return getOsiIndex(disp);
+}
+
+void
+IonScript::Trace(JSTracer* trc, IonScript* script)
+{
+ if (script != ION_DISABLED_SCRIPT)
+ script->trace(trc);
+}
+
+void
+IonScript::Destroy(FreeOp* fop, IonScript* script)
+{
+ script->unlinkFromRuntime(fop);
+
+ /*
+ * When the script contains pointers to nursery things, the store buffer can
+ * contain entries that point into the fallback stub space. Since we can
+ * destroy scripts outside the context of a GC, this situation could result
+ * in us trying to mark invalid store buffer entries.
+ *
+ * Defer freeing any allocated blocks until after the next minor GC.
+ */
+ script->fallbackStubSpace_.freeAllAfterMinorGC(fop->runtime());
+
+ fop->delete_(script);
+}
+
+void
+JS::DeletePolicy<js::jit::IonScript>::operator()(const js::jit::IonScript* script)
+{
+ IonScript::Destroy(rt_->defaultFreeOp(), const_cast<IonScript*>(script));
+}
+
+void
+IonScript::toggleBarriers(bool enabled, ReprotectCode reprotect)
+{
+ method()->togglePreBarriers(enabled, reprotect);
+}
+
+void
+IonScript::purgeOptimizedStubs(Zone* zone)
+{
+ for (size_t i = 0; i < numSharedStubs(); i++) {
+ IonICEntry& entry = sharedStubList()[i];
+ if (!entry.hasStub())
+ continue;
+
+ ICStub* lastStub = entry.firstStub();
+ while (lastStub->next())
+ lastStub = lastStub->next();
+
+ if (lastStub->isFallback()) {
+ // Unlink all stubs allocated in the optimized space.
+ ICStub* stub = entry.firstStub();
+ ICStub* prev = nullptr;
+
+ while (stub->next()) {
+ if (!stub->allocatedInFallbackSpace()) {
+ lastStub->toFallbackStub()->unlinkStub(zone, prev, stub);
+ stub = stub->next();
+ continue;
+ }
+
+ prev = stub;
+ stub = stub->next();
+ }
+
+ lastStub->toFallbackStub()->setInvalid();
+
+ if (lastStub->isMonitoredFallback()) {
+ // Monitor stubs can't make calls, so they are always in the
+ // optimized stub space.
+ ICTypeMonitor_Fallback* lastMonStub =
+ lastStub->toMonitoredFallbackStub()->fallbackMonitorStub();
+ lastMonStub->resetMonitorStubChain(zone);
+ lastMonStub->setInvalid();
+ }
+ } else if (lastStub->isTypeMonitor_Fallback()) {
+ lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone);
+ lastStub->toTypeMonitor_Fallback()->setInvalid();
+ } else {
+ MOZ_ASSERT(lastStub->isTableSwitch());
+ }
+ }
+
+#ifdef DEBUG
+ // All remaining stubs must be allocated in the fallback space.
+ for (size_t i = 0; i < numSharedStubs(); i++) {
+ IonICEntry& entry = sharedStubList()[i];
+ if (!entry.hasStub())
+ continue;
+
+ ICStub* stub = entry.firstStub();
+ while (stub->next()) {
+ MOZ_ASSERT(stub->allocatedInFallbackSpace());
+ stub = stub->next();
+ }
+ }
+#endif
+}
+
+void
+IonScript::purgeCaches()
+{
+ // Don't reset any ICs if we're invalidated; otherwise, repointing the
+ // inline jump could overwrite an invalidation marker. These ICs can
+ // no longer run, but the IC slow paths may still be active on the stack.
+ // ICs are therefore required to check for invalidation before patching,
+ // to preserve the same invariant.
+ if (invalidated())
+ return;
+
+ if (numCaches() == 0)
+ return;
+
+ AutoWritableJitCode awjc(method());
+ for (size_t i = 0; i < numCaches(); i++)
+ getCacheFromIndex(i).reset(DontReprotect);
+}
+
+void
+IonScript::unlinkFromRuntime(FreeOp* fop)
+{
+ // The writes to the executable buffer below may clobber backedge jumps, so
+ // make sure that those backedges are unlinked from the runtime and not
+ // reclobbered with garbage if an interrupt is requested.
+ JitRuntime* jrt = fop->runtime()->jitRuntime();
+ JitRuntime::AutoPreventBackedgePatching apbp(fop->runtime());
+ for (size_t i = 0; i < backedgeEntries_; i++)
+ jrt->removePatchableBackedge(&backedgeList()[i]);
+
+ // Clear the list of backedges, so that this method is idempotent. It is
+ // called during destruction, and may be additionally called when the
+ // script is invalidated.
+ backedgeEntries_ = 0;
+}
+
+void
+jit::ToggleBarriers(JS::Zone* zone, bool needs)
+{
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ if (!rt->hasJitRuntime())
+ return;
+
+ for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+ if (script->hasIonScript())
+ script->ionScript()->toggleBarriers(needs);
+ if (script->hasBaselineScript())
+ script->baselineScript()->toggleBarriers(needs);
+ }
+
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ if (comp->jitCompartment())
+ comp->jitCompartment()->toggleBarriers(needs);
+ }
+}
+
+namespace js {
+namespace jit {
+
+static void
+OptimizeSinCos(MIRGenerator *mir, MIRGraph &graph)
+{
+ // We are looking for:
+ // var y = sin(x);
+ // var z = cos(x);
+ // Graph before:
+ // - 1 op
+ // - 6 mathfunction op1 Sin
+ // - 7 mathfunction op1 Cos
+ // Graph will look like:
+ // - 1 op
+ // - 5 sincos op1
+ // - 6 mathfunction sincos5 Sin
+ // - 7 mathfunction sincos5 Cos
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ) {
+ MInstruction *ins = *iter++;
+ if (!ins->isMathFunction() || ins->isRecoveredOnBailout())
+ continue;
+
+ MMathFunction *insFunc = ins->toMathFunction();
+ if (insFunc->function() != MMathFunction::Sin && insFunc->function() != MMathFunction::Cos)
+ continue;
+
+ // Check if sin/cos is already optimized.
+ if (insFunc->getOperand(0)->type() == MIRType::SinCosDouble)
+ continue;
+
+ // insFunc is either a |sin(x)| or |cos(x)| instruction. The
+ // following loop iterates over the uses of |x| to check if both
+ // |sin(x)| and |cos(x)| instructions exist.
+ bool hasSin = false;
+ bool hasCos = false;
+ for (MUseDefIterator uses(insFunc->input()); uses; uses++)
+ {
+ if (!uses.def()->isInstruction())
+ continue;
+
+ // We should only replace the argument of the sin/cos when it
+ // is dominated by |block|.
+ if (!block->dominates(uses.def()->block()))
+ continue;
+
+ MInstruction *insUse = uses.def()->toInstruction();
+ if (!insUse->isMathFunction() || insUse->isRecoveredOnBailout())
+ continue;
+
+ MMathFunction *mathIns = insUse->toMathFunction();
+ if (!hasSin && mathIns->function() == MMathFunction::Sin) {
+ hasSin = true;
+ JitSpew(JitSpew_Sincos, "Found sin in block %d.", mathIns->block()->id());
+ }
+ else if (!hasCos && mathIns->function() == MMathFunction::Cos) {
+ hasCos = true;
+ JitSpew(JitSpew_Sincos, "Found cos in block %d.", mathIns->block()->id());
+ }
+
+ if (hasCos && hasSin)
+ break;
+ }
+
+ if (!hasCos || !hasSin) {
+ JitSpew(JitSpew_Sincos, "No sin/cos pair found.");
+ continue;
+ }
+
+ JitSpew(JitSpew_Sincos, "Found, at least, a pair sin/cos. Adding sincos in block %d",
+ block->id());
+ // Add the MSinCos and rewrite the operands so that sin(x)/cos(x)
+ // become sin(sincos(x))/cos(sincos(x)).
+ MSinCos *insSinCos = MSinCos::New(graph.alloc(),
+ insFunc->input(),
+ insFunc->toMathFunction()->cache());
+ insSinCos->setImplicitlyUsedUnchecked();
+ block->insertBefore(insFunc, insSinCos);
+ for (MUseDefIterator uses(insFunc->input()); uses; )
+ {
+ MDefinition* def = uses.def();
+ uses++;
+ if (!def->isInstruction())
+ continue;
+
+ // We should only replace the argument of the sin/cos when it
+ // is dominated by |block|.
+ if (!block->dominates(def->block()))
+ continue;
+
+ MInstruction *insUse = def->toInstruction();
+ if (!insUse->isMathFunction() || insUse->isRecoveredOnBailout())
+ continue;
+
+ MMathFunction *mathIns = insUse->toMathFunction();
+ if (mathIns->function() != MMathFunction::Sin && mathIns->function() != MMathFunction::Cos)
+ continue;
+
+ mathIns->replaceOperand(0, insSinCos);
+ JitSpew(JitSpew_Sincos, "Replacing %s by sincos in block %d",
+ mathIns->function() == MMathFunction::Sin ? "sin" : "cos",
+ mathIns->block()->id());
+ }
+ }
+ }
+}
+
+bool
+OptimizeMIR(MIRGenerator* mir)
+{
+ MIRGraph& graph = mir->graph();
+ GraphSpewer& gs = mir->graphSpewer();
+ TraceLoggerThread* logger;
+ if (GetJitContext()->onMainThread())
+ logger = TraceLoggerForMainThread(GetJitContext()->runtime);
+ else
+ logger = TraceLoggerForCurrentThread();
+
+ if (mir->shouldCancel("Start"))
+ return false;
+
+ if (!mir->compilingWasm()) {
+ if (!MakeMRegExpHoistable(mir, graph))
+ return false;
+
+ if (mir->shouldCancel("Make MRegExp Hoistable"))
+ return false;
+ }
+
+ gs.spewPass("BuildSSA");
+ AssertBasicGraphCoherency(graph);
+
+ if (!JitOptions.disablePgo && !mir->compilingWasm()) {
+ AutoTraceLog log(logger, TraceLogger_PruneUnusedBranches);
+ if (!PruneUnusedBranches(mir, graph))
+ return false;
+ gs.spewPass("Prune Unused Branches");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Prune Unused Branches"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_FoldTests);
+ if (!FoldTests(graph))
+ return false;
+ gs.spewPass("Fold Tests");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Fold Tests"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_SplitCriticalEdges);
+ if (!SplitCriticalEdges(graph))
+ return false;
+ gs.spewPass("Split Critical Edges");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Split Critical Edges"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_RenumberBlocks);
+ RenumberBlocks(graph);
+ gs.spewPass("Renumber Blocks");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Renumber Blocks"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_DominatorTree);
+ if (!BuildDominatorTree(graph))
+ return false;
+ // No spew: graph not changed.
+
+ if (mir->shouldCancel("Dominator Tree"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_PhiAnalysis);
+ // Aggressive phi elimination must occur before any code elimination. If the
+ // script contains a try-statement, we only compiled the try block and not
+ // the catch or finally blocks, so in this case it's also invalid to use
+ // aggressive phi elimination.
+ Observability observability = graph.hasTryBlock()
+ ? ConservativeObservability
+ : AggressiveObservability;
+ if (!EliminatePhis(mir, graph, observability))
+ return false;
+ gs.spewPass("Eliminate phis");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Eliminate phis"))
+ return false;
+
+ if (!BuildPhiReverseMapping(graph))
+ return false;
+ AssertExtendedGraphCoherency(graph);
+ // No spew: graph not changed.
+
+ if (mir->shouldCancel("Phi reverse mapping"))
+ return false;
+ }
+
+ if (!JitOptions.disableRecoverIns && mir->optimizationInfo().scalarReplacementEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_ScalarReplacement);
+ if (!ScalarReplacement(mir, graph))
+ return false;
+ gs.spewPass("Scalar Replacement");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Scalar Replacement"))
+ return false;
+ }
+
+ if (!mir->compilingWasm()) {
+ AutoTraceLog log(logger, TraceLogger_ApplyTypes);
+ if (!ApplyTypeInformation(mir, graph))
+ return false;
+ gs.spewPass("Apply types");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Apply types"))
+ return false;
+ }
+
+ if (!JitOptions.disableRecoverIns && mir->optimizationInfo().eagerSimdUnboxEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_EagerSimdUnbox);
+ if (!EagerSimdUnbox(mir, graph))
+ return false;
+ gs.spewPass("Eager Simd Unbox");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Eager Simd Unbox"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().amaEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_AlignmentMaskAnalysis);
+ AlignmentMaskAnalysis ama(graph);
+ if (!ama.analyze())
+ return false;
+ gs.spewPass("Alignment Mask Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Alignment Mask Analysis"))
+ return false;
+ }
+
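+ // The value numberer is created up front: it is used both for the main
+ // GVN pass and for the UCE rerun after range analysis.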
+ ValueNumberer gvn(mir, graph);
+ if (!gvn.init())
+ return false;
+
+ // Alias analysis is required for LICM and GVN so that we don't move
+ // loads across stores.
+ if (mir->optimizationInfo().licmEnabled() ||
+ mir->optimizationInfo().gvnEnabled())
+ {
+ {
+ AutoTraceLog log(logger, TraceLogger_AliasAnalysis);
+ if (JitOptions.disableFlowAA) {
+ AliasAnalysis analysis(mir, graph);
+ if (!analysis.analyze())
+ return false;
+ } else {
+ FlowAliasAnalysis analysis(mir, graph);
+ if (!analysis.analyze())
+ return false;
+ }
+
+ gs.spewPass("Alias analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Alias analysis"))
+ return false;
+ }
+
+ if (!mir->compilingWasm()) {
+ // Eliminating dead resume point operands requires basic block
+ // instructions to be numbered. Reuse the numbering computed during
+ // alias analysis.
+ if (!EliminateDeadResumePointOperands(mir, graph))
+ return false;
+
+ if (mir->shouldCancel("Eliminate dead resume point operands"))
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().gvnEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_GVN);
+ if (!gvn.run(ValueNumberer::UpdateAliasAnalysis))
+ return false;
+ gs.spewPass("GVN");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("GVN"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().licmEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_LICM);
+ // LICM can hoist instructions from conditional branches and trigger
+ // repeated bailouts. Disable it if this script is known to bailout
+ // frequently.
+ JSScript* script = mir->info().script();
+ if (!script || !script->hadFrequentBailouts()) {
+ if (!LICM(mir, graph))
+ return false;
+ gs.spewPass("LICM");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("LICM"))
+ return false;
+ }
+ }
+
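+ // Range analysis state outlives the block below; removeUnnecessaryBitops
+ // reuses it later in the pipeline.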
+ RangeAnalysis r(mir, graph);
+ if (mir->optimizationInfo().rangeAnalysisEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_RangeAnalysis);
+ if (!r.addBetaNodes())
+ return false;
+ gs.spewPass("Beta");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA Beta"))
+ return false;
+
+ if (!r.analyze() || !r.addRangeAssertions())
+ return false;
+ gs.spewPass("Range Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Range Analysis"))
+ return false;
+
+ if (!r.removeBetaNodes())
+ return false;
+ gs.spewPass("De-Beta");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA De-Beta"))
+ return false;
+
+ if (mir->optimizationInfo().gvnEnabled()) {
+ bool shouldRunUCE = false;
+ if (!r.prepareForUCE(&shouldRunUCE))
+ return false;
+ gs.spewPass("RA check UCE");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA check UCE"))
+ return false;
+
+ if (shouldRunUCE) {
+ if (!gvn.run(ValueNumberer::DontUpdateAliasAnalysis))
+ return false;
+ gs.spewPass("UCE After RA");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("UCE After RA"))
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().autoTruncateEnabled()) {
+ if (!r.truncate())
+ return false;
+ gs.spewPass("Truncate Doubles");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Truncate Doubles"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().loopUnrollingEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_LoopUnrolling);
+
+ if (!UnrollLoops(graph, r.loopIterationBounds))
+ return false;
+
+ gs.spewPass("Unroll Loops");
+ AssertExtendedGraphCoherency(graph);
+ }
+ }
+
+ if (!JitOptions.disableRecoverIns) {
+ AutoTraceLog log(logger, TraceLogger_Sink);
+ if (!Sink(mir, graph))
+ return false;
+ gs.spewPass("Sink");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Sink"))
+ return false;
+ }
+
+ if (!JitOptions.disableRecoverIns && mir->optimizationInfo().rangeAnalysisEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_RemoveUnnecessaryBitops);
+ if (!r.removeUnnecessaryBitops())
+ return false;
+ gs.spewPass("Remove Unnecessary Bitops");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Remove Unnecessary Bitops"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_FoldLinearArithConstants);
+ if (!FoldLinearArithConstants(mir, graph))
+ return false;
+ gs.spewPass("Fold Linear Arithmetic Constants");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().eaaEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_EffectiveAddressAnalysis);
+ EffectiveAddressAnalysis eaa(mir, graph);
+ if (!eaa.analyze())
+ return false;
+ gs.spewPass("Effective Address Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Effective Address Analysis"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().sincosEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_Sincos);
+ OptimizeSinCos(mir, graph);
+ gs.spewPass("Sincos optimization");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Sincos optimization"))
+ return false;
+ }
+
+ {
+ AutoTraceLog log(logger, TraceLogger_EliminateDeadCode);
+ if (!EliminateDeadCode(mir, graph))
+ return false;
+ gs.spewPass("DCE");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("DCE"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().instructionReorderingEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_ReorderInstructions);
+ if (!ReorderInstructions(mir, graph))
+ return false;
+ gs.spewPass("Reordering");
+
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Reordering"))
+ return false;
+ }
+
+ // Make loops contiguous. We do this after GVN/UCE and range analysis,
+ // which can remove CFG edges, exposing more blocks that can be moved.
+ {
+ AutoTraceLog log(logger, TraceLogger_MakeLoopsContiguous);
+ if (!MakeLoopsContiguous(graph))
+ return false;
+ gs.spewPass("Make loops contiguous");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Make loops contiguous"))
+ return false;
+ }
+
+ // Passes after this point must not move instructions; these analyses
+ // depend on knowing the final order in which instructions will execute.
+
+ if (mir->optimizationInfo().edgeCaseAnalysisEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_EdgeCaseAnalysis);
+ EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
+ if (!edgeCaseAnalysis.analyzeLate())
+ return false;
+ gs.spewPass("Edge Case Analysis (Late)");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Edge Case Analysis (Late)"))
+ return false;
+ }
+
+ if (mir->optimizationInfo().eliminateRedundantChecksEnabled()) {
+ AutoTraceLog log(logger, TraceLogger_EliminateRedundantChecks);
+ // Note: check elimination has to run after all other passes that move
+ // instructions. Since check uses are replaced with the actual index,
+ // code motion after this pass could incorrectly move a load or store
+ // before its bounds check.
+ if (!EliminateRedundantChecks(graph))
+ return false;
+ gs.spewPass("Bounds Check Elimination");
+ AssertGraphCoherency(graph);
+ }
+
+ if (!mir->compilingWasm()) {
+ AutoTraceLog log(logger, TraceLogger_AddKeepAliveInstructions);
+ if (!AddKeepAliveInstructions(graph))
+ return false;
+ gs.spewPass("Add KeepAlive Instructions");
+ AssertGraphCoherency(graph);
+ }
+
+ if (mir->compilingWasm()) {
+ if (!EliminateBoundsChecks(mir, graph))
+ return false;
+ gs.spewPass("Redundant Bounds Check Elimination");
+ AssertGraphCoherency(graph);
+ }
+
+ DumpMIRExpressions(graph);
+
+ return true;
+}
+
+LIRGraph*
+GenerateLIR(MIRGenerator* mir)
+{
+ MIRGraph& graph = mir->graph();
+ GraphSpewer& gs = mir->graphSpewer();
+
+ TraceLoggerThread* logger;
+ if (GetJitContext()->onMainThread())
+ logger = TraceLoggerForMainThread(GetJitContext()->runtime);
+ else
+ logger = TraceLoggerForCurrentThread();
+
+ LIRGraph* lir = mir->alloc().lifoAlloc()->new_<LIRGraph>(&graph);
+ if (!lir || !lir->init())
+ return nullptr;
+
+ LIRGenerator lirgen(mir, graph, *lir);
+ {
+ AutoTraceLog log(logger, TraceLogger_GenerateLIR);
+ if (!lirgen.generate())
+ return nullptr;
+ gs.spewPass("Generate LIR");
+
+ if (mir->shouldCancel("Generate LIR"))
+ return nullptr;
+ }
+
+ AllocationIntegrityState integrity(*lir);
+
+ {
+ AutoTraceLog log(logger, TraceLogger_RegisterAllocation);
+
+ IonRegisterAllocator allocator = mir->optimizationInfo().registerAllocator();
+
+ switch (allocator) {
+ case RegisterAllocator_Backtracking:
+ case RegisterAllocator_Testbed: {
+#ifdef DEBUG
+ if (!integrity.record())
+ return nullptr;
+#endif
+
+ BacktrackingAllocator regalloc(mir, &lirgen, *lir,
+ allocator == RegisterAllocator_Testbed);
+ if (!regalloc.go())
+ return nullptr;
+
+#ifdef DEBUG
+ if (!integrity.check(false))
+ return nullptr;
+#endif
+
+ gs.spewPass("Allocate Registers [Backtracking]");
+ break;
+ }
+
+ case RegisterAllocator_Stupid: {
+ // Use the integrity checker to populate safepoint information, so
+ // run it in all builds.
+ if (!integrity.record())
+ return nullptr;
+
+ StupidAllocator regalloc(mir, &lirgen, *lir);
+ if (!regalloc.go())
+ return nullptr;
+ if (!integrity.check(true))
+ return nullptr;
+ gs.spewPass("Allocate Registers [Stupid]");
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Bad regalloc");
+ }
+
+ if (mir->shouldCancel("Allocate Registers"))
+ return nullptr;
+ }
+
+ return lir;
+}
+
+CodeGenerator*
+GenerateCode(MIRGenerator* mir, LIRGraph* lir)
+{
+ TraceLoggerThread* logger;
+ if (GetJitContext()->onMainThread())
+ logger = TraceLoggerForMainThread(GetJitContext()->runtime);
+ else
+ logger = TraceLoggerForCurrentThread();
+ AutoTraceLog log(logger, TraceLogger_GenerateCode);
+
+ CodeGenerator* codegen = js_new<CodeGenerator>(mir, lir);
+ if (!codegen)
+ return nullptr;
+
+ if (!codegen->generate()) {
+ js_delete(codegen);
+ return nullptr;
+ }
+
+ return codegen;
+}
+
+CodeGenerator*
+CompileBackEnd(MIRGenerator* mir)
+{
+ // Everything in CompileBackEnd can potentially run on a helper thread.
+ AutoEnterIonCompilation enter(mir->safeForMinorGC());
+ AutoSpewEndFunction spewEndFunction(mir);
+
+ if (!OptimizeMIR(mir))
+ return nullptr;
+
+ LIRGraph* lir = GenerateLIR(mir);
+ if (!lir)
+ return nullptr;
+
+ return GenerateCode(mir, lir);
+}
+
+// Find a finished builder for the compartment.
+static IonBuilder*
+GetFinishedBuilder(JSContext* cx, GlobalHelperThreadState::IonBuilderVector& finished)
+{
+ for (size_t i = 0; i < finished.length(); i++) {
+ IonBuilder* testBuilder = finished[i];
+ if (testBuilder->compartment == CompileCompartment::get(cx->compartment())) {
+ HelperThreadState().remove(finished, &i);
+ return testBuilder;
+ }
+ }
+
+ return nullptr;
+}
+
+void
+AttachFinishedCompilations(JSContext* cx)
+{
+ JitCompartment* ion = cx->compartment()->jitCompartment();
+ if (!ion)
+ return;
+
+ {
+ AutoLockHelperThreadState lock;
+
+ GlobalHelperThreadState::IonBuilderVector& finished = HelperThreadState().ionFinishedList(lock);
+
+ // Incorporate any off-thread compilations for the compartment which have
+ // finished, failed, or been cancelled.
+ while (true) {
+ // Find a finished builder for the compartment.
+ IonBuilder* builder = GetFinishedBuilder(cx, finished);
+ if (!builder)
+ break;
+
+ JSScript* script = builder->script();
+ MOZ_ASSERT(script->hasBaselineScript());
+ script->baselineScript()->setPendingIonBuilder(cx->runtime(), script, builder);
+ cx->runtime()->ionLazyLinkListAdd(builder);
+
+ // Don't keep more than 100 lazy link builders.
+ // Link the oldest ones immediately.
+ while (cx->runtime()->ionLazyLinkListSize() > 100) {
+ jit::IonBuilder* builder = cx->runtime()->ionLazyLinkList().getLast();
+ RootedScript script(cx, builder->script());
+
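+ // Linking must not run while the helper thread lock is held, so drop
+ // the lock for the duration of the link.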
+ AutoUnlockHelperThreadState unlock(lock);
+ AutoCompartment ac(cx, script->compartment());
+ jit::LinkIonScript(cx, script);
+ }
+
+ continue;
+ }
+ }
+}
+
+static void
+TrackAllProperties(JSContext* cx, JSObject* obj)
+{
+ MOZ_ASSERT(obj->isSingleton());
+
+ for (Shape::Range<NoGC> range(obj->as<NativeObject>().lastProperty()); !range.empty(); range.popFront())
+ EnsureTrackPropertyTypes(cx, obj, range.front().propid());
+}
+
+static void
+TrackPropertiesForSingletonScopes(JSContext* cx, JSScript* script, BaselineFrame* baselineFrame)
+{
+ // Ensure that all properties of singleton call objects which the script
+ // could access are tracked. These are generally accessed through
+ // ALIASEDVAR operations in baseline and will not be tracked even if they
+ // have been accessed in baseline code.
+ JSObject* environment = script->functionNonDelazifying()
+ ? script->functionNonDelazifying()->environment()
+ : nullptr;
+
+ while (environment && !environment->is<GlobalObject>()) {
+ if (environment->is<CallObject>() && environment->isSingleton())
+ TrackAllProperties(cx, environment);
+ environment = environment->enclosingEnvironment();
+ }
+
+ if (baselineFrame) {
+ JSObject* scope = baselineFrame->environmentChain();
+ if (scope->is<CallObject>() && scope->isSingleton())
+ TrackAllProperties(cx, scope);
+ }
+}
+
+static void
+TrackIonAbort(JSContext* cx, JSScript* script, jsbytecode* pc, const char* message)
+{
+ if (!cx->runtime()->jitRuntime()->isOptimizationTrackingEnabled(cx->runtime()))
+ return;
+
+ // Only bother tracking aborts of functions we're attempting to
+ // Ion-compile after successfully running in Baseline.
+ if (!script->hasBaselineScript())
+ return;
+
+ JitcodeGlobalTable* table = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ void* ptr = script->baselineScript()->method()->raw();
+ JitcodeGlobalEntry& entry = table->lookupInfallible(ptr);
+ entry.baselineEntry().trackIonAbort(pc, message);
+}
+
+static void
+TrackAndSpewIonAbort(JSContext* cx, JSScript* script, const char* message)
+{
+ JitSpew(JitSpew_IonAbort, "%s", message);
+ TrackIonAbort(cx, script, script->code(), message);
+}
+
+static AbortReason
+IonCompile(JSContext* cx, JSScript* script,
+ BaselineFrame* baselineFrame, jsbytecode* osrPc,
+ bool recompile, OptimizationLevel optimizationLevel)
+{
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, script);
+ AutoTraceLog logScript(logger, event);
+ AutoTraceLog logCompile(logger, TraceLogger_IonCompilation);
+
+ // Make sure the script's canonical function isn't lazy. We can't de-lazify
+ // it in a helper thread.
+ script->ensureNonLazyCanonicalFunction(cx);
+
+ TrackPropertiesForSingletonScopes(cx, script, baselineFrame);
+
+ LifoAlloc* alloc = cx->new_<LifoAlloc>(TempAllocator::PreferredLifoChunkSize);
+ if (!alloc)
+ return AbortReason_Alloc;
+
+ ScopedJSDeletePtr<LifoAlloc> autoDelete(alloc);
+
+ TempAllocator* temp = alloc->new_<TempAllocator>(alloc);
+ if (!temp)
+ return AbortReason_Alloc;
+
+ JitContext jctx(cx, temp);
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return AbortReason_Alloc;
+
+ if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
+ return AbortReason_Alloc;
+
+ MIRGraph* graph = alloc->new_<MIRGraph>(temp);
+ if (!graph)
+ return AbortReason_Alloc;
+
+ InlineScriptTree* inlineScriptTree = InlineScriptTree::New(temp, nullptr, nullptr, script);
+ if (!inlineScriptTree)
+ return AbortReason_Alloc;
+
+ CompileInfo* info = alloc->new_<CompileInfo>(script, script->functionNonDelazifying(), osrPc,
+ Analysis_None,
+ script->needsArgsObj(), inlineScriptTree);
+ if (!info)
+ return AbortReason_Alloc;
+
+ BaselineInspector* inspector = alloc->new_<BaselineInspector>(script);
+ if (!inspector)
+ return AbortReason_Alloc;
+
+ BaselineFrameInspector* baselineFrameInspector = nullptr;
+ if (baselineFrame) {
+ baselineFrameInspector = NewBaselineFrameInspector(temp, baselineFrame, info);
+ if (!baselineFrameInspector)
+ return AbortReason_Alloc;
+ }
+
+ CompilerConstraintList* constraints = NewCompilerConstraintList(*temp);
+ if (!constraints)
+ return AbortReason_Alloc;
+
+ const OptimizationInfo* optimizationInfo = IonOptimizations.get(optimizationLevel);
+ const JitCompileOptions options(cx);
+
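+ // The builder is constructed with a null JSContext: the back end may run
+ // on a helper thread, where the context must not be used.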
+ IonBuilder* builder = alloc->new_<IonBuilder>((JSContext*) nullptr,
+ CompileCompartment::get(cx->compartment()),
+ options, temp, graph, constraints,
+ inspector, info, optimizationInfo,
+ baselineFrameInspector);
+ if (!builder)
+ return AbortReason_Alloc;
+
+ if (cx->runtime()->gc.storeBuffer.cancelIonCompilations())
+ builder->setNotSafeForMinorGC();
+
+ MOZ_ASSERT(recompile == builder->script()->hasIonScript());
+ MOZ_ASSERT(builder->script()->canIonCompile());
+
+ RootedScript builderScript(cx, builder->script());
+
+ if (recompile)
+ builderScript->ionScript()->setRecompiling();
+
+ SpewBeginFunction(builder, builderScript);
+
+ bool succeeded;
+ {
+ AutoEnterAnalysis enter(cx);
+ succeeded = builder->build();
+ builder->clearForBackEnd();
+ }
+
+ if (!succeeded) {
+ AbortReason reason = builder->abortReason();
+ builder->graphSpewer().endFunction();
+ if (reason == AbortReason_PreliminaryObjects) {
+ // Some group was accessed which has associated preliminary objects
+ // to analyze. Do this now and we will try to build again shortly.
+ const MIRGenerator::ObjectGroupVector& groups = builder->abortedPreliminaryGroups();
+ for (size_t i = 0; i < groups.length(); i++) {
+ ObjectGroup* group = groups[i];
+ if (group->newScript()) {
+ if (!group->newScript()->maybeAnalyze(cx, group, nullptr, /* force = */ true))
+ return AbortReason_Alloc;
+ } else if (group->maybePreliminaryObjects()) {
+ group->maybePreliminaryObjects()->maybeAnalyze(cx, group, /* force = */ true);
+ } else {
+ MOZ_CRASH("Unexpected aborted preliminary group");
+ }
+ }
+ }
+
+ if (builder->hadActionableAbort()) {
+ JSScript* abortScript;
+ jsbytecode* abortPc;
+ const char* abortMessage;
+ builder->actionableAbortLocationAndMessage(&abortScript, &abortPc, &abortMessage);
+ TrackIonAbort(cx, abortScript, abortPc, abortMessage);
+ }
+
+ if (cx->isThrowingOverRecursed()) {
+ // Non-analysis compilations should never fail with stack overflow.
+ MOZ_CRASH("Stack overflow during compilation");
+ }
+
+ return reason;
+ }
+
+ // If possible, compile the script off thread.
+ if (options.offThreadCompilationAvailable()) {
+ JitSpew(JitSpew_IonSyncLogs, "Can't log script %s:%" PRIuSIZE
+ ". (Compiled on background thread.)",
+ builderScript->filename(), builderScript->lineno());
+
+ if (!CreateMIRRootList(*builder))
+ return AbortReason_Alloc;
+
+ if (!StartOffThreadIonCompile(cx, builder)) {
+ JitSpew(JitSpew_IonAbort, "Unable to start off-thread ion compilation.");
+ builder->graphSpewer().endFunction();
+ return AbortReason_Alloc;
+ }
+
+ if (!recompile)
+ builderScript->setIonScript(cx->runtime(), ION_COMPILING_SCRIPT);
+
+ // The allocator and associated data will be destroyed after being
+ // processed in the finishedOffThreadCompilations list.
+ autoDelete.forget();
+
+ return AbortReason_NoAbort;
+ }
+
+ {
+ ScopedJSDeletePtr<CodeGenerator> codegen;
+ AutoEnterAnalysis enter(cx);
+ codegen = CompileBackEnd(builder);
+ if (!codegen) {
+ JitSpew(JitSpew_IonAbort, "Failed during back-end compilation.");
+ if (cx->isExceptionPending())
+ return AbortReason_Error;
+ return AbortReason_Disable;
+ }
+
+ succeeded = LinkCodeGen(cx, builder, codegen);
+ }
+
+ if (succeeded)
+ return AbortReason_NoAbort;
+ if (cx->isExceptionPending())
+ return AbortReason_Error;
+ return AbortReason_Disable;
+}
+
+static bool
+CheckFrame(JSContext* cx, BaselineFrame* frame)
+{
+ MOZ_ASSERT(!frame->script()->isGenerator());
+ MOZ_ASSERT(!frame->isDebuggerEvalFrame());
+ MOZ_ASSERT(!frame->isEvalFrame());
+
+ // These checks prevent the stack from being overrun.
+ if (frame->isFunctionFrame()) {
+ if (TooManyActualArguments(frame->numActualArgs())) {
+ TrackAndSpewIonAbort(cx, frame->script(), "too many actual arguments");
+ return false;
+ }
+
+ if (TooManyFormalArguments(frame->numFormalArgs())) {
+ TrackAndSpewIonAbort(cx, frame->script(), "too many arguments");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+CheckScript(JSContext* cx, JSScript* script, bool osr)
+{
+ if (script->isForEval()) {
+ // Eval frames are not yet supported. Supporting this will require new
+ // logic in pushBailoutFrame to deal with linking prev.
+ // Additionally, JSOP_DEFVAR support will require baking in isEvalFrame().
+ TrackAndSpewIonAbort(cx, script, "eval script");
+ return false;
+ }
+
+ if (script->isGenerator()) {
+ TrackAndSpewIonAbort(cx, script, "generator script");
+ return false;
+ }
+
+ if (script->hasNonSyntacticScope() && !script->functionNonDelazifying()) {
+ // Support functions with a non-syntactic global scope but not other
+ // scripts. For global scripts, IonBuilder currently uses the global
+ // object as the scope chain; this is not valid when the script has a
+ // non-syntactic global scope.
+ TrackAndSpewIonAbort(cx, script, "has non-syntactic global scope");
+ return false;
+ }
+
+ if (script->functionHasExtraBodyVarScope() &&
+ script->functionExtraBodyVarScope()->hasEnvironment())
+ {
+ // This restriction will be lifted when intra-function scope chains
+ // are compilable by Ion. See bug 1273858.
+ TrackAndSpewIonAbort(cx, script, "has extra var environment");
+ return false;
+ }
+
+ if (script->nTypeSets() >= UINT16_MAX) {
+ // In this case multiple bytecode ops can share a single observed
+ // TypeSet (see bug 1303710).
+ TrackAndSpewIonAbort(cx, script, "too many typesets");
+ return false;
+ }
+
+ return true;
+}
+
+static MethodStatus
+CheckScriptSize(JSContext* cx, JSScript* script)
+{
+ if (!JitOptions.limitScriptSize)
+ return Method_Compiled;
+
+ uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
+
+ if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE ||
+ numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
+ {
+ if (!OffThreadCompilationAvailable(cx)) {
+ JitSpew(JitSpew_IonAbort, "Script too large (%" PRIuSIZE " bytes) (%u locals/args)",
+ script->length(), numLocalsAndArgs);
+ TrackIonAbort(cx, script, script->code(), "too large");
+ return Method_CantCompile;
+ }
+ }
+
+ return Method_Compiled;
+}
+
+bool
+CanIonCompileScript(JSContext* cx, JSScript* script, bool osr)
+{
+ if (!script->canIonCompile() || !CheckScript(cx, script, osr))
+ return false;
+
+ return CheckScriptSize(cx, script) == Method_Compiled;
+}
+
+static OptimizationLevel
+GetOptimizationLevel(HandleScript script, jsbytecode* pc)
+{
+ return IonOptimizations.levelForScript(script, pc);
+}
+
+static MethodStatus
+Compile(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* osrPc,
+ bool forceRecompile = false)
+{
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(jit::IsBaselineEnabled(cx));
+ MOZ_ASSERT_IF(osrPc != nullptr, LoopEntryCanIonOsr(osrPc));
+
+ if (!script->hasBaselineScript())
+ return Method_Skipped;
+
+ if (script->isDebuggee() || (osrFrame && osrFrame->isDebuggee())) {
+ TrackAndSpewIonAbort(cx, script, "debugging");
+ return Method_Skipped;
+ }
+
+ if (!CheckScript(cx, script, bool(osrPc))) {
+ JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%" PRIuSIZE, script->filename(), script->lineno());
+ return Method_CantCompile;
+ }
+
+ MethodStatus status = CheckScriptSize(cx, script);
+ if (status != Method_Compiled) {
+ JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%" PRIuSIZE, script->filename(), script->lineno());
+ return status;
+ }
+
+ bool recompile = false;
+ OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc);
+ if (optimizationLevel == OptimizationLevel::DontCompile)
+ return Method_Skipped;
+
+ if (!CanLikelyAllocateMoreExecutableMemory()) {
+ script->resetWarmUpCounter();
+ return Method_Skipped;
+ }
+
+ if (script->hasIonScript()) {
+ IonScript* scriptIon = script->ionScript();
+ if (!scriptIon->method())
+ return Method_CantCompile;
+
+ // Don't overwrite code compiled at a higher optimization level
+ // with code compiled at a lower one.
+ if (optimizationLevel <= scriptIon->optimizationLevel() && !forceRecompile)
+ return Method_Compiled;
+
+ // Don't start a new compilation if one is already in progress.
+ if (scriptIon->isRecompiling())
+ return Method_Compiled;
+
+ if (osrPc)
+ scriptIon->resetOsrPcMismatchCounter();
+
+ recompile = true;
+ }
+
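+ // An off-thread compilation may already be pending lazy linking; only
+ // compile over it if a higher optimization level is requested or a
+ // recompile is forced.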
+ if (script->baselineScript()->hasPendingIonBuilder()) {
+ IonBuilder* buildIon = script->baselineScript()->pendingIonBuilder();
+ if (optimizationLevel <= buildIon->optimizationInfo().level() && !forceRecompile)
+ return Method_Compiled;
+
+ recompile = true;
+ }
+
+ AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, recompile, optimizationLevel);
+ if (reason == AbortReason_Error)
+ return Method_Error;
+
+ if (reason == AbortReason_Disable)
+ return Method_CantCompile;
+
+ if (reason == AbortReason_Alloc) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Compilation succeeded, we invalidated right away, or an inlining/alloc abort occurred.
+ if (script->hasIonScript())
+ return Method_Compiled;
+ return Method_Skipped;
+}
+
+} // namespace jit
+} // namespace js
+
+bool
+jit::OffThreadCompilationAvailable(JSContext* cx)
+{
+ // Even if off thread compilation is enabled, compilation must still occur
+ // on the main thread in some cases.
+ //
+ // Require cpuCount > 1 so that Ion compilation jobs and main-thread
+ // execution are not competing for the same resources.
+ return cx->runtime()->canUseOffthreadIonCompilation()
+ && HelperThreadState().cpuCount > 1
+ && CanUseExtraThreads();
+}
+
+MethodStatus
+jit::CanEnter(JSContext* cx, RunState& state)
+{
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+
+ JSScript* script = state.script();
+
+ // Skip if the script has been disabled.
+ if (!script->canIonCompile())
+ return Method_Skipped;
+
+ // Skip if the script is being compiled off thread.
+ if (script->isIonCompilingOffThread())
+ return Method_Skipped;
+
+ // Skip if the code is expected to result in a bailout.
+ if (script->hasIonScript() && script->ionScript()->bailoutExpected())
+ return Method_Skipped;
+
+ RootedScript rscript(cx, script);
+
+    // If constructing, allocate a new |this| object before building Ion.
+    // This is done up front because creating |this| may change the type
+    // information and invalidate the compilation results.
+ if (state.isInvoke()) {
+ InvokeState& invoke = *state.asInvoke();
+
+ if (TooManyActualArguments(invoke.args().length())) {
+ TrackAndSpewIonAbort(cx, script, "too many actual args");
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ if (TooManyFormalArguments(invoke.args().callee().as<JSFunction>().nargs())) {
+ TrackAndSpewIonAbort(cx, script, "too many args");
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ if (!state.maybeCreateThisForConstructor(cx)) {
+ if (cx->isThrowingOutOfMemory()) {
+ cx->recoverFromOutOfMemory();
+ return Method_Skipped;
+ }
+ return Method_Error;
+ }
+ }
+
+ // If --ion-eager is used, compile with Baseline first, so that we
+ // can directly enter IonMonkey.
+ if (JitOptions.eagerCompilation && !rscript->hasBaselineScript()) {
+ MethodStatus status = CanEnterBaselineMethod(cx, state);
+ if (status != Method_Compiled)
+ return status;
+ }
+
+ // Skip if the script is being compiled off thread or can't be
+ // Ion-compiled (again). MaybeCreateThisForConstructor could have
+ // started an Ion compilation or marked the script as uncompilable.
+ if (rscript->isIonCompilingOffThread() || !rscript->canIonCompile())
+ return Method_Skipped;
+
+ // Attempt compilation. Returns Method_Compiled if already compiled.
+ MethodStatus status = Compile(cx, rscript, nullptr, nullptr);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile)
+ ForbidCompilation(cx, rscript);
+ return status;
+ }
+
+ if (state.script()->baselineScript()->hasPendingIonBuilder()) {
+ LinkIonScript(cx, state.script());
+ if (!state.script()->hasIonScript())
+ return jit::Method_Skipped;
+ }
+
+ return Method_Compiled;
+}
+
+static MethodStatus
+BaselineCanEnterAtEntry(JSContext* cx, HandleScript script, BaselineFrame* frame)
+{
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(frame->callee()->nonLazyScript()->canIonCompile());
+ MOZ_ASSERT(!frame->callee()->nonLazyScript()->isIonCompilingOffThread());
+ MOZ_ASSERT(!frame->callee()->nonLazyScript()->hasIonScript());
+ MOZ_ASSERT(frame->isFunctionFrame());
+
+ // Mark as forbidden if frame can't be handled.
+ if (!CheckFrame(cx, frame)) {
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ // Attempt compilation. Returns Method_Compiled if already compiled.
+ MethodStatus status = Compile(cx, script, frame, nullptr);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile)
+ ForbidCompilation(cx, script);
+ return status;
+ }
+
+ return Method_Compiled;
+}
+
+// Decide if a transition from baseline execution to Ion code should occur.
+// May compile or recompile the target JSScript.
+static MethodStatus
+BaselineCanEnterAtBranch(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* pc)
+{
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT((JSOp)*pc == JSOP_LOOPENTRY);
+ MOZ_ASSERT(LoopEntryCanIonOsr(pc));
+
+ // Skip if the script has been disabled.
+ if (!script->canIonCompile())
+ return Method_Skipped;
+
+ // Skip if the script is being compiled off thread.
+ if (script->isIonCompilingOffThread())
+ return Method_Skipped;
+
+ // Skip if the code is expected to result in a bailout.
+ if (script->hasIonScript() && script->ionScript()->bailoutExpected())
+ return Method_Skipped;
+
+ // Optionally ignore on user request.
+ if (!JitOptions.osr)
+ return Method_Skipped;
+
+ // Mark as forbidden if frame can't be handled.
+ if (!CheckFrame(cx, osrFrame)) {
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+    // If the jitcode still needs to be linked, link it now so that we have a
+    // valid IonScript.
+ if (script->baselineScript()->hasPendingIonBuilder())
+ LinkIonScript(cx, script);
+
+    // By default a recompilation doesn't happen on an osr pc mismatch.
+    // Force a recompilation if mismatches happen too often.
+ bool force = false;
+ if (script->hasIonScript() && pc != script->ionScript()->osrPc()) {
+ uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
+ if (count <= JitOptions.osrPcMismatchesBeforeRecompile)
+ return Method_Skipped;
+ force = true;
+ }
+
+ // Attempt compilation.
+    // - Returns Method_Compiled if the right IonScript is present
+    //   (meaning it was already present or a sequential compile finished).
+    // - Returns Method_Skipped if the pc doesn't match
+    //   (a background compilation with that pc may or may not have started).
+ RootedScript rscript(cx, script);
+ MethodStatus status = Compile(cx, rscript, osrFrame, pc, force);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile)
+ ForbidCompilation(cx, script);
+ return status;
+ }
+
+    // Report that the compilation was skipped when the osr pc wasn't adjusted.
+    // This can happen when an IonScript was still available and a background
+    // compilation started but hasn't finished yet, or when we didn't force a
+    // recompile.
+ if (script->hasIonScript() && pc != script->ionScript()->osrPc())
+ return Method_Skipped;
+
+ return Method_Compiled;
+}
+
+bool
+jit::IonCompileScriptForBaseline(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ // A TI OOM will disable TI and Ion.
+ if (!jit::IsIonEnabled(cx))
+ return true;
+
+ RootedScript script(cx, frame->script());
+ bool isLoopEntry = JSOp(*pc) == JSOP_LOOPENTRY;
+
+ MOZ_ASSERT(!isLoopEntry || LoopEntryCanIonOsr(pc));
+
+ if (!script->canIonCompile()) {
+        // TODO: ASSERT that the ion-compilation-disabled checker stub doesn't exist.
+        // TODO: Clear all optimized stubs.
+        // TODO: Add an ion-compilation-disabled checker IC stub.
+ script->resetWarmUpCounter();
+ return true;
+ }
+
+ MOZ_ASSERT(!script->isIonCompilingOffThread());
+
+    // If an IonScript exists but the PC is not at a loop entry, then Ion will be
+    // entered for this script at an appropriate LOOPENTRY or the next time this
+    // function is called.
+ if (script->hasIonScript() && !isLoopEntry) {
+ JitSpew(JitSpew_BaselineOSR, "IonScript exists, but not at loop entry!");
+        // TODO: ASSERT that an ion-script-already-exists checker stub doesn't exist.
+ // TODO: Clear all optimized stubs.
+ // TODO: Add a ion-script-already-exists checker stub.
+ return true;
+ }
+
+ // Ensure that Ion-compiled code is available.
+ JitSpew(JitSpew_BaselineOSR,
+ "WarmUpCounter for %s:%" PRIuSIZE " reached %d at pc %p, trying to switch to Ion!",
+ script->filename(), script->lineno(), (int) script->getWarmUpCount(), (void*) pc);
+
+ MethodStatus stat;
+ if (isLoopEntry) {
+ MOZ_ASSERT(LoopEntryCanIonOsr(pc));
+ JitSpew(JitSpew_BaselineOSR, " Compile at loop entry!");
+ stat = BaselineCanEnterAtBranch(cx, script, frame, pc);
+ } else if (frame->isFunctionFrame()) {
+ JitSpew(JitSpew_BaselineOSR, " Compile function from top for later entry!");
+ stat = BaselineCanEnterAtEntry(cx, script, frame);
+ } else {
+ return true;
+ }
+
+ if (stat == Method_Error) {
+ JitSpew(JitSpew_BaselineOSR, " Compile with Ion errored!");
+ return false;
+ }
+
+ if (stat == Method_CantCompile)
+ JitSpew(JitSpew_BaselineOSR, " Can't compile with Ion!");
+ else if (stat == Method_Skipped)
+ JitSpew(JitSpew_BaselineOSR, " Skipped compile with Ion!");
+ else if (stat == Method_Compiled)
+ JitSpew(JitSpew_BaselineOSR, " Compiled with Ion!");
+ else
+ MOZ_CRASH("Invalid MethodStatus!");
+
+ // Failed to compile. Reset warm-up counter and return.
+ if (stat != Method_Compiled) {
+ // TODO: If stat == Method_CantCompile, insert stub that just skips the
+ // warm-up counter entirely, instead of resetting it.
+ bool bailoutExpected = script->hasIonScript() && script->ionScript()->bailoutExpected();
+ if (stat == Method_CantCompile || bailoutExpected) {
+ JitSpew(JitSpew_BaselineOSR, " Reset WarmUpCounter cantCompile=%s bailoutExpected=%s!",
+ stat == Method_CantCompile ? "yes" : "no",
+ bailoutExpected ? "yes" : "no");
+ script->resetWarmUpCounter();
+ }
+ return true;
+ }
+
+ return true;
+}
+
+
+MethodStatus
+jit::Recompile(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* osrPc,
+ bool force)
+{
+ MOZ_ASSERT(script->hasIonScript());
+ if (script->ionScript()->isRecompiling())
+ return Method_Compiled;
+
+ MethodStatus status = Compile(cx, script, osrFrame, osrPc, force);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile)
+ ForbidCompilation(cx, script);
+ return status;
+ }
+
+ return Method_Compiled;
+}
+
+MethodStatus
+jit::CanEnterUsingFastInvoke(JSContext* cx, HandleScript script, uint32_t numActualArgs)
+{
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+
+ // Skip if the code is expected to result in a bailout.
+ if (!script->hasIonScript() || script->ionScript()->bailoutExpected())
+ return Method_Skipped;
+
+    // Don't handle argument underflow; to make this work we would have to pad
+    // missing arguments with |undefined|.
+ if (numActualArgs < script->functionNonDelazifying()->nargs())
+ return Method_Skipped;
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return Method_Error;
+
+ // This can GC, so afterward, script->ion is not guaranteed to be valid.
+ if (!cx->runtime()->jitRuntime()->enterIon())
+ return Method_Error;
+
+ if (!script->hasIonScript())
+ return Method_Skipped;
+
+ return Method_Compiled;
+}
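+
+// Illustrative example of the underflow restriction above (the numbers are
+// hypothetical): a script whose function has nargs() == 2, invoked through
+// FastInvoke with a single actual argument, returns Method_Skipped here,
+// because padding the missing formal with |undefined| is not supported on
+// this path.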
+
+static JitExecStatus
+EnterIon(JSContext* cx, EnterJitData& data)
+{
+ JS_CHECK_RECURSION(cx, return JitExec_Aborted);
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(!data.osrFrame);
+
+#ifdef DEBUG
+ // See comment in EnterBaseline.
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+ nogc.emplace(cx);
+#endif
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
+
+ // Caller must construct |this| before invoking the Ion function.
+ MOZ_ASSERT_IF(data.constructing,
+ data.maxArgv[0].isObject() || data.maxArgv[0].isMagic(JS_UNINITIALIZED_LEXICAL));
+
+ data.result.setInt32(data.numActualArgs);
+ {
+ AssertCompartmentUnchanged pcc(cx);
+ ActivationEntryMonitor entryMonitor(cx, data.calleeToken);
+ JitActivation activation(cx);
+
+#ifdef DEBUG
+ nogc.reset();
+#endif
+ CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, /* osrFrame = */nullptr, data.calleeToken,
+ /* envChain = */ nullptr, 0, data.result.address());
+ }
+
+ MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
+
+ // Jit callers wrap primitive constructor return, except for derived class constructors.
+ if (!data.result.isMagic() && data.constructing &&
+ data.result.isPrimitive())
+ {
+ MOZ_ASSERT(data.maxArgv[0].isObject());
+ data.result = data.maxArgv[0];
+ }
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->getJitRuntime(cx)->freeOsrTempData();
+
+ MOZ_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
+ return data.result.isMagic() ? JitExec_Error : JitExec_Ok;
+}
+
+bool
+jit::SetEnterJitData(JSContext* cx, EnterJitData& data, RunState& state,
+ MutableHandle<GCVector<Value>> vals)
+{
+ data.osrFrame = nullptr;
+
+ if (state.isInvoke()) {
+ const CallArgs& args = state.asInvoke()->args();
+ unsigned numFormals = state.script()->functionNonDelazifying()->nargs();
+ data.constructing = state.asInvoke()->constructing();
+ data.numActualArgs = args.length();
+ data.maxArgc = Max(args.length(), numFormals) + 1;
+ data.envChain = nullptr;
+ data.calleeToken = CalleeToToken(&args.callee().as<JSFunction>(), data.constructing);
+
+ if (data.numActualArgs >= numFormals) {
+ data.maxArgv = args.base() + 1;
+ } else {
+ MOZ_ASSERT(vals.empty());
+ unsigned numPushedArgs = Max(args.length(), numFormals);
+ if (!vals.reserve(numPushedArgs + 1 + data.constructing))
+ return false;
+
+ // Append |this| and any provided arguments.
+ for (size_t i = 1; i < args.length() + 2; ++i)
+ vals.infallibleAppend(args.base()[i]);
+
+ // Pad missing arguments with |undefined|.
+ while (vals.length() < numFormals + 1)
+ vals.infallibleAppend(UndefinedValue());
+
+ if (data.constructing)
+ vals.infallibleAppend(args.newTarget());
+
+ MOZ_ASSERT(vals.length() >= numFormals + 1 + data.constructing);
+ data.maxArgv = vals.begin();
+ }
+ } else {
+ data.constructing = false;
+ data.numActualArgs = 0;
+ data.maxArgc = 0;
+ data.maxArgv = nullptr;
+ data.envChain = state.asExecute()->environmentChain();
+
+ data.calleeToken = CalleeToToken(state.script());
+
+ if (state.script()->isForEval() && state.script()->isDirectEvalInFunction()) {
+ // Push newTarget onto the stack.
+ if (!vals.reserve(1))
+ return false;
+
+ data.maxArgc = 1;
+ data.maxArgv = vals.begin();
+ if (state.asExecute()->newTarget().isNull()) {
+ ScriptFrameIter iter(cx);
+ vals.infallibleAppend(iter.newTarget());
+ } else {
+ vals.infallibleAppend(state.asExecute()->newTarget());
+ }
+ }
+ }
+
+ return true;
+}
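+
+// Illustrative sketch (not part of the original source) of the argv layout the
+// underflow path above builds into |vals|; the function shape and values are
+// hypothetical:
+//
+//   // function f(a, b, c) invoked as |new f(1)|:
+//   //   vals[0] = thisv        // |this| slot
+//   //   vals[1] = 1            // the single provided argument
+//   //   vals[2] = undefined    // padding for |b|
+//   //   vals[3] = undefined    // padding for |c|
+//   //   vals[4] = new.target   // appended because data.constructing is true
+//   // data.maxArgv then points at vals.begin().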
+
+JitExecStatus
+jit::IonCannon(JSContext* cx, RunState& state)
+{
+ IonScript* ion = state.script()->ionScript();
+
+ EnterJitData data(cx);
+ data.jitcode = ion->method()->raw();
+
+ Rooted<GCVector<Value>> vals(cx, GCVector<Value>(cx));
+ if (!SetEnterJitData(cx, data, state, &vals))
+ return JitExec_Error;
+
+ JitExecStatus status = EnterIon(cx, data);
+
+ if (status == JitExec_Ok)
+ state.setReturnValue(data.result);
+
+ return status;
+}
+
+JitExecStatus
+jit::FastInvoke(JSContext* cx, HandleFunction fun, CallArgs& args)
+{
+ JS_CHECK_RECURSION(cx, return JitExec_Error);
+
+ RootedScript script(cx, fun->nonLazyScript());
+
+ if (!Debugger::checkNoExecute(cx, script))
+ return JitExec_Error;
+
+#ifdef DEBUG
+ // See comment in EnterBaseline.
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+ nogc.emplace(cx);
+#endif
+
+ IonScript* ion = script->ionScript();
+ JitCode* code = ion->method();
+ void* jitcode = code->raw();
+
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(!ion->bailoutExpected());
+
+ ActivationEntryMonitor entryMonitor(cx, CalleeToToken(script));
+ JitActivation activation(cx);
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
+ void* calleeToken = CalleeToToken(fun, /* constructing = */ false);
+
+ RootedValue result(cx, Int32Value(args.length()));
+ MOZ_ASSERT(args.length() >= fun->nargs());
+
+#ifdef DEBUG
+ nogc.reset();
+#endif
+ CALL_GENERATED_CODE(enter, jitcode, args.length() + 1, args.array() - 1, /* osrFrame = */nullptr,
+ calleeToken, /* envChain = */ nullptr, 0, result.address());
+
+ MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
+
+ args.rval().set(result);
+
+ MOZ_ASSERT_IF(result.isMagic(), result.isMagic(JS_ION_ERROR));
+ return result.isMagic() ? JitExec_Error : JitExec_Ok;
+}
+
+static void
+InvalidateActivation(FreeOp* fop, const JitActivationIterator& activations, bool invalidateAll)
+{
+ JitSpew(JitSpew_IonInvalidate, "BEGIN invalidating activation");
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters)
+ activations->asJit()->setCheckRegs(false);
+#endif
+
+ size_t frameno = 1;
+
+ for (JitFrameIterator it(activations); !it.done(); ++it, ++frameno) {
+ MOZ_ASSERT_IF(frameno == 1, it.isExitFrame() || it.type() == JitFrame_Bailout);
+
+#ifdef JS_JITSPEW
+ switch (it.type()) {
+ case JitFrame_Exit:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " exit frame @ %p", frameno, it.fp());
+ break;
+ case JitFrame_BaselineJS:
+ case JitFrame_IonJS:
+ case JitFrame_Bailout:
+ {
+ MOZ_ASSERT(it.isScripted());
+ const char* type = "Unknown";
+ if (it.isIonJS())
+ type = "Optimized";
+ else if (it.isBaselineJS())
+ type = "Baseline";
+ else if (it.isBailoutJS())
+ type = "Bailing";
+ JitSpew(JitSpew_IonInvalidate,
+ "#%" PRIuSIZE " %s JS frame @ %p, %s:%" PRIuSIZE " (fun: %p, script: %p, pc %p)",
+ frameno, type, it.fp(), it.script()->maybeForwardedFilename(),
+ it.script()->lineno(), it.maybeCallee(), (JSScript*)it.script(),
+ it.returnAddressToFp());
+ break;
+ }
+ case JitFrame_IonStub:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " ion stub frame @ %p", frameno, it.fp());
+ break;
+ case JitFrame_BaselineStub:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " baseline stub frame @ %p", frameno, it.fp());
+ break;
+ case JitFrame_Rectifier:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " rectifier frame @ %p", frameno, it.fp());
+ break;
+ case JitFrame_IonAccessorIC:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " ion IC getter/setter frame @ %p", frameno, it.fp());
+ break;
+ case JitFrame_Entry:
+ JitSpew(JitSpew_IonInvalidate, "#%" PRIuSIZE " entry frame @ %p", frameno, it.fp());
+ break;
+ }
+#endif // JS_JITSPEW
+
+ if (!it.isIonScripted())
+ continue;
+
+ bool calledFromLinkStub = false;
+ JitCode* lazyLinkStub = fop->runtime()->jitRuntime()->lazyLinkStub();
+ if (it.returnAddressToFp() >= lazyLinkStub->raw() &&
+ it.returnAddressToFp() < lazyLinkStub->rawEnd())
+ {
+ calledFromLinkStub = true;
+ }
+
+ // See if the frame has already been invalidated.
+ if (!calledFromLinkStub && it.checkInvalidation())
+ continue;
+
+ JSScript* script = it.script();
+ if (!script->hasIonScript())
+ continue;
+
+ if (!invalidateAll && !script->ionScript()->invalidated())
+ continue;
+
+ IonScript* ionScript = script->ionScript();
+
+ // Purge ICs before we mark this script as invalidated. This will
+ // prevent lastJump_ from appearing to be a bogus pointer, just
+ // in case anyone tries to read it.
+ ionScript->purgeCaches();
+ ionScript->purgeOptimizedStubs(script->zone());
+
+ // Clean up any pointers from elsewhere in the runtime to this IonScript
+ // which is about to become disconnected from its JSScript.
+ ionScript->unlinkFromRuntime(fop);
+
+ // This frame needs to be invalidated. We do the following:
+ //
+ // 1. Increment the reference counter to keep the ionScript alive
+ // for the invalidation bailout or for the exception handler.
+ // 2. Determine safepoint that corresponds to the current call.
+ // 3. From safepoint, get distance to the OSI-patchable offset.
+ // 4. From the IonScript, determine the distance between the
+ // call-patchable offset and the invalidation epilogue.
+ // 5. Patch the OSI point with a call-relative to the
+ // invalidation epilogue.
+ //
+ // The code generator ensures that there's enough space for us
+ // to patch in a call-relative operation at each invalidation
+ // point.
+ //
+ // Note: you can't simplify this mechanism to "just patch the
+ // instruction immediately after the call" because things may
+        // need to move into a well-defined register state (using move
+        // instructions after the call) to capture an appropriate
+        // snapshot after the call occurs.
+
+ ionScript->incrementInvalidationCount();
+
+ JitCode* ionCode = ionScript->method();
+
+ JS::Zone* zone = script->zone();
+ if (zone->needsIncrementalBarrier()) {
+ // We're about to remove edges from the JSScript to gcthings
+ // embedded in the JitCode. Perform one final trace of the
+ // JitCode for the incremental GC, as it must know about
+ // those edges.
+ ionCode->traceChildren(zone->barrierTracer());
+ }
+ ionCode->setInvalidated();
+
+        // Don't adjust OSI points when we were called from the lazy link stub
+        // (it has no OSI points), or in a bailout path.
+ if (calledFromLinkStub || it.isBailoutJS())
+ continue;
+
+ // Write the delta (from the return address offset to the
+ // IonScript pointer embedded into the invalidation epilogue)
+ // where the safepointed call instruction used to be. We rely on
+ // the call sequence causing the safepoint being >= the size of
+ // a uint32, which is checked during safepoint index
+ // construction.
+ AutoWritableJitCode awjc(ionCode);
+ const SafepointIndex* si = ionScript->getSafepointIndex(it.returnAddressToFp());
+ CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
+ ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
+ (it.returnAddressToFp() - ionCode->raw());
+ Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
+
+ CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
+ CodeLocationLabel invalidateEpilogue(ionCode, CodeOffset(ionScript->invalidateEpilogueOffset()));
+
+ JitSpew(JitSpew_IonInvalidate, " ! Invalidate ionScript %p (inv count %" PRIuSIZE ") -> patching osipoint %p",
+ ionScript, ionScript->invalidationCount(), (void*) osiPatchPoint.raw());
+ Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
+ }
+
+ JitSpew(JitSpew_IonInvalidate, "END invalidating activation");
+}
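+
+// Condensed sketch of the per-frame patching performed above, tying the
+// numbered steps to the two patch calls; the local names mirror the code in
+// InvalidateActivation and are illustrative only:
+//
+//   // uint8_t* returnAddr = it.returnAddressToFp();      // inside Ion code
+//   // ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset()
+//   //                   - (returnAddr - ionCode->raw());
+//   // Assembler::PatchWrite_Imm32(returnAddr, Imm32(delta));
+//   //     // lets the epilogue locate the IonScript pointer (steps 3-4)
+//   // Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
+//   //     // redirects the safepointed call into the epilogue (step 5)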
+
+void
+jit::InvalidateAll(FreeOp* fop, Zone* zone)
+{
+ // The caller should previously have cancelled off thread compilation.
+#ifdef DEBUG
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
+ MOZ_ASSERT(!HasOffThreadIonCompile(comp));
+#endif
+
+ for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter) {
+ if (iter->compartment()->zone() == zone) {
+ JitSpew(JitSpew_IonInvalidate, "Invalidating all frames for GC");
+ InvalidateActivation(fop, iter, true);
+ }
+ }
+}
+
+
+void
+jit::Invalidate(TypeZone& types, FreeOp* fop,
+ const RecompileInfoVector& invalid, bool resetUses,
+ bool cancelOffThread)
+{
+ JitSpew(JitSpew_IonInvalidate, "Start invalidation.");
+
+ // Add an invalidation reference to all invalidated IonScripts to indicate
+ // to the traversal which frames have been invalidated.
+ size_t numInvalidations = 0;
+ for (size_t i = 0; i < invalid.length(); i++) {
+ const CompilerOutput* co = invalid[i].compilerOutput(types);
+ if (!co)
+ continue;
+ MOZ_ASSERT(co->isValid());
+
+ if (cancelOffThread)
+ CancelOffThreadIonCompile(co->script());
+
+ if (!co->ion())
+ continue;
+
+ JitSpew(JitSpew_IonInvalidate, " Invalidate %s:%" PRIuSIZE ", IonScript %p",
+ co->script()->filename(), co->script()->lineno(), co->ion());
+
+ // Keep the ion script alive during the invalidation and flag this
+ // ionScript as being invalidated. This increment is removed by the
+ // loop after the calls to InvalidateActivation.
+ co->ion()->incrementInvalidationCount();
+ numInvalidations++;
+ }
+
+ if (!numInvalidations) {
+ JitSpew(JitSpew_IonInvalidate, " No IonScript invalidation.");
+ return;
+ }
+
+ for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter)
+ InvalidateActivation(fop, iter, false);
+
+ // Drop the references added above. If a script was never active, its
+ // IonScript will be immediately destroyed. Otherwise, it will be held live
+ // until its last invalidated frame is destroyed.
+ for (size_t i = 0; i < invalid.length(); i++) {
+ CompilerOutput* co = invalid[i].compilerOutput(types);
+ if (!co)
+ continue;
+ MOZ_ASSERT(co->isValid());
+
+ JSScript* script = co->script();
+ IonScript* ionScript = co->ion();
+ if (!ionScript)
+ continue;
+
+ script->setIonScript(nullptr, nullptr);
+ ionScript->decrementInvalidationCount(fop);
+ co->invalidate();
+ numInvalidations--;
+
+ // Wait for the scripts to get warm again before doing another
+ // compile, unless we are recompiling *because* a script got hot
+ // (resetUses is false).
+ if (resetUses)
+ script->resetWarmUpCounter();
+ }
+
+ // Make sure we didn't leak references by invalidating the same IonScript
+ // multiple times in the above loop.
+ MOZ_ASSERT(!numInvalidations);
+}
+
+void
+jit::Invalidate(JSContext* cx, const RecompileInfoVector& invalid, bool resetUses,
+ bool cancelOffThread)
+{
+ jit::Invalidate(cx->zone()->types, cx->runtime()->defaultFreeOp(), invalid, resetUses,
+ cancelOffThread);
+}
+
+void
+jit::IonScript::invalidate(JSContext* cx, bool resetUses, const char* reason)
+{
+ JitSpew(JitSpew_IonInvalidate, " Invalidate IonScript %p: %s", this, reason);
+
+ // RecompileInfoVector has inline space for at least one element.
+ RecompileInfoVector list;
+ MOZ_RELEASE_ASSERT(list.reserve(1));
+ list.infallibleAppend(recompileInfo());
+
+ Invalidate(cx, list, resetUses, true);
+}
+
+void
+jit::Invalidate(JSContext* cx, JSScript* script, bool resetUses, bool cancelOffThread)
+{
+ MOZ_ASSERT(script->hasIonScript());
+
+ if (cx->runtime()->spsProfiler.enabled()) {
+ // Register invalidation with profiler.
+ // Format of event payload string:
+ // "<filename>:<lineno>"
+
+ // Get the script filename, if any, and its length.
+ const char* filename = script->filename();
+ if (filename == nullptr)
+ filename = "<unknown>";
+
+ // Construct the descriptive string.
+ char* buf = JS_smprintf("Invalidate %s:%" PRIuSIZE, filename, script->lineno());
+
+ // Ignore the event on allocation failure.
+ if (buf) {
+ cx->runtime()->spsProfiler.markEvent(buf);
+ JS_smprintf_free(buf);
+ }
+ }
+
+ // RecompileInfoVector has inline space for at least one element.
+ RecompileInfoVector scripts;
+ MOZ_ASSERT(script->hasIonScript());
+ MOZ_RELEASE_ASSERT(scripts.reserve(1));
+ scripts.infallibleAppend(script->ionScript()->recompileInfo());
+
+ Invalidate(cx, scripts, resetUses, cancelOffThread);
+}
+
+static void
+FinishInvalidationOf(FreeOp* fop, JSScript* script, IonScript* ionScript)
+{
+ TypeZone& types = script->zone()->types;
+
+ // Note: If the script is about to be swept, the compiler output may have
+ // already been destroyed.
+ if (CompilerOutput* output = ionScript->recompileInfo().compilerOutput(types))
+ output->invalidate();
+
+    // If this script has Ion code on the stack, invalidated() will return
+    // true. In this case destruction is deferred until the invalidated
+    // frames are gone.
+ if (!ionScript->invalidated())
+ jit::IonScript::Destroy(fop, ionScript);
+}
+
+void
+jit::FinishInvalidation(FreeOp* fop, JSScript* script)
+{
+ // In all cases, nullptr out script->ion to avoid re-entry.
+ if (script->hasIonScript()) {
+ IonScript* ion = script->ionScript();
+ script->setIonScript(nullptr, nullptr);
+ FinishInvalidationOf(fop, script, ion);
+ }
+}
+
+void
+jit::ForbidCompilation(JSContext* cx, JSScript* script)
+{
+ JitSpew(JitSpew_IonAbort, "Disabling Ion compilation of script %s:%" PRIuSIZE,
+ script->filename(), script->lineno());
+
+ CancelOffThreadIonCompile(script);
+
+ if (script->hasIonScript())
+ Invalidate(cx, script, false);
+
+ script->setIonScript(cx->runtime(), ION_DISABLED_SCRIPT);
+}
+
+AutoFlushICache*
+PerThreadData::autoFlushICache() const
+{
+ return autoFlushICache_;
+}
+
+void
+PerThreadData::setAutoFlushICache(AutoFlushICache* afc)
+{
+ autoFlushICache_ = afc;
+}
+
+// Set the range for the merging of flushes. Flushing is deferred until the end of
+// the AutoFlushICache context. Subsequent flushes within this range are also
+// deferred. The range is only expected to be set once for each AutoFlushICache
+// context, and setting it is required to happen within an AutoFlushICache context.
+void
+AutoFlushICache::setRange(uintptr_t start, size_t len)
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ AutoFlushICache* afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
+ MOZ_ASSERT(afc);
+ MOZ_ASSERT(!afc->start_);
+ JitSpewCont(JitSpew_CacheFlush, "(%" PRIxPTR " %" PRIxSIZE "):", start, len);
+
+ uintptr_t stop = start + len;
+ afc->start_ = start;
+ afc->stop_ = stop;
+#endif
+}
+
+// Flush the instruction cache.
+//
+// If called within a dynamic AutoFlushICache context and if the range is already pending
+// flushing for this AutoFlushICache context then the request is ignored with the
+// understanding that it will be flushed on exit from the AutoFlushICache context.
+// Otherwise the range is flushed immediately.
+//
+// Updates outside the current code object are typically the exception so they are flushed
+// immediately rather than attempting to merge them.
+//
+// For efficiency it is expected that all large ranges will be flushed within an
+// AutoFlushICache context, and this is checked with an assertion. If the assertion is
+// hit it does not necessarily indicate a program fault, but it might indicate a lost
+// opportunity to merge cache flushing. It can be corrected by wrapping the call in an
+// AutoFlushICache context.
+//
+// Note this can be called without TLS PerThreadData defined so this case needs
+// to be guarded against. E.g. when patching instructions from the exception
+// handler on MacOS running the ARM simulator.
+void
+AutoFlushICache::flush(uintptr_t start, size_t len)
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ PerThreadData* pt = TlsPerThreadData.get();
+ AutoFlushICache* afc = pt ? pt->PerThreadData::autoFlushICache() : nullptr;
+ if (!afc) {
+ JitSpewCont(JitSpew_CacheFlush, "#");
+ ExecutableAllocator::cacheFlush((void*)start, len);
+ MOZ_ASSERT(len <= 32);
+ return;
+ }
+
+ uintptr_t stop = start + len;
+ if (start >= afc->start_ && stop <= afc->stop_) {
+ // Update is within the pending flush range, so defer to the end of the context.
+ JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
+ return;
+ }
+
+ JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
+ ExecutableAllocator::cacheFlush((void*)start, len);
+#endif
+}
+
+// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in error paths
+// where the changes are being abandoned.
+void
+AutoFlushICache::setInhibit()
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ AutoFlushICache* afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
+ MOZ_ASSERT(afc);
+ MOZ_ASSERT(afc->start_);
+ JitSpewCont(JitSpew_CacheFlush, "I");
+ afc->inhibit_ = true;
+#endif
+}
+
+// The common use case is merging cache flushes when preparing a code object. In this
+// case the entire range of the code object is flushed, and as the code is patched
+// smaller redundant flushes could occur. The design allows an AutoFlushICache dynamic
+// thread-local context to be declared in which the range of the code object can be
+// set; flushing is then deferred until the end of this dynamic context, and redundant
+// flushes within this code range are folded into the single deferred flush. Flushing
+// outside this code range is not affected and proceeds immediately.
+//
+// In some cases flushing is not necessary, such as when compiling a wasm module which
+// is flushed again when dynamically linked, and also in error paths that abandon the
+// code. Flushing within the set code range can be inhibited within the AutoFlushICache
+// dynamic context by setting an inhibit flag.
+//
+// The JS compiler can be re-entered while within an AutoFlushICache dynamic context and
+// it is assumed that code being assembled or patched is not executed before the exit of
+// the respective AutoFlushICache dynamic context.
+//
+AutoFlushICache::AutoFlushICache(const char* nonce, bool inhibit)
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ : start_(0),
+ stop_(0),
+ name_(nonce),
+ inhibit_(inhibit)
+#endif
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ PerThreadData* pt = TlsPerThreadData.get();
+ AutoFlushICache* afc = pt->PerThreadData::autoFlushICache();
+ if (afc)
+ JitSpew(JitSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_, inhibit ? " I" : "");
+ else
+ JitSpewCont(JitSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");
+
+ prev_ = afc;
+ pt->PerThreadData::setAutoFlushICache(this);
+#endif
+}
+
+AutoFlushICache::~AutoFlushICache()
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ PerThreadData* pt = TlsPerThreadData.get();
+ MOZ_ASSERT(pt->PerThreadData::autoFlushICache() == this);
+
+ if (!inhibit_ && start_)
+ ExecutableAllocator::cacheFlush((void*)start_, size_t(stop_ - start_));
+
+ JitSpewCont(JitSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
+ JitSpewFin(JitSpew_CacheFlush);
+ pt->PerThreadData::setAutoFlushICache(prev_);
+#endif
+}
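+
+// A minimal usage sketch of the mechanism described above (the code-range
+// names are hypothetical; the AutoFlushICache calls are the ones defined in
+// this file):
+//
+//   {
+//       AutoFlushICache afc("Example", /* inhibit = */ false);
+//       AutoFlushICache::setRange(codeStart, codeLen);   // defer flushes here
+//       // ... emit or patch instructions; AutoFlushICache::flush() requests
+//       // falling inside [codeStart, codeStart + codeLen) are merged ...
+//   }   // the destructor flushes the whole range once, unless setInhibit()
+//       // was called on an error path.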
+
+void
+jit::PurgeCaches(JSScript* script)
+{
+ if (script->hasIonScript())
+ script->ionScript()->purgeCaches();
+}
+
+size_t
+jit::SizeOfIonData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf)
+{
+ size_t result = 0;
+
+ if (script->hasIonScript())
+ result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);
+
+ return result;
+}
+
+void
+jit::DestroyJitScripts(FreeOp* fop, JSScript* script)
+{
+ if (script->hasIonScript())
+ jit::IonScript::Destroy(fop, script->ionScript());
+
+ if (script->hasBaselineScript())
+ jit::BaselineScript::Destroy(fop, script->baselineScript());
+}
+
+void
+jit::TraceJitScripts(JSTracer* trc, JSScript* script)
+{
+ if (script->hasIonScript())
+ jit::IonScript::Trace(trc, script->ionScript());
+
+ if (script->hasBaselineScript())
+ jit::BaselineScript::Trace(trc, script->baselineScript());
+}
+
+bool
+jit::JitSupportsFloatingPoint()
+{
+ return js::jit::MacroAssembler::SupportsFloatingPoint();
+}
+
+bool
+jit::JitSupportsUnalignedAccesses()
+{
+ return js::jit::MacroAssembler::SupportsUnalignedAccesses();
+}
+
+bool
+jit::JitSupportsSimd()
+{
+ return js::jit::MacroAssembler::SupportsSimd();
+}
+
+bool
+jit::JitSupportsAtomics()
+{
+#if defined(JS_CODEGEN_ARM)
+ // Bug 1146902, bug 1077318: Enable Ion inlining of Atomics
+ // operations on ARM only when the CPU has byte, halfword, and
+ // doubleword load-exclusive and store-exclusive instructions,
+ // until we can add support for systems that don't have those.
+ return js::jit::HasLDSTREXBHD();
+#else
+ return true;
+#endif
+}
+
+// If you change these, please also change the comment in TempAllocator.
+/* static */ const size_t TempAllocator::BallastSize = 16 * 1024;
+/* static */ const size_t TempAllocator::PreferredLifoChunkSize = 32 * 1024;
diff --git a/js/src/jit/Ion.h b/js/src/jit/Ion.h
new file mode 100644
index 000000000..018eea5cb
--- /dev/null
+++ b/js/src/jit/Ion.h
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Ion_h
+#define jit_Ion_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "jit/CompileWrappers.h"
+#include "jit/JitOptions.h"
+
+namespace js {
+namespace jit {
+
+class TempAllocator;
+
+enum MethodStatus
+{
+ Method_Error,
+ Method_CantCompile,
+ Method_Skipped,
+ Method_Compiled
+};
+
+enum AbortReason {
+ AbortReason_Alloc,
+ AbortReason_Inlining,
+ AbortReason_PreliminaryObjects,
+ AbortReason_Disable,
+ AbortReason_Error,
+ AbortReason_NoAbort
+};
+
+// A JIT context is needed to enter into either a JIT method or an instance
+// of a JIT compiler. It points to a temporary allocator and the active
+// JSContext, either of which may be nullptr, and the active compartment, which
+// will not be nullptr.
+
+class JitContext
+{
+ public:
+ JitContext(JSContext* cx, TempAllocator* temp);
+ JitContext(ExclusiveContext* cx, TempAllocator* temp);
+ JitContext(CompileRuntime* rt, CompileCompartment* comp, TempAllocator* temp);
+ JitContext(CompileRuntime* rt, TempAllocator* temp);
+ explicit JitContext(CompileRuntime* rt);
+ explicit JitContext(TempAllocator* temp);
+ JitContext();
+ ~JitContext();
+
+ // Running context when executing on the main thread. Not available during
+ // compilation.
+ JSContext* cx;
+
+ // Allocator for temporary memory during compilation.
+ TempAllocator* temp;
+
+ // Wrappers with information about the current runtime/compartment for use
+ // during compilation.
+ CompileRuntime* runtime;
+ CompileCompartment* compartment;
+
+ bool onMainThread() const {
+ return runtime && runtime->onMainThread();
+ }
+ bool hasProfilingScripts() const {
+ return runtime && !!runtime->profilingScripts();
+ }
+
+ int getNextAssemblerId() {
+ return assemblerCount_++;
+ }
+ private:
+ JitContext* prev_;
+ int assemblerCount_;
+};
+
+// Initialize Ion statically for all JSRuntimes.
+MOZ_MUST_USE bool InitializeIon();
+
+// Get and set the current JIT context.
+JitContext* GetJitContext();
+JitContext* MaybeGetJitContext();
+
+void SetJitContext(JitContext* ctx);
+
+bool CanIonCompileScript(JSContext* cx, JSScript* script, bool osr);
+
+MOZ_MUST_USE bool IonCompileScriptForBaseline(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+
+MethodStatus CanEnter(JSContext* cx, RunState& state);
+MethodStatus CanEnterUsingFastInvoke(JSContext* cx, HandleScript script, uint32_t numActualArgs);
+
+MethodStatus
+Recompile(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* osrPc,
+ bool force);
+
+enum JitExecStatus
+{
+ // The method call had to be aborted due to a stack limit check. This
+ // error indicates that Ion never attempted to clean up frames.
+ JitExec_Aborted,
+
+ // The method call resulted in an error, and IonMonkey has cleaned up
+ // frames.
+ JitExec_Error,
+
+ // The method call succeeded and returned a value.
+ JitExec_Ok
+};
+
+static inline bool
+IsErrorStatus(JitExecStatus status)
+{
+ return status == JitExec_Error || status == JitExec_Aborted;
+}
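+
+// Sketch of how a caller typically consumes a JitExecStatus (illustrative
+// only, not taken from an actual call site):
+//
+//   JitExecStatus status = jit::IonCannon(cx, state);
+//   if (IsErrorStatus(status))
+//       return false;            // JitExec_Aborted or JitExec_Error
+//   // JitExec_Ok: the return value was stored via state.setReturnValue().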
+
+struct EnterJitData;
+
+MOZ_MUST_USE bool SetEnterJitData(JSContext* cx, EnterJitData& data, RunState& state,
+ MutableHandle<GCVector<Value>> vals);
+
+JitExecStatus IonCannon(JSContext* cx, RunState& state);
+
+// Used to enter Ion from C++ natives like Array.map. Called from FastInvokeGuard.
+JitExecStatus FastInvoke(JSContext* cx, HandleFunction fun, CallArgs& args);
+
+// Walk the stack and invalidate active Ion frames for the invalid scripts.
+void Invalidate(TypeZone& types, FreeOp* fop,
+ const RecompileInfoVector& invalid, bool resetUses = true,
+ bool cancelOffThread = true);
+void Invalidate(JSContext* cx, const RecompileInfoVector& invalid, bool resetUses = true,
+ bool cancelOffThread = true);
+void Invalidate(JSContext* cx, JSScript* script, bool resetUses = true,
+ bool cancelOffThread = true);
+
+void ToggleBarriers(JS::Zone* zone, bool needs);
+
+class IonBuilder;
+class MIRGenerator;
+class LIRGraph;
+class CodeGenerator;
+
+MOZ_MUST_USE bool OptimizeMIR(MIRGenerator* mir);
+LIRGraph* GenerateLIR(MIRGenerator* mir);
+CodeGenerator* GenerateCode(MIRGenerator* mir, LIRGraph* lir);
+CodeGenerator* CompileBackEnd(MIRGenerator* mir);
+
+void AttachFinishedCompilations(JSContext* cx);
+void FinishOffThreadBuilder(JSRuntime* runtime, IonBuilder* builder,
+ const AutoLockHelperThreadState& lock);
+
+void LinkIonScript(JSContext* cx, HandleScript calleescript);
+uint8_t* LazyLinkTopActivation(JSContext* cx);
+
+static inline bool
+IsIonEnabled(JSContext* cx)
+{
+ // The ARM64 Ion engine is not yet implemented.
+#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
+ return false;
+#else
+ return cx->options().ion() &&
+ cx->options().baseline() &&
+ cx->runtime()->jitSupportsFloatingPoint;
+#endif
+}
+
+inline bool
+IsIonInlinablePC(jsbytecode* pc) {
+ // CALL, FUNCALL, FUNAPPLY, EVAL, NEW (Normal Callsites)
+ // GETPROP, CALLPROP, and LENGTH. (Inlined Getters)
+ // SETPROP, SETNAME, SETGNAME (Inlined Setters)
+ return IsCallPC(pc) || IsGetPropPC(pc) || IsSetPropPC(pc);
+}
+
+inline bool
+TooManyActualArguments(unsigned nargs)
+{
+ return nargs > JitOptions.maxStackArgs;
+}
+
+inline bool
+TooManyFormalArguments(unsigned nargs)
+{
+ return nargs >= SNAPSHOT_MAX_NARGS || TooManyActualArguments(nargs);
+}
+
+inline size_t
+NumLocalsAndArgs(JSScript* script)
+{
+ size_t num = 1 /* this */ + script->nfixed();
+ if (JSFunction* fun = script->functionNonDelazifying())
+ num += fun->nargs();
+ return num;
+}
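+
+// Worked example (illustrative): a function with 3 fixed slots and 2 formal
+// arguments yields 1 (|this|) + 3 (nfixed) + 2 (nargs) = 6, which is compared
+// against MAX_MAIN_THREAD_LOCALS_AND_ARGS in CheckScriptSize to decide whether
+// the script may only be compiled off thread.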
+
+bool OffThreadCompilationAvailable(JSContext* cx);
+
+void ForbidCompilation(JSContext* cx, JSScript* script);
+
+void PurgeCaches(JSScript* script);
+size_t SizeOfIonData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf);
+void DestroyJitScripts(FreeOp* fop, JSScript* script);
+void TraceJitScripts(JSTracer* trc, JSScript* script);
+
+bool JitSupportsFloatingPoint();
+bool JitSupportsUnalignedAccesses();
+bool JitSupportsSimd();
+bool JitSupportsAtomics();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Ion_h */
diff --git a/js/src/jit/IonAnalysis.cpp b/js/src/jit/IonAnalysis.cpp
new file mode 100644
index 000000000..90303255d
--- /dev/null
+++ b/js/src/jit/IonAnalysis.cpp
@@ -0,0 +1,4760 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonAnalysis.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/AliasAnalysis.h"
+#include "jit/BaselineInspector.h"
+#include "jit/BaselineJIT.h"
+#include "jit/FlowAliasAnalysis.h"
+#include "jit/Ion.h"
+#include "jit/IonBuilder.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/LIR.h"
+#include "jit/Lowering.h"
+#include "jit/MIRGraph.h"
+#include "vm/RegExpObject.h"
+#include "vm/SelfHosting.h"
+
+#include "jsobjinlines.h"
+#include "jsopcodeinlines.h"
+#include "jsscriptinlines.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+typedef Vector<MPhi*, 16, SystemAllocPolicy> MPhiVector;
+
+static bool
+FlagPhiInputsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block, MBasicBlock* succ,
+ MPhiVector& worklist)
+{
+ // When removing an edge between 2 blocks, we might remove the ability of
+ // later phases to figure out that the uses of a Phi should be considered as
+ // a use of all its inputs. Thus we need to mark the Phi inputs as having
+ // removed uses iff the phi has any uses.
+ //
+ //
+ // +--------------------+ +---------------------+
+ // |12 MFoo 6 | |32 MBar 5 |
+ // | | | |
+ // | ... | | ... |
+ // | | | |
+ // |25 MGoto Block 4 | |43 MGoto Block 4 |
+ // +--------------------+ +---------------------+
+ // | |
+ // | | |
+ // | | |
+ // | +-----X------------------------+
+ // | Edge |
+ // | Removed |
+ // | |
+ // | +------------v-----------+
+ // | |50 MPhi 12 32 |
+ // | | |
+ // | | ... |
+ // | | |
+ // | |70 MReturn 50 |
+ // | +------------------------+
+ // |
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ // |
+ // v
+ //
+ // ^ +--------------------+ +---------------------+
+ // /!\ |12 MConst opt-out | |32 MBar 5 |
+ // '---' | | | |
+ // | ... | | ... |
+ // |78 MBail | | |
+ // |80 MUnreachable | |43 MGoto Block 4 |
+ // +--------------------+ +---------------------+
+ // |
+ // |
+ // |
+ // +---------------+
+ // |
+ // |
+ // |
+ // +------------v-----------+
+ // |50 MPhi 32 |
+ // | |
+ // | ... |
+ // | |
+ // |70 MReturn 50 |
+ // +------------------------+
+ //
+ //
+    // If the inputs of the Phi are not flagged as having removed uses, then
+    // later compilation phases might optimize them out. The problem is that a
+ // bailout will use this value and give it back to baseline, which will then
+ // use the OptimizedOut magic value in a computation.
+
+ // Conservative upper limit for the number of Phi instructions which are
+ // visited while looking for uses.
+ const size_t conservativeUsesLimit = 128;
+
+ MOZ_ASSERT(worklist.empty());
+ size_t predIndex = succ->getPredecessorIndex(block);
+ MPhiIterator end = succ->phisEnd();
+ MPhiIterator it = succ->phisBegin();
+ for (; it != end; it++) {
+ MPhi* phi = *it;
+
+ if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses outer loop"))
+ return false;
+
+ // We are looking to mark the Phi inputs which are used across the edge
+ // between the |block| and its successor |succ|.
+ MDefinition* def = phi->getOperand(predIndex);
+ if (def->isUseRemoved())
+ continue;
+
+ phi->setInWorklist();
+ if (!worklist.append(phi))
+ return false;
+
+ // Fill the work list with all the Phi nodes uses until we reach either:
+ // - A resume point which uses the Phi as an observable operand.
+ // - An explicit use of the Phi instruction.
+ // - An implicit use of the Phi instruction.
+ bool isUsed = false;
+ for (size_t idx = 0; !isUsed && idx < worklist.length(); idx++) {
+ phi = worklist[idx];
+
+ if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses inner loop 1"))
+ return false;
+
+ if (phi->isUseRemoved() || phi->isImplicitlyUsed()) {
+ // The phi is implicitly used.
+ isUsed = true;
+ break;
+ }
+
+ MUseIterator usesEnd(phi->usesEnd());
+ for (MUseIterator use(phi->usesBegin()); use != usesEnd; use++) {
+ MNode* consumer = (*use)->consumer();
+
+ if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses inner loop 2"))
+ return false;
+
+ if (consumer->isResumePoint()) {
+ MResumePoint* rp = consumer->toResumePoint();
+ if (rp->isObservableOperand(*use)) {
+ // The phi is observable via a resume point operand.
+ isUsed = true;
+ break;
+ }
+ continue;
+ }
+
+ MDefinition* cdef = consumer->toDefinition();
+ if (!cdef->isPhi()) {
+ // The phi is explicitly used.
+ isUsed = true;
+ break;
+ }
+
+ phi = cdef->toPhi();
+ if (phi->isInWorklist())
+ continue;
+
+ phi->setInWorklist();
+ if (!worklist.append(phi))
+ return false;
+ }
+
+ // Use a conservative upper bound to avoid iterating too many times
+ // on very large graphs.
+ if (idx >= conservativeUsesLimit) {
+ isUsed = true;
+ break;
+ }
+ }
+
+ if (isUsed)
+ def->setUseRemoved();
+
+ // Remove all the InWorklist flags.
+ while (!worklist.empty()) {
+ phi = worklist.popCopy();
+ phi->setNotInWorklist();
+ }
+ }
+
+ return true;
+}
+
+static bool
+FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
+{
+    // Flag the operands of all instructions as having removed uses.
+ MInstructionIterator end = block->end();
+ for (MInstructionIterator it = block->begin(); it != end; it++) {
+ if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 1"))
+ return false;
+
+ MInstruction* ins = *it;
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++)
+ ins->getOperand(i)->setUseRemovedUnchecked();
+
+ // Flag observable resume point operands as having removed uses.
+ if (MResumePoint* rp = ins->resumePoint()) {
+            // Note: no need to iterate over the callers of the resume point,
+            // as this is the same as the entry resume point.
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses inner loop"))
+ return false;
+
+ if (!rp->isObservableOperand(i))
+ continue;
+ rp->getOperand(i)->setUseRemovedUnchecked();
+ }
+ }
+ }
+
+ // Flag observable operands of the entry resume point as having removed uses.
+ MResumePoint* rp = block->entryResumePoint();
+ while (rp) {
+ if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 2"))
+ return false;
+
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (!rp->isObservableOperand(i))
+ continue;
+ rp->getOperand(i)->setUseRemovedUnchecked();
+ }
+ rp = rp->caller();
+ }
+
+    // Flag Phi inputs of the successors as having removed uses.
+ MPhiVector worklist;
+ for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
+ if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 3"))
+ return false;
+
+ if (!FlagPhiInputsAsHavingRemovedUses(mir, block, block->getSuccessor(i), worklist))
+ return false;
+ }
+
+ return true;
+}
+
+static void
+RemoveFromSuccessors(MBasicBlock* block)
+{
+ // Remove this block from its successors.
+ size_t numSucc = block->numSuccessors();
+ while (numSucc--) {
+ MBasicBlock* succ = block->getSuccessor(numSucc);
+ if (succ->isDead())
+ continue;
+ JitSpew(JitSpew_Prune, "Remove block edge %d -> %d.", block->id(), succ->id());
+ succ->removePredecessor(block);
+ }
+}
+
+static void
+ConvertToBailingBlock(TempAllocator& alloc, MBasicBlock* block)
+{
+ // Add a bailout instruction.
+ MBail* bail = MBail::New(alloc, Bailout_FirstExecution);
+ MInstruction* bailPoint = block->safeInsertTop();
+ block->insertBefore(block->safeInsertTop(), bail);
+
+ // Discard all remaining instructions.
+ MInstructionIterator clearStart = block->begin(bailPoint);
+ block->discardAllInstructionsStartingAt(clearStart);
+ if (block->outerResumePoint())
+ block->clearOuterResumePoint();
+
+ // And replace the last instruction by the unreachable control instruction.
+ block->end(MUnreachable::New(alloc));
+}
+
+bool
+jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph)
+{
+ MOZ_ASSERT(!mir->compilingWasm(), "wasm compilation has no code coverage support.");
+
+    // We do a reverse-post-order traversal, marking basic blocks that have to
+    // be converted into bailing blocks, and flagging blocks as unreachable if
+    // all their predecessors are flagged as bailing or unreachable.
+ bool someUnreachable = false;
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Prune unused branches (main loop)"))
+ return false;
+
+ JitSpew(JitSpew_Prune, "Investigate Block %d:", block->id());
+ JitSpewIndent indent(JitSpew_Prune);
+
+ // Do not touch entry basic blocks.
+ if (*block == graph.osrBlock() || *block == graph.entryBlock()) {
+ JitSpew(JitSpew_Prune, "Block %d is an entry point.", block->id());
+ continue;
+ }
+
+        // Compute whether all the predecessors of this block are either
+        // bailing out or are already flagged as unreachable.
+ bool isUnreachable = true;
+ bool isLoopHeader = block->isLoopHeader();
+ size_t numPred = block->numPredecessors();
+ size_t i = 0;
+ for (; i < numPred; i++) {
+ if (mir->shouldCancel("Prune unused branches (inner loop 1)"))
+ return false;
+
+ MBasicBlock* pred = block->getPredecessor(i);
+
+ // The backedge is visited after the loop header, but if the loop
+ // header is unreachable, then we can assume that the backedge would
+ // be unreachable too.
+ if (isLoopHeader && pred == block->backedge())
+ continue;
+
+            // Break if any of the predecessors can continue into this block.
+ if (!pred->isMarked() && !pred->unreachable()) {
+ isUnreachable = false;
+ break;
+ }
+ }
+
+        // Compute whether the block should bail out, based on the trivial
+        // heuristic that a block which has never been visited before is
+        // unlikely to be visited later.
+ bool shouldBailout =
+ block->getHitState() == MBasicBlock::HitState::Count &&
+ block->getHitCount() == 0;
+
+        // Check whether the predecessors were hit a large number of times
+        // compared to the current block, in order to know whether our attempt
+        // at removing this block is premature.
+ if (!isUnreachable && shouldBailout) {
+ size_t p = numPred;
+ size_t predCount = 0;
+ size_t numSuccessorsOfPreds = 1;
+ bool isLoopExit = false;
+ while (p--) {
+ if (mir->shouldCancel("Prune unused branches (inner loop 2)"))
+ return false;
+
+ MBasicBlock* pred = block->getPredecessor(p);
+ if (pred->getHitState() == MBasicBlock::HitState::Count)
+ predCount += pred->getHitCount();
+ isLoopExit |= pred->isLoopHeader() && pred->backedge() != *block;
+ numSuccessorsOfPreds += pred->numSuccessors() - 1;
+ }
+
+ // Iterate over the approximated set of dominated blocks and count
+ // the number of instructions which are dominated. Note that this
+ // approximation has issues with OSR blocks, but this should not be
+ // a big deal.
+ size_t numDominatedInst = 0;
+ size_t numEffectfulInst = 0;
+ int numInOutEdges = block->numPredecessors();
+ size_t branchSpan = 0;
+ ReversePostorderIterator it(block);
+ do {
+ if (mir->shouldCancel("Prune unused branches (inner loop 3)"))
+ return false;
+
+ // Iterate over dominated blocks, and visit exit blocks as well.
+ numInOutEdges -= it->numPredecessors();
+ if (numInOutEdges < 0)
+ break;
+ numInOutEdges += it->numSuccessors();
+
+ // Collect information about the instructions within the block.
+ for (MDefinitionIterator def(*it); def; def++) {
+ numDominatedInst++;
+ if (def->isEffectful())
+ numEffectfulInst++;
+ }
+
+ it++;
+ branchSpan++;
+ } while(numInOutEdges > 0 && it != graph.rpoEnd());
+
+            // The goal of branch pruning is to remove branches which are
+            // preventing other optimizations, while keeping branches which
+            // would be costly if we were to bail out. The following heuristics
+            // are meant to prevent bailouts in branches when we estimate that
+            // the confidence is not enough to compensate for the cost of a
+            // bailout.
+ //
+            // 1. Confidence for removal varies with the number of hit counts
+            //    of the predecessor. The reason being that the likelihood of
+            //    taking this branch decreases with the number of hit counts
+            //    of the predecessor.
+ //
+ // 2. Confidence for removal varies with the number of dominated
+ // instructions. The reason being that the complexity of the
+ // branch increases with the number of instructions, thus
+ // working against other optimizations.
+ //
+ // 3. Confidence for removal varies with the span of the
+ // branch. The reason being that a branch that spans over a
+            //    large set of blocks is likely to remove optimization
+            //    opportunities, as it prevents instructions from the other
+            //    branches from dominating the blocks that follow.
+ //
+ // 4. Confidence for removal varies with the number of effectful
+ // instructions. The reason being that an effectful instruction
+ // can remove optimization opportunities based on Scalar
+ // Replacement, and based on Alias Analysis.
+ //
+ // The following converts various units in some form of arbitrary
+ // score, such that we can compare it to a threshold.
+ size_t score = 0;
+ MOZ_ASSERT(numSuccessorsOfPreds >= 1);
+ score += predCount * JitOptions.branchPruningHitCountFactor / numSuccessorsOfPreds;
+ score += numDominatedInst * JitOptions.branchPruningInstFactor;
+ score += branchSpan * JitOptions.branchPruningBlockSpanFactor;
+ score += numEffectfulInst * JitOptions.branchPruningEffectfulInstFactor;
+ if (score < JitOptions.branchPruningThreshold)
+ shouldBailout = false;
+
+            // If the predecessors do not have enough hit counts, keep the
+            // branch until we recompile this function later with more
+            // information.
+ if (predCount / numSuccessorsOfPreds < 50)
+ shouldBailout = false;
+
+            // The predecessors have only a single successor, so the decision
+            // should have been taken as part of the previous block's
+            // investigation, and this block should instead be unreachable.
+ if (numSuccessorsOfPreds == 1)
+ shouldBailout = false;
+
+ // If this is the exit block of a loop, then keep this basic
+ // block. This heuristic is useful as a bailout is often much more
+ // costly than a simple exit sequence.
+ if (isLoopExit)
+ shouldBailout = false;
+
+ // Interpreters are often implemented as a table switch within a for
+ // loop. What might happen is that the interpreter heats up in a
+ // subset of instructions, but might need other instructions for the
+ // rest of the evaluation.
+ if (numSuccessorsOfPreds > 8)
+ shouldBailout = false;
+
+ JitSpew(JitSpew_Prune, "info: block %d,"
+ " predCount: %" PRIuSIZE ", domInst: %" PRIuSIZE
+ ", span: %" PRIuSIZE ", effectful: %" PRIuSIZE ", "
+ " isLoopExit: %s, numSuccessorsOfPred: %" PRIuSIZE "."
+ " (score: %" PRIuSIZE ", shouldBailout: %s)",
+ block->id(), predCount, numDominatedInst, branchSpan, numEffectfulInst,
+ isLoopExit ? "true" : "false", numSuccessorsOfPreds,
+ score, shouldBailout ? "true" : "false");
+ }
+
+ // Continue to the next basic block if the current basic block can
+ // remain unchanged.
+ if (!isUnreachable && !shouldBailout)
+ continue;
+
+ someUnreachable = true;
+ if (isUnreachable) {
+ JitSpew(JitSpew_Prune, "Mark block %d as unreachable.", block->id());
+ block->setUnreachable();
+ // If the block is unreachable, then there is no need to convert it
+ // to a bailing block.
+ } else if (shouldBailout) {
+ JitSpew(JitSpew_Prune, "Mark block %d as bailing block.", block->id());
+ block->markUnchecked();
+ }
+
+ // When removing a loop header, we should ensure that its backedge is
+ // removed first, otherwise this triggers an assertion in
+ // removePredecessorsWithoutPhiOperands.
+ if (block->isLoopHeader()) {
+ JitSpew(JitSpew_Prune, "Mark block %d as bailing block. (loop backedge)", block->backedge()->id());
+ block->backedge()->markUnchecked();
+ }
+ }
+
+ // Returns early if nothing changed.
+ if (!someUnreachable)
+ return true;
+
+ JitSpew(JitSpew_Prune, "Convert basic block to bailing blocks, and remove unreachable blocks:");
+ JitSpewIndent indent(JitSpew_Prune);
+
+    // As we are going to remove edges and basic blocks, we have to mark
+    // instructions which would be needed by baseline if we were to bail out.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ if (mir->shouldCancel("Prune unused branches (marking loop)"))
+ return false;
+
+ MBasicBlock* block = *it++;
+ if (!block->isMarked() && !block->unreachable())
+ continue;
+
+ FlagAllOperandsAsHavingRemovedUses(mir, block);
+ }
+
+ // Remove the blocks in post-order such that consumers are visited before
+ // the predecessors, the only exception being the Phi nodes of loop headers.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ if (mir->shouldCancel("Prune unused branches (removal loop)"))
+ return false;
+
+ MBasicBlock* block = *it++;
+ if (!block->isMarked() && !block->unreachable())
+ continue;
+
+ JitSpew(JitSpew_Prune, "Remove / Replace block %d.", block->id());
+ JitSpewIndent indent(JitSpew_Prune);
+
+ // As we are going to replace/remove the last instruction, we first have
+ // to remove this block from the predecessor list of its successors.
+ RemoveFromSuccessors(block);
+
+ // Convert the current basic block to a bailing block which ends with an
+ // Unreachable control instruction.
+ if (block->isMarked()) {
+ JitSpew(JitSpew_Prune, "Convert Block %d to a bailing block.", block->id());
+ if (!graph.alloc().ensureBallast())
+ return false;
+ ConvertToBailingBlock(graph.alloc(), block);
+ block->unmark();
+ }
+
+ // Remove all instructions.
+ if (block->unreachable()) {
+ JitSpew(JitSpew_Prune, "Remove Block %d.", block->id());
+ JitSpewIndent indent(JitSpew_Prune);
+ graph.removeBlock(block);
+ }
+ }
+
+ return true;
+}
+
+static bool
+SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block)
+{
+ if (block->numSuccessors() < 2)
+ return true;
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ MBasicBlock* target = block->getSuccessor(i);
+ if (target->numPredecessors() < 2)
+ continue;
+
+        // Create a simple new block which contains a goto and which splits
+        // the edge between block and target.
+ MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
+ if (!split)
+ return false;
+ }
+ return true;
+}
+
+// A critical edge is an edge which is neither its successor's only predecessor
+// nor its predecessor's only successor. Critical edges must be split to
+// prevent copy-insertion and code motion from affecting other edges.
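+//
+// For example, when a block with two successors branches to a block that has
+// two predecessors, that edge is critical; SplitCriticalEdgesForBlock inserts
+// a new block containing only a goto on such an edge.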
+bool
+jit::SplitCriticalEdges(MIRGraph& graph)
+{
+ for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
+ MBasicBlock* block = *iter;
+ if (!SplitCriticalEdgesForBlock(graph, block))
+ return false;
+ }
+ return true;
+}
+
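+// A definition is considered uint32 if it comes from the canonical JS uint32
+// coercion pattern (x >>> 0): an MUrsh whose right-hand operand is the Int32
+// constant 0, looking through an MBeta if present.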
+bool
+jit::IsUint32Type(const MDefinition* def)
+{
+ if (def->isBeta())
+ def = def->getOperand(0);
+
+ if (def->type() != MIRType::Int32)
+ return false;
+
+ return def->isUrsh() && def->getOperand(1)->isConstant() &&
+ def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
+ def->getOperand(1)->toConstant()->toInt32() == 0;
+}
+
+// Return whether a block simply computes the specified constant value.
+static bool
+BlockComputesConstant(MBasicBlock* block, MDefinition* value, bool* constBool)
+{
+    // Look for values with no uses. This is used to eliminate
+    // constant-computing blocks in condition statements; the phi which used
+    // to consume the constant has already been removed.
+ if (value->hasUses())
+ return false;
+
+ if (!value->isConstant() || value->block() != block)
+ return false;
+ if (!block->phisEmpty())
+ return false;
+ for (MInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
+ if (*iter != value || !iter->isGoto())
+ return false;
+ }
+ return value->toConstant()->valueToBoolean(constBool);
+}
+
+// Find phis that are redundant:
+//
+// 1) phi(a, a)
+// can get replaced by a
+//
+// 2) phi(filtertypeset(a, type1), filtertypeset(a, type1))
+// equals filtertypeset(a, type1)
+//
+// 3) phi(a, filtertypeset(a, type1))
+// equals filtertypeset(a, type1 union type(a))
+// equals filtertypeset(a, type(a))
+// equals a
+//
+// 4) phi(filtertypeset(a, type1), filtertypeset(a, type2))
+// equals filtertypeset(a, type1 union type2)
+//
+//    This is the special case. We can only replace this with 'a' iff
+//    type(a) == type1 union type2, since optimizations could have
+//    happened based on a more specific phi type.
+static bool
+IsPhiRedudantFilter(MPhi* phi)
+{
+ // Handle (1) and (2)
+ if (phi->operandIfRedundant())
+ return true;
+
+ // Handle (3)
+ bool onlyFilters = false;
+ MDefinition* a = phi->getOperand(0);
+ if (a->isFilterTypeSet()) {
+ a = a->toFilterTypeSet()->input();
+ onlyFilters = true;
+ }
+
+ for (size_t i = 1; i < phi->numOperands(); i++) {
+ MDefinition* operand = phi->getOperand(i);
+ if (operand == a) {
+ onlyFilters = false;
+ continue;
+ }
+ if (operand->isFilterTypeSet() && operand->toFilterTypeSet()->input() == a)
+ continue;
+ return false;
+ }
+ if (!onlyFilters)
+ return true;
+
+ // Handle (4)
+ MOZ_ASSERT(onlyFilters);
+ return EqualTypes(a->type(), a->resultTypeSet(),
+ phi->type(), phi->resultTypeSet());
+}
+
+// Determine whether phiBlock/testBlock simply compute a phi and perform a
+// test on it.
+static bool
+BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock, MPhi** pphi, MTest** ptest)
+{
+ *pphi = nullptr;
+ *ptest = nullptr;
+
+ if (phiBlock != testBlock) {
+ MOZ_ASSERT(phiBlock->numSuccessors() == 1 && phiBlock->getSuccessor(0) == testBlock);
+ if (!phiBlock->begin()->isGoto())
+ return false;
+ }
+
+ MInstruction* ins = *testBlock->begin();
+ if (!ins->isTest())
+ return false;
+ MTest* test = ins->toTest();
+ if (!test->input()->isPhi())
+ return false;
+ MPhi* phi = test->input()->toPhi();
+ if (phi->block() != phiBlock)
+ return false;
+
+ for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
+ MUse* use = *iter;
+ if (use->consumer() == test)
+ continue;
+ if (use->consumer()->isResumePoint()) {
+ MBasicBlock* useBlock = use->consumer()->block();
+ if (useBlock == phiBlock || useBlock == testBlock)
+ continue;
+ }
+ return false;
+ }
+
+ for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
+ if (*iter == phi)
+ continue;
+
+ if (IsPhiRedudantFilter(*iter))
+ continue;
+
+ return false;
+ }
+
+ if (phiBlock != testBlock && !testBlock->phisEmpty())
+ return false;
+
+ *pphi = phi;
+ *ptest = test;
+
+ return true;
+}
+
+// Change block so that it ends in a goto to the specified target block.
+// existingPred is an existing predecessor of the block.
+static void
+UpdateGotoSuccessor(TempAllocator& alloc, MBasicBlock* block, MBasicBlock* target,
+ MBasicBlock* existingPred)
+{
+ MInstruction* ins = block->lastIns();
+ MOZ_ASSERT(ins->isGoto());
+ ins->toGoto()->target()->removePredecessor(block);
+ block->discardLastIns();
+
+ MGoto* newGoto = MGoto::New(alloc, target);
+ block->end(newGoto);
+
+ target->addPredecessorSameInputsAs(block, existingPred);
+}
+
+// Change block so that it ends in a test of the specified value, going to
+// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
+// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
+// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
+// already ends in a test going to that block on a true/false result.
+static void
+UpdateTestSuccessors(TempAllocator& alloc, MBasicBlock* block,
+ MDefinition* value, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ MBasicBlock* existingPred)
+{
+ MInstruction* ins = block->lastIns();
+ if (ins->isTest()) {
+ MTest* test = ins->toTest();
+ MOZ_ASSERT(test->input() == value);
+
+ if (ifTrue != test->ifTrue()) {
+ test->ifTrue()->removePredecessor(block);
+ ifTrue->addPredecessorSameInputsAs(block, existingPred);
+ MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
+ test->replaceSuccessor(0, ifTrue);
+ }
+
+ if (ifFalse != test->ifFalse()) {
+ test->ifFalse()->removePredecessor(block);
+ ifFalse->addPredecessorSameInputsAs(block, existingPred);
+ MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
+ test->replaceSuccessor(1, ifFalse);
+ }
+
+ return;
+ }
+
+ MOZ_ASSERT(ins->isGoto());
+ ins->toGoto()->target()->removePredecessor(block);
+ block->discardLastIns();
+
+ MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
+ block->end(test);
+
+ ifTrue->addPredecessorSameInputsAs(block, existingPred);
+ ifFalse->addPredecessorSameInputsAs(block, existingPred);
+}
+
+static bool
+MaybeFoldConditionBlock(MIRGraph& graph, MBasicBlock* initialBlock)
+{
+ // Optimize the MIR graph to improve the code generated for conditional
+ // operations. A test like 'if (a ? b : c)' normally requires four blocks,
+ // with a phi for the intermediate value. This can be improved to use three
+ // blocks with no phi value, and if either b or c is constant,
+ // e.g. 'if (a ? b : 0)', then the block associated with that constant
+ // can be eliminated.
+
+ /*
+ * Look for a diamond pattern:
+ *
+ * initialBlock
+ * / \
+ * trueBranch falseBranch
+ * \ /
+ * phiBlock
+ * |
+ * testBlock
+ *
+ * Where phiBlock contains a single phi combining values pushed onto the
+ * stack by trueBranch and falseBranch, and testBlock contains a test on
+ * that phi. phiBlock and testBlock may be the same block; generated code
+ * will use different blocks if the (?:) op is in an inlined function.
+ */
+
+ MInstruction* ins = initialBlock->lastIns();
+ if (!ins->isTest())
+ return true;
+ MTest* initialTest = ins->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1)
+ return true;
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+ if (falseBranch->numPredecessors() != 1 || falseBranch->numSuccessors() != 1)
+ return true;
+ MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
+ if (phiBlock != falseBranch->getSuccessor(0))
+ return true;
+ if (phiBlock->numPredecessors() != 2)
+ return true;
+
+ if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() || falseBranch->isLoopBackedge())
+ return true;
+
+ MBasicBlock* testBlock = phiBlock;
+ if (testBlock->numSuccessors() == 1) {
+ if (testBlock->isLoopBackedge())
+ return true;
+ testBlock = testBlock->getSuccessor(0);
+ if (testBlock->numPredecessors() != 1)
+ return true;
+ }
+
+ // Make sure the test block does not have any outgoing loop backedges.
+ if (!SplitCriticalEdgesForBlock(graph, testBlock))
+ return false;
+
+ MPhi* phi;
+ MTest* finalTest;
+ if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest))
+ return true;
+
+ MDefinition* trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
+ MDefinition* falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
+
+ // OK, we found the desired pattern, now transform the graph.
+
+ // Patch up phis that filter their input.
+ for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
+ if (*iter == phi)
+ continue;
+
+ MOZ_ASSERT(IsPhiRedudantFilter(*iter));
+ MDefinition* redundant = (*iter)->operandIfRedundant();
+
+ if (!redundant) {
+ redundant = (*iter)->getOperand(0);
+ if (redundant->isFilterTypeSet())
+ redundant = redundant->toFilterTypeSet()->input();
+ }
+
+ (*iter)->replaceAllUsesWith(redundant);
+ }
+
+ // Remove the phi from phiBlock.
+ phiBlock->discardPhi(*phiBlock->phisBegin());
+
+ // If either trueBranch or falseBranch just computes a constant for the
+ // test, determine the block that branch will end up jumping to and eliminate
+ // the branch. Otherwise, change the end of the block to a test that jumps
+ // directly to successors of testBlock, rather than to testBlock itself.
+
+ MBasicBlock* trueTarget = trueBranch;
+ bool constBool;
+ if (BlockComputesConstant(trueBranch, trueResult, &constBool)) {
+ trueTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
+ phiBlock->removePredecessor(trueBranch);
+ graph.removeBlock(trueBranch);
+ } else if (initialTest->input() == trueResult) {
+ UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(), testBlock);
+ } else {
+ UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
+ finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
+ }
+
+ MBasicBlock* falseTarget = falseBranch;
+ if (BlockComputesConstant(falseBranch, falseResult, &constBool)) {
+ falseTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
+ phiBlock->removePredecessor(falseBranch);
+ graph.removeBlock(falseBranch);
+ } else if (initialTest->input() == falseResult) {
+ UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(), testBlock);
+ } else {
+ UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
+ finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
+ }
+
+ // Short circuit the initial test to skip any constant branch eliminated above.
+ UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
+ trueTarget, falseTarget, testBlock);
+
+ // Remove phiBlock, if different from testBlock.
+ if (phiBlock != testBlock) {
+ testBlock->removePredecessor(phiBlock);
+ graph.removeBlock(phiBlock);
+ }
+
+ // Remove testBlock itself.
+ finalTest->ifTrue()->removePredecessor(testBlock);
+ finalTest->ifFalse()->removePredecessor(testBlock);
+ graph.removeBlock(testBlock);
+
+ return true;
+}
+
+bool
+jit::FoldTests(MIRGraph& graph)
+{
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ if (!MaybeFoldConditionBlock(graph, *block))
+ return false;
+ }
+ return true;
+}
+
+static void
+EliminateTriviallyDeadResumePointOperands(MIRGraph& graph, MResumePoint* rp)
+{
+ // If we will pop the top of the stack immediately after resuming,
+ // then don't preserve the top value in the resume point.
+ if (rp->mode() != MResumePoint::ResumeAt || *rp->pc() != JSOP_POP)
+ return;
+
+ size_t top = rp->stackDepth() - 1;
+ MOZ_ASSERT(!rp->isObservableOperand(top));
+
+ MDefinition* def = rp->getOperand(top);
+ if (def->isConstant())
+ return;
+
+ MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
+ rp->replaceOperand(top, constant);
+}
+
+// Operands to a resume point which are dead at the point of the resume can be
+// replaced with a magic value. This analysis supports limited detection of
+// dead operands, pruning those which are defined in the resume point's basic
+// block and have no uses outside the block or at points later than the resume
+// point.
+//
+// This is intended to ensure that extra resume points within a basic block
+// will not artificially extend the lifetimes of any SSA values. This could
+// otherwise occur if the new resume point captured a value which is created
+// between the old and new resume point and is dead at the new resume point.
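+//
+// For example, a value that is defined and fully consumed within a block does
+// not need to be kept alive by resume points appearing later in that block;
+// such resume point operands are replaced by the optimized-out magic value.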
+bool
+jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph)
+{
+ // If we are compiling try blocks, locals and arguments may be observable
+ // from catch or finally blocks (which Ion does not compile). For now just
+ // disable the pass in this case.
+ if (graph.hasTryBlock())
+ return true;
+
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
+ return false;
+
+ if (MResumePoint* rp = block->entryResumePoint())
+ EliminateTriviallyDeadResumePointOperands(graph, rp);
+
+ // The logic below can get confused on infinite loops.
+ if (block->isLoopHeader() && block->backedge() == *block)
+ continue;
+
+ for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+ if (MResumePoint* rp = ins->resumePoint())
+ EliminateTriviallyDeadResumePointOperands(graph, rp);
+
+ // No benefit to replacing constant operands with other constants.
+ if (ins->isConstant())
+ continue;
+
+ // Scanning uses does not give us sufficient information to tell
+ // where instructions that are involved in box/unbox operations or
+ // parameter passing might be live. Rewriting uses of these terms
+ // in resume points may affect the interpreter's behavior. Rather
+ // than doing a more sophisticated analysis, just ignore these.
+ if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() ||
+ ins->isComputeThis() || ins->isFilterTypeSet())
+ {
+ continue;
+ }
+
+ // Early intermediate values captured by resume points, such as
+ // TypedObject, ArrayState and its allocation, may be legitimately
+ // dead in Ion code, but are still needed if we bail out. They can
+            // be recovered on bailout.
+ if (ins->isNewDerivedTypedObject() || ins->isRecoveredOnBailout()) {
+ MOZ_ASSERT(ins->canRecoverOnBailout());
+ continue;
+ }
+
+ // If the instruction's behavior has been constant folded into a
+ // separate instruction, we can't determine precisely where the
+ // instruction becomes dead and can't eliminate its uses.
+ if (ins->isImplicitlyUsed() || ins->isUseRemoved())
+ continue;
+
+ // Check if this instruction's result is only used within the
+ // current block, and keep track of its last use in a definition
+ // (not resume point). This requires the instructions in the block
+ // to be numbered, ensured by running this immediately after alias
+ // analysis.
+ uint32_t maxDefinition = 0;
+ for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); uses++) {
+ MNode* consumer = uses->consumer();
+ if (consumer->isResumePoint()) {
+                    // If the instruction is captured by one of the resume
+                    // points, then it might be observed indirectly while the
+                    // frame is live on the stack, so it has to be computed.
+ MResumePoint* resume = consumer->toResumePoint();
+ if (resume->isObservableOperand(*uses)) {
+ maxDefinition = UINT32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ if (def->block() != *block || def->isBox() || def->isPhi()) {
+ maxDefinition = UINT32_MAX;
+ break;
+ }
+ maxDefinition = Max(maxDefinition, def->id());
+ }
+ if (maxDefinition == UINT32_MAX)
+ continue;
+
+ // Walk the uses a second time, removing any in resume points after
+ // the last use in a definition.
+ for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
+ MUse* use = *uses++;
+ if (use->consumer()->isDefinition())
+ continue;
+ MResumePoint* mrp = use->consumer()->toResumePoint();
+ if (mrp->block() != *block ||
+ !mrp->instruction() ||
+ mrp->instruction() == *ins ||
+ mrp->instruction()->id() <= maxDefinition)
+ {
+ continue;
+ }
+
+ if (!graph.alloc().ensureBallast())
+ return false;
+
+ // Store an optimized out magic value in place of all dead
+ // resume point operands. Making any such substitution can in
+ // general alter the interpreter's behavior, even though the
+ // code is dead, as the interpreter will still execute opcodes
+                // whose effects cannot be observed. If the magic value were
+                // to flow to, say, a dead property access, the
+                // interpreter could throw an exception; we avoid this problem
+ // by removing dead operands before removing dead code.
+ MConstant* constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
+ block->insertBefore(*(block->begin()), constant);
+ use->replaceProducer(constant);
+ }
+ }
+ }
+
+ return true;
+}
+
+// Test whether |def| would be needed if it had no uses.
+bool
+js::jit::DeadIfUnused(const MDefinition* def)
+{
+ return !def->isEffectful() &&
+ (!def->isGuard() || def->block() == def->block()->graph().osrBlock()) &&
+ !def->isGuardRangeBailouts() &&
+ !def->isControlInstruction() &&
+ (!def->isInstruction() || !def->toInstruction()->resumePoint());
+}
+
+// Test whether |def| may be safely discarded, due to being dead or due to being
+// located in a basic block which has itself been marked for discarding.
+bool
+js::jit::IsDiscardable(const MDefinition* def)
+{
+ return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
+}
+
+// Instructions are useless if they are unused and have no side effects.
+// This pass eliminates useless instructions.
+// The graph itself is unchanged.
+bool
+jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph)
+{
+ // Traverse in postorder so that we hit uses before definitions.
+ // Traverse instruction list backwards for the same reason.
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
+ return false;
+
+ // Remove unused instructions.
+ for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
+ MInstruction* inst = *iter++;
+            if (js::jit::IsDiscardable(inst))
+                block->discard(inst);
+ }
+ }
+
+ return true;
+}
+
+static inline bool
+IsPhiObservable(MPhi* phi, Observability observe)
+{
+ // If the phi has uses which are not reflected in SSA, then behavior in the
+ // interpreter may be affected by removing the phi.
+ if (phi->isImplicitlyUsed() || phi->isUseRemoved())
+ return true;
+
+ // Check for uses of this phi node outside of other phi nodes.
+ // Note that, initially, we skip reading resume points, which we
+ // don't count as actual uses. If the only uses are resume points,
+ // then the SSA name is never consumed by the program. However,
+ // after optimizations have been performed, it's possible that the
+ // actual uses in the program have been (incorrectly) optimized
+ // away, so we must be more conservative and consider resume
+ // points as well.
+ for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
+ MNode* consumer = iter->consumer();
+ if (consumer->isResumePoint()) {
+ MResumePoint* resume = consumer->toResumePoint();
+ if (observe == ConservativeObservability)
+ return true;
+ if (resume->isObservableOperand(*iter))
+ return true;
+ } else {
+ MDefinition* def = consumer->toDefinition();
+ if (!def->isPhi())
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Handles cases like:
+// x is phi(a, x) --> a
+// x is phi(a, a) --> a
+static inline MDefinition*
+IsPhiRedundant(MPhi* phi)
+{
+ MDefinition* first = phi->operandIfRedundant();
+ if (first == nullptr)
+ return nullptr;
+
+ // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
+ if (phi->isImplicitlyUsed())
+ first->setImplicitlyUsedUnchecked();
+
+ return first;
+}
+
+bool
+jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
+ Observability observe)
+{
+ // Eliminates redundant or unobservable phis from the graph. A
+ // redundant phi is something like b = phi(a, a) or b = phi(a, b),
+ // both of which can be replaced with a. An unobservable phi is
+    // one whose value is never used in the program.
+ //
+ // Note that we must be careful not to eliminate phis representing
+ // values that the interpreter will require later. When the graph
+ // is first constructed, we can be more aggressive, because there
+ // is a greater correspondence between the CFG and the bytecode.
+ // After optimizations such as GVN have been performed, however,
+ // the bytecode and CFG may not correspond as closely to one
+ // another. In that case, we must be more conservative. The flag
+ // |conservativeObservability| is used to indicate that eliminate
+ // phis is being run after some optimizations have been performed,
+ // and thus we should use more conservative rules about
+ // observability. The particular danger is that we can optimize
+ // away uses of a phi because we think they are not executable,
+ // but the foundation for that assumption is false TI information
+ // that will eventually be invalidated. Therefore, if
+ // |conservativeObservability| is set, we will consider any use
+ // from a resume point to be observable. Otherwise, we demand a
+ // use from an actual instruction.
+
+ Vector<MPhi*, 16, SystemAllocPolicy> worklist;
+
+ // Add all observable phis to a worklist. We use the "in worklist" bit to
+ // mean "this phi is live".
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ MPhiIterator iter = block->phisBegin();
+ while (iter != block->phisEnd()) {
+ MPhi* phi = *iter++;
+
+ if (mir->shouldCancel("Eliminate Phis (populate loop)"))
+ return false;
+
+            // Flag all as unused; only observable phis will be marked as used
+            // when processed by the worklist.
+ phi->setUnused();
+
+ // If the phi is redundant, remove it here.
+ if (MDefinition* redundant = IsPhiRedundant(phi)) {
+ phi->justReplaceAllUsesWith(redundant);
+ block->discardPhi(phi);
+ continue;
+ }
+
+ // Enqueue observable Phis.
+ if (IsPhiObservable(phi, observe)) {
+ phi->setInWorklist();
+ if (!worklist.append(phi))
+ return false;
+ }
+ }
+ }
+
+ // Iteratively mark all phis reachable from live phis.
+ while (!worklist.empty()) {
+ if (mir->shouldCancel("Eliminate Phis (worklist)"))
+ return false;
+
+ MPhi* phi = worklist.popCopy();
+ MOZ_ASSERT(phi->isUnused());
+ phi->setNotInWorklist();
+
+ // The removal of Phis can produce newly redundant phis.
+ if (MDefinition* redundant = IsPhiRedundant(phi)) {
+ // Add to the worklist the used phis which are impacted.
+ for (MUseDefIterator it(phi); it; it++) {
+ if (it.def()->isPhi()) {
+ MPhi* use = it.def()->toPhi();
+ if (!use->isUnused()) {
+ use->setUnusedUnchecked();
+ use->setInWorklist();
+ if (!worklist.append(use))
+ return false;
+ }
+ }
+ }
+ phi->justReplaceAllUsesWith(redundant);
+ } else {
+ // Otherwise flag them as used.
+ phi->setNotUnused();
+ }
+
+ // The current phi is/was used, so all its operands are used.
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
+ continue;
+ in->setInWorklist();
+ if (!worklist.append(in->toPhi()))
+ return false;
+ }
+ }
+
+ // Sweep dead phis.
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ MPhiIterator iter = block->phisBegin();
+ while (iter != block->phisEnd()) {
+ MPhi* phi = *iter++;
+ if (phi->isUnused()) {
+ if (!phi->optimizeOutAllUses(graph.alloc()))
+ return false;
+ block->discardPhi(phi);
+ }
+ }
+ }
+
+ return true;
+}
+
+namespace {
+
+// The type analysis algorithm inserts conversions and box/unbox instructions
+// to make the IR graph well-typed for future passes.
+//
+// Phi adjustment: If a phi's inputs are all the same type, the phi is
+// specialized to return that type.
+//
+// Input adjustment: Each instruction is asked to apply conversion operations
+// to its inputs. This may include Box, Unbox, or other instruction-specific
+// type conversion operations.
+//
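+// For example, a phi whose operands are all Int32 is specialized to Int32,
+// while a phi mixing Int32 and Double operands is specialized to Double and
+// MToDouble conversions are inserted for the Int32 inputs.
+//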
+class TypeAnalyzer
+{
+ MIRGenerator* mir;
+ MIRGraph& graph;
+ Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;
+
+ TempAllocator& alloc() const {
+ return graph.alloc();
+ }
+
+ bool addPhiToWorklist(MPhi* phi) {
+ if (phi->isInWorklist())
+ return true;
+ if (!phiWorklist_.append(phi))
+ return false;
+ phi->setInWorklist();
+ return true;
+ }
+ MPhi* popPhi() {
+ MPhi* phi = phiWorklist_.popCopy();
+ phi->setNotInWorklist();
+ return phi;
+ }
+
+ bool respecialize(MPhi* phi, MIRType type);
+ bool propagateSpecialization(MPhi* phi);
+ bool specializePhis();
+ void replaceRedundantPhi(MPhi* phi);
+ bool adjustPhiInputs(MPhi* phi);
+ bool adjustInputs(MDefinition* def);
+ bool insertConversions();
+
+ bool checkFloatCoherency();
+ bool graphContainsFloat32();
+ bool markPhiConsumers();
+ bool markPhiProducers();
+ bool specializeValidFloatOps();
+ bool tryEmitFloatOperations();
+
+ public:
+ TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph)
+ : mir(mir), graph(graph)
+ { }
+
+ bool analyze();
+};
+
+} /* anonymous namespace */
+
+// Try to specialize this phi based on its non-cyclic inputs.
+static MIRType
+GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
+{
+#ifdef DEBUG
+ // Check that different magic constants aren't flowing together. Ignore
+ // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
+ // away.
+ MIRType magicType = MIRType::None;
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == MIRType::MagicOptimizedArguments ||
+ in->type() == MIRType::MagicHole ||
+ in->type() == MIRType::MagicIsConstructing)
+ {
+ if (magicType == MIRType::None)
+ magicType = in->type();
+ MOZ_ASSERT(magicType == in->type());
+ }
+ }
+#endif
+
+ *hasInputsWithEmptyTypes = false;
+
+ MIRType type = MIRType::None;
+ bool convertibleToFloat32 = false;
+ bool hasPhiInputs = false;
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->isPhi()) {
+ hasPhiInputs = true;
+ if (!in->toPhi()->triedToSpecialize())
+ continue;
+ if (in->type() == MIRType::None) {
+ // The operand is a phi we tried to specialize, but we were
+ // unable to guess its type. propagateSpecialization will
+ // propagate the type to this phi when it becomes known.
+ continue;
+ }
+ }
+
+ // Ignore operands which we've never observed.
+ if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
+ *hasInputsWithEmptyTypes = true;
+ continue;
+ }
+
+ if (type == MIRType::None) {
+ type = in->type();
+ if (in->canProduceFloat32())
+ convertibleToFloat32 = true;
+ continue;
+ }
+ if (type != in->type()) {
+ if (convertibleToFloat32 && in->type() == MIRType::Float32) {
+                // If we have only seen definitions convertible to Float32 so
+                // far and now encounter a Float32 value, promote the previous
+                // values to Float32.
+ type = MIRType::Float32;
+ } else if (IsTypeRepresentableAsDouble(type) &&
+ IsTypeRepresentableAsDouble(in->type()))
+ {
+ // Specialize phis with int32 and double operands as double.
+ type = MIRType::Double;
+ convertibleToFloat32 &= in->canProduceFloat32();
+ } else {
+ return MIRType::Value;
+ }
+ }
+ }
+
+ if (type == MIRType::None && !hasPhiInputs) {
+ // All inputs are non-phis with empty typesets. Use MIRType::Value
+ // in this case, as it's impossible to get better type information.
+ MOZ_ASSERT(*hasInputsWithEmptyTypes);
+ type = MIRType::Value;
+ }
+
+ return type;
+}
+
+bool
+TypeAnalyzer::respecialize(MPhi* phi, MIRType type)
+{
+ if (phi->type() == type)
+ return true;
+ phi->specialize(type);
+ return addPhiToWorklist(phi);
+}
+
+bool
+TypeAnalyzer::propagateSpecialization(MPhi* phi)
+{
+ MOZ_ASSERT(phi->type() != MIRType::None);
+
+ // Verify that this specialization matches any phis depending on it.
+ for (MUseDefIterator iter(phi); iter; iter++) {
+ if (!iter.def()->isPhi())
+ continue;
+ MPhi* use = iter.def()->toPhi();
+ if (!use->triedToSpecialize())
+ continue;
+ if (use->type() == MIRType::None) {
+ // We tried to specialize this phi, but were unable to guess its
+ // type. Now that we know the type of one of its operands, we can
+ // specialize it.
+ if (!respecialize(use, phi->type()))
+ return false;
+ continue;
+ }
+ if (use->type() != phi->type()) {
+            // Specialize as float32 when one phi is int32 (and can produce a
+            // float32) and the other is float32.
+ if ((use->type() == MIRType::Int32 && use->canProduceFloat32() && phi->type() == MIRType::Float32) ||
+ (phi->type() == MIRType::Int32 && phi->canProduceFloat32() && use->type() == MIRType::Float32))
+ {
+ if (!respecialize(use, MIRType::Float32))
+ return false;
+ continue;
+ }
+
+ // Specialize phis with int32 and double operands as double.
+ if (IsTypeRepresentableAsDouble(use->type()) &&
+ IsTypeRepresentableAsDouble(phi->type()))
+ {
+ if (!respecialize(use, MIRType::Double))
+ return false;
+ continue;
+ }
+
+ // This phi in our use chain can now no longer be specialized.
+ if (!respecialize(use, MIRType::Value))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+TypeAnalyzer::specializePhis()
+{
+ Vector<MPhi*, 0, SystemAllocPolicy> phisWithEmptyInputTypes;
+
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Specialize Phis (main loop)"))
+ return false;
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ if (mir->shouldCancel("Specialize Phis (inner loop)"))
+ return false;
+
+ bool hasInputsWithEmptyTypes;
+ MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
+ phi->specialize(type);
+ if (type == MIRType::None) {
+ // We tried to guess the type but failed because all operands are
+ // phis we still have to visit. Set the triedToSpecialize flag but
+ // don't propagate the type to other phis, propagateSpecialization
+ // will do that once we know the type of one of the operands.
+
+ // Edge case: when this phi has a non-phi input with an empty
+ // typeset, it's possible for two phis to have a cyclic
+ // dependency and they will both have MIRType::None. Specialize
+ // such phis to MIRType::Value later on.
+ if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
+ return false;
+ continue;
+ }
+ if (!propagateSpecialization(*phi))
+ return false;
+ }
+ }
+
+ do {
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel("Specialize Phis (worklist)"))
+ return false;
+
+ MPhi* phi = popPhi();
+ if (!propagateSpecialization(phi))
+ return false;
+ }
+
+ // When two phis have a cyclic dependency and inputs that have an empty
+ // typeset (which are ignored by GuessPhiType), we may still have to
+ // specialize these to MIRType::Value.
+ while (!phisWithEmptyInputTypes.empty()) {
+ if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
+ return false;
+
+ MPhi* phi = phisWithEmptyInputTypes.popCopy();
+ if (phi->type() == MIRType::None) {
+ phi->specialize(MIRType::Value);
+ if (!propagateSpecialization(phi))
+ return false;
+ }
+ }
+ } while (!phiWorklist_.empty());
+
+ return true;
+}
+
+bool
+TypeAnalyzer::adjustPhiInputs(MPhi* phi)
+{
+ MIRType phiType = phi->type();
+ MOZ_ASSERT(phiType != MIRType::None);
+
+ // If we specialized a type that's not Value, there are 3 cases:
+ // 1. Every input is of that type.
+ // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
+ // 3. Inputs were doubles and int32s, and was specialized to double.
+ if (phiType != MIRType::Value) {
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == phiType)
+ continue;
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ if (in->isBox() && in->toBox()->input()->type() == phiType) {
+ phi->replaceOperand(i, in->toBox()->input());
+ } else {
+ MInstruction* replacement;
+
+ if (phiType == MIRType::Double && IsFloatType(in->type())) {
+ // Convert int32 operands to double.
+ replacement = MToDouble::New(alloc(), in);
+ } else if (phiType == MIRType::Float32) {
+ if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
+ replacement = MToFloat32::New(alloc(), in);
+ } else {
+ // See comment below
+ if (in->type() != MIRType::Value) {
+ MBox* box = MBox::New(alloc(), in);
+ in->block()->insertBefore(in->block()->lastIns(), box);
+ in = box;
+ }
+
+ MUnbox* unbox = MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
+ in->block()->insertBefore(in->block()->lastIns(), unbox);
+ replacement = MToFloat32::New(alloc(), in);
+ }
+ } else {
+ // If we know this branch will fail to convert to phiType,
+ // insert a box that'll immediately fail in the fallible unbox
+ // below.
+ if (in->type() != MIRType::Value) {
+ MBox* box = MBox::New(alloc(), in);
+ in->block()->insertBefore(in->block()->lastIns(), box);
+ in = box;
+ }
+
+ // Be optimistic and insert unboxes when the operand is a
+ // value.
+ replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
+ }
+
+ in->block()->insertBefore(in->block()->lastIns(), replacement);
+ phi->replaceOperand(i, replacement);
+ }
+ }
+
+ return true;
+ }
+
+ // Box every typed input.
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == MIRType::Value)
+ continue;
+
+ // The input is being explicitly unboxed, so sneak past and grab
+ // the original box.
+ if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input()))
+ in = in->toUnbox()->input();
+
+ if (in->type() != MIRType::Value) {
+ if (!alloc().ensureBallast())
+ return false;
+
+ MBasicBlock* pred = phi->block()->getPredecessor(i);
+ in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
+ }
+
+ phi->replaceOperand(i, in);
+ }
+
+ return true;
+}
+
+bool
+TypeAnalyzer::adjustInputs(MDefinition* def)
+{
+ // Definitions such as MPhi have no type policy.
+ if (!def->isInstruction())
+ return true;
+
+ MInstruction* ins = def->toInstruction();
+ TypePolicy* policy = ins->typePolicy();
+ if (policy && !policy->adjustInputs(alloc(), ins))
+ return false;
+ return true;
+}
+
+void
+TypeAnalyzer::replaceRedundantPhi(MPhi* phi)
+{
+ MBasicBlock* block = phi->block();
+ js::Value v;
+ switch (phi->type()) {
+ case MIRType::Undefined:
+ v = UndefinedValue();
+ break;
+ case MIRType::Null:
+ v = NullValue();
+ break;
+ case MIRType::MagicOptimizedArguments:
+ v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
+ break;
+ case MIRType::MagicOptimizedOut:
+ v = MagicValue(JS_OPTIMIZED_OUT);
+ break;
+ case MIRType::MagicUninitializedLexical:
+ v = MagicValue(JS_UNINITIALIZED_LEXICAL);
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+ MConstant* c = MConstant::New(alloc(), v);
+ // The instruction pass will insert the box
+ block->insertBefore(*(block->begin()), c);
+ phi->justReplaceAllUsesWith(c);
+}
+
+bool
+TypeAnalyzer::insertConversions()
+{
+    // Instructions are processed in reverse postorder: all defs are
+    // seen before uses. This ensures that output adjustment (which may rewrite
+ // inputs of uses) does not conflict with input adjustment.
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Insert Conversions"))
+ return false;
+
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) {
+ MPhi* phi = *iter++;
+ if (phi->type() == MIRType::Undefined ||
+ phi->type() == MIRType::Null ||
+ phi->type() == MIRType::MagicOptimizedArguments ||
+ phi->type() == MIRType::MagicOptimizedOut ||
+ phi->type() == MIRType::MagicUninitializedLexical)
+ {
+ replaceRedundantPhi(phi);
+ block->discardPhi(phi);
+ } else {
+ if (!adjustPhiInputs(phi))
+ return false;
+ }
+ }
+
+ // AdjustInputs can add/remove/mutate instructions before and after the
+ // current instruction. Only increment the iterator after it is finished.
+ for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
+ if (!alloc().ensureBallast())
+ return false;
+
+ if (!adjustInputs(*iter))
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function tries to emit Float32 specialized operations whenever it's possible.
+// MIR nodes are flagged as:
+// - Producers, when they can create Float32 that might need to be coerced into a Double.
+// Loads in Float32 arrays and conversions to Float32 are producers.
+// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
+// Stores in Float32 arrays and conversions to Float32 are consumers.
+// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
+// does not result in a compound loss of precision. This is the case for +, -, /, * with 2
+// operands, for instance. However, an addition with 3 operands is not commutative anymore,
+// so an intermediate coercion is needed.
+// Except for phis, all these flags are known after Ion building, so they cannot change during
+// the process.
+//
+// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
+// has only producers as inputs and consumers as uses, we can specialize the operation as a
+// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
+// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
+//
+// Phis have a special status. Phis need to be flagged as producers or consumers as they can
+// be inputs or outputs of commutative instructions. Fortunately, the producer
+// and consumer properties are such that we can deduce them using all non-phi
+// inputs first (which form an initial phi graph) and then propagate the
+// properties from one phi to another using a fixed-point algorithm. The
+// algorithm is guaranteed to terminate, as each iteration flags no more phis
+// than the previous one (so the worst steady-state case is all phis being
+// flagged as false).
+//
+// In a nutshell, the algorithm applies three passes:
+// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
+// all its non-phi uses. Then each phi propagates its value to other phis. If after propagation,
+// the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
+// consumer if all of its uses are consumers.
+// 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
+// the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
+// 3 - Go through all commutative operations and ensure their inputs are all producers and their
+// uses are all consumers.
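+//
+// For example, in 'f32[i] = f32[j] + f32[k]' (with f32 a Float32Array), the
+// two loads are producers, the store is a consumer, and the addition is
+// Float32 commutative, so the addition can be specialized to a Float32
+// operation.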
+bool
+TypeAnalyzer::markPhiConsumers()
+{
+ MOZ_ASSERT(phiWorklist_.empty());
+
+ // Iterate in postorder so worklist is initialized to RPO.
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
+ return false;
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
+ MOZ_ASSERT(!phi->isInWorklist());
+ bool canConsumeFloat32 = true;
+ for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
+ MDefinition* usedef = use.def();
+ canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
+ }
+ phi->setCanConsumeFloat32(canConsumeFloat32);
+ if (canConsumeFloat32 && !addPhiToWorklist(*phi))
+ return false;
+ }
+ }
+
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
+ return false;
+
+ MPhi* phi = popPhi();
+ MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));
+
+ bool validConsumer = true;
+ for (MUseDefIterator use(phi); use; use++) {
+ MDefinition* def = use.def();
+ if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
+ validConsumer = false;
+ break;
+ }
+ }
+
+ if (validConsumer)
+ continue;
+
+ // Propagate invalidated phis
+ phi->setCanConsumeFloat32(false);
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ MDefinition* input = phi->getOperand(i);
+ if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
+ {
+ if (!addPhiToWorklist(input->toPhi()))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool
+TypeAnalyzer::markPhiProducers()
+{
+ MOZ_ASSERT(phiWorklist_.empty());
+
+ // Iterate in reverse postorder so worklist is initialized to PO.
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
+ return false;
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
+ MOZ_ASSERT(!phi->isInWorklist());
+ bool canProduceFloat32 = true;
+ for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
+ MDefinition* input = phi->getOperand(i);
+ canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
+ }
+ phi->setCanProduceFloat32(canProduceFloat32);
+ if (canProduceFloat32 && !addPhiToWorklist(*phi))
+ return false;
+ }
+ }
+
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
+ return false;
+
+ MPhi* phi = popPhi();
+ MOZ_ASSERT(phi->canProduceFloat32());
+
+ bool validProducer = true;
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ MDefinition* input = phi->getOperand(i);
+ if (input->isPhi() && !input->canProduceFloat32()) {
+ validProducer = false;
+ break;
+ }
+ }
+
+ if (validProducer)
+ continue;
+
+ // Propagate invalidated phis
+ phi->setCanProduceFloat32(false);
+ for (MUseDefIterator use(phi); use; use++) {
+ MDefinition* def = use.def();
+ if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
+ {
+ if (!addPhiToWorklist(def->toPhi()))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool
+TypeAnalyzer::specializeValidFloatOps()
+{
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
+ return false;
+
+ for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
+ if (!ins->isFloat32Commutative())
+ continue;
+
+ if (ins->type() == MIRType::Float32)
+ continue;
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ // This call will try to specialize the instruction iff all uses are consumers and
+ // all inputs are producers.
+ ins->trySpecializeFloat32(alloc());
+ }
+ }
+ return true;
+}
+
+bool
+TypeAnalyzer::graphContainsFloat32()
+{
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
+ for (MDefinitionIterator def(*block); def; def++) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
+ return false;
+
+ if (def->type() == MIRType::Float32)
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+TypeAnalyzer::tryEmitFloatOperations()
+{
+    // Asm.js uses ahead-of-time type checks to specialize operations, so there
+    // is no need to check them again at this point.
+ if (mir->compilingWasm())
+ return true;
+
+ // Check ahead of time that there is at least one definition typed as Float32, otherwise we
+ // don't need this pass.
+ if (!graphContainsFloat32())
+ return true;
+
+ if (!markPhiConsumers())
+ return false;
+ if (!markPhiProducers())
+ return false;
+ if (!specializeValidFloatOps())
+ return false;
+ return true;
+}
+
+bool
+TypeAnalyzer::checkFloatCoherency()
+{
+#ifdef DEBUG
+ // Asserts that all Float32 instructions are flowing into Float32 consumers or specialized
+ // operations
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel("Check Float32 coherency"))
+ return false;
+
+ for (MDefinitionIterator def(*block); def; def++) {
+ if (def->type() != MIRType::Float32)
+ continue;
+
+ for (MUseDefIterator use(*def); use; use++) {
+ MDefinition* consumer = use.def();
+ MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
+ }
+ }
+ }
+#endif
+ return true;
+}
+
+bool
+TypeAnalyzer::analyze()
+{
+ if (!tryEmitFloatOperations())
+ return false;
+ if (!specializePhis())
+ return false;
+ if (!insertConversions())
+ return false;
+ if (!checkFloatCoherency())
+ return false;
+ return true;
+}
+
+bool
+jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph)
+{
+ TypeAnalyzer analyzer(mir, graph);
+
+ if (!analyzer.analyze())
+ return false;
+
+ return true;
+}
+
+// Check if `def` appears only as the N-th operand of `useDef`.
+static inline bool
+IsExclusiveNthOperand(MDefinition* useDef, size_t n, MDefinition* def)
+{
+ uint32_t num = useDef->numOperands();
+ if (n >= num || useDef->getOperand(n) != def)
+ return false;
+
+ for (uint32_t i = 0; i < num; i++) {
+ if (i == n)
+ continue;
+ if (useDef->getOperand(i) == def)
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+IsExclusiveThisArg(MCall* call, MDefinition* def)
+{
+ return IsExclusiveNthOperand(call, MCall::IndexOfThis(), def);
+}
+
+static bool
+IsExclusiveFirstArg(MCall* call, MDefinition* def)
+{
+ return IsExclusiveNthOperand(call, MCall::IndexOfArgument(0), def);
+}
+
+static bool
+IsRegExpHoistableCall(MCall* call, MDefinition* def)
+{
+ if (call->isConstructing())
+ return false;
+
+ JSAtom* name;
+ if (WrappedFunction* fun = call->getSingleTarget()) {
+ if (!fun->isSelfHostedBuiltin())
+ return false;
+ name = GetSelfHostedFunctionName(fun->rawJSFunction());
+ } else {
+ MDefinition* funDef = call->getFunction();
+ if (funDef->isDebugCheckSelfHosted())
+ funDef = funDef->toDebugCheckSelfHosted()->input();
+ if (funDef->isTypeBarrier())
+ funDef = funDef->toTypeBarrier()->input();
+
+ if (!funDef->isCallGetIntrinsicValue())
+ return false;
+ name = funDef->toCallGetIntrinsicValue()->name();
+ }
+
+    // Hoistable only if the RegExp is the first argument of one of these
+    // self-hosted functions.
+ CompileRuntime* runtime = GetJitContext()->runtime;
+ if (name == runtime->names().RegExpBuiltinExec ||
+ name == runtime->names().UnwrapAndCallRegExpBuiltinExec ||
+ name == runtime->names().RegExpMatcher ||
+ name == runtime->names().RegExpTester ||
+ name == runtime->names().RegExpSearcher)
+ {
+ return IsExclusiveFirstArg(call, def);
+ }
+
+ if (name == runtime->names().RegExp_prototype_Exec)
+ return IsExclusiveThisArg(call, def);
+
+ return false;
+}
+
+static bool
+CanCompareRegExp(MCompare* compare, MDefinition* def)
+{
+ MDefinition* value;
+ if (compare->lhs() == def) {
+ value = compare->rhs();
+ } else {
+ MOZ_ASSERT(compare->rhs() == def);
+ value = compare->lhs();
+ }
+
+    // Comparing two regexps that weren't cloned will give a different result
+    // than if they were cloned.
+ if (value->mightBeType(MIRType::Object))
+ return false;
+
+    // Make sure @@toPrimitive is not called, as it could observe the
+    // difference between a cloned and a non-cloned regexp.
+
+ JSOp op = compare->jsop();
+ // Strict equality comparison won't invoke @@toPrimitive.
+ if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE)
+ return true;
+
+ if (op != JSOP_EQ && op != JSOP_NE) {
+        // Relational comparisons always invoke @@toPrimitive.
+ MOZ_ASSERT(op == JSOP_GT || op == JSOP_GE || op == JSOP_LT || op == JSOP_LE);
+ return false;
+ }
+
+ // Loose equality comparison can invoke @@toPrimitive.
+ if (value->mightBeType(MIRType::Boolean) || value->mightBeType(MIRType::String) ||
+ value->mightBeType(MIRType::Int32) ||
+ value->mightBeType(MIRType::Double) || value->mightBeType(MIRType::Float32) ||
+ value->mightBeType(MIRType::Symbol))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+static inline void
+SetNotInWorklist(MDefinitionVector& worklist)
+{
+ for (size_t i = 0; i < worklist.length(); i++)
+ worklist[i]->setNotInWorklist();
+}
+
+static bool
+IsRegExpHoistable(MIRGenerator* mir, MDefinition* regexp, MDefinitionVector& worklist,
+ bool* hoistable)
+{
+ MOZ_ASSERT(worklist.length() == 0);
+
+ if (!worklist.append(regexp))
+ return false;
+ regexp->setInWorklist();
+
+ for (size_t i = 0; i < worklist.length(); i++) {
+ MDefinition* def = worklist[i];
+ if (mir->shouldCancel("IsRegExpHoistable outer loop"))
+ return false;
+
+ for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
+ if (mir->shouldCancel("IsRegExpHoistable inner loop"))
+ return false;
+
+            // Ignore resume points. At this point all uses are listed;
+            // no DCE or GVN has happened yet.
+ if (use->consumer()->isResumePoint())
+ continue;
+
+ MDefinition* useDef = use->consumer()->toDefinition();
+
+ // Step through a few white-listed ops.
+ if (useDef->isPhi() || useDef->isFilterTypeSet() || useDef->isGuardShape()) {
+ if (useDef->isInWorklist())
+ continue;
+
+ if (!worklist.append(useDef))
+ return false;
+ useDef->setInWorklist();
+ continue;
+ }
+
+            // Instructions that don't invoke unknown code that may modify the
+            // RegExp instance or pass it elsewhere.
+ if (useDef->isRegExpMatcher() || useDef->isRegExpTester() ||
+ useDef->isRegExpSearcher())
+ {
+ if (IsExclusiveNthOperand(useDef, 0, def))
+ continue;
+ } else if (useDef->isLoadFixedSlot() || useDef->isTypeOf()) {
+ continue;
+ } else if (useDef->isCompare()) {
+ if (CanCompareRegExp(useDef->toCompare(), def))
+ continue;
+ }
+            // Instructions that modify the `lastIndex` property.
+ else if (useDef->isStoreFixedSlot()) {
+ if (IsExclusiveNthOperand(useDef, 0, def)) {
+ MStoreFixedSlot* store = useDef->toStoreFixedSlot();
+ if (store->slot() == RegExpObject::lastIndexSlot())
+ continue;
+ }
+ } else if (useDef->isSetPropertyCache()) {
+ if (IsExclusiveNthOperand(useDef, 0, def)) {
+ MSetPropertyCache* setProp = useDef->toSetPropertyCache();
+ if (setProp->idval()->isConstant()) {
+ Value propIdVal = setProp->idval()->toConstant()->toJSValue();
+ if (propIdVal.isString()) {
+ CompileRuntime* runtime = GetJitContext()->runtime;
+ if (propIdVal.toString() == runtime->names().lastIndex)
+ continue;
+ }
+ }
+ }
+ }
+ // MCall is safe only for some known safe functions.
+ else if (useDef->isCall()) {
+ if (IsRegExpHoistableCall(useDef->toCall(), def))
+ continue;
+ }
+
+ // Everything else is unsafe.
+ SetNotInWorklist(worklist);
+ worklist.clear();
+ *hoistable = false;
+
+ return true;
+ }
+ }
+
+ SetNotInWorklist(worklist);
+ worklist.clear();
+ *hoistable = true;
+ return true;
+}
+
+bool
+jit::MakeMRegExpHoistable(MIRGenerator* mir, MIRGraph& graph)
+{
+ MDefinitionVector worklist(graph.alloc());
+
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("MakeMRegExpHoistable outer loop"))
+ return false;
+
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ if (!*iter)
+ MOZ_CRASH("confirm bug 1263794.");
+
+ if (mir->shouldCancel("MakeMRegExpHoistable inner loop"))
+ return false;
+
+ if (!iter->isRegExp())
+ continue;
+
+ MRegExp* regexp = iter->toRegExp();
+
+ bool hoistable = false;
+ if (!IsRegExpHoistable(mir, regexp, worklist, &hoistable))
+ return false;
+
+ if (!hoistable)
+ continue;
+
+ // Make MRegExp hoistable
+ regexp->setMovable();
+ regexp->setDoNotClone();
+
+            // That would be incorrect for global/sticky regexps, because
+            // lastIndex could be wrong. Therefore we reset lastIndex to 0;
+            // that is still faster than a non-movable regexp.
+ RegExpObject* source = regexp->source();
+ if (source->sticky() || source->global()) {
+ if (!graph.alloc().ensureBallast())
+ return false;
+ MConstant* zero = MConstant::New(graph.alloc(), Int32Value(0));
+ regexp->block()->insertAfter(regexp, zero);
+
+ MStoreFixedSlot* lastIndex =
+ MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
+ regexp->block()->insertAfter(zero, lastIndex);
+ }
+ }
+ }
+
+ return true;
+}
+
+void
+jit::RenumberBlocks(MIRGraph& graph)
+{
+ size_t id = 0;
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
+ block->setId(id++);
+}
+
+// A utility for code which deletes blocks. Renumber the remaining blocks,
+// recompute dominators, and optionally recompute AliasAnalysis dependencies.
+bool
+jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph, bool updateAliasAnalysis,
+ bool underValueNumberer)
+{
+ // Renumber the blocks and clear out the old dominator info.
+ size_t id = 0;
+ for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
+ i->clearDominatorInfo();
+ i->setId(id++);
+ }
+
+ // Recompute dominator info.
+ if (!BuildDominatorTree(graph))
+ return false;
+
+ // If needed, update alias analysis dependencies.
+ if (updateAliasAnalysis) {
+ TraceLoggerThread* logger;
+ if (GetJitContext()->onMainThread())
+ logger = TraceLoggerForMainThread(GetJitContext()->runtime);
+ else
+ logger = TraceLoggerForCurrentThread();
+ AutoTraceLog log(logger, TraceLogger_AliasAnalysis);
+
+ if (JitOptions.disableFlowAA) {
+ if (!AliasAnalysis(mir, graph).analyze())
+ return false;
+ } else {
+ if (!FlowAliasAnalysis(mir, graph).analyze())
+ return false;
+ }
+ }
+
+ AssertExtendedGraphCoherency(graph, underValueNumberer);
+ return true;
+}
+
+// Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
+// Alias analysis dependencies may be invalid after calling this function.
+bool
+jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph, uint32_t numMarkedBlocks)
+{
+ if (numMarkedBlocks == graph.numBlocks()) {
+ // If all blocks are marked, no blocks need removal. Just clear the
+ // marks. We'll still need to update the dominator tree below though,
+ // since we may have removed edges even if we didn't remove any blocks.
+ graph.unmarkBlocks();
+ } else {
+ // As we are going to remove edges and basic blocks, we have to mark
+ // instructions which would be needed by baseline if we were to
+        // bail out.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ MBasicBlock* block = *it++;
+ if (!block->isMarked())
+ continue;
+
+ FlagAllOperandsAsHavingRemovedUses(mir, block);
+ }
+
+ // Find unmarked blocks and remove them.
+ for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();) {
+ MBasicBlock* block = *iter++;
+
+ if (block->isMarked()) {
+ block->unmark();
+ continue;
+ }
+
+ // The block is unreachable. Clear out the loop header flag, as
+ // we're doing the sweep of a mark-and-sweep here, so we no longer
+ // need to worry about whether an unmarked block is a loop or not.
+ if (block->isLoopHeader())
+ block->clearLoopHeader();
+
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i)
+ block->getSuccessor(i)->removePredecessor(block);
+ graph.removeBlockIncludingPhis(block);
+ }
+ }
+
+ // Renumber the blocks and update the dominator tree.
+ return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
+}
+
+// A Simple, Fast Dominance Algorithm by Cooper et al.
+// Modified to support empty intersections for OSR, and in RPO.
+static MBasicBlock*
+IntersectDominators(MBasicBlock* block1, MBasicBlock* block2)
+{
+ MBasicBlock* finger1 = block1;
+ MBasicBlock* finger2 = block2;
+
+ MOZ_ASSERT(finger1);
+ MOZ_ASSERT(finger2);
+
+ // In the original paper, the block ID comparisons are on the postorder index.
+ // This implementation iterates in RPO, so the comparisons are reversed.
+
+ // For this function to be called, the block must have multiple predecessors.
+ // If a finger is then found to be self-dominating, it must therefore be
+ // reachable from multiple roots through non-intersecting control flow.
+ // nullptr is returned in this case, to denote an empty intersection.
+
+ while (finger1->id() != finger2->id()) {
+ while (finger1->id() > finger2->id()) {
+ MBasicBlock* idom = finger1->immediateDominator();
+ if (idom == finger1)
+ return nullptr; // Empty intersection.
+ finger1 = idom;
+ }
+
+ while (finger2->id() > finger1->id()) {
+ MBasicBlock* idom = finger2->immediateDominator();
+ if (idom == finger2)
+ return nullptr; // Empty intersection.
+ finger2 = idom;
+ }
+ }
+ return finger1;
+}
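+
+// As an illustrative sketch (hypothetical RPO ids, not taken from a real
+// graph): with blocks 0 -> {1, 2} and 1, 2 -> 3, calling
+// IntersectDominators(1, 2) repeatedly advances whichever finger has the
+// larger RPO id to its immediate dominator (2 -> 0, then 1 -> 0) and
+// returns block 0, the nearest common dominator of the join block's
+// predecessors.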
+
+void
+jit::ClearDominatorTree(MIRGraph& graph)
+{
+ for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++)
+ iter->clearDominatorInfo();
+}
+
+static void
+ComputeImmediateDominators(MIRGraph& graph)
+{
+ // The default start block is a root and therefore only self-dominates.
+ MBasicBlock* startBlock = graph.entryBlock();
+ startBlock->setImmediateDominator(startBlock);
+
+ // Any OSR block is a root and therefore only self-dominates.
+ MBasicBlock* osrBlock = graph.osrBlock();
+ if (osrBlock)
+ osrBlock->setImmediateDominator(osrBlock);
+
+ bool changed = true;
+
+ while (changed) {
+ changed = false;
+
+ ReversePostorderIterator block = graph.rpoBegin();
+
+ // For each block in RPO, intersect all dominators.
+ for (; block != graph.rpoEnd(); block++) {
+ // If a node has once been found to have no exclusive dominator,
+ // it will never have an exclusive dominator, so it may be skipped.
+ if (block->immediateDominator() == *block)
+ continue;
+
+ // A block with no predecessors is not reachable from any entry, so
+ // it self-dominates.
+ if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
+ block->setImmediateDominator(*block);
+ continue;
+ }
+
+ MBasicBlock* newIdom = block->getPredecessor(0);
+
+ // Find the first common dominator.
+ for (size_t i = 1; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (pred->immediateDominator() == nullptr)
+ continue;
+
+ newIdom = IntersectDominators(pred, newIdom);
+
+ // If there is no common dominator, the block self-dominates.
+ if (newIdom == nullptr) {
+ block->setImmediateDominator(*block);
+ changed = true;
+ break;
+ }
+ }
+
+ if (newIdom && block->immediateDominator() != newIdom) {
+ block->setImmediateDominator(newIdom);
+ changed = true;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ // Assert that all blocks have dominator information.
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ MOZ_ASSERT(block->immediateDominator() != nullptr);
+ }
+#endif
+}
+
+bool
+jit::BuildDominatorTree(MIRGraph& graph)
+{
+ ComputeImmediateDominators(graph);
+
+ Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());
+
+ // Traversing through the graph in post-order means that every non-phi use
+ // of a definition is visited before the def itself. Since a def
+ // dominates its uses, by the time we reach a particular
+ // block, we have processed all of its dominated children, so
+ // block->numDominated() is accurate.
+ for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
+ MBasicBlock* child = *i;
+ MBasicBlock* parent = child->immediateDominator();
+
+ // Dominance is defined such that blocks always dominate themselves.
+ child->addNumDominated(1);
+
+ // If the block only self-dominates, it has no definite parent.
+ // Add it to the worklist as a root for pre-order traversal.
+ // This includes all roots. Order does not matter.
+ if (child == parent) {
+ if (!worklist.append(child))
+ return false;
+ continue;
+ }
+
+ if (!parent->addImmediatelyDominatedBlock(child))
+ return false;
+
+ parent->addNumDominated(child->numDominated());
+ }
+
+#ifdef DEBUG
+ // If compiling with OSR, many blocks will self-dominate.
+ // Without OSR, there is only one root block which dominates all.
+ if (!graph.osrBlock())
+ MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
+#endif
+ // Now, iterate through the dominator tree in pre-order and annotate every
+ // block with its index in the traversal.
+ size_t index = 0;
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+ block->setDomIndex(index);
+
+ if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
+ block->immediatelyDominatedBlocksEnd())) {
+ return false;
+ }
+ index++;
+ }
+
+ return true;
+}
+
+bool
+jit::BuildPhiReverseMapping(MIRGraph& graph)
+{
+ // Build a mapping such that given a basic block, whose successor has one or
+ // more phis, we can find our specific input to that phi. To make this fast
+ // mapping work we rely on a specific property of our structured control
+ // flow graph: For a block with phis, its predecessors each have only one
+ // successor with phis. Consider each case:
+ // * Blocks with fewer than two predecessors cannot have phis.
+ // * Breaks. A break always has exactly one successor, and the break
+ // catch block has exactly one predecessor for each break, as
+ // well as a final predecessor for the actual loop exit.
+ // * Continues. A continue always has exactly one successor, and the
+ // continue catch block has exactly one predecessor for each
+ // continue, as well as a final predecessor for the actual
+ // loop continuation. The continue itself has exactly one
+ // successor.
+ // * An if. Each branch has exactly one predecessor.
+ // * A switch. Each branch has exactly one predecessor.
+ // * Loop tail. A new block is always created for the exit, and if a
+ // break statement is present, the exit block will forward
+ // directly to the break block.
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ if (block->phisEmpty())
+ continue;
+
+ // Assert on the above.
+ for (size_t j = 0; j < block->numPredecessors(); j++) {
+ MBasicBlock* pred = block->getPredecessor(j);
+
+#ifdef DEBUG
+ size_t numSuccessorsWithPhis = 0;
+ for (size_t k = 0; k < pred->numSuccessors(); k++) {
+ MBasicBlock* successor = pred->getSuccessor(k);
+ if (!successor->phisEmpty())
+ numSuccessorsWithPhis++;
+ }
+ MOZ_ASSERT(numSuccessorsWithPhis <= 1);
+#endif
+
+ pred->setSuccessorWithPhis(*block, j);
+ }
+ }
+
+ return true;
+}
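+
+// Concretely (hypothetical shape): if block B has phis and predecessors P0
+// (edge 0) and P1 (edge 1), then after this pass P0->successorWithPhis() and
+// P1->successorWithPhis() both return B, with positionInPhiSuccessor()
+// returning 0 and 1 respectively, so each predecessor can find its phi
+// operand index without scanning.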
+
+#ifdef DEBUG
+static bool
+CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B)
+{
+ // Assuming B = succ(A), verify A = pred(B).
+ for (size_t i = 0; i < B->numPredecessors(); i++) {
+ if (A == B->getPredecessor(i))
+ return true;
+ }
+ return false;
+}
+
+static bool
+CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B)
+{
+ // Assuming B = pred(A), verify A = succ(B).
+ for (size_t i = 0; i < B->numSuccessors(); i++) {
+ if (A == B->getSuccessor(i))
+ return true;
+ }
+ return false;
+}
+
+// If you have issues with the usesBalance assertions, then define the macro
+// _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
+// This output can then be processed with the following awk script to filter and
+// highlight which checks are missing or if there is an unexpected operand /
+// use.
+//
+// #define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
+/*
+
+$ ./js 2>stderr.log
+$ gawk '
+ /^==Check/ { context = ""; state = $2; }
+ /^[a-z]/ { context = context "\n\t" $0; }
+ /^==End/ {
+ if (state == "Operand") {
+ list[context] = list[context] - 1;
+ } else if (state == "Use") {
+ list[context] = list[context] + 1;
+ }
+ }
+ END {
+ for (ctx in list) {
+ if (list[ctx] > 0) {
+ print "Missing operand check", ctx, "\n"
+ }
+ if (list[ctx] < 0) {
+ print "Missing use check", ctx, "\n"
+ }
+ };
+ }' < stderr.log
+
+*/
+
+static void
+CheckOperand(const MNode* consumer, const MUse* use, int32_t* usesBalance)
+{
+ MOZ_ASSERT(use->hasProducer());
+ MDefinition* producer = use->producer();
+ MOZ_ASSERT(!producer->isDiscarded());
+ MOZ_ASSERT(producer->block() != nullptr);
+ MOZ_ASSERT(use->consumer() == consumer);
+#ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
+ fprintf(stderr, "==Check Operand\n");
+ use->producer()->dump(stderr);
+ fprintf(stderr, " index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
+ use->consumer()->dump(stderr);
+ fprintf(stderr, "==End\n");
+#endif
+ --*usesBalance;
+}
+
+static void
+CheckUse(const MDefinition* producer, const MUse* use, int32_t* usesBalance)
+{
+ MOZ_ASSERT(!use->consumer()->block()->isDead());
+ MOZ_ASSERT_IF(use->consumer()->isDefinition(),
+ !use->consumer()->toDefinition()->isDiscarded());
+ MOZ_ASSERT(use->consumer()->block() != nullptr);
+ MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
+#ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
+ fprintf(stderr, "==Check Use\n");
+ use->producer()->dump(stderr);
+ fprintf(stderr, " index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
+ use->consumer()->dump(stderr);
+ fprintf(stderr, "==End\n");
+#endif
+ ++*usesBalance;
+}
+
+// To properly encode entry resume points, we have to ensure that all the
+// operands of the entry resume point are located before the safeInsertTop
+// location.
+static void
+AssertOperandsBeforeSafeInsertTop(MResumePoint* resume)
+{
+ MBasicBlock* block = resume->block();
+ if (block == block->graph().osrBlock())
+ return;
+ MInstruction* stop = block->safeInsertTop();
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ MDefinition* def = resume->getOperand(i);
+ if (def->block() != block)
+ continue;
+ if (def->isPhi())
+ continue;
+
+ for (MInstructionIterator ins = block->begin(); true; ins++) {
+ if (*ins == def)
+ break;
+ MOZ_ASSERT(*ins != stop,
+ "Resume point operand located after the safeInsertTop location");
+ }
+ }
+}
+#endif // DEBUG
+
+void
+jit::AssertBasicGraphCoherency(MIRGraph& graph)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
+ MOZ_ASSERT(graph.entryBlock()->phisEmpty());
+ MOZ_ASSERT(!graph.entryBlock()->unreachable());
+
+ if (MBasicBlock* osrBlock = graph.osrBlock()) {
+ MOZ_ASSERT(osrBlock->numPredecessors() == 0);
+ MOZ_ASSERT(osrBlock->phisEmpty());
+ MOZ_ASSERT(osrBlock != graph.entryBlock());
+ MOZ_ASSERT(!osrBlock->unreachable());
+ }
+
+ if (MResumePoint* resumePoint = graph.entryResumePoint())
+ MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
+
+ // Assert successor and predecessor list coherency.
+ uint32_t count = 0;
+ int32_t usesBalance = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ count++;
+
+ MOZ_ASSERT(&block->graph() == &graph);
+ MOZ_ASSERT(!block->isDead());
+ MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
+ block->entryResumePoint() != nullptr);
+
+ for (size_t i = 0; i < block->numSuccessors(); i++)
+ MOZ_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
+
+ for (size_t i = 0; i < block->numPredecessors(); i++)
+ MOZ_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
+
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ MOZ_ASSERT(!resume->instruction());
+ MOZ_ASSERT(resume->block() == *block);
+ AssertOperandsBeforeSafeInsertTop(resume);
+ }
+ if (MResumePoint* resume = block->outerResumePoint()) {
+ MOZ_ASSERT(!resume->instruction());
+ MOZ_ASSERT(resume->block() == *block);
+ }
+ for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
+ // We cannot yet assert that, if there is no instruction, then this is
+ // the entry resume point, because we are still storing resume points
+ // in the InlinePropertyTable.
+ MOZ_ASSERT_IF(iter->instruction(), iter->instruction()->block() == *block);
+ for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
+ CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
+ }
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
+ MOZ_ASSERT(!phi->isRecoveredOnBailout());
+ MOZ_ASSERT(phi->type() != MIRType::None);
+ MOZ_ASSERT(phi->dependency() == nullptr);
+ }
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ MOZ_ASSERT(iter->block() == *block);
+ MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
+ MOZ_ASSERT(!iter->isDiscarded());
+ MOZ_ASSERT_IF(iter->isStart(),
+ *block == graph.entryBlock() || *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isParameter(),
+ *block == graph.entryBlock() || *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());
+
+ // Assert that use chains are valid for this instruction.
+ for (uint32_t i = 0, end = iter->numOperands(); i < end; i++)
+ CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
+ for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++)
+ CheckUse(*iter, *use, &usesBalance);
+
+ if (iter->isInstruction()) {
+ if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
+ MOZ_ASSERT(resume->instruction() == *iter);
+ MOZ_ASSERT(resume->block() == *block);
+ MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
+ }
+ }
+
+ if (iter->isRecoveredOnBailout())
+ MOZ_ASSERT(!iter->hasLiveDefUses());
+ }
+
+ // The control instruction is not visited by the MDefinitionIterator.
+ MControlInstruction* control = block->lastIns();
+ MOZ_ASSERT(control->block() == *block);
+ MOZ_ASSERT(!control->hasUses());
+ MOZ_ASSERT(control->type() == MIRType::None);
+ MOZ_ASSERT(!control->isDiscarded());
+ MOZ_ASSERT(!control->isRecoveredOnBailout());
+ MOZ_ASSERT(control->resumePoint() == nullptr);
+ for (uint32_t i = 0, end = control->numOperands(); i < end; i++)
+ CheckOperand(control, control->getUseFor(i), &usesBalance);
+ for (size_t i = 0; i < control->numSuccessors(); i++)
+ MOZ_ASSERT(control->getSuccessor(i));
+ }
+
+ // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
+ MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
+ MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
+ MOZ_ASSERT(graph.numBlocks() == count);
+#endif
+}
+
+#ifdef DEBUG
+static void
+AssertReversePostorder(MIRGraph& graph)
+{
+ // Check that every block is visited after all its predecessors (except backedges).
+ for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd(); ++iter) {
+ MBasicBlock* block = *iter;
+ MOZ_ASSERT(!block->isMarked());
+
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (!pred->isMarked()) {
+ MOZ_ASSERT(pred->isLoopBackedge());
+ MOZ_ASSERT(block->backedge() == pred);
+ }
+ }
+
+ block->mark();
+ }
+
+ graph.unmarkBlocks();
+}
+#endif
+
+#ifdef DEBUG
+static void
+AssertDominatorTree(MIRGraph& graph)
+{
+ // Check dominators.
+
+ MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
+ if (MBasicBlock* osrBlock = graph.osrBlock())
+ MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
+ else
+ MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
+
+ size_t i = graph.numBlocks();
+ size_t totalNumDominated = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ MOZ_ASSERT(block->dominates(*block));
+
+ MBasicBlock* idom = block->immediateDominator();
+ MOZ_ASSERT(idom->dominates(*block));
+ MOZ_ASSERT(idom == *block || idom->id() < block->id());
+
+ if (idom == *block) {
+ totalNumDominated += block->numDominated();
+ } else {
+ bool foundInParent = false;
+ for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
+ if (idom->getImmediatelyDominatedBlock(j) == *block) {
+ foundInParent = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(foundInParent);
+ }
+
+ size_t numDominated = 1;
+ for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
+ MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
+ MOZ_ASSERT(block->dominates(dom));
+ MOZ_ASSERT(dom->id() > block->id());
+ MOZ_ASSERT(dom->immediateDominator() == *block);
+
+ numDominated += dom->numDominated();
+ }
+ MOZ_ASSERT(block->numDominated() == numDominated);
+ MOZ_ASSERT(block->numDominated() <= i);
+ MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
+ i--;
+ }
+ MOZ_ASSERT(i == 0);
+ MOZ_ASSERT(totalNumDominated == graph.numBlocks());
+}
+#endif
+
+void
+jit::AssertGraphCoherency(MIRGraph& graph)
+{
+#ifdef DEBUG
+ if (!JitOptions.checkGraphConsistency)
+ return;
+ AssertBasicGraphCoherency(graph);
+ AssertReversePostorder(graph);
+#endif
+}
+
+#ifdef DEBUG
+static bool
+IsResumableMIRType(MIRType type)
+{
+ // see CodeGeneratorShared::encodeAllocation
+ switch (type) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing:
+ case MIRType::Value:
+ case MIRType::Int32x4:
+ case MIRType::Int16x8:
+ case MIRType::Int8x16:
+ case MIRType::Float32x4:
+ case MIRType::Bool32x4:
+ case MIRType::Bool16x8:
+ case MIRType::Bool8x16:
+ return true;
+
+ case MIRType::MagicHole:
+ case MIRType::ObjectOrNull:
+ case MIRType::None:
+ case MIRType::Slots:
+ case MIRType::Elements:
+ case MIRType::Pointer:
+ case MIRType::Shape:
+ case MIRType::ObjectGroup:
+ case MIRType::Doublex2: // NYI, see also RSimdBox::recover
+ case MIRType::SinCosDouble:
+ case MIRType::Int64:
+ return false;
+ }
+ MOZ_CRASH("Unknown MIRType.");
+}
+
+static void
+AssertResumableOperands(MNode* node)
+{
+ for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
+ MDefinition* op = node->getOperand(i);
+ if (op->isRecoveredOnBailout())
+ continue;
+ MOZ_ASSERT(IsResumableMIRType(op->type()),
+ "Resume point cannot encode its operands");
+ }
+}
+
+static void
+AssertIfResumableInstruction(MDefinition* def)
+{
+ if (!def->isRecoveredOnBailout())
+ return;
+ AssertResumableOperands(def);
+}
+
+static void
+AssertResumePointDominatedByOperands(MResumePoint* resume)
+{
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ MDefinition* op = resume->getOperand(i);
+ if (op->type() == MIRType::MagicOptimizedArguments)
+ continue;
+ MOZ_ASSERT(op->block()->dominates(resume->block()),
+ "Resume point is not dominated by its operands");
+ }
+}
+#endif // DEBUG
+
+void
+jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer)
+{
+ // Checks the basic graph coherency and also other conditions that do
+ // not hold immediately (such as the fact that critical edges are
+ // split).
+
+#ifdef DEBUG
+ if (!JitOptions.checkGraphConsistency)
+ return;
+
+ AssertGraphCoherency(graph);
+
+ AssertDominatorTree(graph);
+
+ DebugOnly<uint32_t> idx = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ MOZ_ASSERT(block->id() == idx);
+ ++idx;
+
+ // No critical edges:
+ if (block->numSuccessors() > 1)
+ for (size_t i = 0; i < block->numSuccessors(); i++)
+ MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
+
+ if (block->isLoopHeader()) {
+ if (underValueNumberer && block->numPredecessors() == 3) {
+ // Fixup block.
+ MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
+ MOZ_ASSERT(graph.osrBlock(),
+ "Fixup blocks should only exists if we have an osr block.");
+ } else {
+ MOZ_ASSERT(block->numPredecessors() == 2);
+ }
+ MBasicBlock* backedge = block->backedge();
+ MOZ_ASSERT(backedge->id() >= block->id());
+ MOZ_ASSERT(backedge->numSuccessors() == 1);
+ MOZ_ASSERT(backedge->getSuccessor(0) == *block);
+ }
+
+ if (!block->phisEmpty()) {
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ MOZ_ASSERT(pred->successorWithPhis() == *block);
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
+ }
+ }
+
+ uint32_t successorWithPhis = 0;
+ for (size_t i = 0; i < block->numSuccessors(); i++)
+ if (!block->getSuccessor(i)->phisEmpty())
+ successorWithPhis++;
+
+ MOZ_ASSERT(successorWithPhis <= 1);
+ MOZ_ASSERT((successorWithPhis != 0) == (block->successorWithPhis() != nullptr));
+
+ // Verify that phi operands dominate the corresponding CFG predecessor
+ // edges.
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
+ MPhi* phi = *iter;
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ // We sometimes see a phi with a magic-optimized-arguments
+ // operand defined in the normal entry block, while the phi is
+ // also reachable from the OSR entry (auto-regress/bug779818.js)
+ if (phi->getOperand(i)->type() == MIRType::MagicOptimizedArguments)
+ continue;
+
+ MOZ_ASSERT(phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
+ "Phi input is not dominated by its operand");
+ }
+ }
+
+ // Verify that instructions are dominated by their operands.
+ for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ++iter) {
+ MInstruction* ins = *iter;
+ for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+ MBasicBlock* opBlock = op->block();
+ MOZ_ASSERT(opBlock->dominates(*block),
+ "Instruction is not dominated by its operands");
+
+ // If the operand is an instruction in the same block, check
+ // that it comes first.
+ if (opBlock == *block && !op->isPhi()) {
+ MInstructionIterator opIter = block->begin(op->toInstruction());
+ do {
+ ++opIter;
+ MOZ_ASSERT(opIter != block->end(),
+ "Operand in same block as instruction does not precede");
+ } while (*opIter != ins);
+ }
+ }
+ AssertIfResumableInstruction(ins);
+ if (MResumePoint* resume = ins->resumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ }
+
+ // Verify that the block resume points are dominated by their operands.
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ if (MResumePoint* resume = block->outerResumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ }
+#endif
+}
+
+
+struct BoundsCheckInfo
+{
+ MBoundsCheck* check;
+ uint32_t validEnd;
+};
+
+typedef HashMap<uint32_t,
+ BoundsCheckInfo,
+ DefaultHasher<uint32_t>,
+ JitAllocPolicy> BoundsCheckMap;
+
+// Compute a hash for bounds checks which ignores constant offsets in the index.
+static HashNumber
+BoundsCheckHashIgnoreOffset(MBoundsCheck* check)
+{
+ SimpleLinearSum indexSum = ExtractLinearSum(check->index());
+ uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
+ uintptr_t length = uintptr_t(check->length());
+ return index ^ length;
+}
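+
+// For example (hypothetical MIR), bounds checks on |i + 1| and |i + 4|
+// against the same length reduce to the same (term, length) pair and hash
+// identically, which is what allows FindDominatingBoundsCheck below to
+// consider coalescing them.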
+
+static MBoundsCheck*
+FindDominatingBoundsCheck(BoundsCheckMap& checks, MBoundsCheck* check, size_t index)
+{
+ // Since we are traversing the dominator tree in pre-order, when we
+ // are looking at the |index|-th block, the next numDominated() blocks
+ // we traverse are precisely the set of blocks dominated by it.
+ //
+ // So, a check recorded at traversal position |index| is visible in the
+ // blocks at positions [index, index + check->block()->numDominated())
+ // and becomes invalid after that.
+ HashNumber hash = BoundsCheckHashIgnoreOffset(check);
+ BoundsCheckMap::Ptr p = checks.lookup(hash);
+ if (!p || index >= p->value().validEnd) {
+ // We didn't find a dominating bounds check.
+ BoundsCheckInfo info;
+ info.check = check;
+ info.validEnd = index + check->block()->numDominated();
+
+ if (!checks.put(hash, info))
+ return nullptr;
+
+ return check;
+ }
+
+ return p->value().check;
+}
+
+static MathSpace
+ExtractMathSpace(MDefinition* ins)
+{
+ MOZ_ASSERT(ins->isAdd() || ins->isSub());
+ MBinaryArithInstruction* arith = nullptr;
+ if (ins->isAdd())
+ arith = ins->toAdd();
+ else
+ arith = ins->toSub();
+ switch (arith->truncateKind()) {
+ case MDefinition::NoTruncate:
+ case MDefinition::TruncateAfterBailouts:
+ // TruncateAfterBailouts is treated as infinite space because the
+ // LinearSum will effectively remove the bailout check.
+ return MathSpace::Infinite;
+ case MDefinition::IndirectTruncate:
+ case MDefinition::Truncate:
+ return MathSpace::Modulo;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unknown TruncateKind");
+}
+
+// Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
+SimpleLinearSum
+jit::ExtractLinearSum(MDefinition* ins, MathSpace space)
+{
+ if (ins->isBeta())
+ ins = ins->getOperand(0);
+
+ if (ins->type() != MIRType::Int32)
+ return SimpleLinearSum(ins, 0);
+
+ if (ins->isConstant())
+ return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
+
+ if (!ins->isAdd() && !ins->isSub())
+ return SimpleLinearSum(ins, 0);
+
+ // Only allow math operations which are in the same space.
+ MathSpace insSpace = ExtractMathSpace(ins);
+ if (space == MathSpace::Unknown)
+ space = insSpace;
+ else if (space != insSpace)
+ return SimpleLinearSum(ins, 0);
+ MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);
+
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+ if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32)
+ return SimpleLinearSum(ins, 0);
+
+ // Extract linear sums of each operand.
+ SimpleLinearSum lsum = ExtractLinearSum(lhs, space);
+ SimpleLinearSum rsum = ExtractLinearSum(rhs, space);
+
+ // LinearSum only considers a single term operand; if both sides have
+ // terms, then ignore the extracted linear sums.
+ if (lsum.term && rsum.term)
+ return SimpleLinearSum(ins, 0);
+
+ // Check if this is of the form <SUM> + n or n + <SUM>.
+ if (ins->isAdd()) {
+ int32_t constant;
+ if (space == MathSpace::Modulo)
+ constant = lsum.constant + rsum.constant;
+ else if (!SafeAdd(lsum.constant, rsum.constant, &constant))
+ return SimpleLinearSum(ins, 0);
+ return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
+ }
+
+ MOZ_ASSERT(ins->isSub());
+ // Check if this is of the form <SUM> - n.
+ if (lsum.term) {
+ int32_t constant;
+ if (space == MathSpace::Modulo)
+ constant = lsum.constant - rsum.constant;
+ else if (!SafeSub(lsum.constant, rsum.constant, &constant))
+ return SimpleLinearSum(ins, 0);
+ return SimpleLinearSum(lsum.term, constant);
+ }
+
+ // Ignore any of the form n - <SUM>.
+ return SimpleLinearSum(ins, 0);
+}
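+
+// A small worked example (hypothetical MIR, assuming both operations live in
+// the same MathSpace): for |(x + 3) - 1|, the left operand extracts to
+// (x, 3) and the right to (nullptr, 1), so the sub case above yields
+// SimpleLinearSum(x, 2).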
+
+// Extract a linear inequality holding when a boolean test goes in the
+// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
+bool
+jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
+ SimpleLinearSum* plhs, MDefinition** prhs, bool* plessEqual)
+{
+ if (!test->getOperand(0)->isCompare())
+ return false;
+
+ MCompare* compare = test->getOperand(0)->toCompare();
+
+ MDefinition* lhs = compare->getOperand(0);
+ MDefinition* rhs = compare->getOperand(1);
+
+ // TODO: optimize Compare_UInt32
+ if (!compare->isInt32Comparison())
+ return false;
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ JSOp jsop = compare->jsop();
+ if (direction == FALSE_BRANCH)
+ jsop = NegateCompareOp(jsop);
+
+ SimpleLinearSum lsum = ExtractLinearSum(lhs);
+ SimpleLinearSum rsum = ExtractLinearSum(rhs);
+
+ if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant))
+ return false;
+
+ // Normalize operations to use <= or >=.
+ switch (jsop) {
+ case JSOP_LE:
+ *plessEqual = true;
+ break;
+ case JSOP_LT:
+ /* x < y ==> x + 1 <= y */
+ if (!SafeAdd(lsum.constant, 1, &lsum.constant))
+ return false;
+ *plessEqual = true;
+ break;
+ case JSOP_GE:
+ *plessEqual = false;
+ break;
+ case JSOP_GT:
+ /* x > y ==> x - 1 >= y */
+ if (!SafeSub(lsum.constant, 1, &lsum.constant))
+ return false;
+ *plessEqual = false;
+ break;
+ default:
+ return false;
+ }
+
+ *plhs = lsum;
+ *prhs = rsum.term;
+
+ return true;
+}
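+
+// For example (hypothetical MIR), a test |if (i < length)| taken on its
+// TRUE_BRANCH normalizes to |i + 1 <= length|: *plhs becomes the sum (i, 1),
+// *prhs becomes length, and *plessEqual is true.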
+
+static bool
+TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex, MBoundsCheck* dominated, bool* eliminated)
+{
+ MOZ_ASSERT(!*eliminated);
+
+ // Replace all uses of the bounds check with the actual index.
+ // This is (a) necessary, because we can coalesce two different
+ // bounds checks and would otherwise use the wrong index and
+ // (b) helps register allocation. Note that this is safe since
+ // no other pass after bounds check elimination moves instructions.
+ dominated->replaceAllUsesWith(dominated->index());
+
+ if (!dominated->isMovable())
+ return true;
+
+ if (!dominated->fallible())
+ return true;
+
+ MBoundsCheck* dominating = FindDominatingBoundsCheck(checks, dominated, blockIndex);
+ if (!dominating)
+ return false;
+
+ if (dominating == dominated) {
+ // We didn't find a dominating bounds check.
+ return true;
+ }
+
+ // We found two bounds checks with the same hash number, but we still have
+ // to make sure the lengths and index terms are equal.
+ if (dominating->length() != dominated->length())
+ return true;
+
+ SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
+ SimpleLinearSum sumB = ExtractLinearSum(dominated->index());
+
+ // Both terms should be nullptr or the same definition.
+ if (sumA.term != sumB.term)
+ return true;
+
+ // This bounds check is redundant.
+ *eliminated = true;
+
+ // Normalize the ranges according to the constant offsets in the two indexes.
+ int32_t minimumA, maximumA, minimumB, maximumB;
+ if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
+ !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
+ !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
+ !SafeAdd(sumB.constant, dominated->maximum(), &maximumB))
+ {
+ return false;
+ }
+
+ // Update the dominating check to cover both ranges, denormalizing the
+ // result per the constant offset in the index.
+ int32_t newMinimum, newMaximum;
+ if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) ||
+ !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum))
+ {
+ return false;
+ }
+
+ dominating->setMinimum(newMinimum);
+ dominating->setMaximum(newMaximum);
+ return true;
+}
+
+static void
+TryEliminateTypeBarrierFromTest(MTypeBarrier* barrier, bool filtersNull, bool filtersUndefined,
+ MTest* test, BranchDirection direction, bool* eliminated)
+{
+ MOZ_ASSERT(filtersNull || filtersUndefined);
+
+ // Watch for code patterns similar to 'if (x.f) { ... = x.f }'. If x.f
+ // is either an object or null/undefined, there will be a type barrier on
+ // the latter read as the null/undefined value is never realized there.
+ // The type barrier can be eliminated, however, by looking at tests
+ // performed on the result of the first operation that filter out all
+ // types that have been seen in the first access but not the second.
+
+ // A test 'if (x.f)' filters both null and undefined.
+
+ // Disregard the possible unbox added before the Typebarrier for checking.
+ MDefinition* input = barrier->input();
+ MUnbox* inputUnbox = nullptr;
+ if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
+ inputUnbox = input->toUnbox();
+ input = inputUnbox->input();
+ }
+
+ MDefinition* subject = nullptr;
+ bool removeUndefined;
+ bool removeNull;
+ test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);
+
+ // The Test doesn't filter undefined nor null.
+ if (!subject)
+ return;
+
+ // Make sure the subject equals the input to the TypeBarrier.
+ if (subject != input)
+ return;
+
+ // When the TypeBarrier filters undefined, the test must also filter
+ // undefined before the TypeBarrier can be removed.
+ if (!removeUndefined && filtersUndefined)
+ return;
+
+ // When the TypeBarrier filters null, the test must also filter null
+ // before the TypeBarrier can be removed.
+ if (!removeNull && filtersNull)
+ return;
+
+ // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
+ // but made infallible.
+ *eliminated = true;
+ if (inputUnbox)
+ inputUnbox->makeInfallible();
+ barrier->replaceAllUsesWith(barrier->input());
+}
+
+static bool
+TryEliminateTypeBarrier(MTypeBarrier* barrier, bool* eliminated)
+{
+ MOZ_ASSERT(!*eliminated);
+
+ const TemporaryTypeSet* barrierTypes = barrier->resultTypeSet();
+ const TemporaryTypeSet* inputTypes = barrier->input()->resultTypeSet();
+
+ // Disregard the possible unbox added before the Typebarrier.
+ if (barrier->input()->isUnbox() && barrier->input()->toUnbox()->mode() != MUnbox::Fallible)
+ inputTypes = barrier->input()->toUnbox()->input()->resultTypeSet();
+
+ if (!barrierTypes || !inputTypes)
+ return true;
+
+ bool filtersNull = barrierTypes->filtersType(inputTypes, TypeSet::NullType());
+ bool filtersUndefined = barrierTypes->filtersType(inputTypes, TypeSet::UndefinedType());
+
+ if (!filtersNull && !filtersUndefined)
+ return true;
+
+ MBasicBlock* block = barrier->block();
+ while (true) {
+ BranchDirection direction;
+ MTest* test = block->immediateDominatorBranch(&direction);
+
+ if (test) {
+ TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
+ test, direction, eliminated);
+ }
+
+ MBasicBlock* previous = block->immediateDominator();
+ if (previous == block)
+ break;
+ block = previous;
+ }
+
+ return true;
+}
+
+static bool
+TryOptimizeLoadObjectOrNull(MDefinition* def, MDefinitionVector* peliminateList)
+{
+ if (def->type() != MIRType::Value)
+ return true;
+
+ // Check if this definition can only produce object or null values.
+ TemporaryTypeSet* types = def->resultTypeSet();
+ if (!types)
+ return true;
+ if (types->baseFlags() & ~(TYPE_FLAG_NULL | TYPE_FLAG_ANYOBJECT))
+ return true;
+
+ MDefinitionVector eliminateList(def->block()->graph().alloc());
+
+ for (MUseDefIterator iter(def); iter; ++iter) {
+ MDefinition* ndef = iter.def();
+ switch (ndef->op()) {
+ case MDefinition::Op_Compare:
+ if (ndef->toCompare()->compareType() != MCompare::Compare_Null)
+ return true;
+ break;
+ case MDefinition::Op_Test:
+ break;
+ case MDefinition::Op_PostWriteBarrier:
+ break;
+ case MDefinition::Op_StoreFixedSlot:
+ break;
+ case MDefinition::Op_StoreSlot:
+ break;
+ case MDefinition::Op_ToObjectOrNull:
+ if (!eliminateList.append(ndef->toToObjectOrNull()))
+ return false;
+ break;
+ case MDefinition::Op_Unbox:
+ if (ndef->type() != MIRType::Object)
+ return true;
+ break;
+ case MDefinition::Op_TypeBarrier:
+ // For now, only handle type barriers which are not consumed
+ // anywhere and only test that the value is null.
+ if (ndef->hasUses() || ndef->resultTypeSet()->getKnownMIRType() != MIRType::Null)
+ return true;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ // On punboxing systems we are better off leaving the value boxed if it
+ // is only stored back to the heap.
+#ifdef JS_PUNBOX64
+ bool foundUse = false;
+ for (MUseDefIterator iter(def); iter; ++iter) {
+ MDefinition* ndef = iter.def();
+ if (!ndef->isStoreFixedSlot() && !ndef->isStoreSlot()) {
+ foundUse = true;
+ break;
+ }
+ }
+ if (!foundUse)
+ return true;
+#endif // JS_PUNBOX64
+
+ def->setResultType(MIRType::ObjectOrNull);
+
+ // Fixup the result type of MTypeBarrier uses.
+ for (MUseDefIterator iter(def); iter; ++iter) {
+ MDefinition* ndef = iter.def();
+ if (ndef->isTypeBarrier())
+ ndef->setResultType(MIRType::ObjectOrNull);
+ }
+
+ // Eliminate MToObjectOrNull instruction uses.
+ for (size_t i = 0; i < eliminateList.length(); i++) {
+ MDefinition* ndef = eliminateList[i];
+ ndef->replaceAllUsesWith(def);
+ if (!peliminateList->append(ndef))
+ return false;
+ }
+
+ return true;
+}
+
+static inline MDefinition*
+PassthroughOperand(MDefinition* def)
+{
+ if (def->isConvertElementsToDoubles())
+ return def->toConvertElementsToDoubles()->elements();
+ if (def->isMaybeCopyElementsForWrite())
+ return def->toMaybeCopyElementsForWrite()->object();
+ if (def->isConvertUnboxedObjectToNative())
+ return def->toConvertUnboxedObjectToNative()->object();
+ return nullptr;
+}
+
+// Eliminate checks which are redundant given each other or other instructions.
+//
+// A type barrier is considered redundant if all missing types have been tested
+// for by earlier control instructions.
+//
+// A bounds check is considered redundant if it's dominated by another bounds
+// check with the same length and the indexes differ by only a constant amount.
+// In this case we eliminate the redundant bounds check and update the other one
+// to cover the ranges of both checks.
+//
+// Bounds checks are added to a hash map and since the hash function ignores
+// differences in constant offset, this offers a fast way to find redundant
+// checks.
+bool
+jit::EliminateRedundantChecks(MIRGraph& graph)
+{
+ BoundsCheckMap checks(graph.alloc());
+
+ if (!checks.init())
+ return false;
+
+ // Stack for pre-order CFG traversal.
+ Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());
+
+ // The index of the current block in the CFG traversal.
+ size_t index = 0;
+
+ // Add all self-dominating blocks to the worklist.
+ // This includes all roots. Order does not matter.
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* block = *i;
+ if (block->immediateDominator() == block) {
+ if (!worklist.append(block))
+ return false;
+ }
+ }
+
+ MDefinitionVector eliminateList(graph.alloc());
+
+ // Starting from each self-dominating block, traverse the CFG in pre-order.
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+
+ // Add all immediate dominators to the front of the worklist.
+ if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
+ block->immediatelyDominatedBlocksEnd())) {
+ return false;
+ }
+
+ for (MDefinitionIterator iter(block); iter; ) {
+ MDefinition* def = *iter++;
+
+ bool eliminated = false;
+
+ switch (def->op()) {
+ case MDefinition::Op_BoundsCheck:
+ if (!TryEliminateBoundsCheck(checks, index, def->toBoundsCheck(), &eliminated))
+ return false;
+ break;
+ case MDefinition::Op_TypeBarrier:
+ if (!TryEliminateTypeBarrier(def->toTypeBarrier(), &eliminated))
+ return false;
+ break;
+ case MDefinition::Op_LoadFixedSlot:
+ case MDefinition::Op_LoadSlot:
+ case MDefinition::Op_LoadUnboxedObjectOrNull:
+ if (!TryOptimizeLoadObjectOrNull(def, &eliminateList))
+ return false;
+ break;
+ default:
+ // Now that code motion passes have finished, replace
+ // instructions which pass through one of their operands
+ // (and perform additional checks) with that operand.
+ if (MDefinition* passthrough = PassthroughOperand(def))
+ def->replaceAllUsesWith(passthrough);
+ break;
+ }
+
+ if (eliminated)
+ block->discardDef(def);
+ }
+ index++;
+ }
+
+ MOZ_ASSERT(index == graph.numBlocks());
+
+ for (size_t i = 0; i < eliminateList.length(); i++) {
+ MDefinition* def = eliminateList[i];
+ def->block()->discardDef(def);
+ }
+
+ return true;
+}
+
+static bool
+NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use)
+{
+ MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
+ slotsOrElements->type() == MIRType::Slots);
+
+ if (slotsOrElements->block() != use->block())
+ return true;
+
+ MBasicBlock* block = use->block();
+ MInstructionIterator iter(block->begin(slotsOrElements));
+ MOZ_ASSERT(*iter == slotsOrElements);
+ ++iter;
+
+ while (true) {
+ if (*iter == use)
+ return false;
+
+ switch (iter->op()) {
+ case MDefinition::Op_Nop:
+ case MDefinition::Op_Constant:
+ case MDefinition::Op_KeepAliveObject:
+ case MDefinition::Op_Unbox:
+ case MDefinition::Op_LoadSlot:
+ case MDefinition::Op_StoreSlot:
+ case MDefinition::Op_LoadFixedSlot:
+ case MDefinition::Op_StoreFixedSlot:
+ case MDefinition::Op_LoadElement:
+ case MDefinition::Op_StoreElement:
+ case MDefinition::Op_InitializedLength:
+ case MDefinition::Op_ArrayLength:
+ case MDefinition::Op_BoundsCheck:
+ iter++;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ MOZ_CRASH("Unreachable");
+}
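+
+// In other words (illustrative): if any instruction outside the whitelist
+// above, such as a call that can trigger GC, sits between the slots/elements
+// pointer and its use, the raw pointer alone would not keep the owner object
+// alive, so a keepalive is required.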
+
+bool
+jit::AddKeepAliveInstructions(MIRGraph& graph)
+{
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* block = *i;
+
+ for (MInstructionIterator insIter(block->begin()); insIter != block->end(); insIter++) {
+ MInstruction* ins = *insIter;
+ if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots)
+ continue;
+
+ MDefinition* ownerObject;
+ switch (ins->op()) {
+ case MDefinition::Op_ConstantElements:
+ continue;
+ case MDefinition::Op_ConvertElementsToDoubles:
+ // EliminateRedundantChecks should have replaced all uses.
+ MOZ_ASSERT(!ins->hasUses());
+ continue;
+ case MDefinition::Op_Elements:
+ case MDefinition::Op_TypedArrayElements:
+ case MDefinition::Op_TypedObjectElements:
+ MOZ_ASSERT(ins->numOperands() == 1);
+ ownerObject = ins->getOperand(0);
+ break;
+ case MDefinition::Op_Slots:
+ ownerObject = ins->toSlots()->object();
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ MOZ_ASSERT(ownerObject->type() == MIRType::Object);
+
+ if (ownerObject->isConstant()) {
+ // Constants are kept alive by other pointers, for instance
+ // ImmGCPtr in JIT code.
+ continue;
+ }
+
+ for (MUseDefIterator uses(ins); uses; uses++) {
+ MInstruction* use = uses.def()->toInstruction();
+
+ if (use->isStoreElementHole()) {
+ // StoreElementHole has an explicit object operand. If GVN
+ // is disabled, we can get different unbox instructions with
+ // the same object as input, so we check for that case.
+ MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() && !ownerObject->isUnbox(),
+ use->toStoreElementHole()->object() == ownerObject);
+ continue;
+ }
+
+ if (use->isFallibleStoreElement()) {
+ // See StoreElementHole case above.
+ MOZ_ASSERT_IF(!use->toFallibleStoreElement()->object()->isUnbox() && !ownerObject->isUnbox(),
+ use->toFallibleStoreElement()->object() == ownerObject);
+ continue;
+ }
+
+ if (use->isInArray()) {
+ // See StoreElementHole case above.
+ MOZ_ASSERT_IF(!use->toInArray()->object()->isUnbox() && !ownerObject->isUnbox(),
+ use->toInArray()->object() == ownerObject);
+ continue;
+ }
+
+ if (!NeedsKeepAlive(ins, use))
+ continue;
+
+ if (!graph.alloc().ensureBallast())
+ return false;
+ MKeepAliveObject* keepAlive = MKeepAliveObject::New(graph.alloc(), ownerObject);
+ use->block()->insertAfter(use, keepAlive);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+LinearSum::multiply(int32_t scale)
+{
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
+ return false;
+ }
+ return SafeMul(scale, constant_, &constant_);
+}
+
+bool
+LinearSum::divide(uint32_t scale)
+{
+ MOZ_ASSERT(scale > 0);
+
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (terms_[i].scale % scale != 0)
+ return false;
+ }
+ if (constant_ % scale != 0)
+ return false;
+
+ for (size_t i = 0; i < terms_.length(); i++)
+ terms_[i].scale /= scale;
+ constant_ /= scale;
+
+ return true;
+}
+
+bool
+LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */)
+{
+ for (size_t i = 0; i < other.terms_.length(); i++) {
+ int32_t newScale = scale;
+ if (!SafeMul(scale, other.terms_[i].scale, &newScale))
+ return false;
+ if (!add(other.terms_[i].term, newScale))
+ return false;
+ }
+ int32_t newConstant = scale;
+ if (!SafeMul(scale, other.constant_, &newConstant))
+ return false;
+ return add(newConstant);
+}
+
+bool
+LinearSum::add(SimpleLinearSum other, int32_t scale)
+{
+ if (other.term && !add(other.term, scale))
+ return false;
+
+ int32_t constant;
+ if (!SafeMul(other.constant, scale, &constant))
+ return false;
+
+ return add(constant);
+}
+
+bool
+LinearSum::add(MDefinition* term, int32_t scale)
+{
+ MOZ_ASSERT(term);
+
+ if (scale == 0)
+ return true;
+
+ if (MConstant* termConst = term->maybeConstantValue()) {
+ int32_t constant = termConst->toInt32();
+ if (!SafeMul(constant, scale, &constant))
+ return false;
+ return add(constant);
+ }
+
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (term == terms_[i].term) {
+ if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale))
+ return false;
+ if (terms_[i].scale == 0) {
+ terms_[i] = terms_.back();
+ terms_.popBack();
+ }
+ return true;
+ }
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!terms_.append(LinearTerm(term, scale)))
+ oomUnsafe.crash("LinearSum::add");
+
+ return true;
+}
+
+bool
+LinearSum::add(int32_t constant)
+{
+ return SafeAdd(constant, constant_, &constant_);
+}
+
+void
+LinearSum::dump(GenericPrinter& out) const
+{
+ for (size_t i = 0; i < terms_.length(); i++) {
+ int32_t scale = terms_[i].scale;
+ int32_t id = terms_[i].term->id();
+ MOZ_ASSERT(scale);
+ if (scale > 0) {
+ if (i)
+ out.printf("+");
+ if (scale == 1)
+ out.printf("#%d", id);
+ else
+ out.printf("%d*#%d", scale, id);
+ } else if (scale == -1) {
+ out.printf("-#%d", id);
+ } else {
+ out.printf("%d*#%d", scale, id);
+ }
+ }
+ if (constant_ > 0)
+ out.printf("+%d", constant_);
+ else if (constant_ < 0)
+ out.printf("%d", constant_);
+}
+
+void
+LinearSum::dump() const
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+MDefinition*
+jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum, bool convertConstant)
+{
+ MDefinition* def = nullptr;
+
+ for (size_t i = 0; i < sum.numTerms(); i++) {
+ LinearTerm term = sum.term(i);
+ MOZ_ASSERT(!term.term->isConstant());
+ if (term.scale == 1) {
+ if (def) {
+ def = MAdd::New(alloc, def, term.term);
+ def->toAdd()->setInt32Specialization();
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ def = term.term;
+ }
+ } else if (term.scale == -1) {
+ if (!def) {
+ def = MConstant::New(alloc, Int32Value(0));
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ }
+ def = MSub::New(alloc, def, term.term);
+ def->toSub()->setInt32Specialization();
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ MOZ_ASSERT(term.scale != 0);
+ MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
+ block->insertAtEnd(factor);
+ MMul* mul = MMul::New(alloc, term.term, factor);
+ mul->setInt32Specialization();
+ block->insertAtEnd(mul);
+ mul->computeRange(alloc);
+ if (def) {
+ def = MAdd::New(alloc, def, mul);
+ def->toAdd()->setInt32Specialization();
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ def = mul;
+ }
+ }
+ }
+
+ if (convertConstant && sum.constant()) {
+ MConstant* constant = MConstant::New(alloc, Int32Value(sum.constant()));
+ block->insertAtEnd(constant);
+ constant->computeRange(alloc);
+ if (def) {
+ def = MAdd::New(alloc, def, constant);
+ def->toAdd()->setInt32Specialization();
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ def = constant;
+ }
+ }
+
+ if (!def) {
+ def = MConstant::New(alloc, Int32Value(0));
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ }
+
+ return def;
+}
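+
+// Sketch of the lowering (hypothetical sum): 2*x + 3 with convertConstant
+// set emits |MConstant(2)|, |MMul(x, 2)|, |MConstant(3)| and |MAdd(mul, 3)|
+// at the end of |block|, all int32-specialized, and returns the final MAdd.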
+
+MCompare*
+jit::ConvertLinearInequality(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum)
+{
+ LinearSum lhs(sum);
+
+ // Look for a term with a -1 scale which we can use for the rhs.
+ MDefinition* rhsDef = nullptr;
+ for (size_t i = 0; i < lhs.numTerms(); i++) {
+ if (lhs.term(i).scale == -1) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ rhsDef = lhs.term(i).term;
+ if (!lhs.add(rhsDef, 1))
+ oomUnsafe.crash("ConvertLinearInequality");
+ break;
+ }
+ }
+
+ MDefinition* lhsDef = nullptr;
+ JSOp op = JSOP_GE;
+
+ do {
+ if (!lhs.numTerms()) {
+ lhsDef = MConstant::New(alloc, Int32Value(lhs.constant()));
+ block->insertAtEnd(lhsDef->toInstruction());
+ lhsDef->computeRange(alloc);
+ break;
+ }
+
+ lhsDef = ConvertLinearSum(alloc, block, lhs);
+ if (lhs.constant() == 0)
+ break;
+
+ if (lhs.constant() == -1) {
+ op = JSOP_GT;
+ break;
+ }
+
+ if (!rhsDef) {
+ int32_t constant = lhs.constant();
+ if (SafeMul(constant, -1, &constant)) {
+ rhsDef = MConstant::New(alloc, Int32Value(constant));
+ block->insertAtEnd(rhsDef->toInstruction());
+ rhsDef->computeRange(alloc);
+ break;
+ }
+ }
+
+ MDefinition* constant = MConstant::New(alloc, Int32Value(lhs.constant()));
+ block->insertAtEnd(constant->toInstruction());
+ constant->computeRange(alloc);
+ lhsDef = MAdd::New(alloc, lhsDef, constant);
+ lhsDef->toAdd()->setInt32Specialization();
+ block->insertAtEnd(lhsDef->toInstruction());
+ lhsDef->computeRange(alloc);
+ } while (false);
+
+ if (!rhsDef) {
+ rhsDef = MConstant::New(alloc, Int32Value(0));
+ block->insertAtEnd(rhsDef->toInstruction());
+ rhsDef->computeRange(alloc);
+ }
+
+ MCompare* compare = MCompare::New(alloc, lhsDef, rhsDef, op);
+ block->insertAtEnd(compare);
+ compare->setCompareType(MCompare::Compare_Int32);
+
+ return compare;
+}
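+
+// For example (hypothetical sum): the inequality |x - y >= 0| has a term
+// with scale -1, so y is peeled off as the rhs and the result is an int32
+// MCompare encoding |x >= y|.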
+
+static bool
+AnalyzePoppedThis(JSContext* cx, ObjectGroup* group,
+ MDefinition* thisValue, MInstruction* ins, bool definitelyExecuted,
+ HandlePlainObject baseobj,
+ Vector<TypeNewScript::Initializer>* initializerList,
+ Vector<PropertyName*>* accessedProperties,
+ bool* phandled)
+{
+ // Determine the effect that a use of the |this| value when calling |new|
+ // on a script has on the properties definitely held by the new object.
+
+ if (ins->isCallSetProperty()) {
+ MCallSetProperty* setprop = ins->toCallSetProperty();
+
+ if (setprop->object() != thisValue)
+ return true;
+
+ if (setprop->name() == cx->names().prototype ||
+ setprop->name() == cx->names().proto ||
+ setprop->name() == cx->names().constructor)
+ {
+ return true;
+ }
+
+ // Ignore assignments to properties that were already written to.
+ if (baseobj->lookup(cx, NameToId(setprop->name()))) {
+ *phandled = true;
+ return true;
+ }
+
+ // Don't add definite properties for properties that were already
+ // read in the constructor.
+ for (size_t i = 0; i < accessedProperties->length(); i++) {
+ if ((*accessedProperties)[i] == setprop->name())
+ return true;
+ }
+
+ // Assignments to new properties must always execute.
+ if (!definitelyExecuted)
+ return true;
+
+ RootedId id(cx, NameToId(setprop->name()));
+ if (!AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
+ // The prototype chain already contains a getter/setter for this
+ // property, or type information is too imprecise.
+ return true;
+ }
+
+ // Add the property to the object, being careful not to update type information.
+ DebugOnly<unsigned> slotSpan = baseobj->slotSpan();
+ MOZ_ASSERT(!baseobj->containsPure(id));
+ if (!baseobj->addDataProperty(cx, id, baseobj->slotSpan(), JSPROP_ENUMERATE))
+ return false;
+ MOZ_ASSERT(baseobj->slotSpan() != slotSpan);
+ MOZ_ASSERT(!baseobj->inDictionaryMode());
+
+ Vector<MResumePoint*> callerResumePoints(cx);
+ for (MResumePoint* rp = ins->block()->callerResumePoint();
+ rp;
+ rp = rp->block()->callerResumePoint())
+ {
+ if (!callerResumePoints.append(rp))
+ return false;
+ }
+
+ for (int i = callerResumePoints.length() - 1; i >= 0; i--) {
+ MResumePoint* rp = callerResumePoints[i];
+ JSScript* script = rp->block()->info().script();
+ TypeNewScript::Initializer entry(TypeNewScript::Initializer::SETPROP_FRAME,
+ script->pcToOffset(rp->pc()));
+ if (!initializerList->append(entry))
+ return false;
+ }
+
+ JSScript* script = ins->block()->info().script();
+ TypeNewScript::Initializer entry(TypeNewScript::Initializer::SETPROP,
+ script->pcToOffset(setprop->resumePoint()->pc()));
+ if (!initializerList->append(entry))
+ return false;
+
+ *phandled = true;
+ return true;
+ }
+
+ if (ins->isCallGetProperty()) {
+ MCallGetProperty* get = ins->toCallGetProperty();
+
+ /*
+ * Properties can be read from the 'this' object if the following hold:
+ *
+ * - The read is not on a getter along the prototype chain, which
+ * could cause 'this' to escape.
+ *
+ * - The accessed property is either already a definite property or
+ * is not later added as one. Since the definite properties are
+ * added to the object at the point of its creation, reading a
+ * definite property before it is assigned could incorrectly hit.
+ */
+ RootedId id(cx, NameToId(get->name()));
+ if (!baseobj->lookup(cx, id) && !accessedProperties->append(get->name()))
+ return false;
+
+ if (!AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
+ // The |this| value can escape if any property reads it does go
+ // through a getter.
+ return true;
+ }
+
+ *phandled = true;
+ return true;
+ }
+
+ if (ins->isPostWriteBarrier()) {
+ *phandled = true;
+ return true;
+ }
+
+ return true;
+}
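+
+// For a constructor such as |function Point(x, y) { this.x = x; this.y = y; }|
+// (illustrative), both stores to |this| are handled by the CallSetProperty
+// case above, so x and y become definite properties of the template object.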
+
+static int
+CmpInstructions(const void* a, const void* b)
+{
+ return (*static_cast<MInstruction * const*>(a))->id() -
+ (*static_cast<MInstruction * const*>(b))->id();
+}
+
+bool
+jit::AnalyzeNewScriptDefiniteProperties(JSContext* cx, JSFunction* fun,
+ ObjectGroup* group, HandlePlainObject baseobj,
+ Vector<TypeNewScript::Initializer>* initializerList)
+{
+ MOZ_ASSERT(cx->zone()->types.activeAnalysis);
+
+ // When invoking 'new' on the specified script, try to find some properties
+ // which will definitely be added to the created object before it has a
+ // chance to escape and be accessed elsewhere.
+
+ RootedScript script(cx, fun->getOrCreateScript(cx));
+ if (!script)
+ return false;
+
+ if (!jit::IsIonEnabled(cx) || !jit::IsBaselineEnabled(cx) || !script->canBaselineCompile())
+ return true;
+
+ static const uint32_t MAX_SCRIPT_SIZE = 2000;
+ if (script->length() > MAX_SCRIPT_SIZE)
+ return true;
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, script);
+ AutoTraceLog logScript(logger, event);
+ AutoTraceLog logCompile(logger, TraceLogger_IonAnalysis);
+
+ Vector<PropertyName*> accessedProperties(cx);
+
+ LifoAlloc alloc(TempAllocator::PreferredLifoChunkSize);
+ TempAllocator temp(&alloc);
+ JitContext jctx(cx, &temp);
+
+ if (!jit::CanLikelyAllocateMoreExecutableMemory())
+ return true;
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return false;
+
+ if (!script->hasBaselineScript()) {
+ MethodStatus status = BaselineCompile(cx, script);
+ if (status == Method_Error)
+ return false;
+ if (status != Method_Compiled)
+ return true;
+ }
+
+ TypeScript::SetThis(cx, script, TypeSet::ObjectType(group));
+
+ MIRGraph graph(&temp);
+ InlineScriptTree* inlineScriptTree = InlineScriptTree::New(&temp, nullptr, nullptr, script);
+ if (!inlineScriptTree)
+ return false;
+
+ CompileInfo info(script, fun,
+ /* osrPc = */ nullptr,
+ Analysis_DefiniteProperties,
+ script->needsArgsObj(),
+ inlineScriptTree);
+
+ const OptimizationInfo* optimizationInfo = IonOptimizations.get(OptimizationLevel::Normal);
+
+ CompilerConstraintList* constraints = NewCompilerConstraintList(temp);
+ if (!constraints) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ BaselineInspector inspector(script);
+ const JitCompileOptions options(cx);
+
+ IonBuilder builder(cx, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
+ &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
+
+ if (!builder.build()) {
+ if (cx->isThrowingOverRecursed() ||
+ cx->isThrowingOutOfMemory() ||
+ builder.abortReason() == AbortReason_Alloc)
+ {
+ return false;
+ }
+ MOZ_ASSERT(!cx->isExceptionPending());
+ return true;
+ }
+
+ FinishDefinitePropertiesAnalysis(cx, constraints);
+
+ if (!SplitCriticalEdges(graph)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RenumberBlocks(graph);
+
+ if (!BuildDominatorTree(graph)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!EliminatePhis(&builder, graph, AggressiveObservability)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ MDefinition* thisValue = graph.entryBlock()->getSlot(info.thisSlot());
+
+ // Get a list of instructions using the |this| value in the order they
+ // appear in the graph.
+ Vector<MInstruction*> instructions(cx);
+
+ for (MUseDefIterator uses(thisValue); uses; uses++) {
+ MDefinition* use = uses.def();
+
+ // Don't track |this| through assignments to phis.
+ if (!use->isInstruction())
+ return true;
+
+ if (!instructions.append(use->toInstruction()))
+ return false;
+ }
+
+ // Sort the instructions to visit in increasing order.
+ qsort(instructions.begin(), instructions.length(),
+ sizeof(MInstruction*), CmpInstructions);
+
+ // Find all exit blocks in the graph.
+ Vector<MBasicBlock*> exitBlocks(cx);
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ if (!block->numSuccessors() && !exitBlocks.append(*block))
+ return false;
+ }
+
+ // id of the last block which added a new property.
+ size_t lastAddedBlock = 0;
+
+ for (size_t i = 0; i < instructions.length(); i++) {
+ MInstruction* ins = instructions[i];
+
+ // Track whether the use of |this| is in unconditional code, i.e.
+ // the block dominates all graph exits.
+ bool definitelyExecuted = true;
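+        // Walk up the dominator tree from each exit; hitting the
+        // self-dominating entry block before reaching ins->block() means
+        // the instruction's block does not dominate that exit.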
+ for (size_t i = 0; i < exitBlocks.length(); i++) {
+ for (MBasicBlock* exit = exitBlocks[i];
+ exit != ins->block();
+ exit = exit->immediateDominator())
+ {
+ if (exit == exit->immediateDominator()) {
+ definitelyExecuted = false;
+ break;
+ }
+ }
+ }
+
+ // Also check to see if the instruction is inside a loop body. Even if
+ // an access will always execute in the script, if it executes multiple
+ // times then we can get confused when rolling back objects while
+ // clearing the new script information.
+ if (ins->block()->loopDepth() != 0)
+ definitelyExecuted = false;
+
+ bool handled = false;
+ size_t slotSpan = baseobj->slotSpan();
+ if (!AnalyzePoppedThis(cx, group, thisValue, ins, definitelyExecuted,
+ baseobj, initializerList, &accessedProperties, &handled))
+ {
+ return false;
+ }
+ if (!handled)
+ break;
+
+ if (slotSpan != baseobj->slotSpan()) {
+ MOZ_ASSERT(ins->block()->id() >= lastAddedBlock);
+ lastAddedBlock = ins->block()->id();
+ }
+ }
+
+ if (baseobj->slotSpan() != 0) {
+ // We found some definite properties, but their correctness is still
+ // contingent on the correct frames being inlined. Add constraints to
+ // invalidate the definite properties if additional functions could be
+ // called at the inline frame sites.
+ Vector<MBasicBlock*> exitBlocks(cx);
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+ // Inlining decisions made after the last new property was added to
+ // the object don't need to be frozen.
+ if (block->id() > lastAddedBlock)
+ break;
+ if (MResumePoint* rp = block->callerResumePoint()) {
+ if (block->numPredecessors() == 1 && block->getPredecessor(0) == rp->block()) {
+ JSScript* script = rp->block()->info().script();
+ if (!AddClearDefiniteFunctionUsesInScript(cx, group, script, block->info().script()))
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+ArgumentsUseCanBeLazy(JSContext* cx, JSScript* script, MInstruction* ins, size_t index,
+ bool* argumentsContentsObserved)
+{
+ // We can read the frame's arguments directly for f.apply(x, arguments).
+ if (ins->isCall()) {
+ if (*ins->toCall()->resumePoint()->pc() == JSOP_FUNAPPLY &&
+ ins->toCall()->numActualArgs() == 2 &&
+ index == MCall::IndexOfArgument(1))
+ {
+ *argumentsContentsObserved = true;
+ return true;
+ }
+ }
+
+ // arguments[i] can read fp->canonicalActualArg(i) directly.
+ if (ins->isCallGetElement() && index == 0) {
+ *argumentsContentsObserved = true;
+ return true;
+ }
+
+ // MGetArgumentsObjectArg needs to be considered as a use that allows laziness.
+ if (ins->isGetArgumentsObjectArg() && index == 0)
+ return true;
+
+    // arguments.length can read fp->numActualArgs() directly.
+ // arguments.callee can read fp->callee() directly if the arguments object
+ // is mapped.
+ if (ins->isCallGetProperty() && index == 0 &&
+ (ins->toCallGetProperty()->name() == cx->names().length ||
+ (script->hasMappedArgsObj() && ins->toCallGetProperty()->name() == cx->names().callee)))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+jit::AnalyzeArgumentsUsage(JSContext* cx, JSScript* scriptArg)
+{
+ RootedScript script(cx, scriptArg);
+ AutoEnterAnalysis enter(cx);
+
+ MOZ_ASSERT(!script->analyzedArgsUsage());
+
+ // Treat the script as needing an arguments object until we determine it
+ // does not need one. This both allows us to easily see where the arguments
+ // object can escape through assignments to the function's named arguments,
+ // and also simplifies handling of early returns.
+ script->setNeedsArgsObj(true);
+
+ // Always construct arguments objects when in debug mode, for generator
+ // scripts (generators can be suspended when speculation fails) or when
+ // direct eval is present.
+ //
+ // FIXME: Don't build arguments for ES6 generator expressions.
+ if (scriptArg->isDebuggee() || script->isGenerator() || script->bindingsAccessedDynamically())
+ return true;
+
+ if (!jit::IsIonEnabled(cx))
+ return true;
+
+ static const uint32_t MAX_SCRIPT_SIZE = 10000;
+ if (script->length() > MAX_SCRIPT_SIZE)
+ return true;
+
+ if (!script->ensureHasTypes(cx))
+ return false;
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, script);
+ AutoTraceLog logScript(logger, event);
+ AutoTraceLog logCompile(logger, TraceLogger_IonAnalysis);
+
+ LifoAlloc alloc(TempAllocator::PreferredLifoChunkSize);
+ TempAllocator temp(&alloc);
+ JitContext jctx(cx, &temp);
+
+ if (!jit::CanLikelyAllocateMoreExecutableMemory())
+ return true;
+
+ if (!cx->compartment()->ensureJitCompartmentExists(cx))
+ return false;
+
+ MIRGraph graph(&temp);
+ InlineScriptTree* inlineScriptTree = InlineScriptTree::New(&temp, nullptr, nullptr, script);
+ if (!inlineScriptTree) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ CompileInfo info(script, script->functionNonDelazifying(),
+ /* osrPc = */ nullptr,
+ Analysis_ArgumentsUsage,
+ /* needsArgsObj = */ true,
+ inlineScriptTree);
+
+ const OptimizationInfo* optimizationInfo = IonOptimizations.get(OptimizationLevel::Normal);
+
+ CompilerConstraintList* constraints = NewCompilerConstraintList(temp);
+ if (!constraints) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ BaselineInspector inspector(script);
+ const JitCompileOptions options(cx);
+
+ IonBuilder builder(nullptr, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
+ &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
+
+ if (!builder.build()) {
+ if (cx->isThrowingOverRecursed() || builder.abortReason() == AbortReason_Alloc)
+ return false;
+ MOZ_ASSERT(!cx->isExceptionPending());
+ return true;
+ }
+
+ if (!SplitCriticalEdges(graph)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RenumberBlocks(graph);
+
+ if (!BuildDominatorTree(graph)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!EliminatePhis(&builder, graph, AggressiveObservability)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ MDefinition* argumentsValue = graph.entryBlock()->getSlot(info.argsObjSlot());
+
+ bool argumentsContentsObserved = false;
+
+ for (MUseDefIterator uses(argumentsValue); uses; uses++) {
+ MDefinition* use = uses.def();
+
+ // Don't track |arguments| through assignments to phis.
+ if (!use->isInstruction())
+ return true;
+
+ if (!ArgumentsUseCanBeLazy(cx, script, use->toInstruction(), use->indexOf(uses.use()),
+ &argumentsContentsObserved))
+ {
+ return true;
+ }
+ }
+
+ // If a script explicitly accesses the contents of 'arguments', and has
+ // formals which may be stored as part of a call object, don't use lazy
+ // arguments. The compiler can then assume that accesses through
+ // arguments[i] will be on unaliased variables.
+ if (script->funHasAnyAliasedFormal() && argumentsContentsObserved)
+ return true;
+
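+    // Every observed use of |arguments| can read the frame directly, so
+    // the script can do without an arguments object.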
+ script->setNeedsArgsObj(false);
+ return true;
+}
+
+// Mark all the blocks that are in the loop with the given header.
+// Returns the number of blocks marked. Set *canOsr to true if the loop is
+// reachable from both the normal entry and the OSR entry.
+size_t
+jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr)
+{
+#ifdef DEBUG
+ for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd(); i != e; ++i)
+ MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
+#endif
+
+ MBasicBlock* osrBlock = graph.osrBlock();
+ *canOsr = false;
+
+ // The blocks are in RPO; start at the loop backedge, which marks the bottom
+ // of the loop, and walk up until we get to the header. Loops may be
+ // discontiguous, so we trace predecessors to determine which blocks are
+ // actually part of the loop. The backedge is always part of the loop, and
+ // so are its predecessors, transitively, up to the loop header or an OSR
+ // entry.
+ MBasicBlock* backedge = header->backedge();
+ backedge->mark();
+ size_t numMarked = 1;
+ for (PostorderIterator i = graph.poBegin(backedge); ; ++i) {
+ MOZ_ASSERT(i != graph.poEnd(),
+ "Reached the end of the graph while searching for the loop header");
+ MBasicBlock* block = *i;
+ // If we've reached the loop header, we're done.
+ if (block == header)
+ break;
+ // A block not marked by the time we reach it is not in the loop.
+ if (!block->isMarked())
+ continue;
+ // This block is in the loop; trace to its predecessors.
+ for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
+ MBasicBlock* pred = block->getPredecessor(p);
+ if (pred->isMarked())
+ continue;
+
+ // Blocks dominated by the OSR entry are not part of the loop
+ // (unless they aren't reachable from the normal entry).
+ if (osrBlock && pred != header &&
+ osrBlock->dominates(pred) && !osrBlock->dominates(header))
+ {
+ *canOsr = true;
+ continue;
+ }
+
+ MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
+ "Loop block not between loop header and loop backedge");
+
+ pred->mark();
+ ++numMarked;
+
+ // A nested loop may not exit back to the enclosing loop at its
+ // bottom. If we just marked its header, then the whole nested loop
+ // is part of the enclosing loop.
+ if (pred->isLoopHeader()) {
+ MBasicBlock* innerBackedge = pred->backedge();
+ if (!innerBackedge->isMarked()) {
+ // Mark its backedge so that we add all of its blocks to the
+ // outer loop as we walk upwards.
+ innerBackedge->mark();
+ ++numMarked;
+
+ // If the nested loop is not contiguous, we may have already
+ // passed its backedge. If this happens, back up.
+                    if (innerBackedge->id() > block->id()) {
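+                        // Reposition the iterator so that the enclosing
+                        // loop's ++i resumes the walk at the inner backedge.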
+ i = graph.poBegin(innerBackedge);
+ --i;
+ }
+ }
+ }
+ }
+ }
+
+ // If there's no path connecting the header to the backedge, then this isn't
+ // actually a loop. This can happen when the code starts with a loop but GVN
+ // folds some branches away.
+ if (!header->isMarked()) {
+ jit::UnmarkLoopBlocks(graph, header);
+ return 0;
+ }
+
+ return numMarked;
+}
+
+// Unmark all the blocks that are in the loop with the given header.
+void
+jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header)
+{
+ MBasicBlock* backedge = header->backedge();
+ for (ReversePostorderIterator i = graph.rpoBegin(header); ; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(),
+ "Reached the end of the graph while searching for the backedge");
+ MBasicBlock* block = *i;
+ if (block->isMarked()) {
+ block->unmark();
+ if (block == backedge)
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd(); i != e; ++i)
+ MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
+#endif
+}
+
+// Reorder the blocks in the loop starting at the given header to be contiguous.
+static void
+MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header, size_t numMarked)
+{
+ MBasicBlock* backedge = header->backedge();
+
+ MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
+ MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");
+
+ // If there are any blocks between the loop header and the loop backedge
+ // that are not part of the loop, prepare to move them to the end. We keep
+ // them in order, which preserves RPO.
+ ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
+ insertIter++;
+ MBasicBlock* insertPt = *insertIter;
+
+ // Visit all the blocks from the loop header to the loop backedge.
+ size_t headerId = header->id();
+ size_t inLoopId = headerId;
+ size_t notInLoopId = inLoopId + numMarked;
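+    // Blocks kept in the loop are renumbered starting at the header's id;
+    // blocks moved out are given ids just past the end of the loop.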
+ ReversePostorderIterator i = graph.rpoBegin(header);
+ for (;;) {
+ MBasicBlock* block = *i++;
+ MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
+ "Loop backedge should be last block in loop");
+
+ if (block->isMarked()) {
+ // This block is in the loop.
+ block->unmark();
+ block->setId(inLoopId++);
+ // If we've reached the loop backedge, we're done!
+ if (block == backedge)
+ break;
+ } else {
+ // This block is not in the loop. Move it to the end.
+ graph.moveBlockBefore(insertPt, block);
+ block->setId(notInLoopId++);
+ }
+ }
+ MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
+ MOZ_ASSERT(inLoopId == headerId + numMarked, "Wrong number of blocks kept in loop");
+ MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id() : graph.numBlocks()),
+ "Wrong number of blocks moved out of loop");
+}
+
+// Reorder the blocks in the graph so that loops are contiguous.
+bool
+jit::MakeLoopsContiguous(MIRGraph& graph)
+{
+ // Visit all loop headers (in any order).
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* header = *i;
+ if (!header->isLoopHeader())
+ continue;
+
+ // Mark all blocks that are actually part of the loop.
+ bool canOsr;
+ size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
+
+ // If the loop isn't a loop, don't try to optimize it.
+ if (numMarked == 0)
+ continue;
+
+ // If there's an OSR block entering the loop in the middle, it's tricky,
+ // so don't try to handle it, for now.
+ if (canOsr) {
+ UnmarkLoopBlocks(graph, header);
+ continue;
+ }
+
+ // Move all blocks between header and backedge that aren't marked to
+ // the end of the loop, making the loop itself contiguous.
+ MakeLoopContiguous(graph, header, numMarked);
+ }
+
+ return true;
+}
+
+MRootList::MRootList(TempAllocator& alloc)
+{
+#define INIT_VECTOR(name, _0, _1) \
+ roots_[JS::RootKind::name].emplace(alloc);
+JS_FOR_EACH_TRACEKIND(INIT_VECTOR)
+#undef INIT_VECTOR
+}
+
+template <typename T>
+static void
+TraceVector(JSTracer* trc, const MRootList::RootVector& vector, const char* name)
+{
+ for (auto ptr : vector) {
+ T ptrT = static_cast<T>(ptr);
+ TraceManuallyBarrieredEdge(trc, &ptrT, name);
+ MOZ_ASSERT(ptr == ptrT, "Shouldn't move without updating MIR pointers");
+ }
+}
+
+void
+MRootList::trace(JSTracer* trc)
+{
+#define TRACE_ROOTS(name, type, _) \
+ TraceVector<type*>(trc, *roots_[JS::RootKind::name], "mir-root-" #name);
+JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
+#undef TRACE_ROOTS
+}
+
+MOZ_MUST_USE bool
+jit::CreateMIRRootList(IonBuilder& builder)
+{
+ MOZ_ASSERT(!builder.info().isAnalysis());
+
+ TempAllocator& alloc = builder.alloc();
+ MIRGraph& graph = builder.graph();
+
+ MRootList* roots = new(alloc.fallible()) MRootList(alloc);
+ if (!roots)
+ return false;
+
+ JSScript* prevScript = nullptr;
+
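+    // Walk every block and instruction, collecting the GC things referenced
+    // by the MIR; a script is appended once per contiguous run of blocks.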
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+
+ JSScript* script = block->info().script();
+ if (script != prevScript) {
+ if (!roots->append(script))
+ return false;
+ prevScript = script;
+ }
+
+ for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; iter++) {
+ if (!iter->appendRoots(*roots))
+ return false;
+ }
+ }
+
+ builder.setRootList(*roots);
+ return true;
+}
+
+static void
+DumpDefinition(GenericPrinter& out, MDefinition* def, size_t depth)
+{
+ MDefinition::PrintOpcodeName(out, def->op());
+
+ if (depth == 0)
+ return;
+
+ for (size_t i = 0; i < def->numOperands(); i++) {
+ out.printf(" (");
+ DumpDefinition(out, def->getOperand(i), depth - 1);
+ out.printf(")");
+ }
+}
+
+void
+jit::DumpMIRExpressions(MIRGraph& graph)
+{
+ if (!JitSpewEnabled(JitSpew_MIRExpressions))
+ return;
+
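+    // Print each instruction with its operand expressions nested two deep.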
+ size_t depth = 2;
+
+ Fprinter& out = JitSpewPrinter();
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; iter++) {
+ DumpDefinition(out, *iter, depth);
+ out.printf("\n");
+ }
+ }
+}
diff --git a/js/src/jit/IonAnalysis.h b/js/src/jit/IonAnalysis.h
new file mode 100644
index 000000000..1ce8edc80
--- /dev/null
+++ b/js/src/jit/IonAnalysis.h
@@ -0,0 +1,218 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonAnalysis_h
+#define jit_IonAnalysis_h
+
+// This file declares various analysis passes that operate on MIR.
+
+#include "jit/JitAllocPolicy.h"
+#include "jit/MIR.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+MOZ_MUST_USE bool
+PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph);
+
+MOZ_MUST_USE bool
+FoldTests(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+SplitCriticalEdges(MIRGraph& graph);
+
+bool
+IsUint32Type(const MDefinition* def);
+
+enum Observability {
+ ConservativeObservability,
+ AggressiveObservability
+};
+
+MOZ_MUST_USE bool
+EliminatePhis(MIRGenerator* mir, MIRGraph& graph, Observability observe);
+
+size_t
+MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr);
+
+void
+UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header);
+
+MOZ_MUST_USE bool
+MakeLoopsContiguous(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph);
+
+MOZ_MUST_USE bool
+EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph);
+
+MOZ_MUST_USE bool
+ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph);
+
+MOZ_MUST_USE bool
+MakeMRegExpHoistable(MIRGenerator* mir, MIRGraph& graph);
+
+void
+RenumberBlocks(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph, bool updateAliasAnalysis,
+ bool underValueNumberer = false);
+
+MOZ_MUST_USE bool
+RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph, uint32_t numMarkedBlocks);
+
+MOZ_MUST_USE bool
+CreateMIRRootList(IonBuilder& builder);
+
+void
+ClearDominatorTree(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+BuildDominatorTree(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+BuildPhiReverseMapping(MIRGraph& graph);
+
+void
+AssertBasicGraphCoherency(MIRGraph& graph);
+
+void
+AssertGraphCoherency(MIRGraph& graph);
+
+void
+AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer = false);
+
+MOZ_MUST_USE bool
+EliminateRedundantChecks(MIRGraph& graph);
+
+MOZ_MUST_USE bool
+AddKeepAliveInstructions(MIRGraph& graph);
+
+class MDefinition;
+
+// Simple linear sum of the form 'n' or 'x + n'.
+struct SimpleLinearSum
+{
+ MDefinition* term;
+ int32_t constant;
+
+ SimpleLinearSum(MDefinition* term, int32_t constant)
+ : term(term), constant(constant)
+ {}
+};
+
+// Math done on a linear sum can either be performed in a modulo space, in
+// which case overflows wrap around, or in the integer space, in which case
+// we have to check that no overflow can happen when summing constants.
+//
+// When the caller does not specify which space to use, it is deduced from
+// the definition.
+enum class MathSpace {
+ Modulo,
+ Infinite,
+ Unknown
+};
+
+SimpleLinearSum
+ExtractLinearSum(MDefinition* ins, MathSpace space = MathSpace::Unknown);
+
+MOZ_MUST_USE bool
+ExtractLinearInequality(MTest* test, BranchDirection direction,
+ SimpleLinearSum* plhs, MDefinition** prhs, bool* plessEqual);
+
+struct LinearTerm
+{
+ MDefinition* term;
+ int32_t scale;
+
+ LinearTerm(MDefinition* term, int32_t scale)
+ : term(term), scale(scale)
+ {
+ }
+};
+
+// General linear sum of the form 'x1*n1 + x2*n2 + ... + n'
+class LinearSum
+{
+ public:
+ explicit LinearSum(TempAllocator& alloc)
+ : terms_(alloc),
+ constant_(0)
+ {
+ }
+
+ LinearSum(const LinearSum& other)
+ : terms_(other.terms_.allocPolicy()),
+ constant_(other.constant_)
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!terms_.appendAll(other.terms_))
+ oomUnsafe.crash("LinearSum::LinearSum");
+ }
+
+ // These return false on an integer overflow, and afterwards the sum must
+ // not be used.
+ MOZ_MUST_USE bool multiply(int32_t scale);
+ MOZ_MUST_USE bool add(const LinearSum& other, int32_t scale = 1);
+ MOZ_MUST_USE bool add(SimpleLinearSum other, int32_t scale = 1);
+ MOZ_MUST_USE bool add(MDefinition* term, int32_t scale);
+ MOZ_MUST_USE bool add(int32_t constant);
+
+ // Unlike the above function, on failure this leaves the sum unchanged and
+ // it can still be used.
+ MOZ_MUST_USE bool divide(uint32_t scale);
+
+ int32_t constant() const { return constant_; }
+ size_t numTerms() const { return terms_.length(); }
+ LinearTerm term(size_t i) const { return terms_[i]; }
+ void replaceTerm(size_t i, MDefinition* def) { terms_[i].term = def; }
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+
+ private:
+ Vector<LinearTerm, 2, JitAllocPolicy> terms_;
+ int32_t constant_;
+};
+
+// Convert all components of a linear sum (except, optionally, the constant)
+// and add any new instructions to the end of block.
+MDefinition*
+ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum,
+ bool convertConstant = false);
+
+// Convert the test 'sum >= 0' to a comparison, adding any necessary
+// instructions to the end of block.
+MCompare*
+ConvertLinearInequality(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum);
+
+MOZ_MUST_USE bool
+AnalyzeNewScriptDefiniteProperties(JSContext* cx, JSFunction* fun,
+ ObjectGroup* group, HandlePlainObject baseobj,
+ Vector<TypeNewScript::Initializer>* initializerList);
+
+MOZ_MUST_USE bool
+AnalyzeArgumentsUsage(JSContext* cx, JSScript* script);
+
+bool
+DeadIfUnused(const MDefinition* def);
+
+bool
+IsDiscardable(const MDefinition* def);
+
+void
+DumpMIRExpressions(MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonAnalysis_h */
diff --git a/js/src/jit/IonBuilder.cpp b/js/src/jit/IonBuilder.cpp
new file mode 100644
index 000000000..1488d7d34
--- /dev/null
+++ b/js/src/jit/IonBuilder.cpp
@@ -0,0 +1,14696 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonBuilder.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "builtin/Eval.h"
+#include "builtin/TypedObject.h"
+#include "frontend/SourceNotes.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineInspector.h"
+#include "jit/Ion.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitSpewer.h"
+#include "jit/Lowering.h"
+#include "jit/MIRGraph.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/Opcodes.h"
+#include "vm/RegExpStatics.h"
+#include "vm/TraceLogging.h"
+
+#include "jsopcodeinlines.h"
+#include "jsscriptinlines.h"
+
+#include "jit/CompileInfo-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/ObjectGroup-inl.h"
+#include "vm/UnboxedObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::AssertedCast;
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+using JS::TrackedStrategy;
+using JS::TrackedOutcome;
+using JS::TrackedTypeSite;
+
+class jit::BaselineFrameInspector
+{
+ public:
+ TypeSet::Type thisType;
+ JSObject* singletonEnvChain;
+
+ Vector<TypeSet::Type, 4, JitAllocPolicy> argTypes;
+ Vector<TypeSet::Type, 4, JitAllocPolicy> varTypes;
+
+ explicit BaselineFrameInspector(TempAllocator* temp)
+ : thisType(TypeSet::UndefinedType()),
+ singletonEnvChain(nullptr),
+ argTypes(*temp),
+ varTypes(*temp)
+ {}
+};
+
+BaselineFrameInspector*
+jit::NewBaselineFrameInspector(TempAllocator* temp, BaselineFrame* frame, CompileInfo* info)
+{
+ MOZ_ASSERT(frame);
+
+ BaselineFrameInspector* inspector = temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
+ if (!inspector)
+ return nullptr;
+
+ // Note: copying the actual values into a temporary structure for use
+ // during compilation could capture nursery pointers, so the values' types
+ // are recorded instead.
+
+ if (frame->isFunctionFrame())
+ inspector->thisType = TypeSet::GetMaybeUntrackedValueType(frame->thisArgument());
+
+ if (frame->environmentChain()->isSingleton())
+ inspector->singletonEnvChain = frame->environmentChain();
+
+ JSScript* script = frame->script();
+
+ if (script->functionNonDelazifying()) {
+ if (!inspector->argTypes.reserve(frame->numFormalArgs()))
+ return nullptr;
+ for (size_t i = 0; i < frame->numFormalArgs(); i++) {
+ if (script->formalIsAliased(i)) {
+ inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
+ } else if (!script->argsObjAliasesFormals()) {
+ TypeSet::Type type =
+ TypeSet::GetMaybeUntrackedValueType(frame->unaliasedFormal(i));
+ inspector->argTypes.infallibleAppend(type);
+ } else if (frame->hasArgsObj()) {
+ TypeSet::Type type =
+ TypeSet::GetMaybeUntrackedValueType(frame->argsObj().arg(i));
+ inspector->argTypes.infallibleAppend(type);
+ } else {
+ inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
+ }
+ }
+ }
+
+ if (!inspector->varTypes.reserve(frame->numValueSlots()))
+ return nullptr;
+ for (size_t i = 0; i < frame->numValueSlots(); i++) {
+ TypeSet::Type type = TypeSet::GetMaybeUntrackedValueType(*frame->valueSlot(i));
+ inspector->varTypes.infallibleAppend(type);
+ }
+
+ return inspector;
+}
+
+IonBuilder::IonBuilder(JSContext* analysisContext, CompileCompartment* comp,
+ const JitCompileOptions& options, TempAllocator* temp,
+ MIRGraph* graph, CompilerConstraintList* constraints,
+ BaselineInspector* inspector, CompileInfo* info,
+ const OptimizationInfo* optimizationInfo,
+ BaselineFrameInspector* baselineFrame, size_t inliningDepth,
+ uint32_t loopDepth)
+ : MIRGenerator(comp, options, temp, graph, info, optimizationInfo),
+ backgroundCodegen_(nullptr),
+ actionableAbortScript_(nullptr),
+ actionableAbortPc_(nullptr),
+ actionableAbortMessage_(nullptr),
+ rootList_(nullptr),
+ analysisContext(analysisContext),
+ baselineFrame_(baselineFrame),
+ constraints_(constraints),
+ analysis_(*temp, info->script()),
+ thisTypes(nullptr),
+ argTypes(nullptr),
+ typeArray(nullptr),
+ typeArrayHint(0),
+ bytecodeTypeMap(nullptr),
+ loopDepth_(loopDepth),
+ trackedOptimizationSites_(*temp),
+ lexicalCheck_(nullptr),
+ callerResumePoint_(nullptr),
+ callerBuilder_(nullptr),
+ cfgStack_(*temp),
+ loops_(*temp),
+ switches_(*temp),
+ labels_(*temp),
+ iterators_(*temp),
+ loopHeaders_(*temp),
+ inspector(inspector),
+ inliningDepth_(inliningDepth),
+ inlinedBytecodeLength_(0),
+ numLoopRestarts_(0),
+ failedBoundsCheck_(info->script()->failedBoundsCheck()),
+ failedShapeGuard_(info->script()->failedShapeGuard()),
+ failedLexicalCheck_(info->script()->failedLexicalCheck()),
+ nonStringIteration_(false),
+ lazyArguments_(nullptr),
+ inlineCallInfo_(nullptr),
+ maybeFallbackFunctionGetter_(nullptr)
+{
+ script_ = info->script();
+ scriptHasIonScript_ = script_->hasIonScript();
+ pc = info->startPC();
+ abortReason_ = AbortReason_Disable;
+
+ MOZ_ASSERT(script()->hasBaselineScript() == (info->analysisMode() != Analysis_ArgumentsUsage));
+ MOZ_ASSERT(!!analysisContext == (info->analysisMode() == Analysis_DefiniteProperties));
+ MOZ_ASSERT(script_->nTypeSets() < UINT16_MAX);
+
+ if (!info->isAnalysis())
+ script()->baselineScript()->setIonCompiledOrInlined();
+}
+
+void
+IonBuilder::clearForBackEnd()
+{
+ MOZ_ASSERT(!analysisContext);
+ baselineFrame_ = nullptr;
+
+ // The caches below allocate data from the malloc heap. Release this before
+ // later phases of compilation to avoid leaks, as the top level IonBuilder
+ // is not explicitly destroyed. Note that builders for inner scripts are
+ // constructed on the stack and will release this memory on destruction.
+ gsn.purge();
+ envCoordinateNameCache.purge();
+}
+
+bool
+IonBuilder::abort(const char* message, ...)
+{
+ // Don't call PCToLineNumber in release builds.
+#ifdef JS_JITSPEW
+ va_list ap;
+ va_start(ap, message);
+ abortFmt(message, ap);
+ va_end(ap);
+# ifdef DEBUG
+ JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(), PCToLineNumber(script(), pc));
+# else
+ JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
+# endif
+#endif
+ trackActionableAbort(message);
+ return false;
+}
+
+IonBuilder*
+IonBuilder::outermostBuilder()
+{
+ IonBuilder* builder = this;
+ while (builder->callerBuilder_)
+ builder = builder->callerBuilder_;
+ return builder;
+}
+
+void
+IonBuilder::trackActionableAbort(const char* message)
+{
+ if (!isOptimizationTrackingEnabled())
+ return;
+
+ IonBuilder* topBuilder = outermostBuilder();
+ if (topBuilder->hadActionableAbort())
+ return;
+
+ topBuilder->actionableAbortScript_ = script();
+ topBuilder->actionableAbortPc_ = pc;
+ topBuilder->actionableAbortMessage_ = message;
+}
+
+void
+IonBuilder::spew(const char* message)
+{
+ // Don't call PCToLineNumber in release builds.
+#ifdef DEBUG
+ JitSpew(JitSpew_IonMIR, "%s @ %s:%d", message, script()->filename(), PCToLineNumber(script(), pc));
+#endif
+}
+
+static inline int32_t
+GetJumpOffset(jsbytecode* pc)
+{
+ MOZ_ASSERT(CodeSpec[JSOp(*pc)].type() == JOF_JUMP);
+ return GET_JUMP_OFFSET(pc);
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::If(jsbytecode* join, MTest* test)
+{
+ CFGState state;
+ state.state = IF_TRUE;
+ state.stopAt = join;
+ state.branch.ifFalse = test->ifFalse();
+ state.branch.test = test;
+ return state;
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::IfElse(jsbytecode* trueEnd, jsbytecode* falseEnd, MTest* test)
+{
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ CFGState state;
+ // If the end of the false path is the same as the start of the
+ // false path, then the "else" block is empty and we can devolve
+ // this to the IF_TRUE case. We handle this here because there is
+ // still an extra GOTO on the true path and we want stopAt to point
+ // there, whereas the IF_TRUE case does not have the GOTO.
+ state.state = (falseEnd == ifFalse->pc())
+ ? IF_TRUE_EMPTY_ELSE
+ : IF_ELSE_TRUE;
+ state.stopAt = trueEnd;
+ state.branch.falseEnd = falseEnd;
+ state.branch.ifFalse = ifFalse;
+ state.branch.test = test;
+ return state;
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::AndOr(jsbytecode* join, MBasicBlock* lhs)
+{
+ CFGState state;
+ state.state = AND_OR;
+ state.stopAt = join;
+ state.branch.ifFalse = lhs;
+ state.branch.test = nullptr;
+ return state;
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::TableSwitch(jsbytecode* exitpc, MTableSwitch* ins)
+{
+ CFGState state;
+ state.state = TABLE_SWITCH;
+ state.stopAt = exitpc;
+ state.tableswitch.exitpc = exitpc;
+ state.tableswitch.breaks = nullptr;
+ state.tableswitch.ins = ins;
+ state.tableswitch.currentBlock = 0;
+ return state;
+}
+
+JSFunction*
+IonBuilder::getSingleCallTarget(TemporaryTypeSet* calleeTypes)
+{
+ if (!calleeTypes)
+ return nullptr;
+
+ JSObject* obj = calleeTypes->maybeSingleton();
+ if (!obj || !obj->is<JSFunction>())
+ return nullptr;
+
+ return &obj->as<JSFunction>();
+}
+
+bool
+IonBuilder::getPolyCallTargets(TemporaryTypeSet* calleeTypes, bool constructing,
+ ObjectVector& targets, uint32_t maxTargets)
+{
+ MOZ_ASSERT(targets.empty());
+
+ if (!calleeTypes)
+ return true;
+
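+    // Non-zero base flags mean the type set also contains primitive or
+    // unknown values, so no useful list of call targets can be extracted.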
+ if (calleeTypes->baseFlags() != 0)
+ return true;
+
+ unsigned objCount = calleeTypes->getObjectCount();
+
+ if (objCount == 0 || objCount > maxTargets)
+ return true;
+
+ if (!targets.reserve(objCount))
+ return false;
+ for (unsigned i = 0; i < objCount; i++) {
+ JSObject* obj = calleeTypes->getSingleton(i);
+ if (obj) {
+ MOZ_ASSERT(obj->isSingleton());
+ } else {
+ ObjectGroup* group = calleeTypes->getGroup(i);
+ if (!group)
+ continue;
+
+ obj = group->maybeInterpretedFunction();
+ if (!obj) {
+ targets.clear();
+ return true;
+ }
+
+ MOZ_ASSERT(!obj->isSingleton());
+ }
+
+ // Don't optimize if the callee is not callable or constructable per
+ // the manner it is being invoked, so that CallKnown does not have to
+ // handle these cases (they will always throw).
+ if (constructing ? !obj->isConstructor() : !obj->isCallable()) {
+ targets.clear();
+ return true;
+ }
+
+ targets.infallibleAppend(obj);
+ }
+
+ return true;
+}
+
+IonBuilder::InliningDecision
+IonBuilder::DontInline(JSScript* targetScript, const char* reason)
+{
+ if (targetScript) {
+ JitSpew(JitSpew_Inlining, "Cannot inline %s:%" PRIuSIZE ": %s",
+ targetScript->filename(), targetScript->lineno(), reason);
+ } else {
+ JitSpew(JitSpew_Inlining, "Cannot inline: %s", reason);
+ }
+
+ return InliningDecision_DontInline;
+}
+
+/*
+ * |hasCommonInliningPath| determines whether the current inlining path has been
+ * seen before based on the sequence of scripts in the chain of |IonBuilder|s.
+ *
+ * An inlining path for a function |f| is the sequence of functions whose
+ * inlinings precede |f| up to any previous occurrences of |f|.
+ * So, if we have the chain of inlinings
+ *
+ *   f1 -> f2 -> f -> f3 -> f4 -> f5 -> f
+ *   --------         --------------
+ *
+ * the inlining paths for |f| are [f2, f1] and [f5, f4, f3].
+ * When attempting to inline |f|, we find all existing inlining paths for |f|
+ * and check whether they share a common prefix with the path created were |f|
+ * inlined.
+ *
+ * For example, given mutually recursive functions |f| and |g|, a possible
+ * inlining is
+ *
+ *                      +---- Inlining stopped here...
+ *                      |
+ *                      v
+ *   a -> f -> g -> f \ -> g -> f -> g -> ...
+ *
+ * where the vertical bar denotes the termination of inlining.
+ * Inlining is terminated because we have already observed the inlining path
+ * [f] when inlining function |g|. Note that this will inline recursive
+ * functions such as |fib| only one level, as |fib| has a zero length inlining
+ * path which trivially prefixes all inlining paths.
+ *
+ */
+bool
+IonBuilder::hasCommonInliningPath(const JSScript* scriptToInline)
+{
+ // Find all previous inlinings of the |scriptToInline| and check for common
+ // inlining paths with the top of the inlining stack.
+ for (IonBuilder* it = this->callerBuilder_; it; it = it->callerBuilder_) {
+ if (it->script() != scriptToInline)
+ continue;
+
+ // This only needs to check the top of each stack for a match,
+ // as a match of length one ensures a common prefix.
+ IonBuilder* path = it->callerBuilder_;
+ if (!path || this->script() == path->script())
+ return true;
+ }
+
+ return false;
+}
+
+IonBuilder::InliningDecision
+IonBuilder::canInlineTarget(JSFunction* target, CallInfo& callInfo)
+{
+ if (!optimizationInfo().inlineInterpreted()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
+ return InliningDecision_DontInline;
+ }
+
+ if (TraceLogTextIdEnabled(TraceLogger_InlinedScripts)) {
+        return DontInline(nullptr, "Tracelogging of inlined scripts is enabled "
+ "but Tracelogger cannot do that yet.");
+ }
+
+ if (!target->isInterpreted()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNotInterpreted);
+ return DontInline(nullptr, "Non-interpreted target");
+ }
+
+ if (info().analysisMode() != Analysis_DefiniteProperties) {
+ // If |this| or an argument has an empty resultTypeSet, don't bother
+ // inlining, as the call is currently unreachable due to incomplete type
+ // information. This does not apply to the definite properties analysis,
+ // in that case we want to inline anyway.
+
+ if (callInfo.thisArg()->emptyResultTypeSet()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
+ return DontInline(nullptr, "Empty TypeSet for |this|");
+ }
+
+ for (size_t i = 0; i < callInfo.argc(); i++) {
+ if (callInfo.getArg(i)->emptyResultTypeSet()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
+ return DontInline(nullptr, "Empty TypeSet for argument");
+ }
+ }
+ }
+
+ // Allow constructing lazy scripts when performing the definite properties
+ // analysis, as baseline has not been used to warm the caller up yet.
+ if (target->isInterpreted() && info().analysisMode() == Analysis_DefiniteProperties) {
+ RootedScript script(analysisContext, target->getOrCreateScript(analysisContext));
+ if (!script)
+ return InliningDecision_Error;
+
+ if (!script->hasBaselineScript() && script->canBaselineCompile()) {
+ MethodStatus status = BaselineCompile(analysisContext, script);
+ if (status == Method_Error)
+ return InliningDecision_Error;
+ if (status != Method_Compiled) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNoBaseline);
+ return InliningDecision_DontInline;
+ }
+ }
+ }
+
+ if (!target->hasScript()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineLazy);
+ return DontInline(nullptr, "Lazy script");
+ }
+
+ JSScript* inlineScript = target->nonLazyScript();
+ if (callInfo.constructing() && !target->isConstructor()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNotConstructor);
+ return DontInline(inlineScript, "Callee is not a constructor");
+ }
+
+ if (!callInfo.constructing() && target->isClassConstructor()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineClassConstructor);
+ return DontInline(inlineScript, "Not constructing class constructor");
+ }
+
+ AnalysisMode analysisMode = info().analysisMode();
+ if (!CanIonCompile(inlineScript, analysisMode)) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineDisabledIon);
+ return DontInline(inlineScript, "Disabled Ion compilation");
+ }
+
+ // Don't inline functions which don't have baseline scripts.
+ if (!inlineScript->hasBaselineScript()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNoBaseline);
+ return DontInline(inlineScript, "No baseline jitcode");
+ }
+
+ if (TooManyFormalArguments(target->nargs())) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
+ return DontInline(inlineScript, "Too many args");
+ }
+
+ // We check the number of actual arguments against the maximum number of
+ // formal arguments as we do not want to encode all actual arguments in the
+ // callerResumePoint.
+ if (TooManyFormalArguments(callInfo.argc())) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
+ return DontInline(inlineScript, "Too many actual args");
+ }
+
+ if (hasCommonInliningPath(inlineScript)) {
+ trackOptimizationOutcome(TrackedOutcome::HasCommonInliningPath);
+ return DontInline(inlineScript, "Common inlining path");
+ }
+
+ if (inlineScript->uninlineable()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
+ return DontInline(inlineScript, "Uninlineable script");
+ }
+
+ if (inlineScript->needsArgsObj()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNeedsArgsObj);
+ return DontInline(inlineScript, "Script that needs an arguments object");
+ }
+
+ if (inlineScript->isDebuggee()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineDebuggee);
+ return DontInline(inlineScript, "Script is debuggee");
+ }
+
+ TypeSet::ObjectKey* targetKey = TypeSet::ObjectKey::get(target);
+ if (targetKey->unknownProperties()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineUnknownProps);
+ return DontInline(inlineScript, "Target type has unknown properties");
+ }
+
+ return InliningDecision_Inline;
+}
+
+void
+IonBuilder::popCfgStack()
+{
+ if (cfgStack_.back().isLoop())
+ loops_.popBack();
+ if (cfgStack_.back().state == CFGState::LABEL)
+ labels_.popBack();
+ cfgStack_.popBack();
+}
+
+bool
+IonBuilder::analyzeNewLoopTypes(MBasicBlock* entry, jsbytecode* start, jsbytecode* end)
+{
+ // The phi inputs at the loop head only reflect types for variables that
+ // were present at the start of the loop. If the variable changes to a new
+ // type within the loop body, and that type is carried around to the loop
+ // head, then we need to know about the new type up front.
+ //
+ // Since SSA information hasn't been constructed for the loop body yet, we
+ // need a separate analysis to pick out the types that might flow around
+ // the loop header. This is a best-effort analysis that may either over-
+ // or under-approximate the set of such types.
+ //
+ // Over-approximating the types may lead to inefficient generated code, and
+ // under-approximating the types will cause the loop body to be analyzed
+ // multiple times as the correct types are deduced (see finishLoop).
+
+ // If we restarted processing of an outer loop then get loop header types
+ // directly from the last time we have previously processed this loop. This
+ // both avoids repeated work from the bytecode traverse below, and will
+ // also pick up types discovered while previously building the loop body.
+ for (size_t i = 0; i < loopHeaders_.length(); i++) {
+ if (loopHeaders_[i].pc == start) {
+ MBasicBlock* oldEntry = loopHeaders_[i].header;
+
+ // If this block has been discarded, its resume points will have
+ // already discarded their operands.
+ if (!oldEntry->isDead()) {
+ MResumePoint* oldEntryRp = oldEntry->entryResumePoint();
+ size_t stackDepth = oldEntryRp->stackDepth();
+ for (size_t slot = 0; slot < stackDepth; slot++) {
+ MDefinition* oldDef = oldEntryRp->getOperand(slot);
+ if (!oldDef->isPhi()) {
+ MOZ_ASSERT(oldDef->block()->id() < oldEntry->id());
+ MOZ_ASSERT(oldDef == entry->getSlot(slot));
+ continue;
+ }
+ MPhi* oldPhi = oldDef->toPhi();
+ MPhi* newPhi = entry->getSlot(slot)->toPhi();
+ if (!newPhi->addBackedgeType(alloc(), oldPhi->type(), oldPhi->resultTypeSet()))
+ return false;
+ }
+ }
+
+ // Update the most recent header for this loop encountered, in case
+ // new types flow to the phis and the loop is processed at least
+ // three times.
+ loopHeaders_[i].header = entry;
+ return true;
+ }
+ }
+ if (!loopHeaders_.append(LoopHeader(start, entry)))
+ return false;
+
+ jsbytecode* last = nullptr;
+ jsbytecode* earlier = nullptr;
+ for (jsbytecode* pc = start; pc != end; earlier = last, last = pc, pc += GetBytecodeLength(pc)) {
+ uint32_t slot;
+ if (*pc == JSOP_SETLOCAL)
+ slot = info().localSlot(GET_LOCALNO(pc));
+ else if (*pc == JSOP_SETARG)
+ slot = info().argSlotUnchecked(GET_ARGNO(pc));
+ else
+ continue;
+ if (slot >= info().firstStackSlot())
+ continue;
+ if (!analysis().maybeInfo(pc))
+ continue;
+ if (!last)
+ continue;
+
+ MPhi* phi = entry->getSlot(slot)->toPhi();
+
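+        // Look through JSOP_POS at the operation that produced its operand.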
+ if (*last == JSOP_POS)
+ last = earlier;
+
+ if (CodeSpec[*last].format & JOF_TYPESET) {
+ TemporaryTypeSet* typeSet = bytecodeTypes(last);
+ if (!typeSet->empty()) {
+ MIRType type = typeSet->getKnownMIRType();
+ if (!phi->addBackedgeType(alloc(), type, typeSet))
+ return false;
+ }
+ } else if (*last == JSOP_GETLOCAL || *last == JSOP_GETARG) {
+ uint32_t slot = (*last == JSOP_GETLOCAL)
+ ? info().localSlot(GET_LOCALNO(last))
+ : info().argSlotUnchecked(GET_ARGNO(last));
+ if (slot < info().firstStackSlot()) {
+ MPhi* otherPhi = entry->getSlot(slot)->toPhi();
+ if (otherPhi->hasBackedgeType()) {
+ if (!phi->addBackedgeType(alloc(), otherPhi->type(), otherPhi->resultTypeSet()))
+ return false;
+ }
+ }
+ } else {
+ MIRType type = MIRType::None;
+ switch (*last) {
+ case JSOP_VOID:
+ case JSOP_UNDEFINED:
+ type = MIRType::Undefined;
+ break;
+ case JSOP_GIMPLICITTHIS:
+ if (!script()->hasNonSyntacticScope())
+ type = MIRType::Undefined;
+ break;
+ case JSOP_NULL:
+ type = MIRType::Null;
+ break;
+ case JSOP_ZERO:
+ case JSOP_ONE:
+ case JSOP_INT8:
+ case JSOP_INT32:
+ case JSOP_UINT16:
+ case JSOP_UINT24:
+ case JSOP_BITAND:
+ case JSOP_BITOR:
+ case JSOP_BITXOR:
+ case JSOP_BITNOT:
+ case JSOP_RSH:
+ case JSOP_LSH:
+ case JSOP_URSH:
+ type = MIRType::Int32;
+ break;
+ case JSOP_FALSE:
+ case JSOP_TRUE:
+ case JSOP_EQ:
+ case JSOP_NE:
+ case JSOP_LT:
+ case JSOP_LE:
+ case JSOP_GT:
+ case JSOP_GE:
+ case JSOP_NOT:
+ case JSOP_STRICTEQ:
+ case JSOP_STRICTNE:
+ case JSOP_IN:
+ case JSOP_INSTANCEOF:
+ type = MIRType::Boolean;
+ break;
+ case JSOP_DOUBLE:
+ type = MIRType::Double;
+ break;
+ case JSOP_STRING:
+ case JSOP_TOSTRING:
+ case JSOP_TYPEOF:
+ case JSOP_TYPEOFEXPR:
+ type = MIRType::String;
+ break;
+ case JSOP_SYMBOL:
+ type = MIRType::Symbol;
+ break;
+ case JSOP_ADD:
+ case JSOP_SUB:
+ case JSOP_MUL:
+ case JSOP_DIV:
+ case JSOP_MOD:
+ case JSOP_NEG:
+ type = inspector->expectedResultType(last);
+ break;
+ default:
+ break;
+ }
+ if (type != MIRType::None) {
+ if (!phi->addBackedgeType(alloc(), type, nullptr))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool
+IonBuilder::pushLoop(CFGState::State initial, jsbytecode* stopAt, MBasicBlock* entry, bool osr,
+ jsbytecode* loopHead, jsbytecode* initialPc,
+ jsbytecode* bodyStart, jsbytecode* bodyEnd,
+ jsbytecode* exitpc, jsbytecode* continuepc)
+{
+ ControlFlowInfo loop(cfgStack_.length(), continuepc);
+ if (!loops_.append(loop))
+ return false;
+
+ CFGState state;
+ state.state = initial;
+ state.stopAt = stopAt;
+ state.loop.bodyStart = bodyStart;
+ state.loop.bodyEnd = bodyEnd;
+ state.loop.exitpc = exitpc;
+ state.loop.continuepc = continuepc;
+ state.loop.entry = entry;
+ state.loop.osr = osr;
+ state.loop.successor = nullptr;
+ state.loop.breaks = nullptr;
+ state.loop.continues = nullptr;
+ state.loop.initialState = initial;
+ state.loop.initialPc = initialPc;
+ state.loop.initialStopAt = stopAt;
+ state.loop.loopHead = loopHead;
+ return cfgStack_.append(state);
+}
+
+bool
+IonBuilder::init()
+{
+ {
+ LifoAlloc::AutoFallibleScope fallibleAllocator(alloc().lifoAlloc());
+ if (!TypeScript::FreezeTypeSets(constraints(), script(), &thisTypes, &argTypes, &typeArray))
+ return false;
+ }
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ if (inlineCallInfo_) {
+ // If we're inlining, the actual this/argument types are not necessarily
+ // a subset of the script's observed types. |argTypes| is never accessed
+ // for inlined scripts, so we just null it.
+ thisTypes = inlineCallInfo_->thisArg()->resultTypeSet();
+ argTypes = nullptr;
+ }
+
+ if (!analysis().init(alloc(), gsn))
+ return false;
+
+ // The baseline script normally has the bytecode type map, but compute
+ // it ourselves if we do not have a baseline script.
+ if (script()->hasBaselineScript()) {
+ bytecodeTypeMap = script()->baselineScript()->bytecodeTypeMap();
+ } else {
+ bytecodeTypeMap = alloc_->lifoAlloc()->newArrayUninitialized<uint32_t>(script()->nTypeSets());
+ if (!bytecodeTypeMap)
+ return false;
+ FillBytecodeTypeMap(script(), bytecodeTypeMap);
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::build()
+{
+ if (!init())
+ return false;
+
+ if (script()->hasBaselineScript())
+ script()->baselineScript()->resetMaxInliningDepth();
+
+ if (!setCurrentAndSpecializePhis(newBlock(pc)))
+ return false;
+ if (!current)
+ return false;
+
+#ifdef JS_JITSPEW
+ if (info().isAnalysis()) {
+ JitSpew(JitSpew_IonScripts, "Analyzing script %s:%" PRIuSIZE " (%p) %s",
+ script()->filename(), script()->lineno(), (void*)script(),
+ AnalysisModeString(info().analysisMode()));
+ } else {
+ JitSpew(JitSpew_IonScripts, "%sompiling script %s:%" PRIuSIZE " (%p) (warmup-counter=%" PRIu32 ", level=%s)",
+ (script()->hasIonScript() ? "Rec" : "C"),
+ script()->filename(), script()->lineno(), (void*)script(),
+ script()->getWarmUpCount(), OptimizationLevelString(optimizationInfo().level()));
+ }
+#endif
+
+ if (!initParameters())
+ return false;
+ initLocals();
+
+ // Initialize something for the env chain. We can bail out before the
+ // start instruction, but the snapshot is encoded *at* the start
+ // instruction, which means generating any code that could load into
+ // registers is illegal.
+ MInstruction* env = MConstant::New(alloc(), UndefinedValue());
+ current->add(env);
+ current->initSlot(info().environmentChainSlot(), env);
+
+ // Initialize the return value.
+ MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
+ current->add(returnValue);
+ current->initSlot(info().returnValueSlot(), returnValue);
+
+ // Initialize the arguments object slot to undefined if necessary.
+ if (info().hasArguments()) {
+ MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
+ current->add(argsObj);
+ current->initSlot(info().argsObjSlot(), argsObj);
+ }
+
+ // Emit the start instruction, so we can begin real instructions.
+ current->add(MStart::New(alloc()));
+
+ // Guard against over-recursion. Do this before we start unboxing, since
+ // this will create an OSI point that will read the incoming argument
+ // values, which is nice to do before their last real use, to minimize
+ // register/stack pressure.
+ MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
+ current->add(check);
+ MResumePoint* entryRpCopy = MResumePoint::Copy(alloc(), current->entryResumePoint());
+ if (!entryRpCopy)
+ return false;
+ check->setResumePoint(entryRpCopy);
+
+ // Parameters have been checked to correspond to the typeset, now we unbox
+ // what we can in an infallible manner.
+ if (!rewriteParameters())
+ return false;
+
+ // Check for redeclaration errors for global scripts.
+ if (!info().funMaybeLazy() && !info().module() &&
+ script()->bodyScope()->is<GlobalScope>() &&
+ script()->bodyScope()->as<GlobalScope>().hasBindings())
+ {
+ MGlobalNameConflictsCheck* redeclCheck = MGlobalNameConflictsCheck::New(alloc());
+ current->add(redeclCheck);
+ MResumePoint* entryRpCopy = MResumePoint::Copy(alloc(), current->entryResumePoint());
+ if (!entryRpCopy)
+ return false;
+ redeclCheck->setResumePoint(entryRpCopy);
+ }
+
+ // It's safe to start emitting actual IR, so now build the env chain.
+ if (!initEnvironmentChain())
+ return false;
+
+ if (info().needsArgsObj() && !initArgumentsObject())
+ return false;
+
+ // The type analysis phase attempts to insert unbox operations near
+ // definitions of values. It also attempts to replace uses in resume points
+ // with the narrower, unboxed variants. However, we must prevent this
+ // replacement from happening on values in the entry snapshot. Otherwise we
+ // could get this:
+ //
+ // v0 = MParameter(0)
+ // v1 = MParameter(1)
+ // -- ResumePoint(v2, v3)
+ // v2 = Unbox(v0, INT32)
+ // v3 = Unbox(v1, INT32)
+ //
+ // So we attach the initial resume point to each parameter, which the type
+ // analysis explicitly checks (this is the same mechanism used for
+ // effectful operations).
+ for (uint32_t i = 0; i < info().endArgSlot(); i++) {
+ MInstruction* ins = current->getEntrySlot(i)->toInstruction();
+ if (ins->type() != MIRType::Value)
+ continue;
+
+ MResumePoint* entryRpCopy = MResumePoint::Copy(alloc(), current->entryResumePoint());
+ if (!entryRpCopy)
+ return false;
+ ins->setResumePoint(entryRpCopy);
+ }
+
+ // lazyArguments should never be accessed in |argsObjAliasesFormals| scripts.
+ if (info().hasArguments() && !info().argsObjAliasesFormals()) {
+ lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
+ current->add(lazyArguments_);
+ }
+
+ insertRecompileCheck();
+
+ if (!traverseBytecode())
+ return false;
+
+ // Discard unreferenced & pre-allocated resume points.
+ replaceMaybeFallbackFunctionGetter(nullptr);
+
+ if (script_->hasBaselineScript() &&
+ inlinedBytecodeLength_ > script_->baselineScript()->inlinedBytecodeLength())
+ {
+ script_->baselineScript()->setInlinedBytecodeLength(inlinedBytecodeLength_);
+ }
+
+ if (!maybeAddOsrTypeBarriers())
+ return false;
+
+ if (!processIterators())
+ return false;
+
+ if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
+ abortReason_ = AbortReason_PreliminaryObjects;
+ return false;
+ }
+
+ if (shouldForceAbort()) {
+ abortReason_ = AbortReason_Disable;
+ return false;
+ }
+
+ MOZ_ASSERT(loopDepth_ == 0);
+ abortReason_ = AbortReason_NoAbort;
+ return true;
+}
+
+bool
+IonBuilder::processIterators()
+{
+ // Find phis that must directly hold an iterator live.
+ Vector<MPhi*, 0, SystemAllocPolicy> worklist;
+ for (size_t i = 0; i < iterators_.length(); i++) {
+ MInstruction* ins = iterators_[i];
+ for (MUseDefIterator iter(ins); iter; iter++) {
+ if (iter.def()->isPhi()) {
+ if (!worklist.append(iter.def()->toPhi()))
+ return false;
+ }
+ }
+ }
+
+ // Propagate the iterator and live status of phis to all other connected
+ // phis.
+ while (!worklist.empty()) {
+ MPhi* phi = worklist.popCopy();
+ phi->setIterator();
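+        // Marking the phi as implicitly used prevents it from being
+        // eliminated as dead code.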
+ phi->setImplicitlyUsedUnchecked();
+
+ for (MUseDefIterator iter(phi); iter; iter++) {
+ if (iter.def()->isPhi()) {
+ MPhi* other = iter.def()->toPhi();
+ if (!other->isIterator() && !worklist.append(other))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::buildInline(IonBuilder* callerBuilder, MResumePoint* callerResumePoint,
+ CallInfo& callInfo)
+{
+ inlineCallInfo_ = &callInfo;
+
+ if (!init())
+ return false;
+
+ JitSpew(JitSpew_IonScripts, "Inlining script %s:%" PRIuSIZE " (%p)",
+ script()->filename(), script()->lineno(), (void*)script());
+
+ callerBuilder_ = callerBuilder;
+ callerResumePoint_ = callerResumePoint;
+
+ if (callerBuilder->failedBoundsCheck_)
+ failedBoundsCheck_ = true;
+
+ if (callerBuilder->failedShapeGuard_)
+ failedShapeGuard_ = true;
+
+ if (callerBuilder->failedLexicalCheck_)
+ failedLexicalCheck_ = true;
+
+ safeForMinorGC_ = callerBuilder->safeForMinorGC_;
+
+ // Generate single entrance block.
+ if (!setCurrentAndSpecializePhis(newBlock(pc)))
+ return false;
+ if (!current)
+ return false;
+
+ current->setCallerResumePoint(callerResumePoint);
+
+ // Connect the entrance block to the last block in the caller's graph.
+ MBasicBlock* predecessor = callerBuilder->current;
+ MOZ_ASSERT(predecessor == callerResumePoint->block());
+
+ predecessor->end(MGoto::New(alloc(), current));
+ if (!current->addPredecessorWithoutPhis(predecessor))
+ return false;
+
+ // Initialize env chain slot to Undefined. It's set later by
+ // |initEnvironmentChain|.
+ MInstruction* env = MConstant::New(alloc(), UndefinedValue());
+ current->add(env);
+ current->initSlot(info().environmentChainSlot(), env);
+
+ // Initialize |return value| slot.
+ MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
+ current->add(returnValue);
+ current->initSlot(info().returnValueSlot(), returnValue);
+
+ // Initialize |arguments| slot.
+ if (info().hasArguments()) {
+ MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
+ current->add(argsObj);
+ current->initSlot(info().argsObjSlot(), argsObj);
+ }
+
+ // Initialize |this| slot.
+ current->initSlot(info().thisSlot(), callInfo.thisArg());
+
+ JitSpew(JitSpew_Inlining, "Initializing %u arg slots", info().nargs());
+
+ // NB: Ion does not inline functions which |needsArgsObj|. So using argSlot()
+ // instead of argSlotUnchecked() below is OK
+ MOZ_ASSERT(!info().needsArgsObj());
+
+ // Initialize actually set arguments.
+ uint32_t existing_args = Min<uint32_t>(callInfo.argc(), info().nargs());
+ for (size_t i = 0; i < existing_args; ++i) {
+ MDefinition* arg = callInfo.getArg(i);
+ current->initSlot(info().argSlot(i), arg);
+ }
+
+ // Pass Undefined for missing arguments
+ for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
+ MConstant* arg = MConstant::New(alloc(), UndefinedValue());
+ current->add(arg);
+ current->initSlot(info().argSlot(i), arg);
+ }
+
+ JitSpew(JitSpew_Inlining, "Initializing %u locals", info().nlocals());
+
+ initLocals();
+
+ JitSpew(JitSpew_Inlining, "Inline entry block MResumePoint %p, %u stack slots",
+ (void*) current->entryResumePoint(), current->entryResumePoint()->stackDepth());
+
+ // +2 for the env chain and |this|, maybe another +1 for arguments object slot.
+ MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());
+
+ if (script_->argumentsHasVarBinding()) {
+ lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
+ current->add(lazyArguments_);
+ }
+
+ insertRecompileCheck();
+
+ // Initialize the env chain now that all resume points operands are
+ // initialized.
+ if (!initEnvironmentChain(callInfo.fun()))
+ return false;
+
+ if (!traverseBytecode())
+ return false;
+
+ // Discard unreferenced & pre-allocated resume points.
+ replaceMaybeFallbackFunctionGetter(nullptr);
+
+ MOZ_ASSERT(iterators_.empty(), "Iterators should be added to outer builder");
+
+ if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
+ abortReason_ = AbortReason_PreliminaryObjects;
+ return false;
+ }
+
+ if (shouldForceAbort()) {
+ abortReason_ = AbortReason_Disable;
+ return false;
+ }
+
+ return true;
+}
+
+void
+IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition* param, int32_t argIndex)
+{
+ MOZ_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());
+
+ TemporaryTypeSet* types = param->resultTypeSet();
+ MDefinition* actual = ensureDefiniteType(param, types->getKnownMIRType());
+ if (actual == param)
+ return;
+
+ // Careful! We leave the original MParameter in the entry resume point. The
+ // arguments still need to be checked unless proven otherwise at the call
+ // site, and these checks can bailout. We can end up:
+ // v0 = Parameter(0)
+ // v1 = Unbox(v0, INT32)
+ // -- ResumePoint(v0)
+ //
+ // As usual, it would be invalid for v1 to be captured in the initial
+ // resume point, rather than v0.
+ current->rewriteSlot(slotIdx, actual);
+}
+
+// Apply Type Inference information to parameters early on, unboxing them if
+// they have a definitive type. The actual guards will be emitted by the code
+// generator, explicitly, as part of the function prologue.
+bool
+IonBuilder::rewriteParameters()
+{
+ MOZ_ASSERT(info().environmentChainSlot() == 0);
+
+ if (!info().funMaybeLazy())
+ return true;
+
+ for (uint32_t i = info().startArgSlot(); i < info().endArgSlot(); i++) {
+ if (!alloc().ensureBallast())
+ return false;
+ MDefinition* param = current->getSlot(i);
+ rewriteParameter(i, param, param->toParameter()->index());
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::initParameters()
+{
+ if (!info().funMaybeLazy())
+ return true;
+
+ // If we are doing OSR on a frame which initially executed in the
+ // interpreter and didn't accumulate type information, try to use that OSR
+ // frame to determine possible initial types for 'this' and parameters.
+
+ if (thisTypes->empty() && baselineFrame_) {
+ TypeSet::Type type = baselineFrame_->thisType;
+ if (type.isSingletonUnchecked())
+ checkNurseryObject(type.singleton());
+ thisTypes->addType(type, alloc_->lifoAlloc());
+ }
+
+ MParameter* param = MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
+ current->add(param);
+ current->initSlot(info().thisSlot(), param);
+
+ for (uint32_t i = 0; i < info().nargs(); i++) {
+ TemporaryTypeSet* types = &argTypes[i];
+ if (types->empty() && baselineFrame_ &&
+ !script_->baselineScript()->modifiesArguments())
+ {
+ TypeSet::Type type = baselineFrame_->argTypes[i];
+ if (type.isSingletonUnchecked())
+ checkNurseryObject(type.singleton());
+ types->addType(type, alloc_->lifoAlloc());
+ }
+
+ param = MParameter::New(alloc().fallible(), i, types);
+ if (!param)
+ return false;
+ current->add(param);
+ current->initSlot(info().argSlotUnchecked(i), param);
+ }
+
+ return true;
+}
+
+void
+IonBuilder::initLocals()
+{
+ // Initialize all frame slots to undefined. Lexical bindings are temporal
+ // dead zoned in bytecode.
+
+ if (info().nlocals() == 0)
+ return;
+
+ MConstant* undef = MConstant::New(alloc(), UndefinedValue());
+ current->add(undef);
+
+ for (uint32_t i = 0; i < info().nlocals(); i++)
+ current->initSlot(info().localSlot(i), undef);
+}
+
+bool
+IonBuilder::initEnvironmentChain(MDefinition* callee)
+{
+ MInstruction* env = nullptr;
+
+    // If the script doesn't use the env chain, then it's already initialized
+    // from earlier. However, always make an env chain when |needsArgsObj| is
+    // true for the script, since arguments object construction requires the
+    // env chain to be passed in.
+ if (!info().needsArgsObj() && !analysis().usesEnvironmentChain())
+ return true;
+
+ // The env chain is only tracked in scripts that have NAME opcodes which
+ // will try to access the env. For other scripts, the env instructions
+ // will be held live by resume points and code will still be generated for
+ // them, so just use a constant undefined value.
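+    // Informally (an illustrative note, not a new rule): a script that only
+    // touches its own locals and arguments keeps the constant installed
+    // earlier, while one that resolves names through its environment, or that
+    // needs an arguments object, gets a real chain built below.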
+
+ if (JSFunction* fun = info().funMaybeLazy()) {
+ if (!callee) {
+ MCallee* calleeIns = MCallee::New(alloc());
+ current->add(calleeIns);
+ callee = calleeIns;
+ }
+ env = MFunctionEnvironment::New(alloc(), callee);
+ current->add(env);
+
+        // This reproduces what is done in CallObject::createForFunction. Skip
+ // this for the arguments analysis, as the script might not have a
+ // baseline script with template objects yet.
+ if (fun->needsSomeEnvironmentObject() &&
+ info().analysisMode() != Analysis_ArgumentsUsage)
+ {
+ if (fun->needsNamedLambdaEnvironment()) {
+ env = createNamedLambdaObject(callee, env);
+ if (!env)
+ return false;
+ }
+
+ // TODO: Parameter expression-induced extra var environment not
+ // yet handled.
+ if (fun->needsExtraBodyVarEnvironment())
+ return abort("Extra var environment unsupported");
+
+ if (fun->needsCallObject()) {
+ env = createCallObject(callee, env);
+ if (!env)
+ return false;
+ }
+ }
+ } else if (ModuleObject* module = info().module()) {
+ // Modules use a pre-created env object.
+ env = constant(ObjectValue(module->initialEnvironment()));
+ } else {
+ // For global scripts without a non-syntactic global scope, the env
+ // chain is the global lexical env.
+ MOZ_ASSERT(!script()->isForEval());
+ MOZ_ASSERT(!script()->hasNonSyntacticScope());
+ env = constant(ObjectValue(script()->global().lexicalEnvironment()));
+ }
+
+ current->setEnvironmentChain(env);
+ return true;
+}
+
+bool
+IonBuilder::initArgumentsObject()
+{
+ JitSpew(JitSpew_IonMIR, "%s:%" PRIuSIZE " - Emitting code to initialize arguments object! block=%p",
+ script()->filename(), script()->lineno(), current);
+ MOZ_ASSERT(info().needsArgsObj());
+
+ bool mapped = script()->hasMappedArgsObj();
+ ArgumentsObject* templateObj = script()->compartment()->maybeArgumentsTemplateObject(mapped);
+
+ MCreateArgumentsObject* argsObj =
+ MCreateArgumentsObject::New(alloc(), current->environmentChain(), templateObj);
+ current->add(argsObj);
+ current->setArgumentsObject(argsObj);
+ return true;
+}
+
+bool
+IonBuilder::addOsrValueTypeBarrier(uint32_t slot, MInstruction** def_,
+ MIRType type, TemporaryTypeSet* typeSet)
+{
+ MInstruction*& def = *def_;
+ MBasicBlock* osrBlock = def->block();
+
+ // Clear bogus type information added in newOsrPreheader().
+ def->setResultType(MIRType::Value);
+ def->setResultTypeSet(nullptr);
+
+ if (typeSet && !typeSet->unknown()) {
+ MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
+ osrBlock->insertBefore(osrBlock->lastIns(), barrier);
+ osrBlock->rewriteSlot(slot, barrier);
+ def = barrier;
+
+ // If the TypeSet is more precise than |type|, adjust |type| for the
+ // code below.
+ if (type == MIRType::Value)
+ type = barrier->type();
+ } else if (type == MIRType::Null ||
+ type == MIRType::Undefined ||
+ type == MIRType::MagicOptimizedArguments)
+ {
+ // No unbox instruction will be added below, so check the type by
+ // adding a type barrier for a singleton type set.
+ TypeSet::Type ntype = TypeSet::PrimitiveType(ValueTypeFromMIRType(type));
+ LifoAlloc* lifoAlloc = alloc().lifoAlloc();
+ typeSet = lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc, ntype);
+ if (!typeSet)
+ return false;
+ MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
+ osrBlock->insertBefore(osrBlock->lastIns(), barrier);
+ osrBlock->rewriteSlot(slot, barrier);
+ def = barrier;
+ }
+
+ switch (type) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ if (type != def->type()) {
+ MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
+ osrBlock->insertBefore(osrBlock->lastIns(), unbox);
+ osrBlock->rewriteSlot(slot, unbox);
+ def = unbox;
+ }
+ break;
+
+ case MIRType::Null:
+ {
+ MConstant* c = MConstant::New(alloc(), NullValue());
+ osrBlock->insertBefore(osrBlock->lastIns(), c);
+ osrBlock->rewriteSlot(slot, c);
+ def = c;
+ break;
+ }
+
+ case MIRType::Undefined:
+ {
+ MConstant* c = MConstant::New(alloc(), UndefinedValue());
+ osrBlock->insertBefore(osrBlock->lastIns(), c);
+ osrBlock->rewriteSlot(slot, c);
+ def = c;
+ break;
+ }
+
+ case MIRType::MagicOptimizedArguments:
+ MOZ_ASSERT(lazyArguments_);
+ osrBlock->rewriteSlot(slot, lazyArguments_);
+ def = lazyArguments_;
+ break;
+
+ default:
+ break;
+ }
+
+ MOZ_ASSERT(def == osrBlock->getSlot(slot));
+ return true;
+}
+
+bool
+IonBuilder::maybeAddOsrTypeBarriers()
+{
+ if (!info().osrPc())
+ return true;
+
+ // The loop has successfully been processed, and the loop header phis
+ // have their final type. Add unboxes and type barriers in the OSR
+ // block to check that the values have the appropriate type, and update
+ // the types in the preheader.
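+    // For example (illustrative only): if a loop-header phi for a local ended
+    // up typed as Int32, the corresponding OSR value is still boxed, so an
+    // MUnbox (and possibly an MTypeBarrier) is inserted in the OSR block and
+    // the preheader phi is narrowed to the same type below.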
+
+ MBasicBlock* osrBlock = graph().osrBlock();
+ if (!osrBlock) {
+ // Because IonBuilder does not compile catch blocks, it's possible to
+ // end up without an OSR block if the OSR pc is only reachable via a
+ // break-statement inside the catch block. For instance:
+ //
+ // for (;;) {
+ // try {
+ // throw 3;
+ // } catch(e) {
+ // break;
+ // }
+ // }
+ // while (..) { } // <= OSR here, only reachable via catch block.
+ //
+ // For now we just abort in this case.
+ MOZ_ASSERT(graph().hasTryBlock());
+ return abort("OSR block only reachable through catch block");
+ }
+
+ MBasicBlock* preheader = osrBlock->getSuccessor(0);
+ MBasicBlock* header = preheader->getSuccessor(0);
+ static const size_t OSR_PHI_POSITION = 1;
+ MOZ_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);
+
+ MResumePoint* headerRp = header->entryResumePoint();
+ size_t stackDepth = headerRp->stackDepth();
+ MOZ_ASSERT(stackDepth == osrBlock->stackDepth());
+ for (uint32_t slot = info().startArgSlot(); slot < stackDepth; slot++) {
+        // Aliased slots are never accessed, since they need to go through
+        // the call object. The type barriers are added there and can be
+        // discarded here.
+ if (info().isSlotAliased(slot))
+ continue;
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ MInstruction* def = osrBlock->getSlot(slot)->toInstruction();
+ MPhi* preheaderPhi = preheader->getSlot(slot)->toPhi();
+ MPhi* headerPhi = headerRp->getOperand(slot)->toPhi();
+
+ MIRType type = headerPhi->type();
+ TemporaryTypeSet* typeSet = headerPhi->resultTypeSet();
+
+ if (!addOsrValueTypeBarrier(slot, &def, type, typeSet))
+ return false;
+
+ preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
+ preheaderPhi->setResultType(type);
+ preheaderPhi->setResultTypeSet(typeSet);
+ }
+
+ return true;
+}
+
+// We try to build a control-flow graph in the order that it would be built as
+// if traversing the AST. This leads to a nice ordering and lets us build SSA
+// in one pass, since the bytecode is structured.
+//
+// We traverse the bytecode iteratively, maintaining a current basic block.
+// Each basic block has a mapping of local slots to instructions, as well as a
+// stack depth. As we encounter instructions we mutate this mapping in the
+// current block.
+//
+// Things get interesting when we encounter a control structure. This can be
+// either an IFEQ, downward GOTO, or a decompiler hint stashed away in source
+// notes. Once we encounter such an opcode, we recover the structure of the
+// control flow (its branches and bounds), and push it on a stack.
+//
+// As we continue traversing the bytecode, we look for points that would
+// terminate the topmost control flow path pushed on the stack. These are:
+// (1) The bounds of the current structure (end of a loop or join/edge of a
+// branch).
+// (2) A "return", "break", or "continue" statement.
+//
+// For (1), we expect that there is a current block in the progress of being
+// built, and we complete the necessary edges in the CFG. For (2), we expect
+// that there is no active block.
+//
+// For normal diamond join points, we construct Phi nodes as we add
+// predecessors. For loops, care must be taken to propagate Phi nodes back
+// through uses in the loop body.
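+// Illustrative sketch (simplified; the process* helpers below give the precise
+// rules): for a script like
+//
+//   if (x) { a(); } else { b(); }
+//
+// the IFEQ pushes a CFGState whose stopAt marks the end of the then-branch;
+// when traversal reaches it, processCfgStack() switches |current| to the
+// else-branch, and processIfElseFalseEnd() finally creates the join block,
+// adding phis for slots the two branches defined differently.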
+bool
+IonBuilder::traverseBytecode()
+{
+ for (;;) {
+ MOZ_ASSERT(pc < info().limitPC());
+
+ for (;;) {
+ if (!alloc().ensureBallast())
+ return false;
+
+ // Check if we've hit an expected join point or edge in the bytecode.
+ // Leaving one control structure could place us at the edge of another,
+ // thus |while| instead of |if| so we don't skip any opcodes.
+ MOZ_ASSERT_IF(!cfgStack_.empty(), cfgStack_.back().stopAt >= pc);
+ if (!cfgStack_.empty() && cfgStack_.back().stopAt == pc) {
+ ControlStatus status = processCfgStack();
+ if (status == ControlStatus_Error)
+ return false;
+ if (status == ControlStatus_Abort)
+ return abort("Aborted while processing control flow");
+ if (!current)
+ return true;
+ continue;
+ }
+
+ // Some opcodes need to be handled early because they affect control
+ // flow, terminating the current basic block and/or instructing the
+ // traversal algorithm to continue from a new pc.
+ //
+ // (1) If the opcode does not affect control flow, then the opcode
+ // is inspected and transformed to IR. This is the process_opcode
+ // label.
+ // (2) A loop could be detected via a forward GOTO. In this case,
+ // we don't want to process the GOTO, but the following
+ // instruction.
+ // (3) A RETURN, STOP, BREAK, or CONTINUE may require processing the
+ // CFG stack to terminate open branches.
+ //
+ // Similar to above, snooping control flow could land us at another
+ // control flow point, so we iterate until it's time to inspect a real
+ // opcode.
+ ControlStatus status;
+ if ((status = snoopControlFlow(JSOp(*pc))) == ControlStatus_None)
+ break;
+ if (status == ControlStatus_Error)
+ return false;
+ if (status == ControlStatus_Abort)
+ return abort("Aborted while processing control flow");
+ if (!current)
+ return true;
+ }
+
+#ifdef DEBUG
+ // In debug builds, after compiling this op, check that all values
+ // popped by this opcode either:
+ //
+ // (1) Have the ImplicitlyUsed flag set on them.
+ // (2) Have more uses than before compiling this op (the value is
+ // used as operand of a new MIR instruction).
+ //
+ // This is used to catch problems where IonBuilder pops a value without
+ // adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
+ Vector<MDefinition*, 4, JitAllocPolicy> popped(alloc());
+ Vector<size_t, 4, JitAllocPolicy> poppedUses(alloc());
+ unsigned nuses = GetUseCount(script_, script_->pcToOffset(pc));
+
+ for (unsigned i = 0; i < nuses; i++) {
+ MDefinition* def = current->peek(-int32_t(i + 1));
+ if (!popped.append(def) || !poppedUses.append(def->defUseCount()))
+ return false;
+ }
+#endif
+
+ // Nothing in inspectOpcode() is allowed to advance the pc.
+ JSOp op = JSOp(*pc);
+ if (!inspectOpcode(op))
+ return false;
+
+#ifdef DEBUG
+ for (size_t i = 0; i < popped.length(); i++) {
+ switch (op) {
+ case JSOP_POP:
+ case JSOP_POPN:
+ case JSOP_DUPAT:
+ case JSOP_DUP:
+ case JSOP_DUP2:
+ case JSOP_PICK:
+ case JSOP_SWAP:
+ case JSOP_SETARG:
+ case JSOP_SETLOCAL:
+ case JSOP_INITLEXICAL:
+ case JSOP_SETRVAL:
+ case JSOP_VOID:
+ // Don't require SSA uses for values popped by these ops.
+ break;
+
+ case JSOP_POS:
+ case JSOP_TOID:
+ case JSOP_TOSTRING:
+ // These ops may leave their input on the stack without setting
+ // the ImplicitlyUsed flag. If this value will be popped immediately,
+ // we may replace it with |undefined|, but the difference is
+ // not observable.
+ MOZ_ASSERT(i == 0);
+ if (current->peek(-1) == popped[0])
+ break;
+ MOZ_FALLTHROUGH;
+
+ default:
+ MOZ_ASSERT(popped[i]->isImplicitlyUsed() ||
+
+ // MNewDerivedTypedObject instances are
+ // often dead unless they escape from the
+ // fn. See IonBuilder::loadTypedObjectData()
+ // for more details.
+ popped[i]->isNewDerivedTypedObject() ||
+
+ popped[i]->defUseCount() > poppedUses[i]);
+ break;
+ }
+ }
+#endif
+
+ pc += CodeSpec[op].length;
+ current->updateTrackedSite(bytecodeSite(pc));
+ }
+
+ return true;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::snoopControlFlow(JSOp op)
+{
+ switch (op) {
+ case JSOP_NOP:
+ return maybeLoop(op, info().getNote(gsn, pc));
+
+ case JSOP_POP:
+ return maybeLoop(op, info().getNote(gsn, pc));
+
+ case JSOP_RETURN:
+ case JSOP_RETRVAL:
+ return processReturn(op);
+
+ case JSOP_THROW:
+ return processThrow();
+
+ case JSOP_GOTO:
+ {
+ jssrcnote* sn = info().getNote(gsn, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_BREAK:
+ case SRC_BREAK2LABEL:
+ return processBreak(op, sn);
+
+ case SRC_CONTINUE:
+ return processContinue(op);
+
+ case SRC_SWITCHBREAK:
+ return processSwitchBreak(op);
+
+ case SRC_WHILE:
+ case SRC_FOR_IN:
+ case SRC_FOR_OF:
+ // while (cond) { }
+ return whileOrForInLoop(sn);
+
+ default:
+ // Hard assert for now - make an error later.
+ MOZ_CRASH("unknown goto case");
+ }
+ break;
+ }
+
+ case JSOP_TABLESWITCH:
+ return tableSwitch(op, info().getNote(gsn, pc));
+
+ case JSOP_IFNE:
+ // We should never reach an IFNE, it's a stopAt point, which will
+ // trigger closing the loop.
+ MOZ_CRASH("we should never reach an ifne!");
+
+ default:
+ break;
+ }
+ return ControlStatus_None;
+}
+
+bool
+IonBuilder::inspectOpcode(JSOp op)
+{
+ MOZ_ASSERT(analysis_.maybeInfo(pc), "Compiling unreachable op");
+
+ switch (op) {
+ case JSOP_NOP:
+ case JSOP_NOP_DESTRUCTURING:
+ case JSOP_LINENO:
+ case JSOP_LOOPENTRY:
+ case JSOP_JUMPTARGET:
+ return true;
+
+ case JSOP_LABEL:
+ return jsop_label();
+
+ case JSOP_UNDEFINED:
+ // If this ever changes, change what JSOP_GIMPLICITTHIS does too.
+ pushConstant(UndefinedValue());
+ return true;
+
+ case JSOP_IFEQ:
+ return jsop_ifeq(JSOP_IFEQ);
+
+ case JSOP_TRY:
+ return jsop_try();
+
+ case JSOP_CONDSWITCH:
+ return jsop_condswitch();
+
+ case JSOP_BITNOT:
+ return jsop_bitnot();
+
+ case JSOP_BITAND:
+ case JSOP_BITOR:
+ case JSOP_BITXOR:
+ case JSOP_LSH:
+ case JSOP_RSH:
+ case JSOP_URSH:
+ return jsop_bitop(op);
+
+ case JSOP_ADD:
+ case JSOP_SUB:
+ case JSOP_MUL:
+ case JSOP_DIV:
+ case JSOP_MOD:
+ return jsop_binary_arith(op);
+
+ case JSOP_POW:
+ return jsop_pow();
+
+ case JSOP_POS:
+ return jsop_pos();
+
+ case JSOP_NEG:
+ return jsop_neg();
+
+ case JSOP_TOSTRING:
+ return jsop_tostring();
+
+ case JSOP_AND:
+ case JSOP_OR:
+ return jsop_andor(op);
+
+ case JSOP_DEFVAR:
+ return jsop_defvar(GET_UINT32_INDEX(pc));
+
+ case JSOP_DEFLET:
+ case JSOP_DEFCONST:
+ return jsop_deflexical(GET_UINT32_INDEX(pc));
+
+ case JSOP_DEFFUN:
+ return jsop_deffun(GET_UINT32_INDEX(pc));
+
+ case JSOP_EQ:
+ case JSOP_NE:
+ case JSOP_STRICTEQ:
+ case JSOP_STRICTNE:
+ case JSOP_LT:
+ case JSOP_LE:
+ case JSOP_GT:
+ case JSOP_GE:
+ return jsop_compare(op);
+
+ case JSOP_DOUBLE:
+ pushConstant(info().getConst(pc));
+ return true;
+
+ case JSOP_STRING:
+ pushConstant(StringValue(info().getAtom(pc)));
+ return true;
+
+ case JSOP_SYMBOL: {
+ unsigned which = GET_UINT8(pc);
+ JS::Symbol* sym = compartment->runtime()->wellKnownSymbols().get(which);
+ pushConstant(SymbolValue(sym));
+ return true;
+ }
+
+ case JSOP_ZERO:
+ pushConstant(Int32Value(0));
+ return true;
+
+ case JSOP_ONE:
+ pushConstant(Int32Value(1));
+ return true;
+
+ case JSOP_NULL:
+ pushConstant(NullValue());
+ return true;
+
+ case JSOP_VOID:
+ current->pop();
+ pushConstant(UndefinedValue());
+ return true;
+
+ case JSOP_HOLE:
+ pushConstant(MagicValue(JS_ELEMENTS_HOLE));
+ return true;
+
+ case JSOP_FALSE:
+ pushConstant(BooleanValue(false));
+ return true;
+
+ case JSOP_TRUE:
+ pushConstant(BooleanValue(true));
+ return true;
+
+ case JSOP_ARGUMENTS:
+ return jsop_arguments();
+
+ case JSOP_RUNONCE:
+ return jsop_runonce();
+
+ case JSOP_REST:
+ return jsop_rest();
+
+ case JSOP_GETARG:
+ if (info().argsObjAliasesFormals()) {
+ MGetArgumentsObjectArg* getArg = MGetArgumentsObjectArg::New(alloc(),
+ current->argumentsObject(),
+ GET_ARGNO(pc));
+ current->add(getArg);
+ current->push(getArg);
+ } else {
+ current->pushArg(GET_ARGNO(pc));
+ }
+ return true;
+
+ case JSOP_SETARG:
+ return jsop_setarg(GET_ARGNO(pc));
+
+ case JSOP_GETLOCAL:
+ current->pushLocal(GET_LOCALNO(pc));
+ return true;
+
+ case JSOP_SETLOCAL:
+ current->setLocal(GET_LOCALNO(pc));
+ return true;
+
+ case JSOP_THROWSETCONST:
+ case JSOP_THROWSETALIASEDCONST:
+ case JSOP_THROWSETCALLEE:
+ return jsop_throwsetconst();
+
+ case JSOP_CHECKLEXICAL:
+ return jsop_checklexical();
+
+ case JSOP_INITLEXICAL:
+ current->setLocal(GET_LOCALNO(pc));
+ return true;
+
+ case JSOP_INITGLEXICAL: {
+ MOZ_ASSERT(!script()->hasNonSyntacticScope());
+ MDefinition* value = current->pop();
+ current->push(constant(ObjectValue(script()->global().lexicalEnvironment())));
+ current->push(value);
+ return jsop_setprop(info().getAtom(pc)->asPropertyName());
+ }
+
+ case JSOP_CHECKALIASEDLEXICAL:
+ return jsop_checkaliasedlexical(EnvironmentCoordinate(pc));
+
+ case JSOP_INITALIASEDLEXICAL:
+ return jsop_setaliasedvar(EnvironmentCoordinate(pc));
+
+ case JSOP_UNINITIALIZED:
+ pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ return true;
+
+ case JSOP_POP: {
+ MDefinition* def = current->pop();
+
+ // POP opcodes frequently appear where values are killed, e.g. after
+ // SET* opcodes. Place a resume point afterwards to avoid capturing
+ // the dead value in later snapshots, except in places where that
+ // resume point is obviously unnecessary.
+ if (pc[JSOP_POP_LENGTH] == JSOP_POP)
+ return true;
+ if (def->isConstant())
+ return true;
+ return maybeInsertResume();
+ }
+
+ case JSOP_POPN:
+ for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++)
+ current->pop();
+ return true;
+
+ case JSOP_DUPAT:
+ current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
+ return true;
+
+ case JSOP_NEWINIT:
+ if (GET_UINT8(pc) == JSProto_Array)
+ return jsop_newarray(0);
+ return jsop_newobject();
+
+ case JSOP_NEWARRAY:
+ return jsop_newarray(GET_UINT32(pc));
+
+ case JSOP_NEWARRAY_COPYONWRITE:
+ return jsop_newarray_copyonwrite();
+
+ case JSOP_NEWOBJECT:
+ return jsop_newobject();
+
+ case JSOP_INITELEM:
+ case JSOP_INITHIDDENELEM:
+ return jsop_initelem();
+
+ case JSOP_INITELEM_ARRAY:
+ return jsop_initelem_array();
+
+ case JSOP_INITPROP:
+ case JSOP_INITLOCKEDPROP:
+ case JSOP_INITHIDDENPROP:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_initprop(name);
+ }
+
+ case JSOP_MUTATEPROTO:
+ {
+ return jsop_mutateproto();
+ }
+
+ case JSOP_INITPROP_GETTER:
+ case JSOP_INITHIDDENPROP_GETTER:
+ case JSOP_INITPROP_SETTER:
+ case JSOP_INITHIDDENPROP_SETTER: {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_initprop_getter_setter(name);
+ }
+
+ case JSOP_INITELEM_GETTER:
+ case JSOP_INITHIDDENELEM_GETTER:
+ case JSOP_INITELEM_SETTER:
+ case JSOP_INITHIDDENELEM_SETTER:
+ return jsop_initelem_getter_setter();
+
+ case JSOP_FUNCALL:
+ return jsop_funcall(GET_ARGC(pc));
+
+ case JSOP_FUNAPPLY:
+ return jsop_funapply(GET_ARGC(pc));
+
+ case JSOP_CALL:
+ case JSOP_CALLITER:
+ case JSOP_NEW:
+ case JSOP_SUPERCALL:
+ return jsop_call(GET_ARGC(pc), (JSOp)*pc == JSOP_NEW || (JSOp)*pc == JSOP_SUPERCALL);
+
+ case JSOP_EVAL:
+ case JSOP_STRICTEVAL:
+ return jsop_eval(GET_ARGC(pc));
+
+ case JSOP_INT8:
+ pushConstant(Int32Value(GET_INT8(pc)));
+ return true;
+
+ case JSOP_UINT16:
+ pushConstant(Int32Value(GET_UINT16(pc)));
+ return true;
+
+ case JSOP_GETGNAME:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ if (!script()->hasNonSyntacticScope())
+ return jsop_getgname(name);
+ return jsop_getname(name);
+ }
+
+ case JSOP_SETGNAME:
+ case JSOP_STRICTSETGNAME:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ JSObject* obj = nullptr;
+ if (!script()->hasNonSyntacticScope())
+ obj = testGlobalLexicalBinding(name);
+ if (obj)
+ return setStaticName(obj, name);
+ return jsop_setprop(name);
+ }
+
+ case JSOP_GETNAME:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_getname(name);
+ }
+
+ case JSOP_GETINTRINSIC:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_intrinsic(name);
+ }
+
+ case JSOP_GETIMPORT:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_getimport(name);
+ }
+
+ case JSOP_BINDGNAME:
+ if (!script()->hasNonSyntacticScope()) {
+ if (JSObject* env = testGlobalLexicalBinding(info().getName(pc))) {
+ pushConstant(ObjectValue(*env));
+ return true;
+ }
+ }
+ // Fall through to JSOP_BINDNAME
+ MOZ_FALLTHROUGH;
+ case JSOP_BINDNAME:
+ return jsop_bindname(info().getName(pc));
+
+ case JSOP_BINDVAR:
+ return jsop_bindvar();
+
+ case JSOP_DUP:
+ current->pushSlot(current->stackDepth() - 1);
+ return true;
+
+ case JSOP_DUP2:
+ return jsop_dup2();
+
+ case JSOP_SWAP:
+ current->swapAt(-1);
+ return true;
+
+ case JSOP_PICK:
+ current->pick(-GET_INT8(pc));
+ return true;
+
+ case JSOP_GETALIASEDVAR:
+ return jsop_getaliasedvar(EnvironmentCoordinate(pc));
+
+ case JSOP_SETALIASEDVAR:
+ return jsop_setaliasedvar(EnvironmentCoordinate(pc));
+
+ case JSOP_UINT24:
+ pushConstant(Int32Value(GET_UINT24(pc)));
+ return true;
+
+ case JSOP_INT32:
+ pushConstant(Int32Value(GET_INT32(pc)));
+ return true;
+
+ case JSOP_LOOPHEAD:
+ // JSOP_LOOPHEAD is handled when processing the loop header.
+ MOZ_CRASH("JSOP_LOOPHEAD outside loop");
+
+ case JSOP_GETELEM:
+ case JSOP_CALLELEM:
+ if (!jsop_getelem())
+ return false;
+ if (op == JSOP_CALLELEM && !improveThisTypesForCall())
+ return false;
+ return true;
+
+ case JSOP_SETELEM:
+ case JSOP_STRICTSETELEM:
+ return jsop_setelem();
+
+ case JSOP_LENGTH:
+ return jsop_length();
+
+ case JSOP_NOT:
+ return jsop_not();
+
+ case JSOP_FUNCTIONTHIS:
+ return jsop_functionthis();
+
+ case JSOP_GLOBALTHIS:
+ return jsop_globalthis();
+
+ case JSOP_CALLEE: {
+ MDefinition* callee = getCallee();
+ current->push(callee);
+ return true;
+ }
+
+ case JSOP_GETPROP:
+ case JSOP_CALLPROP:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ if (!jsop_getprop(name))
+ return false;
+ if (op == JSOP_CALLPROP && !improveThisTypesForCall())
+ return false;
+ return true;
+ }
+
+ case JSOP_SETPROP:
+ case JSOP_STRICTSETPROP:
+ case JSOP_SETNAME:
+ case JSOP_STRICTSETNAME:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_setprop(name);
+ }
+
+ case JSOP_DELPROP:
+ case JSOP_STRICTDELPROP:
+ {
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_delprop(name);
+ }
+
+ case JSOP_DELELEM:
+ case JSOP_STRICTDELELEM:
+ return jsop_delelem();
+
+ case JSOP_REGEXP:
+ return jsop_regexp(info().getRegExp(pc));
+
+ case JSOP_CALLSITEOBJ:
+ pushConstant(ObjectValue(*(info().getObject(pc))));
+ return true;
+
+ case JSOP_OBJECT:
+ return jsop_object(info().getObject(pc));
+
+ case JSOP_TYPEOF:
+ case JSOP_TYPEOFEXPR:
+ return jsop_typeof();
+
+ case JSOP_TOASYNC:
+ return jsop_toasync();
+
+ case JSOP_TOID:
+ return jsop_toid();
+
+ case JSOP_LAMBDA:
+ return jsop_lambda(info().getFunction(pc));
+
+ case JSOP_LAMBDA_ARROW:
+ return jsop_lambda_arrow(info().getFunction(pc));
+
+ case JSOP_ITER:
+ return jsop_iter(GET_INT8(pc));
+
+ case JSOP_MOREITER:
+ return jsop_itermore();
+
+ case JSOP_ISNOITER:
+ return jsop_isnoiter();
+
+ case JSOP_ENDITER:
+ return jsop_iterend();
+
+ case JSOP_IN:
+ return jsop_in();
+
+ case JSOP_SETRVAL:
+ MOZ_ASSERT(!script()->noScriptRval());
+ current->setSlot(info().returnValueSlot(), current->pop());
+ return true;
+
+ case JSOP_INSTANCEOF:
+ return jsop_instanceof();
+
+ case JSOP_DEBUGLEAVELEXICALENV:
+ return true;
+
+ case JSOP_DEBUGGER:
+ return jsop_debugger();
+
+ case JSOP_GIMPLICITTHIS:
+ if (!script()->hasNonSyntacticScope()) {
+ pushConstant(UndefinedValue());
+ return true;
+ }
+
+ // Just fall through to the unsupported bytecode case.
+ break;
+
+ case JSOP_NEWTARGET:
+ return jsop_newtarget();
+
+ case JSOP_CHECKISOBJ:
+ return jsop_checkisobj(GET_UINT8(pc));
+
+ case JSOP_CHECKOBJCOERCIBLE:
+ return jsop_checkobjcoercible();
+
+ case JSOP_DEBUGCHECKSELFHOSTED:
+ {
+#ifdef DEBUG
+ MDebugCheckSelfHosted* check = MDebugCheckSelfHosted::New(alloc(), current->pop());
+ current->add(check);
+ current->push(check);
+ if (!resumeAfter(check))
+ return false;
+#endif
+ return true;
+ }
+
+ case JSOP_IS_CONSTRUCTING:
+ pushConstant(MagicValue(JS_IS_CONSTRUCTING));
+ return true;
+
+#ifdef DEBUG
+ case JSOP_PUSHLEXICALENV:
+ case JSOP_FRESHENLEXICALENV:
+ case JSOP_RECREATELEXICALENV:
+ case JSOP_POPLEXICALENV:
+ // These opcodes are currently unhandled by Ion, but in principle
+ // there's no reason they couldn't be. Whenever this happens, OSR
+ // will have to consider that JSOP_{FRESHEN,RECREATE}LEXICALENV
+ // mutates the env chain -- right now MBasicBlock::environmentChain()
+ // caches the env chain. JSOP_{FRESHEN,RECREATE}LEXICALENV must
+ // update that stale value.
+#endif
+ default:
+ break;
+ }
+
+    // Track a simpler message, since the actionable abort message is a
+    // static string, and the internal opcode name isn't an actionable
+    // thing anyway.
+ trackActionableAbort("Unsupported bytecode");
+#ifdef DEBUG
+ return abort("Unsupported opcode: %s", CodeName[op]);
+#else
+ return abort("Unsupported opcode: %d", op);
+#endif
+}
+
+// Given that the current control flow structure has ended forcefully,
+// via a return, break, or continue (rather than joining), propagate the
+// termination up. For example, a return nested 5 loops deep may terminate
+// every outer loop at once, if there are no intervening conditionals:
+//
+// for (...) {
+// for (...) {
+// return x;
+// }
+// }
+//
+// If |current| is nullptr when this function returns, then there is no more
+// control flow to be processed.
+IonBuilder::ControlStatus
+IonBuilder::processControlEnd()
+{
+ MOZ_ASSERT(!current);
+
+ if (cfgStack_.empty()) {
+ // If there is no more control flow to process, then this is the
+ // last return in the function.
+ return ControlStatus_Ended;
+ }
+
+ return processCfgStack();
+}
+
+// Processes the top of the CFG stack. This is used from two places:
+// (1) processControlEnd(), whereby a break, continue, or return may interrupt
+// an in-progress CFG structure before reaching its actual termination
+// point in the bytecode.
+// (2) traverseBytecode(), whereby we reach the last instruction in a CFG
+// structure.
+IonBuilder::ControlStatus
+IonBuilder::processCfgStack()
+{
+ ControlStatus status = processCfgEntry(cfgStack_.back());
+
+ // If this terminated a CFG structure, act like processControlEnd() and
+ // keep propagating upward.
+ while (status == ControlStatus_Ended) {
+ popCfgStack();
+ if (cfgStack_.empty())
+ return status;
+ status = processCfgEntry(cfgStack_.back());
+ }
+
+ // If some join took place, the current structure is finished.
+ if (status == ControlStatus_Joined)
+ popCfgStack();
+
+ return status;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processCfgEntry(CFGState& state)
+{
+ switch (state.state) {
+ case CFGState::IF_TRUE:
+ case CFGState::IF_TRUE_EMPTY_ELSE:
+ return processIfEnd(state);
+
+ case CFGState::IF_ELSE_TRUE:
+ return processIfElseTrueEnd(state);
+
+ case CFGState::IF_ELSE_FALSE:
+ return processIfElseFalseEnd(state);
+
+ case CFGState::DO_WHILE_LOOP_BODY:
+ return processDoWhileBodyEnd(state);
+
+ case CFGState::DO_WHILE_LOOP_COND:
+ return processDoWhileCondEnd(state);
+
+ case CFGState::WHILE_LOOP_COND:
+ return processWhileCondEnd(state);
+
+ case CFGState::WHILE_LOOP_BODY:
+ return processWhileBodyEnd(state);
+
+ case CFGState::FOR_LOOP_COND:
+ return processForCondEnd(state);
+
+ case CFGState::FOR_LOOP_BODY:
+ return processForBodyEnd(state);
+
+ case CFGState::FOR_LOOP_UPDATE:
+ return processForUpdateEnd(state);
+
+ case CFGState::TABLE_SWITCH:
+ return processNextTableSwitchCase(state);
+
+ case CFGState::COND_SWITCH_CASE:
+ return processCondSwitchCase(state);
+
+ case CFGState::COND_SWITCH_BODY:
+ return processCondSwitchBody(state);
+
+ case CFGState::AND_OR:
+ return processAndOrEnd(state);
+
+ case CFGState::LABEL:
+ return processLabelEnd(state);
+
+ case CFGState::TRY:
+ return processTryEnd(state);
+
+ default:
+ MOZ_CRASH("unknown cfgstate");
+ }
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processIfEnd(CFGState& state)
+{
+ bool thenBranchTerminated = !current;
+ if (!thenBranchTerminated) {
+ // Here, the false block is the join point. Create an edge from the
+ // current block to the false block. Note that a RETURN opcode
+ // could have already ended the block.
+ current->end(MGoto::New(alloc(), state.branch.ifFalse));
+
+ if (!state.branch.ifFalse->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ if (!setCurrentAndSpecializePhis(state.branch.ifFalse))
+ return ControlStatus_Error;
+ graph().moveBlockToEnd(current);
+ pc = current->pc();
+
+ if (thenBranchTerminated) {
+ // If we can't reach here via the then-branch, we can filter the types
+ // after the if-statement based on the if-condition.
+ MTest* test = state.branch.test;
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return ControlStatus_Error;
+ }
+
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processIfElseTrueEnd(CFGState& state)
+{
+ // We've reached the end of the true branch of an if-else. Don't
+ // create an edge yet, just transition to parsing the false branch.
+ state.state = CFGState::IF_ELSE_FALSE;
+ state.branch.ifTrue = current;
+ state.stopAt = state.branch.falseEnd;
+ pc = state.branch.ifFalse->pc();
+ if (!setCurrentAndSpecializePhis(state.branch.ifFalse))
+ return ControlStatus_Error;
+ graph().moveBlockToEnd(current);
+
+ MTest* test = state.branch.test;
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return ControlStatus_Error;
+
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processIfElseFalseEnd(CFGState& state)
+{
+ // Update the state to have the latest block from the false path.
+ state.branch.ifFalse = current;
+
+ // To create the join node, we need an incoming edge that has not been
+ // terminated yet.
+ MBasicBlock* pred = state.branch.ifTrue
+ ? state.branch.ifTrue
+ : state.branch.ifFalse;
+ MBasicBlock* other = (pred == state.branch.ifTrue) ? state.branch.ifFalse : state.branch.ifTrue;
+
+ if (!pred)
+ return ControlStatus_Ended;
+
+ // Create a new block to represent the join.
+ MBasicBlock* join = newBlock(pred, state.branch.falseEnd);
+ if (!join)
+ return ControlStatus_Error;
+
+ // Create edges from the true and false blocks as needed.
+ pred->end(MGoto::New(alloc(), join));
+
+ if (other) {
+ other->end(MGoto::New(alloc(), join));
+ if (!join->addPredecessor(alloc(), other))
+ return ControlStatus_Error;
+ }
+
+    // Ignore the unreachable remainder of the false block, if any.
+ if (!setCurrentAndSpecializePhis(join))
+ return ControlStatus_Error;
+ pc = current->pc();
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processBrokenLoop(CFGState& state)
+{
+ MOZ_ASSERT(!current);
+
+ MOZ_ASSERT(loopDepth_);
+ loopDepth_--;
+
+ // A broken loop is not a real loop (it has no header or backedge), so
+ // reset the loop depth.
+ for (MBasicBlockIterator i(graph().begin(state.loop.entry)); i != graph().end(); i++) {
+ if (i->loopDepth() > loopDepth_)
+ i->setLoopDepth(i->loopDepth() - 1);
+ }
+
+ // If the loop started with a condition (while/for) then even if the
+ // structure never actually loops, the condition itself can still fail and
+ // thus we must resume at the successor, if one exists.
+ if (!setCurrentAndSpecializePhis(state.loop.successor))
+ return ControlStatus_Error;
+ if (current) {
+ MOZ_ASSERT(current->loopDepth() == loopDepth_);
+ graph().moveBlockToEnd(current);
+ }
+
+ // Join the breaks together and continue parsing.
+ if (state.loop.breaks) {
+ MBasicBlock* block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
+ if (!block)
+ return ControlStatus_Error;
+
+ if (current) {
+ current->end(MGoto::New(alloc(), block));
+ if (!block->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ if (!setCurrentAndSpecializePhis(block))
+ return ControlStatus_Error;
+ }
+
+ // If the loop is not gated on a condition, and has only returns, we'll
+ // reach this case. For example:
+ // do { ... return; } while ();
+ if (!current)
+ return ControlStatus_Ended;
+
+ // Otherwise, the loop is gated on a condition and/or has breaks so keep
+ // parsing at the successor.
+ pc = current->pc();
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::finishLoop(CFGState& state, MBasicBlock* successor)
+{
+ MOZ_ASSERT(current);
+
+ MOZ_ASSERT(loopDepth_);
+ loopDepth_--;
+ MOZ_ASSERT_IF(successor, successor->loopDepth() == loopDepth_);
+
+ // Compute phis in the loop header and propagate them throughout the loop,
+ // including the successor.
+ AbortReason r = state.loop.entry->setBackedge(alloc(), current);
+ if (r == AbortReason_Alloc)
+ return ControlStatus_Error;
+ if (r == AbortReason_Disable) {
+ // If there are types for variables on the backedge that were not
+ // present at the original loop header, then uses of the variables'
+ // phis may have generated incorrect nodes. The new types have been
+ // incorporated into the header phis, so remove all blocks for the
+ // loop body and restart with the new types.
+ return restartLoop(state);
+ }
+
+ if (successor) {
+ graph().moveBlockToEnd(successor);
+ successor->inheritPhis(state.loop.entry);
+ }
+
+ if (state.loop.breaks) {
+ // Propagate phis placed in the header to individual break exit points.
+ DeferredEdge* edge = state.loop.breaks;
+ while (edge) {
+ edge->block->inheritPhis(state.loop.entry);
+ edge = edge->next;
+ }
+
+ // Create a catch block to join all break exits.
+ MBasicBlock* block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
+ if (!block)
+ return ControlStatus_Error;
+
+ if (successor) {
+ // Finally, create an unconditional edge from the successor to the
+ // catch block.
+ successor->end(MGoto::New(alloc(), block));
+ if (!block->addPredecessor(alloc(), successor))
+ return ControlStatus_Error;
+ }
+ successor = block;
+ }
+
+ if (!setCurrentAndSpecializePhis(successor))
+ return ControlStatus_Error;
+
+ // An infinite loop (for (;;) { }) will not have a successor.
+ if (!current)
+ return ControlStatus_Ended;
+
+ pc = current->pc();
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::restartLoop(const CFGState& state)
+{
+ AutoTraceLog logCompile(traceLogger(), TraceLogger_IonBuilderRestartLoop);
+
+ spew("New types at loop header, restarting loop body");
+
+ if (JitOptions.limitScriptSize) {
+ if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS)
+ return ControlStatus_Abort;
+ }
+
+ MBasicBlock* header = state.loop.entry;
+
+ // Discard unreferenced & pre-allocated resume points.
+ replaceMaybeFallbackFunctionGetter(nullptr);
+
+ // Remove all blocks in the loop body other than the header, which has phis
+ // of the appropriate type and incoming edges to preserve.
+ graph().removeBlocksAfter(header);
+
+ // Remove all instructions from the header itself, and all resume points
+ // except the entry resume point.
+ header->discardAllInstructions();
+ header->discardAllResumePoints(/* discardEntry = */ false);
+ header->setStackDepth(header->getPredecessor(0)->stackDepth());
+
+ popCfgStack();
+
+ loopDepth_++;
+
+    // Keep local copies of these pointers: |state| is a reference to
+    // cfgStack_.back() and will be overwritten by pushLoop below.
+ jsbytecode* condpc = state.loop.condpc;
+ jsbytecode* updatepc = state.loop.updatepc;
+ jsbytecode* updateEnd = state.loop.updateEnd;
+
+ if (!pushLoop(state.loop.initialState, state.loop.initialStopAt, header, state.loop.osr,
+ state.loop.loopHead, state.loop.initialPc,
+ state.loop.bodyStart, state.loop.bodyEnd,
+ state.loop.exitpc, state.loop.continuepc))
+ {
+ return ControlStatus_Error;
+ }
+
+ CFGState& nstate = cfgStack_.back();
+
+ nstate.loop.condpc = condpc;
+ nstate.loop.updatepc = updatepc;
+ nstate.loop.updateEnd = updateEnd;
+
+ // Don't specializePhis(), as the header has been visited before and the
+ // phis have already had their type set.
+ setCurrent(header);
+
+ if (!jsop_loophead(nstate.loop.loopHead))
+ return ControlStatus_Error;
+
+ pc = nstate.loop.initialPc;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processDoWhileBodyEnd(CFGState& state)
+{
+ if (!processDeferredContinues(state))
+ return ControlStatus_Error;
+
+ // No current means control flow cannot reach the condition, so this will
+ // never loop.
+ if (!current)
+ return processBrokenLoop(state);
+
+ MBasicBlock* header = newBlock(current, state.loop.updatepc);
+ if (!header)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), header));
+
+ state.state = CFGState::DO_WHILE_LOOP_COND;
+ state.stopAt = state.loop.updateEnd;
+ pc = state.loop.updatepc;
+ if (!setCurrentAndSpecializePhis(header))
+ return ControlStatus_Error;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processDoWhileCondEnd(CFGState& state)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_IFNE);
+
+    // We're guaranteed a |current|; it's impossible to break or return from
+    // inside the conditional expression.
+ MOZ_ASSERT(current);
+
+ // Pop the last value, and create the successor block.
+ MDefinition* vins = current->pop();
+ MBasicBlock* successor = newBlock(current, GetNextPc(pc), loopDepth_ - 1);
+ if (!successor)
+ return ControlStatus_Error;
+
+ // Test for do {} while(false) and don't create a loop in that case.
+ if (MConstant* vinsConst = vins->maybeConstantValue()) {
+ bool b;
+ if (vinsConst->valueToBoolean(&b) && !b) {
+ current->end(MGoto::New(alloc(), successor));
+ current = nullptr;
+
+ state.loop.successor = successor;
+ return processBrokenLoop(state);
+ }
+ }
+
+ // Create the test instruction and end the current block.
+ MTest* test = newTest(vins, state.loop.entry, successor);
+ current->end(test);
+ return finishLoop(state, successor);
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processWhileCondEnd(CFGState& state)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFEQ);
+
+ // Balance the stack past the IFNE.
+ MDefinition* ins = current->pop();
+
+ // Create the body and successor blocks.
+ MBasicBlock* body = newBlock(current, state.loop.bodyStart);
+ state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1);
+ if (!body || !state.loop.successor)
+ return ControlStatus_Error;
+
+ MTest* test;
+ if (JSOp(*pc) == JSOP_IFNE)
+ test = newTest(ins, body, state.loop.successor);
+ else
+ test = newTest(ins, state.loop.successor, body);
+ current->end(test);
+
+ state.state = CFGState::WHILE_LOOP_BODY;
+ state.stopAt = state.loop.bodyEnd;
+ pc = state.loop.bodyStart;
+ if (!setCurrentAndSpecializePhis(body))
+ return ControlStatus_Error;
+
+ // Filter the types in the loop body.
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return ControlStatus_Error;
+
+ // If this is a for-in loop, unbox the current value as string if possible.
+ if (ins->isIsNoIter()) {
+ MIteratorMore* iterMore = ins->toIsNoIter()->input()->toIteratorMore();
+ jsbytecode* iterMorePc = iterMore->resumePoint()->pc();
+ MOZ_ASSERT(*iterMorePc == JSOP_MOREITER);
+
+ if (!nonStringIteration_ && !inspector->hasSeenNonStringIterMore(iterMorePc)) {
+ MDefinition* val = current->peek(-1);
+ MOZ_ASSERT(val == iterMore);
+ MInstruction* ins = MUnbox::New(alloc(), val, MIRType::String, MUnbox::Fallible,
+ Bailout_NonStringInputInvalidate);
+ current->add(ins);
+ current->rewriteAtDepth(-1, ins);
+ }
+ }
+
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processWhileBodyEnd(CFGState& state)
+{
+ if (!processDeferredContinues(state))
+ return ControlStatus_Error;
+
+ if (!current)
+ return processBrokenLoop(state);
+
+ current->end(MGoto::New(alloc(), state.loop.entry));
+ return finishLoop(state, state.loop.successor);
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForCondEnd(CFGState& state)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_IFNE);
+
+ // Balance the stack past the IFNE.
+ MDefinition* ins = current->pop();
+
+ // Create the body and successor blocks.
+ MBasicBlock* body = newBlock(current, state.loop.bodyStart);
+ state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1);
+ if (!body || !state.loop.successor)
+ return ControlStatus_Error;
+
+ MTest* test = newTest(ins, body, state.loop.successor);
+ current->end(test);
+
+ state.state = CFGState::FOR_LOOP_BODY;
+ state.stopAt = state.loop.bodyEnd;
+ pc = state.loop.bodyStart;
+ if (!setCurrentAndSpecializePhis(body))
+ return ControlStatus_Error;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForBodyEnd(CFGState& state)
+{
+ if (!processDeferredContinues(state))
+ return ControlStatus_Error;
+
+    // If there is no updatepc, just go right to processing what would be the
+    // end of the update clause. Otherwise, |current| might be nullptr; if this
+    // is the case, the update is unreachable anyway.
+ if (!state.loop.updatepc || !current)
+ return processForUpdateEnd(state);
+
+ pc = state.loop.updatepc;
+
+ state.state = CFGState::FOR_LOOP_UPDATE;
+ state.stopAt = state.loop.updateEnd;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processForUpdateEnd(CFGState& state)
+{
+ // If there is no current, we couldn't reach the loop edge and there was no
+ // update clause.
+ if (!current)
+ return processBrokenLoop(state);
+
+ current->end(MGoto::New(alloc(), state.loop.entry));
+ return finishLoop(state, state.loop.successor);
+}
+
+IonBuilder::DeferredEdge*
+IonBuilder::filterDeadDeferredEdges(DeferredEdge* edge)
+{
+ DeferredEdge* head = edge;
+ DeferredEdge* prev = nullptr;
+
+ while (edge) {
+ if (edge->block->isDead()) {
+ if (prev)
+ prev->next = edge->next;
+ else
+ head = edge->next;
+ } else {
+ prev = edge;
+ }
+ edge = edge->next;
+ }
+
+ // There must be at least one deferred edge from a block that was not
+ // deleted; blocks are deleted when restarting processing of a loop, and
+ // the final version of the loop body will have edges from live blocks.
+ MOZ_ASSERT(head);
+
+ return head;
+}
+
+bool
+IonBuilder::processDeferredContinues(CFGState& state)
+{
+ // If there are any continues for this loop, and there is an update block,
+ // then we need to create a new basic block to house the update.
+ if (state.loop.continues) {
+ DeferredEdge* edge = filterDeadDeferredEdges(state.loop.continues);
+
+ MBasicBlock* update = newBlock(edge->block, loops_.back().continuepc);
+ if (!update)
+ return false;
+
+ if (current) {
+ current->end(MGoto::New(alloc(), update));
+ if (!update->addPredecessor(alloc(), current))
+ return false;
+ }
+
+        // No need to use addPredecessor for the first edge,
+        // because it is already a predecessor.
+ edge->block->end(MGoto::New(alloc(), update));
+ edge = edge->next;
+
+ // Remaining edges
+ while (edge) {
+ edge->block->end(MGoto::New(alloc(), update));
+ if (!update->addPredecessor(alloc(), edge->block))
+ return false;
+ edge = edge->next;
+ }
+ state.loop.continues = nullptr;
+
+        if (!setCurrentAndSpecializePhis(update))
+            return false;
+ }
+
+ return true;
+}
+
+MBasicBlock*
+IonBuilder::createBreakCatchBlock(DeferredEdge* edge, jsbytecode* pc)
+{
+ edge = filterDeadDeferredEdges(edge);
+
+ // Create block, using the first break statement as predecessor
+ MBasicBlock* successor = newBlock(edge->block, pc);
+ if (!successor)
+ return nullptr;
+
+    // No need to use addPredecessor for the first edge,
+    // because it is already a predecessor.
+ edge->block->end(MGoto::New(alloc(), successor));
+ edge = edge->next;
+
+ // Finish up remaining breaks.
+ while (edge) {
+ MGoto* brk = MGoto::New(alloc().fallible(), successor);
+ if (!brk)
+ return nullptr;
+ edge->block->end(brk);
+ if (!successor->addPredecessor(alloc(), edge->block))
+ return nullptr;
+ edge = edge->next;
+ }
+
+ return successor;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processNextTableSwitchCase(CFGState& state)
+{
+ MOZ_ASSERT(state.state == CFGState::TABLE_SWITCH);
+
+ state.tableswitch.currentBlock++;
+
+ // Test if there are still unprocessed successors (cases/default)
+ if (state.tableswitch.currentBlock >= state.tableswitch.ins->numBlocks())
+ return processSwitchEnd(state.tableswitch.breaks, state.tableswitch.exitpc);
+
+ // Get the next successor
+ MBasicBlock* successor = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock);
+
+ // Add current block as predecessor if available.
+ // This means the previous case didn't have a break statement.
+ // So flow will continue in this block.
+ if (current) {
+ current->end(MGoto::New(alloc(), successor));
+ if (!successor->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ } else {
+        // If this is an actual case statement, optimize by replacing the
+        // input to the switch case with the actual number of the case.
+        // This constant was emitted when creating the case blocks.
+ if (state.tableswitch.ins->getDefault() != successor) {
+ MConstant* constant = successor->begin()->toConstant();
+ for (uint32_t j = 0; j < successor->stackDepth(); j++) {
+ MDefinition* ins = successor->getSlot(j);
+ if (ins != state.tableswitch.ins->getOperand(0))
+ continue;
+
+ constant->setDependency(state.tableswitch.ins);
+ successor->setSlot(j, constant);
+ }
+ }
+ }
+
+ // Insert successor after the current block, to maintain RPO.
+ graph().moveBlockToEnd(successor);
+
+    // If this is the last successor, the block should stop at the end of the
+    // tableswitch. Otherwise it should stop at the start of the next successor.
+ if (state.tableswitch.currentBlock+1 < state.tableswitch.ins->numBlocks())
+ state.stopAt = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock+1)->pc();
+ else
+ state.stopAt = state.tableswitch.exitpc;
+
+ if (!setCurrentAndSpecializePhis(successor))
+ return ControlStatus_Error;
+ pc = current->pc();
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processAndOrEnd(CFGState& state)
+{
+ MOZ_ASSERT(current);
+ MBasicBlock* lhs = state.branch.ifFalse;
+
+ // Create a new block to represent the join.
+ MBasicBlock* join = newBlock(current, state.stopAt);
+ if (!join)
+ return ControlStatus_Error;
+
+ // End the rhs.
+ current->end(MGoto::New(alloc(), join));
+
+ // End the lhs.
+ lhs->end(MGoto::New(alloc(), join));
+ if (!join->addPredecessor(alloc(), state.branch.ifFalse))
+ return ControlStatus_Error;
+
+ // Set the join path as current path.
+ if (!setCurrentAndSpecializePhis(join))
+ return ControlStatus_Error;
+ pc = current->pc();
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processLabelEnd(CFGState& state)
+{
+ MOZ_ASSERT(state.state == CFGState::LABEL);
+
+    // If there are no breaks and no current, control flow is terminated.
+ if (!state.label.breaks && !current)
+ return ControlStatus_Ended;
+
+ // If there are no breaks to this label, there's nothing to do.
+ if (!state.label.breaks)
+ return ControlStatus_Joined;
+
+ MBasicBlock* successor = createBreakCatchBlock(state.label.breaks, state.stopAt);
+ if (!successor)
+ return ControlStatus_Error;
+
+ if (current) {
+ current->end(MGoto::New(alloc(), successor));
+ if (!successor->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ pc = state.stopAt;
+ if (!setCurrentAndSpecializePhis(successor))
+ return ControlStatus_Error;
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processTryEnd(CFGState& state)
+{
+ MOZ_ASSERT(state.state == CFGState::TRY);
+
+ if (!state.try_.successor) {
+ MOZ_ASSERT(!current);
+ return ControlStatus_Ended;
+ }
+
+ if (current) {
+ current->end(MGoto::New(alloc(), state.try_.successor));
+
+ if (!state.try_.successor->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ // Start parsing the code after this try-catch statement.
+ if (!setCurrentAndSpecializePhis(state.try_.successor))
+ return ControlStatus_Error;
+ graph().moveBlockToEnd(current);
+ pc = current->pc();
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processBreak(JSOp op, jssrcnote* sn)
+{
+ MOZ_ASSERT(op == JSOP_GOTO);
+
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_BREAK ||
+ SN_TYPE(sn) == SRC_BREAK2LABEL);
+
+ // Find the break target.
+ jsbytecode* target = pc + GetJumpOffset(pc);
+ DebugOnly<bool> found = false;
+
+ if (SN_TYPE(sn) == SRC_BREAK2LABEL) {
+ for (size_t i = labels_.length() - 1; i < labels_.length(); i--) {
+ CFGState& cfg = cfgStack_[labels_[i].cfgEntry];
+ MOZ_ASSERT(cfg.state == CFGState::LABEL);
+ if (cfg.stopAt == target) {
+ cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks);
+ found = true;
+ break;
+ }
+ }
+ } else {
+ for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
+ CFGState& cfg = cfgStack_[loops_[i].cfgEntry];
+ MOZ_ASSERT(cfg.isLoop());
+ if (cfg.loop.exitpc == target) {
+ cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ MOZ_ASSERT(found);
+
+ setCurrent(nullptr);
+ pc += CodeSpec[op].length;
+ return processControlEnd();
+}
+
+static inline jsbytecode*
+EffectiveContinue(jsbytecode* pc)
+{
+ if (JSOp(*pc) == JSOP_GOTO)
+ return pc + GetJumpOffset(pc);
+ return pc;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processContinue(JSOp op)
+{
+ MOZ_ASSERT(op == JSOP_GOTO);
+
+ // Find the target loop.
+ CFGState* found = nullptr;
+ jsbytecode* target = pc + GetJumpOffset(pc);
+ for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
+ // +1 to skip JSOP_JUMPTARGET.
+ if (loops_[i].continuepc == target + 1 ||
+ EffectiveContinue(loops_[i].continuepc) == target)
+ {
+ found = &cfgStack_[loops_[i].cfgEntry];
+ break;
+ }
+ }
+
+ // There must always be a valid target loop structure. If not, there's
+ // probably an off-by-something error in which pc we track.
+ MOZ_ASSERT(found);
+ CFGState& state = *found;
+
+ state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues);
+
+ setCurrent(nullptr);
+ pc += CodeSpec[op].length;
+ return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processSwitchBreak(JSOp op)
+{
+ MOZ_ASSERT(op == JSOP_GOTO);
+
+ // Find the target switch.
+ CFGState* found = nullptr;
+ jsbytecode* target = pc + GetJumpOffset(pc);
+ for (size_t i = switches_.length() - 1; i < switches_.length(); i--) {
+ if (switches_[i].continuepc == target) {
+ found = &cfgStack_[switches_[i].cfgEntry];
+ break;
+ }
+ }
+
+    // There must always be a valid target switch structure. If not, there's
+    // probably an off-by-something error in which pc we track.
+ MOZ_ASSERT(found);
+ CFGState& state = *found;
+
+ DeferredEdge** breaks = nullptr;
+ switch (state.state) {
+ case CFGState::TABLE_SWITCH:
+ breaks = &state.tableswitch.breaks;
+ break;
+ case CFGState::COND_SWITCH_BODY:
+ breaks = &state.condswitch.breaks;
+ break;
+ default:
+ MOZ_CRASH("Unexpected switch state.");
+ }
+
+ *breaks = new(alloc()) DeferredEdge(current, *breaks);
+
+ setCurrent(nullptr);
+ pc += CodeSpec[op].length;
+ return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processSwitchEnd(DeferredEdge* breaks, jsbytecode* exitpc)
+{
+    // No break statements and no current block: control flow is cut off
+    // from this point on (e.g. all cases have return statements).
+ if (!breaks && !current)
+ return ControlStatus_Ended;
+
+    // Create the successor block: if there are breaks, create it with the
+    // breaks as predecessors; otherwise create it with |current| as
+    // predecessor.
+ MBasicBlock* successor = nullptr;
+ if (breaks)
+ successor = createBreakCatchBlock(breaks, exitpc);
+ else
+ successor = newBlock(current, exitpc);
+
+ if (!successor)
+ return ControlStatus_Error;
+
+    // If there is a current block, it flows into the successor, so current is
+    // also a predecessor of this block.
+ if (current) {
+ current->end(MGoto::New(alloc(), successor));
+ if (breaks) {
+ if (!successor->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+ }
+
+ pc = exitpc;
+ if (!setCurrentAndSpecializePhis(successor))
+ return ControlStatus_Error;
+ return ControlStatus_Joined;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::maybeLoop(JSOp op, jssrcnote* sn)
+{
+ // This function looks at the opcode and source note and tries to
+ // determine the structure of the loop. For some opcodes, like
+ // POP/NOP which are not explicitly control flow, this source note is
+ // optional. For opcodes with control flow, like GOTO, an unrecognized
+ // or not-present source note is a compilation failure.
+ switch (op) {
+ case JSOP_POP:
+ // for (init; ; update?) ...
+ if (sn && SN_TYPE(sn) == SRC_FOR) {
+ current->pop();
+ return forLoop(op, sn);
+ }
+ break;
+
+ case JSOP_NOP:
+ if (sn) {
+ // do { } while (cond)
+ if (SN_TYPE(sn) == SRC_WHILE)
+ return doWhileLoop(op, sn);
+
+ // for (; ; update?)
+ if (SN_TYPE(sn) == SRC_FOR)
+ return forLoop(op, sn);
+ }
+ break;
+
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+
+ return ControlStatus_None;
+}
+
+void
+IonBuilder::assertValidLoopHeadOp(jsbytecode* pc)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD);
+
+ // Make sure this is the next opcode after the loop header,
+ // unless the for loop is unconditional.
+ CFGState& state = cfgStack_.back();
+ MOZ_ASSERT_IF((JSOp)*(state.loop.entry->pc()) == JSOP_GOTO,
+ GetNextPc(state.loop.entry->pc()) == pc);
+
+ // do-while loops have a source note.
+ jssrcnote* sn = info().getNote(gsn, pc);
+ if (sn) {
+ jsbytecode* ifne = pc + GetSrcNoteOffset(sn, 0);
+
+ jsbytecode* expected_ifne;
+ switch (state.state) {
+ case CFGState::DO_WHILE_LOOP_BODY:
+ expected_ifne = state.loop.updateEnd;
+ break;
+
+ default:
+ MOZ_CRASH("JSOP_LOOPHEAD unexpected source note");
+ }
+
+ // Make sure this loop goes to the same ifne as the loop header's
+ // source notes or GOTO.
+ MOZ_ASSERT(ifne == expected_ifne);
+ } else {
+ MOZ_ASSERT(state.state != CFGState::DO_WHILE_LOOP_BODY);
+ }
+#endif
+}
+
+IonBuilder::ControlStatus
+IonBuilder::doWhileLoop(JSOp op, jssrcnote* sn)
+{
+ // do { } while() loops have the following structure:
+ // NOP ; SRC_WHILE (offset to COND)
+ // LOOPHEAD ; SRC_WHILE (offset to IFNE)
+ // LOOPENTRY
+ // ... ; body
+ // ...
+ // COND ; start of condition
+ // ...
+ // IFNE -> ; goes to LOOPHEAD
+ int condition_offset = GetSrcNoteOffset(sn, 0);
+ jsbytecode* conditionpc = pc + condition_offset;
+
+ jssrcnote* sn2 = info().getNote(gsn, pc+1);
+ int offset = GetSrcNoteOffset(sn2, 0);
+ jsbytecode* ifne = pc + offset + 1;
+ MOZ_ASSERT(ifne > pc);
+
+ // Verify that the IFNE goes back to a loophead op.
+ jsbytecode* loopHead = GetNextPc(pc);
+ MOZ_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD);
+ MOZ_ASSERT(loopHead == ifne + GetJumpOffset(ifne));
+
+ jsbytecode* loopEntry = GetNextPc(loopHead);
+ bool canOsr = LoopEntryCanIonOsr(loopEntry);
+ bool osr = info().hasOsrAt(loopEntry);
+
+ if (osr) {
+ MBasicBlock* preheader = newOsrPreheader(current, loopEntry, pc);
+ if (!preheader)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), preheader));
+ if (!setCurrentAndSpecializePhis(preheader))
+ return ControlStatus_Error;
+ }
+
+ unsigned stackPhiCount = 0;
+ MBasicBlock* header = newPendingLoopHeader(current, loopEntry, osr, canOsr, stackPhiCount);
+ if (!header)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), header));
+
+ jsbytecode* loophead = GetNextPc(pc);
+ jsbytecode* bodyStart = GetNextPc(loophead);
+ jsbytecode* bodyEnd = conditionpc;
+ jsbytecode* exitpc = GetNextPc(ifne);
+ if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
+ return ControlStatus_Error;
+ if (!pushLoop(CFGState::DO_WHILE_LOOP_BODY, conditionpc, header, osr,
+ loopHead, bodyStart, bodyStart, bodyEnd, exitpc, conditionpc))
+ {
+ return ControlStatus_Error;
+ }
+
+ CFGState& state = cfgStack_.back();
+ state.loop.updatepc = conditionpc;
+ state.loop.updateEnd = ifne;
+
+ if (!setCurrentAndSpecializePhis(header))
+ return ControlStatus_Error;
+ if (!jsop_loophead(loophead))
+ return ControlStatus_Error;
+
+ pc = bodyStart;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::whileOrForInLoop(jssrcnote* sn)
+{
+ // while (cond) { } loops have the following structure:
+ // GOTO cond ; SRC_WHILE (offset to IFNE)
+ // LOOPHEAD
+ // ...
+ // cond:
+ // LOOPENTRY
+ // ...
+ // IFNE ; goes to LOOPHEAD
+ // for (x in y) { } loops are similar; the cond will be a MOREITER.
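+ // For instance, while (i < n) { body(); } is emitted as a forward GOTO to
+ // the condition, with the IFNE at the end jumping back to LOOPHEAD.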
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_FOR_OF || SN_TYPE(sn) == SRC_FOR_IN || SN_TYPE(sn) == SRC_WHILE);
+ int ifneOffset = GetSrcNoteOffset(sn, 0);
+ jsbytecode* ifne = pc + ifneOffset;
+ MOZ_ASSERT(ifne > pc);
+
+ // Verify that the IFNE goes back to a loophead op.
+ MOZ_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_LOOPHEAD);
+ MOZ_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne));
+
+ jsbytecode* loopEntry = pc + GetJumpOffset(pc);
+ bool canOsr = LoopEntryCanIonOsr(loopEntry);
+ bool osr = info().hasOsrAt(loopEntry);
+
+ if (osr) {
+ MBasicBlock* preheader = newOsrPreheader(current, loopEntry, pc);
+ if (!preheader)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), preheader));
+ if (!setCurrentAndSpecializePhis(preheader))
+ return ControlStatus_Error;
+ }
+
+ unsigned stackPhiCount;
+ if (SN_TYPE(sn) == SRC_FOR_OF)
+ stackPhiCount = 2;
+ else if (SN_TYPE(sn) == SRC_FOR_IN)
+ stackPhiCount = 1;
+ else
+ stackPhiCount = 0;
+
+ MBasicBlock* header = newPendingLoopHeader(current, loopEntry, osr, canOsr, stackPhiCount);
+ if (!header)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), header));
+
+ // Skip past the JSOP_LOOPHEAD for the body start.
+ jsbytecode* loopHead = GetNextPc(pc);
+ jsbytecode* bodyStart = GetNextPc(loopHead);
+ jsbytecode* bodyEnd = pc + GetJumpOffset(pc);
+ jsbytecode* exitpc = GetNextPc(ifne);
+ jsbytecode* continuepc = pc;
+ if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
+ return ControlStatus_Error;
+ if (!pushLoop(CFGState::WHILE_LOOP_COND, ifne, header, osr,
+ loopHead, bodyEnd, bodyStart, bodyEnd, exitpc, continuepc))
+ {
+ return ControlStatus_Error;
+ }
+
+ // Parse the condition first.
+ if (!setCurrentAndSpecializePhis(header))
+ return ControlStatus_Error;
+ if (!jsop_loophead(loopHead))
+ return ControlStatus_Error;
+
+ pc = bodyEnd;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::forLoop(JSOp op, jssrcnote* sn)
+{
+ // Skip the NOP.
+ MOZ_ASSERT(op == JSOP_NOP);
+ pc = GetNextPc(pc);
+
+ jsbytecode* condpc = pc + GetSrcNoteOffset(sn, 0);
+ jsbytecode* updatepc = pc + GetSrcNoteOffset(sn, 1);
+ jsbytecode* ifne = pc + GetSrcNoteOffset(sn, 2);
+ jsbytecode* exitpc = GetNextPc(ifne);
+
+ // for loops have the following structures:
+ //
+ // NOP or POP
+ // [GOTO cond | NOP]
+ // LOOPHEAD
+ // body:
+ // ; [body]
+ // [increment:]
+ // [{FRESHEN,RECREATE}LEXICALENV, if needed by a lexical env]
+ // ; [increment]
+ // [cond:]
+ // LOOPENTRY
+ // GOTO body
+ //
+ // If there is a condition (condpc != ifne), this acts similarly to a while
+ // loop; otherwise, it acts like a do-while loop.
+ //
+ // Note that currently Ion doesn't compile pushlexicalenv/poplexicalenv,
+ // necessary prerequisites to {freshen,recreate}lexicalenv. So the code
+ // below doesn't and needn't consider either op's implications.
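+ //
+ // For instance, for (i = 0; i < n; i++) { body(); } has both a condition
+ // and an update, so condpc != ifne and it is handled like the while case,
+ // whereas for (i = 0; ; i++) has condpc == ifne and is handled like a
+ // do-while loop.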
+ jsbytecode* bodyStart = pc;
+ jsbytecode* bodyEnd = updatepc;
+ jsbytecode* loopEntry = condpc;
+ if (condpc != ifne) {
+ MOZ_ASSERT(JSOp(*bodyStart) == JSOP_GOTO);
+ MOZ_ASSERT(bodyStart + GetJumpOffset(bodyStart) == condpc);
+ bodyStart = GetNextPc(bodyStart);
+ } else {
+ // No loop condition, such as for(j = 0; ; j++)
+ if (op != JSOP_NOP) {
+ // If the loop starts with POP, we have to skip a NOP.
+ MOZ_ASSERT(JSOp(*bodyStart) == JSOP_NOP);
+ bodyStart = GetNextPc(bodyStart);
+ }
+ loopEntry = GetNextPc(bodyStart);
+ }
+ jsbytecode* loopHead = bodyStart;
+ MOZ_ASSERT(JSOp(*bodyStart) == JSOP_LOOPHEAD);
+ MOZ_ASSERT(ifne + GetJumpOffset(ifne) == bodyStart);
+ bodyStart = GetNextPc(bodyStart);
+
+ bool osr = info().hasOsrAt(loopEntry);
+ bool canOsr = LoopEntryCanIonOsr(loopEntry);
+
+ if (osr) {
+ MBasicBlock* preheader = newOsrPreheader(current, loopEntry, pc);
+ if (!preheader)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), preheader));
+ if (!setCurrentAndSpecializePhis(preheader))
+ return ControlStatus_Error;
+ }
+
+ unsigned stackPhiCount = 0;
+ MBasicBlock* header = newPendingLoopHeader(current, loopEntry, osr, canOsr, stackPhiCount);
+ if (!header)
+ return ControlStatus_Error;
+ current->end(MGoto::New(alloc(), header));
+
+ // If there is no condition, we immediately parse the body. Otherwise, we
+ // parse the condition.
+ jsbytecode* stopAt;
+ CFGState::State initial;
+ if (condpc != ifne) {
+ pc = condpc;
+ stopAt = ifne;
+ initial = CFGState::FOR_LOOP_COND;
+ } else {
+ pc = bodyStart;
+ stopAt = bodyEnd;
+ initial = CFGState::FOR_LOOP_BODY;
+ }
+
+ if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
+ return ControlStatus_Error;
+ if (!pushLoop(initial, stopAt, header, osr,
+ loopHead, pc, bodyStart, bodyEnd, exitpc, updatepc))
+ {
+ return ControlStatus_Error;
+ }
+
+ CFGState& state = cfgStack_.back();
+ state.loop.condpc = (condpc != ifne) ? condpc : nullptr;
+ state.loop.updatepc = (updatepc != condpc) ? updatepc : nullptr;
+ if (state.loop.updatepc)
+ state.loop.updateEnd = condpc;
+
+ if (!setCurrentAndSpecializePhis(header))
+ return ControlStatus_Error;
+ if (!jsop_loophead(loopHead))
+ return ControlStatus_Error;
+
+ return ControlStatus_Jumped;
+}
+
+int
+IonBuilder::CmpSuccessors(const void* a, const void* b)
+{
+ const MBasicBlock* a0 = * (MBasicBlock * const*)a;
+ const MBasicBlock* b0 = * (MBasicBlock * const*)b;
+ if (a0->pc() == b0->pc())
+ return 0;
+
+ return (a0->pc() > b0->pc()) ? 1 : -1;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::tableSwitch(JSOp op, jssrcnote* sn)
+{
+ // TableSwitch op contains the following data
+ // (length between data is JUMP_OFFSET_LEN)
+ //
+ // 0: Offset of default case
+ // 1: Lowest number in tableswitch
+ // 2: Highest number in tableswitch
+ // 3: Offset of case low
+ // 4: Offset of case low+1
+ // .: ...
+ // .: Offset of case high
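+ //
+ // For instance, switch (x) { case 1: ...; case 3: ... } can still be
+ // emitted as a tableswitch with low == 1 and high == 3; the missing
+ // case 2 shows up as a filled gap (its casepc equals the switch pc) and
+ // is routed to the default case in the loop below.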
+
+ MOZ_ASSERT(op == JSOP_TABLESWITCH);
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_TABLESWITCH);
+
+ // Pop input.
+ MDefinition* ins = current->pop();
+
+ // Get the default and exit pc
+ jsbytecode* exitpc = pc + GetSrcNoteOffset(sn, 0);
+ jsbytecode* defaultpc = pc + GET_JUMP_OFFSET(pc);
+
+ MOZ_ASSERT(defaultpc > pc && defaultpc <= exitpc);
+
+ // Get the low and high from the tableswitch
+ jsbytecode* pc2 = pc;
+ pc2 += JUMP_OFFSET_LEN;
+ int low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ int high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+
+ // Create MIR instruction
+ MTableSwitch* tableswitch = MTableSwitch::New(alloc(), ins, low, high);
+
+ // Create default case
+ MBasicBlock* defaultcase = newBlock(current, defaultpc);
+ if (!defaultcase)
+ return ControlStatus_Error;
+
+ if (!tableswitch->addDefault(defaultcase))
+ return ControlStatus_Error;
+
+ if (!tableswitch->addBlock(defaultcase))
+ return ControlStatus_Error;
+
+ // Create cases
+ jsbytecode* casepc = nullptr;
+ for (int i = 0; i < high-low+1; i++) {
+ casepc = pc + GET_JUMP_OFFSET(pc2);
+
+ MOZ_ASSERT(casepc >= pc && casepc <= exitpc);
+ MBasicBlock* caseblock;
+
+ if (casepc == pc) {
+ // If the casepc equals the current pc, it is not a written case
+ // but a filled gap. That way we can use a tableswitch instead of a
+ // condswitch, even if not all case numbers are consecutive.
+ // Such a gap block goes to the default case.
+ caseblock = newBlock(current, defaultpc);
+ if (!caseblock)
+ return ControlStatus_Error;
+ caseblock->end(MGoto::New(alloc(), defaultcase));
+ if (!defaultcase->addPredecessor(alloc(), caseblock))
+ return ControlStatus_Error;
+ } else {
+ // If this is an actual case (not filled gap),
+ // add this block to the list that still needs to get processed.
+ caseblock = newBlock(current, casepc);
+ if (!caseblock)
+ return ControlStatus_Error;
+
+ if (!tableswitch->addBlock(caseblock))
+ return ControlStatus_Error;
+
+ // Add constant to indicate which case this is for use by
+ // processNextTableSwitchCase.
+ MConstant* constant = MConstant::New(alloc(), Int32Value(i + low));
+ caseblock->add(constant);
+ }
+
+ size_t caseIndex;
+ if (!tableswitch->addSuccessor(caseblock, &caseIndex))
+ return ControlStatus_Error;
+
+ if (!tableswitch->addCase(caseIndex))
+ return ControlStatus_Error;
+
+ pc2 += JUMP_OFFSET_LEN;
+ }
+
+ // Move defaultcase to the end, to maintain RPO.
+ graph().moveBlockToEnd(defaultcase);
+
+ MOZ_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1));
+ MOZ_ASSERT(tableswitch->numSuccessors() > 0);
+
+ // Sort the list of blocks that still need to be processed by pc.
+ qsort(tableswitch->blocks(), tableswitch->numBlocks(),
+ sizeof(MBasicBlock*), CmpSuccessors);
+
+ // Create info
+ ControlFlowInfo switchinfo(cfgStack_.length(), exitpc);
+ if (!switches_.append(switchinfo))
+ return ControlStatus_Error;
+
+ // Use a state to retrieve some information
+ CFGState state = CFGState::TableSwitch(exitpc, tableswitch);
+
+ // Save the MIR instruction as last instruction of this block.
+ current->end(tableswitch);
+
+ // If there is only one successor, the block should stop at the end of the
+ // switch; otherwise it should stop at the start of the next successor.
+ if (tableswitch->numBlocks() > 1)
+ state.stopAt = tableswitch->getBlock(1)->pc();
+ if (!setCurrentAndSpecializePhis(tableswitch->getBlock(0)))
+ return ControlStatus_Error;
+
+ if (!cfgStack_.append(state))
+ return ControlStatus_Error;
+
+ pc = current->pc();
+ return ControlStatus_Jumped;
+}
+
+bool
+IonBuilder::replaceTypeSet(MDefinition* subject, TemporaryTypeSet* type, MTest* test)
+{
+ if (type->unknown())
+ return true;
+
+ // Don't emit MFilterTypeSet if it doesn't improve the typeset.
+ if (subject->resultTypeSet()) {
+ if (subject->resultTypeSet()->equals(type))
+ return true;
+ } else {
+ TemporaryTypeSet oldTypes(alloc_->lifoAlloc(), subject->type());
+ if (oldTypes.equals(type))
+ return true;
+ }
+
+ MInstruction* replace = nullptr;
+ MDefinition* ins;
+
+ for (uint32_t i = 0; i < current->stackDepth(); i++) {
+ ins = current->getSlot(i);
+
+ // Instead of creating a new MFilterTypeSet, try to update the old one.
+ if (ins->isFilterTypeSet() && ins->getOperand(0) == subject &&
+ ins->dependency() == test)
+ {
+ TemporaryTypeSet* intersect =
+ TypeSet::intersectSets(ins->resultTypeSet(), type, alloc_->lifoAlloc());
+ if (!intersect)
+ return false;
+
+ ins->toFilterTypeSet()->setResultType(intersect->getKnownMIRType());
+ ins->toFilterTypeSet()->setResultTypeSet(intersect);
+
+ if (ins->type() == MIRType::Undefined)
+ current->setSlot(i, constant(UndefinedValue()));
+ else if (ins->type() == MIRType::Null)
+ current->setSlot(i, constant(NullValue()));
+ else if (ins->type() == MIRType::MagicOptimizedArguments)
+ current->setSlot(i, constant(MagicValue(JS_OPTIMIZED_ARGUMENTS)));
+ else
+ MOZ_ASSERT(!IsMagicType(ins->type()));
+ continue;
+ }
+
+ if (ins == subject) {
+ if (!replace) {
+ replace = MFilterTypeSet::New(alloc(), subject, type);
+ if (!replace)
+ return false;
+
+ current->add(replace);
+
+ // To make sure we don't hoist it above the MTest, we use the
+ // 'dependency' field of an MInstruction. This is normally used by
+ // Alias Analysis, but it won't get overwritten here, since this
+ // instruction doesn't have an AliasSet.
+ replace->setDependency(test);
+
+ if (replace->type() == MIRType::Undefined)
+ replace = constant(UndefinedValue());
+ else if (replace->type() == MIRType::Null)
+ replace = constant(NullValue());
+ else if (replace->type() == MIRType::MagicOptimizedArguments)
+ replace = constant(MagicValue(JS_OPTIMIZED_ARGUMENTS));
+ else
+ MOZ_ASSERT(!IsMagicType(ins->type()));
+ }
+ current->setSlot(i, replace);
+ }
+ }
+ return true;
+}
+
+bool
+IonBuilder::detectAndOrStructure(MPhi* ins, bool* branchIsAnd)
+{
+ // Look for a triangle pattern:
+ //
+ // initialBlock
+ // / |
+ // branchBlock |
+ // \ |
+ // testBlock
+ //
+ // Where ins is a phi from testBlock which combines two values
+ // pushed onto the stack by initialBlock and branchBlock.
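+ //
+ // This shape typically comes from a short-circuit expression used as a
+ // test, e.g. if (a && b) or if (a || b), where branchBlock evaluates the
+ // second operand only when the first one does not short-circuit.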
+
+ if (ins->numOperands() != 2)
+ return false;
+
+ MBasicBlock* testBlock = ins->block();
+ MOZ_ASSERT(testBlock->numPredecessors() == 2);
+
+ MBasicBlock* initialBlock;
+ MBasicBlock* branchBlock;
+ if (testBlock->getPredecessor(0)->lastIns()->isTest()) {
+ initialBlock = testBlock->getPredecessor(0);
+ branchBlock = testBlock->getPredecessor(1);
+ } else if (testBlock->getPredecessor(1)->lastIns()->isTest()) {
+ initialBlock = testBlock->getPredecessor(1);
+ branchBlock = testBlock->getPredecessor(0);
+ } else {
+ return false;
+ }
+
+ if (branchBlock->numSuccessors() != 1)
+ return false;
+
+ if (branchBlock->numPredecessors() != 1 || branchBlock->getPredecessor(0) != initialBlock)
+ return false;
+
+ if (initialBlock->numSuccessors() != 2)
+ return false;
+
+ MDefinition* branchResult = ins->getOperand(testBlock->indexForPredecessor(branchBlock));
+ MDefinition* initialResult = ins->getOperand(testBlock->indexForPredecessor(initialBlock));
+
+ if (branchBlock->stackDepth() != initialBlock->stackDepth())
+ return false;
+ if (branchBlock->stackDepth() != testBlock->stackDepth() + 1)
+ return false;
+ if (branchResult != branchBlock->peek(-1) || initialResult != initialBlock->peek(-1))
+ return false;
+
+ MTest* initialTest = initialBlock->lastIns()->toTest();
+ bool branchIsTrue = branchBlock == initialTest->ifTrue();
+ if (initialTest->input() == ins->getOperand(0))
+ *branchIsAnd = branchIsTrue != (testBlock->getPredecessor(0) == branchBlock);
+ else if (initialTest->input() == ins->getOperand(1))
+ *branchIsAnd = branchIsTrue != (testBlock->getPredecessor(1) == branchBlock);
+ else
+ return false;
+
+ return true;
+}
+
+bool
+IonBuilder::improveTypesAtCompare(MCompare* ins, bool trueBranch, MTest* test)
+{
+ if (ins->compareType() == MCompare::Compare_Undefined ||
+ ins->compareType() == MCompare::Compare_Null)
+ {
+ return improveTypesAtNullOrUndefinedCompare(ins, trueBranch, test);
+ }
+
+ if ((ins->lhs()->isTypeOf() || ins->rhs()->isTypeOf()) &&
+ (ins->lhs()->isConstant() || ins->rhs()->isConstant()))
+ {
+ return improveTypesAtTypeOfCompare(ins, trueBranch, test);
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::improveTypesAtTypeOfCompare(MCompare* ins, bool trueBranch, MTest* test)
+{
+ MTypeOf* typeOf = ins->lhs()->isTypeOf() ? ins->lhs()->toTypeOf() : ins->rhs()->toTypeOf();
+ MConstant* constant = ins->lhs()->isConstant() ? ins->lhs()->toConstant() : ins->rhs()->toConstant();
+
+ if (constant->type() != MIRType::String)
+ return true;
+
+ bool equal = ins->jsop() == JSOP_EQ || ins->jsop() == JSOP_STRICTEQ;
+ bool notEqual = ins->jsop() == JSOP_NE || ins->jsop() == JSOP_STRICTNE;
+
+ if (notEqual)
+ trueBranch = !trueBranch;
+
+ // Relational compares not supported.
+ if (!equal && !notEqual)
+ return true;
+
+ MDefinition* subject = typeOf->input();
+ TemporaryTypeSet* inputTypes = subject->resultTypeSet();
+
+ // Create temporary typeset equal to the type if there is no resultTypeSet.
+ TemporaryTypeSet tmp;
+ if (!inputTypes) {
+ if (subject->type() == MIRType::Value)
+ return true;
+ inputTypes = &tmp;
+ tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())), alloc_->lifoAlloc());
+ }
+
+ if (inputTypes->unknown())
+ return true;
+
+ // Note: we cannot remove the AnyObject type in the false branch,
+ // since there are multiple ways to get an object. That is the reason
+ // for the 'trueBranch' test.
+ TemporaryTypeSet filter;
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ if (constant->toString() == TypeName(JSTYPE_VOID, names)) {
+ filter.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
+ if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch)
+ filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_BOOLEAN, names)) {
+ filter.addType(TypeSet::BooleanType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_NUMBER, names)) {
+ filter.addType(TypeSet::Int32Type(), alloc_->lifoAlloc());
+ filter.addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_STRING, names)) {
+ filter.addType(TypeSet::StringType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_SYMBOL, names)) {
+ filter.addType(TypeSet::SymbolType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_OBJECT, names)) {
+ filter.addType(TypeSet::NullType(), alloc_->lifoAlloc());
+ if (trueBranch)
+ filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
+ } else if (constant->toString() == TypeName(JSTYPE_FUNCTION, names)) {
+ if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch)
+ filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
+ } else {
+ return true;
+ }
+
+ TemporaryTypeSet* type;
+ if (trueBranch)
+ type = TypeSet::intersectSets(&filter, inputTypes, alloc_->lifoAlloc());
+ else
+ type = TypeSet::removeSet(inputTypes, &filter, alloc_->lifoAlloc());
+
+ if (!type)
+ return false;
+
+ return replaceTypeSet(subject, type, test);
+}
+
+bool
+IonBuilder::improveTypesAtNullOrUndefinedCompare(MCompare* ins, bool trueBranch, MTest* test)
+{
+ MOZ_ASSERT(ins->compareType() == MCompare::Compare_Undefined ||
+ ins->compareType() == MCompare::Compare_Null);
+
+ // altersUndefined/Null indicate whether we can filter/set Undefined/Null.
+ bool altersUndefined, altersNull;
+ JSOp op = ins->jsop();
+
+ switch(op) {
+ case JSOP_STRICTNE:
+ case JSOP_STRICTEQ:
+ altersUndefined = ins->compareType() == MCompare::Compare_Undefined;
+ altersNull = ins->compareType() == MCompare::Compare_Null;
+ break;
+ case JSOP_NE:
+ case JSOP_EQ:
+ altersUndefined = altersNull = true;
+ break;
+ default:
+ MOZ_CRASH("Relational compares not supported");
+ }
+
+ MDefinition* subject = ins->lhs();
+ TemporaryTypeSet* inputTypes = subject->resultTypeSet();
+
+ MOZ_ASSERT(IsNullOrUndefined(ins->rhs()->type()));
+
+ // Create temporary typeset equal to the type if there is no resultTypeSet.
+ TemporaryTypeSet tmp;
+ if (!inputTypes) {
+ if (subject->type() == MIRType::Value)
+ return true;
+ inputTypes = &tmp;
+ tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())), alloc_->lifoAlloc());
+ }
+
+ if (inputTypes->unknown())
+ return true;
+
+ TemporaryTypeSet* type;
+
+ // Decide if we need to filter the type or set it.
+ if ((op == JSOP_STRICTEQ || op == JSOP_EQ) ^ trueBranch) {
+ // Remove undefined/null
+ TemporaryTypeSet remove;
+ if (altersUndefined)
+ remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
+ if (altersNull)
+ remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
+
+ type = TypeSet::removeSet(inputTypes, &remove, alloc_->lifoAlloc());
+ } else {
+ // Set undefined/null.
+ TemporaryTypeSet base;
+ if (altersUndefined) {
+ base.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
+ // If TypeSet emulates undefined, then we cannot filter the objects.
+ if (inputTypes->maybeEmulatesUndefined(constraints()))
+ base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
+ }
+
+ if (altersNull)
+ base.addType(TypeSet::NullType(), alloc_->lifoAlloc());
+
+ type = TypeSet::intersectSets(&base, inputTypes, alloc_->lifoAlloc());
+ }
+
+ if (!type)
+ return false;
+
+ return replaceTypeSet(subject, type, test);
+}
+
+bool
+IonBuilder::improveTypesAtTest(MDefinition* ins, bool trueBranch, MTest* test)
+{
+ // We explore the test condition to try and deduce as much type information
+ // as possible.
+
+ // All branches of this switch that don't want to fall through to the
+ // default behavior must return. The default behavior assumes that a true
+ // test means the incoming ins is not null or undefined, and that a false
+ // test means it's one of null, undefined, false, 0, "", or an object
+ // emulating undefined.
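+ //
+ // For example, for if (x) { ... } else { ... }, the true branch can drop
+ // undefined and null from x's type set, while the false branch keeps only
+ // types that can hold a falsy value (plus objects that may emulate
+ // undefined).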
+ switch (ins->op()) {
+ case MDefinition::Op_Not:
+ return improveTypesAtTest(ins->toNot()->getOperand(0), !trueBranch, test);
+ case MDefinition::Op_IsObject: {
+ MDefinition* subject = ins->getOperand(0);
+ TemporaryTypeSet* oldType = subject->resultTypeSet();
+
+ // Create temporary typeset equal to the type if there is no resultTypeSet.
+ TemporaryTypeSet tmp;
+ if (!oldType) {
+ if (subject->type() == MIRType::Value)
+ return true;
+ oldType = &tmp;
+ tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())), alloc_->lifoAlloc());
+ }
+
+ if (oldType->unknown())
+ return true;
+
+ TemporaryTypeSet* type = nullptr;
+ if (trueBranch)
+ type = oldType->cloneObjectsOnly(alloc_->lifoAlloc());
+ else
+ type = oldType->cloneWithoutObjects(alloc_->lifoAlloc());
+
+ if (!type)
+ return false;
+
+ return replaceTypeSet(subject, type, test);
+ }
+ case MDefinition::Op_Phi: {
+ bool branchIsAnd = true;
+ if (!detectAndOrStructure(ins->toPhi(), &branchIsAnd)) {
+ // Just fall through to the default behavior.
+ break;
+ }
+
+ // Now we have detected the triangular structure and determined if it
+ // was an AND or an OR.
+ if (branchIsAnd) {
+ if (trueBranch) {
+ if (!improveTypesAtTest(ins->toPhi()->getOperand(0), true, test))
+ return false;
+ if (!improveTypesAtTest(ins->toPhi()->getOperand(1), true, test))
+ return false;
+ }
+ } else {
+ /*
+ * if (a || b) {
+ * ...
+ * } else {
+ * ...
+ * }
+ *
+ * If we have a statement like the one described above
+ * and we are in its else branch, it amounts to
+ * if (!(a || b)) with control in the true branch.
+ *
+ * Simplifying, we have (!a && !b).
+ * In this case we can use the same logic we use for branchIsAnd.
+ *
+ */
+ if (!trueBranch) {
+ if (!improveTypesAtTest(ins->toPhi()->getOperand(0), false, test))
+ return false;
+ if (!improveTypesAtTest(ins->toPhi()->getOperand(1), false, test))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ case MDefinition::Op_Compare:
+ return improveTypesAtCompare(ins->toCompare(), trueBranch, test);
+
+ default:
+ break;
+ }
+
+ // By default MTest tests ToBoolean(input). As a result, in the true branch we can filter
+ // out undefined and null. In the false branch we can only encounter undefined, null,
+ // false, 0, "" and objects that emulate undefined.
+
+ TemporaryTypeSet* oldType = ins->resultTypeSet();
+ TemporaryTypeSet* type;
+
+ // Create temporary typeset equal to the type if there is no resultTypeSet.
+ TemporaryTypeSet tmp;
+ if (!oldType) {
+ if (ins->type() == MIRType::Value)
+ return true;
+ oldType = &tmp;
+ tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(ins->type())), alloc_->lifoAlloc());
+ }
+
+ // If the type set is unknown, we cannot optimize; just return.
+ if (oldType->unknown())
+ return true;
+
+ // Decide either to set or remove.
+ if (trueBranch) {
+ TemporaryTypeSet remove;
+ remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
+ remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
+ type = TypeSet::removeSet(oldType, &remove, alloc_->lifoAlloc());
+ } else {
+ TemporaryTypeSet base;
+ base.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc()); // ToBoolean(undefined) == false
+ base.addType(TypeSet::NullType(), alloc_->lifoAlloc()); // ToBoolean(null) == false
+ base.addType(TypeSet::BooleanType(), alloc_->lifoAlloc()); // ToBoolean(false) == false
+ base.addType(TypeSet::Int32Type(), alloc_->lifoAlloc()); // ToBoolean(0) == false
+ base.addType(TypeSet::DoubleType(), alloc_->lifoAlloc()); // ToBoolean(0.0) == false
+ base.addType(TypeSet::StringType(), alloc_->lifoAlloc()); // ToBoolean("") == false
+
+ // If the typeset does emulate undefined, then we cannot filter out
+ // objects.
+ if (oldType->maybeEmulatesUndefined(constraints()))
+ base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
+
+ type = TypeSet::intersectSets(&base, oldType, alloc_->lifoAlloc());
+ }
+
+ return type && replaceTypeSet(ins, type, test);
+}
+
+bool
+IonBuilder::jsop_label()
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_LABEL);
+
+ jsbytecode* endpc = pc + GET_JUMP_OFFSET(pc);
+ MOZ_ASSERT(endpc > pc);
+
+ ControlFlowInfo label(cfgStack_.length(), endpc);
+ if (!labels_.append(label))
+ return false;
+
+ return cfgStack_.append(CFGState::Label(endpc));
+}
+
+bool
+IonBuilder::jsop_condswitch()
+{
+ // CondSwitch op looks as follows:
+ // condswitch [length +exit_pc; first case offset +next-case ]
+ // {
+ // {
+ // ... any code ...
+ // case (+jump) [pcdelta offset +next-case]
+ // }+
+ // default (+jump)
+ // ... jump targets ...
+ // }
+ //
+ // The default case is always emitted even if there is no default case in
+ // the source. The pcdelta source note of the last case statement might
+ // have a 0 offset (though not always).
+ //
+ // A conditional switch evaluates the condition of each case and compares
+ // it to the switch value with a strict equality. Case conditions are
+ // iterated linearly until one matches. If a case succeeds, the flow jumps
+ // into the corresponding body block. A body block might alias others and
+ // might continue into the next body block if the body is not terminated
+ // with a break.
+ //
+ // Algorithm:
+ // 1/ Loop over the case chain to reach the default target
+ //    and estimate the number of unique bodies.
+ // 2/ Generate code for all cases (see processCondSwitchCase).
+ // 3/ Generate code for all bodies (see processCondSwitchBody).
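+ //
+ // A switch typically lowers to CONDSWITCH when its cases cannot be encoded
+ // as a dense integer table (e.g. string or non-constant case labels), so
+ // each case condition is compared against the switch value with
+ // JSOP_STRICTEQ in processCondSwitchCase.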
+
+ MOZ_ASSERT(JSOp(*pc) == JSOP_CONDSWITCH);
+ jssrcnote* sn = info().getNote(gsn, pc);
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_CONDSWITCH);
+
+ // Get the exit pc
+ jsbytecode* exitpc = pc + GetSrcNoteOffset(sn, 0);
+ jsbytecode* firstCase = pc + GetSrcNoteOffset(sn, 1);
+
+ // Iterate over all cases in the conditional switch.
+ // - Stop at the default case (always emitted after the last case).
+ // - Estimate the number of unique bodies. This estimate might be off by 1
+ //   if the default body aliases a case body.
+ jsbytecode* curCase = firstCase;
+ jsbytecode* lastTarget = GetJumpOffset(curCase) + curCase;
+ size_t nbBodies = 2; // default target and the first body.
+
+ MOZ_ASSERT(pc < curCase && curCase <= exitpc);
+ while (JSOp(*curCase) == JSOP_CASE) {
+ // Fetch the next case.
+ jssrcnote* caseSn = info().getNote(gsn, curCase);
+ MOZ_ASSERT(caseSn && SN_TYPE(caseSn) == SRC_NEXTCASE);
+ ptrdiff_t off = GetSrcNoteOffset(caseSn, 0);
+ MOZ_ASSERT_IF(off == 0, JSOp(*GetNextPc(curCase)) == JSOP_JUMPTARGET);
+ curCase = off ? curCase + off : GetNextPc(GetNextPc(curCase));
+ MOZ_ASSERT(pc < curCase && curCase <= exitpc);
+
+ // Count non-aliased cases.
+ jsbytecode* curTarget = GetJumpOffset(curCase) + curCase;
+ if (lastTarget < curTarget)
+ nbBodies++;
+ lastTarget = curTarget;
+ }
+
+ // The current case should now be the default case, which jumps to the
+ // default body; that body might be behind the last target.
+ MOZ_ASSERT(JSOp(*curCase) == JSOP_DEFAULT);
+ jsbytecode* defaultTarget = GetJumpOffset(curCase) + curCase;
+ MOZ_ASSERT(curCase < defaultTarget && defaultTarget <= exitpc);
+
+ // Allocate the current graph state.
+ CFGState state = CFGState::CondSwitch(this, exitpc, defaultTarget);
+ if (!state.condswitch.bodies || !state.condswitch.bodies->init(alloc(), nbBodies))
+ return ControlStatus_Error;
+
+ // We loop on case conditions with processCondSwitchCase.
+ MOZ_ASSERT(JSOp(*firstCase) == JSOP_CASE);
+ state.stopAt = firstCase;
+ state.state = CFGState::COND_SWITCH_CASE;
+
+ return cfgStack_.append(state);
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::CondSwitch(IonBuilder* builder, jsbytecode* exitpc, jsbytecode* defaultTarget)
+{
+ CFGState state;
+ state.state = COND_SWITCH_CASE;
+ state.stopAt = nullptr;
+ state.condswitch.bodies = (FixedList<MBasicBlock*>*)builder->alloc_->allocate(
+ sizeof(FixedList<MBasicBlock*>));
+ state.condswitch.currentIdx = 0;
+ state.condswitch.defaultTarget = defaultTarget;
+ state.condswitch.defaultIdx = uint32_t(-1);
+ state.condswitch.exitpc = exitpc;
+ state.condswitch.breaks = nullptr;
+ return state;
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::Label(jsbytecode* exitpc)
+{
+ CFGState state;
+ state.state = LABEL;
+ state.stopAt = exitpc;
+ state.label.breaks = nullptr;
+ return state;
+}
+
+IonBuilder::CFGState
+IonBuilder::CFGState::Try(jsbytecode* exitpc, MBasicBlock* successor)
+{
+ CFGState state;
+ state.state = TRY;
+ state.stopAt = exitpc;
+ state.try_.successor = successor;
+ return state;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processCondSwitchCase(CFGState& state)
+{
+ MOZ_ASSERT(state.state == CFGState::COND_SWITCH_CASE);
+ MOZ_ASSERT(!state.condswitch.breaks);
+ MOZ_ASSERT(current);
+ MOZ_ASSERT(JSOp(*pc) == JSOP_CASE);
+ FixedList<MBasicBlock*>& bodies = *state.condswitch.bodies;
+ jsbytecode* defaultTarget = state.condswitch.defaultTarget;
+ uint32_t& currentIdx = state.condswitch.currentIdx;
+ jsbytecode* lastTarget = currentIdx ? bodies[currentIdx - 1]->pc() : nullptr;
+
+ // Fetch the next case, where we will continue.
+ jssrcnote* sn = info().getNote(gsn, pc);
+ ptrdiff_t off = GetSrcNoteOffset(sn, 0);
+ MOZ_ASSERT_IF(off == 0, JSOp(*GetNextPc(pc)) == JSOP_JUMPTARGET);
+ jsbytecode* casePc = off ? pc + off : GetNextPc(GetNextPc(pc));
+ bool caseIsDefault = JSOp(*casePc) == JSOP_DEFAULT;
+ MOZ_ASSERT(JSOp(*casePc) == JSOP_CASE || caseIsDefault);
+
+ // Allocate the block of the matching case.
+ bool bodyIsNew = false;
+ MBasicBlock* bodyBlock = nullptr;
+ jsbytecode* bodyTarget = pc + GetJumpOffset(pc);
+ if (lastTarget < bodyTarget) {
+ // If the default body is in the middle of, or aliases, the current target.
+ if (lastTarget < defaultTarget && defaultTarget <= bodyTarget) {
+ MOZ_ASSERT(state.condswitch.defaultIdx == uint32_t(-1));
+ state.condswitch.defaultIdx = currentIdx;
+ bodies[currentIdx] = nullptr;
+ // If the default body does not alias any case body, it will be allocated
+ // later and stored in the defaultIdx location.
+ if (defaultTarget < bodyTarget)
+ currentIdx++;
+ }
+
+ bodyIsNew = true;
+ // Pop switch and case operands.
+ bodyBlock = newBlockPopN(current, bodyTarget, 2);
+ bodies[currentIdx++] = bodyBlock;
+ } else {
+ // This body aliases the previous one.
+ MOZ_ASSERT(lastTarget == bodyTarget);
+ MOZ_ASSERT(currentIdx > 0);
+ bodyBlock = bodies[currentIdx - 1];
+ }
+
+ if (!bodyBlock)
+ return ControlStatus_Error;
+
+ lastTarget = bodyTarget;
+
+ // Allocate the block of the non-matching case. This can either be a normal
+ // case or the default case.
+ bool caseIsNew = false;
+ MBasicBlock* caseBlock = nullptr;
+ if (!caseIsDefault) {
+ caseIsNew = true;
+ // Pop the case operand.
+ caseBlock = newBlockPopN(current, GetNextPc(pc), 1);
+ } else {
+ // The non-matching case is the default case, which jumps directly to its
+ // body. Skip the creation of a default case block and directly create
+ // the default body if it does not alias any previous body.
+
+ if (state.condswitch.defaultIdx == uint32_t(-1)) {
+ // The default target is the last target.
+ MOZ_ASSERT(lastTarget < defaultTarget);
+ state.condswitch.defaultIdx = currentIdx++;
+ caseIsNew = true;
+ } else if (bodies[state.condswitch.defaultIdx] == nullptr) {
+ // The default target is in the middle and it does not alias any
+ // case target.
+ MOZ_ASSERT(defaultTarget < lastTarget);
+ caseIsNew = true;
+ } else {
+ // The default target is in the middle and it aliases a case target.
+ MOZ_ASSERT(defaultTarget <= lastTarget);
+ caseBlock = bodies[state.condswitch.defaultIdx];
+ }
+
+ // Allocate and register the default body.
+ if (caseIsNew) {
+ // Pop the case & switch operands.
+ caseBlock = newBlockPopN(current, defaultTarget, 2);
+ bodies[state.condswitch.defaultIdx] = caseBlock;
+ }
+ }
+
+ if (!caseBlock)
+ return ControlStatus_Error;
+
+ // Terminate the last case condition block by emitting the code
+ // corresponding to JSOP_CASE bytecode.
+ if (bodyBlock != caseBlock) {
+ MDefinition* caseOperand = current->pop();
+ MDefinition* switchOperand = current->peek(-1);
+
+ if (!jsop_compare(JSOP_STRICTEQ, switchOperand, caseOperand))
+ return ControlStatus_Error;
+ MInstruction* cmpResult = current->pop()->toInstruction();
+ MOZ_ASSERT(!cmpResult->isEffectful());
+ current->end(newTest(cmpResult, bodyBlock, caseBlock));
+
+ // Add last case as predecessor of the body if the body is aliasing
+ // the previous case body.
+ if (!bodyIsNew && !bodyBlock->addPredecessorPopN(alloc(), current, 1))
+ return ControlStatus_Error;
+
+ // Add last case as predecessor of the non-matching case if the
+ // non-matching case is an aliased default case. We need to pop the
+ // switch operand as we skip the default case block and use the default
+ // body block directly.
+ MOZ_ASSERT_IF(!caseIsNew, caseIsDefault);
+ if (!caseIsNew && !caseBlock->addPredecessorPopN(alloc(), current, 1))
+ return ControlStatus_Error;
+ } else {
+ // The default case aliases the last case body.
+ MOZ_ASSERT(caseIsDefault);
+ current->pop(); // Case operand
+ current->pop(); // Switch operand
+ current->end(MGoto::New(alloc(), bodyBlock));
+ if (!bodyIsNew && !bodyBlock->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ if (caseIsDefault) {
+ // The last case condition is finished. Loop in processCondSwitchBody,
+ // with potential stops in processSwitchBreak. Check that the bodies
+ // fixed list is over-estimated by at most 1, and shrink it so that its
+ // length can be used as an upper bound while iterating over the bodies.
+ MOZ_ASSERT(currentIdx == bodies.length() || currentIdx + 1 == bodies.length());
+ bodies.shrink(bodies.length() - currentIdx);
+
+ // Handle break statements in processSwitchBreak while processing
+ // bodies.
+ ControlFlowInfo breakInfo(cfgStack_.length() - 1, state.condswitch.exitpc);
+ if (!switches_.append(breakInfo))
+ return ControlStatus_Error;
+
+ // Jump into the first body.
+ currentIdx = 0;
+ setCurrent(nullptr);
+ state.state = CFGState::COND_SWITCH_BODY;
+ return processCondSwitchBody(state);
+ }
+
+ // Continue until the case condition.
+ if (!setCurrentAndSpecializePhis(caseBlock))
+ return ControlStatus_Error;
+ pc = current->pc();
+ state.stopAt = casePc;
+ return ControlStatus_Jumped;
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processCondSwitchBody(CFGState& state)
+{
+ MOZ_ASSERT(state.state == CFGState::COND_SWITCH_BODY);
+ MOZ_ASSERT(pc <= state.condswitch.exitpc);
+ FixedList<MBasicBlock*>& bodies = *state.condswitch.bodies;
+ uint32_t& currentIdx = state.condswitch.currentIdx;
+
+ MOZ_ASSERT(currentIdx <= bodies.length());
+ if (currentIdx == bodies.length()) {
+ MOZ_ASSERT_IF(current, pc == state.condswitch.exitpc);
+ return processSwitchEnd(state.condswitch.breaks, state.condswitch.exitpc);
+ }
+
+ // Get the next body
+ MBasicBlock* nextBody = bodies[currentIdx++];
+ MOZ_ASSERT_IF(current, pc == nextBody->pc());
+
+ // Fix the reverse post-order iteration.
+ graph().moveBlockToEnd(nextBody);
+
+ // The last body continues into the new one.
+ if (current) {
+ current->end(MGoto::New(alloc(), nextBody));
+ if (!nextBody->addPredecessor(alloc(), current))
+ return ControlStatus_Error;
+ }
+
+ // Continue in the next body.
+ if (!setCurrentAndSpecializePhis(nextBody))
+ return ControlStatus_Error;
+ pc = current->pc();
+
+ if (currentIdx < bodies.length())
+ state.stopAt = bodies[currentIdx]->pc();
+ else
+ state.stopAt = state.condswitch.exitpc;
+ return ControlStatus_Jumped;
+}
+
+bool
+IonBuilder::jsop_andor(JSOp op)
+{
+ MOZ_ASSERT(op == JSOP_AND || op == JSOP_OR);
+
+ jsbytecode* rhsStart = pc + CodeSpec[op].length;
+ jsbytecode* joinStart = pc + GetJumpOffset(pc);
+ MOZ_ASSERT(joinStart > pc);
+
+ // We have to leave the LHS on the stack.
+ MDefinition* lhs = current->peek(-1);
+
+ MBasicBlock* evalLhs = newBlock(current, joinStart);
+ MBasicBlock* evalRhs = newBlock(current, rhsStart);
+ if (!evalLhs || !evalRhs)
+ return false;
+
+ MTest* test = (op == JSOP_AND)
+ ? newTest(lhs, evalRhs, evalLhs)
+ : newTest(lhs, evalLhs, evalRhs);
+ current->end(test);
+
+ // Create the lhs block and specialize.
+ if (!setCurrentAndSpecializePhis(evalLhs))
+ return false;
+
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return false;
+
+ // Create the rhs block.
+ if (!cfgStack_.append(CFGState::AndOr(joinStart, evalLhs)))
+ return false;
+
+ if (!setCurrentAndSpecializePhis(evalRhs))
+ return false;
+
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return false;
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_dup2()
+{
+ uint32_t lhsSlot = current->stackDepth() - 2;
+ uint32_t rhsSlot = current->stackDepth() - 1;
+ current->pushSlot(lhsSlot);
+ current->pushSlot(rhsSlot);
+ return true;
+}
+
+bool
+IonBuilder::jsop_loophead(jsbytecode* pc)
+{
+ assertValidLoopHeadOp(pc);
+
+ current->add(MInterruptCheck::New(alloc()));
+ insertRecompileCheck();
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_ifeq(JSOp op)
+{
+ // IFEQ always has a forward offset.
+ jsbytecode* trueStart = pc + CodeSpec[op].length;
+ jsbytecode* falseStart = pc + GetJumpOffset(pc);
+ MOZ_ASSERT(falseStart > pc);
+
+ // We only handle cases that emit source notes.
+ jssrcnote* sn = info().getNote(gsn, pc);
+ if (!sn)
+ return abort("expected sourcenote");
+
+ MDefinition* ins = current->pop();
+
+ // Create true and false branches.
+ MBasicBlock* ifTrue = newBlock(current, trueStart);
+ MBasicBlock* ifFalse = newBlock(current, falseStart);
+ if (!ifTrue || !ifFalse)
+ return false;
+
+ MTest* test = newTest(ins, ifTrue, ifFalse);
+ current->end(test);
+
+ // The bytecode for if/ternary gets emitted either like this:
+ //
+ // IFEQ X ; src note (IF_ELSE, COND) points to the GOTO
+ // ...
+ // GOTO Z
+ // X: ... ; else/else if
+ // ...
+ // Z: ; join
+ //
+ // Or like this:
+ //
+ // IFEQ X ; src note (IF) has no offset
+ // ...
+ // Z: ... ; join
+ //
+ // We want to parse the bytecode as if we were parsing the AST, so for the
+ // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the
+ // IF case, the IFEQ offset is the join point.
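+ //
+ // For example, if (c) { x(); } else { y(); } produces the first shape
+ // (SRC_IF_ELSE), while a bare if (c) { x(); } produces the second (SRC_IF).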
+ switch (SN_TYPE(sn)) {
+ case SRC_IF:
+ if (!cfgStack_.append(CFGState::If(falseStart, test)))
+ return false;
+ break;
+
+ case SRC_IF_ELSE:
+ case SRC_COND:
+ {
+ // Infer the join point from the JSOP_GOTO[X] sitting here, then
+ // assert as much as we can that this is the right GOTO.
+ jsbytecode* trueEnd = pc + GetSrcNoteOffset(sn, 0);
+ MOZ_ASSERT(trueEnd > pc);
+ MOZ_ASSERT(trueEnd < falseStart);
+ MOZ_ASSERT(JSOp(*trueEnd) == JSOP_GOTO);
+ MOZ_ASSERT(!info().getNote(gsn, trueEnd));
+
+ jsbytecode* falseEnd = trueEnd + GetJumpOffset(trueEnd);
+ MOZ_ASSERT(falseEnd > trueEnd);
+ MOZ_ASSERT(falseEnd >= falseStart);
+
+ if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, test)))
+ return false;
+ break;
+ }
+
+ default:
+ MOZ_CRASH("unexpected source note type");
+ }
+
+ // Switch to parsing the true branch. Note that no PC update is needed,
+ // it's the next instruction.
+ if (!setCurrentAndSpecializePhis(ifTrue))
+ return false;
+
+ // Filter the types in the true branch.
+ if (!improveTypesAtTest(test->getOperand(0), test->ifTrue() == current, test))
+ return false;
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_try()
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_TRY);
+
+ // Try-finally is not yet supported.
+ if (analysis().hasTryFinally())
+ return abort("Has try-finally");
+
+ // Try-catch within inline frames is not yet supported.
+ MOZ_ASSERT(!isInlineBuilder());
+
+ // Try-catch during the arguments usage analysis is not yet supported. Code
+ // accessing the arguments within the 'catch' block is not accounted for.
+ if (info().analysisMode() == Analysis_ArgumentsUsage)
+ return abort("Try-catch during arguments usage analysis");
+
+ graph().setHasTryBlock();
+
+ jssrcnote* sn = info().getNote(gsn, pc);
+ MOZ_ASSERT(SN_TYPE(sn) == SRC_TRY);
+
+ // Get the pc of the last instruction in the try block. It's a JSOP_GOTO to
+ // jump over the catch block.
+ jsbytecode* endpc = pc + GetSrcNoteOffset(sn, 0);
+ MOZ_ASSERT(JSOp(*endpc) == JSOP_GOTO);
+ MOZ_ASSERT(GetJumpOffset(endpc) > 0);
+
+ jsbytecode* afterTry = endpc + GetJumpOffset(endpc);
+
+ // If controlflow in the try body is terminated (by a return or throw
+ // statement), the code after the try-statement may still be reachable
+ // via the catch block (which we don't compile) and OSR can enter it.
+ // For example:
+ //
+ // try {
+ // throw 3;
+ // } catch(e) { }
+ //
+ // for (var i=0; i<1000; i++) {}
+ //
+ // To handle this, we create two blocks: one for the try block and one
+ // for the code following the try-catch statement. Both blocks are
+ // connected to the graph with an MGotoWithFake instruction that always
+ // jumps to the try block. This ensures the successor block always has a
+ // predecessor.
+ //
+ // If the code after the try block is unreachable (control flow in both the
+ // try and catch blocks is terminated), only create the try block, to avoid
+ // parsing unreachable code.
+
+ MBasicBlock* tryBlock = newBlock(current, GetNextPc(pc));
+ if (!tryBlock)
+ return false;
+
+ MBasicBlock* successor;
+ if (analysis().maybeInfo(afterTry)) {
+ successor = newBlock(current, afterTry);
+ if (!successor)
+ return false;
+
+ current->end(MGotoWithFake::New(alloc(), tryBlock, successor));
+ } else {
+ successor = nullptr;
+ current->end(MGoto::New(alloc(), tryBlock));
+ }
+
+ if (!cfgStack_.append(CFGState::Try(endpc, successor)))
+ return false;
+
+ // The baseline compiler should not attempt to enter the catch block
+ // via OSR.
+ MOZ_ASSERT(info().osrPc() < endpc || info().osrPc() >= afterTry);
+
+ // Start parsing the try block.
+ return setCurrentAndSpecializePhis(tryBlock);
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processReturn(JSOp op)
+{
+ MDefinition* def;
+ switch (op) {
+ case JSOP_RETURN:
+ // Return the last instruction.
+ def = current->pop();
+ break;
+
+ case JSOP_RETRVAL:
+ // Return undefined eagerly if script doesn't use return value.
+ if (script()->noScriptRval()) {
+ MInstruction* ins = MConstant::New(alloc(), UndefinedValue());
+ current->add(ins);
+ def = ins;
+ break;
+ }
+
+ def = current->getSlot(info().returnValueSlot());
+ break;
+
+ default:
+ def = nullptr;
+ MOZ_CRASH("unknown return op");
+ }
+
+ MReturn* ret = MReturn::New(alloc(), def);
+ current->end(ret);
+
+ if (!graph().addReturn(current))
+ return ControlStatus_Error;
+
+ // Make sure no one tries to use this block now.
+ setCurrent(nullptr);
+ return processControlEnd();
+}
+
+IonBuilder::ControlStatus
+IonBuilder::processThrow()
+{
+ MDefinition* def = current->pop();
+
+ // MThrow is not marked as effectful. This means when it throws and we
+ // are inside a try block, we could use an earlier resume point and this
+ // resume point may not be up-to-date, for example:
+ //
+ // (function() {
+ // try {
+ // var x = 1;
+ // foo(); // resume point
+ // x = 2;
+ // throw foo;
+ // } catch(e) {
+ // print(x);
+ // }
+ // ])();
+ //
+ // If we use the resume point after the call, this will print 1 instead
+ // of 2. To fix this, we create a resume point right before the MThrow.
+ //
+ // Note that this is not a problem for instructions other than MThrow
+ // because they are either marked as effectful (have their own resume
+ // point) or cannot throw a catchable exception.
+ //
+ // We always install this resume point (instead of only when the function
+ // has a try block) in order to handle the Debugger onExceptionUnwind
+ // hook. When we need to handle the hook, we bail out to baseline right
+ // after the throw and propagate the exception when debug mode is on. This
+ // is opposed to the normal behavior of resuming directly in the
+ // associated catch block.
+ MNop* nop = MNop::New(alloc());
+ current->add(nop);
+
+ if (!resumeAfter(nop))
+ return ControlStatus_Error;
+
+ MThrow* ins = MThrow::New(alloc(), def);
+ current->end(ins);
+
+ // Make sure no one tries to use this block now.
+ setCurrent(nullptr);
+ return processControlEnd();
+}
+
+void
+IonBuilder::pushConstant(const Value& v)
+{
+ current->push(constant(v));
+}
+
+bool
+IonBuilder::bitnotTrySpecialized(bool* emitted, MDefinition* input)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a specialized bitnot instruction based on the input type
+ // of the operand.
+
+ if (input->mightBeType(MIRType::Object) || input->mightBeType(MIRType::Symbol))
+ return true;
+
+ MBitNot* ins = MBitNot::New(alloc(), input);
+ ins->setSpecialization(MIRType::Int32);
+
+ current->add(ins);
+ current->push(ins);
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_bitnot()
+{
+ bool emitted = false;
+
+ MDefinition* input = current->pop();
+
+ if (!forceInlineCaches()) {
+ if (!bitnotTrySpecialized(&emitted, input) || emitted)
+ return emitted;
+ }
+
+ if (!arithTrySharedStub(&emitted, JSOP_BITNOT, nullptr, input) || emitted)
+ return emitted;
+
+ // Not possible to optimize. Do a slow vm call.
+ MBitNot* ins = MBitNot::New(alloc(), input);
+
+ current->add(ins);
+ current->push(ins);
+ MOZ_ASSERT(ins->isEffectful());
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_bitop(JSOp op)
+{
+ // Pop inputs.
+ MDefinition* right = current->pop();
+ MDefinition* left = current->pop();
+
+ MBinaryBitwiseInstruction* ins;
+ switch (op) {
+ case JSOP_BITAND:
+ ins = MBitAnd::New(alloc(), left, right);
+ break;
+
+ case JSOP_BITOR:
+ ins = MBitOr::New(alloc(), left, right);
+ break;
+
+ case JSOP_BITXOR:
+ ins = MBitXor::New(alloc(), left, right);
+ break;
+
+ case JSOP_LSH:
+ ins = MLsh::New(alloc(), left, right);
+ break;
+
+ case JSOP_RSH:
+ ins = MRsh::New(alloc(), left, right);
+ break;
+
+ case JSOP_URSH:
+ ins = MUrsh::New(alloc(), left, right);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected bitop");
+ }
+
+ current->add(ins);
+ ins->infer(inspector, pc);
+
+ current->push(ins);
+ if (ins->isEffectful() && !resumeAfter(ins))
+ return false;
+
+ return true;
+}
+
+MDefinition::Opcode
+JSOpToMDefinition(JSOp op)
+{
+ switch (op) {
+ case JSOP_ADD:
+ return MDefinition::Op_Add;
+ case JSOP_SUB:
+ return MDefinition::Op_Sub;
+ case JSOP_MUL:
+ return MDefinition::Op_Mul;
+ case JSOP_DIV:
+ return MDefinition::Op_Div;
+ case JSOP_MOD:
+ return MDefinition::Op_Mod;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+bool
+IonBuilder::binaryArithTryConcat(bool* emitted, JSOp op, MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to convert an addition into a concat operation if the inputs
+ // indicate this might be a concatenation.
+
+ // Only try to replace this with concat when we have an addition.
+ if (op != JSOP_ADD)
+ return true;
+
+ trackOptimizationAttempt(TrackedStrategy::BinaryArith_Concat);
+
+ // Make sure one of the inputs is a string.
+ if (left->type() != MIRType::String && right->type() != MIRType::String) {
+ trackOptimizationOutcome(TrackedOutcome::OperandNotString);
+ return true;
+ }
+
+ // The none-string input (if present) should be atleast a numerical type.
+ // Which we can easily coerce to string.
+ if (right->type() != MIRType::String && !IsNumberType(right->type())) {
+ trackOptimizationOutcome(TrackedOutcome::OperandNotStringOrNumber);
+ return true;
+ }
+ if (left->type() != MIRType::String && !IsNumberType(left->type())) {
+ trackOptimizationOutcome(TrackedOutcome::OperandNotStringOrNumber);
+ return true;
+ }
+
+ MConcat* ins = MConcat::New(alloc(), left, right);
+ current->add(ins);
+ current->push(ins);
+
+ if (!maybeInsertResume())
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::powTrySpecialized(bool* emitted, MDefinition* base, MDefinition* power,
+ MIRType outputType)
+{
+ // Typechecking.
+ MDefinition* output = nullptr;
+ MIRType baseType = base->type();
+ MIRType powerType = power->type();
+
+ if (outputType != MIRType::Int32 && outputType != MIRType::Double)
+ return true;
+ if (!IsNumberType(baseType))
+ return true;
+ if (!IsNumberType(powerType))
+ return true;
+
+ if (powerType == MIRType::Float32)
+ powerType = MIRType::Double;
+
+ MPow* pow = MPow::New(alloc(), base, power, powerType);
+ current->add(pow);
+ output = pow;
+
+ // Cast to the right type
+ if (outputType == MIRType::Int32 && output->type() != MIRType::Int32) {
+ MToInt32* toInt = MToInt32::New(alloc(), output);
+ current->add(toInt);
+ output = toInt;
+ }
+ if (outputType == MIRType::Double && output->type() != MIRType::Double) {
+ MToDouble* toDouble = MToDouble::New(alloc(), output);
+ current->add(toDouble);
+ output = toDouble;
+ }
+
+ current->push(output);
+ *emitted = true;
+ return true;
+}
+
+static inline bool
+SimpleArithOperand(MDefinition* op)
+{
+ return !op->mightBeType(MIRType::Object)
+ && !op->mightBeType(MIRType::String)
+ && !op->mightBeType(MIRType::Symbol)
+ && !op->mightBeType(MIRType::MagicOptimizedArguments)
+ && !op->mightBeType(MIRType::MagicHole)
+ && !op->mightBeType(MIRType::MagicIsConstructing);
+}
+
+bool
+IonBuilder::binaryArithTrySpecialized(bool* emitted, JSOp op, MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a specialized binary instruction based on the input types
+ // of the operands.
+
+ trackOptimizationAttempt(TrackedStrategy::BinaryArith_SpecializedTypes);
+
+ // Anything complex - strings, symbols, and objects - is not specialized.
+ if (!SimpleArithOperand(left) || !SimpleArithOperand(right)) {
+ trackOptimizationOutcome(TrackedOutcome::OperandNotSimpleArith);
+ return true;
+ }
+
+ // One of the inputs needs to be a number.
+ if (!IsNumberType(left->type()) && !IsNumberType(right->type())) {
+ trackOptimizationOutcome(TrackedOutcome::OperandNotNumber);
+ return true;
+ }
+
+ MDefinition::Opcode defOp = JSOpToMDefinition(op);
+ MBinaryArithInstruction* ins = MBinaryArithInstruction::New(alloc(), defOp, left, right);
+ ins->setNumberSpecialization(alloc(), inspector, pc);
+
+ if (op == JSOP_ADD || op == JSOP_MUL)
+ ins->setCommutative();
+
+ current->add(ins);
+ current->push(ins);
+
+ MOZ_ASSERT(!ins->isEffectful());
+ if (!maybeInsertResume())
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::binaryArithTrySpecializedOnBaselineInspector(bool* emitted, JSOp op,
+ MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a specialized binary instruction speculating the
+ // type using the baseline caches.
+
+ trackOptimizationAttempt(TrackedStrategy::BinaryArith_SpecializedOnBaselineTypes);
+
+ MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
+ if (specialization == MIRType::None) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return true;
+ }
+
+ MDefinition::Opcode def_op = JSOpToMDefinition(op);
+ MBinaryArithInstruction* ins = MBinaryArithInstruction::New(alloc(), def_op, left, right);
+ ins->setSpecialization(specialization);
+
+ current->add(ins);
+ current->push(ins);
+
+ MOZ_ASSERT(!ins->isEffectful());
+ if (!maybeInsertResume())
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::arithTrySharedStub(bool* emitted, JSOp op,
+ MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+ JSOp actualOp = JSOp(*pc);
+
+ // Try to emit a shared stub cache.
+
+ if (JitOptions.disableSharedStubs)
+ return true;
+
+ // The actual jsop JSOP_POS is not supported yet.
+ if (actualOp == JSOP_POS)
+ return true;
+
+ // FIXME: The JSOP_BITNOT path doesn't track optimizations yet.
+ if (actualOp != JSOP_BITNOT) {
+ trackOptimizationAttempt(TrackedStrategy::BinaryArith_SharedCache);
+ trackOptimizationSuccess();
+ }
+
+ MInstruction* stub = nullptr;
+ switch (actualOp) {
+ case JSOP_NEG:
+ case JSOP_BITNOT:
+ MOZ_ASSERT_IF(op == JSOP_MUL,
+ left->maybeConstantValue() && left->maybeConstantValue()->toInt32() == -1);
+ MOZ_ASSERT_IF(op != JSOP_MUL, !left);
+
+ stub = MUnarySharedStub::New(alloc(), right);
+ break;
+ case JSOP_ADD:
+ case JSOP_SUB:
+ case JSOP_MUL:
+ case JSOP_DIV:
+ case JSOP_MOD:
+ case JSOP_POW:
+ stub = MBinarySharedStub::New(alloc(), left, right);
+ break;
+ default:
+ MOZ_CRASH("unsupported arith");
+ }
+
+ current->add(stub);
+ current->push(stub);
+
+ // Decrease type from 'any type' to 'empty type' when one of the operands
+ // is 'empty typed'.
+ maybeMarkEmpty(stub);
+
+ if (!resumeAfter(stub))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left, MDefinition* right)
+{
+ bool emitted = false;
+
+ startTrackingOptimizations();
+
+ trackTypeInfo(TrackedTypeSite::Operand, left->type(), left->resultTypeSet());
+ trackTypeInfo(TrackedTypeSite::Operand, right->type(), right->resultTypeSet());
+
+ if (!forceInlineCaches()) {
+ if (!binaryArithTryConcat(&emitted, op, left, right) || emitted)
+ return emitted;
+
+ if (!binaryArithTrySpecialized(&emitted, op, left, right) || emitted)
+ return emitted;
+
+ if (!binaryArithTrySpecializedOnBaselineInspector(&emitted, op, left, right) || emitted)
+ return emitted;
+ }
+
+ if (!arithTrySharedStub(&emitted, op, left, right) || emitted)
+ return emitted;
+
+ // Not possible to optimize. Do a slow vm call.
+ trackOptimizationAttempt(TrackedStrategy::BinaryArith_Call);
+ trackOptimizationSuccess();
+
+ MDefinition::Opcode def_op = JSOpToMDefinition(op);
+ MBinaryArithInstruction* ins = MBinaryArithInstruction::New(alloc(), def_op, left, right);
+
+ // Decrease type from 'any type' to 'empty type' when one of the operands
+ // is 'empty typed'.
+ maybeMarkEmpty(ins);
+
+ current->add(ins);
+ current->push(ins);
+ MOZ_ASSERT(ins->isEffectful());
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_binary_arith(JSOp op)
+{
+ MDefinition* right = current->pop();
+ MDefinition* left = current->pop();
+
+ return jsop_binary_arith(op, left, right);
+}
+
+
+bool
+IonBuilder::jsop_pow()
+{
+ MDefinition* exponent = current->pop();
+ MDefinition* base = current->pop();
+
+ bool emitted = false;
+
+ if (!forceInlineCaches()) {
+ if (!powTrySpecialized(&emitted, base, exponent, MIRType::Double) || emitted)
+ return emitted;
+ }
+
+ if (!arithTrySharedStub(&emitted, JSOP_POW, base, exponent) || emitted)
+ return emitted;
+
+ // For now, use MIRType::Double, as a safe cover-all. See bug 1188079.
+ MPow* pow = MPow::New(alloc(), base, exponent, MIRType::Double);
+ current->add(pow);
+ current->push(pow);
+ return true;
+}
+
+bool
+IonBuilder::jsop_pos()
+{
+ if (IsNumberType(current->peek(-1)->type())) {
+ // Already int32 or double. Set the operand as implicitly used so it
+ // doesn't get optimized out if it has no other uses, as we could bail
+ // out.
+ current->peek(-1)->setImplicitlyUsedUnchecked();
+ return true;
+ }
+
+ // Compile +x as x * 1.
+ MDefinition* value = current->pop();
+ MConstant* one = MConstant::New(alloc(), Int32Value(1));
+ current->add(one);
+
+ return jsop_binary_arith(JSOP_MUL, value, one);
+}
+
+bool
+IonBuilder::jsop_neg()
+{
+ // Since JSOP_NEG does not use a slot, we cannot push the MConstant.
+ // The MConstant is therefore passed to JSOP_MUL without slot traffic.
+ MConstant* negator = MConstant::New(alloc(), Int32Value(-1));
+ current->add(negator);
+
+ MDefinition* right = current->pop();
+
+ return jsop_binary_arith(JSOP_MUL, negator, right);
+}
+
+bool
+IonBuilder::jsop_tostring()
+{
+ if (current->peek(-1)->type() == MIRType::String)
+ return true;
+
+ MDefinition* value = current->pop();
+ MToString* ins = MToString::New(alloc(), value);
+ current->add(ins);
+ current->push(ins);
+ MOZ_ASSERT(!ins->isEffectful());
+ return true;
+}
+
+class AutoAccumulateReturns
+{
+ MIRGraph& graph_;
+ MIRGraphReturns* prev_;
+
+ public:
+ AutoAccumulateReturns(MIRGraph& graph, MIRGraphReturns& returns)
+ : graph_(graph)
+ {
+ prev_ = graph_.returnAccumulator();
+ graph_.setReturnAccumulator(&returns);
+ }
+ ~AutoAccumulateReturns() {
+ graph_.setReturnAccumulator(prev_);
+ }
+};
+
+IonBuilder::InliningStatus
+IonBuilder::inlineScriptedCall(CallInfo& callInfo, JSFunction* target)
+{
+ MOZ_ASSERT(target->hasScript());
+ MOZ_ASSERT(IsIonInlinablePC(pc));
+
+ MBasicBlock::BackupPoint backup(current);
+ if (!backup.init(alloc()))
+ return InliningStatus_Error;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ // Ensure sufficient space in the slots: needed for inlining from FUNAPPLY.
+ uint32_t depth = current->stackDepth() + callInfo.numFormals();
+ if (depth > current->nslots()) {
+ if (!current->increaseSlots(depth - current->nslots()))
+ return InliningStatus_Error;
+ }
+
+ // Create new |this| on the caller-side for inlined constructors.
+ if (callInfo.constructing()) {
+ MDefinition* thisDefn = createThis(target, callInfo.fun(), callInfo.getNewTarget());
+ if (!thisDefn)
+ return InliningStatus_Error;
+ callInfo.setThis(thisDefn);
+ }
+
+ // Capture formals in the outer resume point.
+ callInfo.pushFormals(current);
+
+ MResumePoint* outerResumePoint =
+ MResumePoint::New(alloc(), current, pc, MResumePoint::Outer);
+ if (!outerResumePoint)
+ return InliningStatus_Error;
+ current->setOuterResumePoint(outerResumePoint);
+
+ // Pop formals again, except leave |fun| on stack for duration of call.
+ callInfo.popFormals(current);
+ current->push(callInfo.fun());
+
+ JSScript* calleeScript = target->nonLazyScript();
+ BaselineInspector inspector(calleeScript);
+
+ // Improve type information of |this| when not set.
+ if (callInfo.constructing() &&
+ !callInfo.thisArg()->resultTypeSet())
+ {
+ StackTypeSet* types = TypeScript::ThisTypes(calleeScript);
+ if (types && !types->unknown()) {
+ TemporaryTypeSet* clonedTypes = types->clone(alloc_->lifoAlloc());
+ if (!clonedTypes)
+ return InliningStatus_Error;
+ MTypeBarrier* barrier = MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes);
+ current->add(barrier);
+ if (barrier->type() == MIRType::Undefined)
+ callInfo.setThis(constant(UndefinedValue()));
+ else if (barrier->type() == MIRType::Null)
+ callInfo.setThis(constant(NullValue()));
+ else
+ callInfo.setThis(barrier);
+ }
+ }
+
+ // Start inlining.
+ LifoAlloc* lifoAlloc = alloc_->lifoAlloc();
+ InlineScriptTree* inlineScriptTree =
+ info().inlineScriptTree()->addCallee(alloc_, pc, calleeScript);
+ if (!inlineScriptTree)
+ return InliningStatus_Error;
+ CompileInfo* info = lifoAlloc->new_<CompileInfo>(calleeScript, target,
+ (jsbytecode*)nullptr,
+ this->info().analysisMode(),
+ /* needsArgsObj = */ false,
+ inlineScriptTree);
+ if (!info)
+ return InliningStatus_Error;
+
+ MIRGraphReturns returns(alloc());
+ AutoAccumulateReturns aar(graph(), returns);
+
+ // Build the graph.
+ IonBuilder inlineBuilder(analysisContext, compartment, options, &alloc(), &graph(), constraints(),
+ &inspector, info, &optimizationInfo(), nullptr, inliningDepth_ + 1,
+ loopDepth_);
+ if (!inlineBuilder.buildInline(this, outerResumePoint, callInfo)) {
+ if (analysisContext && analysisContext->isExceptionPending()) {
+ JitSpew(JitSpew_IonAbort, "Inline builder raised exception.");
+ abortReason_ = AbortReason_Error;
+ return InliningStatus_Error;
+ }
+
+ // Inlining the callee failed. Mark the callee as uninlineable only if
+ // the inlining was aborted for a non-exception reason.
+ if (inlineBuilder.abortReason_ == AbortReason_Disable) {
+ calleeScript->setUninlineable();
+ if (!JitOptions.disableInlineBacktracking) {
+ current = backup.restore();
+ return InliningStatus_NotInlined;
+ }
+ abortReason_ = AbortReason_Inlining;
+ } else if (inlineBuilder.abortReason_ == AbortReason_Inlining) {
+ abortReason_ = AbortReason_Inlining;
+ } else if (inlineBuilder.abortReason_ == AbortReason_Alloc) {
+ abortReason_ = AbortReason_Alloc;
+ } else if (inlineBuilder.abortReason_ == AbortReason_PreliminaryObjects) {
+ const ObjectGroupVector& groups = inlineBuilder.abortedPreliminaryGroups();
+ MOZ_ASSERT(!groups.empty());
+ for (size_t i = 0; i < groups.length(); i++)
+ addAbortedPreliminaryGroup(groups[i]);
+ abortReason_ = AbortReason_PreliminaryObjects;
+ }
+
+ return InliningStatus_Error;
+ }
+
+ // Create return block.
+ jsbytecode* postCall = GetNextPc(pc);
+ MBasicBlock* returnBlock = newBlock(nullptr, postCall);
+ if (!returnBlock)
+ return InliningStatus_Error;
+ returnBlock->setCallerResumePoint(callerResumePoint_);
+
+ // Inherit the slots from current and pop |fun|.
+ returnBlock->inheritSlots(current);
+ returnBlock->pop();
+
+ // Accumulate return values.
+ if (returns.empty()) {
+ // Inlining of functions that have no exit is not supported.
+ calleeScript->setUninlineable();
+ if (!JitOptions.disableInlineBacktracking) {
+ current = backup.restore();
+ return InliningStatus_NotInlined;
+ }
+ abortReason_ = AbortReason_Inlining;
+ return InliningStatus_Error;
+ }
+ MDefinition* retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock);
+ if (!retvalDefn)
+ return InliningStatus_Error;
+ returnBlock->push(retvalDefn);
+
+ // Initialize entry slots now that the stack has been fixed up.
+ if (!returnBlock->initEntrySlots(alloc()))
+ return InliningStatus_Error;
+
+ if (!setCurrentAndSpecializePhis(returnBlock))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+MDefinition*
+IonBuilder::patchInlinedReturn(CallInfo& callInfo, MBasicBlock* exit, MBasicBlock* bottom)
+{
+ // Replaces the MReturn in the exit block with an MGoto.
+ MDefinition* rdef = exit->lastIns()->toReturn()->input();
+ exit->discardLastIns();
+
+ // Constructors must be patched by the caller to always return an object.
+ if (callInfo.constructing()) {
+ if (rdef->type() == MIRType::Value) {
+ // Unknown return: dynamically detect objects.
+ MReturnFromCtor* filter = MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg());
+ exit->add(filter);
+ rdef = filter;
+ } else if (rdef->type() != MIRType::Object) {
+ // Known non-object return: force |this|.
+ rdef = callInfo.thisArg();
+ }
+ } else if (callInfo.isSetter()) {
+ // Setters return their argument, not whatever value is returned.
+ rdef = callInfo.getArg(0);
+ }
+
+ if (!callInfo.isSetter())
+ rdef = specializeInlinedReturn(rdef, exit);
+
+ MGoto* replacement = MGoto::New(alloc(), bottom);
+ exit->end(replacement);
+ if (!bottom->addPredecessorWithoutPhis(exit))
+ return nullptr;
+
+ return rdef;
+}
+
+MDefinition*
+IonBuilder::specializeInlinedReturn(MDefinition* rdef, MBasicBlock* exit)
+{
+ // Remove types from the return definition that weren't observed.
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ // The observed typeset doesn't contain extra information.
+ if (types->empty() || types->unknown())
+ return rdef;
+
+ // Decide if specializing is needed using the result typeset if available,
+ // else use the result type.
+
+ if (rdef->resultTypeSet()) {
+ // Don't specialize if return typeset is a subset of the
+ // observed typeset. The return typeset is already more specific.
+ if (rdef->resultTypeSet()->isSubset(types))
+ return rdef;
+ } else {
+ MIRType observedType = types->getKnownMIRType();
+
+ // Don't specialize if type is MIRType::Float32 and TI reports
+ // MIRType::Double. Float is more specific than double.
+ if (observedType == MIRType::Double && rdef->type() == MIRType::Float32)
+ return rdef;
+
+        // Don't specialize if the types are in accordance, except for
+        // MIRType::Value and MIRType::Object (when the object is not unknown),
+        // since the typeset contains more specific information.
+ if (observedType == rdef->type() &&
+ observedType != MIRType::Value &&
+ (observedType != MIRType::Object || types->unknownObject()))
+ {
+ return rdef;
+ }
+ }
+
+ setCurrent(exit);
+
+ MTypeBarrier* barrier = nullptr;
+ rdef = addTypeBarrier(rdef, types, BarrierKind::TypeSet, &barrier);
+ if (barrier)
+ barrier->setNotMovable();
+
+ return rdef;
+}
+
+MDefinition*
+IonBuilder::patchInlinedReturns(CallInfo& callInfo, MIRGraphReturns& returns, MBasicBlock* bottom)
+{
+ // Replaces MReturns with MGotos, returning the MDefinition
+ // representing the return value, or nullptr.
+ MOZ_ASSERT(returns.length() > 0);
+
+ if (returns.length() == 1)
+ return patchInlinedReturn(callInfo, returns[0], bottom);
+
+ // Accumulate multiple returns with a phi.
+ MPhi* phi = MPhi::New(alloc());
+ if (!phi->reserveLength(returns.length()))
+ return nullptr;
+
+ for (size_t i = 0; i < returns.length(); i++) {
+ MDefinition* rdef = patchInlinedReturn(callInfo, returns[i], bottom);
+ if (!rdef)
+ return nullptr;
+ phi->addInput(rdef);
+ }
+
+ bottom->addPhi(phi);
+ return phi;
+}
+
+IonBuilder::InliningDecision
+IonBuilder::makeInliningDecision(JSObject* targetArg, CallInfo& callInfo)
+{
+ // When there is no target, inlining is impossible.
+ if (targetArg == nullptr) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
+ return InliningDecision_DontInline;
+ }
+
+ // Inlining non-function targets is handled by inlineNonFunctionCall().
+ if (!targetArg->is<JSFunction>())
+ return InliningDecision_Inline;
+
+ JSFunction* target = &targetArg->as<JSFunction>();
+
+ // Never inline during the arguments usage analysis.
+ if (info().analysisMode() == Analysis_ArgumentsUsage)
+ return InliningDecision_DontInline;
+
+ // Native functions provide their own detection in inlineNativeCall().
+ if (target->isNative())
+ return InliningDecision_Inline;
+
+ // Determine whether inlining is possible at callee site
+ InliningDecision decision = canInlineTarget(target, callInfo);
+ if (decision != InliningDecision_Inline)
+ return decision;
+
+ // Heuristics!
+ JSScript* targetScript = target->nonLazyScript();
+
+ // Callee must not be excessively large.
+ // This heuristic also applies to the callsite as a whole.
+ bool offThread = options.offThreadCompilationAvailable();
+ if (targetScript->length() > optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineBigCallee);
+ return DontInline(targetScript, "Vetoed: callee excessively large");
+ }
+
+ // Callee must have been called a few times to have somewhat stable
+ // type information, except for definite properties analysis,
+ // as the caller has not run yet.
+ if (targetScript->getWarmUpCount() < optimizationInfo().inliningWarmUpThreshold() &&
+ !targetScript->baselineScript()->ionCompiledOrInlined() &&
+ info().analysisMode() != Analysis_DefiniteProperties)
+ {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNotHot);
+ JitSpew(JitSpew_Inlining, "Cannot inline %s:%" PRIuSIZE ": callee is insufficiently hot.",
+ targetScript->filename(), targetScript->lineno());
+ return InliningDecision_WarmUpCountTooLow;
+ }
+
+ // Don't inline if the callee is known to inline a lot of code, to avoid
+ // huge MIR graphs.
+ uint32_t inlinedBytecodeLength = targetScript->baselineScript()->inlinedBytecodeLength();
+ if (inlinedBytecodeLength > optimizationInfo().inlineMaxCalleeInlinedBytecodeLength()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineBigCalleeInlinedBytecodeLength);
+ return DontInline(targetScript, "Vetoed: callee inlinedBytecodeLength is too big");
+ }
+
+ IonBuilder* outerBuilder = outermostBuilder();
+
+ // Cap the total bytecode length we inline under a single script, to avoid
+ // excessive inlining in pathological cases.
+ size_t totalBytecodeLength = outerBuilder->inlinedBytecodeLength_ + targetScript->length();
+ if (totalBytecodeLength > optimizationInfo().inlineMaxTotalBytecodeLength()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineExceededTotalBytecodeLength);
+ return DontInline(targetScript, "Vetoed: exceeding max total bytecode length");
+ }
+
+ // Cap the inlining depth.
+
+ uint32_t maxInlineDepth;
+ if (JitOptions.isSmallFunction(targetScript)) {
+ maxInlineDepth = optimizationInfo().smallFunctionMaxInlineDepth();
+ } else {
+ maxInlineDepth = optimizationInfo().maxInlineDepth();
+
+ // Caller must not be excessively large.
+ if (script()->length() >= optimizationInfo().inliningMaxCallerBytecodeLength()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineBigCaller);
+ return DontInline(targetScript, "Vetoed: caller excessively large");
+ }
+ }
+
+ BaselineScript* outerBaseline = outermostBuilder()->script()->baselineScript();
+ if (inliningDepth_ >= maxInlineDepth) {
+ // We hit the depth limit and won't inline this function. Give the
+ // outermost script a max inlining depth of 0, so that it won't be
+ // inlined in other scripts. This heuristic is currently only used
+ // when we're inlining scripts with loops, see the comment below.
+ outerBaseline->setMaxInliningDepth(0);
+
+ trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
+ return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");
+ }
+
+ // Inlining functions with loops can be complicated. For instance, if we're
+ // close to the inlining depth limit and we inline the function f below, we
+ // can no longer inline the call to g:
+ //
+ // function f() {
+ // while (cond) {
+ // g();
+ // }
+ // }
+ //
+ // If the loop has many iterations, it's more efficient to call f and inline
+ // g in f.
+ //
+ // To avoid this problem, we record a separate max inlining depth for each
+ // script, indicating at which depth we won't be able to inline all functions
+ // we inlined this time. This solves the issue above, because we will only
+ // inline f if it means we can also inline g.
+ if (targetScript->hasLoops() &&
+ inliningDepth_ >= targetScript->baselineScript()->maxInliningDepth())
+ {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
+ return DontInline(targetScript, "Vetoed: exceeding allowed script inline depth");
+ }
+
+ // Update the max depth at which we can inline the outer script.
+ MOZ_ASSERT(maxInlineDepth > inliningDepth_);
+ uint32_t scriptInlineDepth = maxInlineDepth - inliningDepth_ - 1;
+ if (scriptInlineDepth < outerBaseline->maxInliningDepth())
+ outerBaseline->setMaxInliningDepth(scriptInlineDepth);
+
+ // End of heuristics, we will inline this function.
+
+ // TI calls ObjectStateChange to trigger invalidation of the caller.
+ TypeSet::ObjectKey* targetKey = TypeSet::ObjectKey::get(target);
+ targetKey->watchStateChangeForInlinedCall(constraints());
+
+ outerBuilder->inlinedBytecodeLength_ += targetScript->length();
+
+ return InliningDecision_Inline;
+}
+
+bool
+IonBuilder::selectInliningTargets(const ObjectVector& targets, CallInfo& callInfo, BoolVector& choiceSet,
+ uint32_t* numInlineable)
+{
+ *numInlineable = 0;
+ uint32_t totalSize = 0;
+
+ // For each target, ask whether it may be inlined.
+ if (!choiceSet.reserve(targets.length()))
+ return false;
+
+ // Don't inline polymorphic sites during the definite properties analysis.
+ // AddClearDefiniteFunctionUsesInScript depends on this for correctness.
+ if (info().analysisMode() == Analysis_DefiniteProperties && targets.length() > 1)
+ return true;
+
+ for (size_t i = 0; i < targets.length(); i++) {
+ JSObject* target = targets[i];
+
+ trackOptimizationAttempt(TrackedStrategy::Call_Inline);
+ trackTypeInfo(TrackedTypeSite::Call_Target, target);
+
+ bool inlineable;
+ InliningDecision decision = makeInliningDecision(target, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return false;
+ case InliningDecision_DontInline:
+ case InliningDecision_WarmUpCountTooLow:
+ inlineable = false;
+ break;
+ case InliningDecision_Inline:
+ inlineable = true;
+ break;
+ default:
+ MOZ_CRASH("Unhandled InliningDecision value!");
+ }
+
+ if (target->is<JSFunction>()) {
+ // Enforce a maximum inlined bytecode limit at the callsite.
+ if (inlineable && target->as<JSFunction>().isInterpreted()) {
+ totalSize += target->as<JSFunction>().nonLazyScript()->length();
+ bool offThread = options.offThreadCompilationAvailable();
+ if (totalSize > optimizationInfo().inlineMaxBytecodePerCallSite(offThread))
+ inlineable = false;
+ }
+ } else {
+ // Non-function targets are not supported by polymorphic inlining.
+ inlineable = false;
+ }
+
+ choiceSet.infallibleAppend(inlineable);
+ if (inlineable)
+ *numInlineable += 1;
+ }
+
+ // If optimization tracking is turned on and one of the inlineable targets
+ // is a native, track the type info of the call. Most native inlinings
+ // depend on the types of the arguments and the return value.
+ if (isOptimizationTrackingEnabled()) {
+ for (size_t i = 0; i < targets.length(); i++) {
+ if (choiceSet[i] && targets[i]->as<JSFunction>().isNative()) {
+ trackTypeInfo(callInfo);
+ break;
+ }
+ }
+ }
+
+ MOZ_ASSERT(choiceSet.length() == targets.length());
+ return true;
+}
+
+static bool
+CanInlineGetPropertyCache(MGetPropertyCache* cache, MDefinition* thisDef)
+{
+ MOZ_ASSERT(cache->object()->type() == MIRType::Object);
+ if (cache->object() != thisDef)
+ return false;
+
+ InlinePropertyTable* table = cache->propTable();
+ if (!table)
+ return false;
+ if (table->numEntries() == 0)
+ return false;
+ return true;
+}
+
+class WrapMGetPropertyCache
+{
+ MGetPropertyCache* cache_;
+
+ private:
+ void discardPriorResumePoint() {
+ if (!cache_)
+ return;
+
+ InlinePropertyTable* propTable = cache_->propTable();
+ if (!propTable)
+ return;
+ MResumePoint* rp = propTable->takePriorResumePoint();
+ if (!rp)
+ return;
+ cache_->block()->discardPreAllocatedResumePoint(rp);
+ }
+
+ public:
+ explicit WrapMGetPropertyCache(MGetPropertyCache* cache)
+ : cache_(cache)
+ { }
+
+ ~WrapMGetPropertyCache() {
+ discardPriorResumePoint();
+ }
+
+ MGetPropertyCache* get() {
+ return cache_;
+ }
+ MGetPropertyCache* operator->() {
+ return get();
+ }
+
+ // This function returns the cache given to the constructor if the
+ // GetPropertyCache can be moved into the ObjectGroup fallback path.
+ MGetPropertyCache* moveableCache(bool hasTypeBarrier, MDefinition* thisDef) {
+ // If we have unhandled uses of the MGetPropertyCache, then we cannot
+ // move it to the ObjectGroup fallback path.
+ if (!hasTypeBarrier) {
+ if (cache_->hasUses())
+ return nullptr;
+ } else {
+ // There is the TypeBarrier consumer, so we check that this is the
+ // only consumer.
+ MOZ_ASSERT(cache_->hasUses());
+ if (!cache_->hasOneUse())
+ return nullptr;
+ }
+
+        // If the |this| object is not identical to the object of the
+        // MGetPropertyCache, or if we do not yet have enough information from
+        // the ObjectGroup, then we cannot use the InlinePropertyTable.
+ if (!CanInlineGetPropertyCache(cache_, thisDef))
+ return nullptr;
+
+ MGetPropertyCache* ret = cache_;
+ cache_ = nullptr;
+ return ret;
+ }
+};
+
+MGetPropertyCache*
+IonBuilder::getInlineableGetPropertyCache(CallInfo& callInfo)
+{
+ if (callInfo.constructing())
+ return nullptr;
+
+ MDefinition* thisDef = callInfo.thisArg();
+ if (thisDef->type() != MIRType::Object)
+ return nullptr;
+
+ MDefinition* funcDef = callInfo.fun();
+ if (funcDef->type() != MIRType::Object)
+ return nullptr;
+
+ // MGetPropertyCache with no uses may be optimized away.
+ if (funcDef->isGetPropertyCache()) {
+ WrapMGetPropertyCache cache(funcDef->toGetPropertyCache());
+ return cache.moveableCache(/* hasTypeBarrier = */ false, thisDef);
+ }
+
+ // Optimize away the following common pattern:
+ // MTypeBarrier[MIRType::Object] <- MGetPropertyCache
+ if (funcDef->isTypeBarrier()) {
+ MTypeBarrier* barrier = funcDef->toTypeBarrier();
+ if (barrier->hasUses())
+ return nullptr;
+ if (barrier->type() != MIRType::Object)
+ return nullptr;
+ if (!barrier->input()->isGetPropertyCache())
+ return nullptr;
+
+ WrapMGetPropertyCache cache(barrier->input()->toGetPropertyCache());
+ return cache.moveableCache(/* hasTypeBarrier = */ true, thisDef);
+ }
+
+ return nullptr;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSingleCall(CallInfo& callInfo, JSObject* targetArg)
+{
+ if (!targetArg->is<JSFunction>()) {
+ InliningStatus status = inlineNonFunctionCall(callInfo, targetArg);
+ trackInlineSuccess(status);
+ return status;
+ }
+
+ JSFunction* target = &targetArg->as<JSFunction>();
+ if (target->isNative()) {
+ InliningStatus status = inlineNativeCall(callInfo, target);
+ trackInlineSuccess(status);
+ return status;
+ }
+
+ // Track success now, as inlining a scripted call makes a new return block
+ // which has a different pc than the current call pc.
+ trackInlineSuccess();
+ return inlineScriptedCall(callInfo, target);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineCallsite(const ObjectVector& targets, CallInfo& callInfo)
+{
+ if (targets.empty()) {
+ trackOptimizationAttempt(TrackedStrategy::Call_Inline);
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
+ return InliningStatus_NotInlined;
+ }
+
+ // Is the function provided by an MGetPropertyCache?
+ // If so, the cache may be movable to a fallback path, with a dispatch
+ // instruction guarding on the incoming ObjectGroup.
+ WrapMGetPropertyCache propCache(getInlineableGetPropertyCache(callInfo));
+ keepFallbackFunctionGetter(propCache.get());
+
+ // Inline single targets -- unless they derive from a cache, in which case
+ // avoiding the cache and guarding is still faster.
+ if (!propCache.get() && targets.length() == 1) {
+ JSObject* target = targets[0];
+
+ trackOptimizationAttempt(TrackedStrategy::Call_Inline);
+ trackTypeInfo(TrackedTypeSite::Call_Target, target);
+
+ InliningDecision decision = makeInliningDecision(target, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return InliningStatus_Error;
+ case InliningDecision_DontInline:
+ return InliningStatus_NotInlined;
+ case InliningDecision_WarmUpCountTooLow:
+ return InliningStatus_WarmUpCountTooLow;
+ case InliningDecision_Inline:
+ break;
+ }
+
+        // Inlining will eliminate uses of the original callee, but it needs to
+ // be preserved in phis if we bail out. Mark the old callee definition as
+ // implicitly used to ensure this happens.
+ callInfo.fun()->setImplicitlyUsedUnchecked();
+
+ // If the callee is not going to be a lambda (which may vary across
+ // different invocations), then the callee definition can be replaced by a
+ // constant.
+ if (target->isSingleton()) {
+ // Replace the function with an MConstant.
+ MConstant* constFun = constant(ObjectValue(*target));
+ if (callInfo.constructing() && callInfo.getNewTarget() == callInfo.fun())
+ callInfo.setNewTarget(constFun);
+ callInfo.setFun(constFun);
+ }
+
+ return inlineSingleCall(callInfo, target);
+ }
+
+ // Choose a subset of the targets for polymorphic inlining.
+ BoolVector choiceSet(alloc());
+ uint32_t numInlined;
+ if (!selectInliningTargets(targets, callInfo, choiceSet, &numInlined))
+ return InliningStatus_Error;
+ if (numInlined == 0)
+ return InliningStatus_NotInlined;
+
+ // Perform a polymorphic dispatch.
+ if (!inlineCalls(callInfo, targets, choiceSet, propCache.get()))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+bool
+IonBuilder::inlineGenericFallback(JSFunction* target, CallInfo& callInfo, MBasicBlock* dispatchBlock)
+{
+ // Generate a new block with all arguments on-stack.
+ MBasicBlock* fallbackBlock = newBlock(dispatchBlock, pc);
+ if (!fallbackBlock)
+ return false;
+
+ // Create a new CallInfo to track modified state within this block.
+ CallInfo fallbackInfo(alloc(), callInfo.constructing());
+ if (!fallbackInfo.init(callInfo))
+ return false;
+ fallbackInfo.popFormals(fallbackBlock);
+
+ // Generate an MCall, which uses stateful |current|.
+ if (!setCurrentAndSpecializePhis(fallbackBlock))
+ return false;
+ if (!makeCall(target, fallbackInfo))
+ return false;
+
+ // Pass return block to caller as |current|.
+ return true;
+}
+
+bool
+IonBuilder::inlineObjectGroupFallback(CallInfo& callInfo, MBasicBlock* dispatchBlock,
+ MObjectGroupDispatch* dispatch, MGetPropertyCache* cache,
+ MBasicBlock** fallbackTarget)
+{
+ // Getting here implies the following:
+ // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
+ // followed by an MTypeBarrier.
+ MOZ_ASSERT(callInfo.fun()->isGetPropertyCache() || callInfo.fun()->isTypeBarrier());
+
+ // 2. The MGetPropertyCache has inlineable cases by guarding on the ObjectGroup.
+ MOZ_ASSERT(dispatch->numCases() > 0);
+
+ // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
+ // have at most a single use.
+ MOZ_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
+ MOZ_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());
+
+ // This means that no resume points yet capture the MGetPropertyCache,
+ // so everything from the MGetPropertyCache up until the call is movable.
+ // We now move the MGetPropertyCache and friends into a fallback path.
+ MOZ_ASSERT(cache->idempotent());
+
+ // Create a new CallInfo to track modified state within the fallback path.
+ CallInfo fallbackInfo(alloc(), callInfo.constructing());
+ if (!fallbackInfo.init(callInfo))
+ return false;
+
+ // Capture stack prior to the call operation. This captures the function.
+ MResumePoint* preCallResumePoint =
+ MResumePoint::New(alloc(), dispatchBlock, pc, MResumePoint::ResumeAt);
+ if (!preCallResumePoint)
+ return false;
+
+ DebugOnly<size_t> preCallFuncIndex = preCallResumePoint->stackDepth() - callInfo.numFormals();
+ MOZ_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) == fallbackInfo.fun());
+
+ // In the dispatch block, replace the function's slot entry with Undefined.
+ MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
+ dispatchBlock->add(undefined);
+ dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined);
+
+ // Construct a block that does nothing but remove formals from the stack.
+ // This is effectively changing the entry resume point of the later fallback block.
+ MBasicBlock* prepBlock = newBlock(dispatchBlock, pc);
+ if (!prepBlock)
+ return false;
+ fallbackInfo.popFormals(prepBlock);
+
+ // Construct a block into which the MGetPropertyCache can be moved.
+ // This is subtle: the pc and resume point are those of the MGetPropertyCache!
+ InlinePropertyTable* propTable = cache->propTable();
+ MResumePoint* priorResumePoint = propTable->takePriorResumePoint();
+ MOZ_ASSERT(propTable->pc() != nullptr);
+ MOZ_ASSERT(priorResumePoint != nullptr);
+ MBasicBlock* getPropBlock = newBlock(prepBlock, propTable->pc(), priorResumePoint);
+ if (!getPropBlock)
+ return false;
+
+ prepBlock->end(MGoto::New(alloc(), getPropBlock));
+
+ // Since the getPropBlock inherited the stack from right before the MGetPropertyCache,
+ // the target of the MGetPropertyCache is still on the stack.
+ DebugOnly<MDefinition*> checkObject = getPropBlock->pop();
+ MOZ_ASSERT(checkObject == cache->object());
+
+ // Move the MGetPropertyCache and friends into the getPropBlock.
+ if (fallbackInfo.fun()->isGetPropertyCache()) {
+ MOZ_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache);
+ getPropBlock->addFromElsewhere(cache);
+ getPropBlock->push(cache);
+ } else {
+ MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
+ MOZ_ASSERT(barrier->type() == MIRType::Object);
+ MOZ_ASSERT(barrier->input()->isGetPropertyCache());
+ MOZ_ASSERT(barrier->input()->toGetPropertyCache() == cache);
+
+ getPropBlock->addFromElsewhere(cache);
+ getPropBlock->addFromElsewhere(barrier);
+ getPropBlock->push(barrier);
+ }
+
+ // Construct an end block with the correct resume point.
+ MBasicBlock* preCallBlock = newBlock(getPropBlock, pc, preCallResumePoint);
+ if (!preCallBlock)
+ return false;
+ getPropBlock->end(MGoto::New(alloc(), preCallBlock));
+
+ // Now inline the MCallGeneric, using preCallBlock as the dispatch point.
+ if (!inlineGenericFallback(nullptr, fallbackInfo, preCallBlock))
+ return false;
+
+ // inlineGenericFallback() set the return block as |current|.
+ preCallBlock->end(MGoto::New(alloc(), current));
+ *fallbackTarget = prepBlock;
+ return true;
+}
+
+bool
+IonBuilder::inlineCalls(CallInfo& callInfo, const ObjectVector& targets, BoolVector& choiceSet,
+ MGetPropertyCache* maybeCache)
+{
+ // Only handle polymorphic inlining.
+ MOZ_ASSERT(IsIonInlinablePC(pc));
+ MOZ_ASSERT(choiceSet.length() == targets.length());
+ MOZ_ASSERT_IF(!maybeCache, targets.length() >= 2);
+ MOZ_ASSERT_IF(maybeCache, targets.length() >= 1);
+
+ MBasicBlock* dispatchBlock = current;
+ callInfo.setImplicitlyUsedUnchecked();
+ callInfo.pushFormals(dispatchBlock);
+
+ // Patch any InlinePropertyTable to only contain functions that are
+ // inlineable. The InlinePropertyTable will also be patched at the end to
+ // exclude native functions that vetoed inlining.
+ if (maybeCache) {
+ InlinePropertyTable* propTable = maybeCache->propTable();
+ propTable->trimToTargets(targets);
+ if (propTable->numEntries() == 0)
+ maybeCache = nullptr;
+ }
+
+ // Generate a dispatch based on guard kind.
+ MDispatchInstruction* dispatch;
+ if (maybeCache) {
+ dispatch = MObjectGroupDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable());
+ callInfo.fun()->setImplicitlyUsedUnchecked();
+ } else {
+ dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
+ }
+
+ // Generate a return block to host the rval-collecting MPhi.
+ jsbytecode* postCall = GetNextPc(pc);
+ MBasicBlock* returnBlock = newBlock(nullptr, postCall);
+ if (!returnBlock)
+ return false;
+ returnBlock->setCallerResumePoint(callerResumePoint_);
+
+ // Set up stack, used to manually create a post-call resume point.
+ returnBlock->inheritSlots(dispatchBlock);
+ callInfo.popFormals(returnBlock);
+
+ MPhi* retPhi = MPhi::New(alloc());
+ returnBlock->addPhi(retPhi);
+ returnBlock->push(retPhi);
+
+ // Create a resume point from current stack state.
+ if (!returnBlock->initEntrySlots(alloc()))
+ return false;
+
+ // Reserve the capacity for the phi.
+    // Note: this is an upper bound. Unreachable targets and uninlineable natives are also counted.
+ uint32_t count = 1; // Possible fallback block.
+ for (uint32_t i = 0; i < targets.length(); i++) {
+ if (choiceSet[i])
+ count++;
+ }
+ if (!retPhi->reserveLength(count))
+ return false;
+
+ // Inline each of the inlineable targets.
+ for (uint32_t i = 0; i < targets.length(); i++) {
+ // Target must be inlineable.
+ if (!choiceSet[i])
+ continue;
+
+ // Even though we made one round of inline decisions already, we may
+ // be amending them below.
+ amendOptimizationAttempt(i);
+
+ // Target must be reachable by the MDispatchInstruction.
+ JSFunction* target = &targets[i]->as<JSFunction>();
+ if (maybeCache && !maybeCache->propTable()->hasFunction(target)) {
+ choiceSet[i] = false;
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNotInDispatch);
+ continue;
+ }
+
+ MBasicBlock* inlineBlock = newBlock(dispatchBlock, pc);
+ if (!inlineBlock)
+ return false;
+
+ // Create a function MConstant to use in the entry ResumePoint. If we
+ // can't use a constant, add a no-op MPolyInlineGuard, to prevent
+ // hoisting env chain gets above the dispatch instruction.
+ MInstruction* funcDef;
+ if (target->isSingleton())
+ funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
+ else
+ funcDef = MPolyInlineGuard::New(alloc(), callInfo.fun());
+
+ funcDef->setImplicitlyUsedUnchecked();
+ dispatchBlock->add(funcDef);
+
+ // Use the inlined callee in the inline resume point and on stack.
+ int funIndex = inlineBlock->entryResumePoint()->stackDepth() - callInfo.numFormals();
+ inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
+ inlineBlock->rewriteSlot(funIndex, funcDef);
+
+ // Create a new CallInfo to track modified state within the inline block.
+ CallInfo inlineInfo(alloc(), callInfo.constructing());
+ if (!inlineInfo.init(callInfo))
+ return false;
+ inlineInfo.popFormals(inlineBlock);
+ inlineInfo.setFun(funcDef);
+
+ if (maybeCache) {
+ // Assign the 'this' value a TypeSet specialized to the groups that
+ // can generate this inlining target.
+ MOZ_ASSERT(callInfo.thisArg() == maybeCache->object());
+ TemporaryTypeSet* thisTypes = maybeCache->propTable()->buildTypeSetForFunction(target);
+ if (!thisTypes)
+ return false;
+
+ MFilterTypeSet* filter = MFilterTypeSet::New(alloc(), inlineInfo.thisArg(), thisTypes);
+ inlineBlock->add(filter);
+ inlineInfo.setThis(filter);
+ }
+
+ // Inline the call into the inlineBlock.
+ if (!setCurrentAndSpecializePhis(inlineBlock))
+ return false;
+ InliningStatus status = inlineSingleCall(inlineInfo, target);
+ if (status == InliningStatus_Error)
+ return false;
+
+ // Natives may veto inlining.
+ if (status == InliningStatus_NotInlined) {
+ MOZ_ASSERT(current == inlineBlock);
+ graph().removeBlock(inlineBlock);
+ choiceSet[i] = false;
+ continue;
+ }
+
+ // inlineSingleCall() changed |current| to the inline return block.
+ MBasicBlock* inlineReturnBlock = current;
+ setCurrent(dispatchBlock);
+
+ // Connect the inline path to the returnBlock.
+ ObjectGroup* funcGroup = target->isSingleton() ? nullptr : target->group();
+ if (!dispatch->addCase(target, funcGroup, inlineBlock))
+ return false;
+
+ MDefinition* retVal = inlineReturnBlock->peek(-1);
+ retPhi->addInput(retVal);
+ inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
+ if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock))
+ return false;
+ }
+
+ // Patch the InlinePropertyTable to not dispatch to vetoed paths.
+ bool useFallback;
+ if (maybeCache) {
+ InlinePropertyTable* propTable = maybeCache->propTable();
+ propTable->trimTo(targets, choiceSet);
+
+ if (propTable->numEntries() == 0 || !propTable->hasPriorResumePoint()) {
+ // Output a generic fallback path.
+ MOZ_ASSERT_IF(propTable->numEntries() == 0, dispatch->numCases() == 0);
+ maybeCache = nullptr;
+ useFallback = true;
+ } else {
+ // We need a fallback path if the ObjectGroup dispatch does not
+ // handle all incoming objects.
+ useFallback = false;
+ TemporaryTypeSet* objectTypes = maybeCache->object()->resultTypeSet();
+ for (uint32_t i = 0; i < objectTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* obj = objectTypes->getObject(i);
+ if (!obj)
+ continue;
+
+ if (!obj->isGroup()) {
+ useFallback = true;
+ break;
+ }
+
+ if (!propTable->hasObjectGroup(obj->group())) {
+ useFallback = true;
+ break;
+ }
+ }
+
+ if (!useFallback) {
+ // The object group dispatch handles all possible incoming
+ // objects, so the cache and barrier will not be reached and
+ // can be eliminated.
+ if (callInfo.fun()->isGetPropertyCache()) {
+ MOZ_ASSERT(callInfo.fun() == maybeCache);
+ } else {
+ MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
+ MOZ_ASSERT(!barrier->hasUses());
+ MOZ_ASSERT(barrier->type() == MIRType::Object);
+ MOZ_ASSERT(barrier->input()->isGetPropertyCache());
+ MOZ_ASSERT(barrier->input()->toGetPropertyCache() == maybeCache);
+ barrier->block()->discard(barrier);
+ }
+
+ MOZ_ASSERT(!maybeCache->hasUses());
+ maybeCache->block()->discard(maybeCache);
+ }
+ }
+ } else {
+ useFallback = dispatch->numCases() < targets.length();
+ }
+
+ // If necessary, generate a fallback path.
+ if (useFallback) {
+ // Generate fallback blocks, and set |current| to the fallback return block.
+ if (maybeCache) {
+ MBasicBlock* fallbackTarget;
+ if (!inlineObjectGroupFallback(callInfo, dispatchBlock,
+ dispatch->toObjectGroupDispatch(),
+ maybeCache, &fallbackTarget))
+ {
+ return false;
+ }
+ dispatch->addFallback(fallbackTarget);
+ } else {
+ JSFunction* remaining = nullptr;
+
+ // If there is only 1 remaining case, we can annotate the fallback call
+ // with the target information.
+ if (dispatch->numCases() + 1 == targets.length()) {
+ for (uint32_t i = 0; i < targets.length(); i++) {
+ if (choiceSet[i])
+ continue;
+
+ MOZ_ASSERT(!remaining);
+ if (targets[i]->is<JSFunction>() && targets[i]->as<JSFunction>().isSingleton())
+ remaining = &targets[i]->as<JSFunction>();
+ break;
+ }
+ }
+
+ if (!inlineGenericFallback(remaining, callInfo, dispatchBlock))
+ return false;
+ dispatch->addFallback(current);
+ }
+
+ MBasicBlock* fallbackReturnBlock = current;
+
+ // Connect fallback case to return infrastructure.
+ MDefinition* retVal = fallbackReturnBlock->peek(-1);
+ retPhi->addInput(retVal);
+ fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock));
+ if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock))
+ return false;
+ }
+
+ // Finally add the dispatch instruction.
+ // This must be done at the end so that add() may be called above.
+ dispatchBlock->end(dispatch);
+
+ // Check the depth change: +1 for retval
+ MOZ_ASSERT(returnBlock->stackDepth() == dispatchBlock->stackDepth() - callInfo.numFormals() + 1);
+
+ graph().moveBlockToEnd(returnBlock);
+ return setCurrentAndSpecializePhis(returnBlock);
+}
+
+MInstruction*
+IonBuilder::createNamedLambdaObject(MDefinition* callee, MDefinition* env)
+{
+ // Get a template CallObject that we'll use to generate inline object
+ // creation.
+ LexicalEnvironmentObject* templateObj = inspector->templateNamedLambdaObject();
+
+ // One field is added to the function to handle its name. This cannot be a
+ // dynamic slot because there is still plenty of room on the NamedLambda object.
+ MOZ_ASSERT(!templateObj->hasDynamicSlots());
+
+ // Allocate the actual object. It is important that no intervening
+    // instructions could potentially bail out, thus leaking the dynamic slots
+ // pointer.
+ MInstruction* declEnvObj = MNewNamedLambdaObject::New(alloc(), templateObj);
+ current->add(declEnvObj);
+
+ // Initialize the object's reserved slots. No post barrier is needed here:
+ // the object will be allocated in the nursery if possible, and if the
+ // tenured heap is used instead, a minor collection will have been performed
+ // that moved env/callee to the tenured heap.
+ current->add(MStoreFixedSlot::New(alloc(), declEnvObj,
+ NamedLambdaObject::enclosingEnvironmentSlot(), env));
+ current->add(MStoreFixedSlot::New(alloc(), declEnvObj,
+ NamedLambdaObject::lambdaSlot(), callee));
+
+ return declEnvObj;
+}
+
+MInstruction*
+IonBuilder::createCallObject(MDefinition* callee, MDefinition* env)
+{
+ // Get a template CallObject that we'll use to generate inline object
+ // creation.
+ CallObject* templateObj = inspector->templateCallObject();
+
+ // Allocate the object. Run-once scripts need a singleton type, so always do
+ // a VM call in such cases.
+ MNewCallObjectBase* callObj;
+ if (script()->treatAsRunOnce() || templateObj->isSingleton())
+ callObj = MNewSingletonCallObject::New(alloc(), templateObj);
+ else
+ callObj = MNewCallObject::New(alloc(), templateObj);
+ current->add(callObj);
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in createNamedLambdaObject.
+ current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::enclosingEnvironmentSlot(), env));
+ current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee));
+
+    // Copy closed-over argument slots into the call object. When the function
+    // has parameter expressions, the formals are not initialized at this point,
+    // so store the uninitialized-lexical magic value instead.
+ MSlots* slots = nullptr;
+ for (PositionalFormalParameterIter fi(script()); fi; fi++) {
+ if (!fi.closedOver())
+ continue;
+
+ if (!alloc().ensureBallast())
+ return nullptr;
+
+ unsigned slot = fi.location().slot();
+ unsigned formal = fi.argumentSlot();
+ unsigned numFixedSlots = templateObj->numFixedSlots();
+ MDefinition* param;
+ if (script()->functionHasParameterExprs())
+ param = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ else
+ param = current->getSlot(info().argSlotUnchecked(formal));
+ if (slot >= numFixedSlots) {
+ if (!slots) {
+ slots = MSlots::New(alloc(), callObj);
+ current->add(slots);
+ }
+ current->add(MStoreSlot::New(alloc(), slots, slot - numFixedSlots, param));
+ } else {
+ current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param));
+ }
+ }
+
+ return callObj;
+}
+
+MDefinition*
+IonBuilder::createThisScripted(MDefinition* callee, MDefinition* newTarget)
+{
+ // Get callee.prototype.
+ //
+ // This instruction MUST be idempotent: since it does not correspond to an
+ // explicit operation in the bytecode, we cannot use resumeAfter().
+ // Getters may not override |prototype| fetching, so this operation is indeed idempotent.
+ // - First try an idempotent property cache.
+    // - If the idempotent property cache has been invalidated, we can't use a
+    //   non-idempotent cache, so we fall back to CallGetProperty.
+ //
+ // Note: both CallGetProperty and GetPropertyCache can trigger a GC,
+ // and thus invalidation.
+ MInstruction* getProto;
+ if (!invalidatedIdempotentCache()) {
+ MConstant* id = constant(StringValue(names().prototype));
+ MGetPropertyCache* getPropCache = MGetPropertyCache::New(alloc(), newTarget, id,
+ /* monitored = */ false);
+ getPropCache->setIdempotent();
+ getProto = getPropCache;
+ } else {
+ MCallGetProperty* callGetProp = MCallGetProperty::New(alloc(), newTarget, names().prototype);
+ callGetProp->setIdempotent();
+ getProto = callGetProp;
+ }
+ current->add(getProto);
+
+ // Create this from prototype
+ MCreateThisWithProto* createThis = MCreateThisWithProto::New(alloc(), callee, newTarget, getProto);
+ current->add(createThis);
+
+ return createThis;
+}
+
+JSObject*
+IonBuilder::getSingletonPrototype(JSFunction* target)
+{
+ TypeSet::ObjectKey* targetKey = TypeSet::ObjectKey::get(target);
+ if (targetKey->unknownProperties())
+ return nullptr;
+
+ jsid protoid = NameToId(names().prototype);
+ HeapTypeSetKey protoProperty = targetKey->property(protoid);
+
+ return protoProperty.singleton(constraints());
+}
+
+MDefinition*
+IonBuilder::createThisScriptedSingleton(JSFunction* target, MDefinition* callee)
+{
+ if (!target->hasScript())
+ return nullptr;
+
+    // Get the singleton prototype (if it exists).
+ JSObject* proto = getSingletonPrototype(target);
+ if (!proto)
+ return nullptr;
+
+ JSObject* templateObject = inspector->getTemplateObject(pc);
+ if (!templateObject)
+ return nullptr;
+ if (!templateObject->is<PlainObject>() && !templateObject->is<UnboxedPlainObject>())
+ return nullptr;
+ if (templateObject->staticPrototype() != proto)
+ return nullptr;
+
+ TypeSet::ObjectKey* templateObjectKey = TypeSet::ObjectKey::get(templateObject->group());
+ if (templateObjectKey->hasFlags(constraints(), OBJECT_FLAG_NEW_SCRIPT_CLEARED))
+ return nullptr;
+
+ StackTypeSet* thisTypes = TypeScript::ThisTypes(target->nonLazyScript());
+ if (!thisTypes || !thisTypes->hasType(TypeSet::ObjectType(templateObject)))
+ return nullptr;
+
+ // Generate an inline path to create a new |this| object with
+ // the given singleton prototype.
+ MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ MCreateThisWithTemplate* createThis =
+ MCreateThisWithTemplate::New(alloc(), constraints(), templateConst,
+ templateObject->group()->initialHeap(constraints()));
+ current->add(templateConst);
+ current->add(createThis);
+
+ return createThis;
+}
+
+MDefinition*
+IonBuilder::createThisScriptedBaseline(MDefinition* callee)
+{
+ // Try to inline |this| creation based on Baseline feedback.
+
+ JSFunction* target = inspector->getSingleCallee(pc);
+ if (!target || !target->hasScript())
+ return nullptr;
+
+ JSObject* templateObject = inspector->getTemplateObject(pc);
+ if (!templateObject)
+ return nullptr;
+ if (!templateObject->is<PlainObject>() && !templateObject->is<UnboxedPlainObject>())
+ return nullptr;
+
+ Shape* shape = target->lookupPure(compartment->runtime()->names().prototype);
+ if (!shape || !shape->hasDefaultGetter() || !shape->hasSlot())
+ return nullptr;
+
+ Value protov = target->getSlot(shape->slot());
+ if (!protov.isObject())
+ return nullptr;
+
+ JSObject* proto = checkNurseryObject(&protov.toObject());
+ if (proto != templateObject->staticPrototype())
+ return nullptr;
+
+ TypeSet::ObjectKey* templateObjectKey = TypeSet::ObjectKey::get(templateObject->group());
+ if (templateObjectKey->hasFlags(constraints(), OBJECT_FLAG_NEW_SCRIPT_CLEARED))
+ return nullptr;
+
+ StackTypeSet* thisTypes = TypeScript::ThisTypes(target->nonLazyScript());
+ if (!thisTypes || !thisTypes->hasType(TypeSet::ObjectType(templateObject)))
+ return nullptr;
+
+ // Shape guard.
+ callee = addShapeGuard(callee, target->lastProperty(), Bailout_ShapeGuard);
+
+ // Guard callee.prototype == proto.
+ MOZ_ASSERT(shape->numFixedSlots() == 0, "Must be a dynamic slot");
+ MSlots* slots = MSlots::New(alloc(), callee);
+ current->add(slots);
+ MLoadSlot* prototype = MLoadSlot::New(alloc(), slots, shape->slot());
+ current->add(prototype);
+ MDefinition* protoConst = constant(ObjectValue(*proto));
+ MGuardObjectIdentity* guard = MGuardObjectIdentity::New(alloc(), prototype, protoConst,
+ /* bailOnEquality = */ false);
+ current->add(guard);
+
+ // Generate an inline path to create a new |this| object with
+ // the given prototype.
+ MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ MCreateThisWithTemplate* createThis =
+ MCreateThisWithTemplate::New(alloc(), constraints(), templateConst,
+ templateObject->group()->initialHeap(constraints()));
+ current->add(templateConst);
+ current->add(createThis);
+
+ return createThis;
+}
+
+MDefinition*
+IonBuilder::createThis(JSFunction* target, MDefinition* callee, MDefinition* newTarget)
+{
+ // Create |this| for unknown target.
+ if (!target) {
+ if (MDefinition* createThis = createThisScriptedBaseline(callee))
+ return createThis;
+
+ MCreateThis* createThis = MCreateThis::New(alloc(), callee, newTarget);
+ current->add(createThis);
+ return createThis;
+ }
+
+ // Native constructors build the new Object themselves.
+ if (target->isNative()) {
+ if (!target->isConstructor())
+ return nullptr;
+
+ MConstant* magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING));
+ current->add(magic);
+ return magic;
+ }
+
+ if (target->isBoundFunction())
+ return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+
+ if (target->isDerivedClassConstructor()) {
+ MOZ_ASSERT(target->isClassConstructor());
+ return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ }
+
+ // Try baking in the prototype.
+ if (MDefinition* createThis = createThisScriptedSingleton(target, callee))
+ return createThis;
+
+ if (MDefinition* createThis = createThisScriptedBaseline(callee))
+ return createThis;
+
+ return createThisScripted(callee, newTarget);
+}
+
+bool
+IonBuilder::jsop_funcall(uint32_t argc)
+{
+ // Stack for JSOP_FUNCALL:
+ // 1: arg0
+ // ...
+ // argc: argN
+ // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
+ // argc+2: The native 'call' function.
+
+ int calleeDepth = -((int)argc + 2);
+ int funcDepth = -((int)argc + 1);
+
+ // If |Function.prototype.call| may be overridden, don't optimize callsite.
+ TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
+ JSFunction* native = getSingleCallTarget(calleeTypes);
+ if (!native || !native->isNative() || native->native() != &fun_call) {
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, argc))
+ return false;
+ return makeCall(native, callInfo);
+ }
+ current->peek(calleeDepth)->setImplicitlyUsedUnchecked();
+
+ // Extract call target.
+ TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
+ JSFunction* target = getSingleCallTarget(funTypes);
+
+ // Shimmy the slots down to remove the native 'call' function.
+ current->shimmySlots(funcDepth - 1);
+
+ bool zeroArguments = (argc == 0);
+
+ // If no |this| argument was provided, explicitly pass Undefined.
+ // Pushing is safe here, since one stack slot has been removed.
+ if (zeroArguments) {
+ pushConstant(UndefinedValue());
+ } else {
+ // |this| becomes implicit in the call.
+ argc -= 1;
+ }
+
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, argc))
+ return false;
+
+ // Try to inline the call.
+ if (!zeroArguments) {
+ InliningDecision decision = makeInliningDecision(target, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return false;
+ case InliningDecision_DontInline:
+ case InliningDecision_WarmUpCountTooLow:
+ break;
+ case InliningDecision_Inline:
+ if (target->isInterpreted()) {
+ InliningStatus status = inlineScriptedCall(callInfo, target);
+ if (status == InliningStatus_Inlined)
+ return true;
+ if (status == InliningStatus_Error)
+ return false;
+ }
+ break;
+ }
+ }
+
+ // Call without inlining.
+ return makeCall(target, callInfo);
+}
+
+bool
+IonBuilder::jsop_funapply(uint32_t argc)
+{
+ int calleeDepth = -((int)argc + 2);
+
+ TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
+ JSFunction* native = getSingleCallTarget(calleeTypes);
+ if (argc != 2 || info().analysisMode() == Analysis_ArgumentsUsage) {
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, argc))
+ return false;
+ return makeCall(native, callInfo);
+ }
+
+ // Disable compilation if the second argument to |apply| cannot be guaranteed
+ // to be either definitely |arguments| or definitely not |arguments|.
+ MDefinition* argument = current->peek(-1);
+ if (script()->argumentsHasVarBinding() &&
+ argument->mightBeType(MIRType::MagicOptimizedArguments) &&
+ argument->type() != MIRType::MagicOptimizedArguments)
+ {
+ return abort("fun.apply with MaybeArguments");
+ }
+
+    // Fall back to a regular call if arg 2 is not definitely |arguments|.
+ if (argument->type() != MIRType::MagicOptimizedArguments) {
+ // Optimize fun.apply(self, array) if the length is sane and there are no holes.
+ TemporaryTypeSet* objTypes = argument->resultTypeSet();
+ if (native && native->isNative() && native->native() == fun_apply &&
+ objTypes &&
+ objTypes->getKnownClass(constraints()) == &ArrayObject::class_ &&
+ !objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW) &&
+ ElementAccessIsPacked(constraints(), argument))
+ {
+ return jsop_funapplyarray(argc);
+ }
+
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, argc))
+ return false;
+ return makeCall(native, callInfo);
+ }
+
+ if ((!native || !native->isNative() ||
+ native->native() != fun_apply) &&
+ info().analysisMode() != Analysis_DefiniteProperties)
+ {
+ return abort("fun.apply speculation failed");
+ }
+
+    // Take the fun.apply path that definitely uses |arguments|.
+ return jsop_funapplyarguments(argc);
+}
+
+bool
+IonBuilder::jsop_funapplyarray(uint32_t argc)
+{
+ MOZ_ASSERT(argc == 2);
+
+ int funcDepth = -((int)argc + 1);
+
+ // Extract call target.
+ TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
+ JSFunction* target = getSingleCallTarget(funTypes);
+
+    // Pop the array argument.
+ MDefinition* argObj = current->pop();
+
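+    // Load the array's dense elements so MApplyArray can read the call
+    // arguments directly from them.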
+ MElements* elements = MElements::New(alloc(), argObj);
+ current->add(elements);
+
+ // Pop the |this| argument.
+ MDefinition* argThis = current->pop();
+
+ // Unwrap the (JSFunction *) parameter.
+ MDefinition* argFunc = current->pop();
+
+ // Pop apply function.
+ MDefinition* nativeFunc = current->pop();
+ nativeFunc->setImplicitlyUsedUnchecked();
+
+ WrappedFunction* wrappedTarget = target ? new(alloc()) WrappedFunction(target) : nullptr;
+ MApplyArray* apply = MApplyArray::New(alloc(), wrappedTarget, argFunc, elements, argThis);
+ current->add(apply);
+ current->push(apply);
+ if (!resumeAfter(apply))
+ return false;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::jsop_funapplyarguments(uint32_t argc)
+{
+ // Stack for JSOP_FUNAPPLY:
+ // 1: Vp
+ // 2: This
+    // argc+1: JSFunction*, the 'f' in |f.apply()|, in |this| position.
+ // argc+2: The native 'apply' function.
+
+ int funcDepth = -((int)argc + 1);
+
+ // Extract call target.
+ TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
+ JSFunction* target = getSingleCallTarget(funTypes);
+
+    // When this script isn't inlined, use MApplyArgs to copy the arguments
+    // from the stack and call the function.
+ if (inliningDepth_ == 0 && info().analysisMode() != Analysis_DefiniteProperties) {
+ // The array argument corresponds to the arguments object. As the JIT
+ // is implicitly reading the arguments object in the next instruction,
+ // we need to prevent the deletion of the arguments object from resume
+ // points, so that Baseline will behave correctly after a bailout.
+ MDefinition* vp = current->pop();
+ vp->setImplicitlyUsedUnchecked();
+
+ MDefinition* argThis = current->pop();
+
+ // Unwrap the (JSFunction*) parameter.
+ MDefinition* argFunc = current->pop();
+
+ // Pop apply function.
+ MDefinition* nativeFunc = current->pop();
+ nativeFunc->setImplicitlyUsedUnchecked();
+
+ MArgumentsLength* numArgs = MArgumentsLength::New(alloc());
+ current->add(numArgs);
+
+ WrappedFunction* wrappedTarget = target ? new(alloc()) WrappedFunction(target) : nullptr;
+ MApplyArgs* apply = MApplyArgs::New(alloc(), wrappedTarget, argFunc, numArgs, argThis);
+ current->add(apply);
+ current->push(apply);
+ if (!resumeAfter(apply))
+ return false;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
+ }
+
+    // When inlining, we know the arguments the function gets called with and
+    // can optimize even more by just calling the function with those arguments.
+ // We also try this path when doing the definite properties analysis, as we
+ // can inline the apply() target and don't care about the actual arguments
+ // that were passed in.
+
+ CallInfo callInfo(alloc(), false);
+
+ // Vp
+ MDefinition* vp = current->pop();
+ vp->setImplicitlyUsedUnchecked();
+
+ // Arguments
+ if (inliningDepth_) {
+ if (!callInfo.setArgs(inlineCallInfo_->argv()))
+ return false;
+ }
+
+ // This
+ MDefinition* argThis = current->pop();
+ callInfo.setThis(argThis);
+
+ // Pop function parameter.
+ MDefinition* argFunc = current->pop();
+ callInfo.setFun(argFunc);
+
+ // Pop apply function.
+ MDefinition* nativeFunc = current->pop();
+ nativeFunc->setImplicitlyUsedUnchecked();
+
+ // Try to inline the call.
+ InliningDecision decision = makeInliningDecision(target, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return false;
+ case InliningDecision_DontInline:
+ case InliningDecision_WarmUpCountTooLow:
+ break;
+ case InliningDecision_Inline:
+ if (target->isInterpreted()) {
+ InliningStatus status = inlineScriptedCall(callInfo, target);
+ if (status == InliningStatus_Inlined)
+ return true;
+ if (status == InliningStatus_Error)
+ return false;
+ }
+ }
+
+ return makeCall(target, callInfo);
+}
+
+bool
+IonBuilder::jsop_call(uint32_t argc, bool constructing)
+{
+ startTrackingOptimizations();
+
+ // If this call has never executed, try to seed the observed type set
+ // based on how the call result is used.
+ TemporaryTypeSet* observed = bytecodeTypes(pc);
+ if (observed->empty()) {
+ if (BytecodeFlowsToBitop(pc)) {
+ observed->addType(TypeSet::Int32Type(), alloc_->lifoAlloc());
+ } else if (*GetNextPc(pc) == JSOP_POS) {
+ // Note: this is lame, overspecialized on the code patterns used
+ // by asm.js and should be replaced by a more general mechanism.
+ // See bug 870847.
+ observed->addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
+ }
+ }
+
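+    // The callee sits below |this|, the |argc| arguments and, when
+    // constructing, |new.target|.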
+ int calleeDepth = -((int)argc + 2 + constructing);
+
+    // Acquire the known call targets, if any.
+ ObjectVector targets(alloc());
+ TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
+ if (calleeTypes && !getPolyCallTargets(calleeTypes, constructing, targets, 4))
+ return false;
+
+ CallInfo callInfo(alloc(), constructing);
+ if (!callInfo.init(current, argc))
+ return false;
+
+ // Try inlining
+ InliningStatus status = inlineCallsite(targets, callInfo);
+ if (status == InliningStatus_Inlined)
+ return true;
+ if (status == InliningStatus_Error)
+ return false;
+
+ // Discard unreferenced & pre-allocated resume points.
+ replaceMaybeFallbackFunctionGetter(nullptr);
+
+ // No inline, just make the call.
+ JSFunction* target = nullptr;
+ if (targets.length() == 1 && targets[0]->is<JSFunction>())
+ target = &targets[0]->as<JSFunction>();
+
+ if (target && status == InliningStatus_WarmUpCountTooLow) {
+ MRecompileCheck* check =
+ MRecompileCheck::New(alloc(), target->nonLazyScript(),
+ optimizationInfo().inliningRecompileThreshold(),
+ MRecompileCheck::RecompileCheck_Inlining);
+ current->add(check);
+ }
+
+ return makeCall(target, callInfo);
+}
+
+bool
+IonBuilder::testShouldDOMCall(TypeSet* inTypes, JSFunction* func, JSJitInfo::OpType opType)
+{
+ if (!func->isNative() || !func->jitInfo())
+ return false;
+
+ // If all the DOM objects flowing through are legal with this
+ // property, we can bake in a call to the bottom half of the DOM
+ // accessor
+ DOMInstanceClassHasProtoAtDepth instanceChecker =
+ compartment->runtime()->DOMcallbacks()->instanceClassMatchesProto;
+
+ const JSJitInfo* jinfo = func->jitInfo();
+ if (jinfo->type() != opType)
+ return false;
+
+ for (unsigned i = 0; i < inTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = inTypes->getObject(i);
+ if (!key)
+ continue;
+
+ if (!key->hasStableClassAndProto(constraints()))
+ return false;
+
+ if (!instanceChecker(key->clasp(), jinfo->protoID, jinfo->depth))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ArgumentTypesMatch(MDefinition* def, StackTypeSet* calleeTypes)
+{
+ if (!calleeTypes)
+ return false;
+
+ if (def->resultTypeSet()) {
+ MOZ_ASSERT(def->type() == MIRType::Value || def->mightBeType(def->type()));
+ return def->resultTypeSet()->isSubset(calleeTypes);
+ }
+
+ if (def->type() == MIRType::Value)
+ return false;
+
+ if (def->type() == MIRType::Object)
+ return calleeTypes->unknownObject();
+
+ return calleeTypes->mightBeMIRType(def->type());
+}
+
+bool
+IonBuilder::testNeedsArgumentCheck(JSFunction* target, CallInfo& callInfo)
+{
+    // If we have a known target, check if the caller arg types are a subset of the
+    // callee's. Since type sets only accumulate and can never shrink, that means we
+    // don't need to check the arguments anymore.
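+    // For illustration: if the callee's accumulated TI for an argument already
+    // contains, say, {Int32, String} and the caller only ever passes Int32
+    // here, the caller's set is a subset and no argument check is needed; a
+    // caller that might also pass an Object would still require one.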
+ if (!target->hasScript())
+ return true;
+
+ JSScript* targetScript = target->nonLazyScript();
+
+ if (!ArgumentTypesMatch(callInfo.thisArg(), TypeScript::ThisTypes(targetScript)))
+ return true;
+ uint32_t expected_args = Min<uint32_t>(callInfo.argc(), target->nargs());
+ for (size_t i = 0; i < expected_args; i++) {
+ if (!ArgumentTypesMatch(callInfo.getArg(i), TypeScript::ArgTypes(targetScript, i)))
+ return true;
+ }
+ for (size_t i = callInfo.argc(); i < target->nargs(); i++) {
+ if (!TypeScript::ArgTypes(targetScript, i)->mightBeMIRType(MIRType::Undefined))
+ return true;
+ }
+
+ return false;
+}
+
+MCall*
+IonBuilder::makeCallHelper(JSFunction* target, CallInfo& callInfo)
+{
+    // This function may be called with a mutated stack.
+    // Querying TI for popped types is invalid.
+
+ uint32_t targetArgs = callInfo.argc();
+
+ // Collect number of missing arguments provided that the target is
+ // scripted. Native functions are passed an explicit 'argc' parameter.
+ if (target && !target->isNative())
+ targetArgs = Max<uint32_t>(target->nargs(), callInfo.argc());
+
+ bool isDOMCall = false;
+ if (target && !callInfo.constructing()) {
+ // We know we have a single call target. Check whether the "this" types
+ // are DOM types and our function a DOM function, and if so flag the
+ // MCall accordingly.
+ TemporaryTypeSet* thisTypes = callInfo.thisArg()->resultTypeSet();
+ if (thisTypes &&
+ thisTypes->getKnownMIRType() == MIRType::Object &&
+ thisTypes->isDOMClass(constraints()) &&
+ testShouldDOMCall(thisTypes, target, JSJitInfo::Method))
+ {
+ isDOMCall = true;
+ }
+ }
+
+ MCall* call = MCall::New(alloc(), target, targetArgs + 1 + callInfo.constructing(),
+ callInfo.argc(), callInfo.constructing(), isDOMCall);
+ if (!call)
+ return nullptr;
+
+ if (callInfo.constructing())
+ call->addArg(targetArgs + 1, callInfo.getNewTarget());
+
+ // Explicitly pad any missing arguments with |undefined|.
+ // This permits skipping the argumentsRectifier.
+ for (int i = targetArgs; i > (int)callInfo.argc(); i--) {
+ MOZ_ASSERT_IF(target, !target->isNative());
+ MConstant* undef = constant(UndefinedValue());
+ if (!alloc().ensureBallast())
+ return nullptr;
+ call->addArg(i, undef);
+ }
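+    // For example, for a scripted |function f(a, b, c)| called as |f(1)|,
+    // targetArgs is 3 and argc is 1, so the slots for |b| and |c| are filled
+    // with |undefined| here and no arguments rectifier frame is needed.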
+
+ // Add explicit arguments.
+ // Skip addArg(0) because it is reserved for this
+ for (int32_t i = callInfo.argc() - 1; i >= 0; i--)
+ call->addArg(i + 1, callInfo.getArg(i));
+
+ // Now that we've told it about all the args, compute whether it's movable
+ call->computeMovable();
+
+ // Inline the constructor on the caller-side.
+ if (callInfo.constructing()) {
+ MDefinition* create = createThis(target, callInfo.fun(), callInfo.getNewTarget());
+ if (!create) {
+ abort("Failure inlining constructor for call.");
+ return nullptr;
+ }
+
+ callInfo.thisArg()->setImplicitlyUsedUnchecked();
+ callInfo.setThis(create);
+ }
+
+ // Pass |this| and function.
+ MDefinition* thisArg = callInfo.thisArg();
+ call->addArg(0, thisArg);
+
+ if (target && !testNeedsArgumentCheck(target, callInfo))
+ call->disableArgCheck();
+
+ call->initFunction(callInfo.fun());
+
+ current->add(call);
+ return call;
+}
+
+static bool
+DOMCallNeedsBarrier(const JSJitInfo* jitinfo, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(jitinfo->type() != JSJitInfo::InlinableNative);
+
+ // If the return type of our DOM native is in "types" already, we don't
+ // actually need a barrier.
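+    // E.g. for a DOM getter whose jitinfo declares an int32 return value: if
+    // the observed type set's known MIRType is already Int32, no barrier is
+    // needed; otherwise a barrier is still required.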
+ if (jitinfo->returnType() == JSVAL_TYPE_UNKNOWN)
+ return true;
+
+ // JSVAL_TYPE_OBJECT doesn't tell us much; we still have to barrier on the
+ // actual type of the object.
+ if (jitinfo->returnType() == JSVAL_TYPE_OBJECT)
+ return true;
+
+ // No need for a barrier if we're already expecting the type we'll produce.
+ return MIRTypeFromValueType(jitinfo->returnType()) != types->getKnownMIRType();
+}
+
+bool
+IonBuilder::makeCall(JSFunction* target, CallInfo& callInfo)
+{
+ // Constructor calls to non-constructors should throw. We don't want to use
+ // CallKnown in this case.
+ MOZ_ASSERT_IF(callInfo.constructing() && target, target->isConstructor());
+
+ MCall* call = makeCallHelper(target, callInfo);
+ if (!call)
+ return false;
+
+ current->push(call);
+ if (call->isEffectful() && !resumeAfter(call))
+ return false;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ if (call->isCallDOMNative())
+ return pushDOMTypeBarrier(call, types, call->getSingleTarget()->rawJSFunction());
+
+ return pushTypeBarrier(call, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::jsop_eval(uint32_t argc)
+{
+ int calleeDepth = -((int)argc + 2);
+ TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
+
+ // Emit a normal call if the eval has never executed. This keeps us from
+ // disabling compilation for the script when testing with --ion-eager.
+ if (calleeTypes && calleeTypes->empty())
+ return jsop_call(argc, /* constructing = */ false);
+
+ JSFunction* singleton = getSingleCallTarget(calleeTypes);
+ if (!singleton)
+ return abort("No singleton callee for eval()");
+
+ if (script()->global().valueIsEval(ObjectValue(*singleton))) {
+ if (argc != 1)
+ return abort("Direct eval with more than one argument");
+
+ if (!info().funMaybeLazy())
+ return abort("Direct eval in global code");
+
+ if (info().funMaybeLazy()->isArrow())
+ return abort("Direct eval from arrow function");
+
+ CallInfo callInfo(alloc(), /* constructing = */ false);
+ if (!callInfo.init(current, argc))
+ return false;
+ callInfo.setImplicitlyUsedUnchecked();
+
+ callInfo.fun()->setImplicitlyUsedUnchecked();
+
+ MDefinition* envChain = current->environmentChain();
+ MDefinition* string = callInfo.getArg(0);
+
+ // Direct eval acts as identity on non-string types according to
+ // ES5 15.1.2.1 step 1.
+ if (!string->mightBeType(MIRType::String)) {
+ current->push(string);
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(string, types, BarrierKind::TypeSet);
+ }
+
+ if (!jsop_newtarget())
+ return false;
+ MDefinition* newTargetValue = current->pop();
+
+ // Try to pattern match 'eval(v + "()")'. In this case v is likely a
+ // name on the env chain and the eval is performing a call on that
+ // value. Use an env chain lookup rather than a full eval.
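+        // For example (roughly):
+        //
+        //   var v = "callback";
+        //   eval(v + "()");   // |callback| is looked up on the env chain and
+        //                     // called directly instead of doing a full eval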
+ if (string->isConcat() &&
+ string->getOperand(1)->type() == MIRType::String &&
+ string->getOperand(1)->maybeConstantValue())
+ {
+ JSAtom* atom = &string->getOperand(1)->maybeConstantValue()->toString()->asAtom();
+
+ if (StringEqualsAscii(atom, "()")) {
+ MDefinition* name = string->getOperand(0);
+ MInstruction* dynamicName = MGetDynamicName::New(alloc(), envChain, name);
+ current->add(dynamicName);
+
+ current->push(dynamicName);
+ current->push(constant(UndefinedValue())); // thisv
+
+ CallInfo evalCallInfo(alloc(), /* constructing = */ false);
+ if (!evalCallInfo.init(current, /* argc = */ 0))
+ return false;
+
+ return makeCall(nullptr, evalCallInfo);
+ }
+ }
+
+ MInstruction* ins = MCallDirectEval::New(alloc(), envChain, string,
+ newTargetValue, pc);
+ current->add(ins);
+ current->push(ins);
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return resumeAfter(ins) && pushTypeBarrier(ins, types, BarrierKind::TypeSet);
+ }
+
+ return jsop_call(argc, /* constructing = */ false);
+}
+
+bool
+IonBuilder::jsop_compare(JSOp op)
+{
+ MDefinition* right = current->pop();
+ MDefinition* left = current->pop();
+
+ return jsop_compare(op, left, right);
+}
+
+bool
+IonBuilder::jsop_compare(JSOp op, MDefinition* left, MDefinition* right)
+{
+ bool emitted = false;
+
+ if (!forceInlineCaches()) {
+ if (!compareTrySpecialized(&emitted, op, left, right) || emitted)
+ return emitted;
+ if (!compareTryBitwise(&emitted, op, left, right) || emitted)
+ return emitted;
+ if (!compareTrySpecializedOnBaselineInspector(&emitted, op, left, right) || emitted)
+ return emitted;
+ }
+
+ if (!compareTrySharedStub(&emitted, op, left, right) || emitted)
+ return emitted;
+
+ // Not possible to optimize. Do a slow vm call.
+ MCompare* ins = MCompare::New(alloc(), left, right, op);
+ ins->cacheOperandMightEmulateUndefined(constraints());
+
+ current->add(ins);
+ current->push(ins);
+ if (ins->isEffectful() && !resumeAfter(ins))
+ return false;
+ return true;
+}
+
+static bool
+ObjectOrSimplePrimitive(MDefinition* op)
+{
+ // Return true if op is either undefined/null/boolean/int32 or an object.
+ return !op->mightBeType(MIRType::String)
+ && !op->mightBeType(MIRType::Symbol)
+ && !op->mightBeType(MIRType::Double)
+ && !op->mightBeType(MIRType::Float32)
+ && !op->mightBeType(MIRType::MagicOptimizedArguments)
+ && !op->mightBeType(MIRType::MagicHole)
+ && !op->mightBeType(MIRType::MagicIsConstructing);
+}
+
+bool
+IonBuilder::compareTrySpecialized(bool* emitted, JSOp op, MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+    // Try to emit a compare based on the input types.
+
+ MCompare::CompareType type = MCompare::determineCompareType(op, left, right);
+ if (type == MCompare::Compare_Unknown)
+ return true;
+
+ MCompare* ins = MCompare::New(alloc(), left, right, op);
+ ins->setCompareType(type);
+ ins->cacheOperandMightEmulateUndefined(constraints());
+
+ // Some compare types need to have the specific type in the rhs.
+ // Swap operands if that is not the case.
+ if (type == MCompare::Compare_StrictString && right->type() != MIRType::String)
+ ins->swapOperands();
+ else if (type == MCompare::Compare_Null && right->type() != MIRType::Null)
+ ins->swapOperands();
+ else if (type == MCompare::Compare_Undefined && right->type() != MIRType::Undefined)
+ ins->swapOperands();
+ else if (type == MCompare::Compare_Boolean && right->type() != MIRType::Boolean)
+ ins->swapOperands();
+
+ // Replace inputs with unsigned variants if needed.
+ if (type == MCompare::Compare_UInt32)
+ ins->replaceWithUnsignedOperands();
+
+ current->add(ins);
+ current->push(ins);
+
+ MOZ_ASSERT(!ins->isEffectful());
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::compareTryBitwise(bool* emitted, JSOp op, MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a bitwise compare. Check if a bitwise compare equals the wanted
+ // result for all observed operand types.
+
+    // Only allow loose and strict equality.
+ if (op != JSOP_EQ && op != JSOP_NE && op != JSOP_STRICTEQ && op != JSOP_STRICTNE)
+ return true;
+
+    // Only simple primitives (not double/string) and objects are supported,
+    // i.e. Undefined/Null/Boolean/Int32 and Object.
+ if (!ObjectOrSimplePrimitive(left) || !ObjectOrSimplePrimitive(right))
+ return true;
+
+ // Objects that emulate undefined are not supported.
+ if (left->maybeEmulatesUndefined(constraints()) || right->maybeEmulatesUndefined(constraints()))
+ return true;
+
+    // In a loose comparison, values with different tags can still compare equal,
+    // while a bitwise value comparison would report them as different. Rule out
+    // those cases here.
+ if (op == JSOP_EQ || op == JSOP_NE) {
+
+        // Undefined compared loosely to Null is not supported, because the
+        // tags differ but the values compare equal (undefined == null).
+ if ((left->mightBeType(MIRType::Undefined) && right->mightBeType(MIRType::Null)) ||
+ (left->mightBeType(MIRType::Null) && right->mightBeType(MIRType::Undefined)))
+ {
+ return true;
+ }
+
+        // Int32 compared loosely to Boolean is not supported, because the
+        // tags differ but the values can compare equal (1 == true).
+ if ((left->mightBeType(MIRType::Int32) && right->mightBeType(MIRType::Boolean)) ||
+ (left->mightBeType(MIRType::Boolean) && right->mightBeType(MIRType::Int32)))
+ {
+ return true;
+ }
+
+        // For a loose comparison of an object with a Boolean/Number/String,
+        // the object is converted via its valueOf/toString. Therefore not supported.
+ bool simpleLHS = left->mightBeType(MIRType::Boolean) || left->mightBeType(MIRType::Int32);
+ bool simpleRHS = right->mightBeType(MIRType::Boolean) || right->mightBeType(MIRType::Int32);
+ if ((left->mightBeType(MIRType::Object) && simpleRHS) ||
+ (right->mightBeType(MIRType::Object) && simpleLHS))
+ {
+ return true;
+ }
+ }
+
+ MCompare* ins = MCompare::New(alloc(), left, right, op);
+ ins->setCompareType(MCompare::Compare_Bitwise);
+ ins->cacheOperandMightEmulateUndefined(constraints());
+
+ current->add(ins);
+ current->push(ins);
+
+ MOZ_ASSERT(!ins->isEffectful());
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::compareTrySpecializedOnBaselineInspector(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to specialize based on any baseline caches that have been generated
+ // for the opcode. These will cause the instruction's type policy to insert
+ // fallible unboxes to the appropriate input types.
+
+ // Strict equality isn't supported.
+ if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE)
+ return true;
+
+ MCompare::CompareType type = inspector->expectedCompareType(pc);
+ if (type == MCompare::Compare_Unknown)
+ return true;
+
+ MCompare* ins = MCompare::New(alloc(), left, right, op);
+ ins->setCompareType(type);
+ ins->cacheOperandMightEmulateUndefined(constraints());
+
+ current->add(ins);
+ current->push(ins);
+
+ MOZ_ASSERT(!ins->isEffectful());
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::compareTrySharedStub(bool* emitted, JSOp op, MDefinition* left, MDefinition* right)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a shared stub cache.
+
+ if (JitOptions.disableSharedStubs)
+ return true;
+
+ if (JSOp(*pc) == JSOP_CASE)
+ return true;
+
+ MBinarySharedStub* stub = MBinarySharedStub::New(alloc(), left, right);
+ current->add(stub);
+ current->push(stub);
+ if (!resumeAfter(stub))
+ return false;
+
+ MUnbox* unbox = MUnbox::New(alloc(), current->pop(), MIRType::Boolean, MUnbox::Infallible);
+ current->add(unbox);
+ current->push(unbox);
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::newArrayTryTemplateObject(bool* emitted, JSObject* templateObject, uint32_t length)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!templateObject)
+ return true;
+
+ if (templateObject->is<UnboxedArrayObject>()) {
+ MOZ_ASSERT(templateObject->as<UnboxedArrayObject>().capacity() >= length);
+ if (!templateObject->as<UnboxedArrayObject>().hasInlineElements())
+ return true;
+ }
+
+ MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
+
+ size_t arraySlots =
+ gc::GetGCKindSlots(templateObject->asTenured().getAllocKind()) - ObjectElements::VALUES_PER_HEADER;
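+    // For illustration: with an alloc kind providing 8 Values and a 2-Value
+    // elements header, arraySlots is 6, so a literal like [0, 1, 2, 3] can
+    // take the inline fast path below, while longer literals fall through to
+    // the other strategies.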
+
+ if (length > arraySlots)
+ return true;
+
+ // Emit fastpath.
+
+ gc::InitialHeap heap = templateObject->group()->initialHeap(constraints());
+ MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ current->add(templateConst);
+
+ MNewArray* ins = MNewArray::New(alloc(), constraints(), length, templateConst, heap, pc);
+ current->add(ins);
+ current->push(ins);
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::newArrayTrySharedStub(bool* emitted)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a shared stub cache.
+
+ if (JitOptions.disableSharedStubs)
+ return true;
+
+ if (*pc != JSOP_NEWINIT && *pc != JSOP_NEWARRAY)
+ return true;
+
+ MInstruction* stub = MNullarySharedStub::New(alloc());
+ current->add(stub);
+ current->push(stub);
+
+ if (!resumeAfter(stub))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::newArrayTryVM(bool* emitted, JSObject* templateObject, uint32_t length)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Emit a VM call.
+
+ gc::InitialHeap heap = gc::DefaultHeap;
+ MConstant* templateConst = MConstant::New(alloc(), NullValue());
+
+ if (templateObject) {
+ heap = templateObject->group()->initialHeap(constraints());
+ templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ }
+
+ current->add(templateConst);
+
+ MNewArray* ins = MNewArray::NewVM(alloc(), constraints(), length, templateConst, heap, pc);
+ current->add(ins);
+ current->push(ins);
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_newarray(uint32_t length)
+{
+ JSObject* templateObject = inspector->getTemplateObject(pc);
+ if (!jsop_newarray(templateObject, length))
+ return false;
+
+ // Improve resulting typeset.
+ ObjectGroup* templateGroup = inspector->getTemplateObjectGroup(pc);
+ if (templateGroup) {
+ TemporaryTypeSet* types = MakeSingletonTypeSet(constraints(), templateGroup);
+ current->peek(-1)->setResultTypeSet(types);
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_newarray(JSObject* templateObject, uint32_t length)
+{
+ bool emitted = false;
+
+ if (!forceInlineCaches()) {
+ if (!newArrayTryTemplateObject(&emitted, templateObject, length) || emitted)
+ return emitted;
+ }
+
+ if (!newArrayTrySharedStub(&emitted) || emitted)
+ return emitted;
+
+ if (!newArrayTryVM(&emitted, templateObject, length) || emitted)
+ return emitted;
+
+ MOZ_CRASH("newarray should have been emited");
+}
+
+bool
+IonBuilder::jsop_newarray_copyonwrite()
+{
+ ArrayObject* templateObject = ObjectGroup::getCopyOnWriteObject(script(), pc);
+
+    // The baseline compiler should have ensured the template object has a type
+    // with the copy-on-write flag set already. During the arguments usage
+    // analysis, however, the baseline compiler hasn't run yet; in that case the
+    // template object's type doesn't matter.
+ MOZ_ASSERT_IF(info().analysisMode() != Analysis_ArgumentsUsage,
+ templateObject->group()->hasAnyFlags(OBJECT_FLAG_COPY_ON_WRITE));
+
+ MNewArrayCopyOnWrite* ins =
+ MNewArrayCopyOnWrite::New(alloc(), constraints(), templateObject,
+ templateObject->group()->initialHeap(constraints()));
+
+ current->add(ins);
+ current->push(ins);
+
+ return true;
+}
+
+bool
+IonBuilder::newObjectTryTemplateObject(bool* emitted, JSObject* templateObject)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!templateObject)
+ return true;
+
+ if (templateObject->is<PlainObject>() && templateObject->as<PlainObject>().hasDynamicSlots())
+ return true;
+
+ // Emit fastpath.
+
+ MNewObject::Mode mode;
+ if (JSOp(*pc) == JSOP_NEWOBJECT || JSOp(*pc) == JSOP_NEWINIT)
+ mode = MNewObject::ObjectLiteral;
+ else
+ mode = MNewObject::ObjectCreate;
+
+ gc::InitialHeap heap = templateObject->group()->initialHeap(constraints());
+ MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ current->add(templateConst);
+
+ MNewObject* ins = MNewObject::New(alloc(), constraints(), templateConst, heap, mode);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::newObjectTrySharedStub(bool* emitted)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a shared stub cache.
+
+ if (JitOptions.disableSharedStubs)
+ return true;
+
+ MInstruction* stub = MNullarySharedStub::New(alloc());
+ current->add(stub);
+ current->push(stub);
+
+ if (!resumeAfter(stub))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::newObjectTryVM(bool* emitted, JSObject* templateObject)
+{
+ // Emit a VM call.
+ MOZ_ASSERT(JSOp(*pc) == JSOP_NEWOBJECT || JSOp(*pc) == JSOP_NEWINIT);
+
+ gc::InitialHeap heap = gc::DefaultHeap;
+ MConstant* templateConst = MConstant::New(alloc(), NullValue());
+
+ if (templateObject) {
+ heap = templateObject->group()->initialHeap(constraints());
+ templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
+ }
+
+ current->add(templateConst);
+
+ MNewObject* ins = MNewObject::NewVM(alloc(), constraints(), templateConst, heap,
+ MNewObject::ObjectLiteral);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_newobject()
+{
+ bool emitted = false;
+
+ JSObject* templateObject = inspector->getTemplateObject(pc);
+
+ if (!forceInlineCaches()) {
+ if (!newObjectTryTemplateObject(&emitted, templateObject) || emitted)
+ return emitted;
+ }
+ if (!newObjectTrySharedStub(&emitted) || emitted)
+ return emitted;
+
+ if (!newObjectTryVM(&emitted, templateObject) || emitted)
+ return emitted;
+
+ MOZ_CRASH("newobject should have been emited");
+}
+
+bool
+IonBuilder::jsop_initelem()
+{
+ MDefinition* value = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ MInitElem* initElem = MInitElem::New(alloc(), obj, id, value);
+ current->add(initElem);
+
+ return resumeAfter(initElem);
+}
+
+bool
+IonBuilder::jsop_initelem_array()
+{
+ MDefinition* value = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+    // Make sure that arrays have the type being written to them by the
+    // initializer, and that arrays are marked as non-packed when writing holes
+    // to them during initialization.
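+    // For example, an initializer like |[1, , 3]| writes a hole, so the group
+    // must already be marked OBJECT_FLAG_NON_PACKED, and |[1, "a"]| requires
+    // the group's element types to include both Int32 and String; otherwise we
+    // fall back to the stub below.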
+ bool needStub = false;
+ JSValueType unboxedType = JSVAL_TYPE_MAGIC;
+ if (shouldAbortOnPreliminaryGroups(obj)) {
+ needStub = true;
+ } else if (!obj->resultTypeSet() ||
+ obj->resultTypeSet()->unknownObject() ||
+ obj->resultTypeSet()->getObjectCount() != 1)
+ {
+ needStub = true;
+ } else {
+ MOZ_ASSERT(obj->resultTypeSet()->getObjectCount() == 1);
+ TypeSet::ObjectKey* initializer = obj->resultTypeSet()->getObject(0);
+ if (initializer->clasp() == &UnboxedArrayObject::class_) {
+ if (initializer->group()->unboxedLayout().nativeGroup())
+ needStub = true;
+ else
+ unboxedType = initializer->group()->unboxedLayout().elementType();
+ }
+ if (value->type() == MIRType::MagicHole) {
+ if (!initializer->hasFlags(constraints(), OBJECT_FLAG_NON_PACKED))
+ needStub = true;
+ } else if (!initializer->unknownProperties()) {
+ HeapTypeSetKey elemTypes = initializer->property(JSID_VOID);
+ if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) {
+ elemTypes.freeze(constraints());
+ needStub = true;
+ }
+ }
+ }
+
+ uint32_t index = GET_UINT32(pc);
+ if (needStub) {
+ MCallInitElementArray* store = MCallInitElementArray::New(alloc(), obj, index, value);
+ current->add(store);
+ return resumeAfter(store);
+ }
+
+ return initializeArrayElement(obj, index, value, unboxedType, /* addResumePoint = */ true);
+}
+
+bool
+IonBuilder::initializeArrayElement(MDefinition* obj, size_t index, MDefinition* value,
+ JSValueType unboxedType,
+ bool addResumePointAndIncrementInitializedLength)
+{
+ MConstant* id = MConstant::New(alloc(), Int32Value(index));
+ current->add(id);
+
+ // Get the elements vector.
+ MElements* elements = MElements::New(alloc(), obj, unboxedType != JSVAL_TYPE_MAGIC);
+ current->add(elements);
+
+ if (unboxedType != JSVAL_TYPE_MAGIC) {
+ // Note: storeUnboxedValue takes care of any post barriers on the value.
+ storeUnboxedValue(obj, elements, 0, id, unboxedType, value, /* preBarrier = */ false);
+
+ if (addResumePointAndIncrementInitializedLength) {
+ MInstruction* increment = MIncrementUnboxedArrayInitializedLength::New(alloc(), obj);
+ current->add(increment);
+
+ if (!resumeAfter(increment))
+ return false;
+ }
+ } else {
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteBarrier::New(alloc(), obj, value));
+
+ if ((obj->isNewArray() && obj->toNewArray()->convertDoubleElements()) ||
+ (obj->isNullarySharedStub() &&
+ obj->resultTypeSet()->convertDoubleElements(constraints()) == TemporaryTypeSet::AlwaysConvertToDoubles))
+ {
+ MInstruction* valueDouble = MToDouble::New(alloc(), value);
+ current->add(valueDouble);
+ value = valueDouble;
+ }
+
+ // Store the value.
+ MStoreElement* store = MStoreElement::New(alloc(), elements, id, value,
+ /* needsHoleCheck = */ false);
+ current->add(store);
+
+ if (addResumePointAndIncrementInitializedLength) {
+ // Update the initialized length. (The template object for this
+ // array has the array's ultimate length, so the length field is
+ // already correct: no updating needed.)
+ MSetInitializedLength* initLength = MSetInitializedLength::New(alloc(), elements, id);
+ current->add(initLength);
+
+ if (!resumeAfter(initLength))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_mutateproto()
+{
+ MDefinition* value = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ MMutateProto* mutate = MMutateProto::New(alloc(), obj, value);
+ current->add(mutate);
+ return resumeAfter(mutate);
+}
+
+bool
+IonBuilder::jsop_initprop(PropertyName* name)
+{
+ bool useSlowPath = false;
+
+ MDefinition* value = current->peek(-1);
+ MDefinition* obj = current->peek(-2);
+ if (obj->isLambda()) {
+ useSlowPath = true;
+ } else if (obj->isNewObject()) {
+ if (JSObject* templateObject = obj->toNewObject()->templateObject()) {
+ if (templateObject->is<PlainObject>()) {
+ if (!templateObject->as<PlainObject>().containsPure(name))
+ useSlowPath = true;
+ } else {
+ MOZ_ASSERT(templateObject->as<UnboxedPlainObject>().layout().lookup(name));
+ }
+ } else {
+ useSlowPath = true;
+ }
+ } else {
+ MOZ_ASSERT(obj->isNullarySharedStub());
+ useSlowPath = true;
+ }
+
+ if (useSlowPath) {
+ current->pop();
+ MInitProp* init = MInitProp::New(alloc(), obj, name, value);
+ current->add(init);
+ return resumeAfter(init);
+ }
+
+ MInstruction* last = *current->rbegin();
+
+    // This is definitely initializing an 'own' property of the object, so
+    // treat it as an assignment.
+ if (!jsop_setprop(name))
+ return false;
+
+ // SETPROP pushed the value, instead of the object. Fix this on the stack,
+ // and check the most recent resume point to see if it needs updating too.
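+    // For example, in an object literal like |({ x: f(), y: 0 })| the INITPROP
+    // for |x| must leave the object on the stack for the next initializer, so
+    // after the SETPROP below we re-push |obj| and patch the ResumeAfter resume
+    // point to record |obj| instead of the assigned value.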
+ current->pop();
+ current->push(obj);
+ for (MInstructionReverseIterator riter = current->rbegin(); *riter != last; riter++) {
+ if (MResumePoint* resumePoint = riter->resumePoint()) {
+ MOZ_ASSERT(resumePoint->pc() == pc);
+ if (resumePoint->mode() == MResumePoint::ResumeAfter) {
+ size_t index = resumePoint->numOperands() - 1;
+ resumePoint->replaceOperand(index, obj);
+ }
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_initprop_getter_setter(PropertyName* name)
+{
+ MDefinition* value = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ MInitPropGetterSetter* init = MInitPropGetterSetter::New(alloc(), obj, name, value);
+ current->add(init);
+ return resumeAfter(init);
+}
+
+bool
+IonBuilder::jsop_initelem_getter_setter()
+{
+ MDefinition* value = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ MInitElemGetterSetter* init = MInitElemGetterSetter::New(alloc(), obj, id, value);
+ current->add(init);
+ return resumeAfter(init);
+}
+
+MBasicBlock*
+IonBuilder::addBlock(MBasicBlock* block, uint32_t loopDepth)
+{
+ if (!block)
+ return nullptr;
+ if (block->pc() && script()->hasScriptCounts())
+ block->setHitCount(script()->getHitCount(block->pc()));
+ graph().addBlock(block);
+ block->setLoopDepth(loopDepth);
+ return block;
+}
+
+MBasicBlock*
+IonBuilder::newBlock(MBasicBlock* predecessor, jsbytecode* pc)
+{
+ MBasicBlock* block = MBasicBlock::New(graph(), &analysis(), info(), predecessor,
+ bytecodeSite(pc), MBasicBlock::NORMAL);
+ return addBlock(block, loopDepth_);
+}
+
+MBasicBlock*
+IonBuilder::newBlock(MBasicBlock* predecessor, jsbytecode* pc, MResumePoint* priorResumePoint)
+{
+ MBasicBlock* block = MBasicBlock::NewWithResumePoint(graph(), info(), predecessor,
+ bytecodeSite(pc), priorResumePoint);
+ return addBlock(block, loopDepth_);
+}
+
+MBasicBlock*
+IonBuilder::newBlockPopN(MBasicBlock* predecessor, jsbytecode* pc, uint32_t popped)
+{
+ MBasicBlock* block = MBasicBlock::NewPopN(graph(), info(), predecessor, bytecodeSite(pc),
+ MBasicBlock::NORMAL, popped);
+ return addBlock(block, loopDepth_);
+}
+
+MBasicBlock*
+IonBuilder::newBlockAfter(MBasicBlock* at, MBasicBlock* predecessor, jsbytecode* pc)
+{
+ MBasicBlock* block = MBasicBlock::New(graph(), &analysis(), info(), predecessor,
+ bytecodeSite(pc), MBasicBlock::NORMAL);
+ if (!block)
+ return nullptr;
+ block->setHitCount(0); // osr block
+ graph().insertBlockAfter(at, block);
+ return block;
+}
+
+MBasicBlock*
+IonBuilder::newBlock(MBasicBlock* predecessor, jsbytecode* pc, uint32_t loopDepth)
+{
+ MBasicBlock* block = MBasicBlock::New(graph(), &analysis(), info(), predecessor,
+ bytecodeSite(pc), MBasicBlock::NORMAL);
+ return addBlock(block, loopDepth);
+}
+
+MBasicBlock*
+IonBuilder::newOsrPreheader(MBasicBlock* predecessor, jsbytecode* loopEntry, jsbytecode* beforeLoopEntry)
+{
+ MOZ_ASSERT(LoopEntryCanIonOsr(loopEntry));
+ MOZ_ASSERT(loopEntry == info().osrPc());
+
+ // Create two blocks: one for the OSR entry with no predecessors, one for
+ // the preheader, which has the OSR entry block as a predecessor. The
+ // OSR block is always the second block (with id 1).
+ MBasicBlock* osrBlock = newBlockAfter(*graph().begin(), loopEntry);
+ MBasicBlock* preheader = newBlock(predecessor, loopEntry);
+ if (!osrBlock || !preheader)
+ return nullptr;
+
+ // Give the pre-header the same hit count as the code before the loop.
+ if (script()->hasScriptCounts())
+ preheader->setHitCount(script()->getHitCount(beforeLoopEntry));
+
+ MOsrEntry* entry = MOsrEntry::New(alloc());
+ osrBlock->add(entry);
+
+ // Initialize |envChain|.
+ {
+ uint32_t slot = info().environmentChainSlot();
+
+ MInstruction* envv;
+ if (analysis().usesEnvironmentChain()) {
+ envv = MOsrEnvironmentChain::New(alloc(), entry);
+ } else {
+ // Use an undefined value if the script does not need its env
+ // chain, to match the type that is already being tracked for the
+ // slot.
+ envv = MConstant::New(alloc(), UndefinedValue());
+ }
+
+ osrBlock->add(envv);
+ osrBlock->initSlot(slot, envv);
+ }
+ // Initialize |return value|
+ {
+ MInstruction* returnValue;
+ if (!script()->noScriptRval())
+ returnValue = MOsrReturnValue::New(alloc(), entry);
+ else
+ returnValue = MConstant::New(alloc(), UndefinedValue());
+ osrBlock->add(returnValue);
+ osrBlock->initSlot(info().returnValueSlot(), returnValue);
+ }
+
+ // Initialize arguments object.
+ bool needsArgsObj = info().needsArgsObj();
+ MInstruction* argsObj = nullptr;
+ if (info().hasArguments()) {
+ if (needsArgsObj)
+ argsObj = MOsrArgumentsObject::New(alloc(), entry);
+ else
+ argsObj = MConstant::New(alloc(), UndefinedValue());
+ osrBlock->add(argsObj);
+ osrBlock->initSlot(info().argsObjSlot(), argsObj);
+ }
+
+ if (info().funMaybeLazy()) {
+ // Initialize |this| parameter.
+ MParameter* thisv = MParameter::New(alloc(), MParameter::THIS_SLOT, nullptr);
+ osrBlock->add(thisv);
+ osrBlock->initSlot(info().thisSlot(), thisv);
+
+ // Initialize arguments.
+ for (uint32_t i = 0; i < info().nargs(); i++) {
+ uint32_t slot = needsArgsObj ? info().argSlotUnchecked(i) : info().argSlot(i);
+
+ // Only grab arguments from the arguments object if the arguments object
+ // aliases formals. If the argsobj does not alias formals, then the
+ // formals may have been assigned to during interpretation, and that change
+ // will not be reflected in the argsobj.
+ if (needsArgsObj && info().argsObjAliasesFormals()) {
+ MOZ_ASSERT(argsObj && argsObj->isOsrArgumentsObject());
+ // If this is an aliased formal, then the arguments object
+ // contains a hole at this index. Any references to this
+ // variable in the jitcode will come from JSOP_*ALIASEDVAR
+ // opcodes, so the slot itself can be set to undefined. If
+ // it's not aliased, it must be retrieved from the arguments
+ // object.
+ MInstruction* osrv;
+ if (script()->formalIsAliased(i))
+ osrv = MConstant::New(alloc(), UndefinedValue());
+ else
+ osrv = MGetArgumentsObjectArg::New(alloc(), argsObj, i);
+
+ osrBlock->add(osrv);
+ osrBlock->initSlot(slot, osrv);
+ } else {
+ MParameter* arg = MParameter::New(alloc(), i, nullptr);
+ osrBlock->add(arg);
+ osrBlock->initSlot(slot, arg);
+ }
+ }
+ }
+
+ // Initialize locals.
+ for (uint32_t i = 0; i < info().nlocals(); i++) {
+ uint32_t slot = info().localSlot(i);
+ ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(i);
+
+ MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
+ if (!osrv)
+ return nullptr;
+ osrBlock->add(osrv);
+ osrBlock->initSlot(slot, osrv);
+ }
+
+ // Initialize stack.
+ uint32_t numStackSlots = preheader->stackDepth() - info().firstStackSlot();
+ for (uint32_t i = 0; i < numStackSlots; i++) {
+ uint32_t slot = info().stackSlot(i);
+ ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(info().nlocals() + i);
+
+ MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
+ if (!osrv)
+ return nullptr;
+ osrBlock->add(osrv);
+ osrBlock->initSlot(slot, osrv);
+ }
+
+ // Create an MStart to hold the first valid MResumePoint.
+ MStart* start = MStart::New(alloc());
+ osrBlock->add(start);
+
+ // MOsrValue instructions are infallible, so the first MResumePoint must
+ // occur after they execute, at the point of the MStart.
+ if (!resumeAt(start, loopEntry))
+ return nullptr;
+
+ // Link the same MResumePoint from the MStart to each MOsrValue.
+    // This causes logic in ShouldSpecializeInput() to not replace Uses with
+    // Unboxes in the MResumePoint, so that the MStart always sees Values.
+ if (!osrBlock->linkOsrValues(start))
+ return nullptr;
+
+    // Clone the types of the other predecessor of the pre-header onto the OSR
+    // block, so that the pre-header phis won't discard the specialized types of
+    // the predecessor.
+ MOZ_ASSERT(predecessor->stackDepth() == osrBlock->stackDepth());
+ MOZ_ASSERT(info().environmentChainSlot() == 0);
+
+ // Treat the OSR values as having the same type as the existing values
+ // coming in to the loop. These will be fixed up with appropriate
+ // unboxing and type barriers in finishLoop, once the possible types
+ // at the loop header are known.
+ for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++) {
+ MDefinition* existing = current->getSlot(i);
+ MDefinition* def = osrBlock->getSlot(i);
+ MOZ_ASSERT_IF(!needsArgsObj || !info().isSlotAliased(i), def->type() == MIRType::Value);
+
+ // Aliased slots are never accessed, since they need to go through
+ // the callobject. No need to type them here.
+ if (info().isSlotAliased(i))
+ continue;
+
+ def->setResultType(existing->type());
+ def->setResultTypeSet(existing->resultTypeSet());
+ }
+
+ // Finish the osrBlock.
+ osrBlock->end(MGoto::New(alloc(), preheader));
+ if (!preheader->addPredecessor(alloc(), osrBlock))
+ return nullptr;
+ graph().setOsrBlock(osrBlock);
+
+ return preheader;
+}
+
+MBasicBlock*
+IonBuilder::newPendingLoopHeader(MBasicBlock* predecessor, jsbytecode* pc, bool osr, bool canOsr,
+ unsigned stackPhiCount)
+{
+ loopDepth_++;
+ // If this site can OSR, all values on the expression stack are part of the loop.
+ if (canOsr)
+ stackPhiCount = predecessor->stackDepth() - info().firstStackSlot();
+ MBasicBlock* block = MBasicBlock::NewPendingLoopHeader(graph(), info(), predecessor,
+ bytecodeSite(pc), stackPhiCount);
+ if (!addBlock(block, loopDepth_))
+ return nullptr;
+
+ if (osr) {
+ // Incorporate type information from the OSR frame into the loop
+ // header. The OSR frame may have unexpected types due to type changes
+ // within the loop body or due to incomplete profiling information,
+ // in which case this may avoid restarts of loop analysis or bailouts
+ // during the OSR itself.
+
+ MOZ_ASSERT(info().firstLocalSlot() - info().firstArgSlot() ==
+ baselineFrame_->argTypes.length());
+ MOZ_ASSERT(block->stackDepth() - info().firstLocalSlot() ==
+ baselineFrame_->varTypes.length());
+
+ // Unbox the MOsrValue if it is known to be unboxable.
+ for (uint32_t i = info().startArgSlot(); i < block->stackDepth(); i++) {
+
+            // The values of aliased args and slots live in the call object, so we
+            // can't read them from the baseline frame.
+ if (info().isSlotAliased(i))
+ continue;
+
+ MPhi* phi = block->getSlot(i)->toPhi();
+
+ // Get the type from the baseline frame.
+ TypeSet::Type existingType = TypeSet::UndefinedType();
+ uint32_t arg = i - info().firstArgSlot();
+ uint32_t var = i - info().firstLocalSlot();
+ if (info().funMaybeLazy() && i == info().thisSlot())
+ existingType = baselineFrame_->thisType;
+ else if (arg < info().nargs())
+ existingType = baselineFrame_->argTypes[arg];
+ else
+ existingType = baselineFrame_->varTypes[var];
+
+ if (existingType.isSingletonUnchecked())
+ checkNurseryObject(existingType.singleton());
+
+ // Extract typeset from value.
+ LifoAlloc* lifoAlloc = alloc().lifoAlloc();
+ TemporaryTypeSet* typeSet =
+ lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc, existingType);
+ if (!typeSet)
+ return nullptr;
+ MIRType type = typeSet->getKnownMIRType();
+ if (!phi->addBackedgeType(alloc(), type, typeSet))
+ return nullptr;
+ }
+ }
+
+ return block;
+}
+
+MTest*
+IonBuilder::newTest(MDefinition* ins, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+{
+ MTest* test = MTest::New(alloc(), ins, ifTrue, ifFalse);
+ test->cacheOperandMightEmulateUndefined(constraints());
+ return test;
+}
+
+// A resume point is a mapping of stack slots to MDefinitions. It is used to
+// capture the environment such that if a guard fails, and IonMonkey needs
+// to exit back to the interpreter, the interpreter state can be
+// reconstructed.
+//
+// We capture stack state at critical points:
+// * (1) At the beginning of every basic block.
+// * (2) After every effectful operation.
+//
+// As long as these two properties are maintained, instructions can
+// be moved, hoisted, or eliminated without problems, and ops without side
+// effects do not need to worry about capturing state at precisely the
+// right point in time.
+//
+// Effectful instructions, of course, need to capture state after completion,
+// where the interpreter will not attempt to repeat the operation. For this,
+// ResumeAfter must be used. The state is attached directly to the effectful
+// instruction to ensure that no intermediate instructions could be injected
+// in between by a future analysis pass.
+//
+// During LIR construction, if an instruction can bail back to the interpreter,
+// we create an LSnapshot, which uses the last known resume point to request
+// register/stack assignments for every live value.
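+//
+// For example (roughly), in
+//
+//   obj.x = f();
+//
+// the MCall for f() is effectful and carries a ResumeAfter resume point; if a
+// later guard fails, the interpreter resumes after the call and does not
+// re-invoke f(), while the SETPROP is then performed by the interpreter.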
+bool
+IonBuilder::resume(MInstruction* ins, jsbytecode* pc, MResumePoint::Mode mode)
+{
+ MOZ_ASSERT(ins->isEffectful() || !ins->isMovable());
+
+ MResumePoint* resumePoint = MResumePoint::New(alloc(), ins->block(), pc,
+ mode);
+ if (!resumePoint) {
+ abortReason_ = AbortReason_Alloc;
+ return false;
+ }
+ ins->setResumePoint(resumePoint);
+ return true;
+}
+
+bool
+IonBuilder::resumeAt(MInstruction* ins, jsbytecode* pc)
+{
+ return resume(ins, pc, MResumePoint::ResumeAt);
+}
+
+bool
+IonBuilder::resumeAfter(MInstruction* ins)
+{
+ return resume(ins, pc, MResumePoint::ResumeAfter);
+}
+
+bool
+IonBuilder::maybeInsertResume()
+{
+ // Create a resume point at the current position, without an existing
+ // effectful instruction. This resume point is not necessary for correct
+ // behavior (see above), but is added to avoid holding any values from the
+ // previous resume point which are now dead. This shortens the live ranges
+ // of such values and improves register allocation.
+ //
+ // This optimization is not performed outside of loop bodies, where good
+ // register allocation is not as critical, in order to avoid creating
+ // excessive resume points.
+
+ if (loopDepth_ == 0)
+ return true;
+
+ MNop* ins = MNop::New(alloc());
+ current->add(ins);
+
+ return resumeAfter(ins);
+}
+
+void
+IonBuilder::maybeMarkEmpty(MDefinition* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ // When one of the operands has no type information, mark the output
+ // as having no possible types too. This is to avoid degrading
+ // subsequent analysis.
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ if (!ins->emptyResultTypeSet())
+ continue;
+
+ TemporaryTypeSet* types = alloc().lifoAlloc()->new_<TemporaryTypeSet>();
+ if (types)
+ ins->setResultTypeSet(types);
+ }
+}
+
+// Return whether property lookups on clasp can be performed without observable side effects.
+static bool
+ClassHasEffectlessLookup(const Class* clasp)
+{
+ return (clasp == &UnboxedPlainObject::class_) ||
+ (clasp == &UnboxedArrayObject::class_) ||
+ IsTypedObjectClass(clasp) ||
+ (clasp->isNative() && !clasp->getOpsLookupProperty());
+}
+
+// Return whether an object might have a property for name which is not
+// accounted for by type information.
+static bool
+ObjectHasExtraOwnProperty(CompileCompartment* comp, TypeSet::ObjectKey* object, jsid id)
+{
+ // Some typed object properties are not reflected in type information.
+ if (object->isGroup() && object->group()->maybeTypeDescr())
+ return object->group()->typeDescr().hasProperty(comp->runtime()->names(), id);
+
+ const Class* clasp = object->clasp();
+
+ // Array |length| properties are not reflected in type information.
+ if (clasp == &ArrayObject::class_)
+ return JSID_IS_ATOM(id, comp->runtime()->names().length);
+
+ // Resolve hooks can install new properties on objects on demand.
+ JSObject* singleton = object->isSingleton() ? object->singleton() : nullptr;
+ return ClassMayResolveId(comp->runtime()->names(), clasp, id, singleton);
+}
+
+void
+IonBuilder::insertRecompileCheck()
+{
+ // No need for recompile checks if this is the highest optimization level.
+ OptimizationLevel curLevel = optimizationInfo().level();
+ if (IonOptimizations.isLastLevel(curLevel))
+ return;
+
+ // Add recompile check.
+
+ // Get the topmost builder. The topmost script will get recompiled when
+ // warm-up counter is high enough to justify a higher optimization level.
+ IonBuilder* topBuilder = outermostBuilder();
+
+ // Add recompile check to recompile when the warm-up count reaches the
+ // threshold of the next optimization level.
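+    // For illustration, if the next level's warm-up threshold for this script
+    // is, say, 10000, the emitted MRecompileCheck compares the outermost
+    // script's warm-up counter against that threshold and triggers an Ion
+    // recompile at the higher optimization level once it is reached.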
+ OptimizationLevel nextLevel = IonOptimizations.nextLevel(curLevel);
+ const OptimizationInfo* info = IonOptimizations.get(nextLevel);
+ uint32_t warmUpThreshold = info->compilerWarmUpThreshold(topBuilder->script());
+ MRecompileCheck* check = MRecompileCheck::New(alloc(), topBuilder->script(), warmUpThreshold,
+ MRecompileCheck::RecompileCheck_OptimizationLevel);
+ current->add(check);
+}
+
+JSObject*
+IonBuilder::testSingletonProperty(JSObject* obj, jsid id)
+{
+ // We would like to completely no-op property/global accesses which can
+ // produce only a particular JSObject. When indicating the access result is
+ // definitely an object, type inference does not account for the
+ // possibility that the property is entirely missing from the input object
+ // and its prototypes (if this happens, a semantic trigger would be hit and
+ // the pushed types updated, even if there is no type barrier).
+ //
+ // If the access definitely goes through obj, either directly or on the
+ // prototype chain, and the object has singleton type, then the type
+ // information for that property reflects the value that will definitely be
+ // read on accesses to the object. If the property is later deleted or
+ // reconfigured as a getter/setter then the type information for the
+ // property will change and trigger invalidation.
+
+ while (obj) {
+ if (!ClassHasEffectlessLookup(obj->getClass()))
+ return nullptr;
+
+ TypeSet::ObjectKey* objKey = TypeSet::ObjectKey::get(obj);
+ if (analysisContext)
+ objKey->ensureTrackedProperty(analysisContext, id);
+
+ if (objKey->unknownProperties())
+ return nullptr;
+
+ HeapTypeSetKey property = objKey->property(id);
+ if (property.isOwnProperty(constraints())) {
+ if (obj->isSingleton())
+ return property.singleton(constraints());
+ return nullptr;
+ }
+
+ if (ObjectHasExtraOwnProperty(compartment, objKey, id))
+ return nullptr;
+
+ obj = checkNurseryObject(obj->staticPrototype());
+ }
+
+ return nullptr;
+}
+
+JSObject*
+IonBuilder::testSingletonPropertyTypes(MDefinition* obj, jsid id)
+{
+ // As for TestSingletonProperty, but the input is any value in a type set
+ // rather than a specific object.
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (types && types->unknownObject())
+ return nullptr;
+
+ JSObject* objectSingleton = types ? types->maybeSingleton() : nullptr;
+ if (objectSingleton)
+ return testSingletonProperty(objectSingleton, id);
+
+ MIRType objType = obj->type();
+ if (objType == MIRType::Value && types)
+ objType = types->getKnownMIRType();
+
+ JSProtoKey key;
+ switch (objType) {
+ case MIRType::String:
+ key = JSProto_String;
+ break;
+
+ case MIRType::Symbol:
+ key = JSProto_Symbol;
+ break;
+
+ case MIRType::Int32:
+ case MIRType::Double:
+ key = JSProto_Number;
+ break;
+
+ case MIRType::Boolean:
+ key = JSProto_Boolean;
+ break;
+
+ case MIRType::Object: {
+ if (!types)
+ return nullptr;
+
+ // For property accesses which may be on many objects, we just need to
+ // find a prototype common to all the objects; if that prototype
+ // has the singleton property, the access will not be on a missing property.
+ JSObject* singleton = nullptr;
+ for (unsigned i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+ if (analysisContext)
+ key->ensureTrackedProperty(analysisContext, id);
+
+ const Class* clasp = key->clasp();
+ if (!ClassHasEffectlessLookup(clasp) || ObjectHasExtraOwnProperty(compartment, key, id))
+ return nullptr;
+ if (key->unknownProperties())
+ return nullptr;
+ HeapTypeSetKey property = key->property(id);
+ if (property.isOwnProperty(constraints()))
+ return nullptr;
+
+ if (JSObject* proto = checkNurseryObject(key->proto().toObjectOrNull())) {
+ // Test this type.
+ JSObject* thisSingleton = testSingletonProperty(proto, id);
+ if (!thisSingleton)
+ return nullptr;
+ if (singleton) {
+ if (thisSingleton != singleton)
+ return nullptr;
+ } else {
+ singleton = thisSingleton;
+ }
+ } else {
+ // Can't be on the prototype chain with no prototypes...
+ return nullptr;
+ }
+ }
+ return singleton;
+ }
+ default:
+ return nullptr;
+ }
+
+ JSObject* proto = GetBuiltinPrototypePure(&script()->global(), key);
+ if (proto)
+ return testSingletonProperty(proto, id);
+
+ return nullptr;
+}
+
+ResultWithOOM<bool>
+IonBuilder::testNotDefinedProperty(MDefinition* obj, jsid id)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject() || types->getKnownMIRType() != MIRType::Object)
+ return ResultWithOOM<bool>::ok(false);
+
+ for (unsigned i = 0, count = types->getObjectCount(); i < count; i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ while (true) {
+ if (!alloc().ensureBallast())
+ return ResultWithOOM<bool>::fail();
+
+ if (!key->hasStableClassAndProto(constraints()) || key->unknownProperties())
+ return ResultWithOOM<bool>::ok(false);
+
+ const Class* clasp = key->clasp();
+ if (!ClassHasEffectlessLookup(clasp) || ObjectHasExtraOwnProperty(compartment, key, id))
+ return ResultWithOOM<bool>::ok(false);
+
+ // If the object is a singleton, we can do a lookup now to avoid
+ // unnecessary invalidations later on, in case the property types
+ // have not yet been instantiated.
+ if (key->isSingleton() &&
+ key->singleton()->is<NativeObject>() &&
+ key->singleton()->as<NativeObject>().lookupPure(id))
+ {
+ return ResultWithOOM<bool>::ok(false);
+ }
+
+ HeapTypeSetKey property = key->property(id);
+ if (property.isOwnProperty(constraints()))
+ return ResultWithOOM<bool>::ok(false);
+
+ JSObject* proto = checkNurseryObject(key->proto().toObjectOrNull());
+ if (!proto)
+ break;
+ key = TypeSet::ObjectKey::get(proto);
+ }
+ }
+
+ return ResultWithOOM<bool>::ok(true);
+}
+
+bool
+IonBuilder::pushTypeBarrier(MDefinition* def, TemporaryTypeSet* observed, BarrierKind kind)
+{
+ MOZ_ASSERT(def == current->peek(-1));
+
+ MDefinition* replace = addTypeBarrier(current->pop(), observed, kind);
+ if (!replace)
+ return false;
+
+ current->push(replace);
+ return true;
+}
+
+// Given an observed type set, annotates the IR as much as possible:
+// (1) If no type information is provided, the given value is returned.
+// (2) If a single type definitely exists, and no type barrier is needed,
+// then an infallible unbox instruction is returned.
+// (3) If a type barrier is needed, but has an unknown type set, the given
+// value is returned.
+// (4) Lastly, a type barrier instruction is added and returned.
+MDefinition*
+IonBuilder::addTypeBarrier(MDefinition* def, TemporaryTypeSet* observed, BarrierKind kind,
+ MTypeBarrier** pbarrier)
+{
+ // Barriers are never needed for instructions whose result will not be used.
+ if (BytecodeIsPopped(pc))
+ return def;
+
+ // If the instruction has no side effects, we'll resume the entire operation.
+ // The actual type barrier will occur in the interpreter. If the
+ // instruction is effectful, even if it has a singleton type, there
+ // must be a resume point capturing the original def, and resuming
+ // to that point will explicitly monitor the new type.
+ if (kind == BarrierKind::NoBarrier) {
+ MDefinition* replace = ensureDefiniteType(def, observed->getKnownMIRType());
+ replace->setResultTypeSet(observed);
+ return replace;
+ }
+
+ if (observed->unknown())
+ return def;
+
+ MTypeBarrier* barrier = MTypeBarrier::New(alloc(), def, observed, kind);
+ current->add(barrier);
+
+ if (pbarrier)
+ *pbarrier = barrier;
+
+ if (barrier->type() == MIRType::Undefined)
+ return constant(UndefinedValue());
+ if (barrier->type() == MIRType::Null)
+ return constant(NullValue());
+
+ return barrier;
+}
+
+bool
+IonBuilder::pushDOMTypeBarrier(MInstruction* ins, TemporaryTypeSet* observed, JSFunction* func)
+{
+ MOZ_ASSERT(func && func->isNative() && func->jitInfo());
+
+ const JSJitInfo* jitinfo = func->jitInfo();
+ bool barrier = DOMCallNeedsBarrier(jitinfo, observed);
+ // Need to be a bit careful: if jitinfo->returnType is JSVAL_TYPE_DOUBLE but
+ // types->getKnownMIRType() is MIRType::Int32, then don't unconditionally
+ // unbox as a double. Instead, go ahead and barrier on having an int type,
+ // since we know we need a barrier anyway due to the type mismatch. This is
+ // the only situation in which TI actually has more information about the
+ // JSValueType than codegen can, short of jitinfo->returnType just being
+ // JSVAL_TYPE_UNKNOWN.
+ MDefinition* replace = ins;
+ if (jitinfo->returnType() != JSVAL_TYPE_DOUBLE ||
+ observed->getKnownMIRType() != MIRType::Int32) {
+ replace = ensureDefiniteType(ins, MIRTypeFromValueType(jitinfo->returnType()));
+ if (replace != ins) {
+ current->pop();
+ current->push(replace);
+ }
+ } else {
+ MOZ_ASSERT(barrier);
+ }
+
+ return pushTypeBarrier(replace, observed,
+ barrier ? BarrierKind::TypeSet : BarrierKind::NoBarrier);
+}
+
+MDefinition*
+IonBuilder::ensureDefiniteType(MDefinition* def, MIRType definiteType)
+{
+ MInstruction* replace;
+ switch (definiteType) {
+ case MIRType::Undefined:
+ def->setImplicitlyUsedUnchecked();
+ replace = MConstant::New(alloc(), UndefinedValue());
+ break;
+
+ case MIRType::Null:
+ def->setImplicitlyUsedUnchecked();
+ replace = MConstant::New(alloc(), NullValue());
+ break;
+
+ case MIRType::Value:
+ return def;
+
+ default: {
+ if (def->type() != MIRType::Value) {
+ if (def->type() == MIRType::Int32 && definiteType == MIRType::Double) {
+ replace = MToDouble::New(alloc(), def);
+ break;
+ }
+ MOZ_ASSERT(def->type() == definiteType);
+ return def;
+ }
+ replace = MUnbox::New(alloc(), def, definiteType, MUnbox::Infallible);
+ break;
+ }
+ }
+
+ current->add(replace);
+ return replace;
+}
+
+MDefinition*
+IonBuilder::ensureDefiniteTypeSet(MDefinition* def, TemporaryTypeSet* types)
+{
+    // We cannot arbitrarily add a typeset to a definition, since it can be
+    // shared by another path. So we always need to create a new MIR node.
+
+    // Use ensureDefiniteType to do the unboxing. If that happened, the typeset
+    // can be attached to the newly created unbox operation.
+ MDefinition* replace = ensureDefiniteType(def, types->getKnownMIRType());
+ if (replace != def) {
+ replace->setResultTypeSet(types);
+ return replace;
+ }
+
+ // Don't replace if input type is more accurate than given typeset.
+ if (def->type() != types->getKnownMIRType()) {
+ MOZ_ASSERT(types->getKnownMIRType() == MIRType::Value);
+ return def;
+ }
+
+ // Create a NOP mir instruction to filter the typeset.
+ MFilterTypeSet* filter = MFilterTypeSet::New(alloc(), def, types);
+ current->add(filter);
+ return filter;
+}
+
+static size_t
+NumFixedSlots(JSObject* object)
+{
+ // Note: we can't use object->numFixedSlots() here, as this will read the
+ // shape and can race with the main thread if we are building off thread.
+ // The allocation kind and object class (which goes through the type) can
+ // be read freely, however.
+ gc::AllocKind kind = object->asTenured().getAllocKind();
+ return gc::GetGCKindSlots(kind, object->getClass());
+}
+
+static bool
+IsUninitializedGlobalLexicalSlot(JSObject* obj, PropertyName* name)
+{
+ LexicalEnvironmentObject &globalLexical = obj->as<LexicalEnvironmentObject>();
+ MOZ_ASSERT(globalLexical.isGlobal());
+ Shape* shape = globalLexical.lookupPure(name);
+ if (!shape)
+ return false;
+ return globalLexical.getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL);
+}
+
+bool
+IonBuilder::getStaticName(JSObject* staticObject, PropertyName* name, bool* psucceeded,
+ MDefinition* lexicalCheck)
+{
+ MOZ_ASSERT(*psucceeded == false);
+
+ jsid id = NameToId(name);
+
+ bool isGlobalLexical = staticObject->is<LexicalEnvironmentObject>() &&
+ staticObject->as<LexicalEnvironmentObject>().isGlobal();
+ MOZ_ASSERT(isGlobalLexical ||
+ staticObject->is<GlobalObject>() ||
+ staticObject->is<CallObject>() ||
+ staticObject->is<ModuleEnvironmentObject>());
+ MOZ_ASSERT(staticObject->isSingleton());
+
+ *psucceeded = true;
+
+    // Always emit the lexical check. This could be optimized, but currently
+    // is not, for simplicity's sake.
+ if (lexicalCheck) {
+ *psucceeded = false;
+ return true;
+ }
+
+ TypeSet::ObjectKey* staticKey = TypeSet::ObjectKey::get(staticObject);
+ if (analysisContext)
+ staticKey->ensureTrackedProperty(analysisContext, NameToId(name));
+
+ if (staticKey->unknownProperties()) {
+ *psucceeded = false;
+ return true;
+ }
+
+ HeapTypeSetKey property = staticKey->property(id);
+ if (!property.maybeTypes() ||
+ !property.maybeTypes()->definiteProperty() ||
+ property.nonData(constraints()))
+ {
+ // The property has been reconfigured as non-configurable, non-enumerable
+ // or non-writable.
+ *psucceeded = false;
+ return true;
+ }
+
+ // Don't optimize global lexical bindings if they aren't initialized at
+ // compile time.
+ if (isGlobalLexical && IsUninitializedGlobalLexicalSlot(staticObject, name)) {
+ *psucceeded = false;
+ return true;
+ }
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticKey,
+ name, types, /* updateObserved = */ true);
+
+ if (barrier == BarrierKind::NoBarrier) {
+ // Try to inline properties holding a known constant object.
+ JSObject* singleton = types->maybeSingleton();
+ if (singleton) {
+ if (testSingletonProperty(staticObject, id) == singleton) {
+ pushConstant(ObjectValue(*singleton));
+ return true;
+ }
+ }
+
+ // Try to inline properties that have never been overwritten.
+ Value constantValue;
+ if (property.constant(constraints(), &constantValue)) {
+ pushConstant(constantValue);
+ return true;
+ }
+ }
+
+ if (!loadStaticSlot(staticObject, barrier, types, property.maybeTypes()->definiteSlot())) {
+ *psucceeded = false;
+ return false;
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::loadStaticSlot(JSObject* staticObject, BarrierKind barrier, TemporaryTypeSet* types,
+ uint32_t slot)
+{
+ if (barrier == BarrierKind::NoBarrier) {
+ // Try to inline properties that can only have one value.
+ MIRType knownType = types->getKnownMIRType();
+ if (knownType == MIRType::Undefined) {
+ pushConstant(UndefinedValue());
+ return true;
+ }
+ if (knownType == MIRType::Null) {
+ pushConstant(NullValue());
+ return true;
+ }
+ }
+
+ MInstruction* obj = constant(ObjectValue(*staticObject));
+
+ MIRType rvalType = types->getKnownMIRType();
+ if (barrier != BarrierKind::NoBarrier)
+ rvalType = MIRType::Value;
+
+ return loadSlot(obj, slot, NumFixedSlots(staticObject), rvalType, barrier, types);
+}
+
+// Whether a write of the given value may need a post-write barrier for GC purposes.
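+// For example, storing a value that might be a nursery-allocated object into a
+// slot of a tenured object creates a tenured-to-nursery edge that the store
+// buffer must record; if the nursery does not exist, or the value can never be
+// an object, no post barrier is needed.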
+bool
+jit::NeedsPostBarrier(MDefinition* value)
+{
+ if (!GetJitContext()->runtime->gcNursery().exists())
+ return false;
+ return value->mightBeType(MIRType::Object);
+}
+
+bool
+IonBuilder::setStaticName(JSObject* staticObject, PropertyName* name)
+{
+ jsid id = NameToId(name);
+
+ bool isGlobalLexical = staticObject->is<LexicalEnvironmentObject>() &&
+ staticObject->as<LexicalEnvironmentObject>().isGlobal();
+ MOZ_ASSERT(isGlobalLexical ||
+ staticObject->is<GlobalObject>() ||
+ staticObject->is<CallObject>());
+
+ MDefinition* value = current->peek(-1);
+
+ TypeSet::ObjectKey* staticKey = TypeSet::ObjectKey::get(staticObject);
+ if (staticKey->unknownProperties())
+ return jsop_setprop(name);
+
+ HeapTypeSetKey property = staticKey->property(id);
+ if (!property.maybeTypes() ||
+ !property.maybeTypes()->definiteProperty() ||
+ property.nonData(constraints()) ||
+ property.nonWritable(constraints()))
+ {
+ // The property has been reconfigured as non-configurable, non-enumerable
+ // or non-writable.
+ return jsop_setprop(name);
+ }
+
+ if (!CanWriteProperty(alloc(), constraints(), property, value))
+ return jsop_setprop(name);
+
+ // Don't optimize global lexical bindings if they aren't initialized at
+ // compile time.
+ if (isGlobalLexical && IsUninitializedGlobalLexicalSlot(staticObject, name))
+ return jsop_setprop(name);
+
+ current->pop();
+
+ // Pop the bound object from the stack.
+ MDefinition* obj = current->pop();
+ MOZ_ASSERT(&obj->toConstant()->toObject() == staticObject);
+
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteBarrier::New(alloc(), obj, value));
+
+ // If the property has a known type, we may be able to optimize typed stores by not
+ // storing the type tag.
+ MIRType slotType = MIRType::None;
+ MIRType knownType = property.knownMIRType(constraints());
+ if (knownType != MIRType::Value)
+ slotType = knownType;
+
+ bool needsBarrier = property.needsBarrier(constraints());
+ return storeSlot(obj, property.maybeTypes()->definiteSlot(), NumFixedSlots(staticObject),
+ value, needsBarrier, slotType);
+}
+
+JSObject*
+IonBuilder::testGlobalLexicalBinding(PropertyName* name)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_BINDGNAME ||
+ JSOp(*pc) == JSOP_GETGNAME ||
+ JSOp(*pc) == JSOP_SETGNAME ||
+ JSOp(*pc) == JSOP_STRICTSETGNAME);
+
+ // The global isn't the global lexical env's prototype, but its enclosing
+ // env. Test for the existence of |name| manually on the global lexical
+ // env. If it is not found, look for it on the global itself.
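+ // (Top-level |let| and |const| bindings live on the global lexical env,
+ // while |var| and function declarations live on the global object itself.)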
+
+ NativeObject* obj = &script()->global().lexicalEnvironment();
+ TypeSet::ObjectKey* lexicalKey = TypeSet::ObjectKey::get(obj);
+ jsid id = NameToId(name);
+ if (analysisContext)
+ lexicalKey->ensureTrackedProperty(analysisContext, id);
+
+ // If the property is not found on the global lexical env but it is found
+ // on the global and is configurable, try to freeze the typeset for its
+ // non-existence. If we don't have type information then fail.
+ //
+ // In the case that it is found on the global but is non-configurable,
+ // the binding cannot be shadowed by a global lexical binding.
+ Maybe<HeapTypeSetKey> lexicalProperty;
+ if (!lexicalKey->unknownProperties())
+ lexicalProperty.emplace(lexicalKey->property(id));
+ Shape* shape = obj->lookupPure(name);
+ if (shape) {
+ if ((JSOp(*pc) != JSOP_GETGNAME && !shape->writable()) ||
+ obj->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL))
+ {
+ return nullptr;
+ }
+ } else {
+ shape = script()->global().lookupPure(name);
+ if (!shape || shape->configurable()) {
+ if (lexicalProperty.isSome())
+ MOZ_ALWAYS_FALSE(lexicalProperty->isOwnProperty(constraints()));
+ else
+ return nullptr;
+ }
+ obj = &script()->global();
+ }
+
+ return obj;
+}
+
+bool
+IonBuilder::jsop_getgname(PropertyName* name)
+{
+ // Optimize undefined/NaN/Infinity first. We must ensure we handle these
+ // cases *exactly* like Baseline, because it's invalid to add an Ion IC or
+ // VM call (that might trigger invalidation) if there's no Baseline IC for
+ // this op.
+ if (name == names().undefined) {
+ pushConstant(UndefinedValue());
+ return true;
+ }
+ if (name == names().NaN) {
+ pushConstant(compartment->runtime()->NaNValue());
+ return true;
+ }
+ if (name == names().Infinity) {
+ pushConstant(compartment->runtime()->positiveInfinityValue());
+ return true;
+ }
+
+ if (JSObject* obj = testGlobalLexicalBinding(name)) {
+ bool emitted = false;
+ if (!getStaticName(obj, name, &emitted) || emitted)
+ return emitted;
+
+ if (!forceInlineCaches() && obj->is<GlobalObject>()) {
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ MDefinition* globalObj = constant(ObjectValue(*obj));
+ if (!getPropTryCommonGetter(&emitted, globalObj, name, types) || emitted)
+ return emitted;
+ }
+ }
+
+ return jsop_getname(name);
+}
+
+bool
+IonBuilder::jsop_getname(PropertyName* name)
+{
+ MDefinition* object;
+ if (IsGlobalOp(JSOp(*pc)) && !script()->hasNonSyntacticScope()) {
+ MInstruction* global = constant(ObjectValue(script()->global().lexicalEnvironment()));
+ object = global;
+ } else {
+ current->push(current->environmentChain());
+ object = current->pop();
+ }
+
+ MGetNameCache* ins;
+ if (JSOp(*GetNextPc(pc)) == JSOP_TYPEOF)
+ ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAMETYPEOF);
+ else
+ ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAME);
+
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::jsop_intrinsic(PropertyName* name)
+{
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ Value vp = UndefinedValue();
+ // If the intrinsic value doesn't yet exist, we haven't executed this
+ // opcode yet, so we need to get it and monitor the result.
+ if (!script()->global().maybeExistingIntrinsicValue(name, &vp)) {
+ MCallGetIntrinsicValue* ins = MCallGetIntrinsicValue::New(alloc(), name);
+
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
+ }
+
+ if (types->empty())
+ types->addType(TypeSet::GetValueType(vp), alloc().lifoAlloc());
+
+ // Bake in the intrinsic, guaranteed to exist because a non-empty typeset
+ // means the intrinsic was successfully gotten in the VM call above.
+ // Assert that TI agrees with us on the type.
+ MOZ_ASSERT(types->hasType(TypeSet::GetValueType(vp)));
+
+ pushConstant(vp);
+ return true;
+}
+
+bool
+IonBuilder::jsop_getimport(PropertyName* name)
+{
+ ModuleEnvironmentObject* env = GetModuleEnvironmentForScript(script());
+ MOZ_ASSERT(env);
+
+ Shape* shape;
+ ModuleEnvironmentObject* targetEnv;
+ MOZ_ALWAYS_TRUE(env->lookupImport(NameToId(name), &targetEnv, &shape));
+
+ PropertyName* localName = JSID_TO_STRING(shape->propid())->asAtom().asPropertyName();
+ bool emitted = false;
+ if (!getStaticName(targetEnv, localName, &emitted))
+ return false;
+
+ if (!emitted) {
+ // This can happen if we don't have type information.
+ TypeSet::ObjectKey* staticKey = TypeSet::ObjectKey::get(targetEnv);
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticKey,
+ name, types, /* updateObserved = */ true);
+
+ if (!loadStaticSlot(targetEnv, barrier, types, shape->slot()))
+ return false;
+ }
+
+ // In the rare case where this import hasn't been initialized already (we
+ // have an import cycle where modules reference each other's imports), emit
+ // a check.
+ if (targetEnv->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ MDefinition* checked = addLexicalCheck(current->pop());
+ current->push(checked);
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_bindname(PropertyName* name)
+{
+ MDefinition* envChain;
+ if (analysis().usesEnvironmentChain()) {
+ envChain = current->environmentChain();
+ } else {
+ // We take the slow path when trying to BINDGNAME a name that resolves
+ // to a 'const' or an uninitialized binding.
+ MOZ_ASSERT(JSOp(*pc) == JSOP_BINDGNAME);
+ envChain = constant(ObjectValue(script()->global().lexicalEnvironment()));
+ }
+ MBindNameCache* ins = MBindNameCache::New(alloc(), envChain, name, script(), pc);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_bindvar()
+{
+ MOZ_ASSERT(analysis().usesEnvironmentChain());
+ MCallBindVar* ins = MCallBindVar::New(alloc(), current->environmentChain());
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+static MIRType
+GetElemKnownType(bool needsHoleCheck, TemporaryTypeSet* types)
+{
+ MIRType knownType = types->getKnownMIRType();
+
+ // Null and undefined have no payload so they can't be specialized.
+ // Since folding null/undefined while building SSA is not safe (see the
+ // comment in IsPhiObservable), we just add an untyped load instruction
+ // and rely on pushTypeBarrier and DCE to replace it with a null/undefined
+ // constant.
+ if (knownType == MIRType::Undefined || knownType == MIRType::Null)
+ knownType = MIRType::Value;
+
+ // Different architectures may want typed element reads which require
+ // hole checks to be done as either value or typed reads.
+ if (needsHoleCheck && !LIRGenerator::allowTypedElementHoleCheck())
+ knownType = MIRType::Value;
+
+ return knownType;
+}
+
+bool
+IonBuilder::jsop_getelem()
+{
+ startTrackingOptimizations();
+
+ MDefinition* index = current->pop();
+ MDefinition* obj = current->pop();
+
+ trackTypeInfo(TrackedTypeSite::Receiver, obj->type(), obj->resultTypeSet());
+ trackTypeInfo(TrackedTypeSite::Index, index->type(), index->resultTypeSet());
+
+ // Always use a call if we are performing analysis and not actually
+ // emitting code, to simplify later analysis.
+ if (info().isAnalysis() || shouldAbortOnPreliminaryGroups(obj)) {
+ MInstruction* ins = MCallGetElement::New(alloc(), obj, index);
+
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
+ }
+
+ obj = maybeUnboxForPropertyAccess(obj);
+ if (obj->type() == MIRType::Object)
+ obj = convertUnboxedObjects(obj);
+
+ bool emitted = false;
+
+ if (!forceInlineCaches()) {
+ trackOptimizationAttempt(TrackedStrategy::GetElem_TypedObject);
+ if (!getElemTryTypedObject(&emitted, obj, index) || emitted)
+ return emitted;
+
+ // Note: no trackOptimizationAttempt call is needed, getElemTryGetProp
+ // will call it.
+ if (!getElemTryGetProp(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_Dense);
+ if (!getElemTryDense(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_TypedStatic);
+ if (!getElemTryTypedStatic(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_TypedArray);
+ if (!getElemTryTypedArray(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_String);
+ if (!getElemTryString(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_Arguments);
+ if (!getElemTryArguments(&emitted, obj, index) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_ArgumentsInlined);
+ if (!getElemTryArgumentsInlined(&emitted, obj, index) || emitted)
+ return emitted;
+ }
+
+ if (script()->argumentsHasVarBinding() && obj->mightBeType(MIRType::MagicOptimizedArguments))
+ return abort("Type is not definitely lazy arguments.");
+
+ trackOptimizationAttempt(TrackedStrategy::GetElem_InlineCache);
+ if (!getElemTryCache(&emitted, obj, index) || emitted)
+ return emitted;
+
+ // Emit call.
+ MInstruction* ins = MCallGetElement::New(alloc(), obj, index);
+
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ if (*pc == JSOP_CALLELEM && IsNullOrUndefined(obj->type())) {
+ // Due to inlining, it's possible the observed TypeSet is non-empty,
+ // even though we know |obj| is null/undefined and the MCallGetElement
+ // will throw. Don't push a TypeBarrier in this case, to avoid
+ // inlining the following (unreachable) JSOP_CALL.
+ return true;
+ }
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::getElemTryTypedObject(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // The next several failures are all due to types not predicting that we
+ // are definitely doing a getelem access on a typed object.
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedObject);
+
+ TypedObjectPrediction objPrediction = typedObjectPrediction(obj);
+ if (objPrediction.isUseless())
+ return true;
+
+ if (!objPrediction.ofArrayKind())
+ return true;
+
+ TypedObjectPrediction elemPrediction = objPrediction.arrayElementType();
+ if (elemPrediction.isUseless())
+ return true;
+
+ uint32_t elemSize;
+ if (!elemPrediction.hasKnownSize(&elemSize))
+ return true;
+
+ switch (elemPrediction.kind()) {
+ case type::Simd:
+ // FIXME (bug 894105): load into a MIRType::float32x4 etc
+ trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+ return true;
+
+ case type::Struct:
+ case type::Array:
+ return getElemTryComplexElemOfTypedObject(emitted,
+ obj,
+ index,
+ objPrediction,
+ elemPrediction,
+ elemSize);
+ case type::Scalar:
+ return getElemTryScalarElemOfTypedObject(emitted,
+ obj,
+ index,
+ objPrediction,
+ elemPrediction,
+ elemSize);
+
+ case type::Reference:
+ return getElemTryReferenceElemOfTypedObject(emitted,
+ obj,
+ index,
+ objPrediction,
+ elemPrediction);
+ }
+
+ MOZ_CRASH("Bad kind");
+}
+
+bool
+IonBuilder::checkTypedObjectIndexInBounds(uint32_t elemSize,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ LinearSum* indexAsByteOffset)
+{
+ // Ensure index is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), index);
+ current->add(idInt32);
+
+ // If we know the length statically from the type, just embed it.
+ // Otherwise, give up on this optimization; we do not currently load
+ // the length dynamically from the typed object.
+ int32_t lenOfAll;
+ MDefinition* length;
+ if (objPrediction.hasKnownArrayLength(&lenOfAll)) {
+ length = constantInt(lenOfAll);
+
+ // Since we are not loading the length from the object itself, only
+ // optimize if the underlying array buffer can never be detached.
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER)) {
+ trackOptimizationOutcome(TrackedOutcome::TypedObjectHasDetachedBuffer);
+ return false;
+ }
+ } else {
+ trackOptimizationOutcome(TrackedOutcome::TypedObjectArrayRange);
+ return false;
+ }
+
+ index = addBoundsCheck(idInt32, length);
+
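+ // The byte offset of element |index| is elemSize * index; e.g. with
+ // 8-byte elements, element 3 starts at byte offset 24.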
+ return indexAsByteOffset->add(index, AssertedCast<int32_t>(elemSize));
+}
+
+bool
+IonBuilder::getElemTryScalarElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ TypedObjectPrediction elemPrediction,
+ uint32_t elemSize)
+{
+ MOZ_ASSERT(objPrediction.ofArrayKind());
+
+ // Must always be loading the same scalar type
+ ScalarTypeDescr::Type elemType = elemPrediction.scalarType();
+ MOZ_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType));
+
+ LinearSum indexAsByteOffset(alloc());
+ if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objPrediction, &indexAsByteOffset))
+ return true;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+
+ return pushScalarLoadFromTypedObject(obj, indexAsByteOffset, elemType);
+}
+
+bool
+IonBuilder::getElemTryReferenceElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ TypedObjectPrediction elemPrediction)
+{
+ MOZ_ASSERT(objPrediction.ofArrayKind());
+
+ ReferenceTypeDescr::Type elemType = elemPrediction.referenceType();
+ uint32_t elemSize = ReferenceTypeDescr::size(elemType);
+
+ LinearSum indexAsByteOffset(alloc());
+ if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objPrediction, &indexAsByteOffset))
+ return true;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+
+ return pushReferenceLoadFromTypedObject(obj, indexAsByteOffset, elemType, nullptr);
+}
+
+bool
+IonBuilder::pushScalarLoadFromTypedObject(MDefinition* obj,
+ const LinearSum& byteOffset,
+ ScalarTypeDescr::Type elemType)
+{
+ uint32_t size = ScalarTypeDescr::size(elemType);
+ MOZ_ASSERT(size == ScalarTypeDescr::alignment(elemType));
+
+ // Find location within the owner object.
+ MDefinition* elements;
+ MDefinition* scaledOffset;
+ int32_t adjustment;
+ loadTypedObjectElements(obj, byteOffset, size, &elements, &scaledOffset, &adjustment);
+
+ // Load the element.
+ MLoadUnboxedScalar* load = MLoadUnboxedScalar::New(alloc(), elements, scaledOffset,
+ elemType,
+ DoesNotRequireMemoryBarrier,
+ adjustment);
+ current->add(load);
+ current->push(load);
+
+ // If we are reading in-bounds elements, we can use knowledge about
+ // the array type to determine the result type, even if the opcode has
+ // never executed. The known pushed type is only used to distinguish
+ // uint32 reads that may produce either doubles or integers.
+ TemporaryTypeSet* resultTypes = bytecodeTypes(pc);
+ bool allowDouble = resultTypes->hasType(TypeSet::DoubleType());
+
+ // Note: knownType is not necessarily in resultTypes; e.g. if we
+ // have only observed integers coming out of a float array.
+ MIRType knownType = MIRTypeForTypedArrayRead(elemType, allowDouble);
+
+ // Note: we can ignore the type barrier here because we know the type
+ // must be valid and unbarriered. We also need not set resultTypeSet,
+ // because knownType is scalar and a resultTypeSet would provide no
+ // useful additional info.
+ load->setResultType(knownType);
+
+ return true;
+}
+
+bool
+IonBuilder::pushReferenceLoadFromTypedObject(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ReferenceTypeDescr::Type type,
+ PropertyName* name)
+{
+ // Find location within the owner object.
+ MDefinition* elements;
+ MDefinition* scaledOffset;
+ int32_t adjustment;
+ uint32_t alignment = ReferenceTypeDescr::alignment(type);
+ loadTypedObjectElements(typedObj, byteOffset, alignment, &elements, &scaledOffset, &adjustment);
+
+ TemporaryTypeSet* observedTypes = bytecodeTypes(pc);
+
+ MInstruction* load = nullptr; // initialize to silence GCC warning
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
+ typedObj, name, observedTypes);
+
+ switch (type) {
+ case ReferenceTypeDescr::TYPE_ANY: {
+ // Make sure the barrier reflects the possibility of reading undefined.
+ bool bailOnUndefined = barrier == BarrierKind::NoBarrier &&
+ !observedTypes->hasType(TypeSet::UndefinedType());
+ if (bailOnUndefined)
+ barrier = BarrierKind::TypeTagOnly;
+ load = MLoadElement::New(alloc(), elements, scaledOffset, false, false, adjustment);
+ break;
+ }
+ case ReferenceTypeDescr::TYPE_OBJECT: {
+ // Make sure the barrier reflects the possibility of reading null. When
+ // there is no other barrier needed we include the null bailout with
+ // MLoadUnboxedObjectOrNull, which avoids the need to box the result
+ // for a type barrier instruction.
+ MLoadUnboxedObjectOrNull::NullBehavior nullBehavior;
+ if (barrier == BarrierKind::NoBarrier && !observedTypes->hasType(TypeSet::NullType()))
+ nullBehavior = MLoadUnboxedObjectOrNull::BailOnNull;
+ else
+ nullBehavior = MLoadUnboxedObjectOrNull::HandleNull;
+ load = MLoadUnboxedObjectOrNull::New(alloc(), elements, scaledOffset, nullBehavior,
+ adjustment);
+ break;
+ }
+ case ReferenceTypeDescr::TYPE_STRING: {
+ load = MLoadUnboxedString::New(alloc(), elements, scaledOffset, adjustment);
+ observedTypes->addType(TypeSet::StringType(), alloc().lifoAlloc());
+ break;
+ }
+ }
+
+ current->add(load);
+ current->push(load);
+
+ return pushTypeBarrier(load, observedTypes, barrier);
+}
+
+bool
+IonBuilder::getElemTryComplexElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ TypedObjectPrediction elemPrediction,
+ uint32_t elemSize)
+{
+ MOZ_ASSERT(objPrediction.ofArrayKind());
+
+ MDefinition* type = loadTypedObjectType(obj);
+ MDefinition* elemTypeObj = typeObjectForElementFromArrayStructType(type);
+
+ LinearSum indexAsByteOffset(alloc());
+ if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objPrediction, &indexAsByteOffset))
+ return true;
+
+ return pushDerivedTypedObject(emitted, obj, indexAsByteOffset,
+ elemPrediction, elemTypeObj);
+}
+
+bool
+IonBuilder::pushDerivedTypedObject(bool* emitted,
+ MDefinition* obj,
+ const LinearSum& baseByteOffset,
+ TypedObjectPrediction derivedPrediction,
+ MDefinition* derivedTypeObj)
+{
+ // Find location within the owner object.
+ MDefinition* owner;
+ LinearSum ownerByteOffset(alloc());
+ loadTypedObjectData(obj, &owner, &ownerByteOffset);
+
+ if (!ownerByteOffset.add(baseByteOffset, 1))
+ setForceAbort();
+
+ MDefinition* offset = ConvertLinearSum(alloc(), current, ownerByteOffset,
+ /* convertConstant = */ true);
+
+ // Create the derived typed object.
+ MInstruction* derivedTypedObj = MNewDerivedTypedObject::New(alloc(),
+ derivedPrediction,
+ derivedTypeObj,
+ owner,
+ offset);
+ current->add(derivedTypedObj);
+ current->push(derivedTypedObj);
+
+ // Determine (if possible) the class/proto that `derivedTypedObj` will
+ // have. For derived typed objects, the opacity will be the same as the
+ // incoming object from which the derived typed object is, well, derived.
+ // The prototype will be determined based on the type descriptor (and is
+ // immutable).
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ const Class* expectedClass = nullptr;
+ if (const Class* objClass = objTypes ? objTypes->getKnownClass(constraints()) : nullptr) {
+ MOZ_ASSERT(IsTypedObjectClass(objClass));
+ expectedClass = GetOutlineTypedObjectClass(IsOpaqueTypedObjectClass(objClass));
+ }
+ const TypedProto* expectedProto = derivedPrediction.getKnownPrototype();
+ MOZ_ASSERT_IF(expectedClass, IsTypedObjectClass(expectedClass));
+
+ // Determine (if possible) the class/proto that the observed type set
+ // describes.
+ TemporaryTypeSet* observedTypes = bytecodeTypes(pc);
+ const Class* observedClass = observedTypes->getKnownClass(constraints());
+
+ // If expectedClass/expectedProto are both non-null (and hence known), we
+ // can predict precisely what object group derivedTypedObj will have.
+ // Therefore, if we observe that this group is already contained in the set
+ // of observedTypes, we can skip the barrier.
+ //
+ // Barriers still wind up being needed in some relatively
+ // rare cases:
+ //
+ // - if multiple kinds of typed objects flow into this point,
+ // in which case we will not be able to predict expectedClass
+ // nor expectedProto.
+ //
+ // - if the code has never executed, in which case the set of
+ // observed types will be incomplete.
+ //
+ // Barriers are particularly expensive here because they prevent
+ // us from optimizing the MNewDerivedTypedObject away.
+ JSObject* observedProto;
+ if (observedTypes->getCommonPrototype(constraints(), &observedProto) &&
+ observedClass && observedProto && observedClass == expectedClass &&
+ observedProto == expectedProto)
+ {
+ derivedTypedObj->setResultTypeSet(observedTypes);
+ } else {
+ if (!pushTypeBarrier(derivedTypedObj, observedTypes, BarrierKind::TypeSet))
+ return false;
+ }
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getElemTryGetProp(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ // If index is a constant string or symbol, try to optimize this GETELEM
+ // as a GETPROP.
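+ // For example, obj["foo"] can then be optimized like the property access
+ // obj.foo.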
+
+ MOZ_ASSERT(*emitted == false);
+
+ MConstant* indexConst = index->maybeConstantValue();
+ jsid id;
+ if (!indexConst || !ValueToIdPure(indexConst->toJSValue(), &id))
+ return true;
+
+ if (id != IdToTypeId(id))
+ return true;
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Constant);
+ if (!getPropTryConstant(emitted, obj, id, types) || *emitted) {
+ if (*emitted)
+ index->setImplicitlyUsedUnchecked();
+ return *emitted;
+ }
+
+ trackOptimizationAttempt(TrackedStrategy::GetProp_NotDefined);
+ if (!getPropTryNotDefined(emitted, obj, id, types) || *emitted) {
+ if (*emitted)
+ index->setImplicitlyUsedUnchecked();
+ return *emitted;
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::getElemTryDense(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ JSValueType unboxedType = UnboxedArrayElementType(constraints(), obj, index);
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ if (!ElementAccessIsDenseNative(constraints(), obj, index)) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotDense);
+ return true;
+ }
+ }
+
+ // Don't generate a fast path if there have been bounds check failures
+ // and this access might be on a sparse property.
+ if (ElementAccessHasExtraIndexedProperty(this, obj) && failedBoundsCheck_) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return true;
+ }
+
+ // Don't generate a fast path if this pc has seen negative indexes accessed,
+ // which will not appear to be extra indexed properties.
+ if (inspector->hasSeenNegativeIndexGetElement(pc)) {
+ trackOptimizationOutcome(TrackedOutcome::ArraySeenNegativeIndex);
+ return true;
+ }
+
+ if (!jsop_getelem_dense(obj, index, unboxedType))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+JSObject*
+IonBuilder::getStaticTypedArrayObject(MDefinition* obj, MDefinition* index)
+{
+ Scalar::Type arrayType;
+ if (!ElementAccessIsTypedArray(constraints(), obj, index, &arrayType)) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedArray);
+ return nullptr;
+ }
+
+ if (!LIRGenerator::allowStaticTypedArrayAccesses()) {
+ trackOptimizationOutcome(TrackedOutcome::Disabled);
+ return nullptr;
+ }
+
+ if (ElementAccessHasExtraIndexedProperty(this, obj)) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return nullptr;
+ }
+
+ if (!obj->resultTypeSet()) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return nullptr;
+ }
+
+ JSObject* tarrObj = obj->resultTypeSet()->maybeSingleton();
+ if (!tarrObj) {
+ trackOptimizationOutcome(TrackedOutcome::NotSingleton);
+ return nullptr;
+ }
+
+ TypeSet::ObjectKey* tarrKey = TypeSet::ObjectKey::get(tarrObj);
+ if (tarrKey->unknownProperties()) {
+ trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
+ return nullptr;
+ }
+
+ return tarrObj;
+}
+
+bool
+IonBuilder::getElemTryTypedStatic(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ JSObject* tarrObj = getStaticTypedArrayObject(obj, index);
+ if (!tarrObj)
+ return true;
+
+ // LoadTypedArrayElementStatic currently treats uint32 arrays as int32.
+ Scalar::Type viewType = tarrObj->as<TypedArrayObject>().type();
+ if (viewType == Scalar::Uint32) {
+ trackOptimizationOutcome(TrackedOutcome::StaticTypedArrayUint32);
+ return true;
+ }
+
+ MDefinition* ptr = convertShiftToMaskForStaticTypedArray(index, viewType);
+ if (!ptr)
+ return true;
+
+ // Emit LoadTypedArrayElementStatic.
+
+ if (tarrObj->is<TypedArrayObject>()) {
+ TypeSet::ObjectKey* tarrKey = TypeSet::ObjectKey::get(tarrObj);
+ tarrKey->watchStateChangeForTypedArrayData(constraints());
+ }
+
+ obj->setImplicitlyUsedUnchecked();
+ index->setImplicitlyUsedUnchecked();
+
+ MLoadTypedArrayElementStatic* load = MLoadTypedArrayElementStatic::New(alloc(), tarrObj, ptr);
+ current->add(load);
+ current->push(load);
+
+ // The load is infallible if, when the read is out of bounds, the resulting
+ // undefined value will be coerced to the appropriate numeric type. The
+ // truncation analysis picks up some of these cases, but is incomplete with
+ // respect to others. For now, sniff the bytecode for simple patterns
+ // following the load that guarantee a truncation or numeric conversion.
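+ // For example, a float load followed by JSOP_POS corresponds to source like
+ // +f32[i], while an integer load followed by JSOP_ZERO and JSOP_BITOR
+ // corresponds to i32[k] | 0; both coerce an out-of-bounds undefined to a
+ // number.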
+ if (viewType == Scalar::Float32 || viewType == Scalar::Float64) {
+ jsbytecode* next = pc + JSOP_GETELEM_LENGTH;
+ if (*next == JSOP_POS)
+ load->setInfallible();
+ } else {
+ jsbytecode* next = pc + JSOP_GETELEM_LENGTH;
+ if (*next == JSOP_ZERO && *(next + JSOP_ZERO_LENGTH) == JSOP_BITOR)
+ load->setInfallible();
+ }
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getElemTryTypedArray(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ Scalar::Type arrayType;
+ if (!ElementAccessIsTypedArray(constraints(), obj, index, &arrayType)) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedArray);
+ return true;
+ }
+
+ // Emit typed getelem variant.
+ if (!jsop_getelem_typed(obj, index, arrayType))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getElemTryString(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (obj->type() != MIRType::String || !IsNumberType(index->type())) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotString);
+ return true;
+ }
+
+ // If the index is expected to be out-of-bounds, don't optimize to avoid
+ // frequent bailouts.
+ if (bytecodeTypes(pc)->hasType(TypeSet::UndefinedType())) {
+ trackOptimizationOutcome(TrackedOutcome::OutOfBounds);
+ return true;
+ }
+
+ // Emit fast path for string[index].
+ MInstruction* idInt32 = MToInt32::New(alloc(), index);
+ current->add(idInt32);
+ index = idInt32;
+
+ MStringLength* length = MStringLength::New(alloc(), obj);
+ current->add(length);
+
+ index = addBoundsCheck(index, length);
+
+ MCharCodeAt* charCode = MCharCodeAt::New(alloc(), obj, index);
+ current->add(charCode);
+
+ MFromCharCode* result = MFromCharCode::New(alloc(), charCode);
+ current->add(result);
+ current->push(result);
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getElemTryArguments(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (inliningDepth_ > 0)
+ return true;
+
+ if (obj->type() != MIRType::MagicOptimizedArguments)
+ return true;
+
+ // Emit GetFrameArgument.
+
+ MOZ_ASSERT(!info().argsObjAliasesFormals());
+
+ // Type Inference has guaranteed this is an optimized arguments object.
+ obj->setImplicitlyUsedUnchecked();
+
+ // Ensure that we do not read past the number of actual arguments.
+ MArgumentsLength* length = MArgumentsLength::New(alloc());
+ current->add(length);
+
+ // Ensure index is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), index);
+ current->add(idInt32);
+ index = idInt32;
+
+ // Bail out if we read past the number of actual arguments.
+ index = addBoundsCheck(index, length);
+
+ // Load the argument from the actual arguments.
+ MGetFrameArgument* load = MGetFrameArgument::New(alloc(), index, analysis_.hasSetArg());
+ current->add(load);
+ current->push(load);
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ if (!pushTypeBarrier(load, types, BarrierKind::TypeSet))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getElemTryArgumentsInlined(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (inliningDepth_ == 0)
+ return true;
+
+ if (obj->type() != MIRType::MagicOptimizedArguments)
+ return true;
+
+ // Emit inlined arguments.
+ obj->setImplicitlyUsedUnchecked();
+
+ MOZ_ASSERT(!info().argsObjAliasesFormals());
+
+ // When the id is constant, we can just return the corresponding inlined argument
+ MConstant* indexConst = index->maybeConstantValue();
+ if (indexConst && indexConst->type() == MIRType::Int32) {
+ MOZ_ASSERT(inliningDepth_ > 0);
+
+ int32_t id = indexConst->toInt32();
+ index->setImplicitlyUsedUnchecked();
+
+ if (id < (int32_t)inlineCallInfo_->argc() && id >= 0)
+ current->push(inlineCallInfo_->getArg(id));
+ else
+ pushConstant(UndefinedValue());
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+ }
+
+ // Non-constant indices into inlined arguments are not supported yet.
+ return abort("NYI inlined not constant get argument element");
+}
+
+bool
+IonBuilder::getElemTryCache(bool* emitted, MDefinition* obj, MDefinition* index)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Make sure we have at least an object.
+ if (!obj->mightBeType(MIRType::Object)) {
+ trackOptimizationOutcome(TrackedOutcome::NotObject);
+ return true;
+ }
+
+ // Don't cache for strings.
+ if (obj->mightBeType(MIRType::String)) {
+ trackOptimizationOutcome(TrackedOutcome::GetElemStringNotCached);
+ return true;
+ }
+
+ // Index should be integer, string, or symbol
+ if (!index->mightBeType(MIRType::Int32) &&
+ !index->mightBeType(MIRType::String) &&
+ !index->mightBeType(MIRType::Symbol))
+ {
+ trackOptimizationOutcome(TrackedOutcome::IndexType);
+ return true;
+ }
+
+ // Turn off caching if the index is int32 and we've seen non-native objects
+ // as the target of this getelem.
+ bool nonNativeGetElement = inspector->hasSeenNonNativeGetElement(pc);
+ if (index->mightBeType(MIRType::Int32) && nonNativeGetElement) {
+ trackOptimizationOutcome(TrackedOutcome::NonNativeReceiver);
+ return true;
+ }
+
+ // Emit GetElementCache.
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj,
+ nullptr, types);
+
+ // Always add a barrier if the index might be a string or symbol, so that
+ // the cache can attach stubs for particular properties.
+ if (index->mightBeType(MIRType::String) || index->mightBeType(MIRType::Symbol))
+ barrier = BarrierKind::TypeSet;
+
+ MGetPropertyCache* ins = MGetPropertyCache::New(alloc(), obj, index,
+ barrier == BarrierKind::TypeSet);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ // Spice up type information.
+ if (index->type() == MIRType::Int32 && barrier == BarrierKind::NoBarrier) {
+ bool needHoleCheck = !ElementAccessIsPacked(constraints(), obj);
+ MIRType knownType = GetElemKnownType(needHoleCheck, types);
+
+ if (knownType != MIRType::Value && knownType != MIRType::Double)
+ ins->setResultType(knownType);
+ }
+
+ if (!pushTypeBarrier(ins, types, barrier))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+TemporaryTypeSet*
+IonBuilder::computeHeapType(const TemporaryTypeSet* objTypes, const jsid id)
+{
+ if (objTypes->unknownObject() || objTypes->getObjectCount() == 0)
+ return nullptr;
+
+ TemporaryTypeSet empty;
+ TemporaryTypeSet* acc = &empty;
+ LifoAlloc* lifoAlloc = alloc().lifoAlloc();
+
+ Vector<HeapTypeSetKey, 4, SystemAllocPolicy> properties;
+ if (!properties.reserve(objTypes->getObjectCount()))
+ return nullptr;
+
+ for (unsigned i = 0; i < objTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = objTypes->getObject(i);
+
+ if (key->unknownProperties())
+ return nullptr;
+
+ HeapTypeSetKey property = key->property(id);
+ HeapTypeSet* currentSet = property.maybeTypes();
+
+ if (!currentSet || currentSet->unknown())
+ return nullptr;
+
+ properties.infallibleAppend(property);
+ acc = TypeSet::unionSets(acc, currentSet, lifoAlloc);
+ if (!acc)
+ return nullptr;
+ }
+
+ // Freeze all the properties associated with the refined type set.
+ for (HeapTypeSetKey* i = properties.begin(); i != properties.end(); i++)
+ i->freeze(constraints());
+
+ return acc;
+}
+
+bool
+IonBuilder::jsop_getelem_dense(MDefinition* obj, MDefinition* index, JSValueType unboxedType)
+{
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ MOZ_ASSERT(index->type() == MIRType::Int32 || index->type() == MIRType::Double);
+ if (JSOp(*pc) == JSOP_CALLELEM) {
+ // Indexed call on an element of an array. Populate the observed types
+ // with any objects that could be in the array, to avoid extraneous
+ // type barriers.
+ AddObjectsForPropertyRead(obj, nullptr, types);
+ }
+
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj,
+ nullptr, types);
+ bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj);
+
+ // Reads which are on holes in the object do not have to bail out if
+ // undefined values have been observed at this access site and the access
+ // cannot hit another indexed property on the object or its prototypes.
+ bool readOutOfBounds =
+ types->hasType(TypeSet::UndefinedType()) &&
+ !ElementAccessHasExtraIndexedProperty(this, obj);
+
+ MIRType knownType = MIRType::Value;
+ if (unboxedType == JSVAL_TYPE_MAGIC && barrier == BarrierKind::NoBarrier)
+ knownType = GetElemKnownType(needsHoleCheck, types);
+
+ // Ensure index is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), index);
+ current->add(idInt32);
+ index = idInt32;
+
+ // Get the elements vector.
+ MInstruction* elements = MElements::New(alloc(), obj, unboxedType != JSVAL_TYPE_MAGIC);
+ current->add(elements);
+
+ // Note: to help GVN, use the original MElements instruction and not
+ // MConvertElementsToDoubles as operand. This is fine because converting
+ // elements to double does not change the initialized length.
+ MInstruction* initLength = initializedLength(obj, elements, unboxedType);
+
+ // If we can load the element as a definite double, make sure to check that
+ // the array has been converted to homogeneous doubles first.
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ bool inBounds = !readOutOfBounds && !needsHoleCheck;
+
+ if (inBounds) {
+ TemporaryTypeSet* heapTypes = computeHeapType(objTypes, JSID_VOID);
+ if (heapTypes && heapTypes->isSubset(types)) {
+ knownType = heapTypes->getKnownMIRType();
+ types = heapTypes;
+ }
+ }
+
+ bool loadDouble =
+ unboxedType == JSVAL_TYPE_MAGIC &&
+ barrier == BarrierKind::NoBarrier &&
+ loopDepth_ &&
+ inBounds &&
+ knownType == MIRType::Double &&
+ objTypes &&
+ objTypes->convertDoubleElements(constraints()) == TemporaryTypeSet::AlwaysConvertToDoubles;
+ if (loadDouble)
+ elements = addConvertElementsToDoubles(elements);
+
+ MInstruction* load;
+
+ if (!readOutOfBounds) {
+ // This load should not return undefined, so likely we're reading
+ // in-bounds elements, and the array is packed or its holes are not
+ // read. This is the best case: we can separate the bounds check for
+ // hoisting.
+ index = addBoundsCheck(index, initLength);
+
+ if (unboxedType != JSVAL_TYPE_MAGIC) {
+ load = loadUnboxedValue(elements, 0, index, unboxedType, barrier, types);
+ } else {
+ load = MLoadElement::New(alloc(), elements, index, needsHoleCheck, loadDouble);
+ current->add(load);
+ }
+ } else {
+ // This load may return undefined, so assume that we *can* read holes,
+ // or that we can read out-of-bounds accesses. In this case, the bounds
+ // check is part of the opcode.
+ load = MLoadElementHole::New(alloc(), elements, index, initLength,
+ unboxedType, needsHoleCheck);
+ current->add(load);
+
+ // If readOutOfBounds is true, the typeset must include undefined, and
+ // then either additional types or a barrier. This means we should
+ // never have a typed version of LoadElementHole.
+ MOZ_ASSERT(knownType == MIRType::Value);
+ }
+
+ if (knownType != MIRType::Value) {
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ load->setResultType(knownType);
+ load->setResultTypeSet(types);
+ }
+
+ current->push(load);
+ return pushTypeBarrier(load, types, barrier);
+}
+
+MInstruction*
+IonBuilder::addArrayBufferByteLength(MDefinition* obj)
+{
+ MLoadFixedSlot* ins = MLoadFixedSlot::New(alloc(), obj, size_t(ArrayBufferObject::BYTE_LENGTH_SLOT));
+ current->add(ins);
+ ins->setResultType(MIRType::Int32);
+ return ins;
+}
+
+void
+IonBuilder::addTypedArrayLengthAndData(MDefinition* obj,
+ BoundsChecking checking,
+ MDefinition** index,
+ MInstruction** length, MInstruction** elements)
+{
+ MOZ_ASSERT((index != nullptr) == (elements != nullptr));
+
+ JSObject* tarr = nullptr;
+
+ if (MConstant* objConst = obj->maybeConstantValue()) {
+ if (objConst->type() == MIRType::Object)
+ tarr = &objConst->toObject();
+ } else if (TemporaryTypeSet* types = obj->resultTypeSet()) {
+ tarr = types->maybeSingleton();
+ }
+
+ if (tarr) {
+ SharedMem<void*> data = tarr->as<TypedArrayObject>().viewDataEither();
+ // Bug 979449 - Optimistically embed the elements and use TI to
+ // invalidate if we move them.
+ bool isTenured = !tarr->runtimeFromMainThread()->gc.nursery.isInside(data);
+ if (isTenured && tarr->isSingleton()) {
+ // The 'data' pointer of TypedArrayObject can change in rare circumstances
+ // (ArrayBufferObject::changeContents).
+ TypeSet::ObjectKey* tarrKey = TypeSet::ObjectKey::get(tarr);
+ if (!tarrKey->unknownProperties()) {
+ if (tarr->is<TypedArrayObject>())
+ tarrKey->watchStateChangeForTypedArrayData(constraints());
+
+ obj->setImplicitlyUsedUnchecked();
+
+ int32_t len = AssertedCast<int32_t>(tarr->as<TypedArrayObject>().length());
+ *length = MConstant::New(alloc(), Int32Value(len));
+ current->add(*length);
+
+ if (index) {
+ if (checking == DoBoundsCheck)
+ *index = addBoundsCheck(*index, *length);
+
+ *elements = MConstantElements::New(alloc(), data);
+ current->add(*elements);
+ }
+ return;
+ }
+ }
+ }
+
+ *length = MTypedArrayLength::New(alloc(), obj);
+ current->add(*length);
+
+ if (index) {
+ if (checking == DoBoundsCheck)
+ *index = addBoundsCheck(*index, *length);
+
+ *elements = MTypedArrayElements::New(alloc(), obj);
+ current->add(*elements);
+ }
+}
+
+MDefinition*
+IonBuilder::convertShiftToMaskForStaticTypedArray(MDefinition* id,
+ Scalar::Type viewType)
+{
+ trackOptimizationOutcome(TrackedOutcome::StaticTypedArrayCantComputeMask);
+
+ // No shifting is necessary if the typed array has single byte elements.
+ if (TypedArrayShift(viewType) == 0)
+ return id;
+
+ // If the index is an already shifted constant, undo the shift to get the
+ // absolute offset being accessed.
+ if (MConstant* idConst = id->maybeConstantValue()) {
+ if (idConst->type() == MIRType::Int32) {
+ int32_t index = idConst->toInt32();
+ MConstant* offset = MConstant::New(alloc(), Int32Value(index << TypedArrayShift(viewType)));
+ current->add(offset);
+ return offset;
+ }
+ }
+
+ if (!id->isRsh() || id->isEffectful())
+ return nullptr;
+
+ MConstant* shiftAmount = id->toRsh()->rhs()->maybeConstantValue();
+ if (!shiftAmount || shiftAmount->type() != MIRType::Int32)
+ return nullptr;
+ if (uint32_t(shiftAmount->toInt32()) != TypedArrayShift(viewType))
+ return nullptr;
+
+ // Instead of shifting, mask off the low bits of the index so that
+ // a non-scaled access on the typed array can be performed.
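+ // For example, an Int32Array access written as a[i >> 2] becomes a
+ // byte-addressed access at i & ~3.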
+ MConstant* mask = MConstant::New(alloc(), Int32Value(~((1 << shiftAmount->toInt32()) - 1)));
+ MBitAnd* ptr = MBitAnd::New(alloc(), id->getOperand(0), mask);
+
+ ptr->infer(nullptr, nullptr);
+ MOZ_ASSERT(!ptr->isEffectful());
+
+ current->add(mask);
+ current->add(ptr);
+
+ return ptr;
+}
+
+bool
+IonBuilder::jsop_getelem_typed(MDefinition* obj, MDefinition* index,
+ Scalar::Type arrayType)
+{
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ bool maybeUndefined = types->hasType(TypeSet::UndefinedType());
+
+ // Reading from a Uint32Array will result in a double for values
+ // that don't fit in an int32. We have to bail out if this happens
+ // and the instruction is not known to return a double.
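+ // For example, an element holding 0xFFFFFFFF reads back as the double
+ // 4294967295, which has no int32 representation.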
+ bool allowDouble = types->hasType(TypeSet::DoubleType());
+
+ // Ensure id is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), index);
+ current->add(idInt32);
+ index = idInt32;
+
+ if (!maybeUndefined) {
+ // Assume the index is in range, so that we can hoist the length,
+ // elements vector and bounds check.
+
+ // If we are reading in-bounds elements, we can use knowledge about
+ // the array type to determine the result type, even if the opcode has
+ // never executed. The known pushed type is only used to distinguish
+ // uint32 reads that may produce either doubles or integers.
+ MIRType knownType = MIRTypeForTypedArrayRead(arrayType, allowDouble);
+
+ // Get length, bounds-check, then get elements, and add all instructions.
+ MInstruction* length;
+ MInstruction* elements;
+ addTypedArrayLengthAndData(obj, DoBoundsCheck, &index, &length, &elements);
+
+ // Load the element.
+ MLoadUnboxedScalar* load = MLoadUnboxedScalar::New(alloc(), elements, index, arrayType);
+ current->add(load);
+ current->push(load);
+
+ // Note: we can ignore the type barrier here, we know the type must
+ // be valid and unbarriered.
+ load->setResultType(knownType);
+ return true;
+ } else {
+ // We need a type barrier if the array's element type has never been
+ // observed (we've only read out-of-bounds values). Note that for
+ // Uint32Array, we only check for int32: if allowDouble is false we
+ // will bailout when we read a double.
+ BarrierKind barrier = BarrierKind::TypeSet;
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (types->hasType(TypeSet::Int32Type()))
+ barrier = BarrierKind::NoBarrier;
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ if (allowDouble)
+ barrier = BarrierKind::NoBarrier;
+ break;
+ default:
+ MOZ_CRASH("Unknown typed array type");
+ }
+
+ // Assume we will read out-of-bound values. In this case the
+ // bounds check will be part of the instruction, and the instruction
+ // will always return a Value.
+ MLoadTypedArrayElementHole* load =
+ MLoadTypedArrayElementHole::New(alloc(), obj, index, arrayType, allowDouble);
+ current->add(load);
+ current->push(load);
+
+ return pushTypeBarrier(load, types, barrier);
+ }
+}
+
+bool
+IonBuilder::jsop_setelem()
+{
+ bool emitted = false;
+ startTrackingOptimizations();
+
+ MDefinition* value = current->pop();
+ MDefinition* index = current->pop();
+ MDefinition* object = convertUnboxedObjects(current->pop());
+
+ trackTypeInfo(TrackedTypeSite::Receiver, object->type(), object->resultTypeSet());
+ trackTypeInfo(TrackedTypeSite::Index, index->type(), index->resultTypeSet());
+ trackTypeInfo(TrackedTypeSite::Value, value->type(), value->resultTypeSet());
+
+ if (shouldAbortOnPreliminaryGroups(object)) {
+ MInstruction* ins = MCallSetElement::New(alloc(), object, index, value, IsStrictSetPC(pc));
+ current->add(ins);
+ current->push(value);
+ return resumeAfter(ins);
+ }
+
+ if (!forceInlineCaches()) {
+ trackOptimizationAttempt(TrackedStrategy::SetElem_TypedObject);
+ if (!setElemTryTypedObject(&emitted, object, index, value) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::SetElem_TypedStatic);
+ if (!setElemTryTypedStatic(&emitted, object, index, value) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::SetElem_TypedArray);
+ if (!setElemTryTypedArray(&emitted, object, index, value) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::SetElem_Dense);
+ SetElemICInspector icInspect(inspector->setElemICInspector(pc));
+ bool writeHole = icInspect.sawOOBDenseWrite();
+ if (!setElemTryDense(&emitted, object, index, value, writeHole) || emitted)
+ return emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::SetElem_Arguments);
+ if (!setElemTryArguments(&emitted, object, index, value) || emitted)
+ return emitted;
+ }
+
+ if (script()->argumentsHasVarBinding() &&
+ object->mightBeType(MIRType::MagicOptimizedArguments) &&
+ info().analysisMode() != Analysis_ArgumentsUsage)
+ {
+ return abort("Type is not definitely lazy arguments.");
+ }
+
+ trackOptimizationAttempt(TrackedStrategy::SetElem_InlineCache);
+ if (!setElemTryCache(&emitted, object, index, value) || emitted)
+ return emitted;
+
+ // Emit call.
+ MInstruction* ins = MCallSetElement::New(alloc(), object, index, value, IsStrictSetPC(pc));
+ current->add(ins);
+ current->push(value);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::setElemTryTypedObject(bool* emitted, MDefinition* obj,
+ MDefinition* index, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // The next several failures are all due to types not predicting that we
+ // are definitely doing a setelem access on a typed object.
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedObject);
+
+ TypedObjectPrediction objPrediction = typedObjectPrediction(obj);
+ if (objPrediction.isUseless())
+ return true;
+
+ if (!objPrediction.ofArrayKind())
+ return true;
+
+ TypedObjectPrediction elemPrediction = objPrediction.arrayElementType();
+ if (elemPrediction.isUseless())
+ return true;
+
+ uint32_t elemSize;
+ if (!elemPrediction.hasKnownSize(&elemSize))
+ return true;
+
+ switch (elemPrediction.kind()) {
+ case type::Simd:
+ // FIXME (bug 894105): store a MIRType::float32x4 etc
+ trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+ return true;
+
+ case type::Reference:
+ return setElemTryReferenceElemOfTypedObject(emitted, obj, index,
+ objPrediction, value, elemPrediction);
+
+ case type::Scalar:
+ return setElemTryScalarElemOfTypedObject(emitted,
+ obj,
+ index,
+ objPrediction,
+ value,
+ elemPrediction,
+ elemSize);
+
+ case type::Struct:
+ case type::Array:
+ // Not yet optimized.
+ trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+ return true;
+ }
+
+ MOZ_CRASH("Bad kind");
+}
+
+bool
+IonBuilder::setElemTryReferenceElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ MDefinition* value,
+ TypedObjectPrediction elemPrediction)
+{
+ ReferenceTypeDescr::Type elemType = elemPrediction.referenceType();
+ uint32_t elemSize = ReferenceTypeDescr::size(elemType);
+
+ LinearSum indexAsByteOffset(alloc());
+ if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objPrediction, &indexAsByteOffset))
+ return true;
+
+ if (!storeReferenceTypedObjectValue(obj, indexAsByteOffset, elemType, value, nullptr))
+ return true;
+
+ current->push(value);
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setElemTryScalarElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ MDefinition* value,
+ TypedObjectPrediction elemPrediction,
+ uint32_t elemSize)
+{
+ // Must always be loading the same scalar type
+ ScalarTypeDescr::Type elemType = elemPrediction.scalarType();
+ MOZ_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType));
+
+ LinearSum indexAsByteOffset(alloc());
+ if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objPrediction, &indexAsByteOffset))
+ return true;
+
+ // Store the element
+ if (!storeScalarTypedObjectValue(obj, indexAsByteOffset, elemType, value))
+ return false;
+
+ current->push(value);
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setElemTryTypedStatic(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ JSObject* tarrObj = getStaticTypedArrayObject(object, index);
+ if (!tarrObj)
+ return true;
+
+ SharedMem<void*> viewData = tarrObj->as<TypedArrayObject>().viewDataEither();
+ if (tarrObj->runtimeFromMainThread()->gc.nursery.isInside(viewData))
+ return true;
+
+ Scalar::Type viewType = tarrObj->as<TypedArrayObject>().type();
+ MDefinition* ptr = convertShiftToMaskForStaticTypedArray(index, viewType);
+ if (!ptr)
+ return true;
+
+ // Emit StoreTypedArrayElementStatic.
+
+ if (tarrObj->is<TypedArrayObject>()) {
+ TypeSet::ObjectKey* tarrKey = TypeSet::ObjectKey::get(tarrObj);
+ tarrKey->watchStateChangeForTypedArrayData(constraints());
+ }
+
+ object->setImplicitlyUsedUnchecked();
+ index->setImplicitlyUsedUnchecked();
+
+ // Clamp value to [0, 255] for Uint8ClampedArray.
+ MDefinition* toWrite = value;
+ if (viewType == Scalar::Uint8Clamped) {
+ toWrite = MClampToUint8::New(alloc(), value);
+ current->add(toWrite->toInstruction());
+ }
+
+ MInstruction* store = MStoreTypedArrayElementStatic::New(alloc(), tarrObj, ptr, toWrite);
+ current->add(store);
+ current->push(value);
+
+ if (!resumeAfter(store))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setElemTryTypedArray(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ Scalar::Type arrayType;
+ if (!ElementAccessIsTypedArray(constraints(), object, index, &arrayType)) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedArray);
+ return true;
+ }
+
+ // Emit typed setelem variant.
+ if (!jsop_setelem_typed(arrayType, object, index, value))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setElemTryDense(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value, bool writeHole)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ JSValueType unboxedType = UnboxedArrayElementType(constraints(), object, index);
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ if (!ElementAccessIsDenseNative(constraints(), object, index)) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotDense);
+ return true;
+ }
+ }
+
+ if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current,
+ &object, nullptr, &value, /* canModify = */ true))
+ {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return true;
+ }
+
+ if (!object->resultTypeSet()) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return true;
+ }
+
+ TemporaryTypeSet::DoubleConversion conversion =
+ object->resultTypeSet()->convertDoubleElements(constraints());
+
+ // If AmbiguousDoubleConversion, only handle int32 values for now.
+ if (conversion == TemporaryTypeSet::AmbiguousDoubleConversion &&
+ value->type() != MIRType::Int32)
+ {
+ trackOptimizationOutcome(TrackedOutcome::ArrayDoubleConversion);
+ return true;
+ }
+
+ // Don't generate a fast path if there have been bounds check failures
+ // and this access might be on a sparse property.
+ if (ElementAccessHasExtraIndexedProperty(this, object) && failedBoundsCheck_) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return true;
+ }
+
+ // Emit dense setelem variant.
+ if (!jsop_setelem_dense(conversion, object, index, value, unboxedType, writeHole, emitted))
+ return false;
+
+ if (!*emitted) {
+ trackOptimizationOutcome(TrackedOutcome::NonWritableProperty);
+ return true;
+ }
+
+ trackOptimizationSuccess();
+ return true;
+}
+
+bool
+IonBuilder::setElemTryArguments(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (object->type() != MIRType::MagicOptimizedArguments)
+ return true;
+
+ // Arguments are not supported yet.
+ return abort("NYI arguments[]=");
+}
+
+bool
+IonBuilder::setElemTryCache(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!object->mightBeType(MIRType::Object)) {
+ trackOptimizationOutcome(TrackedOutcome::NotObject);
+ return true;
+ }
+
+ if (!index->mightBeType(MIRType::Int32) &&
+ !index->mightBeType(MIRType::String) &&
+ !index->mightBeType(MIRType::Symbol))
+ {
+ trackOptimizationOutcome(TrackedOutcome::IndexType);
+ return true;
+ }
+
+ bool barrier = true;
+ bool indexIsInt32 = index->type() == MIRType::Int32;
+
+ if (indexIsInt32 &&
+ !PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current,
+ &object, nullptr, &value, /* canModify = */ true))
+ {
+ barrier = false;
+ }
+
+ // We can avoid worrying about holes in the IC if we know a priori we are safe
+ // from them. If TI can guard that there are no indexed properties on the prototype
+ // chain, we know that we aren't missing any setters by overwriting the hole with
+ // another value.
+ bool guardHoles = ElementAccessHasExtraIndexedProperty(this, object);
+
+ // Make sure the object being written to doesn't have copy on write elements.
+ const Class* clasp = object->resultTypeSet() ? object->resultTypeSet()->getKnownClass(constraints()) : nullptr;
+ bool checkNative = !clasp || !clasp->isNative();
+ object = addMaybeCopyElementsForWrite(object, checkNative);
+
+ if (NeedsPostBarrier(value)) {
+ if (indexIsInt32)
+ current->add(MPostWriteElementBarrier::New(alloc(), object, value, index));
+ else
+ current->add(MPostWriteBarrier::New(alloc(), object, value));
+ }
+
+ // Emit SetPropertyCache.
+ bool strict = JSOp(*pc) == JSOP_STRICTSETELEM;
+ MSetPropertyCache* ins =
+ MSetPropertyCache::New(alloc(), object, index, value, strict, barrier, guardHoles);
+ current->add(ins);
+ current->push(value);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_setelem_dense(TemporaryTypeSet::DoubleConversion conversion,
+ MDefinition* obj, MDefinition* id, MDefinition* value,
+ JSValueType unboxedType, bool writeHole, bool* emitted)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ MIRType elementType = MIRType::None;
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ elementType = DenseNativeElementType(constraints(), obj);
+ bool packed = ElementAccessIsPacked(constraints(), obj);
+
+ // Writes which are on holes in the object do not have to bail out if they
+ // cannot hit another indexed property on the object or its prototypes.
+ bool hasNoExtraIndexedProperty = !ElementAccessHasExtraIndexedProperty(this, obj);
+
+ bool mayBeFrozen = ElementAccessMightBeFrozen(constraints(), obj);
+
+ if (mayBeFrozen && !hasNoExtraIndexedProperty) {
+ // FallibleStoreElement does not know how to deal with extra indexed
+ // properties on the prototype. This case should be rare so we fall back
+ // to an IC.
+ return true;
+ }
+
+ *emitted = true;
+
+ // Ensure id is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), id);
+ current->add(idInt32);
+ id = idInt32;
+
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteElementBarrier::New(alloc(), obj, value, id));
+
+ // Copy the elements vector if necessary.
+ obj = addMaybeCopyElementsForWrite(obj, /* checkNative = */ false);
+
+ // Get the elements vector.
+ MElements* elements = MElements::New(alloc(), obj, unboxedType != JSVAL_TYPE_MAGIC);
+ current->add(elements);
+
+ // Ensure the value is a double, if double conversion might be needed.
+ MDefinition* newValue = value;
+ switch (conversion) {
+ case TemporaryTypeSet::AlwaysConvertToDoubles:
+ case TemporaryTypeSet::MaybeConvertToDoubles: {
+ MInstruction* valueDouble = MToDouble::New(alloc(), value);
+ current->add(valueDouble);
+ newValue = valueDouble;
+ break;
+ }
+
+ case TemporaryTypeSet::AmbiguousDoubleConversion: {
+ MOZ_ASSERT(value->type() == MIRType::Int32);
+ MInstruction* maybeDouble = MMaybeToDoubleElement::New(alloc(), elements, value);
+ current->add(maybeDouble);
+ newValue = maybeDouble;
+ break;
+ }
+
+ case TemporaryTypeSet::DontConvertToDoubles:
+ break;
+
+ default:
+ MOZ_CRASH("Unknown double conversion");
+ }
+
+ // Use MStoreElementHole if this SETELEM has written to out-of-bounds
+ // indexes in the past. Otherwise, use MStoreElement so that we can hoist
+ // the initialized length and bounds check.
+    // If the object may have been frozen, no previous expectations hold and we
+    // fall back to MFallibleStoreElement.
+ MInstruction* store;
+ MStoreElementCommon* common = nullptr;
+ if (writeHole && hasNoExtraIndexedProperty && !mayBeFrozen) {
+ MStoreElementHole* ins = MStoreElementHole::New(alloc(), obj, elements, id, newValue, unboxedType);
+ store = ins;
+ common = ins;
+
+ current->add(ins);
+ current->push(value);
+ } else if (mayBeFrozen) {
+ MOZ_ASSERT(hasNoExtraIndexedProperty,
+ "FallibleStoreElement codegen assumes no extra indexed properties");
+
+ bool strict = IsStrictSetPC(pc);
+ MFallibleStoreElement* ins = MFallibleStoreElement::New(alloc(), obj, elements, id,
+ newValue, unboxedType, strict);
+ store = ins;
+ common = ins;
+
+ current->add(ins);
+ current->push(value);
+ } else {
+ MInstruction* initLength = initializedLength(obj, elements, unboxedType);
+
+ id = addBoundsCheck(id, initLength);
+ bool needsHoleCheck = !packed && !hasNoExtraIndexedProperty;
+
+ if (unboxedType != JSVAL_TYPE_MAGIC) {
+ store = storeUnboxedValue(obj, elements, 0, id, unboxedType, newValue);
+ } else {
+ MStoreElement* ins = MStoreElement::New(alloc(), elements, id, newValue, needsHoleCheck);
+ store = ins;
+ common = ins;
+
+ current->add(store);
+ }
+
+ current->push(value);
+ }
+
+ if (!resumeAfter(store))
+ return false;
+
+ if (common) {
+ // Determine whether a write barrier is required.
+ if (obj->resultTypeSet()->propertyNeedsBarrier(constraints(), JSID_VOID))
+ common->setNeedsBarrier();
+
+ if (elementType != MIRType::None && packed)
+ common->setElementType(elementType);
+ }
+
+ return true;
+}
+
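+// Emit a typed-array SETELEM, clamping the value for Uint8ClampedArray and
+// using the hole-tolerant store when Baseline saw out-of-bounds writes.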
+bool
+IonBuilder::jsop_setelem_typed(Scalar::Type arrayType,
+ MDefinition* obj, MDefinition* id, MDefinition* value)
+{
+ SetElemICInspector icInspect(inspector->setElemICInspector(pc));
+ bool expectOOB = icInspect.sawOOBTypedArrayWrite();
+
+ if (expectOOB)
+ spew("Emitting OOB TypedArray SetElem");
+
+ // Ensure id is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), id);
+ current->add(idInt32);
+ id = idInt32;
+
+ // Get length, bounds-check, then get elements, and add all instructions.
+ MInstruction* length;
+ MInstruction* elements;
+ BoundsChecking checking = expectOOB ? SkipBoundsCheck : DoBoundsCheck;
+ addTypedArrayLengthAndData(obj, checking, &id, &length, &elements);
+
+ // Clamp value to [0, 255] for Uint8ClampedArray.
+ MDefinition* toWrite = value;
+ if (arrayType == Scalar::Uint8Clamped) {
+ toWrite = MClampToUint8::New(alloc(), value);
+ current->add(toWrite->toInstruction());
+ }
+
+ // Store the value.
+ MInstruction* ins;
+ if (expectOOB) {
+ ins = MStoreTypedArrayElementHole::New(alloc(), elements, length, id, toWrite, arrayType);
+ } else {
+ MStoreUnboxedScalar* store =
+ MStoreUnboxedScalar::New(alloc(), elements, id, toWrite, arrayType,
+ MStoreUnboxedScalar::TruncateInput);
+ ins = store;
+ }
+
+ current->add(ins);
+ current->push(value);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_length()
+{
+ if (jsop_length_fastPath())
+ return true;
+
+ PropertyName* name = info().getAtom(pc)->asPropertyName();
+ return jsop_getprop(name);
+}
+
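+// Try to compile JSOP_LENGTH without a property access: string length, array
+// length, unboxed-array length, or a constant length for array typed objects.
+// Returns false if no fast path applies and the generic getprop must be used.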
+bool
+IonBuilder::jsop_length_fastPath()
+{
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ if (types->getKnownMIRType() != MIRType::Int32)
+ return false;
+
+ MDefinition* obj = current->peek(-1);
+
+ if (shouldAbortOnPreliminaryGroups(obj))
+ return false;
+
+ if (obj->mightBeType(MIRType::String)) {
+ if (obj->mightBeType(MIRType::Object))
+ return false;
+ current->pop();
+ MStringLength* ins = MStringLength::New(alloc(), obj);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+
+ if (obj->mightBeType(MIRType::Object)) {
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+
+ // Compute the length for array objects.
+ if (objTypes &&
+ objTypes->getKnownClass(constraints()) == &ArrayObject::class_ &&
+ !objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW))
+ {
+ current->pop();
+ MElements* elements = MElements::New(alloc(), obj);
+ current->add(elements);
+
+ // Read length.
+ MArrayLength* length = MArrayLength::New(alloc(), elements);
+ current->add(length);
+ current->push(length);
+ return true;
+ }
+
+ // Compute the length for unboxed array objects.
+ if (UnboxedArrayElementType(constraints(), obj, nullptr) != JSVAL_TYPE_MAGIC &&
+ !objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW))
+ {
+ current->pop();
+
+ MUnboxedArrayLength* length = MUnboxedArrayLength::New(alloc(), obj);
+ current->add(length);
+ current->push(length);
+ return true;
+ }
+
+ // Compute the length for array typed objects.
+ TypedObjectPrediction prediction = typedObjectPrediction(obj);
+ if (!prediction.isUseless()) {
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return false;
+
+ MInstruction* length;
+ int32_t sizedLength;
+ if (prediction.hasKnownArrayLength(&sizedLength)) {
+ obj->setImplicitlyUsedUnchecked();
+ length = MConstant::New(alloc(), Int32Value(sizedLength));
+ } else {
+ return false;
+ }
+
+ current->pop();
+ current->add(length);
+ current->push(length);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool
+IonBuilder::jsop_arguments()
+{
+ if (info().needsArgsObj()) {
+ current->push(current->argumentsObject());
+ return true;
+ }
+ MOZ_ASSERT(lazyArguments_);
+ current->push(lazyArguments_);
+ return true;
+}
+
+bool
+IonBuilder::jsop_newtarget()
+{
+ if (!info().funMaybeLazy()) {
+ MOZ_ASSERT(!info().script()->isForEval());
+ pushConstant(NullValue());
+ return true;
+ }
+
+ MOZ_ASSERT(info().funMaybeLazy());
+
+ if (info().funMaybeLazy()->isArrow()) {
+ MArrowNewTarget* arrowNewTarget = MArrowNewTarget::New(alloc(), getCallee());
+ current->add(arrowNewTarget);
+ current->push(arrowNewTarget);
+ return true;
+ }
+
+ if (inliningDepth_ == 0) {
+ MNewTarget* newTarget = MNewTarget::New(alloc());
+ current->add(newTarget);
+ current->push(newTarget);
+ return true;
+ }
+
+ if (!inlineCallInfo_->constructing()) {
+ pushConstant(UndefinedValue());
+ return true;
+ }
+
+ current->push(inlineCallInfo_->getNewTarget());
+ return true;
+}
+
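+// Build the rest-parameter array. Outside of inlining this uses MRest with
+// the dynamic actual-argument count; when inlining, the actuals are known and
+// the array is filled in directly with unrolled stores.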
+bool
+IonBuilder::jsop_rest()
+{
+ if (info().analysisMode() == Analysis_ArgumentsUsage) {
+        // There's no BaselineScript with the template object. Just push a
+        // dummy value; it does not affect the arguments analysis.
+ MUnknownValue* unknown = MUnknownValue::New(alloc());
+ current->add(unknown);
+ current->push(unknown);
+ return true;
+ }
+
+ ArrayObject* templateObject = &inspector->getTemplateObject(pc)->as<ArrayObject>();
+
+ if (inliningDepth_ == 0) {
+ // We don't know anything about the callee.
+ MArgumentsLength* numActuals = MArgumentsLength::New(alloc());
+ current->add(numActuals);
+
+ // Pass in the number of actual arguments, the number of formals (not
+ // including the rest parameter slot itself), and the template object.
+ MRest* rest = MRest::New(alloc(), constraints(), numActuals, info().nargs() - 1,
+ templateObject);
+ current->add(rest);
+ current->push(rest);
+ return true;
+ }
+
+ // We know the exact number of arguments the callee pushed.
+ unsigned numActuals = inlineCallInfo_->argc();
+ unsigned numFormals = info().nargs() - 1;
+ unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;
+
+ if (!jsop_newarray(numRest))
+ return false;
+
+ if (numRest == 0) {
+ // No more updating to do. (Note that in this one case the length from
+ // the template object is already correct.)
+ return true;
+ }
+
+    MDefinition* array = current->peek(-1);
+ MElements* elements = MElements::New(alloc(), array);
+ current->add(elements);
+
+ // Unroll the argument copy loop. We don't need to do any bounds or hole
+ // checking here.
+ MConstant* index = nullptr;
+ for (unsigned i = numFormals; i < numActuals; i++) {
+ index = MConstant::New(alloc(), Int32Value(i - numFormals));
+ current->add(index);
+
+ MDefinition* arg = inlineCallInfo_->argv()[i];
+ MStoreElement* store = MStoreElement::New(alloc(), elements, index, arg,
+ /* needsHoleCheck = */ false);
+ current->add(store);
+
+ if (NeedsPostBarrier(arg))
+ current->add(MPostWriteBarrier::New(alloc(), array, arg));
+ }
+
+ // The array's length is incorrectly 0 now, from the template object
+ // created by BaselineCompiler::emit_JSOP_REST() before the actual argument
+ // count was known. Set the correct length now that we know that count.
+ MSetArrayLength* length = MSetArrayLength::New(alloc(), elements, index);
+ current->add(length);
+
+ // Update the initialized length for all the (necessarily non-hole)
+ // elements added.
+ MSetInitializedLength* initLength = MSetInitializedLength::New(alloc(), elements, index);
+ current->add(initLength);
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_checkisobj(uint8_t kind)
+{
+ MDefinition* toCheck = current->peek(-1);
+
+ if (toCheck->type() == MIRType::Object) {
+ toCheck->setImplicitlyUsedUnchecked();
+ return true;
+ }
+
+ MCheckIsObj* check = MCheckIsObj::New(alloc(), current->pop(), kind);
+ current->add(check);
+ current->push(check);
+ return true;
+}
+
+bool
+IonBuilder::jsop_checkobjcoercible()
+{
+ MDefinition* toCheck = current->peek(-1);
+
+ if (!toCheck->mightBeType(MIRType::Undefined) &&
+ !toCheck->mightBeType(MIRType::Null))
+ {
+ toCheck->setImplicitlyUsedUnchecked();
+ return true;
+ }
+
+ MOZ_ASSERT(toCheck->type() == MIRType::Value ||
+ toCheck->type() == MIRType::Null ||
+ toCheck->type() == MIRType::Undefined);
+
+ // If we want to squeeze more perf here, we can throw without checking,
+ // if IsNullOrUndefined(toCheck->type()). Since this is a failure case,
+ // it should be OK.
+ MCheckObjCoercible* check = MCheckObjCoercible::New(alloc(), current->pop());
+ current->add(check);
+ current->push(check);
+ return resumeAfter(check);
+}
+
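+// Return the definite slot shared by every object in |types| for |name|, or
+// UINT32_MAX if there is no single such slot. On success, *pnfixed receives
+// the fixed-slot count to use when deciding between fixed and dynamic slots.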
+uint32_t
+IonBuilder::getDefiniteSlot(TemporaryTypeSet* types, PropertyName* name, uint32_t* pnfixed)
+{
+ if (!types || types->unknownObject() || !types->objectOrSentinel()) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return UINT32_MAX;
+ }
+
+ uint32_t slot = UINT32_MAX;
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties()) {
+ trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
+ return UINT32_MAX;
+ }
+
+ if (key->isSingleton()) {
+ trackOptimizationOutcome(TrackedOutcome::Singleton);
+ return UINT32_MAX;
+ }
+
+ HeapTypeSetKey property = key->property(NameToId(name));
+ if (!property.maybeTypes() ||
+ !property.maybeTypes()->definiteProperty() ||
+ property.nonData(constraints()))
+ {
+ trackOptimizationOutcome(TrackedOutcome::NotFixedSlot);
+ return UINT32_MAX;
+ }
+
+ // Definite slots will always be fixed slots when they are in the
+ // allowable range for fixed slots, except for objects which were
+ // converted from unboxed objects and have a smaller allocation size.
+ size_t nfixed = NativeObject::MAX_FIXED_SLOTS;
+ if (ObjectGroup* group = key->group()->maybeOriginalUnboxedGroup())
+ nfixed = gc::GetGCKindSlots(group->unboxedLayout().getAllocKind());
+
+ uint32_t propertySlot = property.maybeTypes()->definiteSlot();
+ if (slot == UINT32_MAX) {
+ slot = propertySlot;
+ *pnfixed = nfixed;
+ } else if (slot != propertySlot || nfixed != *pnfixed) {
+ trackOptimizationOutcome(TrackedOutcome::InconsistentFixedSlot);
+ return UINT32_MAX;
+ }
+ }
+
+ return slot;
+}
+
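+// Return the offset of |name| inside the unboxed objects in |types|, or
+// UINT32_MAX if the objects do not all share the same unboxed layout for it.
+// On success, *punboxedType receives the property's unboxed type.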
+uint32_t
+IonBuilder::getUnboxedOffset(TemporaryTypeSet* types, PropertyName* name, JSValueType* punboxedType)
+{
+ if (!types || types->unknownObject() || !types->objectOrSentinel()) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return UINT32_MAX;
+ }
+
+ uint32_t offset = UINT32_MAX;
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties()) {
+ trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
+ return UINT32_MAX;
+ }
+
+ if (key->isSingleton()) {
+ trackOptimizationOutcome(TrackedOutcome::Singleton);
+ return UINT32_MAX;
+ }
+
+ UnboxedLayout* layout = key->group()->maybeUnboxedLayout();
+ if (!layout) {
+ trackOptimizationOutcome(TrackedOutcome::NotUnboxed);
+ return UINT32_MAX;
+ }
+
+ const UnboxedLayout::Property* property = layout->lookup(name);
+ if (!property) {
+ trackOptimizationOutcome(TrackedOutcome::StructNoField);
+ return UINT32_MAX;
+ }
+
+ if (layout->nativeGroup()) {
+ trackOptimizationOutcome(TrackedOutcome::UnboxedConvertedToNative);
+ return UINT32_MAX;
+ }
+
+ key->watchStateChangeForUnboxedConvertedToNative(constraints());
+
+ if (offset == UINT32_MAX) {
+ offset = property->offset;
+ *punboxedType = property->type;
+ } else if (offset != property->offset) {
+ trackOptimizationOutcome(TrackedOutcome::InconsistentFieldOffset);
+ return UINT32_MAX;
+ } else if (*punboxedType != property->type) {
+ trackOptimizationOutcome(TrackedOutcome::InconsistentFieldType);
+ return UINT32_MAX;
+ }
+ }
+
+ return offset;
+}
+
+bool
+IonBuilder::jsop_runonce()
+{
+ MRunOncePrologue* ins = MRunOncePrologue::New(alloc());
+ current->add(ins);
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_not()
+{
+ MDefinition* value = current->pop();
+
+ MNot* ins = MNot::New(alloc(), value, constraints());
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool
+IonBuilder::objectsHaveCommonPrototype(TemporaryTypeSet* types, PropertyName* name,
+ bool isGetter, JSObject* foundProto, bool* guardGlobal)
+{
+    // Given that foundProto is a prototype with a getter or setter for |name|,
+    // return whether looking up |name| on any object in |types| will go through
+    // foundProto, i.e., all the objects have foundProto on their prototype
+    // chain and none of them has an own property for |name| before reaching
+    // foundProto.
+
+ // No sense looking if we don't know what's going on.
+ if (!types || types->unknownObject())
+ return false;
+ *guardGlobal = false;
+
+ for (unsigned i = 0; i < types->getObjectCount(); i++) {
+ if (types->getSingleton(i) == foundProto)
+ continue;
+
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ while (key) {
+ if (key->unknownProperties())
+ return false;
+
+ const Class* clasp = key->clasp();
+ if (!ClassHasEffectlessLookup(clasp))
+ return false;
+ JSObject* singleton = key->isSingleton() ? key->singleton() : nullptr;
+ if (ObjectHasExtraOwnProperty(compartment, key, NameToId(name))) {
+ if (!singleton || !singleton->is<GlobalObject>())
+ return false;
+ *guardGlobal = true;
+ }
+
+ // Look for a getter/setter on the class itself which may need
+ // to be called.
+ if (isGetter && clasp->getOpsGetProperty())
+ return false;
+ if (!isGetter && clasp->getOpsSetProperty())
+ return false;
+
+ // Test for isOwnProperty() without freezing. If we end up
+            // optimizing, freezePropertiesForCommonPrototype will freeze the
+ // property type sets later on.
+ HeapTypeSetKey property = key->property(NameToId(name));
+ if (TypeSet* types = property.maybeTypes()) {
+ if (!types->empty() || types->nonDataProperty())
+ return false;
+ }
+ if (singleton) {
+ if (CanHaveEmptyPropertyTypesForOwnProperty(singleton)) {
+ MOZ_ASSERT(singleton->is<GlobalObject>());
+ *guardGlobal = true;
+ }
+ }
+
+ JSObject* proto = checkNurseryObject(key->proto().toObjectOrNull());
+
+ if (proto == foundProto)
+ break;
+ if (!proto) {
+ // The foundProto being searched for did not show up on the
+ // object's prototype chain.
+ return false;
+ }
+ key = TypeSet::ObjectKey::get(proto);
+ }
+ }
+
+ return true;
+}
+
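+// Freeze the property typesets along each prototype chain up to (but not
+// including) foundProto, so that adding a shadowing property later will
+// trigger invalidation of this compilation.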
+void
+IonBuilder::freezePropertiesForCommonPrototype(TemporaryTypeSet* types, PropertyName* name,
+ JSObject* foundProto,
+ bool allowEmptyTypesforGlobal/* = false*/)
+{
+ for (unsigned i = 0; i < types->getObjectCount(); i++) {
+ // If we found a Singleton object's own-property, there's nothing to
+ // freeze.
+ if (types->getSingleton(i) == foundProto)
+ continue;
+
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ while (true) {
+ HeapTypeSetKey property = key->property(NameToId(name));
+ JS_ALWAYS_TRUE(!property.isOwnProperty(constraints(), allowEmptyTypesforGlobal));
+
+ // Don't mark the proto. It will be held down by the shape
+ // guard. This allows us to use properties found on prototypes
+ // with properties unknown to TI.
+ if (key->proto() == TaggedProto(foundProto))
+ break;
+ key = TypeSet::ObjectKey::get(key->proto().toObjectOrNull());
+ }
+ }
+}
+
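+// Check whether the getter or setter found on foundProto by Baseline can be
+// used for every object in |types|. On success, freeze the involved property
+// typesets and add the required shape guard(s), returned through |guard| and
+// |globalGuard|.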
+bool
+IonBuilder::testCommonGetterSetter(TemporaryTypeSet* types, PropertyName* name,
+ bool isGetter, JSObject* foundProto, Shape* lastProperty,
+ JSFunction* getterOrSetter,
+ MDefinition** guard,
+ Shape* globalShape/* = nullptr*/,
+ MDefinition** globalGuard/* = nullptr */)
+{
+ MOZ_ASSERT_IF(globalShape, globalGuard);
+ bool guardGlobal;
+
+ // Check if all objects being accessed will lookup the name through foundProto.
+ if (!objectsHaveCommonPrototype(types, name, isGetter, foundProto, &guardGlobal) ||
+ (guardGlobal && !globalShape))
+ {
+ trackOptimizationOutcome(TrackedOutcome::MultiProtoPaths);
+ return false;
+ }
+
+ // We can optimize the getter/setter, so freeze all involved properties to
+ // ensure there isn't a lower shadowing getter or setter installed in the
+ // future.
+ freezePropertiesForCommonPrototype(types, name, foundProto, guardGlobal);
+
+ // Add a shape guard on the prototype we found the property on. The rest of
+ // the prototype chain is guarded by TI freezes, except when name is a global
+    // name. In this case, we also have to guard on the global's shape to be able
+ // to optimize, because the way global property sets are handled means
+ // freezing doesn't work for what we want here. Note that a shape guard is
+ // good enough here, even in the proxy case, because we have ensured there
+ // are no lookup hooks for this property.
+ if (guardGlobal) {
+ JSObject* obj = &script()->global();
+ MDefinition* globalObj = constant(ObjectValue(*obj));
+ *globalGuard = addShapeGuard(globalObj, globalShape, Bailout_ShapeGuard);
+ }
+
+ if (foundProto->isNative()) {
+ NativeObject& nativeProto = foundProto->as<NativeObject>();
+ if (nativeProto.lastProperty() == lastProperty) {
+ // The proto shape is the same as it was at the point when we
+ // created the baseline IC, so looking up the prop on the object as
+ // it is now should be safe.
+ Shape* propShape = nativeProto.lookupPure(name);
+            MOZ_ASSERT_IF(propShape && isGetter, propShape->getterObject() == getterOrSetter);
+            MOZ_ASSERT_IF(propShape && !isGetter, propShape->setterObject() == getterOrSetter);
+ if (propShape && !propShape->configurable())
+ return true;
+ }
+ }
+
+ MInstruction* wrapper = constant(ObjectValue(*foundProto));
+ *guard = addShapeGuard(wrapper, lastProperty, Bailout_ShapeGuard);
+ return true;
+}
+
+void
+IonBuilder::replaceMaybeFallbackFunctionGetter(MGetPropertyCache* cache)
+{
+ // Discard the last prior resume point of the previous MGetPropertyCache.
+ WrapMGetPropertyCache rai(maybeFallbackFunctionGetter_);
+ maybeFallbackFunctionGetter_ = cache;
+}
+
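+// Attach an InlinePropertyTable to the GetPropertyCache when every receiver
+// group maps |name| to a known singleton JSFunction, which lets later passes
+// inline the call through a polymorphic dispatch.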
+bool
+IonBuilder::annotateGetPropertyCache(MDefinition* obj, PropertyName* name,
+ MGetPropertyCache* getPropCache, TemporaryTypeSet* objTypes,
+ TemporaryTypeSet* pushedTypes)
+{
+ // Ensure every pushed value is a singleton.
+ if (pushedTypes->unknownObject() || pushedTypes->baseFlags() != 0)
+ return true;
+
+ for (unsigned i = 0; i < pushedTypes->getObjectCount(); i++) {
+ if (pushedTypes->getGroup(i) != nullptr)
+ return true;
+ }
+
+    // The object's typeset should contain only proper objects.
+ if (!objTypes || objTypes->baseFlags() || objTypes->unknownObject())
+ return true;
+
+ unsigned int objCount = objTypes->getObjectCount();
+ if (objCount == 0)
+ return true;
+
+ InlinePropertyTable* inlinePropTable = getPropCache->initInlinePropertyTable(alloc(), pc);
+ if (!inlinePropTable)
+ return false;
+
+ // Ensure that the relevant property typeset for each group is
+    // a single-object typeset containing a JSFunction.
+ for (unsigned int i = 0; i < objCount; i++) {
+ ObjectGroup* group = objTypes->getGroup(i);
+ if (!group)
+ continue;
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(group);
+ if (key->unknownProperties() || !key->proto().isObject())
+ continue;
+ JSObject* proto = checkNurseryObject(key->proto().toObject());
+
+ const Class* clasp = key->clasp();
+ if (!ClassHasEffectlessLookup(clasp) || ObjectHasExtraOwnProperty(compartment, key, NameToId(name)))
+ continue;
+
+ HeapTypeSetKey ownTypes = key->property(NameToId(name));
+ if (ownTypes.isOwnProperty(constraints()))
+ continue;
+
+ JSObject* singleton = testSingletonProperty(proto, NameToId(name));
+ if (!singleton || !singleton->is<JSFunction>())
+ continue;
+
+ // Don't add cases corresponding to non-observed pushes
+ if (!pushedTypes->hasType(TypeSet::ObjectType(singleton)))
+ continue;
+
+ if (!inlinePropTable->addEntry(alloc(), group, &singleton->as<JSFunction>()))
+ return false;
+ }
+
+ if (inlinePropTable->numEntries() == 0) {
+ getPropCache->clearInlinePropertyTable();
+ return true;
+ }
+
+#ifdef JS_JITSPEW
+ if (inlinePropTable->numEntries() > 0)
+ JitSpew(JitSpew_Inlining, "Annotated GetPropertyCache with %d/%d inline cases",
+ (int) inlinePropTable->numEntries(), (int) objCount);
+#endif
+
+ // If we successfully annotated the GetPropertyCache and there are inline cases,
+ // then keep a resume point of the state right before this instruction for use
+ // later when we have to bail out to this point in the fallback case of a
+ // PolyInlineDispatch.
+ if (inlinePropTable->numEntries() > 0) {
+ // Push the object back onto the stack temporarily to capture the resume point.
+ current->push(obj);
+ MResumePoint* resumePoint = MResumePoint::New(alloc(), current, pc,
+ MResumePoint::ResumeAt);
+ if (!resumePoint)
+ return false;
+ inlinePropTable->setPriorResumePoint(resumePoint);
+ replaceMaybeFallbackFunctionGetter(getPropCache);
+ current->pop();
+ }
+ return true;
+}
+
+// Returns true if an idempotent cache has ever invalidated this script
+// or an outer script.
+bool
+IonBuilder::invalidatedIdempotentCache()
+{
+ IonBuilder* builder = this;
+ do {
+ if (builder->script()->invalidatedIdempotentCache())
+ return true;
+ builder = builder->callerBuilder_;
+ } while (builder);
+
+ return false;
+}
+
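+// Load |slot| from |obj|, using MLoadFixedSlot when the slot is within the
+// fixed-slot range and MSlots + MLoadSlot otherwise, then apply the given
+// type barrier to the result.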
+bool
+IonBuilder::loadSlot(MDefinition* obj, size_t slot, size_t nfixed, MIRType rvalType,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ if (slot < nfixed) {
+ MLoadFixedSlot* load = MLoadFixedSlot::New(alloc(), obj, slot);
+ current->add(load);
+ current->push(load);
+
+ load->setResultType(rvalType);
+ return pushTypeBarrier(load, types, barrier);
+ }
+
+ MSlots* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ MLoadSlot* load = MLoadSlot::New(alloc(), slots, slot - nfixed);
+ current->add(load);
+ current->push(load);
+
+ load->setResultType(rvalType);
+ return pushTypeBarrier(load, types, barrier);
+}
+
+bool
+IonBuilder::loadSlot(MDefinition* obj, Shape* shape, MIRType rvalType,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ return loadSlot(obj, shape->slot(), shape->numFixedSlots(), rvalType, barrier, types);
+}
+
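+// Store |value| into |slot| of |obj|, choosing MStoreFixedSlot or
+// MSlots + MStoreSlot as for loadSlot, and set the write barrier and slot
+// type when requested.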
+bool
+IonBuilder::storeSlot(MDefinition* obj, size_t slot, size_t nfixed,
+ MDefinition* value, bool needsBarrier,
+ MIRType slotType /* = MIRType::None */)
+{
+ if (slot < nfixed) {
+ MStoreFixedSlot* store = MStoreFixedSlot::New(alloc(), obj, slot, value);
+ current->add(store);
+ current->push(value);
+ if (needsBarrier)
+ store->setNeedsBarrier();
+ return resumeAfter(store);
+ }
+
+ MSlots* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ MStoreSlot* store = MStoreSlot::New(alloc(), slots, slot - nfixed, value);
+ current->add(store);
+ current->push(value);
+ if (needsBarrier)
+ store->setNeedsBarrier();
+ if (slotType != MIRType::None)
+ store->setSlotType(slotType);
+ return resumeAfter(store);
+}
+
+bool
+IonBuilder::storeSlot(MDefinition* obj, Shape* shape, MDefinition* value, bool needsBarrier,
+ MIRType slotType /* = MIRType::None */)
+{
+ MOZ_ASSERT(shape->writable());
+ return storeSlot(obj, shape->slot(), shape->numFixedSlots(), value, needsBarrier, slotType);
+}
+
+bool
+IonBuilder::shouldAbortOnPreliminaryGroups(MDefinition* obj)
+{
+ // Watch for groups which still have preliminary object information and
+ // have not had the new script properties or unboxed layout analyses
+ // performed. Normally this is done after a small number of the objects
+ // have been created, but if only a few have been created we can still
+ // perform the analysis with a smaller object population. The analysis can
+ // have side effects so we will end up aborting compilation after building
+ // finishes and retrying later.
+    TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject())
+ return false;
+
+ bool preliminary = false;
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (ObjectGroup* group = key->maybeGroup()) {
+ if (group->hasUnanalyzedPreliminaryObjects()) {
+ addAbortedPreliminaryGroup(group);
+ preliminary = true;
+ }
+ }
+ }
+
+ return preliminary;
+}
+
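+// If Baseline observed a more specific input type for this property access,
+// insert a fallible unbox so later code can specialize on it.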
+MDefinition*
+IonBuilder::maybeUnboxForPropertyAccess(MDefinition* def)
+{
+ if (def->type() != MIRType::Value)
+ return def;
+
+ MIRType type = inspector->expectedPropertyAccessInputType(pc);
+ if (type == MIRType::Value || !def->mightBeType(type))
+ return def;
+
+ MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
+ current->add(unbox);
+
+    // Fix up type information for a common case where a property call
+ // is converted to the following bytecodes
+ //
+ // a.foo()
+ // ================= Compiles to ================
+ // LOAD "a"
+ // DUP
+ // CALLPROP "foo"
+ // SWAP
+ // CALL 0
+ //
+ // If we have better type information to unbox the first copy going into
+ // the CALLPROP operation, we can replace the duplicated copy on the
+ // stack as well.
+ if (*pc == JSOP_CALLPROP || *pc == JSOP_CALLELEM) {
+ uint32_t idx = current->stackDepth() - 1;
+ MOZ_ASSERT(current->getSlot(idx) == def);
+ current->setSlot(idx, unbox);
+ }
+
+ return unbox;
+}
+
+bool
+IonBuilder::jsop_getprop(PropertyName* name)
+{
+ bool emitted = false;
+ startTrackingOptimizations();
+
+ MDefinition* obj = current->pop();
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+
+ trackTypeInfo(TrackedTypeSite::Receiver, obj->type(), obj->resultTypeSet());
+
+ if (!info().isAnalysis()) {
+ // The calls below can abort compilation, so we only try this if we're
+ // not analyzing.
+ // Try to optimize arguments.length.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_ArgumentsLength);
+ if (!getPropTryArgumentsLength(&emitted, obj) || emitted)
+ return emitted;
+
+ // Try to optimize arguments.callee.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_ArgumentsCallee);
+ if (!getPropTryArgumentsCallee(&emitted, obj, name) || emitted)
+ return emitted;
+ }
+
+ obj = maybeUnboxForPropertyAccess(obj);
+ if (obj->type() == MIRType::Object)
+ obj = convertUnboxedObjects(obj);
+
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
+ obj, name, types);
+
+ // Try to optimize to a specific constant.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_InferredConstant);
+ if (barrier == BarrierKind::NoBarrier) {
+ if (!getPropTryInferredConstant(&emitted, obj, name, types) || emitted)
+ return emitted;
+ } else {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ }
+
+ // Always use a call if we are performing analysis and
+ // not actually emitting code, to simplify later analysis. Also skip deeper
+ // analysis if there are no known types for this operation, as it will
+ // always invalidate when executing.
+ if (info().isAnalysis() || types->empty() || shouldAbortOnPreliminaryGroups(obj)) {
+ if (types->empty()) {
+ // Since no further optimizations will be tried, use the IC
+ // strategy, which would have been the last one to be tried, as a
+ // sentinel value for why everything failed.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_InlineCache);
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ }
+
+ MCallGetProperty* call = MCallGetProperty::New(alloc(), obj, name);
+ current->add(call);
+
+ // During the definite properties analysis we can still try to bake in
+ // constants read off the prototype chain, to allow inlining later on.
+ // In this case we still need the getprop call so that the later
+ // analysis knows when the |this| value has been read from.
+ if (info().isAnalysis()) {
+ if (!getPropTryConstant(&emitted, obj, NameToId(name), types) || emitted)
+ return emitted;
+ }
+
+ current->push(call);
+ return resumeAfter(call) && pushTypeBarrier(call, types, BarrierKind::TypeSet);
+ }
+
+ // Try to optimize accesses on outer window proxies, for example window.foo.
+ // This needs to come before the various strategies getPropTryInnerize tries
+ // internally, since some of those strategies will "succeed" in silly ways
+ // even for an outer object.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Innerize);
+ if (!getPropTryInnerize(&emitted, obj, name, types) || emitted)
+ return emitted;
+
+ if (!forceInlineCaches()) {
+ // Try to hardcode known constants.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Constant);
+ if (!getPropTryConstant(&emitted, obj, NameToId(name), types) || emitted)
+ return emitted;
+
+ // Try to hardcode known not-defined
+ trackOptimizationAttempt(TrackedStrategy::GetProp_NotDefined);
+ if (!getPropTryNotDefined(&emitted, obj, NameToId(name), types) || emitted)
+ return emitted;
+
+ // Try to emit loads from known binary data blocks
+ trackOptimizationAttempt(TrackedStrategy::GetProp_TypedObject);
+ if (!getPropTryTypedObject(&emitted, obj, name) || emitted)
+ return emitted;
+
+ // Try to emit loads from definite slots.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_DefiniteSlot);
+ if (!getPropTryDefiniteSlot(&emitted, obj, name, barrier, types) || emitted)
+ return emitted;
+
+ // Try to emit loads from unboxed objects.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Unboxed);
+ if (!getPropTryUnboxed(&emitted, obj, name, barrier, types) || emitted)
+ return emitted;
+
+ // Try to inline a common property getter, or make a call.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter);
+ if (!getPropTryCommonGetter(&emitted, obj, name, types) || emitted)
+ return emitted;
+
+ // Try to emit a monomorphic/polymorphic access based on baseline caches.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_InlineAccess);
+ if (!getPropTryInlineAccess(&emitted, obj, name, barrier, types) || emitted)
+ return emitted;
+
+ // Try to emit loads from a module namespace.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_ModuleNamespace);
+ if (!getPropTryModuleNamespace(&emitted, obj, name, barrier, types) || emitted)
+ return emitted;
+ }
+
+ // Try to emit a polymorphic cache.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_InlineCache);
+ if (!getPropTryCache(&emitted, obj, name, barrier, types) || emitted)
+ return emitted;
+
+ // Try to emit a shared stub.
+ trackOptimizationAttempt(TrackedStrategy::GetProp_SharedCache);
+ if (!getPropTrySharedStub(&emitted, obj, types) || emitted)
+ return emitted;
+
+ // Emit a call.
+ MCallGetProperty* call = MCallGetProperty::New(alloc(), obj, name);
+ current->add(call);
+ current->push(call);
+ if (!resumeAfter(call))
+ return false;
+
+ if (*pc == JSOP_CALLPROP && IsNullOrUndefined(obj->type())) {
+ // Due to inlining, it's possible the observed TypeSet is non-empty,
+ // even though we know |obj| is null/undefined and the MCallGetProperty
+ // will throw. Don't push a TypeBarrier in this case, to avoid
+ // inlining the following (unreachable) JSOP_CALL.
+ return true;
+ }
+
+ return pushTypeBarrier(call, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::improveThisTypesForCall()
+{
+ // After a CALLPROP (or CALLELEM) for obj.prop(), the this-value and callee
+ // for the call are on top of the stack:
+ //
+ // ... [this: obj], [callee: obj.prop]
+ //
+ // If obj is null or undefined, obj.prop would have thrown an exception so
+ // at this point we can remove null and undefined from obj's TypeSet, to
+ // improve type information for the call that will follow.
+
+ MOZ_ASSERT(*pc == JSOP_CALLPROP || *pc == JSOP_CALLELEM);
+
+ // Ensure |this| has types {object, null/undefined}.
+ MDefinition* thisDef = current->peek(-2);
+ if (thisDef->type() != MIRType::Value ||
+ !thisDef->mightBeType(MIRType::Object) ||
+ !thisDef->resultTypeSet() ||
+ !thisDef->resultTypeSet()->objectOrSentinel())
+ {
+ return true;
+ }
+
+ // Remove null/undefined from the TypeSet.
+ TemporaryTypeSet* types = thisDef->resultTypeSet()->cloneObjectsOnly(alloc_->lifoAlloc());
+ if (!types)
+ return false;
+
+ MFilterTypeSet* filter = MFilterTypeSet::New(alloc(), thisDef, types);
+ current->add(filter);
+ current->rewriteAtDepth(-2, filter);
+
+ // FilterTypeSetPolicy::adjustInputs will insert an infallible Unbox(Object)
+ // for the input. Don't hoist this unbox above the getprop or getelem
+ // operation.
+ filter->setDependency(current->peek(-1)->toInstruction());
+ return true;
+}
+
+bool
+IonBuilder::checkIsDefinitelyOptimizedArguments(MDefinition* obj, bool* isOptimizedArgs)
+{
+ if (obj->type() != MIRType::MagicOptimizedArguments) {
+ if (script()->argumentsHasVarBinding() &&
+ obj->mightBeType(MIRType::MagicOptimizedArguments))
+ {
+ return abort("Type is not definitely lazy arguments.");
+ }
+
+ *isOptimizedArgs = false;
+ return true;
+ }
+
+ *isOptimizedArgs = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryInferredConstant(bool* emitted, MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Need a result typeset to optimize.
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ if (!objTypes) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return true;
+ }
+
+ JSObject* singleton = objTypes->maybeSingleton();
+ if (!singleton) {
+ trackOptimizationOutcome(TrackedOutcome::NotSingleton);
+ return true;
+ }
+
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(singleton);
+ if (key->unknownProperties()) {
+ trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
+ return true;
+ }
+
+ HeapTypeSetKey property = key->property(NameToId(name));
+
+ Value constantValue = UndefinedValue();
+ if (property.constant(constraints(), &constantValue)) {
+ spew("Optimized constant property");
+ obj->setImplicitlyUsedUnchecked();
+ pushConstant(constantValue);
+ types->addType(TypeSet::GetValueType(constantValue), alloc_->lifoAlloc());
+ trackOptimizationSuccess();
+ *emitted = true;
+ }
+
+ return true;
+}
+
+bool
+IonBuilder::getPropTryArgumentsLength(bool* emitted, MDefinition* obj)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (JSOp(*pc) != JSOP_LENGTH)
+ return true;
+
+ bool isOptimizedArgs = false;
+ if (!checkIsDefinitelyOptimizedArguments(obj, &isOptimizedArgs))
+ return false;
+ if (!isOptimizedArgs)
+ return true;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+
+ obj->setImplicitlyUsedUnchecked();
+
+ // We don't know anything from the callee
+ if (inliningDepth_ == 0) {
+ MInstruction* ins = MArgumentsLength::New(alloc());
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+
+ // We are inlining and know the number of arguments the callee pushed
+ pushConstant(Int32Value(inlineCallInfo_->argv().length()));
+ return true;
+}
+
+bool
+IonBuilder::getPropTryArgumentsCallee(bool* emitted, MDefinition* obj, PropertyName* name)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (name != names().callee)
+ return true;
+
+ bool isOptimizedArgs = false;
+ if (!checkIsDefinitelyOptimizedArguments(obj, &isOptimizedArgs))
+ return false;
+ if (!isOptimizedArgs)
+ return true;
+
+ MOZ_ASSERT(script()->hasMappedArgsObj());
+
+ obj->setImplicitlyUsedUnchecked();
+ current->push(getCallee());
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryConstant(bool* emitted, MDefinition* obj, jsid id, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!types->mightBeMIRType(MIRType::Object)) {
+ // If we have not observed an object result here, don't look for a
+ // singleton constant.
+ trackOptimizationOutcome(TrackedOutcome::NotObject);
+ return true;
+ }
+
+ JSObject* singleton = testSingletonPropertyTypes(obj, id);
+ if (!singleton) {
+ trackOptimizationOutcome(TrackedOutcome::NotSingleton);
+ return true;
+ }
+
+ // Property access is a known constant -- safe to emit.
+ obj->setImplicitlyUsedUnchecked();
+
+ pushConstant(ObjectValue(*singleton));
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryNotDefined(bool* emitted, MDefinition* obj, jsid id, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!types->mightBeMIRType(MIRType::Undefined)) {
+ // Only optimize if we expect this property access to return undefined.
+ trackOptimizationOutcome(TrackedOutcome::NotUndefined);
+ return true;
+ }
+
+ ResultWithOOM<bool> res = testNotDefinedProperty(obj, id);
+ if (res.oom)
+ return false;
+ if (!res.value) {
+ trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+ return true;
+ }
+
+ obj->setImplicitlyUsedUnchecked();
+ pushConstant(UndefinedValue());
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryTypedObject(bool* emitted,
+ MDefinition* obj,
+ PropertyName* name)
+{
+ TypedObjectPrediction fieldPrediction;
+ size_t fieldOffset;
+ size_t fieldIndex;
+ if (!typedObjectHasField(obj, name, &fieldOffset, &fieldPrediction, &fieldIndex))
+ return true;
+
+ switch (fieldPrediction.kind()) {
+ case type::Simd:
+ // FIXME (bug 894104): load into a MIRType::float32x4 etc
+ return true;
+
+ case type::Struct:
+ case type::Array:
+ return getPropTryComplexPropOfTypedObject(emitted,
+ obj,
+ fieldOffset,
+ fieldPrediction,
+ fieldIndex);
+
+ case type::Reference:
+ return getPropTryReferencePropOfTypedObject(emitted,
+ obj,
+ fieldOffset,
+ fieldPrediction,
+ name);
+
+ case type::Scalar:
+ return getPropTryScalarPropOfTypedObject(emitted,
+ obj,
+ fieldOffset,
+ fieldPrediction);
+ }
+
+ MOZ_CRASH("Bad kind");
+}
+
+bool
+IonBuilder::getPropTryScalarPropOfTypedObject(bool* emitted, MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldPrediction)
+{
+ // Must always be loading the same scalar type
+ Scalar::Type fieldType = fieldPrediction.scalarType();
+
+ // Don't optimize if the typed object's underlying buffer may be detached.
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return true;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+
+ LinearSum byteOffset(alloc());
+ if (!byteOffset.add(fieldOffset))
+ setForceAbort();
+
+ return pushScalarLoadFromTypedObject(typedObj, byteOffset, fieldType);
+}
+
+bool
+IonBuilder::getPropTryReferencePropOfTypedObject(bool* emitted, MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldPrediction,
+ PropertyName* name)
+{
+ ReferenceTypeDescr::Type fieldType = fieldPrediction.referenceType();
+
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return true;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+
+ LinearSum byteOffset(alloc());
+ if (!byteOffset.add(fieldOffset))
+ setForceAbort();
+
+ return pushReferenceLoadFromTypedObject(typedObj, byteOffset, fieldType, name);
+}
+
+bool
+IonBuilder::getPropTryComplexPropOfTypedObject(bool* emitted,
+ MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldPrediction,
+ size_t fieldIndex)
+{
+ // Don't optimize if the typed object's underlying buffer may be detached.
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return true;
+
+ // OK, perform the optimization
+
+ // Identify the type object for the field.
+ MDefinition* type = loadTypedObjectType(typedObj);
+ MDefinition* fieldTypeObj = typeObjectForFieldFromStructType(type, fieldIndex);
+
+ LinearSum byteOffset(alloc());
+ if (!byteOffset.add(fieldOffset))
+ setForceAbort();
+
+ return pushDerivedTypedObject(emitted, typedObj, byteOffset,
+ fieldPrediction, fieldTypeObj);
+}
+
+MDefinition*
+IonBuilder::convertUnboxedObjects(MDefinition* obj)
+{
+ // If obj might be in any particular unboxed group which should be
+ // converted to a native representation, perform that conversion. This does
+ // not guarantee the object will not have such a group afterwards, if the
+ // object's possible groups are not precisely known.
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject() || !types->objectOrSentinel())
+ return obj;
+
+ BaselineInspector::ObjectGroupVector list(alloc());
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = obj->resultTypeSet()->getObject(i);
+ if (!key || !key->isGroup())
+ continue;
+
+ if (UnboxedLayout* layout = key->group()->maybeUnboxedLayout()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (layout->nativeGroup() && !list.append(key->group()))
+ oomUnsafe.crash("IonBuilder::convertUnboxedObjects");
+ }
+ }
+
+ return convertUnboxedObjects(obj, list);
+}
+
+MDefinition*
+IonBuilder::convertUnboxedObjects(MDefinition* obj,
+ const BaselineInspector::ObjectGroupVector& list)
+{
+ for (size_t i = 0; i < list.length(); i++) {
+ ObjectGroup* group = list[i];
+ if (TemporaryTypeSet* types = obj->resultTypeSet()) {
+ if (!types->hasType(TypeSet::ObjectType(group)))
+ continue;
+ }
+ obj = MConvertUnboxedObjectToNative::New(alloc(), obj, group);
+ current->add(obj->toInstruction());
+ }
+ return obj;
+}
+
+bool
+IonBuilder::getPropTryDefiniteSlot(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ uint32_t nfixed;
+ uint32_t slot = getDefiniteSlot(obj->resultTypeSet(), name, &nfixed);
+ if (slot == UINT32_MAX)
+ return true;
+
+ if (obj->type() != MIRType::Object) {
+ MGuardObject* guard = MGuardObject::New(alloc(), obj);
+ current->add(guard);
+ obj = guard;
+ }
+
+ MInstruction* load;
+ if (slot < nfixed) {
+ load = MLoadFixedSlot::New(alloc(), obj, slot);
+ } else {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ load = MLoadSlot::New(alloc(), slots, slot - nfixed);
+ }
+
+ if (barrier == BarrierKind::NoBarrier)
+ load->setResultType(types->getKnownMIRType());
+
+ current->add(load);
+ current->push(load);
+
+ if (!pushTypeBarrier(load, types, barrier))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryModuleNamespace(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ if (!objTypes) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return true;
+ }
+
+ JSObject* singleton = objTypes->maybeSingleton();
+ if (!singleton) {
+ trackOptimizationOutcome(TrackedOutcome::NotSingleton);
+ return true;
+ }
+
+ if (!singleton->is<ModuleNamespaceObject>()) {
+ trackOptimizationOutcome(TrackedOutcome::NotModuleNamespace);
+ return true;
+ }
+
+ ModuleNamespaceObject* ns = &singleton->as<ModuleNamespaceObject>();
+ ModuleEnvironmentObject* env;
+ Shape* shape;
+ if (!ns->bindings().lookup(NameToId(name), &env, &shape)) {
+ trackOptimizationOutcome(TrackedOutcome::UnknownProperty);
+ return true;
+ }
+
+ obj->setImplicitlyUsedUnchecked();
+ MConstant* envConst = constant(ObjectValue(*env));
+ uint32_t slot = shape->slot();
+ uint32_t nfixed = env->numFixedSlots();
+ if (!loadSlot(envConst, slot, nfixed, types->getKnownMIRType(), barrier, types))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+MInstruction*
+IonBuilder::loadUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ // loadUnboxedValue is designed to load any value as if it were contained in
+ // an array. Thus a property offset is converted to an index, when the
+ // object is reinterpreted as an array of properties of the same size.
+ size_t index = offset / UnboxedTypeSize(unboxedType);
+ MInstruction* indexConstant = MConstant::New(alloc(), Int32Value(index));
+ current->add(indexConstant);
+
+ return loadUnboxedValue(obj, UnboxedPlainObject::offsetOfData(),
+ indexConstant, unboxedType, barrier, types);
+}
+
+MInstruction*
+IonBuilder::loadUnboxedValue(MDefinition* elements, size_t elementsOffset,
+ MDefinition* index, JSValueType unboxedType,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MInstruction* load;
+ switch (unboxedType) {
+ case JSVAL_TYPE_BOOLEAN:
+ load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Uint8,
+ DoesNotRequireMemoryBarrier, elementsOffset);
+ load->setResultType(MIRType::Boolean);
+ break;
+
+ case JSVAL_TYPE_INT32:
+ load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Int32,
+ DoesNotRequireMemoryBarrier, elementsOffset);
+ load->setResultType(MIRType::Int32);
+ break;
+
+ case JSVAL_TYPE_DOUBLE:
+ load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Float64,
+ DoesNotRequireMemoryBarrier, elementsOffset,
+ /* canonicalizeDoubles = */ false);
+ load->setResultType(MIRType::Double);
+ break;
+
+ case JSVAL_TYPE_STRING:
+ load = MLoadUnboxedString::New(alloc(), elements, index, elementsOffset);
+ break;
+
+ case JSVAL_TYPE_OBJECT: {
+ MLoadUnboxedObjectOrNull::NullBehavior nullBehavior;
+ if (types->hasType(TypeSet::NullType()))
+ nullBehavior = MLoadUnboxedObjectOrNull::HandleNull;
+ else if (barrier != BarrierKind::NoBarrier)
+ nullBehavior = MLoadUnboxedObjectOrNull::BailOnNull;
+ else
+ nullBehavior = MLoadUnboxedObjectOrNull::NullNotPossible;
+ load = MLoadUnboxedObjectOrNull::New(alloc(), elements, index, nullBehavior,
+ elementsOffset);
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+
+ current->add(load);
+ return load;
+}
+
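+// Try to emit a direct load from an unboxed object's inline data, when every
+// object in the typeset stores |name| at the same offset with the same type.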
+bool
+IonBuilder::getPropTryUnboxed(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ JSValueType unboxedType;
+ uint32_t offset = getUnboxedOffset(obj->resultTypeSet(), name, &unboxedType);
+ if (offset == UINT32_MAX)
+ return true;
+
+ if (obj->type() != MIRType::Object) {
+ MGuardObject* guard = MGuardObject::New(alloc(), obj);
+ current->add(guard);
+ obj = guard;
+ }
+
+ MInstruction* load = loadUnboxedProperty(obj, offset, unboxedType, barrier, types);
+ current->push(load);
+
+ if (!pushTypeBarrier(load, types, barrier))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
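+// Guard the receiver shapes/groups observed by Baseline so a common getter or
+// setter can be used even when type information alone cannot prove it: guard
+// the holder's shape and the receivers, or just the object's shape for an own
+// property.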
+MDefinition*
+IonBuilder::addShapeGuardsForGetterSetter(MDefinition* obj, JSObject* holder, Shape* holderShape,
+ const BaselineInspector::ReceiverVector& receivers,
+ const BaselineInspector::ObjectGroupVector& convertUnboxedGroups,
+ bool isOwnProperty)
+{
+ MOZ_ASSERT(holder);
+ MOZ_ASSERT(holderShape);
+
+ obj = convertUnboxedObjects(obj, convertUnboxedGroups);
+
+ if (isOwnProperty) {
+ MOZ_ASSERT(receivers.empty());
+ return addShapeGuard(obj, holderShape, Bailout_ShapeGuard);
+ }
+
+ MDefinition* holderDef = constant(ObjectValue(*holder));
+ addShapeGuard(holderDef, holderShape, Bailout_ShapeGuard);
+
+ return addGuardReceiverPolymorphic(obj, receivers);
+}
+
+bool
+IonBuilder::getPropTryCommonGetter(bool* emitted, MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ Shape* lastProperty = nullptr;
+ JSFunction* commonGetter = nullptr;
+ Shape* globalShape = nullptr;
+ JSObject* foundProto = nullptr;
+ bool isOwnProperty = false;
+ BaselineInspector::ReceiverVector receivers(alloc());
+ BaselineInspector::ObjectGroupVector convertUnboxedGroups(alloc());
+ if (!inspector->commonGetPropFunction(pc, &foundProto, &lastProperty, &commonGetter,
+ &globalShape, &isOwnProperty,
+ receivers, convertUnboxedGroups))
+ {
+ return true;
+ }
+
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ MDefinition* guard = nullptr;
+ MDefinition* globalGuard = nullptr;
+ bool canUseTIForGetter =
+ testCommonGetterSetter(objTypes, name, /* isGetter = */ true,
+ foundProto, lastProperty, commonGetter, &guard,
+ globalShape, &globalGuard);
+ if (!canUseTIForGetter) {
+ // If type information is bad, we can still optimize the getter if we
+ // shape guard.
+ obj = addShapeGuardsForGetterSetter(obj, foundProto, lastProperty,
+ receivers, convertUnboxedGroups,
+ isOwnProperty);
+ if (!obj)
+ return false;
+ }
+
+ bool isDOM = objTypes && objTypes->isDOMClass(constraints());
+
+ if (isDOM && testShouldDOMCall(objTypes, commonGetter, JSJitInfo::Getter)) {
+ const JSJitInfo* jitinfo = commonGetter->jitInfo();
+ MInstruction* get;
+ if (jitinfo->isAlwaysInSlot) {
+ // If our object is a singleton and we know the property is
+ // constant (which is true if and only if the get doesn't alias
+ // anything), we can just read the slot here and use that constant.
+ JSObject* singleton = objTypes->maybeSingleton();
+ if (singleton && jitinfo->aliasSet() == JSJitInfo::AliasNone) {
+ size_t slot = jitinfo->slotIndex;
+ *emitted = true;
+ pushConstant(GetReservedSlot(singleton, slot));
+ return true;
+ }
+
+ // We can't use MLoadFixedSlot here because it might not have the
+ // right aliasing behavior; we want to alias DOM setters as needed.
+ get = MGetDOMMember::New(alloc(), jitinfo, obj, guard, globalGuard);
+ } else {
+ get = MGetDOMProperty::New(alloc(), jitinfo, obj, guard, globalGuard);
+ }
+        if (!get)
+            return false;
+ current->add(get);
+ current->push(get);
+
+ if (get->isEffectful() && !resumeAfter(get))
+ return false;
+
+ if (!pushDOMTypeBarrier(get, types, commonGetter))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::DOM);
+ *emitted = true;
+ return true;
+ }
+
+ // Don't call the getter with a primitive value.
+ if (obj->type() != MIRType::Object) {
+ MGuardObject* guardObj = MGuardObject::New(alloc(), obj);
+ current->add(guardObj);
+ obj = guardObj;
+ }
+
+ // Spoof stack to expected state for call.
+
+ // Make sure there's enough room
+ if (!current->ensureHasSlots(2))
+ return false;
+ current->push(constant(ObjectValue(*commonGetter)));
+
+ current->push(obj);
+
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, 0))
+ return false;
+
+ if (commonGetter->isNative()) {
+ InliningStatus status = inlineNativeGetter(callInfo, commonGetter);
+ switch (status) {
+ case InliningStatus_Error:
+ return false;
+ case InliningStatus_WarmUpCountTooLow:
+ case InliningStatus_NotInlined:
+ break;
+ case InliningStatus_Inlined:
+ trackOptimizationOutcome(TrackedOutcome::Inlined);
+ *emitted = true;
+ return true;
+ }
+ }
+
+ // Inline if we can, otherwise, forget it and just generate a call.
+ if (commonGetter->isInterpreted()) {
+ InliningDecision decision = makeInliningDecision(commonGetter, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return false;
+ case InliningDecision_DontInline:
+ case InliningDecision_WarmUpCountTooLow:
+ break;
+ case InliningDecision_Inline: {
+ InliningStatus status = inlineScriptedCall(callInfo, commonGetter);
+ if (status == InliningStatus_Inlined) {
+ *emitted = true;
+ return true;
+ }
+ if (status == InliningStatus_Error)
+ return false;
+ break;
+ }
+ }
+ }
+
+ if (!makeCall(commonGetter, callInfo))
+ return false;
+
+ // If the getter could have been inlined, don't track success. The call to
+ // makeInliningDecision above would have tracked a specific reason why we
+ // couldn't inline.
+ if (!commonGetter->isInterpreted())
+ trackOptimizationSuccess();
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::canInlinePropertyOpShapes(const BaselineInspector::ReceiverVector& receivers)
+{
+ if (receivers.empty()) {
+ trackOptimizationOutcome(TrackedOutcome::NoShapeInfo);
+ return false;
+ }
+
+ for (size_t i = 0; i < receivers.length(); i++) {
+ // We inline the property access as long as the shape is not in
+ // dictionary mode. We cannot be sure that the shape is still a
+ // lastProperty, and calling Shape::search() on dictionary mode
+ // shapes that aren't lastProperty is invalid.
+ if (receivers[i].shape && receivers[i].shape->inDictionary()) {
+ trackOptimizationOutcome(TrackedOutcome::InDictionaryMode);
+ return false;
+ }
+ }
+
+ return true;
+}
+
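+// Return the property shape if every native receiver stores |id| in the same
+// slot (with the same fixed-slot count), so a single polymorphic receiver
+// guard plus one slot access suffices; otherwise return nullptr.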
+static Shape*
+PropertyShapesHaveSameSlot(const BaselineInspector::ReceiverVector& receivers, jsid id)
+{
+ Shape* firstShape = nullptr;
+ for (size_t i = 0; i < receivers.length(); i++) {
+ if (receivers[i].group)
+ return nullptr;
+
+ Shape* shape = receivers[i].shape->searchLinear(id);
+ MOZ_ASSERT(shape);
+
+ if (i == 0) {
+ firstShape = shape;
+ } else if (shape->slot() != firstShape->slot() ||
+ shape->numFixedSlots() != firstShape->numFixedSlots())
+ {
+ return nullptr;
+ }
+ }
+
+ return firstShape;
+}
+
+bool
+IonBuilder::getPropTryInlineAccess(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ BaselineInspector::ReceiverVector receivers(alloc());
+ BaselineInspector::ObjectGroupVector convertUnboxedGroups(alloc());
+ if (!inspector->maybeInfoForPropertyOp(pc, receivers, convertUnboxedGroups))
+ return false;
+
+ if (!canInlinePropertyOpShapes(receivers))
+ return true;
+
+ obj = convertUnboxedObjects(obj, convertUnboxedGroups);
+
+ MIRType rvalType = types->getKnownMIRType();
+ if (barrier != BarrierKind::NoBarrier || IsNullOrUndefined(rvalType))
+ rvalType = MIRType::Value;
+
+ if (receivers.length() == 1) {
+ if (!receivers[0].group) {
+ // Monomorphic load from a native object.
+ spew("Inlining monomorphic native GETPROP");
+
+ obj = addShapeGuard(obj, receivers[0].shape, Bailout_ShapeGuard);
+
+ Shape* shape = receivers[0].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(shape);
+
+ if (!loadSlot(obj, shape, rvalType, barrier, types))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ if (receivers[0].shape) {
+ // Monomorphic load from an unboxed object expando.
+ spew("Inlining monomorphic unboxed expando GETPROP");
+
+ obj = addGroupGuard(obj, receivers[0].group, Bailout_ShapeGuard);
+ obj = addUnboxedExpandoGuard(obj, /* hasExpando = */ true, Bailout_ShapeGuard);
+
+ MInstruction* expando = MLoadUnboxedExpando::New(alloc(), obj);
+ current->add(expando);
+
+ expando = addShapeGuard(expando, receivers[0].shape, Bailout_ShapeGuard);
+
+ Shape* shape = receivers[0].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(shape);
+
+ if (!loadSlot(expando, shape, rvalType, barrier, types))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ // Monomorphic load from an unboxed object.
+ ObjectGroup* group = receivers[0].group;
+ if (obj->resultTypeSet() && !obj->resultTypeSet()->hasType(TypeSet::ObjectType(group)))
+ return true;
+
+ obj = addGroupGuard(obj, group, Bailout_ShapeGuard);
+
+ const UnboxedLayout::Property* property = group->unboxedLayout().lookup(name);
+ MInstruction* load = loadUnboxedProperty(obj, property->offset, property->type, barrier, types);
+ current->push(load);
+
+ if (!pushTypeBarrier(load, types, barrier))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ MOZ_ASSERT(receivers.length() > 1);
+ spew("Inlining polymorphic GETPROP");
+
+ if (Shape* propShape = PropertyShapesHaveSameSlot(receivers, NameToId(name))) {
+ obj = addGuardReceiverPolymorphic(obj, receivers);
+ if (!obj)
+ return false;
+
+ if (!loadSlot(obj, propShape, rvalType, barrier, types))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Polymorphic);
+ *emitted = true;
+ return true;
+ }
+
+ MGetPropertyPolymorphic* load = MGetPropertyPolymorphic::New(alloc(), obj, name);
+ current->add(load);
+ current->push(load);
+
+ for (size_t i = 0; i < receivers.length(); i++) {
+ Shape* propShape = nullptr;
+ if (receivers[i].shape) {
+ propShape = receivers[i].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(propShape);
+ }
+ if (!load->addReceiver(receivers[i], propShape))
+ return false;
+ }
+
+ if (failedShapeGuard_)
+ load->setNotMovable();
+
+ load->setResultType(rvalType);
+ if (!pushTypeBarrier(load, types, barrier))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Polymorphic);
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTryCache(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // The input value must either be an object, or we should have strong suspicions
+ // that it can be safely unboxed to an object.
+ if (obj->type() != MIRType::Object) {
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || !types->objectOrSentinel()) {
+ trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
+ return true;
+ }
+ }
+
+ // Since getters have no guaranteed return values, we must barrier in order to be
+ // able to attach stubs for them.
+ if (inspector->hasSeenAccessedGetter(pc))
+ barrier = BarrierKind::TypeSet;
+
+ // Caches can read values from prototypes, so update the barrier to
+ // reflect such possible values.
+ if (barrier != BarrierKind::TypeSet) {
+ ResultWithOOM<BarrierKind> protoBarrier =
+ PropertyReadOnPrototypeNeedsTypeBarrier(this, obj, name, types);
+ if (protoBarrier.oom)
+ return false;
+ if (protoBarrier.value != BarrierKind::NoBarrier) {
+ MOZ_ASSERT(barrier <= protoBarrier.value);
+ barrier = protoBarrier.value;
+ }
+ }
+
+ MConstant* id = constant(StringValue(name));
+ MGetPropertyCache* load = MGetPropertyCache::New(alloc(), obj, id,
+ barrier == BarrierKind::TypeSet);
+
+ // Try to mark the cache as idempotent.
+ if (obj->type() == MIRType::Object && !invalidatedIdempotentCache()) {
+ if (PropertyReadIsIdempotent(constraints(), obj, name))
+ load->setIdempotent();
+ }
+
+ // When we are in the context of making a call from the value returned from
+ // a property, we query the typeObject for the given property name to fill
+ // the InlinePropertyTable of the GetPropertyCache. This information is
+ // then used in inlineCallsite and inlineCalls, if the "this" definition
+ // matches the "object" definition of the GetPropertyCache (see
+ // CanInlineGetPropertyCache).
+ //
+ // If this GetPropertyCache is idempotent, then we can dispatch to the right
+ // function only by checking the object's group, instead of querying the value
+ // of the property. Thus this GetPropertyCache can be moved into the
+ // fallback path (see inlineObjectGroupFallback). Otherwise, we always have
+ // to do the GetPropertyCache, and we can dispatch based on the JSFunction
+ // value.
+ if (JSOp(*pc) == JSOP_CALLPROP && load->idempotent()) {
+ if (!annotateGetPropertyCache(obj, name, load, obj->resultTypeSet(), types))
+ return false;
+ }
+
+ current->add(load);
+ current->push(load);
+
+ if (load->isEffectful() && !resumeAfter(load))
+ return false;
+
+ MIRType rvalType = types->getKnownMIRType();
+ if (barrier != BarrierKind::NoBarrier || IsNullOrUndefined(rvalType))
+ rvalType = MIRType::Value;
+ load->setResultType(rvalType);
+
+ if (!pushTypeBarrier(load, types, barrier))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::getPropTrySharedStub(bool* emitted, MDefinition* obj, TemporaryTypeSet* types)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ // Try to emit a shared stub cache.
+
+ if (JitOptions.disableSharedStubs)
+ return true;
+
+ MInstruction* stub = MUnarySharedStub::New(alloc(), obj);
+ current->add(stub);
+ current->push(stub);
+
+ if (!resumeAfter(stub))
+ return false;
+
+ // Due to inlining, it's possible the observed TypeSet is non-empty,
+ // even though we know |obj| is null/undefined and the MCallGetProperty
+ // will throw. Don't push a TypeBarrier in this case, to avoid
+ // inlining the following (unreachable) JSOP_CALL.
+ if (*pc != JSOP_CALLPROP || !IsNullOrUndefined(obj->type())) {
+ if (!pushTypeBarrier(stub, types, BarrierKind::TypeSet))
+ return false;
+ }
+
+ *emitted = true;
+ return true;
+}
+
+MDefinition*
+IonBuilder::tryInnerizeWindow(MDefinition* obj)
+{
+ // Try to optimize accesses on outer window proxies (window.foo, for
+ // example) to go directly to the inner window, the global.
+ //
+ // Callers should be careful not to pass the inner object to getters or
+ // setters that require outerization.
+
+ if (obj->type() != MIRType::Object)
+ return obj;
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types)
+ return obj;
+
+ JSObject* singleton = types->maybeSingleton();
+ if (!singleton)
+ return obj;
+
+ if (!IsWindowProxy(singleton))
+ return obj;
+
+ // This must be a WindowProxy for the current Window/global. Else it'd be
+ // a cross-compartment wrapper and IsWindowProxy returns false for those.
+ MOZ_ASSERT(ToWindowIfWindowProxy(singleton) == &script()->global());
+
+ // When we navigate, the WindowProxy is brain transplanted and we'll mark
+ // its ObjectGroup as having unknown properties. The type constraint we add
+ // here will invalidate JIT code when this happens.
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(singleton);
+ if (key->hasFlags(constraints(), OBJECT_FLAG_UNKNOWN_PROPERTIES))
+ return obj;
+
+ obj->setImplicitlyUsedUnchecked();
+ return constant(ObjectValue(script()->global()));
+}
+
+bool
+IonBuilder::getPropTryInnerize(bool* emitted, MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* types)
+{
+ // See the comment in tryInnerizeWindow for how this works.
+
+ // Note that it's important that we do this _before_ trying the usual
+ // optimizations below on obj, since some of those optimizations have
+ // fallback paths that are slower than the path we'd produce here.
+
+ MOZ_ASSERT(*emitted == false);
+
+ MDefinition* inner = tryInnerizeWindow(obj);
+ if (inner == obj)
+ return true;
+
+ if (!forceInlineCaches()) {
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Constant);
+ if (!getPropTryConstant(emitted, inner, NameToId(name), types) || *emitted)
+ return *emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetProp_StaticName);
+ if (!getStaticName(&script()->global(), name, emitted) || *emitted)
+ return *emitted;
+
+ trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter);
+ if (!getPropTryCommonGetter(emitted, inner, name, types) || *emitted)
+ return *emitted;
+ }
+
+ // Passing the inner object to GetProperty IC is safe, see the
+ // needsOuterizedThisObject check in IsCacheableGetPropCallNative.
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
+ inner, name, types);
+ trackOptimizationAttempt(TrackedStrategy::GetProp_InlineCache);
+ if (!getPropTryCache(emitted, inner, name, barrier, types) || *emitted)
+ return *emitted;
+
+ MOZ_ASSERT(*emitted == false);
+ return true;
+}
+
+bool
+IonBuilder::jsop_setprop(PropertyName* name)
+{
+ MDefinition* value = current->pop();
+ MDefinition* obj = convertUnboxedObjects(current->pop());
+
+ bool emitted = false;
+ startTrackingOptimizations();
+ trackTypeInfo(TrackedTypeSite::Receiver, obj->type(), obj->resultTypeSet());
+ trackTypeInfo(TrackedTypeSite::Value, value->type(), value->resultTypeSet());
+
+ // Always use a call if we are doing the definite properties analysis and
+ // not actually emitting code, to simplify later analysis.
+ if (info().isAnalysis() || shouldAbortOnPreliminaryGroups(obj)) {
+ bool strict = IsStrictSetPC(pc);
+ MInstruction* ins = MCallSetProperty::New(alloc(), obj, value, name, strict);
+ current->add(ins);
+ current->push(value);
+ return resumeAfter(ins);
+ }
+
+ if (!forceInlineCaches()) {
+ // Try to inline a common property setter, or make a call.
+ trackOptimizationAttempt(TrackedStrategy::SetProp_CommonSetter);
+ if (!setPropTryCommonSetter(&emitted, obj, name, value) || emitted)
+ return emitted;
+
+ // Try to emit stores to known binary data blocks
+ trackOptimizationAttempt(TrackedStrategy::SetProp_TypedObject);
+ if (!setPropTryTypedObject(&emitted, obj, name, value) || emitted)
+ return emitted;
+ }
+
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ bool barrier = PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, &obj, name, &value,
+ /* canModify = */ true);
+
+ if (!forceInlineCaches()) {
+ // Try to emit stores to unboxed objects.
+ trackOptimizationAttempt(TrackedStrategy::SetProp_Unboxed);
+ if (!setPropTryUnboxed(&emitted, obj, name, value, barrier, objTypes) || emitted)
+ return emitted;
+ }
+
+ // Add post barrier if needed. The instructions above manage any post
+ // barriers they need directly.
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteBarrier::New(alloc(), obj, value));
+
+ if (!forceInlineCaches()) {
+ // Try to emit store from definite slots.
+ trackOptimizationAttempt(TrackedStrategy::SetProp_DefiniteSlot);
+ if (!setPropTryDefiniteSlot(&emitted, obj, name, value, barrier, objTypes) || emitted)
+ return emitted;
+
+ // Try to emit a monomorphic/polymorphic store based on baseline caches.
+ trackOptimizationAttempt(TrackedStrategy::SetProp_InlineAccess);
+ if (!setPropTryInlineAccess(&emitted, obj, name, value, barrier, objTypes) || emitted)
+ return emitted;
+ }
+
+ // Emit a polymorphic cache.
+ trackOptimizationAttempt(TrackedStrategy::SetProp_InlineCache);
+ return setPropTryCache(&emitted, obj, name, value, barrier, objTypes);
+}
+
+bool
+IonBuilder::setPropTryCommonSetter(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ Shape* lastProperty = nullptr;
+ JSFunction* commonSetter = nullptr;
+ JSObject* foundProto = nullptr;
+ bool isOwnProperty;
+ BaselineInspector::ReceiverVector receivers(alloc());
+ BaselineInspector::ObjectGroupVector convertUnboxedGroups(alloc());
+ if (!inspector->commonSetPropFunction(pc, &foundProto, &lastProperty, &commonSetter,
+ &isOwnProperty,
+ receivers, convertUnboxedGroups))
+ {
+ trackOptimizationOutcome(TrackedOutcome::NoProtoFound);
+ return true;
+ }
+
+ TemporaryTypeSet* objTypes = obj->resultTypeSet();
+ MDefinition* guard = nullptr;
+ bool canUseTIForSetter =
+ testCommonGetterSetter(objTypes, name, /* isGetter = */ false,
+ foundProto, lastProperty, commonSetter, &guard);
+ if (!canUseTIForSetter) {
+ // If type information is bad, we can still optimize the setter if we
+ // shape guard.
+ obj = addShapeGuardsForGetterSetter(obj, foundProto, lastProperty,
+ receivers, convertUnboxedGroups,
+ isOwnProperty);
+ if (!obj)
+ return false;
+ }
+
+ // Emit common setter.
+
+ // Setters can be called even if the property write needs a type
+ // barrier, as calling the setter does not actually write any data
+ // properties.
+
+ // Try emitting dom call.
+ if (!setPropTryCommonDOMSetter(emitted, obj, value, commonSetter, objTypes))
+ return false;
+
+ if (*emitted) {
+ trackOptimizationOutcome(TrackedOutcome::DOM);
+ return true;
+ }
+
+ // Don't call the setter with a primitive value.
+ if (obj->type() != MIRType::Object) {
+ MGuardObject* guardObj = MGuardObject::New(alloc(), obj);
+ current->add(guardObj);
+ obj = guardObj;
+ }
+
+ // Dummy up the stack, as in getprop. We are pushing an extra value, so
+ // ensure there is enough space.
+ if (!current->ensureHasSlots(3))
+ return false;
+
+ current->push(constant(ObjectValue(*commonSetter)));
+ current->push(obj);
+ current->push(value);
+
+ // Call the setter. Note that we have to push the original value, not
+ // the setter's return value.
+ CallInfo callInfo(alloc(), false);
+ if (!callInfo.init(current, 1))
+ return false;
+
+ // Ensure that we know we are calling a setter in case we inline it.
+ callInfo.markAsSetter();
+
+ // Inline the setter if we can.
+ if (commonSetter->isInterpreted()) {
+ InliningDecision decision = makeInliningDecision(commonSetter, callInfo);
+ switch (decision) {
+ case InliningDecision_Error:
+ return false;
+ case InliningDecision_DontInline:
+ case InliningDecision_WarmUpCountTooLow:
+ break;
+ case InliningDecision_Inline: {
+ InliningStatus status = inlineScriptedCall(callInfo, commonSetter);
+ if (status == InliningStatus_Inlined) {
+ *emitted = true;
+ return true;
+ }
+ if (status == InliningStatus_Error)
+ return false;
+ break;
+ }
+ }
+ }
+
+ MCall* call = makeCallHelper(commonSetter, callInfo);
+ if (!call)
+ return false;
+
+ current->push(value);
+ if (!resumeAfter(call))
+ return false;
+
+ // If the setter could have been inlined, don't track success. The call to
+ // makeInliningDecision above would have tracked a specific reason why we
+ // couldn't inline.
+ if (!commonSetter->isInterpreted())
+ trackOptimizationSuccess();
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryCommonDOMSetter(bool* emitted, MDefinition* obj,
+ MDefinition* value, JSFunction* setter,
+ TemporaryTypeSet* objTypes)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (!objTypes || !objTypes->isDOMClass(constraints()))
+ return true;
+
+ if (!testShouldDOMCall(objTypes, setter, JSJitInfo::Setter))
+ return true;
+
+ // Emit SetDOMProperty.
+ MOZ_ASSERT(setter->jitInfo()->type() == JSJitInfo::Setter);
+ MSetDOMProperty* set = MSetDOMProperty::New(alloc(), setter->jitInfo()->setter, obj, value);
+
+ current->add(set);
+ current->push(value);
+
+ if (!resumeAfter(set))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryTypedObject(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value)
+{
+ TypedObjectPrediction fieldPrediction;
+ size_t fieldOffset;
+ size_t fieldIndex;
+ if (!typedObjectHasField(obj, name, &fieldOffset, &fieldPrediction, &fieldIndex))
+ return true;
+
+ switch (fieldPrediction.kind()) {
+ case type::Simd:
+ // FIXME (bug 894104): store into a MIRType::float32x4 etc
+ return true;
+
+ case type::Reference:
+ return setPropTryReferencePropOfTypedObject(emitted, obj, fieldOffset,
+ value, fieldPrediction, name);
+
+ case type::Scalar:
+ return setPropTryScalarPropOfTypedObject(emitted, obj, fieldOffset,
+ value, fieldPrediction);
+
+ case type::Struct:
+ case type::Array:
+ return true;
+ }
+
+ MOZ_CRASH("Unknown kind");
+}
+
+bool
+IonBuilder::setPropTryReferencePropOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ int32_t fieldOffset,
+ MDefinition* value,
+ TypedObjectPrediction fieldPrediction,
+ PropertyName* name)
+{
+ ReferenceTypeDescr::Type fieldType = fieldPrediction.referenceType();
+
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return true;
+
+ LinearSum byteOffset(alloc());
+ if (!byteOffset.add(fieldOffset))
+ setForceAbort();
+
+ if (!storeReferenceTypedObjectValue(obj, byteOffset, fieldType, value, name))
+ return true;
+
+ current->push(value);
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryScalarPropOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ int32_t fieldOffset,
+ MDefinition* value,
+ TypedObjectPrediction fieldPrediction)
+{
+ // Must always be storing the same scalar type
+ Scalar::Type fieldType = fieldPrediction.scalarType();
+
+ // Don't optimize if the typed object's underlying buffer may be detached.
+ TypeSet::ObjectKey* globalKey = TypeSet::ObjectKey::get(&script()->global());
+ if (globalKey->hasFlags(constraints(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER))
+ return true;
+
+ LinearSum byteOffset(alloc());
+ if (!byteOffset.add(fieldOffset))
+ setForceAbort();
+
+ if (!storeScalarTypedObjectValue(obj, byteOffset, fieldType, value))
+ return false;
+
+ current->push(value);
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryDefiniteSlot(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (barrier) {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return true;
+ }
+
+ uint32_t nfixed;
+ uint32_t slot = getDefiniteSlot(obj->resultTypeSet(), name, &nfixed);
+ if (slot == UINT32_MAX)
+ return true;
+
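+ // Check that the property is writable on every possible receiver and note
+ // whether any receiver requires a pre-barrier on the stored value.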
+ bool writeBarrier = false;
+ for (size_t i = 0; i < obj->resultTypeSet()->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = obj->resultTypeSet()->getObject(i);
+ if (!key)
+ continue;
+
+ HeapTypeSetKey property = key->property(NameToId(name));
+ if (property.nonWritable(constraints())) {
+ trackOptimizationOutcome(TrackedOutcome::NonWritableProperty);
+ return true;
+ }
+ writeBarrier |= property.needsBarrier(constraints());
+ }
+
+ MInstruction* store;
+ if (slot < nfixed) {
+ store = MStoreFixedSlot::New(alloc(), obj, slot, value);
+ if (writeBarrier)
+ store->toStoreFixedSlot()->setNeedsBarrier();
+ } else {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ store = MStoreSlot::New(alloc(), slots, slot - nfixed, value);
+ if (writeBarrier)
+ store->toStoreSlot()->setNeedsBarrier();
+ }
+
+ current->add(store);
+ current->push(value);
+
+ if (!resumeAfter(store))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+MInstruction*
+IonBuilder::storeUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType,
+ MDefinition* value)
+{
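+ // Unboxed fields are addressed like typed-array elements: convert the byte
+ // offset into an element index scaled by the size of the unboxed type.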
+ size_t scaledOffsetConstant = offset / UnboxedTypeSize(unboxedType);
+ MInstruction* scaledOffset = MConstant::New(alloc(), Int32Value(scaledOffsetConstant));
+ current->add(scaledOffset);
+
+ return storeUnboxedValue(obj, obj, UnboxedPlainObject::offsetOfData(),
+ scaledOffset, unboxedType, value);
+}
+
+MInstruction*
+IonBuilder::storeUnboxedValue(MDefinition* obj, MDefinition* elements, int32_t elementsOffset,
+ MDefinition* scaledOffset, JSValueType unboxedType,
+ MDefinition* value, bool preBarrier /* = true */)
+{
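+ // Pick the store instruction matching the unboxed representation. Object and
+ // string stores may need pre-barriers; scalar stores do not.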
+ MInstruction* store;
+ switch (unboxedType) {
+ case JSVAL_TYPE_BOOLEAN:
+ store = MStoreUnboxedScalar::New(alloc(), elements, scaledOffset, value, Scalar::Uint8,
+ MStoreUnboxedScalar::DontTruncateInput,
+ DoesNotRequireMemoryBarrier, elementsOffset);
+ break;
+
+ case JSVAL_TYPE_INT32:
+ store = MStoreUnboxedScalar::New(alloc(), elements, scaledOffset, value, Scalar::Int32,
+ MStoreUnboxedScalar::DontTruncateInput,
+ DoesNotRequireMemoryBarrier, elementsOffset);
+ break;
+
+ case JSVAL_TYPE_DOUBLE:
+ store = MStoreUnboxedScalar::New(alloc(), elements, scaledOffset, value, Scalar::Float64,
+ MStoreUnboxedScalar::DontTruncateInput,
+ DoesNotRequireMemoryBarrier, elementsOffset);
+ break;
+
+ case JSVAL_TYPE_STRING:
+ store = MStoreUnboxedString::New(alloc(), elements, scaledOffset, value,
+ elementsOffset, preBarrier);
+ break;
+
+ case JSVAL_TYPE_OBJECT:
+ MOZ_ASSERT(value->type() == MIRType::Object ||
+ value->type() == MIRType::Null ||
+ value->type() == MIRType::Value);
+ MOZ_ASSERT(!value->mightBeType(MIRType::Undefined),
+ "MToObjectOrNull slow path is invalid for unboxed objects");
+ store = MStoreUnboxedObjectOrNull::New(alloc(), elements, scaledOffset, value, obj,
+ elementsOffset, preBarrier);
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+
+ current->add(store);
+ return store;
+}
+
+bool
+IonBuilder::setPropTryUnboxed(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (barrier) {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return true;
+ }
+
+ JSValueType unboxedType;
+ uint32_t offset = getUnboxedOffset(obj->resultTypeSet(), name, &unboxedType);
+ if (offset == UINT32_MAX)
+ return true;
+
+ if (obj->type() != MIRType::Object) {
+ MGuardObject* guard = MGuardObject::New(alloc(), obj);
+ current->add(guard);
+ obj = guard;
+ }
+
+ MInstruction* store = storeUnboxedProperty(obj, offset, unboxedType, value);
+
+ current->push(value);
+
+ if (!resumeAfter(store))
+ return false;
+
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryInlineAccess(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ if (barrier) {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return true;
+ }
+
+ BaselineInspector::ReceiverVector receivers(alloc());
+ BaselineInspector::ObjectGroupVector convertUnboxedGroups(alloc());
+ if (!inspector->maybeInfoForPropertyOp(pc, receivers, convertUnboxedGroups))
+ return false;
+
+ if (!canInlinePropertyOpShapes(receivers))
+ return true;
+
+ obj = convertUnboxedObjects(obj, convertUnboxedGroups);
+
+ if (receivers.length() == 1) {
+ if (!receivers[0].group) {
+ // Monomorphic store to a native object.
+ spew("Inlining monomorphic native SETPROP");
+
+ obj = addShapeGuard(obj, receivers[0].shape, Bailout_ShapeGuard);
+
+ Shape* shape = receivers[0].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(shape);
+
+ bool needsBarrier = objTypes->propertyNeedsBarrier(constraints(), NameToId(name));
+ if (!storeSlot(obj, shape, value, needsBarrier))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ if (receivers[0].shape) {
+ // Monomorphic store to an unboxed object expando.
+ spew("Inlining monomorphic unboxed expando SETPROP");
+
+ obj = addGroupGuard(obj, receivers[0].group, Bailout_ShapeGuard);
+ obj = addUnboxedExpandoGuard(obj, /* hasExpando = */ true, Bailout_ShapeGuard);
+
+ MInstruction* expando = MLoadUnboxedExpando::New(alloc(), obj);
+ current->add(expando);
+
+ expando = addShapeGuard(expando, receivers[0].shape, Bailout_ShapeGuard);
+
+ Shape* shape = receivers[0].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(shape);
+
+ bool needsBarrier = objTypes->propertyNeedsBarrier(constraints(), NameToId(name));
+ if (!storeSlot(expando, shape, value, needsBarrier))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ // Monomorphic store to an unboxed object.
+ spew("Inlining monomorphic unboxed SETPROP");
+
+ ObjectGroup* group = receivers[0].group;
+ if (!objTypes->hasType(TypeSet::ObjectType(group)))
+ return true;
+
+ obj = addGroupGuard(obj, group, Bailout_ShapeGuard);
+
+ const UnboxedLayout::Property* property = group->unboxedLayout().lookup(name);
+ storeUnboxedProperty(obj, property->offset, property->type, value);
+
+ current->push(value);
+
+ trackOptimizationOutcome(TrackedOutcome::Monomorphic);
+ *emitted = true;
+ return true;
+ }
+
+ MOZ_ASSERT(receivers.length() > 1);
+ spew("Inlining polymorphic SETPROP");
+
+ if (Shape* propShape = PropertyShapesHaveSameSlot(receivers, NameToId(name))) {
+ obj = addGuardReceiverPolymorphic(obj, receivers);
+ if (!obj)
+ return false;
+
+ bool needsBarrier = objTypes->propertyNeedsBarrier(constraints(), NameToId(name));
+ if (!storeSlot(obj, propShape, value, needsBarrier))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Polymorphic);
+ *emitted = true;
+ return true;
+ }
+
+ MSetPropertyPolymorphic* ins = MSetPropertyPolymorphic::New(alloc(), obj, value, name);
+ current->add(ins);
+ current->push(value);
+
+ for (size_t i = 0; i < receivers.length(); i++) {
+ Shape* propShape = nullptr;
+ if (receivers[i].shape) {
+ propShape = receivers[i].shape->searchLinear(NameToId(name));
+ MOZ_ASSERT(propShape);
+ }
+ if (!ins->addReceiver(receivers[i], propShape))
+ return false;
+ }
+
+ if (objTypes->propertyNeedsBarrier(constraints(), NameToId(name)))
+ ins->setNeedsBarrier();
+
+ if (!resumeAfter(ins))
+ return false;
+
+ trackOptimizationOutcome(TrackedOutcome::Polymorphic);
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::setPropTryCache(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes)
+{
+ MOZ_ASSERT(*emitted == false);
+
+ bool strict = IsStrictSetPC(pc);
+
+ MConstant* id = constant(StringValue(name));
+ MSetPropertyCache* ins = MSetPropertyCache::New(alloc(), obj, id, value, strict, barrier,
+ /* guardHoles = */ false);
+ current->add(ins);
+ current->push(value);
+
+ if (!resumeAfter(ins))
+ return false;
+
+ trackOptimizationSuccess();
+ *emitted = true;
+ return true;
+}
+
+bool
+IonBuilder::jsop_delprop(PropertyName* name)
+{
+ MDefinition* obj = current->pop();
+
+ bool strict = JSOp(*pc) == JSOP_STRICTDELPROP;
+ MInstruction* ins = MDeleteProperty::New(alloc(), obj, name, strict);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_delelem()
+{
+ MDefinition* index = current->pop();
+ MDefinition* obj = current->pop();
+
+ bool strict = JSOp(*pc) == JSOP_STRICTDELELEM;
+ MDeleteElement* ins = MDeleteElement::New(alloc(), obj, index, strict);
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_regexp(RegExpObject* reobj)
+{
+ MRegExp* regexp = MRegExp::New(alloc(), constraints(), reobj);
+ current->add(regexp);
+ current->push(regexp);
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_object(JSObject* obj)
+{
+ if (options.cloneSingletons()) {
+ MCloneLiteral* clone = MCloneLiteral::New(alloc(), constant(ObjectValue(*obj)));
+ current->add(clone);
+ current->push(clone);
+ return resumeAfter(clone);
+ }
+
+ compartment->setSingletonsAsValues();
+ pushConstant(ObjectValue(*obj));
+ return true;
+}
+
+bool
+IonBuilder::jsop_lambda(JSFunction* fun)
+{
+ MOZ_ASSERT(analysis().usesEnvironmentChain());
+ MOZ_ASSERT(!fun->isArrow());
+
+ if (IsAsmJSModule(fun))
+ return abort("asm.js module function");
+
+ MConstant* cst = MConstant::NewConstraintlessObject(alloc(), fun);
+ current->add(cst);
+ MLambda* ins = MLambda::New(alloc(), constraints(), current->environmentChain(), cst);
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_lambda_arrow(JSFunction* fun)
+{
+ MOZ_ASSERT(analysis().usesEnvironmentChain());
+ MOZ_ASSERT(fun->isArrow());
+ MOZ_ASSERT(!fun->isNative());
+
+ MDefinition* newTargetDef = current->pop();
+ MLambdaArrow* ins = MLambdaArrow::New(alloc(), constraints(), current->environmentChain(),
+ newTargetDef, fun);
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_setarg(uint32_t arg)
+{
+ // To handle this case, we should spill the arguments to the space where
+ // actual arguments are stored. The tricky part is that if we add a MIR
+ // to wrap the spilling action, we don't want the spilling to be
+ // captured by the GETARG and by the resume point, only by
+ // MGetFrameArgument.
+ MOZ_ASSERT(analysis_.hasSetArg());
+ MDefinition* val = current->peek(-1);
+
+ // If an arguments object is in use, and it aliases formals, then all SETARGs
+ // must go through the arguments object.
+ if (info().argsObjAliasesFormals()) {
+ if (NeedsPostBarrier(val))
+ current->add(MPostWriteBarrier::New(alloc(), current->argumentsObject(), val));
+ current->add(MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(),
+ GET_ARGNO(pc), val));
+ return true;
+ }
+
+ // :TODO: if hasArguments() is true, and the script has a JSOP_SETARG, then
+ // convert all arg accesses to go through the arguments object. (see Bug 957475)
+ if (info().hasArguments())
+ return abort("NYI: arguments & setarg.");
+
+ // Otherwise, if the magic arguments value is in use, it aliases formals, and there exist
+ // arguments[...] GETELEM expressions in the script, then MSetFrameArgument must be used.
+ // If no arguments[...] GETELEM expressions are in the script, and an argsobj is not
+ // required, then any aliased argument set can never be observed, and the frame does not
+ // actually need to be updated with the new arg value.
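+ // For example (illustrative only), in |function f(x) { x = 1; return arguments[0]; }|
+ // the write to |x| must be observable through |arguments|, so the frame slot
+ // is updated below.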
+ if (info().argumentsAliasesFormals()) {
+ // JSOP_SETARG with magic arguments within inline frames is not yet supported.
+ MOZ_ASSERT(script()->uninlineable() && !isInlineBuilder());
+
+ MSetFrameArgument* store = MSetFrameArgument::New(alloc(), arg, val);
+ modifiesFrameArguments_ = true;
+ current->add(store);
+ current->setArg(arg);
+ return true;
+ }
+
+ // If this assignment is at the start of the function and is coercing
+ // the original value for the argument which was passed in, loosen
+ // the type information for that original argument if it is currently
+ // empty due to originally executing in the interpreter.
+ if (graph().numBlocks() == 1 &&
+ (val->isBitOr() || val->isBitAnd() || val->isMul() /* for JSOP_POS */))
+ {
+ for (size_t i = 0; i < val->numOperands(); i++) {
+ MDefinition* op = val->getOperand(i);
+ if (op->isParameter() &&
+ op->toParameter()->index() == (int32_t)arg &&
+ op->resultTypeSet() &&
+ op->resultTypeSet()->empty())
+ {
+ bool otherUses = false;
+ for (MUseDefIterator iter(op); iter; iter++) {
+ MDefinition* def = iter.def();
+ if (def == val)
+ continue;
+ otherUses = true;
+ }
+ if (!otherUses) {
+ MOZ_ASSERT(op->resultTypeSet() == &argTypes[arg]);
+ argTypes[arg].addType(TypeSet::UnknownType(), alloc_->lifoAlloc());
+ if (val->isMul()) {
+ val->setResultType(MIRType::Double);
+ val->toMul()->setSpecialization(MIRType::Double);
+ } else {
+ MOZ_ASSERT(val->type() == MIRType::Int32);
+ }
+ val->setResultTypeSet(nullptr);
+ }
+ }
+ }
+ }
+
+ current->setArg(arg);
+ return true;
+}
+
+bool
+IonBuilder::jsop_defvar(uint32_t index)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_DEFVAR);
+
+ PropertyName* name = script()->getName(index);
+
+ // Bake in attrs.
+ unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT;
+ MOZ_ASSERT(!script()->isForEval());
+
+ // Pass the EnvironmentChain.
+ MOZ_ASSERT(analysis().usesEnvironmentChain());
+
+ // Bake the name pointer into the MDefVar.
+ MDefVar* defvar = MDefVar::New(alloc(), name, attrs, current->environmentChain());
+ current->add(defvar);
+
+ return resumeAfter(defvar);
+}
+
+bool
+IonBuilder::jsop_deflexical(uint32_t index)
+{
+ MOZ_ASSERT(!script()->hasNonSyntacticScope());
+ MOZ_ASSERT(JSOp(*pc) == JSOP_DEFLET || JSOp(*pc) == JSOP_DEFCONST);
+
+ PropertyName* name = script()->getName(index);
+ unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT;
+ if (JSOp(*pc) == JSOP_DEFCONST)
+ attrs |= JSPROP_READONLY;
+
+ MDefLexical* deflex = MDefLexical::New(alloc(), name, attrs);
+ current->add(deflex);
+
+ return resumeAfter(deflex);
+}
+
+bool
+IonBuilder::jsop_deffun(uint32_t index)
+{
+ MOZ_ASSERT(analysis().usesEnvironmentChain());
+
+ MDefFun* deffun = MDefFun::New(alloc(), current->pop(), current->environmentChain());
+ current->add(deffun);
+
+ return resumeAfter(deffun);
+}
+
+bool
+IonBuilder::jsop_throwsetconst()
+{
+ current->peek(-1)->setImplicitlyUsedUnchecked();
+ MInstruction* lexicalError = MThrowRuntimeLexicalError::New(alloc(), JSMSG_BAD_CONST_ASSIGN);
+ current->add(lexicalError);
+ return resumeAfter(lexicalError);
+}
+
+bool
+IonBuilder::jsop_checklexical()
+{
+ uint32_t slot = info().localSlot(GET_LOCALNO(pc));
+ MDefinition* lexical = addLexicalCheck(current->getSlot(slot));
+ if (!lexical)
+ return false;
+ current->setSlot(slot, lexical);
+ return true;
+}
+
+bool
+IonBuilder::jsop_checkaliasedlexical(EnvironmentCoordinate ec)
+{
+ MDefinition* let = addLexicalCheck(getAliasedVar(ec));
+ if (!let)
+ return false;
+
+ jsbytecode* nextPc = pc + JSOP_CHECKALIASEDLEXICAL_LENGTH;
+ MOZ_ASSERT(JSOp(*nextPc) == JSOP_GETALIASEDVAR ||
+ JSOp(*nextPc) == JSOP_SETALIASEDVAR ||
+ JSOp(*nextPc) == JSOP_THROWSETALIASEDCONST);
+ MOZ_ASSERT(ec == EnvironmentCoordinate(nextPc));
+
+ // If we are checking for a load, push the checked let so that the load
+ // can use it.
+ if (JSOp(*nextPc) == JSOP_GETALIASEDVAR)
+ setLexicalCheck(let);
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_functionthis()
+{
+ MOZ_ASSERT(info().funMaybeLazy());
+ MOZ_ASSERT(!info().funMaybeLazy()->isArrow());
+
+ if (script()->strict() || info().funMaybeLazy()->isSelfHostedBuiltin()) {
+ // No need to wrap primitive |this| in strict mode or self-hosted code.
+ current->pushSlot(info().thisSlot());
+ return true;
+ }
+
+ if (thisTypes && (thisTypes->getKnownMIRType() == MIRType::Object ||
+ (thisTypes->empty() && baselineFrame_ && baselineFrame_->thisType.isSomeObject())))
+ {
+ // This is safe, because if the entry type of |this| is an object, it
+ // will necessarily be an object throughout the entire function. OSR
+ // can introduce a phi, but this phi will be specialized.
+ current->pushSlot(info().thisSlot());
+ return true;
+ }
+
+ // If we are doing an analysis, we might not yet know the type of |this|.
+ // Instead of bailing out, just push the |this| slot, as this code won't
+ // actually execute and it does not matter whether |this| is primitive.
+ if (info().isAnalysis()) {
+ current->pushSlot(info().thisSlot());
+ return true;
+ }
+
+ // Hard case: |this| may be a primitive we have to wrap.
+ MDefinition* def = current->getSlot(info().thisSlot());
+
+ if (def->type() == MIRType::Object) {
+ current->push(def);
+ return true;
+ }
+
+ if (IsNullOrUndefined(def->type())) {
+ pushConstant(GetThisValue(&script()->global()));
+ return true;
+ }
+
+ MComputeThis* thisObj = MComputeThis::New(alloc(), def);
+ current->add(thisObj);
+ current->push(thisObj);
+
+ return resumeAfter(thisObj);
+}
+
+bool
+IonBuilder::jsop_globalthis()
+{
+ if (script()->hasNonSyntacticScope()) {
+ // Ion does not compile global scripts with a non-syntactic scope, but
+ // we can end up here when we're compiling an arrow function.
+ return abort("JSOP_GLOBALTHIS in script with non-syntactic scope");
+ }
+
+ LexicalEnvironmentObject* globalLexical = &script()->global().lexicalEnvironment();
+ pushConstant(globalLexical->thisValue());
+ return true;
+}
+
+bool
+IonBuilder::jsop_typeof()
+{
+ MDefinition* input = current->pop();
+ MTypeOf* ins = MTypeOf::New(alloc(), input, input->type());
+
+ ins->cacheInputMaybeCallableOrEmulatesUndefined(constraints());
+
+ current->add(ins);
+ current->push(ins);
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_toasync()
+{
+ MDefinition* unwrapped = current->pop();
+ MOZ_ASSERT(unwrapped->type() == MIRType::Object);
+
+ MToAsync* ins = MToAsync::New(alloc(), unwrapped);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_toid()
+{
+ // No-op if the index is an integer.
+ if (current->peek(-1)->type() == MIRType::Int32)
+ return true;
+
+ MDefinition* index = current->pop();
+ MToId* ins = MToId::New(alloc(), index);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_iter(uint8_t flags)
+{
+ if (flags != JSITER_ENUMERATE)
+ nonStringIteration_ = true;
+
+ MDefinition* obj = current->pop();
+ MInstruction* ins = MIteratorStart::New(alloc(), obj, flags);
+
+ if (!outermostBuilder()->iterators_.append(ins))
+ return false;
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_itermore()
+{
+ MDefinition* iter = current->peek(-1);
+ MInstruction* ins = MIteratorMore::New(alloc(), iter);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_isnoiter()
+{
+ MDefinition* def = current->peek(-1);
+ MOZ_ASSERT(def->isIteratorMore());
+
+ MInstruction* ins = MIsNoIter::New(alloc(), def);
+ current->add(ins);
+ current->push(ins);
+
+ return true;
+}
+
+bool
+IonBuilder::jsop_iterend()
+{
+ MDefinition* iter = current->pop();
+ MInstruction* ins = MIteratorEnd::New(alloc(), iter);
+
+ current->add(ins);
+
+ return resumeAfter(ins);
+}
+
+MDefinition*
+IonBuilder::walkEnvironmentChain(unsigned hops)
+{
+ MDefinition* env = current->getSlot(info().environmentChainSlot());
+
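+ // Each hop peels off one enclosing environment, mirroring how an
+ // EnvironmentCoordinate's hop count is resolved at runtime.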
+ for (unsigned i = 0; i < hops; i++) {
+ MInstruction* ins = MEnclosingEnvironment::New(alloc(), env);
+ current->add(ins);
+ env = ins;
+ }
+
+ return env;
+}
+
+bool
+IonBuilder::hasStaticEnvironmentObject(EnvironmentCoordinate ec, JSObject** pcall)
+{
+ JSScript* outerScript = EnvironmentCoordinateFunctionScript(script(), pc);
+ if (!outerScript || !outerScript->treatAsRunOnce())
+ return false;
+
+ TypeSet::ObjectKey* funKey =
+ TypeSet::ObjectKey::get(outerScript->functionNonDelazifying());
+ if (funKey->hasFlags(constraints(), OBJECT_FLAG_RUNONCE_INVALIDATED))
+ return false;
+
+ // The script this aliased var operation is accessing will run only once,
+ // so there will be only one call object and the aliased var access can be
+ // compiled in the same manner as a global access. We still need to find
+ // the call object though.
+
+ // Look for the call object on the current script's function's env chain.
+ // If the current script is inner to the outer script and the function has
+ // singleton type then it should show up here.
+
+ MDefinition* envDef = current->getSlot(info().environmentChainSlot());
+ envDef->setImplicitlyUsedUnchecked();
+
+ JSObject* environment = script()->functionNonDelazifying()->environment();
+ while (environment && !environment->is<GlobalObject>()) {
+ if (environment->is<CallObject>() &&
+ environment->as<CallObject>().callee().nonLazyScript() == outerScript)
+ {
+ MOZ_ASSERT(environment->isSingleton());
+ *pcall = environment;
+ return true;
+ }
+ environment = environment->enclosingEnvironment();
+ }
+
+ // Look for the call object on the current frame, if we are compiling the
+ // outer script itself. Don't do this if we are at entry to the outer
+ // script, as the call object we see will not be the real one --- after
+ // entering the Ion code a different call object will be created.
+
+ if (script() == outerScript && baselineFrame_ && info().osrPc()) {
+ JSObject* singletonScope = baselineFrame_->singletonEnvChain;
+ if (singletonScope &&
+ singletonScope->is<CallObject>() &&
+ singletonScope->as<CallObject>().callee().nonLazyScript() == outerScript)
+ {
+ MOZ_ASSERT(singletonScope->isSingleton());
+ *pcall = singletonScope;
+ return true;
+ }
+ }
+
+ return true;
+}
+
+MDefinition*
+IonBuilder::getAliasedVar(EnvironmentCoordinate ec)
+{
+ MDefinition* obj = walkEnvironmentChain(ec.hops());
+
+ Shape* shape = EnvironmentCoordinateToEnvironmentShape(script(), pc);
+
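+ // The variable lives either in one of the environment object's fixed slots
+ // or in its dynamically allocated slots array; emit the matching load.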
+ MInstruction* load;
+ if (shape->numFixedSlots() <= ec.slot()) {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ load = MLoadSlot::New(alloc(), slots, ec.slot() - shape->numFixedSlots());
+ } else {
+ load = MLoadFixedSlot::New(alloc(), obj, ec.slot());
+ }
+
+ current->add(load);
+ return load;
+}
+
+bool
+IonBuilder::jsop_getaliasedvar(EnvironmentCoordinate ec)
+{
+ JSObject* call = nullptr;
+ if (hasStaticEnvironmentObject(ec, &call) && call) {
+ PropertyName* name = EnvironmentCoordinateName(envCoordinateNameCache, script(), pc);
+ bool emitted = false;
+ if (!getStaticName(call, name, &emitted, takeLexicalCheck()) || emitted)
+ return emitted;
+ }
+
+ // See jsop_checkaliasedlexical.
+ MDefinition* load = takeLexicalCheck();
+ if (!load)
+ load = getAliasedVar(ec);
+ current->push(load);
+
+ TemporaryTypeSet* types = bytecodeTypes(pc);
+ return pushTypeBarrier(load, types, BarrierKind::TypeSet);
+}
+
+bool
+IonBuilder::jsop_setaliasedvar(EnvironmentCoordinate ec)
+{
+ JSObject* call = nullptr;
+ if (hasStaticEnvironmentObject(ec, &call)) {
+ uint32_t depth = current->stackDepth() + 1;
+ if (depth > current->nslots()) {
+ if (!current->increaseSlots(depth - current->nslots()))
+ return false;
+ }
+ MDefinition* value = current->pop();
+ PropertyName* name = EnvironmentCoordinateName(envCoordinateNameCache, script(), pc);
+
+ if (call) {
+ // Push the object on the stack to match the bound object expected in
+ // the global and property set cases.
+ pushConstant(ObjectValue(*call));
+ current->push(value);
+ return setStaticName(call, name);
+ }
+
+ // The call object has type information we need to respect but we
+ // couldn't find it. Just do a normal property assign.
+ MDefinition* obj = walkEnvironmentChain(ec.hops());
+ current->push(obj);
+ current->push(value);
+ return jsop_setprop(name);
+ }
+
+ MDefinition* rval = current->peek(-1);
+ MDefinition* obj = walkEnvironmentChain(ec.hops());
+
+ Shape* shape = EnvironmentCoordinateToEnvironmentShape(script(), pc);
+
+ if (NeedsPostBarrier(rval))
+ current->add(MPostWriteBarrier::New(alloc(), obj, rval));
+
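+ // As in getAliasedVar, the slot is either fixed or dynamic; emit a barriered
+ // store to the matching location.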
+ MInstruction* store;
+ if (shape->numFixedSlots() <= ec.slot()) {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ store = MStoreSlot::NewBarriered(alloc(), slots, ec.slot() - shape->numFixedSlots(), rval);
+ } else {
+ store = MStoreFixedSlot::NewBarriered(alloc(), obj, ec.slot(), rval);
+ }
+
+ current->add(store);
+ return resumeAfter(store);
+}
+
+bool
+IonBuilder::jsop_in()
+{
+ MDefinition* obj = convertUnboxedObjects(current->pop());
+ MDefinition* id = current->pop();
+
+ bool emitted = false;
+
+ if (!inTryDense(&emitted, obj, id) || emitted)
+ return emitted;
+
+ if (!inTryFold(&emitted, obj, id) || emitted)
+ return emitted;
+
+ MIn* ins = MIn::New(alloc(), id, obj);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::inTryDense(bool* emitted, MDefinition* obj, MDefinition* id)
+{
+ MOZ_ASSERT(!*emitted);
+
+ if (shouldAbortOnPreliminaryGroups(obj))
+ return true;
+
+ JSValueType unboxedType = UnboxedArrayElementType(constraints(), obj, id);
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ if (!ElementAccessIsDenseNative(constraints(), obj, id))
+ return true;
+ }
+
+ if (ElementAccessHasExtraIndexedProperty(this, obj))
+ return true;
+
+ *emitted = true;
+
+ bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj);
+
+ // Ensure id is an integer.
+ MInstruction* idInt32 = MToInt32::New(alloc(), id);
+ current->add(idInt32);
+ id = idInt32;
+
+ // Get the elements vector.
+ MElements* elements = MElements::New(alloc(), obj, unboxedType != JSVAL_TYPE_MAGIC);
+ current->add(elements);
+
+ MInstruction* initLength = initializedLength(obj, elements, unboxedType);
+
+ // If there are no holes, speculate the InArray check will not fail.
+ if (!needsHoleCheck && !failedBoundsCheck_) {
+ addBoundsCheck(idInt32, initLength);
+ pushConstant(BooleanValue(true));
+ return true;
+ }
+
+ // Check if id < initLength and elem[id] not a hole.
+ MInArray* ins = MInArray::New(alloc(), elements, id, initLength, obj, needsHoleCheck,
+ unboxedType);
+
+ current->add(ins);
+ current->push(ins);
+
+ return true;
+}
+
+bool
+IonBuilder::inTryFold(bool* emitted, MDefinition* obj, MDefinition* id)
+{
+ // Fold |id in obj| to |false|, if we know the object (or an object on its
+ // prototype chain) does not have this property.
+
+ MOZ_ASSERT(!*emitted);
+
+ MConstant* idConst = id->maybeConstantValue();
+ jsid propId;
+ if (!idConst || !ValueToIdPure(idConst->toJSValue(), &propId))
+ return true;
+
+ if (propId != IdToTypeId(propId))
+ return true;
+
+ ResultWithOOM<bool> res = testNotDefinedProperty(obj, propId);
+ if (res.oom)
+ return false;
+ if (!res.value)
+ return true;
+
+ *emitted = true;
+
+ pushConstant(BooleanValue(false));
+ obj->setImplicitlyUsedUnchecked();
+ id->setImplicitlyUsedUnchecked();
+ return true;
+}
+
+bool
+IonBuilder::hasOnProtoChain(TypeSet::ObjectKey* key, JSObject* protoObject, bool* hasOnProto)
+{
+ MOZ_ASSERT(protoObject);
+
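+ // Walk the static prototype chain. Give up (return false) as soon as a
+ // class or prototype is not known to be stable, or a non-native class is
+ // found, since the result could not be relied on in those cases.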
+ while (true) {
+ if (!key->hasStableClassAndProto(constraints()) || !key->clasp()->isNative())
+ return false;
+
+ JSObject* proto = checkNurseryObject(key->proto().toObjectOrNull());
+ if (!proto) {
+ *hasOnProto = false;
+ return true;
+ }
+
+ if (proto == protoObject) {
+ *hasOnProto = true;
+ return true;
+ }
+
+ key = TypeSet::ObjectKey::get(proto);
+ }
+
+ MOZ_CRASH("Unreachable");
+}
+
+bool
+IonBuilder::tryFoldInstanceOf(MDefinition* lhs, JSObject* protoObject)
+{
+ // Try to fold the js::IsDelegate part of the instanceof operation.
+
+ if (!lhs->mightBeType(MIRType::Object)) {
+ // If the lhs is a primitive, the result is false.
+ lhs->setImplicitlyUsedUnchecked();
+ pushConstant(BooleanValue(false));
+ return true;
+ }
+
+ TemporaryTypeSet* lhsTypes = lhs->resultTypeSet();
+ if (!lhsTypes || lhsTypes->unknownObject())
+ return false;
+
+ // We can fold if either all objects have protoObject on their proto chain
+ // or none have.
+ bool isFirst = true;
+ bool knownIsInstance = false;
+
+ for (unsigned i = 0; i < lhsTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = lhsTypes->getObject(i);
+ if (!key)
+ continue;
+
+ bool isInstance;
+ if (!hasOnProtoChain(key, protoObject, &isInstance))
+ return false;
+
+ if (isFirst) {
+ knownIsInstance = isInstance;
+ isFirst = false;
+ } else if (knownIsInstance != isInstance) {
+ // Some of the objects have protoObject on their proto chain and
+ // others don't, so we can't optimize this.
+ return false;
+ }
+ }
+
+ if (knownIsInstance && lhsTypes->getKnownMIRType() != MIRType::Object) {
+ // The result is true for all objects, but the lhs might be a primitive.
+ // We can't fold this completely but we can use a much faster IsObject
+ // test.
+ MIsObject* isObject = MIsObject::New(alloc(), lhs);
+ current->add(isObject);
+ current->push(isObject);
+ return true;
+ }
+
+ lhs->setImplicitlyUsedUnchecked();
+ pushConstant(BooleanValue(knownIsInstance));
+ return true;
+}
+
+bool
+IonBuilder::jsop_instanceof()
+{
+ MDefinition* rhs = current->pop();
+ MDefinition* obj = current->pop();
+
+ // If this is an 'x instanceof function' operation and we can determine the
+ // exact function and prototype object being tested for, use a typed path.
+ do {
+ TemporaryTypeSet* rhsTypes = rhs->resultTypeSet();
+ JSObject* rhsObject = rhsTypes ? rhsTypes->maybeSingleton() : nullptr;
+ if (!rhsObject || !rhsObject->is<JSFunction>() || rhsObject->isBoundFunction())
+ break;
+
+ // Refuse to optimize anything whose [[Prototype]] isn't Function.prototype
+ // since we can't guarantee that it uses the default @@hasInstance method.
+ if (rhsObject->hasUncacheableProto() || !rhsObject->hasStaticPrototype())
+ break;
+
+ Value funProto = script()->global().getPrototype(JSProto_Function);
+ if (!funProto.isObject() || rhsObject->staticPrototype() != &funProto.toObject())
+ break;
+
+ // If the user has supplied their own @@hasInstance method we shouldn't
+ // clobber it.
+ JSFunction* fun = &rhsObject->as<JSFunction>();
+ const WellKnownSymbols* symbols = &compartment->runtime()->wellKnownSymbols();
+ if (!js::FunctionHasDefaultHasInstance(fun, *symbols))
+ break;
+
+ // Ensure that we will bail if the @@hasInstance property or [[Prototype]]
+ // change.
+ TypeSet::ObjectKey* rhsKey = TypeSet::ObjectKey::get(rhsObject);
+ if (!rhsKey->hasStableClassAndProto(constraints()))
+ break;
+
+ if (rhsKey->unknownProperties())
+ break;
+
+ HeapTypeSetKey hasInstanceObject =
+ rhsKey->property(SYMBOL_TO_JSID(symbols->hasInstance));
+ if (hasInstanceObject.isOwnProperty(constraints()))
+ break;
+
+ HeapTypeSetKey protoProperty =
+ rhsKey->property(NameToId(names().prototype));
+ JSObject* protoObject = protoProperty.singleton(constraints());
+ if (!protoObject)
+ break;
+
+ rhs->setImplicitlyUsedUnchecked();
+
+ if (tryFoldInstanceOf(obj, protoObject))
+ return true;
+
+ MInstanceOf* ins = MInstanceOf::New(alloc(), obj, protoObject);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+ } while (false);
+
+ // Try to inline a fast path based on Baseline ICs.
+ do {
+ Shape* shape;
+ uint32_t slot;
+ JSObject* protoObject;
+ if (!inspector->instanceOfData(pc, &shape, &slot, &protoObject))
+ break;
+
+ // Shape guard.
+ rhs = addShapeGuard(rhs, shape, Bailout_ShapeGuard);
+
+ // Guard .prototype == protoObject.
+ MOZ_ASSERT(shape->numFixedSlots() == 0, "Must be a dynamic slot");
+ MSlots* slots = MSlots::New(alloc(), rhs);
+ current->add(slots);
+ MLoadSlot* prototype = MLoadSlot::New(alloc(), slots, slot);
+ current->add(prototype);
+ MConstant* protoConst = MConstant::NewConstraintlessObject(alloc(), protoObject);
+ current->add(protoConst);
+ MGuardObjectIdentity* guard = MGuardObjectIdentity::New(alloc(), prototype, protoConst,
+ /* bailOnEquality = */ false);
+ current->add(guard);
+
+ if (tryFoldInstanceOf(obj, protoObject))
+ return true;
+
+ MInstanceOf* ins = MInstanceOf::New(alloc(), obj, protoObject);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins);
+ } while (false);
+
+ MCallInstanceOf* ins = MCallInstanceOf::New(alloc(), obj, rhs);
+
+ current->add(ins);
+ current->push(ins);
+
+ return resumeAfter(ins);
+}
+
+bool
+IonBuilder::jsop_debugger()
+{
+ MDebugger* debugger = MDebugger::New(alloc());
+ current->add(debugger);
+
+ // The |debugger;| statement will always bail out to baseline if
+ // cx->compartment()->isDebuggee(). Resume in-place and have baseline
+ // handle the details.
+ return resumeAt(debugger, pc);
+}
+
+MInstruction*
+IonBuilder::addConvertElementsToDoubles(MDefinition* elements)
+{
+ MInstruction* convert = MConvertElementsToDoubles::New(alloc(), elements);
+ current->add(convert);
+ return convert;
+}
+
+MDefinition*
+IonBuilder::addMaybeCopyElementsForWrite(MDefinition* object, bool checkNative)
+{
+ if (!ElementAccessMightBeCopyOnWrite(constraints(), object))
+ return object;
+ MInstruction* copy = MMaybeCopyElementsForWrite::New(alloc(), object, checkNative);
+ current->add(copy);
+ return copy;
+}
+
+MInstruction*
+IonBuilder::addBoundsCheck(MDefinition* index, MDefinition* length)
+{
+ MInstruction* check = MBoundsCheck::New(alloc(), index, length);
+ current->add(check);
+
+ // If a bounds check failed in the past, don't optimize bounds checks.
+ if (failedBoundsCheck_)
+ check->setNotMovable();
+
+ return check;
+}
+
+MInstruction*
+IonBuilder::addShapeGuard(MDefinition* obj, Shape* const shape, BailoutKind bailoutKind)
+{
+ MGuardShape* guard = MGuardShape::New(alloc(), obj, shape, bailoutKind);
+ current->add(guard);
+
+ // If a shape guard failed in the past, don't optimize shape guards.
+ if (failedShapeGuard_)
+ guard->setNotMovable();
+
+ return guard;
+}
+
+MInstruction*
+IonBuilder::addGroupGuard(MDefinition* obj, ObjectGroup* group, BailoutKind bailoutKind)
+{
+ MGuardObjectGroup* guard = MGuardObjectGroup::New(alloc(), obj, group,
+ /* bailOnEquality = */ false, bailoutKind);
+ current->add(guard);
+
+ // If a shape guard failed in the past, don't optimize group guards.
+ if (failedShapeGuard_)
+ guard->setNotMovable();
+
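+ // Narrow the guarded value's type set to the single guarded group so that
+ // later passes can take advantage of the more precise type information.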
+ LifoAlloc* lifoAlloc = alloc().lifoAlloc();
+ guard->setResultTypeSet(lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc,
+ TypeSet::ObjectType(group)));
+
+ return guard;
+}
+
+MInstruction*
+IonBuilder::addUnboxedExpandoGuard(MDefinition* obj, bool hasExpando, BailoutKind bailoutKind)
+{
+ MGuardUnboxedExpando* guard = MGuardUnboxedExpando::New(alloc(), obj, hasExpando, bailoutKind);
+ current->add(guard);
+
+ // If a shape guard failed in the past, don't optimize expando guards.
+ if (failedShapeGuard_)
+ guard->setNotMovable();
+
+ return guard;
+}
+
+MInstruction*
+IonBuilder::addGuardReceiverPolymorphic(MDefinition* obj,
+ const BaselineInspector::ReceiverVector& receivers)
+{
+ if (receivers.length() == 1) {
+ if (!receivers[0].group) {
+ // Monomorphic guard on a native object.
+ return addShapeGuard(obj, receivers[0].shape, Bailout_ShapeGuard);
+ }
+
+ if (!receivers[0].shape) {
+ // Guard on an unboxed object that does not have an expando.
+ obj = addGroupGuard(obj, receivers[0].group, Bailout_ShapeGuard);
+ return addUnboxedExpandoGuard(obj, /* hasExpando = */ false, Bailout_ShapeGuard);
+ }
+
+ // Monomorphic receiver guards are not yet supported when the receiver
+ // is an unboxed object with an expando.
+ }
+
+ MGuardReceiverPolymorphic* guard = MGuardReceiverPolymorphic::New(alloc(), obj);
+ current->add(guard);
+
+ if (failedShapeGuard_)
+ guard->setNotMovable();
+
+ for (size_t i = 0; i < receivers.length(); i++) {
+ if (!guard->addReceiver(receivers[i]))
+ return nullptr;
+ }
+
+ return guard;
+}
+
+MInstruction*
+IonBuilder::addSharedTypedArrayGuard(MDefinition* obj)
+{
+ MGuardSharedTypedArray* guard = MGuardSharedTypedArray::New(alloc(), obj);
+ current->add(guard);
+ return guard;
+}
+
+TemporaryTypeSet*
+IonBuilder::bytecodeTypes(jsbytecode* pc)
+{
+ return TypeScript::BytecodeTypes(script(), pc, bytecodeTypeMap, &typeArrayHint, typeArray);
+}
+
+TypedObjectPrediction
+IonBuilder::typedObjectPrediction(MDefinition* typedObj)
+{
+ // Extract TypedObjectPrediction directly if we can
+ if (typedObj->isNewDerivedTypedObject()) {
+ return typedObj->toNewDerivedTypedObject()->prediction();
+ }
+
+ TemporaryTypeSet* types = typedObj->resultTypeSet();
+ return typedObjectPrediction(types);
+}
+
+TypedObjectPrediction
+IonBuilder::typedObjectPrediction(TemporaryTypeSet* types)
+{
+ // Type set must be known to be an object.
+ if (!types || types->getKnownMIRType() != MIRType::Object)
+ return TypedObjectPrediction();
+
+ // And only known objects.
+ if (types->unknownObject())
+ return TypedObjectPrediction();
+
+ TypedObjectPrediction out;
+ for (uint32_t i = 0; i < types->getObjectCount(); i++) {
+ ObjectGroup* group = types->getGroup(i);
+ if (!group || !TypeSet::ObjectKey::get(group)->hasStableClassAndProto(constraints()))
+ return TypedObjectPrediction();
+
+ if (!IsTypedObjectClass(group->clasp()))
+ return TypedObjectPrediction();
+
+ out.addDescr(group->typeDescr());
+ }
+
+ return out;
+}
+
+MDefinition*
+IonBuilder::loadTypedObjectType(MDefinition* typedObj)
+{
+ // Shortcircuit derived type objects, meaning the intermediate
+ // objects created to represent `a.b` in an expression like
+ // `a.b.c`. In that case, the type object can be simply pulled
+ // from the operands of that instruction.
+ if (typedObj->isNewDerivedTypedObject())
+ return typedObj->toNewDerivedTypedObject()->type();
+
+ MInstruction* descr = MTypedObjectDescr::New(alloc(), typedObj);
+ current->add(descr);
+
+ return descr;
+}
+
+// Given a typed object `typedObj` and an offset `offset` into that
+// object's data, returns another typed object and an adjusted offset
+// where the data can be found. Often, these returned values are the
+// same as the inputs, but in cases where intermediate derived type
+// objects have been created, the return values will remove
+// intermediate layers (often rendering those derived type objects
+// into dead code).
+void
+IonBuilder::loadTypedObjectData(MDefinition* typedObj,
+ MDefinition** owner,
+ LinearSum* ownerOffset)
+{
+ MOZ_ASSERT(typedObj->type() == MIRType::Object);
+
+ // Shortcircuit derived type objects, meaning the intermediate
+ // objects created to represent `a.b` in an expression like
+ // `a.b.c`. In that case, the owner and a base offset can be
+ // pulled from the operands of the instruction and combined with
+ // `offset`.
+ if (typedObj->isNewDerivedTypedObject()) {
+ MNewDerivedTypedObject* ins = typedObj->toNewDerivedTypedObject();
+
+ SimpleLinearSum base = ExtractLinearSum(ins->offset());
+ if (!ownerOffset->add(base))
+ setForceAbort();
+
+ *owner = ins->owner();
+ return;
+ }
+
+ *owner = typedObj;
+}
+
+// Takes as input a typed object, an offset into that typed object's
+// memory, and the type repr of the data found at that offset. Returns
+// the elements pointer and a scaled offset. The scaled offset is
+// expressed in units of `unit`; when working with typed array MIR,
+// this is typically the alignment.
+void
+IonBuilder::loadTypedObjectElements(MDefinition* typedObj,
+ const LinearSum& baseByteOffset,
+ uint32_t scale,
+ MDefinition** ownerElements,
+ MDefinition** ownerScaledOffset,
+ int32_t* ownerByteAdjustment)
+{
+ MDefinition* owner;
+ LinearSum ownerByteOffset(alloc());
+ loadTypedObjectData(typedObj, &owner, &ownerByteOffset);
+
+ if (!ownerByteOffset.add(baseByteOffset))
+ setForceAbort();
+
+ TemporaryTypeSet* ownerTypes = owner->resultTypeSet();
+ const Class* clasp = ownerTypes ? ownerTypes->getKnownClass(constraints()) : nullptr;
+ if (clasp && IsInlineTypedObjectClass(clasp)) {
+ // Perform the load directly from the owner pointer.
+ if (!ownerByteOffset.add(InlineTypedObject::offsetOfDataStart()))
+ setForceAbort();
+ *ownerElements = owner;
+ } else {
+ bool definitelyOutline = clasp && IsOutlineTypedObjectClass(clasp);
+ *ownerElements = MTypedObjectElements::New(alloc(), owner, definitelyOutline);
+ current->add((*ownerElements)->toInstruction());
+ }
+
+ // Extract the constant adjustment from the byte offset.
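+    // The constant term is returned separately through *ownerByteAdjustment,
+    // so subtract it back out of the linear sum before scaling.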
+ *ownerByteAdjustment = ownerByteOffset.constant();
+ int32_t negativeAdjustment;
+ if (!SafeSub(0, *ownerByteAdjustment, &negativeAdjustment))
+ setForceAbort();
+ if (!ownerByteOffset.add(negativeAdjustment))
+ setForceAbort();
+
+ // Scale the byte offset if required by the MIR node which will access the
+ // typed object. In principle we should always be able to cleanly divide
+    // the terms in this linear sum due to alignment restrictions, but due to
+    // limitations of ExtractLinearSum when applied to the terms in derived
+    // typed objects this isn't always possible. In these cases, fall back
+ // on an explicit division operation.
+ if (ownerByteOffset.divide(scale)) {
+ *ownerScaledOffset = ConvertLinearSum(alloc(), current, ownerByteOffset);
+ } else {
+ MDefinition* unscaledOffset = ConvertLinearSum(alloc(), current, ownerByteOffset);
+ *ownerScaledOffset = MDiv::New(alloc(), unscaledOffset, constantInt(scale),
+ MIRType::Int32, /* unsigned = */ false);
+ current->add((*ownerScaledOffset)->toInstruction());
+ }
+}
+
+// Looks up the offset and type prediction of the field `name` on the typed
+// object `typedObj`. If the field is found, returns true and sets
+// *fieldOffset, *fieldPrediction, and *fieldIndex. Returns false
+// otherwise. Infallible.
+bool
+IonBuilder::typedObjectHasField(MDefinition* typedObj,
+ PropertyName* name,
+ size_t* fieldOffset,
+ TypedObjectPrediction* fieldPrediction,
+ size_t* fieldIndex)
+{
+ TypedObjectPrediction objPrediction = typedObjectPrediction(typedObj);
+ if (objPrediction.isUseless()) {
+ trackOptimizationOutcome(TrackedOutcome::AccessNotTypedObject);
+ return false;
+ }
+
+ // Must be accessing a struct.
+ if (objPrediction.kind() != type::Struct) {
+ trackOptimizationOutcome(TrackedOutcome::NotStruct);
+ return false;
+ }
+
+ // Determine the type/offset of the field `name`, if any.
+ if (!objPrediction.hasFieldNamed(NameToId(name), fieldOffset,
+ fieldPrediction, fieldIndex))
+ {
+ trackOptimizationOutcome(TrackedOutcome::StructNoField);
+ return false;
+ }
+
+ return true;
+}
+
+MDefinition*
+IonBuilder::typeObjectForElementFromArrayStructType(MDefinition* typeObj)
+{
+ MInstruction* elemType = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_ARRAY_ELEM_TYPE);
+ current->add(elemType);
+
+ MInstruction* unboxElemType = MUnbox::New(alloc(), elemType, MIRType::Object, MUnbox::Infallible);
+ current->add(unboxElemType);
+
+ return unboxElemType;
+}
+
+MDefinition*
+IonBuilder::typeObjectForFieldFromStructType(MDefinition* typeObj,
+ size_t fieldIndex)
+{
+ // Load list of field type objects.
+
+ MInstruction* fieldTypes = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_STRUCT_FIELD_TYPES);
+ current->add(fieldTypes);
+
+ MInstruction* unboxFieldTypes = MUnbox::New(alloc(), fieldTypes, MIRType::Object, MUnbox::Infallible);
+ current->add(unboxFieldTypes);
+
+ // Index into list with index of field.
+
+ MInstruction* fieldTypesElements = MElements::New(alloc(), unboxFieldTypes);
+ current->add(fieldTypesElements);
+
+ MConstant* fieldIndexDef = constantInt(fieldIndex);
+
+ MInstruction* fieldType = MLoadElement::New(alloc(), fieldTypesElements, fieldIndexDef, false, false);
+ current->add(fieldType);
+
+ MInstruction* unboxFieldType = MUnbox::New(alloc(), fieldType, MIRType::Object, MUnbox::Infallible);
+ current->add(unboxFieldType);
+
+ return unboxFieldType;
+}
+
+bool
+IonBuilder::storeScalarTypedObjectValue(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ScalarTypeDescr::Type type,
+ MDefinition* value)
+{
+ // Find location within the owner object.
+ MDefinition* elements;
+ MDefinition* scaledOffset;
+ int32_t adjustment;
+ uint32_t alignment = ScalarTypeDescr::alignment(type);
+ loadTypedObjectElements(typedObj, byteOffset, alignment, &elements, &scaledOffset, &adjustment);
+
+ // Clamp value to [0, 255] when type is Uint8Clamped
+ MDefinition* toWrite = value;
+ if (type == Scalar::Uint8Clamped) {
+ toWrite = MClampToUint8::New(alloc(), value);
+ current->add(toWrite->toInstruction());
+ }
+
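+    // Emit the raw scalar store; the written value is truncated to the target
+    // scalar type.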
+ MStoreUnboxedScalar* store =
+ MStoreUnboxedScalar::New(alloc(), elements, scaledOffset, toWrite,
+ type, MStoreUnboxedScalar::TruncateInput,
+ DoesNotRequireMemoryBarrier, adjustment);
+ current->add(store);
+
+ return true;
+}
+
+bool
+IonBuilder::storeReferenceTypedObjectValue(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ReferenceTypeDescr::Type type,
+ MDefinition* value,
+ PropertyName* name)
+{
+ // Make sure we aren't adding new type information for writes of object and value
+ // references.
+ if (type != ReferenceTypeDescr::TYPE_STRING) {
+ MOZ_ASSERT(type == ReferenceTypeDescr::TYPE_ANY ||
+ type == ReferenceTypeDescr::TYPE_OBJECT);
+ MIRType implicitType =
+ (type == ReferenceTypeDescr::TYPE_ANY) ? MIRType::Undefined : MIRType::Null;
+
+ if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, &typedObj, name, &value,
+ /* canModify = */ true, implicitType))
+ {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return false;
+ }
+ }
+
+ // Find location within the owner object.
+ MDefinition* elements;
+ MDefinition* scaledOffset;
+ int32_t adjustment;
+ uint32_t alignment = ReferenceTypeDescr::alignment(type);
+ loadTypedObjectElements(typedObj, byteOffset, alignment, &elements, &scaledOffset, &adjustment);
+
+ MInstruction* store = nullptr; // initialize to silence GCC warning
+ switch (type) {
+ case ReferenceTypeDescr::TYPE_ANY:
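+        // Storing a boxed Value: this may require both a post barrier (for
+        // nursery pointers) and a pre-write barrier on the element store.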
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteBarrier::New(alloc(), typedObj, value));
+ store = MStoreElement::New(alloc(), elements, scaledOffset, value, false, adjustment);
+ store->toStoreElement()->setNeedsBarrier();
+ break;
+ case ReferenceTypeDescr::TYPE_OBJECT:
+ // Note: We cannot necessarily tell at this point whether a post
+ // barrier is needed, because the type policy may insert ToObjectOrNull
+ // instructions later, and those may require a post barrier. Therefore,
+ // defer the insertion of post barriers to the type policy.
+ store = MStoreUnboxedObjectOrNull::New(alloc(), elements, scaledOffset, value, typedObj, adjustment);
+ break;
+ case ReferenceTypeDescr::TYPE_STRING:
+ // Strings are not nursery allocated, so these writes do not need post
+ // barriers.
+ store = MStoreUnboxedString::New(alloc(), elements, scaledOffset, value, adjustment);
+ break;
+ }
+
+ current->add(store);
+ return true;
+}
+
+JSObject*
+IonBuilder::checkNurseryObject(JSObject* obj)
+{
+ // If we try to use any nursery pointers during compilation, make sure that
+ // the main thread will cancel this compilation before performing a minor
+ // GC. All constants used during compilation should either go through this
+ // function or should come from a type set (which has a similar barrier).
+ if (obj && IsInsideNursery(obj)) {
+ compartment->runtime()->setMinorGCShouldCancelIonCompilations();
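+        // Mark this builder and every builder up the inlining chain as not
+        // safe for minor GC.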
+ IonBuilder* builder = this;
+ while (builder) {
+ builder->setNotSafeForMinorGC();
+ builder = builder->callerBuilder_;
+ }
+ }
+
+ return obj;
+}
+
+MConstant*
+IonBuilder::constant(const Value& v)
+{
+ MOZ_ASSERT(!v.isString() || v.toString()->isAtom(),
+ "Handle non-atomized strings outside IonBuilder.");
+
+ if (v.isObject())
+ checkNurseryObject(&v.toObject());
+
+ MConstant* c = MConstant::New(alloc(), v, constraints());
+ current->add(c);
+ return c;
+}
+
+MConstant*
+IonBuilder::constantInt(int32_t i)
+{
+ return constant(Int32Value(i));
+}
+
+MInstruction*
+IonBuilder::initializedLength(MDefinition* obj, MDefinition* elements, JSValueType unboxedType)
+{
+ MInstruction* res;
+ if (unboxedType != JSVAL_TYPE_MAGIC)
+ res = MUnboxedArrayInitializedLength::New(alloc(), obj);
+ else
+ res = MInitializedLength::New(alloc(), elements);
+ current->add(res);
+ return res;
+}
+
+MInstruction*
+IonBuilder::setInitializedLength(MDefinition* obj, JSValueType unboxedType, size_t count)
+{
+ MOZ_ASSERT(count);
+
+ MInstruction* res;
+ if (unboxedType != JSVAL_TYPE_MAGIC) {
+ res = MSetUnboxedArrayInitializedLength::New(alloc(), obj, constant(Int32Value(count)));
+ } else {
+ // MSetInitializedLength takes the index of the last element, rather
+ // than the count itself.
+ MInstruction* elements = MElements::New(alloc(), obj, /* unboxed = */ false);
+ current->add(elements);
+ res = MSetInitializedLength::New(alloc(), elements, constant(Int32Value(count - 1)));
+ }
+ current->add(res);
+ return res;
+}
+
+MDefinition*
+IonBuilder::getCallee()
+{
+ if (inliningDepth_ == 0) {
+ MInstruction* callee = MCallee::New(alloc());
+ current->add(callee);
+ return callee;
+ }
+
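+    // When inlining, the callee is known from the inlined call site's CallInfo.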
+ return inlineCallInfo_->fun();
+}
+
+MDefinition*
+IonBuilder::addLexicalCheck(MDefinition* input)
+{
+ MOZ_ASSERT(JSOp(*pc) == JSOP_CHECKLEXICAL ||
+ JSOp(*pc) == JSOP_CHECKALIASEDLEXICAL ||
+ JSOp(*pc) == JSOP_GETIMPORT);
+
+ MInstruction* lexicalCheck;
+
+ // If we're guaranteed to not be JS_UNINITIALIZED_LEXICAL, no need to check.
+ if (input->type() == MIRType::MagicUninitializedLexical) {
+ // Mark the input as implicitly used so the JS_UNINITIALIZED_LEXICAL
+ // magic value will be preserved on bailout.
+ input->setImplicitlyUsedUnchecked();
+ lexicalCheck = MThrowRuntimeLexicalError::New(alloc(), JSMSG_UNINITIALIZED_LEXICAL);
+ current->add(lexicalCheck);
+ if (!resumeAfter(lexicalCheck))
+ return nullptr;
+ return constant(UndefinedValue());
+ }
+
+ if (input->type() == MIRType::Value) {
+ lexicalCheck = MLexicalCheck::New(alloc(), input);
+ current->add(lexicalCheck);
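+        // If a lexical check in this script has failed before, keep this check
+        // where it is so that code motion cannot hoist it and trigger the same
+        // bailout again.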
+ if (failedLexicalCheck_)
+ lexicalCheck->setNotMovableUnchecked();
+ return lexicalCheck;
+ }
+
+ return input;
+}
+
+MDefinition*
+IonBuilder::convertToBoolean(MDefinition* input)
+{
+ // Convert to bool with the '!!' idiom
+ MNot* resultInverted = MNot::New(alloc(), input, constraints());
+ current->add(resultInverted);
+ MNot* result = MNot::New(alloc(), resultInverted, constraints());
+ current->add(result);
+
+ return result;
+}
+
+void
+IonBuilder::trace(JSTracer* trc)
+{
+ if (!compartment->runtime()->runtimeMatches(trc->runtime()))
+ return;
+
+ MOZ_ASSERT(rootList_);
+ rootList_->trace(trc);
+}
diff --git a/js/src/jit/IonBuilder.h b/js/src/jit/IonBuilder.h
new file mode 100644
index 000000000..38647a88f
--- /dev/null
+++ b/js/src/jit/IonBuilder.h
@@ -0,0 +1,1533 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonBuilder_h
+#define jit_IonBuilder_h
+
+// This file declares the data structures for building a MIRGraph from a
+// JSScript.
+
+#include "mozilla/LinkedList.h"
+
+#include "jit/BaselineInspector.h"
+#include "jit/BytecodeAnalysis.h"
+#include "jit/IonAnalysis.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/OptimizationTracking.h"
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+class CallInfo;
+class BaselineFrameInspector;
+
+enum class InlinableNative : uint16_t;
+
+// Records information about a baseline frame for compilation that is stable
+// when later used off thread.
+BaselineFrameInspector*
+NewBaselineFrameInspector(TempAllocator* temp, BaselineFrame* frame, CompileInfo* info);
+
+class IonBuilder
+ : public MIRGenerator,
+ public mozilla::LinkedListElement<IonBuilder>
+{
+ enum ControlStatus {
+ ControlStatus_Error,
+ ControlStatus_Abort,
+ ControlStatus_Ended, // There is no continuation/join point.
+ ControlStatus_Joined, // Created a join node.
+ ControlStatus_Jumped, // Parsing another branch at the same level.
+ ControlStatus_None // No control flow.
+ };
+
+ struct DeferredEdge : public TempObject
+ {
+ MBasicBlock* block;
+ DeferredEdge* next;
+
+ DeferredEdge(MBasicBlock* block, DeferredEdge* next)
+ : block(block), next(next)
+ { }
+ };
+
+ struct ControlFlowInfo {
+ // Entry in the cfgStack.
+ uint32_t cfgEntry;
+
+        // Label that continue statements go to.
+ jsbytecode* continuepc;
+
+ ControlFlowInfo(uint32_t cfgEntry, jsbytecode* continuepc)
+ : cfgEntry(cfgEntry),
+ continuepc(continuepc)
+ { }
+ };
+
+ // To avoid recursion, the bytecode analyzer uses a stack where each entry
+ // is a small state machine. As we encounter branches or jumps in the
+ // bytecode, we push information about the edges on the stack so that the
+ // CFG can be built in a tree-like fashion.
+ struct CFGState {
+ enum State {
+ IF_TRUE, // if() { }, no else.
+ IF_TRUE_EMPTY_ELSE, // if() { }, empty else
+ IF_ELSE_TRUE, // if() { X } else { }
+ IF_ELSE_FALSE, // if() { } else { X }
+ DO_WHILE_LOOP_BODY, // do { x } while ()
+ DO_WHILE_LOOP_COND, // do { } while (x)
+ WHILE_LOOP_COND, // while (x) { }
+ WHILE_LOOP_BODY, // while () { x }
+ FOR_LOOP_COND, // for (; x;) { }
+ FOR_LOOP_BODY, // for (; ;) { x }
+ FOR_LOOP_UPDATE, // for (; ; x) { }
+ TABLE_SWITCH, // switch() { x }
+ COND_SWITCH_CASE, // switch() { case X: ... }
+ COND_SWITCH_BODY, // switch() { case ...: X }
+ AND_OR, // && x, || x
+ LABEL, // label: x
+ TRY // try { x } catch(e) { }
+ };
+
+ State state; // Current state of this control structure.
+ jsbytecode* stopAt; // Bytecode at which to stop the processing loop.
+
+ // For if structures, this contains branch information.
+ union {
+ struct {
+ MBasicBlock* ifFalse;
+ jsbytecode* falseEnd;
+ MBasicBlock* ifTrue; // Set when the end of the true path is reached.
+ MTest* test;
+ } branch;
+ struct {
+ // Common entry point.
+ MBasicBlock* entry;
+
+ // Whether OSR is being performed for this loop.
+ bool osr;
+
+ // Position of where the loop body starts and ends.
+ jsbytecode* bodyStart;
+ jsbytecode* bodyEnd;
+
+ // pc immediately after the loop exits.
+ jsbytecode* exitpc;
+
+ // pc for 'continue' jumps.
+ jsbytecode* continuepc;
+
+ // Common exit point. Created lazily, so it may be nullptr.
+ MBasicBlock* successor;
+
+ // Deferred break and continue targets.
+ DeferredEdge* breaks;
+ DeferredEdge* continues;
+
+ // Initial state, in case loop processing is restarted.
+ State initialState;
+ jsbytecode* initialPc;
+ jsbytecode* initialStopAt;
+ jsbytecode* loopHead;
+
+ // For-loops only.
+ jsbytecode* condpc;
+ jsbytecode* updatepc;
+ jsbytecode* updateEnd;
+ } loop;
+ struct {
+ // pc immediately after the switch.
+ jsbytecode* exitpc;
+
+ // Deferred break and continue targets.
+ DeferredEdge* breaks;
+
+ // MIR instruction
+ MTableSwitch* ins;
+
+            // The index of the current successor that gets mapped into a block.
+ uint32_t currentBlock;
+
+ } tableswitch;
+ struct {
+ // Vector of body blocks to process after the cases.
+ FixedList<MBasicBlock*>* bodies;
+
+ // When processing case statements, this counter points at the
+ // last uninitialized body. When processing bodies, this
+ // counter targets the next body to process.
+ uint32_t currentIdx;
+
+ // Remember the block index of the default case.
+ jsbytecode* defaultTarget;
+ uint32_t defaultIdx;
+
+ // Block immediately after the switch.
+ jsbytecode* exitpc;
+ DeferredEdge* breaks;
+ } condswitch;
+ struct {
+ DeferredEdge* breaks;
+ } label;
+ struct {
+ MBasicBlock* successor;
+ } try_;
+ };
+
+ inline bool isLoop() const {
+ switch (state) {
+ case DO_WHILE_LOOP_COND:
+ case DO_WHILE_LOOP_BODY:
+ case WHILE_LOOP_COND:
+ case WHILE_LOOP_BODY:
+ case FOR_LOOP_COND:
+ case FOR_LOOP_BODY:
+ case FOR_LOOP_UPDATE:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static CFGState If(jsbytecode* join, MTest* test);
+ static CFGState IfElse(jsbytecode* trueEnd, jsbytecode* falseEnd, MTest* test);
+ static CFGState AndOr(jsbytecode* join, MBasicBlock* lhs);
+ static CFGState TableSwitch(jsbytecode* exitpc, MTableSwitch* ins);
+ static CFGState CondSwitch(IonBuilder* builder, jsbytecode* exitpc, jsbytecode* defaultTarget);
+ static CFGState Label(jsbytecode* exitpc);
+ static CFGState Try(jsbytecode* exitpc, MBasicBlock* successor);
+ };
+
+ static int CmpSuccessors(const void* a, const void* b);
+
+ public:
+ IonBuilder(JSContext* analysisContext, CompileCompartment* comp,
+ const JitCompileOptions& options, TempAllocator* temp,
+ MIRGraph* graph, CompilerConstraintList* constraints,
+ BaselineInspector* inspector, CompileInfo* info,
+ const OptimizationInfo* optimizationInfo, BaselineFrameInspector* baselineFrame,
+ size_t inliningDepth = 0, uint32_t loopDepth = 0);
+
+    // If build() or buildInline() returns false, callers should always check
+    // whether the call overrecursed. Overrecursion is not
+ // signaled as OOM and will not in general be caught by OOM paths.
+ MOZ_MUST_USE bool build();
+ MOZ_MUST_USE bool buildInline(IonBuilder* callerBuilder, MResumePoint* callerResumePoint,
+ CallInfo& callInfo);
+
+ private:
+ MOZ_MUST_USE bool traverseBytecode();
+ ControlStatus snoopControlFlow(JSOp op);
+ MOZ_MUST_USE bool processIterators();
+ MOZ_MUST_USE bool inspectOpcode(JSOp op);
+ uint32_t readIndex(jsbytecode* pc);
+ JSAtom* readAtom(jsbytecode* pc);
+ bool abort(const char* message, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void trackActionableAbort(const char* message);
+ void spew(const char* message);
+
+ JSFunction* getSingleCallTarget(TemporaryTypeSet* calleeTypes);
+ MOZ_MUST_USE bool getPolyCallTargets(TemporaryTypeSet* calleeTypes, bool constructing,
+ ObjectVector& targets, uint32_t maxTargets);
+
+ void popCfgStack();
+ DeferredEdge* filterDeadDeferredEdges(DeferredEdge* edge);
+ MOZ_MUST_USE bool processDeferredContinues(CFGState& state);
+ ControlStatus processControlEnd();
+ ControlStatus processCfgStack();
+ ControlStatus processCfgEntry(CFGState& state);
+ ControlStatus processIfEnd(CFGState& state);
+ ControlStatus processIfElseTrueEnd(CFGState& state);
+ ControlStatus processIfElseFalseEnd(CFGState& state);
+ ControlStatus processDoWhileBodyEnd(CFGState& state);
+ ControlStatus processDoWhileCondEnd(CFGState& state);
+ ControlStatus processWhileCondEnd(CFGState& state);
+ ControlStatus processWhileBodyEnd(CFGState& state);
+ ControlStatus processForCondEnd(CFGState& state);
+ ControlStatus processForBodyEnd(CFGState& state);
+ ControlStatus processForUpdateEnd(CFGState& state);
+ ControlStatus processNextTableSwitchCase(CFGState& state);
+ ControlStatus processCondSwitchCase(CFGState& state);
+ ControlStatus processCondSwitchBody(CFGState& state);
+ ControlStatus processSwitchBreak(JSOp op);
+ ControlStatus processSwitchEnd(DeferredEdge* breaks, jsbytecode* exitpc);
+ ControlStatus processAndOrEnd(CFGState& state);
+ ControlStatus processLabelEnd(CFGState& state);
+ ControlStatus processTryEnd(CFGState& state);
+ ControlStatus processReturn(JSOp op);
+ ControlStatus processThrow();
+ ControlStatus processContinue(JSOp op);
+ ControlStatus processBreak(JSOp op, jssrcnote* sn);
+ ControlStatus maybeLoop(JSOp op, jssrcnote* sn);
+ MOZ_MUST_USE bool pushLoop(CFGState::State state, jsbytecode* stopAt, MBasicBlock* entry,
+ bool osr, jsbytecode* loopHead, jsbytecode* initialPc,
+ jsbytecode* bodyStart, jsbytecode* bodyEnd,
+ jsbytecode* exitpc, jsbytecode* continuepc);
+ MOZ_MUST_USE bool analyzeNewLoopTypes(MBasicBlock* entry, jsbytecode* start, jsbytecode* end);
+
+ MBasicBlock* addBlock(MBasicBlock* block, uint32_t loopDepth);
+ MBasicBlock* newBlock(MBasicBlock* predecessor, jsbytecode* pc);
+ MBasicBlock* newBlock(MBasicBlock* predecessor, jsbytecode* pc, uint32_t loopDepth);
+ MBasicBlock* newBlock(MBasicBlock* predecessor, jsbytecode* pc, MResumePoint* priorResumePoint);
+ MBasicBlock* newBlockPopN(MBasicBlock* predecessor, jsbytecode* pc, uint32_t popped);
+ MBasicBlock* newBlockAfter(MBasicBlock* at, MBasicBlock* predecessor, jsbytecode* pc);
+ MBasicBlock* newOsrPreheader(MBasicBlock* header, jsbytecode* loopEntry,
+ jsbytecode* beforeLoopEntry);
+ MBasicBlock* newPendingLoopHeader(MBasicBlock* predecessor, jsbytecode* pc, bool osr, bool canOsr,
+ unsigned stackPhiCount);
+ MBasicBlock* newBlock(jsbytecode* pc) {
+ return newBlock(nullptr, pc);
+ }
+ MBasicBlock* newBlockAfter(MBasicBlock* at, jsbytecode* pc) {
+ return newBlockAfter(at, nullptr, pc);
+ }
+
+ // We want to make sure that our MTest instructions all check whether the
+ // thing being tested might emulate undefined. So we funnel their creation
+ // through this method, to make sure that happens. We don't want to just do
+ // the check in MTest::New, because that can run on background compilation
+ // threads, and we're not sure it's safe to touch that part of the typeset
+ // from a background thread.
+ MTest* newTest(MDefinition* ins, MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ // Given a list of pending breaks, creates a new block and inserts a Goto
+ // linking each break to the new block.
+ MBasicBlock* createBreakCatchBlock(DeferredEdge* edge, jsbytecode* pc);
+
+    // Finishes loops that do not actually loop, e.g. loops containing only
+    // breaks and returns, or do-while loops whose condition is constant false.
+ ControlStatus processBrokenLoop(CFGState& state);
+
+ // Computes loop phis, places them in all successors of a loop, then
+ // handles any pending breaks.
+ ControlStatus finishLoop(CFGState& state, MBasicBlock* successor);
+
+ // Incorporates a type/typeSet into an OSR value for a loop, after the loop
+ // body has been processed.
+ MOZ_MUST_USE bool addOsrValueTypeBarrier(uint32_t slot, MInstruction** def,
+ MIRType type, TemporaryTypeSet* typeSet);
+ MOZ_MUST_USE bool maybeAddOsrTypeBarriers();
+
+ // Restarts processing of a loop if the type information at its header was
+ // incomplete.
+ ControlStatus restartLoop(const CFGState& state);
+
+ void assertValidLoopHeadOp(jsbytecode* pc);
+
+ ControlStatus forLoop(JSOp op, jssrcnote* sn);
+ ControlStatus whileOrForInLoop(jssrcnote* sn);
+ ControlStatus doWhileLoop(JSOp op, jssrcnote* sn);
+ ControlStatus tableSwitch(JSOp op, jssrcnote* sn);
+ ControlStatus condSwitch(JSOp op, jssrcnote* sn);
+
+ // Please see the Big Honkin' Comment about how resume points work in
+ // IonBuilder.cpp, near the definition for this function.
+ MOZ_MUST_USE bool resume(MInstruction* ins, jsbytecode* pc, MResumePoint::Mode mode);
+ MOZ_MUST_USE bool resumeAt(MInstruction* ins, jsbytecode* pc);
+ MOZ_MUST_USE bool resumeAfter(MInstruction* ins);
+ MOZ_MUST_USE bool maybeInsertResume();
+
+ void insertRecompileCheck();
+
+ MOZ_MUST_USE bool initParameters();
+ void initLocals();
+ void rewriteParameter(uint32_t slotIdx, MDefinition* param, int32_t argIndex);
+ MOZ_MUST_USE bool rewriteParameters();
+ MOZ_MUST_USE bool initEnvironmentChain(MDefinition* callee = nullptr);
+ MOZ_MUST_USE bool initArgumentsObject();
+ void pushConstant(const Value& v);
+
+ MConstant* constant(const Value& v);
+ MConstant* constantInt(int32_t i);
+ MInstruction* initializedLength(MDefinition* obj, MDefinition* elements,
+ JSValueType unboxedType);
+ MInstruction* setInitializedLength(MDefinition* obj, JSValueType unboxedType, size_t count);
+
+ // Improve the type information at tests
+ MOZ_MUST_USE bool improveTypesAtTest(MDefinition* ins, bool trueBranch, MTest* test);
+ MOZ_MUST_USE bool improveTypesAtCompare(MCompare* ins, bool trueBranch, MTest* test);
+ MOZ_MUST_USE bool improveTypesAtNullOrUndefinedCompare(MCompare* ins, bool trueBranch,
+ MTest* test);
+ MOZ_MUST_USE bool improveTypesAtTypeOfCompare(MCompare* ins, bool trueBranch, MTest* test);
+
+ // Used to detect triangular structure at test.
+ MOZ_MUST_USE bool detectAndOrStructure(MPhi* ins, bool* branchIsTrue);
+ MOZ_MUST_USE bool replaceTypeSet(MDefinition* subject, TemporaryTypeSet* type, MTest* test);
+
+    // Add a guard which ensures that the set of types flowing through this
+    // generated code corresponds to the observed types for the bytecode.
+ MDefinition* addTypeBarrier(MDefinition* def, TemporaryTypeSet* observed,
+ BarrierKind kind, MTypeBarrier** pbarrier = nullptr);
+ MOZ_MUST_USE bool pushTypeBarrier(MDefinition* def, TemporaryTypeSet* observed,
+ BarrierKind kind);
+
+ // As pushTypeBarrier, but will compute the needBarrier boolean itself based
+ // on observed and the JSFunction that we're planning to call. The
+ // JSFunction must be a DOM method or getter.
+ MOZ_MUST_USE bool pushDOMTypeBarrier(MInstruction* ins, TemporaryTypeSet* observed,
+ JSFunction* func);
+
+ // If definiteType is not known or def already has the right type, just
+ // returns def. Otherwise, returns an MInstruction that has that definite
+    // type, infallibly unboxing def as needed. The new instruction will be
+ // added to |current| in this case.
+ MDefinition* ensureDefiniteType(MDefinition* def, MIRType definiteType);
+
+    // Creates an MDefinition from the given def, refined with |types| as its type set.
+ MDefinition* ensureDefiniteTypeSet(MDefinition* def, TemporaryTypeSet* types);
+
+ void maybeMarkEmpty(MDefinition* ins);
+
+ JSObject* getSingletonPrototype(JSFunction* target);
+
+ MDefinition* createThisScripted(MDefinition* callee, MDefinition* newTarget);
+ MDefinition* createThisScriptedSingleton(JSFunction* target, MDefinition* callee);
+ MDefinition* createThisScriptedBaseline(MDefinition* callee);
+ MDefinition* createThis(JSFunction* target, MDefinition* callee, MDefinition* newTarget);
+ MInstruction* createNamedLambdaObject(MDefinition* callee, MDefinition* envObj);
+ MInstruction* createCallObject(MDefinition* callee, MDefinition* envObj);
+
+ MDefinition* walkEnvironmentChain(unsigned hops);
+
+ MInstruction* addConvertElementsToDoubles(MDefinition* elements);
+ MDefinition* addMaybeCopyElementsForWrite(MDefinition* object, bool checkNative);
+ MInstruction* addBoundsCheck(MDefinition* index, MDefinition* length);
+ MInstruction* addShapeGuard(MDefinition* obj, Shape* const shape, BailoutKind bailoutKind);
+ MInstruction* addGroupGuard(MDefinition* obj, ObjectGroup* group, BailoutKind bailoutKind);
+ MInstruction* addUnboxedExpandoGuard(MDefinition* obj, bool hasExpando, BailoutKind bailoutKind);
+ MInstruction* addSharedTypedArrayGuard(MDefinition* obj);
+
+ MInstruction*
+ addGuardReceiverPolymorphic(MDefinition* obj, const BaselineInspector::ReceiverVector& receivers);
+
+ MDefinition* convertShiftToMaskForStaticTypedArray(MDefinition* id,
+ Scalar::Type viewType);
+
+ bool invalidatedIdempotentCache();
+
+ bool hasStaticEnvironmentObject(EnvironmentCoordinate ec, JSObject** pcall);
+ MOZ_MUST_USE bool loadSlot(MDefinition* obj, size_t slot, size_t nfixed, MIRType rvalType,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool loadSlot(MDefinition* obj, Shape* shape, MIRType rvalType,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool storeSlot(MDefinition* obj, size_t slot, size_t nfixed, MDefinition* value,
+ bool needsBarrier, MIRType slotType = MIRType::None);
+ MOZ_MUST_USE bool storeSlot(MDefinition* obj, Shape* shape, MDefinition* value,
+ bool needsBarrier, MIRType slotType = MIRType::None);
+    bool shouldAbortOnPreliminaryGroups(MDefinition* obj);
+
+ MDefinition* tryInnerizeWindow(MDefinition* obj);
+ MDefinition* maybeUnboxForPropertyAccess(MDefinition* def);
+
+ // jsop_getprop() helpers.
+ MOZ_MUST_USE bool checkIsDefinitelyOptimizedArguments(MDefinition* obj, bool* isOptimizedArgs);
+ MOZ_MUST_USE bool getPropTryInferredConstant(bool* emitted, MDefinition* obj,
+ PropertyName* name, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryArgumentsLength(bool* emitted, MDefinition* obj);
+ MOZ_MUST_USE bool getPropTryArgumentsCallee(bool* emitted, MDefinition* obj,
+ PropertyName* name);
+ MOZ_MUST_USE bool getPropTryConstant(bool* emitted, MDefinition* obj, jsid id,
+ TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryNotDefined(bool* emitted, MDefinition* obj, jsid id,
+ TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryDefiniteSlot(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryModuleNamespace(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryUnboxed(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryCommonGetter(bool* emitted, MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryInlineAccess(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryTypedObject(bool* emitted, MDefinition* obj, PropertyName* name);
+ MOZ_MUST_USE bool getPropTryScalarPropOfTypedObject(bool* emitted, MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldTypeReprs);
+ MOZ_MUST_USE bool getPropTryReferencePropOfTypedObject(bool* emitted, MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldPrediction,
+ PropertyName* name);
+ MOZ_MUST_USE bool getPropTryComplexPropOfTypedObject(bool* emitted, MDefinition* typedObj,
+ int32_t fieldOffset,
+ TypedObjectPrediction fieldTypeReprs,
+ size_t fieldIndex);
+ MOZ_MUST_USE bool getPropTryInnerize(bool* emitted, MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTryCache(bool* emitted, MDefinition* obj, PropertyName* name,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MOZ_MUST_USE bool getPropTrySharedStub(bool* emitted, MDefinition* obj,
+ TemporaryTypeSet* types);
+
+ // jsop_setprop() helpers.
+ MOZ_MUST_USE bool setPropTryCommonSetter(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value);
+ MOZ_MUST_USE bool setPropTryCommonDOMSetter(bool* emitted, MDefinition* obj,
+ MDefinition* value, JSFunction* setter,
+ TemporaryTypeSet* objTypes);
+ MOZ_MUST_USE bool setPropTryDefiniteSlot(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes);
+ MOZ_MUST_USE bool setPropTryUnboxed(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes);
+ MOZ_MUST_USE bool setPropTryInlineAccess(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes);
+ MOZ_MUST_USE bool setPropTryTypedObject(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value);
+ MOZ_MUST_USE bool setPropTryReferencePropOfTypedObject(bool* emitted, MDefinition* obj,
+ int32_t fieldOffset, MDefinition* value,
+ TypedObjectPrediction fieldPrediction,
+ PropertyName* name);
+ MOZ_MUST_USE bool setPropTryScalarPropOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ int32_t fieldOffset,
+ MDefinition* value,
+ TypedObjectPrediction fieldTypeReprs);
+ MOZ_MUST_USE bool setPropTryCache(bool* emitted, MDefinition* obj,
+ PropertyName* name, MDefinition* value,
+ bool barrier, TemporaryTypeSet* objTypes);
+
+ // jsop_binary_arith helpers.
+ MBinaryArithInstruction* binaryArithInstruction(JSOp op, MDefinition* left, MDefinition* right);
+ MOZ_MUST_USE bool binaryArithTryConcat(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool binaryArithTrySpecialized(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool binaryArithTrySpecializedOnBaselineInspector(bool* emitted, JSOp op,
+ MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool arithTrySharedStub(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+
+ // jsop_bitnot helpers.
+ MOZ_MUST_USE bool bitnotTrySpecialized(bool* emitted, MDefinition* input);
+
+ // jsop_pow helpers.
+ MOZ_MUST_USE bool powTrySpecialized(bool* emitted, MDefinition* base, MDefinition* power,
+ MIRType outputType);
+
+ // jsop_compare helpers.
+ MOZ_MUST_USE bool compareTrySpecialized(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool compareTryBitwise(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool compareTrySpecializedOnBaselineInspector(bool* emitted, JSOp op,
+ MDefinition* left,
+ MDefinition* right);
+ MOZ_MUST_USE bool compareTrySharedStub(bool* emitted, JSOp op, MDefinition* left,
+ MDefinition* right);
+
+ // jsop_newarray helpers.
+ MOZ_MUST_USE bool newArrayTrySharedStub(bool* emitted);
+ MOZ_MUST_USE bool newArrayTryTemplateObject(bool* emitted, JSObject* templateObject,
+ uint32_t length);
+ MOZ_MUST_USE bool newArrayTryVM(bool* emitted, JSObject* templateObject, uint32_t length);
+
+ // jsop_newobject helpers.
+ MOZ_MUST_USE bool newObjectTrySharedStub(bool* emitted);
+ MOZ_MUST_USE bool newObjectTryTemplateObject(bool* emitted, JSObject* templateObject);
+ MOZ_MUST_USE bool newObjectTryVM(bool* emitted, JSObject* templateObject);
+
+ // jsop_in helpers.
+ MOZ_MUST_USE bool inTryDense(bool* emitted, MDefinition* obj, MDefinition* id);
+ MOZ_MUST_USE bool inTryFold(bool* emitted, MDefinition* obj, MDefinition* id);
+
+ // binary data lookup helpers.
+ TypedObjectPrediction typedObjectPrediction(MDefinition* typedObj);
+ TypedObjectPrediction typedObjectPrediction(TemporaryTypeSet* types);
+ MOZ_MUST_USE bool typedObjectHasField(MDefinition* typedObj,
+ PropertyName* name,
+ size_t* fieldOffset,
+ TypedObjectPrediction* fieldTypeReprs,
+ size_t* fieldIndex);
+ MDefinition* loadTypedObjectType(MDefinition* value);
+ void loadTypedObjectData(MDefinition* typedObj,
+ MDefinition** owner,
+ LinearSum* ownerOffset);
+ void loadTypedObjectElements(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ uint32_t scale,
+ MDefinition** ownerElements,
+ MDefinition** ownerScaledOffset,
+ int32_t* ownerByteAdjustment);
+ MDefinition* typeObjectForElementFromArrayStructType(MDefinition* typedObj);
+ MDefinition* typeObjectForFieldFromStructType(MDefinition* type,
+ size_t fieldIndex);
+ MOZ_MUST_USE bool storeReferenceTypedObjectValue(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ReferenceTypeDescr::Type type,
+ MDefinition* value,
+ PropertyName* name);
+ MOZ_MUST_USE bool storeScalarTypedObjectValue(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ScalarTypeDescr::Type type,
+ MDefinition* value);
+ MOZ_MUST_USE bool checkTypedObjectIndexInBounds(uint32_t elemSize,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objTypeDescrs,
+ LinearSum* indexAsByteOffset);
+ MOZ_MUST_USE bool pushDerivedTypedObject(bool* emitted,
+ MDefinition* obj,
+ const LinearSum& byteOffset,
+ TypedObjectPrediction derivedTypeDescrs,
+ MDefinition* derivedTypeObj);
+ MOZ_MUST_USE bool pushScalarLoadFromTypedObject(MDefinition* obj,
+ const LinearSum& byteoffset,
+ ScalarTypeDescr::Type type);
+ MOZ_MUST_USE bool pushReferenceLoadFromTypedObject(MDefinition* typedObj,
+ const LinearSum& byteOffset,
+ ReferenceTypeDescr::Type type,
+ PropertyName* name);
+ JSObject* getStaticTypedArrayObject(MDefinition* obj, MDefinition* index);
+
+ // jsop_setelem() helpers.
+ MOZ_MUST_USE bool setElemTryTypedArray(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value);
+ MOZ_MUST_USE bool setElemTryTypedObject(bool* emitted, MDefinition* obj,
+ MDefinition* index, MDefinition* value);
+ MOZ_MUST_USE bool setElemTryTypedStatic(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value);
+ MOZ_MUST_USE bool setElemTryDense(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value, bool writeHole);
+ MOZ_MUST_USE bool setElemTryArguments(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value);
+ MOZ_MUST_USE bool setElemTryCache(bool* emitted, MDefinition* object,
+ MDefinition* index, MDefinition* value);
+ MOZ_MUST_USE bool setElemTryReferenceElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ MDefinition* value,
+ TypedObjectPrediction elemPrediction);
+ MOZ_MUST_USE bool setElemTryScalarElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objTypeReprs,
+ MDefinition* value,
+ TypedObjectPrediction elemTypeReprs,
+ uint32_t elemSize);
+ MOZ_MUST_USE bool initializeArrayElement(MDefinition* obj, size_t index, MDefinition* value,
+ JSValueType unboxedType,
+ bool addResumePointAndIncrementInitializedLength);
+
+ // jsop_getelem() helpers.
+ MOZ_MUST_USE bool getElemTryDense(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryGetProp(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryTypedStatic(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryTypedArray(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryTypedObject(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryString(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryArguments(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryArgumentsInlined(bool* emitted, MDefinition* obj,
+ MDefinition* index);
+ MOZ_MUST_USE bool getElemTryCache(bool* emitted, MDefinition* obj, MDefinition* index);
+ MOZ_MUST_USE bool getElemTryScalarElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objTypeReprs,
+ TypedObjectPrediction elemTypeReprs,
+ uint32_t elemSize);
+ MOZ_MUST_USE bool getElemTryReferenceElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objPrediction,
+ TypedObjectPrediction elemPrediction);
+ MOZ_MUST_USE bool getElemTryComplexElemOfTypedObject(bool* emitted,
+ MDefinition* obj,
+ MDefinition* index,
+ TypedObjectPrediction objTypeReprs,
+ TypedObjectPrediction elemTypeReprs,
+ uint32_t elemSize);
+ TemporaryTypeSet* computeHeapType(const TemporaryTypeSet* objTypes, const jsid id);
+
+ enum BoundsChecking { DoBoundsCheck, SkipBoundsCheck };
+
+ MInstruction* addArrayBufferByteLength(MDefinition* obj);
+
+ // Add instructions to compute a typed array's length and data. Also
+ // optionally convert |*index| into a bounds-checked definition, if
+ // requested.
+ //
+ // If you only need the array's length, use addTypedArrayLength below.
+ void addTypedArrayLengthAndData(MDefinition* obj,
+ BoundsChecking checking,
+ MDefinition** index,
+ MInstruction** length, MInstruction** elements);
+
+ // Add an instruction to compute a typed array's length to the current
+ // block. If you also need the typed array's data, use the above method
+ // instead.
+ MInstruction* addTypedArrayLength(MDefinition* obj) {
+ MInstruction* length;
+ addTypedArrayLengthAndData(obj, SkipBoundsCheck, nullptr, &length, nullptr);
+ return length;
+ }
+
+ MOZ_MUST_USE bool improveThisTypesForCall();
+
+ MDefinition* getCallee();
+ MDefinition* getAliasedVar(EnvironmentCoordinate ec);
+ MDefinition* addLexicalCheck(MDefinition* input);
+
+ MDefinition* convertToBoolean(MDefinition* input);
+
+ MOZ_MUST_USE bool tryFoldInstanceOf(MDefinition* lhs, JSObject* protoObject);
+ MOZ_MUST_USE bool hasOnProtoChain(TypeSet::ObjectKey* key, JSObject* protoObject,
+ bool* hasOnProto);
+
+ MOZ_MUST_USE bool jsop_add(MDefinition* left, MDefinition* right);
+ MOZ_MUST_USE bool jsop_bitnot();
+ MOZ_MUST_USE bool jsop_bitop(JSOp op);
+ MOZ_MUST_USE bool jsop_binary_arith(JSOp op);
+ MOZ_MUST_USE bool jsop_binary_arith(JSOp op, MDefinition* left, MDefinition* right);
+ MOZ_MUST_USE bool jsop_pow();
+ MOZ_MUST_USE bool jsop_pos();
+ MOZ_MUST_USE bool jsop_neg();
+ MOZ_MUST_USE bool jsop_tostring();
+ MOZ_MUST_USE bool jsop_setarg(uint32_t arg);
+ MOZ_MUST_USE bool jsop_defvar(uint32_t index);
+ MOZ_MUST_USE bool jsop_deflexical(uint32_t index);
+ MOZ_MUST_USE bool jsop_deffun(uint32_t index);
+ MOZ_MUST_USE bool jsop_notearg();
+ MOZ_MUST_USE bool jsop_throwsetconst();
+ MOZ_MUST_USE bool jsop_checklexical();
+ MOZ_MUST_USE bool jsop_checkaliasedlexical(EnvironmentCoordinate ec);
+ MOZ_MUST_USE bool jsop_funcall(uint32_t argc);
+ MOZ_MUST_USE bool jsop_funapply(uint32_t argc);
+ MOZ_MUST_USE bool jsop_funapplyarguments(uint32_t argc);
+ MOZ_MUST_USE bool jsop_funapplyarray(uint32_t argc);
+ MOZ_MUST_USE bool jsop_call(uint32_t argc, bool constructing);
+ MOZ_MUST_USE bool jsop_eval(uint32_t argc);
+ MOZ_MUST_USE bool jsop_ifeq(JSOp op);
+ MOZ_MUST_USE bool jsop_try();
+ MOZ_MUST_USE bool jsop_label();
+ MOZ_MUST_USE bool jsop_condswitch();
+ MOZ_MUST_USE bool jsop_andor(JSOp op);
+ MOZ_MUST_USE bool jsop_dup2();
+ MOZ_MUST_USE bool jsop_loophead(jsbytecode* pc);
+ MOZ_MUST_USE bool jsop_compare(JSOp op);
+ MOZ_MUST_USE bool jsop_compare(JSOp op, MDefinition* left, MDefinition* right);
+ MOZ_MUST_USE bool getStaticName(JSObject* staticObject, PropertyName* name, bool* psucceeded,
+ MDefinition* lexicalCheck = nullptr);
+ MOZ_MUST_USE bool loadStaticSlot(JSObject* staticObject, BarrierKind barrier,
+ TemporaryTypeSet* types, uint32_t slot);
+ MOZ_MUST_USE bool setStaticName(JSObject* staticObject, PropertyName* name);
+ MOZ_MUST_USE bool jsop_getgname(PropertyName* name);
+ MOZ_MUST_USE bool jsop_getname(PropertyName* name);
+ MOZ_MUST_USE bool jsop_intrinsic(PropertyName* name);
+ MOZ_MUST_USE bool jsop_getimport(PropertyName* name);
+ MOZ_MUST_USE bool jsop_bindname(PropertyName* name);
+ MOZ_MUST_USE bool jsop_bindvar();
+ MOZ_MUST_USE bool jsop_getelem();
+ MOZ_MUST_USE bool jsop_getelem_dense(MDefinition* obj, MDefinition* index,
+ JSValueType unboxedType);
+ MOZ_MUST_USE bool jsop_getelem_typed(MDefinition* obj, MDefinition* index,
+ ScalarTypeDescr::Type arrayType);
+ MOZ_MUST_USE bool jsop_setelem();
+ MOZ_MUST_USE bool jsop_setelem_dense(TemporaryTypeSet::DoubleConversion conversion,
+ MDefinition* object, MDefinition* index,
+ MDefinition* value, JSValueType unboxedType,
+ bool writeHole, bool* emitted);
+ MOZ_MUST_USE bool jsop_setelem_typed(ScalarTypeDescr::Type arrayType,
+ MDefinition* object, MDefinition* index,
+ MDefinition* value);
+ MOZ_MUST_USE bool jsop_length();
+ MOZ_MUST_USE bool jsop_length_fastPath();
+ MOZ_MUST_USE bool jsop_arguments();
+ MOZ_MUST_USE bool jsop_arguments_getelem();
+ MOZ_MUST_USE bool jsop_runonce();
+ MOZ_MUST_USE bool jsop_rest();
+ MOZ_MUST_USE bool jsop_not();
+ MOZ_MUST_USE bool jsop_getprop(PropertyName* name);
+ MOZ_MUST_USE bool jsop_setprop(PropertyName* name);
+ MOZ_MUST_USE bool jsop_delprop(PropertyName* name);
+ MOZ_MUST_USE bool jsop_delelem();
+ MOZ_MUST_USE bool jsop_newarray(uint32_t length);
+ MOZ_MUST_USE bool jsop_newarray(JSObject* templateObject, uint32_t length);
+ MOZ_MUST_USE bool jsop_newarray_copyonwrite();
+ MOZ_MUST_USE bool jsop_newobject();
+ MOZ_MUST_USE bool jsop_initelem();
+ MOZ_MUST_USE bool jsop_initelem_array();
+ MOZ_MUST_USE bool jsop_initelem_getter_setter();
+ MOZ_MUST_USE bool jsop_mutateproto();
+ MOZ_MUST_USE bool jsop_initprop(PropertyName* name);
+ MOZ_MUST_USE bool jsop_initprop_getter_setter(PropertyName* name);
+ MOZ_MUST_USE bool jsop_regexp(RegExpObject* reobj);
+ MOZ_MUST_USE bool jsop_object(JSObject* obj);
+ MOZ_MUST_USE bool jsop_lambda(JSFunction* fun);
+ MOZ_MUST_USE bool jsop_lambda_arrow(JSFunction* fun);
+ MOZ_MUST_USE bool jsop_functionthis();
+ MOZ_MUST_USE bool jsop_globalthis();
+ MOZ_MUST_USE bool jsop_typeof();
+ MOZ_MUST_USE bool jsop_toasync();
+ MOZ_MUST_USE bool jsop_toid();
+ MOZ_MUST_USE bool jsop_iter(uint8_t flags);
+ MOZ_MUST_USE bool jsop_itermore();
+ MOZ_MUST_USE bool jsop_isnoiter();
+ MOZ_MUST_USE bool jsop_iterend();
+ MOZ_MUST_USE bool jsop_in();
+ MOZ_MUST_USE bool jsop_instanceof();
+ MOZ_MUST_USE bool jsop_getaliasedvar(EnvironmentCoordinate ec);
+ MOZ_MUST_USE bool jsop_setaliasedvar(EnvironmentCoordinate ec);
+ MOZ_MUST_USE bool jsop_debugger();
+ MOZ_MUST_USE bool jsop_newtarget();
+ MOZ_MUST_USE bool jsop_checkisobj(uint8_t kind);
+ MOZ_MUST_USE bool jsop_checkobjcoercible();
+ MOZ_MUST_USE bool jsop_pushcallobj();
+
+ /* Inlining. */
+
+ enum InliningStatus
+ {
+ InliningStatus_Error,
+ InliningStatus_NotInlined,
+ InliningStatus_WarmUpCountTooLow,
+ InliningStatus_Inlined
+ };
+
+ enum InliningDecision
+ {
+ InliningDecision_Error,
+ InliningDecision_Inline,
+ InliningDecision_DontInline,
+ InliningDecision_WarmUpCountTooLow
+ };
+
+ static InliningDecision DontInline(JSScript* targetScript, const char* reason);
+
+ // Helper function for canInlineTarget
+ bool hasCommonInliningPath(const JSScript* scriptToInline);
+
+ // Oracles.
+ InliningDecision canInlineTarget(JSFunction* target, CallInfo& callInfo);
+ InliningDecision makeInliningDecision(JSObject* target, CallInfo& callInfo);
+ MOZ_MUST_USE bool selectInliningTargets(const ObjectVector& targets, CallInfo& callInfo,
+ BoolVector& choiceSet, uint32_t* numInlineable);
+
+ // Native inlining helpers.
+ // The typeset for the return value of our function. These are
+ // the types it's been observed returning in the past.
+ TemporaryTypeSet* getInlineReturnTypeSet();
+ // The known MIR type of getInlineReturnTypeSet.
+ MIRType getInlineReturnType();
+
+ // Array natives.
+ InliningStatus inlineArray(CallInfo& callInfo);
+ InliningStatus inlineArrayIsArray(CallInfo& callInfo);
+ InliningStatus inlineArrayPopShift(CallInfo& callInfo, MArrayPopShift::Mode mode);
+ InliningStatus inlineArrayPush(CallInfo& callInfo);
+ InliningStatus inlineArraySlice(CallInfo& callInfo);
+ InliningStatus inlineArrayJoin(CallInfo& callInfo);
+ InliningStatus inlineArraySplice(CallInfo& callInfo);
+
+ // Math natives.
+ InliningStatus inlineMathAbs(CallInfo& callInfo);
+ InliningStatus inlineMathFloor(CallInfo& callInfo);
+ InliningStatus inlineMathCeil(CallInfo& callInfo);
+ InliningStatus inlineMathClz32(CallInfo& callInfo);
+ InliningStatus inlineMathRound(CallInfo& callInfo);
+ InliningStatus inlineMathSqrt(CallInfo& callInfo);
+ InliningStatus inlineMathAtan2(CallInfo& callInfo);
+ InliningStatus inlineMathHypot(CallInfo& callInfo);
+ InliningStatus inlineMathMinMax(CallInfo& callInfo, bool max);
+ InliningStatus inlineMathPow(CallInfo& callInfo);
+ InliningStatus inlineMathRandom(CallInfo& callInfo);
+ InliningStatus inlineMathImul(CallInfo& callInfo);
+ InliningStatus inlineMathFRound(CallInfo& callInfo);
+ InliningStatus inlineMathFunction(CallInfo& callInfo, MMathFunction::Function function);
+
+ // String natives.
+ InliningStatus inlineStringObject(CallInfo& callInfo);
+ InliningStatus inlineStrCharCodeAt(CallInfo& callInfo);
+ InliningStatus inlineConstantCharCodeAt(CallInfo& callInfo);
+ InliningStatus inlineStrFromCharCode(CallInfo& callInfo);
+ InliningStatus inlineStrFromCodePoint(CallInfo& callInfo);
+ InliningStatus inlineStrCharAt(CallInfo& callInfo);
+
+ // String intrinsics.
+ InliningStatus inlineStringReplaceString(CallInfo& callInfo);
+ InliningStatus inlineConstantStringSplitString(CallInfo& callInfo);
+ InliningStatus inlineStringSplitString(CallInfo& callInfo);
+
+ // RegExp intrinsics.
+ InliningStatus inlineRegExpMatcher(CallInfo& callInfo);
+ InliningStatus inlineRegExpSearcher(CallInfo& callInfo);
+ InliningStatus inlineRegExpTester(CallInfo& callInfo);
+ InliningStatus inlineIsRegExpObject(CallInfo& callInfo);
+ InliningStatus inlineRegExpPrototypeOptimizable(CallInfo& callInfo);
+ InliningStatus inlineRegExpInstanceOptimizable(CallInfo& callInfo);
+ InliningStatus inlineGetFirstDollarIndex(CallInfo& callInfo);
+
+ // Object natives and intrinsics.
+ InliningStatus inlineObjectCreate(CallInfo& callInfo);
+ InliningStatus inlineDefineDataProperty(CallInfo& callInfo);
+
+ // Atomics natives.
+ InliningStatus inlineAtomicsCompareExchange(CallInfo& callInfo);
+ InliningStatus inlineAtomicsExchange(CallInfo& callInfo);
+ InliningStatus inlineAtomicsLoad(CallInfo& callInfo);
+ InliningStatus inlineAtomicsStore(CallInfo& callInfo);
+ InliningStatus inlineAtomicsBinop(CallInfo& callInfo, InlinableNative target);
+ InliningStatus inlineAtomicsIsLockFree(CallInfo& callInfo);
+
+ // Slot intrinsics.
+ InliningStatus inlineUnsafeSetReservedSlot(CallInfo& callInfo);
+ InliningStatus inlineUnsafeGetReservedSlot(CallInfo& callInfo,
+ MIRType knownValueType);
+
+ // Map and Set intrinsics.
+ InliningStatus inlineGetNextEntryForIterator(CallInfo& callInfo,
+ MGetNextEntryForIterator::Mode mode);
+
+ // ArrayBuffer intrinsics.
+ InliningStatus inlineArrayBufferByteLength(CallInfo& callInfo);
+ InliningStatus inlinePossiblyWrappedArrayBufferByteLength(CallInfo& callInfo);
+
+ // TypedArray intrinsics.
+ enum WrappingBehavior { AllowWrappedTypedArrays, RejectWrappedTypedArrays };
+ InliningStatus inlineTypedArray(CallInfo& callInfo, Native native);
+ InliningStatus inlineIsTypedArrayHelper(CallInfo& callInfo, WrappingBehavior wrappingBehavior);
+ InliningStatus inlineIsTypedArray(CallInfo& callInfo);
+ InliningStatus inlineIsPossiblyWrappedTypedArray(CallInfo& callInfo);
+ InliningStatus inlineTypedArrayLength(CallInfo& callInfo);
+ InliningStatus inlinePossiblyWrappedTypedArrayLength(CallInfo& callInfo);
+ InliningStatus inlineSetDisjointTypedElements(CallInfo& callInfo);
+
+ // TypedObject intrinsics and natives.
+ InliningStatus inlineObjectIsTypeDescr(CallInfo& callInfo);
+ InliningStatus inlineSetTypedObjectOffset(CallInfo& callInfo);
+ InliningStatus inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* target);
+
+ // SIMD intrinsics and natives.
+ InliningStatus inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* target);
+
+ // SIMD helpers.
+ bool canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
+ InlineTypedObject** templateObj);
+ MDefinition* unboxSimd(MDefinition* ins, SimdType type);
+ IonBuilder::InliningStatus boxSimd(CallInfo& callInfo, MDefinition* ins,
+ InlineTypedObject* templateObj);
+ MDefinition* convertToBooleanSimdLane(MDefinition* scalar);
+
+ InliningStatus inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type);
+
+ InliningStatus inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
+ MSimdBinaryArith::Operation op, SimdType type);
+ InliningStatus inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
+ MSimdBinaryBitwise::Operation op, SimdType type);
+ InliningStatus inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
+ MSimdBinarySaturating::Operation op, SimdType type);
+ InliningStatus inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
+ SimdType type);
+ InliningStatus inlineSimdComp(CallInfo& callInfo, JSNative native,
+ MSimdBinaryComp::Operation op, SimdType type);
+ InliningStatus inlineSimdUnary(CallInfo& callInfo, JSNative native,
+ MSimdUnaryArith::Operation op, SimdType type);
+ InliningStatus inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type);
+ InliningStatus inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type);
+ InliningStatus inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type);
+ InliningStatus inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
+ unsigned numVectors);
+ InliningStatus inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type);
+ InliningStatus inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast,
+ SimdType from, SimdType to);
+ InliningStatus inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type);
+
+ MOZ_MUST_USE bool prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
+ MInstruction** elements, MDefinition** index,
+ Scalar::Type* arrayType);
+ InliningStatus inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type,
+ unsigned numElems);
+ InliningStatus inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type,
+ unsigned numElems);
+
+ InliningStatus inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
+ SimdType type);
+
+ // Utility intrinsics.
+ InliningStatus inlineIsCallable(CallInfo& callInfo);
+ InliningStatus inlineIsConstructor(CallInfo& callInfo);
+ InliningStatus inlineIsObject(CallInfo& callInfo);
+ InliningStatus inlineToObject(CallInfo& callInfo);
+ InliningStatus inlineIsWrappedArrayConstructor(CallInfo& callInfo);
+ InliningStatus inlineToInteger(CallInfo& callInfo);
+ InliningStatus inlineToString(CallInfo& callInfo);
+ InliningStatus inlineDump(CallInfo& callInfo);
+ InliningStatus inlineHasClass(CallInfo& callInfo, const Class* clasp,
+ const Class* clasp2 = nullptr,
+ const Class* clasp3 = nullptr,
+ const Class* clasp4 = nullptr);
+ InliningStatus inlineIsConstructing(CallInfo& callInfo);
+ InliningStatus inlineSubstringKernel(CallInfo& callInfo);
+ InliningStatus inlineObjectHasPrototype(CallInfo& callInfo);
+
+ // Testing functions.
+ InliningStatus inlineBailout(CallInfo& callInfo);
+ InliningStatus inlineAssertFloat32(CallInfo& callInfo);
+ InliningStatus inlineAssertRecoveredOnBailout(CallInfo& callInfo);
+
+ // Bind function.
+ InliningStatus inlineBoundFunction(CallInfo& callInfo, JSFunction* target);
+
+ // Main inlining functions
+ InliningStatus inlineNativeCall(CallInfo& callInfo, JSFunction* target);
+ InliningStatus inlineNativeGetter(CallInfo& callInfo, JSFunction* target);
+ InliningStatus inlineNonFunctionCall(CallInfo& callInfo, JSObject* target);
+ InliningStatus inlineScriptedCall(CallInfo& callInfo, JSFunction* target);
+ InliningStatus inlineSingleCall(CallInfo& callInfo, JSObject* target);
+
+ // Call functions
+ InliningStatus inlineCallsite(const ObjectVector& targets, CallInfo& callInfo);
+ MOZ_MUST_USE bool inlineCalls(CallInfo& callInfo, const ObjectVector& targets,
+ BoolVector& choiceSet, MGetPropertyCache* maybeCache);
+
+ // Inlining helpers.
+ MOZ_MUST_USE bool inlineGenericFallback(JSFunction* target, CallInfo& callInfo,
+ MBasicBlock* dispatchBlock);
+ MOZ_MUST_USE bool inlineObjectGroupFallback(CallInfo& callInfo, MBasicBlock* dispatchBlock,
+ MObjectGroupDispatch* dispatch,
+ MGetPropertyCache* cache,
+ MBasicBlock** fallbackTarget);
+
+ enum AtomicCheckResult {
+ DontCheckAtomicResult,
+ DoCheckAtomicResult
+ };
+
+ MOZ_MUST_USE bool atomicsMeetsPreconditions(CallInfo& callInfo, Scalar::Type* arrayElementType,
+ bool* requiresDynamicCheck,
+ AtomicCheckResult checkResult=DoCheckAtomicResult);
+ void atomicsCheckBounds(CallInfo& callInfo, MInstruction** elements, MDefinition** index);
+
+ MOZ_MUST_USE bool testNeedsArgumentCheck(JSFunction* target, CallInfo& callInfo);
+
+ MCall* makeCallHelper(JSFunction* target, CallInfo& callInfo);
+ MOZ_MUST_USE bool makeCall(JSFunction* target, CallInfo& callInfo);
+
+ MDefinition* patchInlinedReturn(CallInfo& callInfo, MBasicBlock* exit, MBasicBlock* bottom);
+ MDefinition* patchInlinedReturns(CallInfo& callInfo, MIRGraphReturns& returns,
+ MBasicBlock* bottom);
+ MDefinition* specializeInlinedReturn(MDefinition* rdef, MBasicBlock* exit);
+
+ MOZ_MUST_USE bool objectsHaveCommonPrototype(TemporaryTypeSet* types, PropertyName* name,
+ bool isGetter, JSObject* foundProto,
+ bool* guardGlobal);
+ void freezePropertiesForCommonPrototype(TemporaryTypeSet* types, PropertyName* name,
+ JSObject* foundProto, bool allowEmptyTypesForGlobal = false);
+ /*
+ * Callers must pass a non-null globalGuard if they pass a non-null globalShape.
+ */
+ MOZ_MUST_USE bool testCommonGetterSetter(TemporaryTypeSet* types, PropertyName* name,
+ bool isGetter, JSObject* foundProto,
+ Shape* lastProperty, JSFunction* getterOrSetter,
+ MDefinition** guard, Shape* globalShape = nullptr,
+ MDefinition** globalGuard = nullptr);
+ MOZ_MUST_USE bool testShouldDOMCall(TypeSet* inTypes,
+ JSFunction* func, JSJitInfo::OpType opType);
+
+ MDefinition*
+ addShapeGuardsForGetterSetter(MDefinition* obj, JSObject* holder, Shape* holderShape,
+ const BaselineInspector::ReceiverVector& receivers,
+ const BaselineInspector::ObjectGroupVector& convertUnboxedGroups,
+ bool isOwnProperty);
+
+ MOZ_MUST_USE bool annotateGetPropertyCache(MDefinition* obj, PropertyName* name,
+ MGetPropertyCache* getPropCache,
+ TemporaryTypeSet* objTypes,
+ TemporaryTypeSet* pushedTypes);
+
+ MGetPropertyCache* getInlineableGetPropertyCache(CallInfo& callInfo);
+
+ JSObject* testGlobalLexicalBinding(PropertyName* name);
+
+ JSObject* testSingletonProperty(JSObject* obj, jsid id);
+ JSObject* testSingletonPropertyTypes(MDefinition* obj, jsid id);
+
+ ResultWithOOM<bool> testNotDefinedProperty(MDefinition* obj, jsid id);
+
+ uint32_t getDefiniteSlot(TemporaryTypeSet* types, PropertyName* name, uint32_t* pnfixed);
+ MDefinition* convertUnboxedObjects(MDefinition* obj);
+ MDefinition* convertUnboxedObjects(MDefinition* obj,
+ const BaselineInspector::ObjectGroupVector& list);
+ uint32_t getUnboxedOffset(TemporaryTypeSet* types, PropertyName* name,
+ JSValueType* punboxedType);
+ MInstruction* loadUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MInstruction* loadUnboxedValue(MDefinition* elements, size_t elementsOffset,
+ MDefinition* scaledOffset, JSValueType unboxedType,
+ BarrierKind barrier, TemporaryTypeSet* types);
+ MInstruction* storeUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType,
+ MDefinition* value);
+ MInstruction* storeUnboxedValue(MDefinition* obj,
+ MDefinition* elements, int32_t elementsOffset,
+ MDefinition* scaledOffset, JSValueType unboxedType,
+ MDefinition* value, bool preBarrier = true);
+ MOZ_MUST_USE bool checkPreliminaryGroups(MDefinition *obj);
+ MOZ_MUST_USE bool freezePropTypeSets(TemporaryTypeSet* types,
+ JSObject* foundProto, PropertyName* name);
+ bool canInlinePropertyOpShapes(const BaselineInspector::ReceiverVector& receivers);
+
+ TemporaryTypeSet* bytecodeTypes(jsbytecode* pc);
+
+ // Use one of the below methods for updating the current block, rather than
+ // updating |current| directly. setCurrent() should only be used in cases
+ // where the block cannot have phis whose type needs to be computed.
+
+ MOZ_MUST_USE bool setCurrentAndSpecializePhis(MBasicBlock* block) {
+ if (block) {
+ if (!block->specializePhis(alloc()))
+ return false;
+ }
+ setCurrent(block);
+ return true;
+ }
+
+ void setCurrent(MBasicBlock* block) {
+ current = block;
+ }
+
+ // A builder is inextricably tied to a particular script.
+ JSScript* script_;
+
+ // script->hasIonScript() at the start of the compilation. Used to avoid
+ // calling hasIonScript() from background compilation threads.
+ bool scriptHasIonScript_;
+
+    // If off-thread compilation is successful, the final code generator is
+    // attached here. Code has been generated, but not linked (there is not yet
+    // an IonScript). This is heap allocated and must be explicitly destroyed,
+    // which FinishOffThreadBuilder() takes care of.
+ CodeGenerator* backgroundCodegen_;
+
+ // Some aborts are actionable (e.g., using an unsupported bytecode). When
+ // optimization tracking is enabled, the location and message of the abort
+ // are recorded here so they may be propagated to the script's
+ // corresponding JitcodeGlobalEntry::BaselineEntry.
+ JSScript* actionableAbortScript_;
+ jsbytecode* actionableAbortPc_;
+ const char* actionableAbortMessage_;
+
+ MRootList* rootList_;
+
+ public:
+ void setRootList(MRootList& rootList) {
+ MOZ_ASSERT(!rootList_);
+ rootList_ = &rootList;
+ }
+ void clearForBackEnd();
+ JSObject* checkNurseryObject(JSObject* obj);
+
+ JSScript* script() const { return script_; }
+ bool scriptHasIonScript() const { return scriptHasIonScript_; }
+
+ CodeGenerator* backgroundCodegen() const { return backgroundCodegen_; }
+ void setBackgroundCodegen(CodeGenerator* codegen) { backgroundCodegen_ = codegen; }
+
+ CompilerConstraintList* constraints() {
+ return constraints_;
+ }
+
+ bool isInlineBuilder() const {
+ return callerBuilder_ != nullptr;
+ }
+
+ const JSAtomState& names() { return compartment->runtime()->names(); }
+
+ bool hadActionableAbort() const {
+ MOZ_ASSERT(!actionableAbortScript_ ||
+ (actionableAbortPc_ && actionableAbortMessage_));
+ return actionableAbortScript_ != nullptr;
+ }
+
+    TraceLoggerThread* traceLogger() {
+        // Currently IonBuilder only runs on the main thread.
+ return TraceLoggerForMainThread(compartment->runtime()->mainThread()->runtimeFromMainThread());
+ }
+
+ void actionableAbortLocationAndMessage(JSScript** abortScript, jsbytecode** abortPc,
+ const char** abortMessage)
+ {
+ MOZ_ASSERT(hadActionableAbort());
+ *abortScript = actionableAbortScript_;
+ *abortPc = actionableAbortPc_;
+ *abortMessage = actionableAbortMessage_;
+ }
+
+ void trace(JSTracer* trc);
+
+ private:
+ MOZ_MUST_USE bool init();
+
+ JSContext* analysisContext;
+ BaselineFrameInspector* baselineFrame_;
+
+ // Constraints for recording dependencies on type information.
+ CompilerConstraintList* constraints_;
+
+ // Basic analysis information about the script.
+ BytecodeAnalysis analysis_;
+ BytecodeAnalysis& analysis() {
+ return analysis_;
+ }
+
+ TemporaryTypeSet* thisTypes;
+ TemporaryTypeSet* argTypes;
+ TemporaryTypeSet* typeArray;
+ uint32_t typeArrayHint;
+ uint32_t* bytecodeTypeMap;
+
+ GSNCache gsn;
+ EnvironmentCoordinateNameCache envCoordinateNameCache;
+
+ jsbytecode* pc;
+ MBasicBlock* current;
+ uint32_t loopDepth_;
+
+ Vector<BytecodeSite*, 0, JitAllocPolicy> trackedOptimizationSites_;
+
+ BytecodeSite* bytecodeSite(jsbytecode* pc) {
+ MOZ_ASSERT(info().inlineScriptTree()->script()->containsPC(pc));
+ // See comment in maybeTrackedOptimizationSite.
+ if (isOptimizationTrackingEnabled()) {
+ if (BytecodeSite* site = maybeTrackedOptimizationSite(pc))
+ return site;
+ }
+ return new(alloc()) BytecodeSite(info().inlineScriptTree(), pc);
+ }
+
+ BytecodeSite* maybeTrackedOptimizationSite(jsbytecode* pc);
+
+ MDefinition* lexicalCheck_;
+
+ void setLexicalCheck(MDefinition* lexical) {
+ MOZ_ASSERT(!lexicalCheck_);
+ lexicalCheck_ = lexical;
+ }
+ MDefinition* takeLexicalCheck() {
+ MDefinition* lexical = lexicalCheck_;
+ lexicalCheck_ = nullptr;
+ return lexical;
+ }
+
+ /* Information used for inline-call builders. */
+ MResumePoint* callerResumePoint_;
+ jsbytecode* callerPC() {
+ return callerResumePoint_ ? callerResumePoint_->pc() : nullptr;
+ }
+ IonBuilder* callerBuilder_;
+
+ IonBuilder* outermostBuilder();
+
+ struct LoopHeader {
+ jsbytecode* pc;
+ MBasicBlock* header;
+
+ LoopHeader(jsbytecode* pc, MBasicBlock* header)
+ : pc(pc), header(header)
+ {}
+ };
+
+ Vector<CFGState, 8, JitAllocPolicy> cfgStack_;
+ Vector<ControlFlowInfo, 4, JitAllocPolicy> loops_;
+ Vector<ControlFlowInfo, 0, JitAllocPolicy> switches_;
+ Vector<ControlFlowInfo, 2, JitAllocPolicy> labels_;
+ Vector<MInstruction*, 2, JitAllocPolicy> iterators_;
+ Vector<LoopHeader, 0, JitAllocPolicy> loopHeaders_;
+ BaselineInspector* inspector;
+
+ size_t inliningDepth_;
+
+ // Total bytecode length of all inlined scripts. Only tracked for the
+ // outermost builder.
+ size_t inlinedBytecodeLength_;
+
+ // Cutoff to disable compilation if excessive time is spent reanalyzing
+ // loop bodies to compute a fixpoint of the types for loop variables.
+ static const size_t MAX_LOOP_RESTARTS = 40;
+ size_t numLoopRestarts_;
+
+ // True if script->failedBoundsCheck is set for the current script or
+ // an outer script.
+ bool failedBoundsCheck_;
+
+ // True if script->failedShapeGuard is set for the current script or
+ // an outer script.
+ bool failedShapeGuard_;
+
+ // True if script->failedLexicalCheck_ is set for the current script or
+ // an outer script.
+ bool failedLexicalCheck_;
+
+ // Has an iterator other than 'for in'.
+ bool nonStringIteration_;
+
+ // If this script can use a lazy arguments object, it will be pre-created
+ // here.
+ MInstruction* lazyArguments_;
+
+ // If this is an inline builder, the call info for the builder.
+ const CallInfo* inlineCallInfo_;
+
+    // When compiling a call with multiple targets, we first create an
+    // MGetPropertyCache. This MGetPropertyCache follows the bytecode and is
+    // used to recover the JSFunction. In some cases the type of the object
+    // which owns the property is enough to dispatch to the right function, so
+    // we do not have to read the property, except when the type object is
+    // unknown.
+    //
+    // As an optimization, we can dispatch a call based on the object group,
+    // without doing the MGetPropertyCache. This is what is achieved by
+    // |IonBuilder::inlineCalls|. As we might not know all the functions, we
+    // add a fallback path, into which this MGetPropertyCache is moved.
+    //
+    // In order to build the fallback path, we have to capture a resume point
+    // ahead of time for the potential fallback path. This resume point is
+    // captured while building the MGetPropertyCache. It captures the state of
+    // Baseline before the execution of the MGetPropertyCache, so that we can
+    // safely execute it in the fallback path.
+    //
+    // This field is used to discard the resume point if it is not used for
+    // building a fallback path.
+
+ // Discard the prior resume point while setting a new MGetPropertyCache.
+ void replaceMaybeFallbackFunctionGetter(MGetPropertyCache* cache);
+
+ // Discard the MGetPropertyCache if it is handled by WrapMGetPropertyCache.
+ void keepFallbackFunctionGetter(MGetPropertyCache* cache) {
+ if (cache == maybeFallbackFunctionGetter_)
+ maybeFallbackFunctionGetter_ = nullptr;
+ }
+
+ MGetPropertyCache* maybeFallbackFunctionGetter_;
+
+ // Used in tracking outcomes of optimization strategies for devtools.
+ void startTrackingOptimizations();
+
+ // The track* methods below are called often. Do not combine them with the
+ // unchecked variants, despite the unchecked variants having no other
+ // callers.
+ void trackTypeInfo(JS::TrackedTypeSite site, MIRType mirType,
+ TemporaryTypeSet* typeSet)
+ {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackTypeInfoUnchecked(site, mirType, typeSet);
+ }
+ void trackTypeInfo(JS::TrackedTypeSite site, JSObject* obj) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackTypeInfoUnchecked(site, obj);
+ }
+ void trackTypeInfo(CallInfo& callInfo) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackTypeInfoUnchecked(callInfo);
+ }
+ void trackOptimizationAttempt(JS::TrackedStrategy strategy) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackOptimizationAttemptUnchecked(strategy);
+ }
+ void amendOptimizationAttempt(uint32_t index) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ amendOptimizationAttemptUnchecked(index);
+ }
+ void trackOptimizationOutcome(JS::TrackedOutcome outcome) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackOptimizationOutcomeUnchecked(outcome);
+ }
+ void trackOptimizationSuccess() {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackOptimizationSuccessUnchecked();
+ }
+ void trackInlineSuccess(InliningStatus status = InliningStatus_Inlined) {
+ if (MOZ_UNLIKELY(current->trackedSite()->hasOptimizations()))
+ trackInlineSuccessUnchecked(status);
+ }
+
+ bool forceInlineCaches() {
+ return MOZ_UNLIKELY(JitOptions.forceInlineCaches);
+ }
+
+ // Out-of-line variants that don't check if optimization tracking is
+ // enabled.
+ void trackTypeInfoUnchecked(JS::TrackedTypeSite site, MIRType mirType,
+ TemporaryTypeSet* typeSet);
+ void trackTypeInfoUnchecked(JS::TrackedTypeSite site, JSObject* obj);
+ void trackTypeInfoUnchecked(CallInfo& callInfo);
+ void trackOptimizationAttemptUnchecked(JS::TrackedStrategy strategy);
+ void amendOptimizationAttemptUnchecked(uint32_t index);
+ void trackOptimizationOutcomeUnchecked(JS::TrackedOutcome outcome);
+ void trackOptimizationSuccessUnchecked();
+ void trackInlineSuccessUnchecked(InliningStatus status);
+};
+
+class CallInfo
+{
+ MDefinition* fun_;
+ MDefinition* thisArg_;
+ MDefinition* newTargetArg_;
+ MDefinitionVector args_;
+
+ bool constructing_;
+ bool setter_;
+
+ public:
+ CallInfo(TempAllocator& alloc, bool constructing)
+ : fun_(nullptr),
+ thisArg_(nullptr),
+ newTargetArg_(nullptr),
+ args_(alloc),
+ constructing_(constructing),
+ setter_(false)
+ { }
+
+ MOZ_MUST_USE bool init(CallInfo& callInfo) {
+ MOZ_ASSERT(constructing_ == callInfo.constructing());
+
+ fun_ = callInfo.fun();
+ thisArg_ = callInfo.thisArg();
+
+ if (constructing())
+ newTargetArg_ = callInfo.getNewTarget();
+
+ if (!args_.appendAll(callInfo.argv()))
+ return false;
+
+ return true;
+ }
+
+ MOZ_MUST_USE bool init(MBasicBlock* current, uint32_t argc) {
+ MOZ_ASSERT(args_.empty());
+
+ // Get the arguments in the right order
+ if (!args_.reserve(argc))
+ return false;
+
+ if (constructing())
+ setNewTarget(current->pop());
+
+ for (int32_t i = argc; i > 0; i--)
+ args_.infallibleAppend(current->peek(-i));
+ current->popn(argc);
+
+ // Get |this| and |fun|
+ setThis(current->pop());
+ setFun(current->pop());
+
+ return true;
+ }
+
+ void popFormals(MBasicBlock* current) {
+ current->popn(numFormals());
+ }
+
+ void pushFormals(MBasicBlock* current) {
+ current->push(fun());
+ current->push(thisArg());
+
+ for (uint32_t i = 0; i < argc(); i++)
+ current->push(getArg(i));
+
+ if (constructing())
+ current->push(getNewTarget());
+ }
+
+ uint32_t argc() const {
+ return args_.length();
+ }
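+    // numFormals() counts |fun| and |this| (the +2 below), the arguments, and,
+    // for constructing calls, |new.target|; see pushFormals() above.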
+ uint32_t numFormals() const {
+ return argc() + 2 + constructing();
+ }
+
+ MOZ_MUST_USE bool setArgs(const MDefinitionVector& args) {
+ MOZ_ASSERT(args_.empty());
+ return args_.appendAll(args);
+ }
+
+ MDefinitionVector& argv() {
+ return args_;
+ }
+
+ const MDefinitionVector& argv() const {
+ return args_;
+ }
+
+ MDefinition* getArg(uint32_t i) const {
+ MOZ_ASSERT(i < argc());
+ return args_[i];
+ }
+
+ MDefinition* getArgWithDefault(uint32_t i, MDefinition* defaultValue) const {
+ if (i < argc())
+ return args_[i];
+
+ return defaultValue;
+ }
+
+ void setArg(uint32_t i, MDefinition* def) {
+ MOZ_ASSERT(i < argc());
+ args_[i] = def;
+ }
+
+ MDefinition* thisArg() const {
+ MOZ_ASSERT(thisArg_);
+ return thisArg_;
+ }
+
+ void setThis(MDefinition* thisArg) {
+ thisArg_ = thisArg;
+ }
+
+ bool constructing() const {
+ return constructing_;
+ }
+
+ void setNewTarget(MDefinition* newTarget) {
+ MOZ_ASSERT(constructing());
+ newTargetArg_ = newTarget;
+ }
+ MDefinition* getNewTarget() const {
+ MOZ_ASSERT(newTargetArg_);
+ return newTargetArg_;
+ }
+
+ bool isSetter() const {
+ return setter_;
+ }
+ void markAsSetter() {
+ setter_ = true;
+ }
+
+ MDefinition* fun() const {
+ MOZ_ASSERT(fun_);
+ return fun_;
+ }
+
+ void setFun(MDefinition* fun) {
+ fun_ = fun;
+ }
+
+ void setImplicitlyUsedUnchecked() {
+ fun_->setImplicitlyUsedUnchecked();
+ thisArg_->setImplicitlyUsedUnchecked();
+ if (newTargetArg_)
+ newTargetArg_->setImplicitlyUsedUnchecked();
+ for (uint32_t i = 0; i < argc(); i++)
+ getArg(i)->setImplicitlyUsedUnchecked();
+ }
+};
+
+bool NeedsPostBarrier(MDefinition* value);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonBuilder_h */
diff --git a/js/src/jit/IonCaches.cpp b/js/src/jit/IonCaches.cpp
new file mode 100644
index 000000000..96e488ea8
--- /dev/null
+++ b/js/src/jit/IonCaches.cpp
@@ -0,0 +1,5072 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonCaches.h"
+
+#include "mozilla/SizePrintfMacros.h"
+#include "mozilla/TemplateLib.h"
+
+#include "jstypes.h"
+
+#include "builtin/TypedObject.h"
+#include "jit/BaselineIC.h"
+#include "jit/Ion.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/Lowering.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "js/Proxy.h"
+#include "vm/Shape.h"
+#include "vm/Stack.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::tl::FloorLog2;
+
+typedef Rooted<TypedArrayObject*> RootedTypedArrayObject;
+
+void
+CodeLocationJump::repoint(JitCode* code, MacroAssembler* masm)
+{
+ MOZ_ASSERT(state_ == Relative);
+ size_t new_off = (size_t)raw_;
+#ifdef JS_SMALL_BRANCH
+ size_t jumpTableEntryOffset = reinterpret_cast<size_t>(jumpTableEntry_);
+#endif
+ if (masm != nullptr) {
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT((uint64_t)raw_ <= UINT32_MAX);
+#endif
+ new_off = (uintptr_t)raw_;
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntryOffset = masm->actualIndex(jumpTableEntryOffset);
+#endif
+ }
+ raw_ = code->raw() + new_off;
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntry_ = Assembler::PatchableJumpAddress(code, (size_t) jumpTableEntryOffset);
+#endif
+ setAbsolute();
+}
+
+void
+CodeLocationLabel::repoint(JitCode* code, MacroAssembler* masm)
+{
+ MOZ_ASSERT(state_ == Relative);
+ size_t new_off = (size_t)raw_;
+ if (masm != nullptr) {
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT((uint64_t)raw_ <= UINT32_MAX);
+#endif
+ new_off = (uintptr_t)raw_;
+ }
+ MOZ_ASSERT(new_off < code->instructionsSize());
+
+ raw_ = code->raw() + new_off;
+ setAbsolute();
+}
+
+void
+CodeOffsetJump::fixup(MacroAssembler* masm)
+{
+#ifdef JS_SMALL_BRANCH
+ jumpTableIndex_ = masm->actualIndex(jumpTableIndex_);
+#endif
+}
+
+const char*
+IonCache::CacheName(IonCache::Kind kind)
+{
+ static const char * const names[] =
+ {
+#define NAME(x) #x,
+ IONCACHE_KIND_LIST(NAME)
+#undef NAME
+ };
+ return names[kind];
+}
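CacheName() above relies on the X-macro pattern: IONCACHE_KIND_LIST expands its macro argument once per cache kind, which keeps this name table in sync with the kind list. As a self-contained sketch of the same pattern, using a hypothetical COLOR_LIST macro rather than anything from this tree:

    #include <cstdio>

    #define COLOR_LIST(_) _(Red) _(Green) _(Blue)

    enum Color {
    #define ENUMERATE(x) x,
        COLOR_LIST(ENUMERATE)
    #undef ENUMERATE
    };

    static const char* const ColorNames[] = {
    #define NAME(x) #x,
        COLOR_LIST(NAME)
    #undef NAME
    };

    int main() {
        std::printf("%s\n", ColorNames[Green]); // prints "Green"
        return 0;
    }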
+
+const size_t IonCache::MAX_STUBS = 16;
+
+// Helper class which encapsulates logic to attach a stub to an IC by hooking
+// up rejoins and next stub jumps.
+//
+// The simplest stubs have a single jump to the next stub and look like the
+// following:
+//
+// branch guard NEXTSTUB
+// ... IC-specific code ...
+// jump REJOIN
+//
+// This corresponds to:
+//
+// attacher.branchNextStub(masm, ...);
+// ... emit IC-specific code ...
+// attacher.jumpRejoin(masm);
+//
+// When the stub needs multiple next-stub jumps, it looks like the following:
+//
+// branch guard FAILURES
+// ... IC-specific code ...
+// branch another-guard FAILURES
+// ... IC-specific code ...
+// jump REJOIN
+// FAILURES:
+// jump NEXTSTUB
+//
+// This corresponds to:
+//
+// Label failures;
+// masm.branchX(..., &failures);
+// ... emit IC-specific code ...
+//     masm.branchY(..., &failures);
+// ... emit more IC-specific code ...
+// attacher.jumpRejoin(masm);
+// masm.bind(&failures);
+// attacher.jumpNextStub(masm);
+//
+// A convenience function |branchNextStubOrLabel| is provided in the case that
+// the stub sometimes has multiple next stub jumps and sometimes a single
+// one. If a non-nullptr label is passed in, a |branchPtr| will be made to
+// that label instead of a |branchPtrWithPatch| to the next stub.
+class IonCache::StubAttacher
+{
+ protected:
+ bool hasNextStubOffset_ : 1;
+ bool hasStubCodePatchOffset_ : 1;
+
+ IonCache& cache_;
+
+ CodeLocationLabel rejoinLabel_;
+ CodeOffsetJump nextStubOffset_;
+ CodeOffsetJump rejoinOffset_;
+ CodeOffset stubCodePatchOffset_;
+
+ public:
+ explicit StubAttacher(IonCache& cache)
+ : hasNextStubOffset_(false),
+ hasStubCodePatchOffset_(false),
+ cache_(cache),
+ rejoinLabel_(cache.rejoinLabel_),
+ nextStubOffset_(),
+ rejoinOffset_(),
+ stubCodePatchOffset_()
+ { }
+
+ // Value used instead of the JitCode self-reference of generated
+ // stubs. This value is needed for marking calls made inside stubs. This
+ // value would be replaced by the attachStub function after the allocation
+ // of the JitCode. The self-reference is used to keep the stub path alive
+ // even if the IonScript is invalidated or if the IC is flushed.
+ static const void* const STUB_ADDR;
+
+ template <class T1, class T2>
+ void branchNextStub(MacroAssembler& masm, Assembler::Condition cond, T1 op1, T2 op2) {
+ MOZ_ASSERT(!hasNextStubOffset_);
+ RepatchLabel nextStub;
+ nextStubOffset_ = masm.branchPtrWithPatch(cond, op1, op2, &nextStub);
+ hasNextStubOffset_ = true;
+ masm.bind(&nextStub);
+ }
+
+ template <class T1, class T2>
+ void branchNextStubOrLabel(MacroAssembler& masm, Assembler::Condition cond, T1 op1, T2 op2,
+ Label* label)
+ {
+ if (label != nullptr)
+ masm.branchPtr(cond, op1, op2, label);
+ else
+ branchNextStub(masm, cond, op1, op2);
+ }
+
+ void jumpRejoin(MacroAssembler& masm) {
+ RepatchLabel rejoin;
+ rejoinOffset_ = masm.jumpWithPatch(&rejoin);
+ masm.bind(&rejoin);
+ }
+
+ void jumpNextStub(MacroAssembler& masm) {
+ MOZ_ASSERT(!hasNextStubOffset_);
+ RepatchLabel nextStub;
+ nextStubOffset_ = masm.jumpWithPatch(&nextStub);
+ hasNextStubOffset_ = true;
+ masm.bind(&nextStub);
+ }
+
+ void pushStubCodePointer(MacroAssembler& masm) {
+ // Push the JitCode pointer for the stub we're generating.
+ // WARNING:
+ // WARNING: If JitCode ever becomes relocatable, the following code is incorrect.
+ // WARNING: Note that we're not marking the pointer being pushed as an ImmGCPtr.
+ // WARNING: This location will be patched with the pointer of the generated stub,
+        // WARNING: so that it can be marked when a call is made with this stub. Be aware
+        // WARNING: that ICs are not marked, so this stub will only be kept alive if
+ // WARNING: it is on the stack at the time of the GC. No ImmGCPtr is needed as the
+ // WARNING: stubs are flushed on GC.
+ // WARNING:
+ MOZ_ASSERT(!hasStubCodePatchOffset_);
+ stubCodePatchOffset_ = masm.PushWithPatch(ImmPtr(STUB_ADDR));
+ hasStubCodePatchOffset_ = true;
+ }
+
+ void patchRejoinJump(MacroAssembler& masm, JitCode* code) {
+ rejoinOffset_.fixup(&masm);
+ CodeLocationJump rejoinJump(code, rejoinOffset_);
+ PatchJump(rejoinJump, rejoinLabel_);
+ }
+
+ void patchStubCodePointer(JitCode* code) {
+ if (hasStubCodePatchOffset_) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
+ ImmPtr(code), ImmPtr(STUB_ADDR));
+ }
+ }
+
+ void patchNextStubJump(MacroAssembler& masm, JitCode* code) {
+ // If this path is not taken, we are producing an entry which can no
+ // longer go back into the update function.
+ if (hasNextStubOffset_) {
+ nextStubOffset_.fixup(&masm);
+ CodeLocationJump nextStubJump(code, nextStubOffset_);
+ PatchJump(nextStubJump, cache_.fallbackLabel_);
+
+            // When the last stub fails, it falls back to the OOL call, which
+            // can produce a new stub. The next time we generate a stub, we will
+            // patch this nextStub jump to try the new stub.
+ cache_.lastJump_ = nextStubJump;
+ }
+ }
+};
+
+const void* const IonCache::StubAttacher::STUB_ADDR = (void*)0xdeadc0de;
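To give a rough, self-contained picture of the stub-chaining scheme the StubAttacher comment above describes (guard, IC-specific code, rejoin, next-stub jump), here is a minimal C++ sketch using ordinary function pointers in place of patched jumps. It is purely illustrative: Stub, icLookup, and the fallback are hypothetical names, not SpiderMonkey APIs, and real ICs patch machine code rather than walking a vector.

    #include <cstdio>
    #include <vector>

    struct Stub {
        bool (*guard)(int key);  // the "branch guard": does this stub apply?
        int (*body)(int key);    // the "IC-specific code", run only if the guard passes
    };

    // Walk the stub chain; the fallback plays the role of the out-of-line update
    // function, which may attach a new stub for future lookups.
    static int icLookup(const std::vector<Stub>& stubs, int key, int (*fallback)(int)) {
        for (const Stub& s : stubs) {
            if (s.guard(key))        // guard failure corresponds to "jump NEXTSTUB"
                return s.body(key);  // success corresponds to "jump REJOIN"
        }
        return fallback(key);        // the last next-stub jump targets the fallback
    }

    int main() {
        std::vector<Stub> stubs = {
            { [](int k) { return k == 1; }, [](int) { return 10; } }
        };
        int (*fallback)(int) = [](int k) { return -k; };
        std::printf("%d %d\n", icLookup(stubs, 1, fallback),   // hits the stub: 10
                               icLookup(stubs, 2, fallback));  // misses, falls back: -2
        return 0;
    }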
+
+void
+IonCache::emitInitialJump(MacroAssembler& masm, RepatchLabel& entry)
+{
+ initialJump_ = masm.jumpWithPatch(&entry);
+ lastJump_ = initialJump_;
+ Label label;
+ masm.bind(&label);
+ rejoinLabel_ = CodeOffset(label.offset());
+}
+
+void
+IonCache::attachStub(MacroAssembler& masm, StubAttacher& attacher, CodeLocationJump lastJump,
+ Handle<JitCode*> code)
+{
+ MOZ_ASSERT(canAttachStub());
+ incrementStubCount();
+
+ // Patch the previous nextStubJump of the last stub, or the jump from the
+ // codeGen, to jump into the newly allocated code.
+ PatchJump(lastJump, CodeLocationLabel(code), Reprotect);
+}
+
+IonCache::LinkStatus
+IonCache::linkCode(JSContext* cx, MacroAssembler& masm, StubAttacher& attacher, IonScript* ion,
+ JitCode** code)
+{
+ Linker linker(masm);
+ *code = linker.newCode<CanGC>(cx, ION_CODE);
+ if (!*code)
+ return LINK_ERROR;
+
+ if (ion->invalidated())
+ return CACHE_FLUSHED;
+
+ // Update the success path to continue after the IC initial jump.
+ attacher.patchRejoinJump(masm, *code);
+
+    // Replace the STUB_ADDR constant with the address of the generated stub,
+    // so that it can be kept alive even if the cache is flushed (see
+ // MarkJitExitFrame).
+ attacher.patchStubCodePointer(*code);
+
+ // Update the failure path.
+ attacher.patchNextStubJump(masm, *code);
+
+ return LINK_GOOD;
+}
+
+bool
+IonCache::linkAndAttachStub(JSContext* cx, MacroAssembler& masm, StubAttacher& attacher,
+ IonScript* ion, const char* attachKind,
+ JS::TrackedOutcome trackedOutcome)
+{
+ CodeLocationJump lastJumpBefore = lastJump_;
+ Rooted<JitCode*> code(cx);
+ {
+ // Need to exit the AutoFlushICache context to flush the cache
+ // before attaching the stub below.
+ AutoFlushICache afc("IonCache");
+ LinkStatus status = linkCode(cx, masm, attacher, ion, code.address());
+ if (status != LINK_GOOD)
+ return status != LINK_ERROR;
+ }
+
+ if (pc_) {
+ JitSpew(JitSpew_IonIC, "Cache %p(%s:%" PRIuSIZE "/%" PRIuSIZE ") generated %s %s stub at %p",
+ this, script_->filename(), script_->lineno(), script_->pcToOffset(pc_),
+ attachKind, CacheName(kind()), code->raw());
+ } else {
+ JitSpew(JitSpew_IonIC, "Cache %p generated %s %s stub at %p",
+ this, attachKind, CacheName(kind()), code->raw());
+ }
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "IonCache");
+#endif
+
+ attachStub(masm, attacher, lastJumpBefore, code);
+
+ // Add entry to native => bytecode mapping for this stub if needed.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) {
+ JitcodeGlobalEntry::IonCacheEntry entry;
+ entry.init(code, code->raw(), code->rawEnd(), rejoinAddress(), trackedOutcome);
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(entry, cx->runtime())) {
+ entry.destroy();
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ } else {
+ JitcodeGlobalEntry::DummyEntry entry;
+ entry.init(code, code->raw(), code->rawEnd());
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(entry, cx->runtime())) {
+ entry.destroy();
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ }
+
+ // Report masm OOM errors here, so all our callers can:
+ // return linkAndAttachStub(...);
+ if (masm.oom()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void
+IonCache::updateBaseAddress(JitCode* code, MacroAssembler& masm)
+{
+ fallbackLabel_.repoint(code, &masm);
+ initialJump_.repoint(code, &masm);
+ lastJump_.repoint(code, &masm);
+ rejoinLabel_.repoint(code, &masm);
+}
+
+void IonCache::trace(JSTracer* trc)
+{
+ if (script_)
+ TraceManuallyBarrieredEdge(trc, &script_, "IonCache::script_");
+}
+
+static void*
+GetReturnAddressToIonCode(JSContext* cx)
+{
+ JitFrameIterator iter(cx);
+ MOZ_ASSERT(iter.type() == JitFrame_Exit,
+ "An exit frame is expected as update functions are called with a VMFunction.");
+
+ void* returnAddr = iter.returnAddress();
+#ifdef DEBUG
+ ++iter;
+ MOZ_ASSERT(iter.isIonJS());
+#endif
+ return returnAddr;
+}
+
+static void
+GeneratePrototypeGuards(JSContext* cx, IonScript* ion, MacroAssembler& masm, JSObject* obj,
+ JSObject* holder, Register objectReg, Register scratchReg,
+ Label* failures)
+{
+ /*
+ * The guards here protect against the effects of JSObject::swap(). If the prototype chain
+ * is directly altered, then TI will toss the jitcode, so we don't have to worry about
+     * that case; any other change to the holder, or the addition of a shadowing property,
+     * will reshape the holder and thus fail the shape guard.
+ */
+ MOZ_ASSERT(obj != holder);
+
+ if (obj->hasUncacheableProto()) {
+ // Note: objectReg and scratchReg may be the same register, so we cannot
+ // use objectReg in the rest of this function.
+ masm.loadPtr(Address(objectReg, JSObject::offsetOfGroup()), scratchReg);
+ Address proto(scratchReg, ObjectGroup::offsetOfProto());
+ masm.branchPtr(Assembler::NotEqual, proto, ImmGCPtr(obj->staticPrototype()), failures);
+ }
+
+ JSObject* pobj = obj->staticPrototype();
+ if (!pobj)
+ return;
+ while (pobj != holder) {
+ if (pobj->hasUncacheableProto()) {
+ masm.movePtr(ImmGCPtr(pobj), scratchReg);
+ Address groupAddr(scratchReg, JSObject::offsetOfGroup());
+ if (pobj->isSingleton()) {
+ // Singletons can have their group's |proto| mutated directly.
+ masm.loadPtr(groupAddr, scratchReg);
+ Address protoAddr(scratchReg, ObjectGroup::offsetOfProto());
+ masm.branchPtr(Assembler::NotEqual, protoAddr, ImmGCPtr(pobj->staticPrototype()),
+ failures);
+ } else {
+ masm.branchPtr(Assembler::NotEqual, groupAddr, ImmGCPtr(pobj->group()), failures);
+ }
+ }
+
+ pobj = pobj->staticPrototype();
+ }
+}
+
+// Note: This differs from IsCacheableProtoChain in BaselineIC.cpp in that
+// Ion caches can deal with objects on the proto chain that have uncacheable
+// prototypes.
+bool
+jit::IsCacheableProtoChainForIonOrCacheIR(JSObject* obj, JSObject* holder)
+{
+ while (obj != holder) {
+ /*
+ * We cannot assume that we find the holder object on the prototype
+ * chain and must check for null proto. The prototype chain can be
+ * altered during the lookupProperty call.
+ */
+ JSObject* proto = obj->staticPrototype();
+ if (!proto || !proto->isNative())
+ return false;
+ obj = proto;
+ }
+ return true;
+}
+
+bool
+jit::IsCacheableGetPropReadSlotForIonOrCacheIR(JSObject* obj, JSObject* holder, Shape* shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (!shape->hasSlot() || !shape->hasDefaultGetter())
+ return false;
+
+ return true;
+}
+
+static bool
+IsCacheableNoProperty(JSObject* obj, JSObject* holder, Shape* shape, jsbytecode* pc,
+ const TypedOrValueRegister& output)
+{
+ if (shape)
+ return false;
+
+ MOZ_ASSERT(!holder);
+
+ // Just because we didn't find the property on the object doesn't mean it
+ // won't magically appear through various engine hacks.
+ if (obj->getClass()->getGetProperty())
+ return false;
+
+ // Don't generate missing property ICs if we skipped a non-native object, as
+ // lookups may extend beyond the prototype chain (e.g. for DOMProxy
+ // proxies).
+ JSObject* obj2 = obj;
+ while (obj2) {
+ if (!obj2->isNative())
+ return false;
+ obj2 = obj2->staticPrototype();
+ }
+
+ // The pc is nullptr if the cache is idempotent. We cannot share missing
+ // properties between caches because TI can only try to prove that a type is
+    // contained, but does not attempt to check that something does not exist.
+    // So the inferred type of the getprop would be missing and would not contain
+ // undefined, as expected for missing properties.
+ if (!pc)
+ return false;
+
+ // TI has not yet monitored an Undefined value. The fallback path will
+ // monitor and invalidate the script.
+ if (!output.hasValue())
+ return false;
+
+ return true;
+}
+
+static bool
+IsOptimizableArgumentsObjectForLength(JSObject* obj)
+{
+ if (!obj->is<ArgumentsObject>())
+ return false;
+
+ if (obj->as<ArgumentsObject>().hasOverriddenLength())
+ return false;
+
+ return true;
+}
+
+static bool
+IsOptimizableArgumentsObjectForGetElem(JSObject* obj, const Value& idval)
+{
+ if (!IsOptimizableArgumentsObjectForLength(obj))
+ return false;
+
+ ArgumentsObject& argsObj = obj->as<ArgumentsObject>();
+
+ if (argsObj.isAnyElementDeleted())
+ return false;
+
+ if (argsObj.hasOverriddenElement())
+ return false;
+
+ if (!idval.isInt32())
+ return false;
+
+ int32_t idint = idval.toInt32();
+ if (idint < 0 || static_cast<uint32_t>(idint) >= argsObj.initialLength())
+ return false;
+
+ return true;
+}
+
+static bool
+IsCacheableGetPropCallNative(JSObject* obj, JSObject* holder, Shape* shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (!shape->hasGetterValue() || !shape->getterValue().isObject())
+ return false;
+
+ if (!shape->getterValue().toObject().is<JSFunction>())
+ return false;
+
+ JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
+ if (!getter.isNative())
+ return false;
+
+ // Check for a getter that has jitinfo and whose jitinfo says it's
+ // OK with both inner and outer objects.
+ if (getter.jitInfo() && !getter.jitInfo()->needsOuterizedThisObject())
+ return true;
+
+ // For getters that need the WindowProxy (instead of the Window) as this
+ // object, don't cache if obj is the Window, since our cache will pass that
+ // instead of the WindowProxy.
+ return !IsWindow(obj);
+}
+
+static bool
+IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (!shape->hasGetterValue() || !shape->getterValue().isObject())
+ return false;
+
+ if (!shape->getterValue().toObject().is<JSFunction>())
+ return false;
+
+ JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
+ if (!getter.hasJITCode())
+ return false;
+
+ // See IsCacheableGetPropCallNative.
+ return !IsWindow(obj);
+}
+
+static bool
+IsCacheableGetPropCallPropertyOp(JSObject* obj, JSObject* holder, Shape* shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (shape->hasSlot() || shape->hasGetterValue() || shape->hasDefaultGetter())
+ return false;
+
+ return true;
+}
+
+static void
+TestMatchingReceiver(MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ Register object, JSObject* obj, Label* failure,
+ bool alwaysCheckGroup = false)
+{
+ if (obj->is<UnboxedPlainObject>()) {
+ MOZ_ASSERT(failure);
+
+ masm.branchTestObjGroup(Assembler::NotEqual, object, obj->group(), failure);
+ Address expandoAddress(object, UnboxedPlainObject::offsetOfExpando());
+ if (UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando()) {
+ masm.branchPtr(Assembler::Equal, expandoAddress, ImmWord(0), failure);
+ Label success;
+ masm.push(object);
+ masm.loadPtr(expandoAddress, object);
+ masm.branchTestObjShape(Assembler::Equal, object, expando->lastProperty(),
+ &success);
+ masm.pop(object);
+ masm.jump(failure);
+ masm.bind(&success);
+ masm.pop(object);
+ } else {
+ masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), failure);
+ }
+ } else if (obj->is<UnboxedArrayObject>()) {
+ MOZ_ASSERT(failure);
+ masm.branchTestObjGroup(Assembler::NotEqual, object, obj->group(), failure);
+ } else if (obj->is<TypedObject>()) {
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object, JSObject::offsetOfGroup()),
+ ImmGCPtr(obj->group()), failure);
+ } else {
+ Shape* shape = obj->maybeShape();
+ MOZ_ASSERT(shape);
+
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object, ShapedObject::offsetOfShape()),
+ ImmGCPtr(shape), failure);
+
+ if (alwaysCheckGroup)
+ masm.branchTestObjGroup(Assembler::NotEqual, object, obj->group(), failure);
+ }
+}
+
+static inline void
+EmitLoadSlot(MacroAssembler& masm, NativeObject* holder, Shape* shape, Register holderReg,
+ TypedOrValueRegister output, Register scratchReg)
+{
+ MOZ_ASSERT(holder);
+ NativeObject::slotsSizeMustNotOverflow();
+ if (holder->isFixedSlot(shape->slot())) {
+ Address addr(holderReg, NativeObject::getFixedSlotOffset(shape->slot()));
+ masm.loadTypedOrValue(addr, output);
+ } else {
+ masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), scratchReg);
+
+ Address addr(scratchReg, holder->dynamicSlotIndex(shape->slot()) * sizeof(Value));
+ masm.loadTypedOrValue(addr, output);
+ }
+}
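EmitLoadSlot mirrors, in generated code, the fixed/dynamic slot split of native objects: slots below the fixed-slot count live inline in the object, while the rest live in a separately allocated array reached through the slots pointer (hence the extra loadPtr). A simplified, purely illustrative C++ model of that split; ToyObject is hypothetical and not the real NativeObject layout:

    #include <cstddef>
    #include <vector>

    struct ToyObject {
        static const size_t NumFixed = 4;
        double fixedSlots[NumFixed];   // stored inline, like fixed slots
        std::vector<double> dynSlots;  // out-of-line allocation, like the slots pointer

        double getSlot(size_t slot) const {
            if (slot < NumFixed)
                return fixedSlots[slot];       // fixed-slot load: offset from the object
            return dynSlots[slot - NumFixed];  // dynamic: load the slots array, then index
        }
    };

    int main() {
        ToyObject obj = { {1, 2, 3, 4}, {5, 6} };
        return obj.getSlot(5) == 6 ? 0 : 1;    // slot 5 is dynamic index 1
    }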
+
+// Callers are expected to have already guarded on the shape of the
+// object, which guarantees the object is a DOM proxy.
+static void
+CheckDOMProxyExpandoDoesNotShadow(JSContext* cx, MacroAssembler& masm, JSObject* obj,
+ jsid id, Register object, Label* stubFailure)
+{
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ // Guard that the object does not have expando properties, or has an expando
+ // which is known to not have the desired property.
+
+ // For the remaining code, we need to reserve some registers to load a value.
+    // This is ugly, but unavoidable.
+ AllocatableRegisterSet domProxyRegSet(RegisterSet::All());
+ domProxyRegSet.take(AnyRegister(object));
+ ValueOperand tempVal = domProxyRegSet.takeAnyValue();
+ masm.pushValue(tempVal);
+
+ Label failDOMProxyCheck;
+ Label domProxyOk;
+
+ Value expandoVal = GetProxyExtra(obj, GetDOMProxyExpandoSlot());
+
+ masm.loadPtr(Address(object, ProxyObject::offsetOfValues()), tempVal.scratchReg());
+ masm.loadValue(Address(tempVal.scratchReg(),
+ ProxyObject::offsetOfExtraSlotInValues(GetDOMProxyExpandoSlot())),
+ tempVal);
+
+ if (!expandoVal.isObject() && !expandoVal.isUndefined()) {
+ masm.branchTestValue(Assembler::NotEqual, tempVal, expandoVal, &failDOMProxyCheck);
+
+ ExpandoAndGeneration* expandoAndGeneration = (ExpandoAndGeneration*)expandoVal.toPrivate();
+ masm.movePtr(ImmPtr(expandoAndGeneration), tempVal.scratchReg());
+
+ masm.branch64(Assembler::NotEqual,
+ Address(tempVal.scratchReg(),
+ ExpandoAndGeneration::offsetOfGeneration()),
+ Imm64(expandoAndGeneration->generation),
+ &failDOMProxyCheck);
+
+ expandoVal = expandoAndGeneration->expando;
+ masm.loadValue(Address(tempVal.scratchReg(),
+ ExpandoAndGeneration::offsetOfExpando()),
+ tempVal);
+ }
+
+ // If the incoming object does not have an expando object then we're sure we're not
+ // shadowing.
+ masm.branchTestUndefined(Assembler::Equal, tempVal, &domProxyOk);
+
+ if (expandoVal.isObject()) {
+ MOZ_ASSERT(!expandoVal.toObject().as<NativeObject>().contains(cx, id));
+
+ // Reference object has an expando object that doesn't define the name. Check that
+ // the incoming object has an expando object with the same shape.
+ masm.branchTestObject(Assembler::NotEqual, tempVal, &failDOMProxyCheck);
+ masm.extractObject(tempVal, tempVal.scratchReg());
+ masm.branchPtr(Assembler::Equal,
+ Address(tempVal.scratchReg(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(expandoVal.toObject().as<NativeObject>().lastProperty()),
+ &domProxyOk);
+ }
+
+ // Failure case: restore the tempVal registers and jump to failures.
+ masm.bind(&failDOMProxyCheck);
+ masm.popValue(tempVal);
+ masm.jump(stubFailure);
+
+    // Success case: restore tempVal and proceed.
+ masm.bind(&domProxyOk);
+ masm.popValue(tempVal);
+}
+
+static void
+GenerateReadSlot(JSContext* cx, IonScript* ion, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, MaybeCheckTDZ checkTDZ,
+ JSObject* obj, JSObject* holder, Shape* shape, Register object,
+ TypedOrValueRegister output, Label* failures = nullptr)
+{
+ // If there's a single jump to |failures|, we can patch the shape guard
+ // jump directly. Otherwise, jump to the end of the stub, so there's a
+ // common point to patch.
+ bool multipleFailureJumps = (obj != holder)
+ || obj->is<UnboxedPlainObject>()
+ || (checkTDZ && output.hasValue())
+ || (failures != nullptr && failures->used());
+
+ // If we have multiple failure jumps but didn't get a label from the
+ // outside, make one ourselves.
+ Label failures_;
+ if (multipleFailureJumps && !failures)
+ failures = &failures_;
+
+ TestMatchingReceiver(masm, attacher, object, obj, failures);
+
+ // If we need a scratch register, use either an output register or the
+ // object register. After this point, we cannot jump directly to
+ // |failures| since we may still have to pop the object register.
+ bool restoreScratch = false;
+ Register scratchReg = Register::FromCode(0); // Quell compiler warning.
+
+ if (obj != holder ||
+ obj->is<UnboxedPlainObject>() ||
+ !holder->as<NativeObject>().isFixedSlot(shape->slot()))
+ {
+ if (output.hasValue()) {
+ scratchReg = output.valueReg().scratchReg();
+ } else if (output.type() == MIRType::Double) {
+ scratchReg = object;
+ masm.push(scratchReg);
+ restoreScratch = true;
+ } else {
+ scratchReg = output.typedReg().gpr();
+ }
+ }
+
+ // Fast path: single failure jump, no prototype guards.
+ if (!multipleFailureJumps) {
+ EmitLoadSlot(masm, &holder->as<NativeObject>(), shape, object, output, scratchReg);
+ if (restoreScratch)
+ masm.pop(scratchReg);
+ attacher.jumpRejoin(masm);
+ return;
+ }
+
+ // Slow path: multiple jumps; generate prototype guards.
+ Label prototypeFailures;
+ Register holderReg;
+ if (obj != holder) {
+ // Note: this may clobber the object register if it's used as scratch.
+ GeneratePrototypeGuards(cx, ion, masm, obj, holder, object, scratchReg,
+ &prototypeFailures);
+
+ if (holder) {
+ // Guard on the holder's shape.
+ holderReg = scratchReg;
+ masm.movePtr(ImmGCPtr(holder), holderReg);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(holderReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(holder->as<NativeObject>().lastProperty()),
+ &prototypeFailures);
+ } else {
+ // The property does not exist. Guard on everything in the
+ // prototype chain.
+ JSObject* proto = obj->staticPrototype();
+ Register lastReg = object;
+ MOZ_ASSERT(scratchReg != object);
+ while (proto) {
+ masm.loadObjProto(lastReg, scratchReg);
+
+ // Guard the shape of the current prototype.
+ MOZ_ASSERT(proto->hasStaticPrototype());
+ masm.branchPtr(Assembler::NotEqual,
+ Address(scratchReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(proto->as<NativeObject>().lastProperty()),
+ &prototypeFailures);
+
+ proto = proto->staticPrototype();
+ lastReg = scratchReg;
+ }
+
+ holderReg = InvalidReg;
+ }
+ } else if (obj->is<UnboxedPlainObject>()) {
+ holder = obj->as<UnboxedPlainObject>().maybeExpando();
+ holderReg = scratchReg;
+ masm.loadPtr(Address(object, UnboxedPlainObject::offsetOfExpando()), holderReg);
+ } else {
+ holderReg = object;
+ }
+
+ // Slot access.
+ if (holder) {
+ EmitLoadSlot(masm, &holder->as<NativeObject>(), shape, holderReg, output, scratchReg);
+ if (checkTDZ && output.hasValue())
+ masm.branchTestMagic(Assembler::Equal, output.valueReg(), failures);
+ } else {
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ }
+
+ // Restore scratch on success.
+ if (restoreScratch)
+ masm.pop(scratchReg);
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(&prototypeFailures);
+ if (restoreScratch)
+ masm.pop(scratchReg);
+ masm.bind(failures);
+
+ attacher.jumpNextStub(masm);
+}
+
+static void
+GenerateReadUnboxed(JSContext* cx, IonScript* ion, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, JSObject* obj,
+ const UnboxedLayout::Property* property,
+ Register object, TypedOrValueRegister output,
+ Label* failures = nullptr)
+{
+ // Guard on the group of the object.
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object, JSObject::offsetOfGroup()),
+ ImmGCPtr(obj->group()), failures);
+
+ Address address(object, UnboxedPlainObject::offsetOfData() + property->offset);
+
+ masm.loadUnboxedProperty(address, property->type, output);
+
+ attacher.jumpRejoin(masm);
+
+ if (failures) {
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+ }
+}
+
+static bool
+EmitGetterCall(JSContext* cx, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, JSObject* obj,
+ JSObject* holder, HandleShape shape, bool holderIsReceiver,
+ LiveRegisterSet liveRegs, Register object,
+ TypedOrValueRegister output,
+ void* returnAddr)
+{
+ MOZ_ASSERT(output.hasValue());
+ MacroAssembler::AfterICSaveLive aic = masm.icSaveLive(liveRegs);
+
+ MOZ_ASSERT_IF(obj != holder, !holderIsReceiver);
+
+ // Remaining registers should basically be free, but we need to use |object| still
+ // so leave it alone.
+ AllocatableRegisterSet regSet(RegisterSet::All());
+ regSet.take(AnyRegister(object));
+
+    // This is a slower stub path, and we're going to be doing a call anyway, so we
+    // don't need to try so hard to avoid using the stack. Scratch registers are simply
+    // taken from the register set, excluding the input; their current values are saved
+    // on the stack and restored when we're done with them.
+ Register scratchReg = regSet.takeAnyGeneral();
+
+ // Shape has a JSNative, PropertyOp or scripted getter function.
+ if (IsCacheableGetPropCallNative(obj, holder, shape)) {
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argUintNReg = regSet.takeAnyGeneral();
+ Register argVpReg = regSet.takeAnyGeneral();
+
+ JSFunction* target = &shape->getterValue().toObject().as<JSFunction>();
+ MOZ_ASSERT(target);
+ MOZ_ASSERT(target->isNative());
+
+ // Native functions have the signature:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
+
+ // Construct vp array:
+ // Push object value for |this|
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
+ // Push callee/outparam.
+ masm.Push(ObjectValue(*target));
+
+ // Preload arguments into registers.
+ masm.loadJSContext(argJSContextReg);
+ masm.move32(Imm32(0), argUintNReg);
+ masm.moveStackPtrTo(argVpReg);
+
+ // Push marking data for later use.
+ masm.Push(argUintNReg);
+ attacher.pushStubCodePointer(masm);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLNativeExitFrameLayoutToken);
+
+ // Construct and execute call.
+ masm.setupUnalignedABICall(scratchReg);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argUintNReg);
+ masm.passABIArg(argVpReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()));
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ Address outparam(masm.getStackPointer(), IonOOLNativeExitFrameLayout::offsetOfResult());
+ masm.loadTypedOrValue(outparam, output);
+
+ // masm.leaveExitFrame & pop locals
+ masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
+ } else if (IsCacheableGetPropCallPropertyOp(obj, holder, shape)) {
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argObjReg = regSet.takeAnyGeneral();
+ Register argIdReg = regSet.takeAnyGeneral();
+ Register argVpReg = regSet.takeAnyGeneral();
+
+ GetterOp target = shape->getterOp();
+ MOZ_ASSERT(target);
+
+ // Push stubCode for marking.
+ attacher.pushStubCodePointer(masm);
+
+ // JSGetterOp: bool fn(JSContext* cx, HandleObject obj, HandleId id, MutableHandleValue vp)
+
+ // Push args on stack first so we can take pointers to make handles.
+ masm.Push(UndefinedValue());
+ masm.moveStackPtrTo(argVpReg);
+
+ // Push canonical jsid from shape instead of propertyname.
+ masm.Push(shape->propid(), scratchReg);
+ masm.moveStackPtrTo(argIdReg);
+
+ // Push the holder.
+ if (holderIsReceiver) {
+            // When the holder is also the current receiver, we only have a shape guard,
+            // so the receiver may be any object with that shape; any such object is
+            // guaranteed to have this JSGetterOp.
+ masm.Push(object);
+ } else {
+ // If the holder is on the prototype chain, the prototype-guarding
+ // only allows objects with the same holder.
+ masm.movePtr(ImmGCPtr(holder), scratchReg);
+ masm.Push(scratchReg);
+ }
+ masm.moveStackPtrTo(argObjReg);
+
+ masm.loadJSContext(argJSContextReg);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLPropertyOpExitFrameLayoutToken);
+
+ // Make the call.
+ masm.setupUnalignedABICall(scratchReg);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argObjReg);
+ masm.passABIArg(argIdReg);
+ masm.passABIArg(argVpReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target));
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ Address outparam(masm.getStackPointer(), IonOOLPropertyOpExitFrameLayout::offsetOfResult());
+ masm.loadTypedOrValue(outparam, output);
+
+ // masm.leaveExitFrame & pop locals.
+ masm.adjustStack(IonOOLPropertyOpExitFrameLayout::Size());
+ } else {
+ MOZ_ASSERT(IsCacheableGetPropCallScripted(obj, holder, shape));
+
+ JSFunction* target = &shape->getterValue().toObject().as<JSFunction>();
+ uint32_t framePushedBefore = masm.framePushed();
+
+ // Construct IonAccessorICFrameLayout.
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
+ IonAccessorICFrameLayout::Size());
+ attacher.pushStubCodePointer(masm);
+ masm.Push(Imm32(descriptor));
+ masm.Push(ImmPtr(returnAddr));
+
+ // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
+ // so we just have to make sure the stack is aligned after we push the
+ // |this| + argument Values.
+ uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
+ uint32_t padding = ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
+ MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
+ MOZ_ASSERT(padding < JitStackAlignment);
+ masm.reserveStack(padding);
+
+ for (size_t i = 0; i < target->nargs(); i++)
+ masm.Push(UndefinedValue());
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
+
+ masm.movePtr(ImmGCPtr(target), scratchReg);
+
+ descriptor = MakeFrameDescriptor(argSize + padding, JitFrame_IonAccessorIC,
+ JitFrameLayout::Size());
+ masm.Push(Imm32(0)); // argc
+ masm.Push(scratchReg);
+ masm.Push(Imm32(descriptor));
+
+ // Check stack alignment. Add sizeof(uintptr_t) for the return address.
+ MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) == 0);
+
+ // The getter has JIT code now and we will only discard the getter's JIT
+ // code when discarding all JIT code in the Zone, so we can assume it'll
+ // still have JIT code.
+ MOZ_ASSERT(target->hasJITCode());
+ masm.loadPtr(Address(scratchReg, JSFunction::offsetOfNativeOrScript()), scratchReg);
+ masm.loadBaselineOrIonRaw(scratchReg, scratchReg, nullptr);
+ masm.callJit(scratchReg);
+ masm.storeCallResultValue(output);
+
+ masm.freeStack(masm.framePushed() - framePushedBefore);
+ }
+
+ masm.icRestoreLive(liveRegs, aic);
+ return true;
+}
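The native-getter path above builds the vp array that JSNative-style functions expect: vp[0] is the callee slot and doubles as the return-value outparam, vp[1] is |this|, and vp[2] onward are the arguments. A toy illustration of that layout; Value and toyNative here are hypothetical stand-ins, not the real JS::Value or JSNative signature (which also takes a JSContext*):

    #include <cstdio>

    struct Value { double num; };  // stand-in for a boxed engine value

    // Layout as described above: vp[0] = callee/outparam, vp[1] = |this|,
    // vp[2..2+argc) = arguments. Returning false would signal an exception.
    static bool toyNative(unsigned argc, Value* vp) {
        Value* args = vp + 2;
        double sum = 0;
        for (unsigned i = 0; i < argc; i++)
            sum += args[i].num;
        vp[0] = Value{sum};  // write the result into the outparam slot
        return true;
    }

    int main() {
        Value vp[4] = { {0} /* callee/outparam */, {0} /* this */, {1.5}, {2.5} };
        if (toyNative(2, vp))
            std::printf("%g\n", vp[0].num);  // prints 4
        return 0;
    }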
+
+static bool
+GenerateCallGetter(JSContext* cx, IonScript* ion, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, JSObject* obj,
+ JSObject* holder, HandleShape shape, LiveRegisterSet& liveRegs, Register object,
+ TypedOrValueRegister output, void* returnAddr, Label* failures = nullptr)
+{
+ MOZ_ASSERT(output.hasValue());
+
+ // Use the passed in label if there was one. Otherwise, we'll have to make our own.
+ Label stubFailure;
+ failures = failures ? failures : &stubFailure;
+
+ TestMatchingReceiver(masm, attacher, object, obj, failures);
+
+ Register scratchReg = output.valueReg().scratchReg();
+ bool spillObjReg = scratchReg == object;
+ Label pop1AndFail;
+ Label* maybePopAndFail = failures;
+
+ // If we're calling a getter on the global, inline the logic for the
+ // 'this' hook on the global lexical env and manually push the global.
+ if (IsGlobalLexicalEnvironment(obj)) {
+ masm.extractObject(Address(object, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ object);
+ }
+
+ // Save off the object register if it aliases the scratchReg
+ if (spillObjReg) {
+ masm.push(object);
+ maybePopAndFail = &pop1AndFail;
+ }
+
+ // Note: this may clobber the object register if it's used as scratch.
+ if (obj != holder)
+ GeneratePrototypeGuards(cx, ion, masm, obj, holder, object, scratchReg, maybePopAndFail);
+
+ // Guard on the holder's shape.
+ Register holderReg = scratchReg;
+ masm.movePtr(ImmGCPtr(holder), holderReg);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(holderReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(holder->as<NativeObject>().lastProperty()),
+ maybePopAndFail);
+
+ if (spillObjReg)
+ masm.pop(object);
+
+ // Now we're good to go to invoke the native call.
+ bool holderIsReceiver = (obj == holder);
+ if (!EmitGetterCall(cx, masm, attacher, obj, holder, shape, holderIsReceiver, liveRegs, object,
+ output, returnAddr))
+ return false;
+
+ // Rejoin jump.
+ attacher.jumpRejoin(masm);
+
+ // Jump to next stub.
+ if (spillObjReg) {
+ masm.bind(&pop1AndFail);
+ masm.pop(object);
+ }
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+
+ return true;
+}
+
+static bool
+GenerateArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, Register object, TypedOrValueRegister output, Label* failures)
+{
+ MOZ_ASSERT(obj->is<ArrayObject>());
+
+ // Guard object is a dense array.
+ RootedShape shape(cx, obj->as<ArrayObject>().lastProperty());
+ if (!shape)
+ return false;
+ masm.branchTestObjShape(Assembler::NotEqual, object, shape, failures);
+
+ // Load length.
+ Register outReg;
+ if (output.hasValue()) {
+ outReg = output.valueReg().scratchReg();
+ } else {
+ MOZ_ASSERT(output.type() == MIRType::Int32);
+ outReg = output.typedReg().gpr();
+ }
+
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), outReg);
+ masm.load32(Address(outReg, ObjectElements::offsetOfLength()), outReg);
+
+ // The length is an unsigned int, but the value encodes a signed int.
+ MOZ_ASSERT(object != outReg);
+ masm.branchTest32(Assembler::Signed, outReg, outReg, failures);
+
+ if (output.hasValue())
+ masm.tagValue(JSVAL_TYPE_INT32, outReg, output.valueReg());
+
+ /* Success. */
+ attacher.jumpRejoin(masm);
+
+ /* Failure. */
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+
+ return true;
+}
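The Signed branch used in GenerateArrayLength (and in GenerateUnboxedArrayLength below) works because a uint32 length greater than INT32_MAX has its sign bit set when the same bits are read as an int32. A small self-contained illustration, assuming a two's-complement target:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t length = 0x80000001u;  // an array length larger than INT32_MAX
        int32_t asSigned = static_cast<int32_t>(length);  // negative on two's complement
        // The stub's Signed test takes the failure path here, so the cache falls
        // through to the next stub or the out-of-line update instead of returning
        // a bogus negative length.
        std::printf("%u -> %d (negative => guard fails)\n", length, asSigned);
        return 0;
    }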
+
+static void
+GenerateUnboxedArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* array, Register object, TypedOrValueRegister output,
+ Label* failures)
+{
+ Register outReg;
+ if (output.hasValue()) {
+ outReg = output.valueReg().scratchReg();
+ } else {
+ MOZ_ASSERT(output.type() == MIRType::Int32);
+ outReg = output.typedReg().gpr();
+ }
+ MOZ_ASSERT(object != outReg);
+
+ TestMatchingReceiver(masm, attacher, object, array, failures);
+
+ // Load length.
+ masm.load32(Address(object, UnboxedArrayObject::offsetOfLength()), outReg);
+
+ // Check for a length that fits in an int32.
+ masm.branchTest32(Assembler::Signed, outReg, outReg, failures);
+
+ if (output.hasValue())
+ masm.tagValue(JSVAL_TYPE_INT32, outReg, output.valueReg());
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+}
+
+// In this case, the code for TypedArray and SharedTypedArray is not the same,
+// because the code embeds pointers to the respective class arrays. Code that
+// caches the stub code must distinguish between the two cases.
+static void
+GenerateTypedArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ Register object, TypedOrValueRegister output, Label* failures)
+{
+ Register tmpReg;
+ if (output.hasValue()) {
+ tmpReg = output.valueReg().scratchReg();
+ } else {
+ MOZ_ASSERT(output.type() == MIRType::Int32);
+ tmpReg = output.typedReg().gpr();
+ }
+ MOZ_ASSERT(object != tmpReg);
+
+    // Implement the negated version of the JSObject::isTypedArray predicate.
+ masm.loadObjClass(object, tmpReg);
+ masm.branchPtr(Assembler::Below, tmpReg, ImmPtr(&TypedArrayObject::classes[0]),
+ failures);
+ masm.branchPtr(Assembler::AboveOrEqual, tmpReg,
+ ImmPtr(&TypedArrayObject::classes[Scalar::MaxTypedArrayViewType]),
+ failures);
+
+ // Load length.
+ masm.loadTypedOrValue(Address(object, TypedArrayObject::lengthOffset()), output);
+
+ /* Success. */
+ attacher.jumpRejoin(masm);
+
+ /* Failure. */
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+}
+
+static bool
+IsCacheableArrayLength(JSContext* cx, HandleObject obj, TypedOrValueRegister output)
+{
+ if (!obj->is<ArrayObject>())
+ return false;
+
+ if (output.type() != MIRType::Value && output.type() != MIRType::Int32) {
+ // The stub assumes that we always output Int32, so make sure our output
+ // is equipped to handle that.
+ return false;
+ }
+
+    // The emitted stub can only handle int32 lengths. If the length of the
+    // actual object does not fit in an int32, don't attach a stub: if the cache
+    // is idempotent, attaching one would mean we never end up invalidating the
+    // compiled script.
+ if (obj->as<ArrayObject>().length() > INT32_MAX)
+ return false;
+
+ return true;
+}
+
+template <class GetPropCache>
+static GetPropertyIC::NativeGetPropCacheability
+CanAttachNativeGetProp(JSContext* cx, const GetPropCache& cache,
+ HandleObject obj, HandleId id,
+ MutableHandleNativeObject holder, MutableHandleShape shape,
+ bool skipArrayLen = false)
+{
+ MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
+
+ if (!obj)
+ return GetPropertyIC::CanAttachNone;
+
+ // The lookup needs to be universally pure, otherwise we risk calling hooks out
+ // of turn. We don't mind doing this even when purity isn't required, because we
+ // only miss out on shape hashification, which is only a temporary perf cost.
+    // The limits were set arbitrarily, anyway.
+ JSObject* baseHolder = nullptr;
+ if (!LookupPropertyPure(cx, obj, id, &baseHolder, shape.address()))
+ return GetPropertyIC::CanAttachNone;
+
+ MOZ_ASSERT(!holder);
+ if (baseHolder) {
+ if (!baseHolder->isNative())
+ return GetPropertyIC::CanAttachNone;
+ holder.set(&baseHolder->as<NativeObject>());
+ }
+
+ RootedScript script(cx);
+ jsbytecode* pc;
+ cache.getScriptedLocation(&script, &pc);
+ if (IsCacheableGetPropReadSlotForIonOrCacheIR(obj, holder, shape) ||
+ IsCacheableNoProperty(obj, holder, shape, pc, cache.output()))
+ {
+ return GetPropertyIC::CanAttachReadSlot;
+ }
+
+ // |length| is a non-configurable getter property on ArrayObjects. Any time this
+ // check would have passed, we can install a getter stub instead. Allow people to
+    // make that decision themselves with skipArrayLen.
+ if (!skipArrayLen && JSID_IS_ATOM(id, cx->names().length) && cache.allowArrayLength(cx) &&
+ IsCacheableArrayLength(cx, obj, cache.output()))
+ {
+ // The array length property is non-configurable, which means both that
+ // checking the class of the object and the name of the property is enough
+ // and that we don't need to worry about monitoring, since we know the
+ // return type statically.
+ return GetPropertyIC::CanAttachArrayLength;
+ }
+
+ // IonBuilder guarantees that it's impossible to generate a GetPropertyIC with
+ // allowGetters() true and cache.output().hasValue() false. If this isn't true,
+ // we will quickly assert during stub generation.
+ //
+ // Be careful when adding support for other getters here: for outer window
+ // proxies, IonBuilder can innerize and pass us the inner window (the global),
+ // see IonBuilder::getPropTryInnerize. This is fine for native/scripted getters
+ // because IsCacheableGetPropCallNative and IsCacheableGetPropCallScripted
+ // handle this.
+ if (cache.allowGetters() &&
+ (IsCacheableGetPropCallNative(obj, holder, shape) ||
+ IsCacheableGetPropCallPropertyOp(obj, holder, shape) ||
+ IsCacheableGetPropCallScripted(obj, holder, shape)))
+ {
+ // Don't enable getter calls if the cache is idempotent, since getters can
+ // be effectful. This is handled by allowGetters().
+ return GetPropertyIC::CanAttachCallGetter;
+ }
+
+ return GetPropertyIC::CanAttachNone;
+}
+
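+// Helper called from jitcode by the id guard emitted below: |str1| is the atom
+// baked into the stub and |str2| is the non-atom input string of equal length.
+// Returns true iff their characters match; a false return also covers the OOM
+// case where |str2| cannot be linearized, which simply sends the stub to its
+// failure path.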
+static bool
+EqualStringsHelper(JSString* str1, JSString* str2)
+{
+ MOZ_ASSERT(str1->isAtom());
+ MOZ_ASSERT(!str2->isAtom());
+ MOZ_ASSERT(str1->length() == str2->length());
+
+ JSLinearString* str2Linear = str2->ensureLinear(nullptr);
+ if (!str2Linear)
+ return false;
+
+ return EqualChars(&str1->asLinear(), str2Linear);
+}
+
+static void
+EmitIdGuard(MacroAssembler& masm, jsid id, TypedOrValueRegister idReg, Register objReg,
+ Register scratchReg, Label* failures)
+{
+ MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
+
+ MOZ_ASSERT(idReg.type() == MIRType::String ||
+ idReg.type() == MIRType::Symbol ||
+ idReg.type() == MIRType::Value);
+
+ Register payloadReg;
+ if (idReg.type() == MIRType::Value) {
+ ValueOperand val = idReg.valueReg();
+ if (JSID_IS_SYMBOL(id)) {
+ masm.branchTestSymbol(Assembler::NotEqual, val, failures);
+ } else {
+ MOZ_ASSERT(JSID_IS_STRING(id));
+ masm.branchTestString(Assembler::NotEqual, val, failures);
+ }
+ masm.unboxNonDouble(val, scratchReg);
+ payloadReg = scratchReg;
+ } else {
+ payloadReg = idReg.typedReg().gpr();
+ }
+
+ if (JSID_IS_SYMBOL(id)) {
+ // For symbols, we can just do a pointer comparison.
+ masm.branchPtr(Assembler::NotEqual, payloadReg, ImmGCPtr(JSID_TO_SYMBOL(id)), failures);
+ } else {
+ PropertyName* name = JSID_TO_ATOM(id)->asPropertyName();
+
+ Label equal;
+ masm.branchPtr(Assembler::Equal, payloadReg, ImmGCPtr(name), &equal);
+
+ // The pointers are not equal, so if the input string is also an atom it
+ // must be a different string.
+ masm.branchTest32(Assembler::NonZero, Address(payloadReg, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), failures);
+
+ // Check the length.
+ masm.branch32(Assembler::NotEqual, Address(payloadReg, JSString::offsetOfLength()),
+ Imm32(name->length()), failures);
+
+ // We have a non-atomized string with the same length. For now call a helper
+ // function to do the comparison.
+ LiveRegisterSet volatileRegs(RegisterSet::Volatile());
+ masm.PushRegsInMask(volatileRegs);
+
+ if (!volatileRegs.has(objReg))
+ masm.push(objReg);
+
+ masm.setupUnalignedABICall(objReg);
+ masm.movePtr(ImmGCPtr(name), objReg);
+ masm.passABIArg(objReg);
+ masm.passABIArg(payloadReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelper));
+ masm.mov(ReturnReg, scratchReg);
+
+ if (!volatileRegs.has(objReg))
+ masm.pop(objReg);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratchReg);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+
+ masm.branchIfFalseBool(scratchReg, failures);
+ masm.bind(&equal);
+ }
+}
+
+void
+GetPropertyIC::emitIdGuard(MacroAssembler& masm, jsid id, Label* fail)
+{
+ if (this->id().constant())
+ return;
+
+ Register scratch = output().valueReg().scratchReg();
+ EmitIdGuard(masm, id, this->id().reg(), object(), scratch, fail);
+}
+
+void
+SetPropertyIC::emitIdGuard(MacroAssembler& masm, jsid id, Label* fail)
+{
+ if (this->id().constant())
+ return;
+
+ EmitIdGuard(masm, id, this->id().reg(), object(), temp(), fail);
+}
+
+bool
+GetPropertyIC::allowArrayLength(JSContext* cx) const
+{
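+ // Non-idempotent caches may always use the array length stub. Idempotent
+ // caches allow it only when every bytecode location covered by this cache
+ // already expects an Int32 result.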
+ if (!idempotent())
+ return true;
+
+ uint32_t locationIndex, numLocations;
+ getLocationInfo(&locationIndex, &numLocations);
+
+ IonScript* ion = GetTopJitJSScript(cx)->ionScript();
+ CacheLocation* locs = ion->getCacheLocs(locationIndex);
+ for (size_t i = 0; i < numLocations; i++) {
+ CacheLocation& curLoc = locs[i];
+ StackTypeSet* bcTypes = TypeScript::BytecodeTypes(curLoc.script, curLoc.pc);
+
+ if (!bcTypes->hasType(TypeSet::Int32Type()))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+GetPropertyIC::tryAttachNative(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(outerScript->ionScript() == ion);
+
+ RootedShape shape(cx);
+ RootedNativeObject holder(cx);
+
+ NativeGetPropCacheability type =
+ CanAttachNativeGetProp(cx, *this, obj, id, &holder, &shape);
+ if (type == CanAttachNone)
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+
+ StubAttacher attacher(*this);
+ const char* attachKind;
+
+ JS::TrackedOutcome outcome = JS::TrackedOutcome::ICOptStub_GenericSuccess;
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+ Label* maybeFailures = failures.used() ? &failures : nullptr;
+
+ switch (type) {
+ case CanAttachReadSlot:
+ GenerateReadSlot(cx, ion, masm, attacher, DontCheckTDZ, obj, holder,
+ shape, object(), output(), maybeFailures);
+ attachKind = idempotent() ? "idempotent reading"
+ : "non idempotent reading";
+ outcome = JS::TrackedOutcome::ICGetPropStub_ReadSlot;
+ break;
+ case CanAttachCallGetter:
+ if (!GenerateCallGetter(cx, ion, masm, attacher, obj, holder, shape,
+ liveRegs_, object(), output(), returnAddr, maybeFailures))
+ {
+ return false;
+ }
+ attachKind = "getter call";
+ outcome = JS::TrackedOutcome::ICGetPropStub_CallGetter;
+ break;
+ case CanAttachArrayLength:
+ if (!GenerateArrayLength(cx, masm, attacher, obj, object(), output(), &failures))
+ return false;
+
+ attachKind = "array length";
+ outcome = JS::TrackedOutcome::ICGetPropStub_ArrayLength;
+ break;
+ default:
+ MOZ_CRASH("Bad NativeGetPropCacheability");
+ }
+ return linkAndAttachStub(cx, masm, attacher, ion, attachKind, outcome);
+}
+
+bool
+GetPropertyIC::tryAttachUnboxed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(outerScript->ionScript() == ion);
+
+ if (!obj->is<UnboxedPlainObject>())
+ return true;
+ const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(id);
+ if (!property)
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+ Label* maybeFailures = failures.used() ? &failures : nullptr;
+
+ StubAttacher attacher(*this);
+ GenerateReadUnboxed(cx, ion, masm, attacher, obj, property, object(), output(), maybeFailures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "read unboxed",
+ JS::TrackedOutcome::ICGetPropStub_UnboxedRead);
+}
+
+bool
+GetPropertyIC::tryAttachUnboxedExpando(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(outerScript->ionScript() == ion);
+
+ if (!obj->is<UnboxedPlainObject>())
+ return true;
+ Rooted<UnboxedExpandoObject*> expando(cx, obj->as<UnboxedPlainObject>().maybeExpando());
+ if (!expando)
+ return true;
+
+ Shape* shape = expando->lookup(cx, id);
+ if (!shape || !shape->hasDefaultGetter() || !shape->hasSlot())
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+ Label* maybeFailures = failures.used() ? &failures : nullptr;
+
+ StubAttacher attacher(*this);
+ GenerateReadSlot(cx, ion, masm, attacher, DontCheckTDZ, obj, obj,
+ shape, object(), output(), maybeFailures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "read unboxed expando",
+ JS::TrackedOutcome::ICGetPropStub_UnboxedReadExpando);
+}
+
+bool
+GetPropertyIC::tryAttachUnboxedArrayLength(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(outerScript->ionScript() == ion);
+
+ if (!obj->is<UnboxedArrayObject>())
+ return true;
+
+ if (!JSID_IS_ATOM(id, cx->names().length))
+ return true;
+
+ if (obj->as<UnboxedArrayObject>().length() > INT32_MAX)
+ return true;
+
+ if (!allowArrayLength(cx))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+
+ StubAttacher attacher(*this);
+ GenerateUnboxedArrayLength(cx, masm, attacher, obj, object(), output(), &failures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "unboxed array length",
+ JS::TrackedOutcome::ICGetPropStub_UnboxedArrayLength);
+}
+
+bool
+GetPropertyIC::tryAttachTypedArrayLength(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!obj->is<TypedArrayObject>())
+ return true;
+
+ if (!JSID_IS_ATOM(id, cx->names().length))
+ return true;
+
+ if (hasTypedArrayLengthStub(obj))
+ return true;
+
+ if (output().type() != MIRType::Value && output().type() != MIRType::Int32) {
+ // The next execution should cause an invalidation because the type
+ // does not fit.
+ return true;
+ }
+
+ if (idempotent())
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+
+ GenerateTypedArrayLength(cx, masm, attacher, object(), output(), &failures);
+
+ setHasTypedArrayLengthStub(obj);
+ return linkAndAttachStub(cx, masm, attacher, ion, "typed array length",
+ JS::TrackedOutcome::ICGetPropStub_TypedArrayLength);
+}
+
+static void
+PushObjectOpResult(MacroAssembler& masm)
+{
+ static_assert(sizeof(ObjectOpResult) == sizeof(uintptr_t),
+ "ObjectOpResult size must match size reserved by masm.Push() here");
+ masm.Push(ImmWord(ObjectOpResult::Uninitialized));
+}
+
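+// ABI-callable wrapper around Proxy::get; the proxy itself is used as the
+// receiver.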
+static bool
+ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp)
+{
+ RootedValue receiver(cx, ObjectValue(*proxy));
+ return Proxy::get(cx, proxy, receiver, id, vp);
+}
+
+static bool
+EmitCallProxyGet(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ jsid id, LiveRegisterSet liveRegs, Register object, TypedOrValueRegister output,
+ jsbytecode* pc, void* returnAddr)
+{
+ MOZ_ASSERT(output.hasValue());
+ MacroAssembler::AfterICSaveLive aic = masm.icSaveLive(liveRegs);
+
+ // Remaining registers should be free, but we still need |object|, so leave
+ // it alone.
+ AllocatableRegisterSet regSet(RegisterSet::All());
+ regSet.take(AnyRegister(object));
+
+ // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+ // MutableHandleValue vp)
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argProxyReg = regSet.takeAnyGeneral();
+ Register argIdReg = regSet.takeAnyGeneral();
+ Register argVpReg = regSet.takeAnyGeneral();
+
+ Register scratch = regSet.takeAnyGeneral();
+
+ // Push stubCode for marking.
+ attacher.pushStubCodePointer(masm);
+
+ // Push args on stack first so we can take pointers to make handles.
+ masm.Push(UndefinedValue());
+ masm.moveStackPtrTo(argVpReg);
+
+ masm.Push(id, scratch);
+ masm.moveStackPtrTo(argIdReg);
+
+ // Push the proxy. Also used as receiver.
+ masm.Push(object);
+ masm.moveStackPtrTo(argProxyReg);
+
+ masm.loadJSContext(argJSContextReg);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLProxyExitFrameLayoutToken);
+
+ // Make the call.
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argProxyReg);
+ masm.passABIArg(argIdReg);
+ masm.passABIArg(argVpReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ProxyGetProperty));
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ Address outparam(masm.getStackPointer(), IonOOLProxyExitFrameLayout::offsetOfResult());
+ masm.loadTypedOrValue(outparam, output);
+
+ // masm.leaveExitFrame & pop locals
+ masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
+
+ masm.icRestoreLive(liveRegs, aic);
+ return true;
+}
+
+bool
+GetPropertyIC::tryAttachDOMProxyShadowed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+ MOZ_ASSERT(monitoredResult());
+ MOZ_ASSERT(output().hasValue());
+
+ if (idempotent())
+ return true;
+
+ *emitted = true;
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ // Guard on the shape of the object.
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(obj->maybeShape()),
+ &failures);
+
+ // No need for more guards: we know this is a DOM proxy, since the shape
+ // guard enforces a given JSClass, so just go ahead and emit the call to
+ // ProxyGet.
+
+ if (!EmitCallProxyGet(cx, masm, attacher, id, liveRegs_, object(), output(),
+ pc(), returnAddr))
+ {
+ return false;
+ }
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "list base shadowed get",
+ JS::TrackedOutcome::ICGetPropStub_DOMProxyShadowed);
+}
+
+bool
+GetPropertyIC::tryAttachDOMProxyUnshadowed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool resetNeeded,
+ void* returnAddr, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+ MOZ_ASSERT(monitoredResult());
+ MOZ_ASSERT(output().hasValue());
+
+ RootedObject checkObj(cx, obj->staticPrototype());
+ RootedNativeObject holder(cx);
+ RootedShape shape(cx);
+
+ NativeGetPropCacheability canCache =
+ CanAttachNativeGetProp(cx, *this, checkObj, id, &holder, &shape,
+ /* skipArrayLen = */true);
+ MOZ_ASSERT(canCache != CanAttachArrayLength);
+
+ if (canCache == CanAttachNone)
+ return true;
+
+ // Make sure we observe our invariants if we're going to deoptimize.
+ if (!holder && idempotent())
+ return true;
+
+ *emitted = true;
+
+ if (resetNeeded) {
+ // If we know that we have a DoesntShadowUnique object, then
+ // we reset the cache to clear out an existing IC for the object
+ // (if there is one). The generation is a constant in the generated
+ // code and we will not have the same generation again for this
+ // object, so the generation check in the existing IC would always
+ // fail anyway.
+ reset(Reprotect);
+ }
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ // Guard on the shape of the object.
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(obj->maybeShape()),
+ &failures);
+
+ // Guard that our expando object hasn't started shadowing this property.
+ CheckDOMProxyExpandoDoesNotShadow(cx, masm, obj, id, object(), &failures);
+
+ if (holder) {
+ // Found the property on the prototype chain. Treat it like a native
+ // getprop.
+ Register scratchReg = output().valueReg().scratchReg();
+ GeneratePrototypeGuards(cx, ion, masm, obj, holder, object(), scratchReg, &failures);
+
+ // Rename scratch for clarity.
+ Register holderReg = scratchReg;
+
+ // Guard on the holder of the property.
+ masm.movePtr(ImmGCPtr(holder), holderReg);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(holderReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(holder->lastProperty()),
+ &failures);
+
+ if (canCache == CanAttachReadSlot) {
+ EmitLoadSlot(masm, holder, shape, holderReg, output(), scratchReg);
+ } else {
+ // EmitGetterCall() expects |obj| to be the object the property lives on
+ // so that it can do some checks. Since we actually looked at checkObj, and
+ // no extra guards will be generated, we can just pass that instead. The
+ // holderIsReceiver check needs to use |obj|, though.
+ MOZ_ASSERT(canCache == CanAttachCallGetter);
+ MOZ_ASSERT(!idempotent());
+ bool holderIsReceiver = (obj == holder);
+ if (!EmitGetterCall(cx, masm, attacher, checkObj, holder, shape, holderIsReceiver,
+ liveRegs_, object(), output(), returnAddr))
+ {
+ return false;
+ }
+ }
+ } else {
+ // The property was not found on the prototype chain. Deoptimize down to a
+ // proxy get call.
+ MOZ_ASSERT(!idempotent());
+ if (!EmitCallProxyGet(cx, masm, attacher, id, liveRegs_, object(), output(),
+ pc(), returnAddr))
+ {
+ return false;
+ }
+ }
+
+ attacher.jumpRejoin(masm);
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "unshadowed proxy get",
+ JS::TrackedOutcome::ICGetPropStub_DOMProxyUnshadowed);
+}
+
+bool
+GetPropertyIC::tryAttachProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!obj->is<ProxyObject>())
+ return true;
+
+ // TI can't be sure about our properties, so make sure anything
+ // we return can be monitored directly.
+ if (!monitoredResult())
+ return true;
+
+ // Skim off DOM proxies.
+ if (IsCacheableDOMProxy(obj)) {
+ DOMProxyShadowsResult shadows = GetDOMProxyShadowsCheck()(cx, obj, id);
+ if (shadows == ShadowCheckFailed)
+ return false;
+ if (DOMProxyIsShadowing(shadows))
+ return tryAttachDOMProxyShadowed(cx, outerScript, ion, obj, id, returnAddr, emitted);
+
+ MOZ_ASSERT(shadows == DoesntShadow || shadows == DoesntShadowUnique);
+ return tryAttachDOMProxyUnshadowed(cx, outerScript, ion, obj, id,
+ shadows == DoesntShadowUnique, returnAddr, emitted);
+ }
+
+ return tryAttachGenericProxy(cx, outerScript, ion, obj, id, returnAddr, emitted);
+}
+
+bool
+GetPropertyIC::tryAttachGenericProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(obj->is<ProxyObject>());
+ MOZ_ASSERT(monitoredResult());
+ MOZ_ASSERT(output().hasValue());
+
+ if (hasGenericProxyStub())
+ return true;
+
+ if (idempotent())
+ return true;
+
+ *emitted = true;
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ Register scratchReg = output().valueReg().scratchReg();
+
+ masm.branchTestObjectIsProxy(false, object(), scratchReg, &failures);
+
+ // Ensure that the incoming object is not a DOM proxy, so that we can get
+ // to the specialized stubs.
+ masm.branchTestProxyHandlerFamily(Assembler::Equal, object(), scratchReg,
+ GetDOMProxyHandlerFamily(), &failures);
+
+ if (!EmitCallProxyGet(cx, masm, attacher, id, liveRegs_, object(), output(),
+ pc(), returnAddr))
+ {
+ return false;
+ }
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ MOZ_ASSERT(!hasGenericProxyStub_);
+ hasGenericProxyStub_ = true;
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "Generic Proxy get",
+ JS::TrackedOutcome::ICGetPropStub_GenericProxy);
+}
+
+bool
+GetPropertyIC::tryAttachArgumentsLength(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!JSID_IS_ATOM(id, cx->names().length))
+ return true;
+ if (!IsOptimizableArgumentsObjectForLength(obj))
+ return true;
+
+ MIRType outputType = output().type();
+ if (!(outputType == MIRType::Value || outputType == MIRType::Int32))
+ return true;
+
+ if (hasArgumentsLengthStub(obj->is<MappedArgumentsObject>()))
+ return true;
+
+ *emitted = true;
+
+ MOZ_ASSERT(!idempotent());
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ Register tmpReg;
+ if (output().hasValue()) {
+ tmpReg = output().valueReg().scratchReg();
+ } else {
+ MOZ_ASSERT(output().type() == MIRType::Int32);
+ tmpReg = output().typedReg().gpr();
+ }
+ MOZ_ASSERT(object() != tmpReg);
+
+ masm.branchTestObjClass(Assembler::NotEqual, object(), tmpReg, obj->getClass(), &failures);
+
+ // Get the initial ArgsObj length value and test whether the length has been overridden.
+ masm.unboxInt32(Address(object(), ArgumentsObject::getInitialLengthSlotOffset()), tmpReg);
+ masm.branchTest32(Assembler::NonZero, tmpReg, Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT),
+ &failures);
+
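+ // The length is stored shifted left by PACKED_BITS_COUNT to leave room for
+ // the packed flag bits, so shift it back down to recover the actual length.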
+ masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpReg);
+
+ // If the output is Int32, the result is already in the right place; otherwise box it into the output.
+ if (output().hasValue())
+ masm.tagValue(JSVAL_TYPE_INT32, tmpReg, output().valueReg());
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ if (obj->is<UnmappedArgumentsObject>()) {
+ MOZ_ASSERT(!hasUnmappedArgumentsLengthStub_);
+ hasUnmappedArgumentsLengthStub_ = true;
+ return linkAndAttachStub(cx, masm, attacher, ion, "ArgsObj length (unmapped)",
+ JS::TrackedOutcome::ICGetPropStub_ArgumentsLength);
+ }
+
+ MOZ_ASSERT(!hasMappedArgumentsLengthStub_);
+ hasMappedArgumentsLengthStub_ = true;
+ return linkAndAttachStub(cx, masm, attacher, ion, "ArgsObj length (mapped)",
+ JS::TrackedOutcome::ICGetPropStub_ArgumentsLength);
+}
+
+static void
+GenerateReadModuleNamespace(JSContext* cx, IonScript* ion, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, ModuleNamespaceObject* ns,
+ ModuleEnvironmentObject* env, Shape* shape, Register object,
+ TypedOrValueRegister output, Label* failures)
+{
+ MOZ_ASSERT(ns);
+ MOZ_ASSERT(env);
+
+ // If we have multiple failure jumps but didn't get a label from the
+ // outside, make one ourselves.
+ Label failures_;
+ if (!failures)
+ failures = &failures_;
+
+ // Check for the specific namespace object.
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual, object, ImmGCPtr(ns), failures);
+
+ // If we need a scratch register, use either an output register or the
+ // object register.
+ bool restoreScratch = false;
+ Register scratchReg = InvalidReg; // Quell compiler warning.
+
+ if (output.hasValue()) {
+ scratchReg = output.valueReg().scratchReg();
+ } else if (output.type() == MIRType::Double) {
+ masm.push(object);
+ scratchReg = object;
+ restoreScratch = true;
+ } else {
+ scratchReg = output.typedReg().gpr();
+ }
+
+ // Slot access.
+ Register envReg = scratchReg;
+ masm.movePtr(ImmGCPtr(env), envReg);
+ EmitLoadSlot(masm, &env->as<NativeObject>(), shape, envReg, output, scratchReg);
+
+ // Restore scratch on success.
+ if (restoreScratch)
+ masm.pop(object);
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+}
+
+bool
+GetPropertyIC::tryAttachModuleNamespace(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(outerScript->ionScript() == ion);
+
+ if (!obj->is<ModuleNamespaceObject>())
+ return true;
+
+ Rooted<ModuleNamespaceObject*> ns(cx, &obj->as<ModuleNamespaceObject>());
+
+ RootedModuleEnvironmentObject env(cx);
+ RootedShape shape(cx);
+ if (!ns->bindings().lookup(id, env.address(), shape.address()))
+ return true;
+
+ // Don't emit a stub until the target binding has been initialized.
+ if (env->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+ Label* maybeFailures = failures.used() ? &failures : nullptr;
+
+ GenerateReadModuleNamespace(cx, ion, masm, attacher, ns, env,
+ shape, object(), output(), maybeFailures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "module namespace",
+ JS::TrackedOutcome::ICGetPropStub_ReadSlot);
+}
+
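+// Convert |idval| into a jsid usable by the name/symbol stubs. *nameOrSymbol is
+// set to true only for symbols and for strings that are not integer indexes;
+// in every other case the caller falls back to the element paths.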
+static bool
+ValueToNameOrSymbolId(JSContext* cx, HandleValue idval, MutableHandleId id, bool* nameOrSymbol)
+{
+ *nameOrSymbol = false;
+
+ if (!idval.isString() && !idval.isSymbol())
+ return true;
+
+ if (!ValueToId<CanGC>(cx, idval, id))
+ return false;
+
+ if (!JSID_IS_STRING(id) && !JSID_IS_SYMBOL(id)) {
+ id.set(JSID_VOID);
+ return true;
+ }
+
+ uint32_t dummy;
+ if (JSID_IS_STRING(id) && JSID_TO_ATOM(id)->isIndex(&dummy)) {
+ id.set(JSID_VOID);
+ return true;
+ }
+
+ *nameOrSymbol = true;
+ return true;
+}
+
+bool
+GetPropertyIC::tryAttachStub(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+
+ if (!canAttachStub())
+ return true;
+
+ RootedId id(cx);
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx, idval, &id, &nameOrSymbol))
+ return false;
+
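+ // Each tryAttach* helper below returns true without setting *emitted when
+ // it simply does not apply; a false return signals a real failure.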
+ if (nameOrSymbol) {
+ if (!*emitted && !tryAttachArgumentsLength(cx, outerScript, ion, obj, id, emitted))
+ return false;
+
+ void* returnAddr = GetReturnAddressToIonCode(cx);
+
+ if (!*emitted && !tryAttachModuleNamespace(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachProxy(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachNative(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachUnboxed(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachUnboxedExpando(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachUnboxedArrayLength(cx, outerScript, ion, obj, id, returnAddr, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachTypedArrayLength(cx, outerScript, ion, obj, id, emitted))
+ return false;
+ }
+
+ if (idval.isInt32()) {
+ if (!*emitted && !tryAttachArgumentsElement(cx, outerScript, ion, obj, idval, emitted))
+ return false;
+ if (!*emitted && !tryAttachDenseElement(cx, outerScript, ion, obj, idval, emitted))
+ return false;
+ if (!*emitted && !tryAttachDenseElementHole(cx, outerScript, ion, obj, idval, emitted))
+ return false;
+ }
+
+ if (idval.isInt32() || idval.isString()) {
+ if (!*emitted && !tryAttachTypedOrUnboxedArrayElement(cx, outerScript, ion, obj, idval, emitted))
+ return false;
+ }
+
+ if (!*emitted)
+ JitSpew(JitSpew_IonIC, "Failed to attach GETPROP cache");
+
+ return true;
+}
+
+/* static */ bool
+GetPropertyIC::update(JSContext* cx, HandleScript outerScript, size_t cacheIndex,
+ HandleObject obj, HandleValue idval, MutableHandleValue vp)
+{
+ IonScript* ion = outerScript->ionScript();
+
+ GetPropertyIC& cache = ion->getCache(cacheIndex).toGetProperty();
+
+ // Override the return value if we are invalidated (bug 728188).
+ AutoDetectInvalidation adi(cx, vp, ion);
+
+ // If the cache is idempotent, we will redo the op in the interpreter.
+ if (cache.idempotent())
+ adi.disable();
+
+ // For now, just stop generating new stubs once we hit the stub count
+ // limit. Once we can make calls from within generated stubs, a new call
+ // stub will be generated instead and the previous stubs unlinked.
+ bool emitted = false;
+ if (!cache.isDisabled()) {
+ if (!cache.tryAttachStub(cx, outerScript, ion, obj, idval, &emitted))
+ return false;
+ cache.maybeDisable(emitted);
+ }
+
+ if (cache.idempotent() && !emitted) {
+ // Invalidate the cache if the property was not found, or was found on
+ // a non-native object. This ensures:
+ // 1) The property read has no observable side-effects.
+ // 2) There's no need to dynamically monitor the return type. This would
+ // be complicated since (due to GVN) there can be multiple pc's
+ // associated with a single idempotent cache.
+ JitSpew(JitSpew_IonIC, "Invalidating from idempotent cache %s:%" PRIuSIZE,
+ outerScript->filename(), outerScript->lineno());
+
+ outerScript->setInvalidatedIdempotentCache();
+
+ // Do not re-invalidate if the lookup already caused invalidation.
+ if (outerScript->hasIonScript())
+ Invalidate(cx, outerScript);
+
+ return true;
+ }
+
+ jsbytecode* pc = cache.idempotent() ? nullptr : cache.pc();
+
+ if (!pc || *pc == JSOP_GETPROP || *pc == JSOP_CALLPROP || *pc == JSOP_LENGTH) {
+ if (!GetProperty(cx, obj, obj, idval.toString()->asAtom().asPropertyName(), vp))
+ return false;
+ } else {
+ MOZ_ASSERT(*pc == JSOP_GETELEM || *pc == JSOP_CALLELEM);
+ if (!GetObjectElementOperation(cx, JSOp(*pc), obj, obj, idval, vp))
+ return false;
+ }
+
+ if (!cache.idempotent()) {
+ RootedScript script(cx);
+ jsbytecode* pc;
+ cache.getScriptedLocation(&script, &pc);
+
+ // Monitor changes to cache entry.
+ if (!cache.monitoredResult())
+ TypeScript::Monitor(cx, script, pc, vp);
+ }
+
+ return true;
+}
+
+void
+GetPropertyIC::reset(ReprotectCode reprotect)
+{
+ IonCache::reset(reprotect);
+ hasTypedArrayLengthStub_ = false;
+ hasMappedArgumentsLengthStub_ = false;
+ hasUnmappedArgumentsLengthStub_ = false;
+ hasMappedArgumentsElementStub_ = false;
+ hasUnmappedArgumentsElementStub_ = false;
+ hasGenericProxyStub_ = false;
+ hasDenseStub_ = false;
+}
+
+void
+IonCache::disable()
+{
+ reset(Reprotect);
+ this->disabled_ = 1;
+}
+
+void
+GetPropertyIC::maybeDisable(bool emitted)
+{
+ if (emitted) {
+ failedUpdates_ = 0;
+ return;
+ }
+
+ if (!canAttachStub() && id().constant()) {
+ // Don't disable the cache (and discard stubs) if we have a GETPROP and
+ // attached the maximum number of stubs. This can happen when JS code
+ // uses an AST-like data structure and accesses a field of a "base
+ // class", like node.nodeType. This should be temporary until we handle
+ // this case better, see bug 1107515.
+ return;
+ }
+
+ if (++failedUpdates_ > MAX_FAILED_UPDATES) {
+ JitSpew(JitSpew_IonIC, "Disable inline cache");
+ disable();
+ }
+}
+
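+// Discard all attached stubs by patching the initial jump back to the fallback
+// path; stubs attached afterwards are chained from the initial jump again.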
+void
+IonCache::reset(ReprotectCode reprotect)
+{
+ this->stubCount_ = 0;
+ PatchJump(initialJump_, fallbackLabel_, reprotect);
+ lastJump_ = initialJump_;
+}
+
+// Jump to failure if a value being written is not a property for obj/id.
+static void
+CheckTypeSetForWrite(MacroAssembler& masm, JSObject* obj, jsid id,
+ Register scratch, const ConstantOrRegister& value, Label* failure)
+{
+ TypedOrValueRegister valReg = value.reg();
+ ObjectGroup* group = obj->group();
+ MOZ_ASSERT(!group->unknownProperties());
+
+ HeapTypeSet* propTypes = group->maybeGetProperty(id);
+ MOZ_ASSERT(propTypes);
+
+ // guardTypeSet can read from type sets without triggering read barriers.
+ TypeSet::readBarrier(propTypes);
+
+ masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch, failure);
+}
+
+static void
+GenerateSetSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, Shape* shape, Register object, Register tempReg,
+ const ConstantOrRegister& value, bool needsTypeBarrier, bool checkTypeset,
+ Label* failures)
+{
+ TestMatchingReceiver(masm, attacher, object, obj, failures, needsTypeBarrier);
+
+ // Guard that the incoming value is in the type set for the property
+ // if a type barrier is required.
+ if (checkTypeset) {
+ MOZ_ASSERT(needsTypeBarrier);
+ CheckTypeSetForWrite(masm, obj, shape->propid(), tempReg, value, failures);
+ }
+
+ NativeObject::slotsSizeMustNotOverflow();
+
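+ // For unboxed plain objects the named slot lives on the expando object, so
+ // redirect both the compile-time |obj| and the runtime |object| register to
+ // the expando before storing.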
+ if (obj->is<UnboxedPlainObject>()) {
+ obj = obj->as<UnboxedPlainObject>().maybeExpando();
+ masm.loadPtr(Address(object, UnboxedPlainObject::offsetOfExpando()), tempReg);
+ object = tempReg;
+ }
+
+ if (obj->as<NativeObject>().isFixedSlot(shape->slot())) {
+ Address addr(object, NativeObject::getFixedSlotOffset(shape->slot()));
+
+ if (cx->zone()->needsIncrementalBarrier())
+ masm.callPreBarrier(addr, MIRType::Value);
+
+ masm.storeConstantOrRegister(value, addr);
+ } else {
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), tempReg);
+
+ Address addr(tempReg, obj->as<NativeObject>().dynamicSlotIndex(shape->slot()) * sizeof(Value));
+
+ if (cx->zone()->needsIncrementalBarrier())
+ masm.callPreBarrier(addr, MIRType::Value);
+
+ masm.storeConstantOrRegister(value, addr);
+ }
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+}
+
+bool
+SetPropertyIC::attachSetSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleShape shape, bool checkTypeset)
+{
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, shape->propid(), &failures);
+
+ GenerateSetSlot(cx, masm, attacher, obj, shape, object(), temp(), value(), needsTypeBarrier(),
+ checkTypeset, &failures);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "setting",
+ JS::TrackedOutcome::ICSetPropStub_Slot);
+}
+
+static bool
+IsCacheableSetPropCallNative(HandleObject obj, HandleObject holder, HandleShape shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (!shape->hasSetterValue())
+ return false;
+
+ if (!shape->setterObject() || !shape->setterObject()->is<JSFunction>())
+ return false;
+
+ JSFunction& setter = shape->setterObject()->as<JSFunction>();
+ if (!setter.isNative())
+ return false;
+
+ if (setter.jitInfo() && !setter.jitInfo()->needsOuterizedThisObject())
+ return true;
+
+ return !IsWindow(obj);
+}
+
+static bool
+IsCacheableSetPropCallScripted(HandleObject obj, HandleObject holder, HandleShape shape)
+{
+ if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (IsWindow(obj))
+ return false;
+
+ return shape->hasSetterValue() && shape->setterObject() &&
+ shape->setterObject()->is<JSFunction>() &&
+ shape->setterObject()->as<JSFunction>().hasJITCode();
+}
+
+static bool
+IsCacheableSetPropCallPropertyOp(HandleObject obj, HandleObject holder, HandleShape shape)
+{
+ if (!shape)
+ return false;
+
+ if (!IsCacheableProtoChainForIonOrCacheIR(obj, holder))
+ return false;
+
+ if (shape->hasSlot())
+ return false;
+
+ if (shape->hasDefaultSetter())
+ return false;
+
+ if (shape->hasSetterValue())
+ return false;
+
+ // Despite the vehement claims of Shape.h that writable() is only relevant
+ // for data descriptors, some SetterOps care desperately about its
+ // value. The flag should always be true, apart from these rare instances.
+ if (!shape->writable())
+ return false;
+
+ return true;
+}
+
+static bool
+ReportStrictErrorOrWarning(JSContext* cx, JS::HandleObject obj, JS::HandleId id, bool strict,
+ JS::ObjectOpResult& result)
+{
+ return result.reportStrictErrorOrWarning(cx, obj, id, strict);
+}
+
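+// Emit the tail of a SetterOp-style call: if the ObjectOpResult that was pushed
+// on the stack is not OkCode, call ReportStrictErrorOrWarning and jump to
+// |failure| if that reports an error.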
+template <class FrameLayout>
+void
+EmitObjectOpResultCheck(MacroAssembler& masm, Label* failure, bool strict,
+ Register scratchReg,
+ Register argJSContextReg,
+ Register argObjReg,
+ Register argIdReg,
+ Register argStrictReg,
+ Register argResultReg)
+{
+ // if (!result) {
+ Label noStrictError;
+ masm.branch32(Assembler::Equal,
+ Address(masm.getStackPointer(),
+ FrameLayout::offsetOfObjectOpResult()),
+ Imm32(ObjectOpResult::OkCode),
+ &noStrictError);
+
+ // if (!ReportStrictErrorOrWarning(cx, obj, id, strict, &result))
+ // goto failure;
+ masm.loadJSContext(argJSContextReg);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), FrameLayout::offsetOfObject()),
+ argObjReg);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), FrameLayout::offsetOfId()),
+ argIdReg);
+ masm.move32(Imm32(strict), argStrictReg);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), FrameLayout::offsetOfObjectOpResult()),
+ argResultReg);
+ masm.setupUnalignedABICall(scratchReg);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argObjReg);
+ masm.passABIArg(argIdReg);
+ masm.passABIArg(argStrictReg);
+ masm.passABIArg(argResultReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ReportStrictErrorOrWarning));
+ masm.branchIfFalseBool(ReturnReg, failure);
+
+ // }
+ masm.bind(&noStrictError);
+}
+
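+// ABI-callable wrapper around Proxy::set that also reports the strict-mode
+// error or warning when the ObjectOpResult indicates failure.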
+static bool
+ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id, HandleValue v, bool strict)
+{
+ RootedValue receiver(cx, ObjectValue(*proxy));
+ ObjectOpResult result;
+ return Proxy::set(cx, proxy, id, v, receiver, result)
+ && result.checkStrictErrorOrWarning(cx, proxy, id, strict);
+}
+
+static bool
+EmitCallProxySet(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ HandleId propId, LiveRegisterSet liveRegs, Register object,
+ const ConstantOrRegister& value, void* returnAddr, bool strict)
+{
+ MacroAssembler::AfterICSaveLive aic = masm.icSaveLive(liveRegs);
+
+ // Remaining registers should be free, but we still need to use |object| so
+ // leave it alone.
+ //
+ // WARNING: We do not take() the register used by |value|, if any, so
+ // regSet is going to re-allocate it. Hence the emitted code must not touch
+ // any of the registers allocated from regSet until after the last use of
+ // |value|. (We can't afford to take() it either, because x86 does not
+ // have enough registers.)
+ AllocatableRegisterSet regSet(RegisterSet::All());
+ regSet.take(AnyRegister(object));
+
+ // ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id, HandleValue v,
+ // bool strict);
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argProxyReg = regSet.takeAnyGeneral();
+ Register argIdReg = regSet.takeAnyGeneral();
+ Register argValueReg = regSet.takeAnyGeneral();
+ Register argStrictReg = regSet.takeAnyGeneral();
+
+ Register scratch = regSet.takeAnyGeneral();
+
+ // Push stubCode for marking.
+ attacher.pushStubCodePointer(masm);
+
+ // Push args on stack so we can take pointers to make handles.
+ // Push value before touching any other registers (see WARNING above).
+ masm.Push(value);
+ masm.moveStackPtrTo(argValueReg);
+
+ masm.move32(Imm32(strict), argStrictReg);
+
+ masm.Push(propId, scratch);
+ masm.moveStackPtrTo(argIdReg);
+
+ // Push object.
+ masm.Push(object);
+ masm.moveStackPtrTo(argProxyReg);
+
+ masm.loadJSContext(argJSContextReg);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLProxyExitFrameLayoutToken);
+
+ // Make the call.
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argProxyReg);
+ masm.passABIArg(argIdReg);
+ masm.passABIArg(argValueReg);
+ masm.passABIArg(argStrictReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ProxySetProperty));
+
+ // Test for error.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // masm.leaveExitFrame & pop locals
+ masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
+
+ masm.icRestoreLive(liveRegs, aic);
+ return true;
+}
+
+bool
+SetPropertyIC::attachGenericProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleId id, void* returnAddr)
+{
+ MOZ_ASSERT(!hasGenericProxyStub());
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+ {
+ masm.branchTestObjectIsProxy(false, object(), temp(), &failures);
+
+ // Reject DOM proxies: they take care of themselves, so this stub shouldn't
+ // catch too much. Note that the "failure" condition here is Assembler::Equal,
+ // i.e. DOM proxies are sent to the failure code.
+ masm.branchTestProxyHandlerFamily(Assembler::Equal, object(), temp(),
+ GetDOMProxyHandlerFamily(), &failures);
+ }
+
+ if (!EmitCallProxySet(cx, masm, attacher, id, liveRegs_, object(), value(),
+ returnAddr, strict()))
+ {
+ return false;
+ }
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ MOZ_ASSERT(!hasGenericProxyStub_);
+ hasGenericProxyStub_ = true;
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "generic proxy set",
+ JS::TrackedOutcome::ICSetPropStub_GenericProxy);
+}
+
+bool
+SetPropertyIC::attachDOMProxyShadowed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr)
+{
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ // Guard on the shape of the object.
+ masm.branchPtr(Assembler::NotEqual,
+ Address(object(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(obj->maybeShape()), &failures);
+
+ // No need for more guards: we know this is a DOM proxy, since the shape
+ // guard enforces a given JSClass, so just go ahead and emit the call to
+ // ProxySet.
+
+ if (!EmitCallProxySet(cx, masm, attacher, id, liveRegs_, object(),
+ value(), returnAddr, strict()))
+ {
+ return false;
+ }
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "DOM proxy shadowed set",
+ JS::TrackedOutcome::ICSetPropStub_DOMProxyShadowed);
+}
+
+static bool
+GenerateCallSetter(JSContext* cx, IonScript* ion, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher, HandleObject obj, HandleObject holder,
+ HandleShape shape, bool strict, Register object, Register tempReg,
+ const ConstantOrRegister& value, Label* failure, LiveRegisterSet liveRegs,
+ void* returnAddr)
+{
+ // Generate prototype guards if needed.
+ {
+ // Generate prototype/shape guards.
+ if (obj != holder)
+ GeneratePrototypeGuards(cx, ion, masm, obj, holder, object, tempReg, failure);
+
+ masm.movePtr(ImmGCPtr(holder), tempReg);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(tempReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(holder->as<NativeObject>().lastProperty()),
+ failure);
+ }
+
+ // Good to go for invoking setter.
+
+ MacroAssembler::AfterICSaveLive aic = masm.icSaveLive(liveRegs);
+
+ // Remaining registers should basically be free, but we still need |object|,
+ // so leave it alone. And of course we need our value, if it's not a constant.
+ AllocatableRegisterSet regSet(RegisterSet::All());
+ if (!value.constant())
+ regSet.take(value.reg());
+ bool valueAliasesObject = !regSet.has(object);
+ if (!valueAliasesObject)
+ regSet.take(object);
+
+ regSet.take(tempReg);
+
+ // This is a slower stub path, and we're going to be doing a call anyway,
+ // so there is no need to try hard to avoid using the stack. Scratch regs
+ // are just taken from the register set, not including the input; the
+ // current value is saved on the stack and restored when we're done with it.
+ //
+ // Be very careful not to use any of these registers before the value is
+ // pushed, since they might alias it.
+
+ if (IsCacheableSetPropCallNative(obj, holder, shape)) {
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argVpReg = regSet.takeAnyGeneral();
+
+ MOZ_ASSERT(shape->hasSetterValue() && shape->setterObject() &&
+ shape->setterObject()->is<JSFunction>());
+ JSFunction* target = &shape->setterObject()->as<JSFunction>();
+
+ MOZ_ASSERT(target->isNative());
+
+ Register argUintNReg = regSet.takeAnyGeneral();
+
+ // Set up the call:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // vp[0] is callee/outparam
+ // vp[1] is |this|
+ // vp[2] is the value
+
+ // Build vp and move the base into argVpReg.
+ masm.Push(value);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
+ masm.Push(ObjectValue(*target));
+ masm.moveStackPtrTo(argVpReg);
+
+ // Preload other regs
+ masm.loadJSContext(argJSContextReg);
+ masm.move32(Imm32(1), argUintNReg);
+
+ // Push data for GC marking
+ masm.Push(argUintNReg);
+ attacher.pushStubCodePointer(masm);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLNativeExitFrameLayoutToken);
+
+ // Make the call
+ masm.setupUnalignedABICall(tempReg);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argUintNReg);
+ masm.passABIArg(argVpReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()));
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // masm.leaveExitFrame & pop locals.
+ masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
+ } else if (IsCacheableSetPropCallPropertyOp(obj, holder, shape)) {
+ // We can't take all our registers up front, because on x86 we need 2
+ // for the value, one for scratch, 5 for the arguments, which makes 8,
+ // but we only have 7 to work with. So only grab the ones we need
+ // before we push value and release its reg back into the set.
+ Register argResultReg = regSet.takeAnyGeneral();
+
+ SetterOp target = shape->setterOp();
+ MOZ_ASSERT(target);
+
+ // JSSetterOp: bool fn(JSContext* cx, HandleObject obj,
+ // HandleId id, HandleValue value, ObjectOpResult& result);
+
+ // First, allocate an ObjectOpResult on the stack. We push this before
+ // the stubCode pointer in order to match the layout of
+ // IonOOLSetterOpExitFrameLayout.
+ PushObjectOpResult(masm);
+ masm.moveStackPtrTo(argResultReg);
+
+ attacher.pushStubCodePointer(masm);
+
+ // Push args on stack so we can take pointers to make handles.
+ if (value.constant()) {
+ masm.Push(value.value());
+ } else {
+ masm.Push(value.reg());
+ if (!valueAliasesObject)
+ regSet.add(value.reg());
+ }
+
+ // OK, now we can grab our remaining registers and grab the pointer to
+ // what we just pushed into one of them.
+ Register argJSContextReg = regSet.takeAnyGeneral();
+ Register argValueReg = regSet.takeAnyGeneral();
+ // We can just reuse the "object" register for argObjReg.
+ Register argObjReg = object;
+ Register argIdReg = regSet.takeAnyGeneral();
+ masm.moveStackPtrTo(argValueReg);
+
+ // Push the canonical jsid from the shape instead of the property name.
+ masm.Push(shape->propid(), argIdReg);
+ masm.moveStackPtrTo(argIdReg);
+
+ masm.Push(object);
+ masm.moveStackPtrTo(argObjReg);
+
+ masm.loadJSContext(argJSContextReg);
+
+ if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
+ return false;
+ masm.enterFakeExitFrame(IonOOLSetterOpExitFrameLayoutToken);
+
+ // Make the call.
+ masm.setupUnalignedABICall(tempReg);
+ masm.passABIArg(argJSContextReg);
+ masm.passABIArg(argObjReg);
+ masm.passABIArg(argIdReg);
+ masm.passABIArg(argValueReg);
+ masm.passABIArg(argResultReg);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target));
+
+ // Test for error.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Test for strict failure. We emit the check even in non-strict mode
+ // in order to pick up the warning if extraWarnings is enabled.
+ EmitObjectOpResultCheck<IonOOLSetterOpExitFrameLayout>(masm, masm.exceptionLabel(),
+ strict, tempReg,
+ argJSContextReg, argObjReg,
+ argIdReg, argValueReg,
+ argResultReg);
+
+ // masm.leaveExitFrame & pop locals.
+ masm.adjustStack(IonOOLSetterOpExitFrameLayout::Size());
+ } else {
+ MOZ_ASSERT(IsCacheableSetPropCallScripted(obj, holder, shape));
+
+ JSFunction* target = &shape->setterValue().toObject().as<JSFunction>();
+ uint32_t framePushedBefore = masm.framePushed();
+
+ // Construct IonAccessorICFrameLayout.
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
+ IonAccessorICFrameLayout::Size());
+ attacher.pushStubCodePointer(masm);
+ masm.Push(Imm32(descriptor));
+ masm.Push(ImmPtr(returnAddr));
+
+ // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
+ // so we just have to make sure the stack is aligned after we push the
+ // |this| + argument Values.
+ uint32_t numArgs = Max(size_t(1), target->nargs());
+ uint32_t argSize = (numArgs + 1) * sizeof(Value);
+ uint32_t padding = ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
+ MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
+ MOZ_ASSERT(padding < JitStackAlignment);
+ masm.reserveStack(padding);
+
+ for (size_t i = 1; i < target->nargs(); i++)
+ masm.Push(UndefinedValue());
+ masm.Push(value);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
+
+ masm.movePtr(ImmGCPtr(target), tempReg);
+
+ descriptor = MakeFrameDescriptor(argSize + padding, JitFrame_IonAccessorIC,
+ JitFrameLayout::Size());
+ masm.Push(Imm32(1)); // argc
+ masm.Push(tempReg);
+ masm.Push(Imm32(descriptor));
+
+ // Check stack alignment. Add sizeof(uintptr_t) for the return address.
+ MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) == 0);
+
+ // The setter has JIT code now and we will only discard the setter's JIT
+ // code when discarding all JIT code in the Zone, so we can assume it'll
+ // still have JIT code.
+ MOZ_ASSERT(target->hasJITCode());
+ masm.loadPtr(Address(tempReg, JSFunction::offsetOfNativeOrScript()), tempReg);
+ masm.loadBaselineOrIonRaw(tempReg, tempReg, nullptr);
+ masm.callJit(tempReg);
+
+ masm.freeStack(masm.framePushed() - framePushedBefore);
+ }
+
+ masm.icRestoreLive(liveRegs, aic);
+ return true;
+}
+
+static bool
+IsCacheableDOMProxyUnshadowedSetterCall(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleObject holder, MutableHandleShape shape)
+{
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ RootedObject checkObj(cx, obj->staticPrototype());
+ if (!checkObj)
+ return false;
+
+ if (!LookupPropertyPure(cx, obj, id, holder.address(), shape.address()))
+ return false;
+
+ if (!holder)
+ return false;
+
+ return IsCacheableSetPropCallNative(checkObj, holder, shape) ||
+ IsCacheableSetPropCallPropertyOp(checkObj, holder, shape) ||
+ IsCacheableSetPropCallScripted(checkObj, holder, shape);
+}
+
+bool
+SetPropertyIC::attachDOMProxyUnshadowed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr)
+{
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ emitIdGuard(masm, id, &failures);
+
+ // Guard on the shape of the object.
+ masm.branchPtr(Assembler::NotEqual,
+ Address(object(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(obj->maybeShape()), &failures);
+
+ // Guard that our expando object hasn't started shadowing this property.
+ CheckDOMProxyExpandoDoesNotShadow(cx, masm, obj, id, object(), &failures);
+
+ RootedObject holder(cx);
+ RootedShape shape(cx);
+ if (IsCacheableDOMProxyUnshadowedSetterCall(cx, obj, id, &holder, &shape)) {
+ if (!GenerateCallSetter(cx, ion, masm, attacher, obj, holder, shape, strict(),
+ object(), temp(), value(), &failures, liveRegs_, returnAddr))
+ {
+ return false;
+ }
+ } else {
+ // Either there was no proto, or the property wasn't appropriately found on it.
+ // Drop back to just a call to Proxy::set().
+ if (!EmitCallProxySet(cx, masm, attacher, id, liveRegs_, object(),
+ value(), returnAddr, strict()))
+ {
+ return false;
+ }
+ }
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "DOM proxy unshadowed set",
+ JS::TrackedOutcome::ICSetPropStub_DOMProxyUnshadowed);
+}
+
+bool
+SetPropertyIC::attachCallSetter(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleObject holder, HandleShape shape,
+ void* returnAddr)
+{
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failure;
+ emitIdGuard(masm, shape->propid(), &failure);
+ TestMatchingReceiver(masm, attacher, object(), obj, &failure);
+
+ if (!GenerateCallSetter(cx, ion, masm, attacher, obj, holder, shape, strict(),
+ object(), temp(), value(), &failure, liveRegs_, returnAddr))
+ {
+ return false;
+ }
+
+ // Rejoin jump.
+ attacher.jumpRejoin(masm);
+
+ // Jump to next stub.
+ masm.bind(&failure);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "setter call",
+ JS::TrackedOutcome::ICSetPropStub_CallSetter);
+}
+
+static void
+GenerateAddSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, Shape* oldShape, ObjectGroup* oldGroup,
+ Register object, Register tempReg, const ConstantOrRegister& value,
+ bool checkTypeset, Label* failures)
+{
+ // Use a modified version of TestMatchingReceiver that uses the old shape and group.
+ masm.branchTestObjGroup(Assembler::NotEqual, object, oldGroup, failures);
+ if (obj->maybeShape()) {
+ masm.branchTestObjShape(Assembler::NotEqual, object, oldShape, failures);
+ } else {
+ MOZ_ASSERT(obj->is<UnboxedPlainObject>());
+
+ Address expandoAddress(object, UnboxedPlainObject::offsetOfExpando());
+ masm.branchPtr(Assembler::Equal, expandoAddress, ImmWord(0), failures);
+
+ masm.loadPtr(expandoAddress, tempReg);
+ masm.branchTestObjShape(Assembler::NotEqual, tempReg, oldShape, failures);
+ }
+
+ Shape* newShape = obj->maybeShape();
+ if (!newShape)
+ newShape = obj->as<UnboxedPlainObject>().maybeExpando()->lastProperty();
+
+ // Guard that the incoming value is in the type set for the property
+ // if a type barrier is required.
+ if (checkTypeset)
+ CheckTypeSetForWrite(masm, obj, newShape->propid(), tempReg, value, failures);
+
+ // Guard shapes along prototype chain.
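+ // (If any prototype's shape changed, the property being added might now be
+ // shadowed there, e.g. by a setter, which would make this add-slot fast
+ // path incorrect.)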
+ JSObject* proto = obj->staticPrototype();
+ Register protoReg = tempReg;
+ bool first = true;
+ while (proto) {
+ Shape* protoShape = proto->as<NativeObject>().lastProperty();
+
+ // Load next prototype.
+ masm.loadObjProto(first ? object : protoReg, protoReg);
+ first = false;
+
+ // Ensure that its shape matches.
+ masm.branchTestObjShape(Assembler::NotEqual, protoReg, protoShape, failures);
+
+ proto = proto->staticPrototype();
+ }
+
+ // Call a stub to (re)allocate dynamic slots, if necessary.
+ uint32_t newNumDynamicSlots = obj->is<UnboxedPlainObject>()
+ ? obj->as<UnboxedPlainObject>().maybeExpando()->numDynamicSlots()
+ : obj->as<NativeObject>().numDynamicSlots();
+ if (NativeObject::dynamicSlotsCount(oldShape) != newNumDynamicSlots) {
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ // Get 2 temp registers, without clobbering the object register.
+ regs.takeUnchecked(object);
+ Register temp1 = regs.takeAnyGeneral();
+ Register temp2 = regs.takeAnyGeneral();
+
+ if (obj->is<UnboxedPlainObject>()) {
+ // Pass the expando object to the stub.
+ masm.Push(object);
+ masm.loadPtr(Address(object, UnboxedPlainObject::offsetOfExpando()), object);
+ }
+
+ masm.setupUnalignedABICall(temp1);
+ masm.loadJSContext(temp1);
+ masm.passABIArg(temp1);
+ masm.passABIArg(object);
+ masm.move32(Imm32(newNumDynamicSlots), temp2);
+ masm.passABIArg(temp2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsDontReportOOM));
+
+ // Branch on ReturnReg before restoring volatile registers, so
+ // ReturnReg isn't clobbered.
+ uint32_t framePushedAfterCall = masm.framePushed();
+ Label allocFailed, allocDone;
+ masm.branchIfFalseBool(ReturnReg, &allocFailed);
+ masm.jump(&allocDone);
+
+ masm.bind(&allocFailed);
+ if (obj->is<UnboxedPlainObject>())
+ masm.Pop(object);
+ masm.PopRegsInMask(save);
+ masm.jump(failures);
+
+ masm.bind(&allocDone);
+ masm.setFramePushed(framePushedAfterCall);
+ if (obj->is<UnboxedPlainObject>())
+ masm.Pop(object);
+ masm.PopRegsInMask(save);
+ }
+
+ bool popObject = false;
+
+ if (obj->is<UnboxedPlainObject>()) {
+ masm.push(object);
+ popObject = true;
+ obj = obj->as<UnboxedPlainObject>().maybeExpando();
+ masm.loadPtr(Address(object, UnboxedPlainObject::offsetOfExpando()), object);
+ }
+
+ // Write the object or expando object's new shape.
+ Address shapeAddr(object, ShapedObject::offsetOfShape());
+ if (cx->zone()->needsIncrementalBarrier())
+ masm.callPreBarrier(shapeAddr, MIRType::Shape);
+ masm.storePtr(ImmGCPtr(newShape), shapeAddr);
+
+ if (oldGroup != obj->group()) {
+ MOZ_ASSERT(!obj->is<UnboxedPlainObject>());
+
+ // Changing the object's group from a partially to a fully initialized group,
+ // per the acquired properties analysis. Only change the group if the
+ // old group still has a newScript.
+ Label noTypeChange, skipPop;
+
+ masm.loadPtr(Address(object, JSObject::offsetOfGroup()), tempReg);
+ masm.branchPtr(Assembler::Equal,
+ Address(tempReg, ObjectGroup::offsetOfAddendum()),
+ ImmWord(0),
+ &noTypeChange);
+
+ Address groupAddr(object, JSObject::offsetOfGroup());
+ if (cx->zone()->needsIncrementalBarrier())
+ masm.callPreBarrier(groupAddr, MIRType::ObjectGroup);
+ masm.storePtr(ImmGCPtr(obj->group()), groupAddr);
+
+ masm.bind(&noTypeChange);
+ }
+
+ // Set the value on the object. Since this is an add, obj->lastProperty()
+ // must be the shape of the property we are adding.
+ NativeObject::slotsSizeMustNotOverflow();
+ if (obj->as<NativeObject>().isFixedSlot(newShape->slot())) {
+ Address addr(object, NativeObject::getFixedSlotOffset(newShape->slot()));
+ masm.storeConstantOrRegister(value, addr);
+ } else {
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), tempReg);
+
+ Address addr(tempReg, obj->as<NativeObject>().dynamicSlotIndex(newShape->slot()) * sizeof(Value));
+ masm.storeConstantOrRegister(value, addr);
+ }
+
+ if (popObject)
+ masm.pop(object);
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Failure.
+ masm.bind(failures);
+
+ attacher.jumpNextStub(masm);
+}
+
+bool
+SetPropertyIC::attachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, HandleShape oldShape,
+ HandleObjectGroup oldGroup, bool checkTypeset)
+{
+ MOZ_ASSERT_IF(!needsTypeBarrier(), !checkTypeset);
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+
+ GenerateAddSlot(cx, masm, attacher, obj, oldShape, oldGroup, object(), temp(), value(),
+ checkTypeset, &failures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "adding",
+ JS::TrackedOutcome::ICSetPropStub_AddSlot);
+}
+
+static bool
+CanInlineSetPropTypeCheck(JSObject* obj, jsid id, const ConstantOrRegister& val,
+ bool* checkTypeset)
+{
+ bool shouldCheck = false;
+ ObjectGroup* group = obj->group();
+ if (!group->unknownProperties()) {
+ HeapTypeSet* propTypes = group->maybeGetProperty(id);
+ if (!propTypes)
+ return false;
+ if (!propTypes->unknown()) {
+ if (obj->isSingleton() && !propTypes->nonConstantProperty())
+ return false;
+ shouldCheck = true;
+ if (val.constant()) {
+                // If the input is a constant, don't bother attaching if the barrier will always fail.
+ if (!propTypes->hasType(TypeSet::GetValueType(val.value())))
+ return false;
+ shouldCheck = false;
+ } else {
+ TypedOrValueRegister reg = val.reg();
+                // We can do the same trick as above for primitive types of specialized registers.
+                // TI's handling of objects is complicated enough to warrant a runtime
+                // check, as we can't statically handle the case where the typeset
+                // contains the specific object but doesn't have ANYOBJECT set.
+ if (reg.hasTyped() && reg.type() != MIRType::Object) {
+ JSValueType valType = ValueTypeFromMIRType(reg.type());
+ if (!propTypes->hasType(TypeSet::PrimitiveType(valType)))
+ return false;
+ shouldCheck = false;
+ }
+ }
+ }
+ }
+
+ *checkTypeset = shouldCheck;
+ return true;
+}
+
+static bool
+IsPropertySetInlineable(NativeObject* obj, HandleId id, MutableHandleShape pshape,
+ const ConstantOrRegister& val, bool needsTypeBarrier, bool* checkTypeset)
+{
+ // CanInlineSetPropTypeCheck assumes obj has a non-lazy group.
+ MOZ_ASSERT(!obj->hasLazyGroup());
+
+ // Do a pure non-proto chain climbing lookup. See note in
+ // CanAttachNativeGetProp.
+ pshape.set(obj->lookupPure(id));
+
+ if (!pshape)
+ return false;
+
+ if (!pshape->hasSlot())
+ return false;
+
+ if (!pshape->hasDefaultSetter())
+ return false;
+
+ if (!pshape->writable())
+ return false;
+
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, id, val, checkTypeset))
+ return false;
+
+ return true;
+}
+
+static bool
+PrototypeChainShadowsPropertyAdd(JSContext* cx, JSObject* obj, jsid id)
+{
+    // Walk up the object prototype chain and ensure that all prototypes are
+    // native, and that all prototypes have no getter or setter defined on the
+    // property.
+ for (JSObject* proto = obj->staticPrototype(); proto; proto = proto->staticPrototype()) {
+ // If prototype is non-native, don't optimize
+ if (!proto->isNative())
+ return true;
+
+ // If prototype defines this property in a non-plain way, don't optimize
+ Shape* protoShape = proto->as<NativeObject>().lookupPure(id);
+ if (protoShape && !protoShape->hasDefaultSetter())
+ return true;
+
+ // Otherwise, if there's no such property, watch out for a resolve
+ // hook that would need to be invoked and thus prevent inlining of
+ // property addition.
+ if (ClassMayResolveId(cx->names(), proto->getClass(), id, proto))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+IsPropertyAddInlineable(JSContext* cx, NativeObject* obj, HandleId id,
+ const ConstantOrRegister& val,
+ HandleShape oldShape, bool needsTypeBarrier, bool* checkTypeset)
+{
+ // If the shape of the object did not change, then this was not an add.
+ if (obj->lastProperty() == oldShape)
+ return false;
+
+ Shape* shape = obj->lookupPure(id);
+ if (!shape || shape->inDictionary() || !shape->hasSlot() || !shape->hasDefaultSetter())
+ return false;
+
+ // If we have a shape at this point and the object's shape changed, then
+ // the shape must be the one we just added.
+ MOZ_ASSERT(shape == obj->lastProperty());
+
+ // Watch out for resolve hooks.
+ if (ClassMayResolveId(cx->names(), obj->getClass(), id, obj))
+ return false;
+
+ // Likewise for an addProperty hook, since we'll need to invoke it.
+ if (obj->getClass()->getAddProperty())
+ return false;
+
+ if (!obj->nonProxyIsExtensible() || !shape->writable())
+ return false;
+
+ if (PrototypeChainShadowsPropertyAdd(cx, obj, id))
+ return false;
+
+    // Don't attach if we are adding a property to an object for which the
+    // new-script properties analysis hasn't been performed yet, as a group
+    // change may be required here afterwards.
+ if (obj->group()->newScript() && !obj->group()->newScript()->analyzed())
+ return false;
+
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, id, val, checkTypeset))
+ return false;
+
+ return true;
+}
+
+static SetPropertyIC::NativeSetPropCacheability
+CanAttachNativeSetProp(JSContext* cx, HandleObject obj, HandleId id, const ConstantOrRegister& val,
+ bool needsTypeBarrier, MutableHandleObject holder,
+ MutableHandleShape shape, bool* checkTypeset)
+{
+ // See if the property exists on the object.
+ if (obj->isNative() && IsPropertySetInlineable(&obj->as<NativeObject>(), id, shape, val,
+ needsTypeBarrier, checkTypeset))
+ {
+ return SetPropertyIC::CanAttachSetSlot;
+ }
+
+ // If we couldn't find the property on the object itself, do a full, but
+ // still pure lookup for setters.
+ if (!LookupPropertyPure(cx, obj, id, holder.address(), shape.address()))
+ return SetPropertyIC::CanAttachNone;
+
+ // If the object doesn't have the property, we don't know if we can attach
+ // a stub to add the property until we do the VM call to add. If the
+ // property exists as a data property on the prototype, we should add
+ // a new, shadowing property.
+ if (obj->isNative() && (!shape || (obj != holder && holder->isNative() &&
+ shape->hasDefaultSetter() && shape->hasSlot())))
+ {
+ return SetPropertyIC::MaybeCanAttachAddSlot;
+ }
+
+ if (IsImplicitNonNativeProperty(shape))
+ return SetPropertyIC::CanAttachNone;
+
+ if (IsCacheableSetPropCallPropertyOp(obj, holder, shape) ||
+ IsCacheableSetPropCallNative(obj, holder, shape) ||
+ IsCacheableSetPropCallScripted(obj, holder, shape))
+ {
+ return SetPropertyIC::CanAttachCallSetter;
+ }
+
+ return SetPropertyIC::CanAttachNone;
+}
+
+static void
+GenerateSetUnboxed(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, jsid id, uint32_t unboxedOffset, JSValueType unboxedType,
+ Register object, Register tempReg, const ConstantOrRegister& value,
+ bool checkTypeset, Label* failures)
+{
+ // Guard on the type of the object.
+ masm.branchPtr(Assembler::NotEqual,
+ Address(object, JSObject::offsetOfGroup()),
+ ImmGCPtr(obj->group()), failures);
+
+ if (checkTypeset)
+ CheckTypeSetForWrite(masm, obj, id, tempReg, value, failures);
+
+ Address address(object, UnboxedPlainObject::offsetOfData() + unboxedOffset);
+
+ if (cx->zone()->needsIncrementalBarrier()) {
+ if (unboxedType == JSVAL_TYPE_OBJECT)
+ masm.callPreBarrier(address, MIRType::Object);
+ else if (unboxedType == JSVAL_TYPE_STRING)
+ masm.callPreBarrier(address, MIRType::String);
+ else
+ MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(unboxedType));
+ }
+
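+    // Store the value; storeUnboxedProperty bails out to |failures| if the
+    // value cannot be stored as the unboxed type.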
+ masm.storeUnboxedProperty(address, unboxedType, value, failures);
+
+ attacher.jumpRejoin(masm);
+
+ masm.bind(failures);
+ attacher.jumpNextStub(masm);
+}
+
+static bool
+CanAttachSetUnboxed(JSContext* cx, HandleObject obj, HandleId id, const ConstantOrRegister& val,
+ bool needsTypeBarrier, bool* checkTypeset,
+ uint32_t* unboxedOffset, JSValueType* unboxedType)
+{
+ if (!obj->is<UnboxedPlainObject>())
+ return false;
+
+ const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(id);
+ if (property) {
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, id, val, checkTypeset))
+ return false;
+ *unboxedOffset = property->offset;
+ *unboxedType = property->type;
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+CanAttachSetUnboxedExpando(JSContext* cx, HandleObject obj, HandleId id,
+ const ConstantOrRegister& val,
+ bool needsTypeBarrier, bool* checkTypeset, Shape** pshape)
+{
+ if (!obj->is<UnboxedPlainObject>())
+ return false;
+
+ Rooted<UnboxedExpandoObject*> expando(cx, obj->as<UnboxedPlainObject>().maybeExpando());
+ if (!expando)
+ return false;
+
+ Shape* shape = expando->lookupPure(id);
+ if (!shape || !shape->hasDefaultSetter() || !shape->hasSlot() || !shape->writable())
+ return false;
+
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, id, val, checkTypeset))
+ return false;
+
+ *pshape = shape;
+ return true;
+}
+
+static bool
+CanAttachAddUnboxedExpando(JSContext* cx, HandleObject obj, HandleShape oldShape,
+ HandleId id, const ConstantOrRegister& val,
+ bool needsTypeBarrier, bool* checkTypeset)
+{
+ if (!obj->is<UnboxedPlainObject>())
+ return false;
+
+ Rooted<UnboxedExpandoObject*> expando(cx, obj->as<UnboxedPlainObject>().maybeExpando());
+ if (!expando || expando->inDictionaryMode())
+ return false;
+
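+    // The expando's newest property must be the one just added for |id|,
+    // sitting directly on top of the old shape.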
+ Shape* newShape = expando->lastProperty();
+ if (newShape->isEmptyShape() || newShape->propid() != id || newShape->previous() != oldShape)
+ return false;
+
+ MOZ_ASSERT(newShape->hasDefaultSetter() && newShape->hasSlot() && newShape->writable());
+
+ if (PrototypeChainShadowsPropertyAdd(cx, obj, id))
+ return false;
+
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, id, val, checkTypeset))
+ return false;
+
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachUnboxed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+
+ bool checkTypeset = false;
+ uint32_t unboxedOffset;
+ JSValueType unboxedType;
+ if (!CanAttachSetUnboxed(cx, obj, id, value(), needsTypeBarrier(), &checkTypeset,
+ &unboxedOffset, &unboxedType))
+ {
+ return true;
+ }
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ emitIdGuard(masm, id, &failures);
+
+ GenerateSetUnboxed(cx, masm, attacher, obj, id, unboxedOffset, unboxedType,
+ object(), temp(), value(), checkTypeset, &failures);
+ return linkAndAttachStub(cx, masm, attacher, ion, "set_unboxed",
+ JS::TrackedOutcome::ICSetPropStub_SetUnboxed);
+}
+
+bool
+SetPropertyIC::tryAttachProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+
+ if (!obj->is<ProxyObject>())
+ return true;
+
+ void* returnAddr = GetReturnAddressToIonCode(cx);
+ if (IsCacheableDOMProxy(obj)) {
+ DOMProxyShadowsResult shadows = GetDOMProxyShadowsCheck()(cx, obj, id);
+ if (shadows == ShadowCheckFailed)
+ return false;
+
+ if (DOMProxyIsShadowing(shadows)) {
+ if (!attachDOMProxyShadowed(cx, outerScript, ion, obj, id, returnAddr))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ MOZ_ASSERT(shadows == DoesntShadow || shadows == DoesntShadowUnique);
+ if (shadows == DoesntShadowUnique)
+ reset(Reprotect);
+ if (!attachDOMProxyUnshadowed(cx, outerScript, ion, obj, id, returnAddr))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ if (hasGenericProxyStub())
+ return true;
+
+ if (!attachGenericProxy(cx, outerScript, ion, id, returnAddr))
+ return false;
+ *emitted = true;
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachNative(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted, bool* tryNativeAddSlot)
+{
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(!*tryNativeAddSlot);
+
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ bool checkTypeset = false;
+ NativeSetPropCacheability canCache = CanAttachNativeSetProp(cx, obj, id, value(), needsTypeBarrier(),
+ &holder, &shape, &checkTypeset);
+ switch (canCache) {
+ case CanAttachNone:
+ return true;
+
+ case CanAttachSetSlot: {
+ RootedNativeObject nobj(cx, &obj->as<NativeObject>());
+ if (!attachSetSlot(cx, outerScript, ion, nobj, shape, checkTypeset))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ case CanAttachCallSetter: {
+ void* returnAddr = GetReturnAddressToIonCode(cx);
+ if (!attachCallSetter(cx, outerScript, ion, obj, holder, shape, returnAddr))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ case MaybeCanAttachAddSlot:
+ *tryNativeAddSlot = true;
+ return true;
+ }
+
+ MOZ_CRASH("Unreachable");
+}
+
+bool
+SetPropertyIC::tryAttachUnboxedExpando(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+
+ RootedShape shape(cx);
+ bool checkTypeset = false;
+ if (!CanAttachSetUnboxedExpando(cx, obj, id, value(), needsTypeBarrier(),
+ &checkTypeset, shape.address()))
+ {
+ return true;
+ }
+
+ if (!attachSetSlot(cx, outerScript, ion, obj, shape, checkTypeset))
+ return false;
+ *emitted = true;
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachStub(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, HandleValue value,
+ MutableHandleId id, bool* emitted, bool* tryNativeAddSlot)
+{
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(!*tryNativeAddSlot);
+
+ if (!canAttachStub() || obj->watched())
+ return true;
+
+ // Fail cache emission if the object is frozen
+ if (obj->is<NativeObject>() && obj->as<NativeObject>().getElementsHeader()->isFrozen())
+ return true;
+
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx, idval, id, &nameOrSymbol))
+ return false;
+
+ if (nameOrSymbol) {
+ if (!*emitted && !tryAttachProxy(cx, outerScript, ion, obj, id, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachNative(cx, outerScript, ion, obj, id, emitted, tryNativeAddSlot))
+ return false;
+
+ if (!*emitted && !tryAttachUnboxed(cx, outerScript, ion, obj, id, emitted))
+ return false;
+
+ if (!*emitted && !tryAttachUnboxedExpando(cx, outerScript, ion, obj, id, emitted))
+ return false;
+ }
+
+ if (idval.isInt32()) {
+ if (!*emitted && !tryAttachDenseElement(cx, outerScript, ion, obj, idval, emitted))
+ return false;
+ if (!*emitted &&
+ !tryAttachTypedArrayElement(cx, outerScript, ion, obj, idval, value, emitted))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, HandleObjectGroup oldGroup,
+ HandleShape oldShape, bool tryNativeAddSlot, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+
+ if (!canAttachStub())
+ return true;
+
+ if (!JSID_IS_STRING(id) && !JSID_IS_SYMBOL(id))
+ return true;
+
+ // Fail cache emission if the object is frozen
+ if (obj->is<NativeObject>() && obj->as<NativeObject>().getElementsHeader()->isFrozen())
+ return true;
+
+ // A GC may have caused cache.value() to become stale as it is not traced.
+ // In this case the IonScript will have been invalidated, so check for that.
+ // Assert no further GC is possible past this point.
+ JS::AutoAssertNoGC nogc;
+ if (ion->invalidated())
+ return true;
+
+ // The property did not exist before, now we can try to inline the property add.
+ bool checkTypeset = false;
+ if (tryNativeAddSlot &&
+ IsPropertyAddInlineable(cx, &obj->as<NativeObject>(), id, value(), oldShape,
+ needsTypeBarrier(), &checkTypeset))
+ {
+ if (!attachAddSlot(cx, outerScript, ion, obj, id, oldShape, oldGroup, checkTypeset))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ checkTypeset = false;
+ if (CanAttachAddUnboxedExpando(cx, obj, oldShape, id, value(), needsTypeBarrier(),
+ &checkTypeset))
+ {
+ if (!attachAddSlot(cx, outerScript, ion, obj, id, oldShape, oldGroup, checkTypeset))
+ return false;
+ *emitted = true;
+ return true;
+ }
+
+ return true;
+}
+
+bool
+SetPropertyIC::update(JSContext* cx, HandleScript outerScript, size_t cacheIndex, HandleObject obj,
+ HandleValue idval, HandleValue value)
+{
+ IonScript* ion = outerScript->ionScript();
+ SetPropertyIC& cache = ion->getCache(cacheIndex).toSetProperty();
+
+    // Remember the old group and shape if we may attach an add-property stub.
+    // Also, some code under tryAttachStub depends on obj having a non-lazy
+    // group; see for instance CanInlineSetPropTypeCheck.
+ RootedObjectGroup oldGroup(cx);
+ RootedShape oldShape(cx);
+ if (cache.canAttachStub()) {
+ oldGroup = obj->getGroup(cx);
+ if (!oldGroup)
+ return false;
+
+ oldShape = obj->maybeShape();
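+        // Unboxed plain objects have no shape of their own; if the object has
+        // an expando, use the expando's last property as the old shape.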
+ if (obj->is<UnboxedPlainObject>()) {
+ MOZ_ASSERT(!oldShape);
+ if (UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando())
+ oldShape = expando->lastProperty();
+ }
+ }
+
+ RootedId id(cx);
+ bool emitted = false;
+ bool tryNativeAddSlot = false;
+ if (!cache.tryAttachStub(cx, outerScript, ion, obj, idval, value, &id, &emitted,
+ &tryNativeAddSlot))
+ {
+ return false;
+ }
+
+    // Set or add the property on the object; the inline caches are set up for the next execution.
+ if (JSOp(*cache.pc()) == JSOP_INITGLEXICAL) {
+ RootedScript script(cx);
+ jsbytecode* pc;
+ cache.getScriptedLocation(&script, &pc);
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ InitGlobalLexicalOperation(cx, &cx->global()->lexicalEnvironment(), script, pc, value);
+ } else if (*cache.pc() == JSOP_SETELEM || *cache.pc() == JSOP_STRICTSETELEM) {
+ if (!SetObjectElement(cx, obj, idval, value, cache.strict()))
+ return false;
+ } else {
+ RootedPropertyName name(cx, idval.toString()->asAtom().asPropertyName());
+ if (!SetProperty(cx, obj, name, value, cache.strict(), cache.pc()))
+ return false;
+ }
+
+ if (!emitted &&
+ !cache.tryAttachAddSlot(cx, outerScript, ion, obj, id, oldGroup, oldShape,
+ tryNativeAddSlot, &emitted))
+ {
+ return false;
+ }
+
+ if (!emitted)
+ JitSpew(JitSpew_IonIC, "Failed to attach SETPROP cache");
+
+ return true;
+}
+
+void
+SetPropertyIC::reset(ReprotectCode reprotect)
+{
+ IonCache::reset(reprotect);
+ hasGenericProxyStub_ = false;
+ hasDenseStub_ = false;
+}
+
+static bool
+GenerateDenseElement(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, const Value& idval, Register object,
+ TypedOrValueRegister index, TypedOrValueRegister output)
+{
+ Label failures;
+
+ // Guard object's shape.
+ RootedShape shape(cx, obj->as<NativeObject>().lastProperty());
+ if (!shape)
+ return false;
+ masm.branchTestObjShape(Assembler::NotEqual, object, shape, &failures);
+
+ // Ensure the index is an int32 value.
+ Register indexReg = InvalidReg;
+
+ if (index.hasValue()) {
+ indexReg = output.scratchReg().gpr();
+ MOZ_ASSERT(indexReg != InvalidReg);
+ ValueOperand val = index.valueReg();
+
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+
+ // Unbox the index.
+ masm.unboxInt32(val, indexReg);
+ } else {
+ MOZ_ASSERT(!index.typedReg().isFloat());
+ indexReg = index.typedReg().gpr();
+ }
+
+ // Load elements vector.
+ masm.push(object);
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), object);
+
+ Label hole;
+
+ // Guard on the initialized length.
+ Address initLength(object, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, indexReg, &hole);
+
+ // Check for holes & load the value.
+ masm.loadElementTypedOrValue(BaseObjectElementIndex(object, indexReg),
+ output, true, &hole);
+
+ masm.pop(object);
+ attacher.jumpRejoin(masm);
+
+ // All failures flow to here.
+ masm.bind(&hole);
+ masm.pop(object);
+ masm.bind(&failures);
+
+ attacher.jumpNextStub(masm);
+
+ return true;
+}
+
+bool
+GetPropertyIC::tryAttachDenseElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (hasDenseStub())
+ return true;
+
+ if (!obj->isNative() || !idval.isInt32())
+ return true;
+
+ if (uint32_t(idval.toInt32()) >= obj->as<NativeObject>().getDenseInitializedLength())
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+ if (!GenerateDenseElement(cx, masm, attacher, obj, idval, object(), id().reg(), output()))
+ return false;
+
+ setHasDenseStub();
+ return linkAndAttachStub(cx, masm, attacher, ion, "dense array",
+ JS::TrackedOutcome::ICGetElemStub_Dense);
+}
+
+/* static */ bool
+GetPropertyIC::canAttachDenseElementHole(JSObject* obj, HandleValue idval, TypedOrValueRegister output)
+{
+ if (!idval.isInt32() || idval.toInt32() < 0)
+ return false;
+
+ if (!output.hasValue())
+ return false;
+
+ if (!obj->isNative())
+ return false;
+
+ if (obj->as<NativeObject>().getDenseInitializedLength() == 0)
+ return false;
+
+ do {
+ if (obj->isIndexed())
+ return false;
+
+ if (ClassCanHaveExtraProperties(obj->getClass()))
+ return false;
+
+ JSObject* proto = obj->staticPrototype();
+ if (!proto)
+ break;
+
+ if (!proto->isNative())
+ return false;
+
+        // Make sure objects on the prototype chain don't have dense elements.
+ if (proto->as<NativeObject>().getDenseInitializedLength() != 0)
+ return false;
+
+ obj = proto;
+ } while (obj);
+
+ return true;
+}
+
+static bool
+GenerateDenseElementHole(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ IonScript* ion, JSObject* obj, HandleValue idval,
+ Register object, TypedOrValueRegister index, TypedOrValueRegister output)
+{
+ MOZ_ASSERT(GetPropertyIC::canAttachDenseElementHole(obj, idval, output));
+
+ Register scratchReg = output.valueReg().scratchReg();
+
+ // Guard on the shape and group, to prevent non-dense elements from appearing.
+ Label failures;
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(object, ShapedObject::offsetOfShape()),
+ ImmGCPtr(obj->as<NativeObject>().lastProperty()), &failures);
+
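+    // The shape guard above does not cover prototype changes when the proto is
+    // uncacheable, so also guard on the group's proto in that case.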
+ if (obj->hasUncacheableProto()) {
+ masm.loadPtr(Address(object, JSObject::offsetOfGroup()), scratchReg);
+ Address proto(scratchReg, ObjectGroup::offsetOfProto());
+ masm.branchPtr(Assembler::NotEqual, proto, ImmGCPtr(obj->staticPrototype()), &failures);
+ }
+
+ JSObject* pobj = obj->staticPrototype();
+ while (pobj) {
+ MOZ_ASSERT(pobj->as<NativeObject>().lastProperty());
+
+ masm.movePtr(ImmGCPtr(pobj), scratchReg);
+
+ // Non-singletons with uncacheable protos can change their proto
+ // without a shape change, so also guard on the group (which determines
+ // the proto) in this case.
+ if (pobj->hasUncacheableProto() && !pobj->isSingleton()) {
+ Address groupAddr(scratchReg, JSObject::offsetOfGroup());
+ masm.branchPtr(Assembler::NotEqual, groupAddr, ImmGCPtr(pobj->group()), &failures);
+ }
+
+ // Make sure the shape matches, to avoid non-dense elements.
+ masm.branchPtr(Assembler::NotEqual, Address(scratchReg, ShapedObject::offsetOfShape()),
+ ImmGCPtr(pobj->as<NativeObject>().lastProperty()), &failures);
+
+ // Load elements vector.
+ masm.loadPtr(Address(scratchReg, NativeObject::offsetOfElements()), scratchReg);
+
+        // Also make sure the prototype itself has no dense elements.
+        Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, Imm32(0), &failures);
+
+ pobj = pobj->staticPrototype();
+ }
+
+ // Ensure the index is an int32 value.
+ Register indexReg;
+ if (index.hasValue()) {
+ // Unbox the index.
+ ValueOperand val = index.valueReg();
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+ indexReg = scratchReg;
+ masm.unboxInt32(val, indexReg);
+ } else {
+ MOZ_ASSERT(index.type() == MIRType::Int32);
+ indexReg = index.typedReg().gpr();
+ }
+
+ // Make sure index is nonnegative.
+ masm.branch32(Assembler::LessThan, indexReg, Imm32(0), &failures);
+
+ // Save the object register.
+ Register elementsReg = object;
+ masm.push(object);
+
+ // Load elements vector.
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elementsReg);
+
+ // Guard on the initialized length.
+ Label hole;
+ Address initLength(elementsReg, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, indexReg, &hole);
+
+ // Load the value.
+ Label done;
+ masm.loadValue(BaseObjectElementIndex(elementsReg, indexReg), output.valueReg());
+ masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
+
+ // Load undefined for the hole.
+ masm.bind(&hole);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+
+ masm.bind(&done);
+ // Restore the object register.
+ if (elementsReg == object)
+ masm.pop(object);
+ attacher.jumpRejoin(masm);
+
+ // All failure flows through here.
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return true;
+}
+
+bool
+GetPropertyIC::tryAttachDenseElementHole(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!monitoredResult())
+ return true;
+
+ if (!canAttachDenseElementHole(obj, idval, output()))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+ GenerateDenseElementHole(cx, masm, attacher, ion, obj, idval, object(), id().reg(), output());
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "dense hole",
+ JS::TrackedOutcome::ICGetElemStub_DenseHole);
+}
+
+/* static */ bool
+GetPropertyIC::canAttachTypedOrUnboxedArrayElement(JSObject* obj, const Value& idval,
+ TypedOrValueRegister output)
+{
+ if (!obj->is<TypedArrayObject>() && !obj->is<UnboxedArrayObject>())
+ return false;
+
+ MOZ_ASSERT(idval.isInt32() || idval.isString());
+
+    // Don't emit a stub if the access is out of bounds. We need to make
+    // certain that we monitor the type coming out of the typed array when
+    // we generate the stub. Out-of-bounds accesses will hit the fallback
+    // path.
+ uint32_t index;
+ if (idval.isInt32()) {
+ index = idval.toInt32();
+ } else {
+ index = GetIndexFromString(idval.toString());
+ if (index == UINT32_MAX)
+ return false;
+ }
+
+ if (obj->is<TypedArrayObject>()) {
+ if (index >= obj->as<TypedArrayObject>().length())
+ return false;
+
+        // The output register is not yet specialized as a float register; the only
+        // way to accept float typed arrays for now is to return a Value type.
+ uint32_t arrayType = obj->as<TypedArrayObject>().type();
+ if (arrayType == Scalar::Float32 || arrayType == Scalar::Float64)
+ return output.hasValue();
+
+ return output.hasValue() || !output.typedReg().isFloat();
+ }
+
+ if (index >= obj->as<UnboxedArrayObject>().initializedLength())
+ return false;
+
+ JSValueType elementType = obj->as<UnboxedArrayObject>().elementType();
+ if (elementType == JSVAL_TYPE_DOUBLE)
+ return output.hasValue();
+
+ return output.hasValue() || !output.typedReg().isFloat();
+}
+
+static void
+GenerateGetTypedOrUnboxedArrayElement(JSContext* cx, MacroAssembler& masm,
+ IonCache::StubAttacher& attacher,
+ HandleObject array, const Value& idval, Register object,
+ const ConstantOrRegister& index, TypedOrValueRegister output,
+ bool allowDoubleResult)
+{
+ MOZ_ASSERT(GetPropertyIC::canAttachTypedOrUnboxedArrayElement(array, idval, output));
+
+ Label failures;
+
+ TestMatchingReceiver(masm, attacher, object, array, &failures);
+
+    // Decide which index type the stub should be optimized for.
+ Register tmpReg = output.scratchReg().gpr();
+ MOZ_ASSERT(tmpReg != InvalidReg);
+ Register indexReg = tmpReg;
+ if (idval.isString()) {
+ MOZ_ASSERT(GetIndexFromString(idval.toString()) != UINT32_MAX);
+
+ if (index.constant()) {
+ MOZ_ASSERT(idval == index.value());
+ masm.move32(Imm32(GetIndexFromString(idval.toString())), indexReg);
+ } else {
+ // Part 1: Get the string into a register
+ Register str;
+ if (index.reg().hasValue()) {
+ ValueOperand val = index.reg().valueReg();
+ masm.branchTestString(Assembler::NotEqual, val, &failures);
+
+ str = masm.extractString(val, indexReg);
+ } else {
+ MOZ_ASSERT(!index.reg().typedReg().isFloat());
+ str = index.reg().typedReg().gpr();
+ }
+
+ // Part 2: Call to translate the str into index
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+ regs.takeUnchecked(str);
+
+ Register temp = regs.takeAnyGeneral();
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(str);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GetIndexFromString));
+ masm.mov(ReturnReg, indexReg);
+
+ LiveRegisterSet ignore;
+ ignore.add(indexReg);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.branch32(Assembler::Equal, indexReg, Imm32(UINT32_MAX), &failures);
+ }
+ } else {
+ MOZ_ASSERT(idval.isInt32());
+ MOZ_ASSERT(!index.constant());
+
+ if (index.reg().hasValue()) {
+ ValueOperand val = index.reg().valueReg();
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+
+ // Unbox the index.
+ masm.unboxInt32(val, indexReg);
+ } else {
+ MOZ_ASSERT(!index.reg().typedReg().isFloat());
+ indexReg = index.reg().typedReg().gpr();
+ }
+ }
+
+ Label popObjectAndFail;
+
+ if (array->is<TypedArrayObject>()) {
+ // Guard on the initialized length.
+ Address length(object, TypedArrayObject::lengthOffset());
+ masm.branch32(Assembler::BelowOrEqual, length, indexReg, &failures);
+
+ // Save the object register on the stack in case of failure.
+ Register elementReg = object;
+ masm.push(object);
+
+ // Load elements vector.
+ masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), elementReg);
+
+        // Load the value. We use an invalid register because the destination
+        // register is necessarily a non-double register.
+ Scalar::Type arrayType = array->as<TypedArrayObject>().type();
+ int width = Scalar::byteSize(arrayType);
+ BaseIndex source(elementReg, indexReg, ScaleFromElemWidth(width));
+ if (output.hasValue()) {
+ masm.loadFromTypedArray(arrayType, source, output.valueReg(), allowDoubleResult,
+ elementReg, &popObjectAndFail);
+ } else {
+ masm.loadFromTypedArray(arrayType, source, output.typedReg(), elementReg, &popObjectAndFail);
+ }
+ } else {
+ // Save the object register on the stack in case of failure.
+ masm.push(object);
+
+ // Guard on the initialized length.
+ masm.load32(Address(object, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()), object);
+ masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), object);
+ masm.branch32(Assembler::BelowOrEqual, object, indexReg, &popObjectAndFail);
+
+ // Load elements vector.
+ Register elementReg = object;
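+        // The object register currently holds the initialized length; reload the
+        // saved object pointer from the stack before loading the elements.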
+ masm.loadPtr(Address(masm.getStackPointer(), 0), object);
+ masm.loadPtr(Address(object, UnboxedArrayObject::offsetOfElements()), elementReg);
+
+ JSValueType elementType = array->as<UnboxedArrayObject>().elementType();
+ BaseIndex source(elementReg, indexReg, ScaleFromElemWidth(UnboxedTypeSize(elementType)));
+ masm.loadUnboxedProperty(source, elementType, output);
+ }
+
+ masm.pop(object);
+ attacher.jumpRejoin(masm);
+
+ // Restore the object before continuing to the next stub.
+ masm.bind(&popObjectAndFail);
+ masm.pop(object);
+ masm.bind(&failures);
+
+ attacher.jumpNextStub(masm);
+}
+
+bool
+GetPropertyIC::tryAttachTypedOrUnboxedArrayElement(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj,
+ HandleValue idval, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!canAttachTypedOrUnboxedArrayElement(obj, idval, output()))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+ GenerateGetTypedOrUnboxedArrayElement(cx, masm, attacher, obj, idval, object(), id(),
+ output(), allowDoubleResult_);
+ return linkAndAttachStub(cx, masm, attacher, ion, "typed array",
+ JS::TrackedOutcome::ICGetElemStub_TypedArray);
+}
+
+bool
+GetPropertyIC::tryAttachArgumentsElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted)
+{
+ MOZ_ASSERT(canAttachStub());
+ MOZ_ASSERT(!*emitted);
+
+ if (!IsOptimizableArgumentsObjectForGetElem(obj, idval))
+ return true;
+
+ MOZ_ASSERT(obj->is<ArgumentsObject>());
+
+ if (hasArgumentsElementStub(obj->is<MappedArgumentsObject>()))
+ return true;
+
+ TypedOrValueRegister index = id().reg();
+ if (index.type() != MIRType::Value && index.type() != MIRType::Int32)
+ return true;
+
+ MOZ_ASSERT(output().hasValue());
+
+ *emitted = true;
+
+ Label failures;
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Register tmpReg = output().scratchReg().gpr();
+ MOZ_ASSERT(tmpReg != InvalidReg);
+
+ masm.branchTestObjClass(Assembler::NotEqual, object(), tmpReg, obj->getClass(), &failures);
+
+ // Get initial ArgsObj length value, test if length or any element have
+ // been overridden.
+ masm.unboxInt32(Address(object(), ArgumentsObject::getInitialLengthSlotOffset()), tmpReg);
+ masm.branchTest32(Assembler::NonZero, tmpReg,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
+ ArgumentsObject::ELEMENT_OVERRIDDEN_BIT),
+ &failures);
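+    // The initial length is stored shifted left by the packed flag bits; shift
+    // them off to recover the actual length.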
+ masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpReg);
+
+    // Decide which index type the stub should be optimized for.
+ Register indexReg;
+
+ // Check index against length.
+ Label failureRestoreIndex;
+ if (index.hasValue()) {
+ ValueOperand val = index.valueReg();
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+ indexReg = val.scratchReg();
+
+ masm.unboxInt32(val, indexReg);
+ masm.branch32(Assembler::AboveOrEqual, indexReg, tmpReg, &failureRestoreIndex);
+ } else {
+ MOZ_ASSERT(index.type() == MIRType::Int32);
+ indexReg = index.typedReg().gpr();
+ masm.branch32(Assembler::AboveOrEqual, indexReg, tmpReg, &failures);
+ }
+
+ // Fail if we have a RareArgumentsData (elements were deleted).
+ masm.loadPrivate(Address(object(), ArgumentsObject::getDataSlotOffset()), tmpReg);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(tmpReg, offsetof(ArgumentsData, rareData)),
+ ImmWord(0),
+ &failureRestoreIndex);
+
+ // Get the address to load from into tmpReg
+ masm.loadPrivate(Address(object(), ArgumentsObject::getDataSlotOffset()), tmpReg);
+ masm.addPtr(Imm32(ArgumentsData::offsetOfArgs()), tmpReg);
+
+ BaseValueIndex elemIdx(tmpReg, indexReg);
+
+ // Ensure result is not magic value, and type-check result.
+ masm.branchTestMagic(Assembler::Equal, elemIdx, &failureRestoreIndex);
+
+ masm.loadTypedOrValue(elemIdx, output());
+
+ // indexReg may need to be reconstructed if it was originally a value.
+ if (index.hasValue())
+ masm.tagValue(JSVAL_TYPE_INT32, indexReg, index.valueReg());
+
+ // Success.
+ attacher.jumpRejoin(masm);
+
+ // Restore the object before continuing to the next stub.
+ masm.pop(indexReg);
+ masm.bind(&failureRestoreIndex);
+ if (index.hasValue())
+ masm.tagValue(JSVAL_TYPE_INT32, indexReg, index.valueReg());
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ if (obj->is<UnmappedArgumentsObject>()) {
+ MOZ_ASSERT(!hasUnmappedArgumentsElementStub_);
+ hasUnmappedArgumentsElementStub_ = true;
+ return linkAndAttachStub(cx, masm, attacher, ion, "ArgsObj element (unmapped)",
+ JS::TrackedOutcome::ICGetElemStub_ArgsElementUnmapped);
+ }
+
+ MOZ_ASSERT(!hasMappedArgumentsElementStub_);
+ hasMappedArgumentsElementStub_ = true;
+ return linkAndAttachStub(cx, masm, attacher, ion, "ArgsObj element (mapped)",
+ JS::TrackedOutcome::ICGetElemStub_ArgsElementMapped);
+}
+
+static bool
+IsDenseElementSetInlineable(JSObject* obj, const Value& idval, const ConstantOrRegister& val,
+ bool needsTypeBarrier, bool* checkTypeset)
+{
+ if (!obj->is<ArrayObject>())
+ return false;
+
+ if (obj->watched())
+ return false;
+
+ if (!idval.isInt32())
+ return false;
+
+    // The object may have a setter definition, either directly or on a
+    // prototype (or on the target object of a proxy prototype), that handles
+    // a particular integer write. Scan the prototype and shape chain to make
+    // sure that this is not the case.
+ JSObject* curObj = obj;
+ while (curObj) {
+ // Ensure object is native. (This guarantees static prototype below.)
+ if (!curObj->isNative())
+ return false;
+
+ // Ensure all indexed properties are stored in dense elements.
+ if (curObj->isIndexed())
+ return false;
+
+ curObj = curObj->staticPrototype();
+ }
+
+ *checkTypeset = false;
+ if (needsTypeBarrier && !CanInlineSetPropTypeCheck(obj, JSID_VOID, val, checkTypeset))
+ return false;
+
+ return true;
+}
+
+static bool
+IsTypedArrayElementSetInlineable(JSObject* obj, const Value& idval, const Value& value)
+{
+ // Don't bother attaching stubs for assigning strings, objects or symbols.
+ return obj->is<TypedArrayObject>() && idval.isInt32() &&
+ !value.isString() && !value.isObject() && !value.isSymbol();
+}
+
+static void
+StoreDenseElement(MacroAssembler& masm, const ConstantOrRegister& value, Register elements,
+ BaseObjectElementIndex target)
+{
+ // If the ObjectElements::CONVERT_DOUBLE_ELEMENTS flag is set, int32 values
+ // have to be converted to double first. If the value is not int32, it can
+ // always be stored directly.
+
+ Address elementsFlags(elements, ObjectElements::offsetOfFlags());
+ if (value.constant()) {
+ Value v = value.value();
+ Label done;
+ if (v.isInt32()) {
+ Label dontConvert;
+ masm.branchTest32(Assembler::Zero, elementsFlags,
+ Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+ &dontConvert);
+ masm.storeValue(DoubleValue(v.toInt32()), target);
+ masm.jump(&done);
+ masm.bind(&dontConvert);
+ }
+ masm.storeValue(v, target);
+ masm.bind(&done);
+ return;
+ }
+
+ TypedOrValueRegister reg = value.reg();
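+    // Typed values that are not int32 never need the double conversion and can
+    // be stored directly.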
+ if (reg.hasTyped() && reg.type() != MIRType::Int32) {
+ masm.storeTypedOrValue(reg, target);
+ return;
+ }
+
+ Label convert, storeValue, done;
+ masm.branchTest32(Assembler::NonZero, elementsFlags,
+ Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+ &convert);
+ masm.bind(&storeValue);
+ masm.storeTypedOrValue(reg, target);
+ masm.jump(&done);
+
+ masm.bind(&convert);
+ if (reg.hasValue()) {
+ masm.branchTestInt32(Assembler::NotEqual, reg.valueReg(), &storeValue);
+ masm.int32ValueToDouble(reg.valueReg(), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, target);
+ } else {
+ MOZ_ASSERT(reg.type() == MIRType::Int32);
+ masm.convertInt32ToDouble(reg.typedReg().gpr(), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, target);
+ }
+
+ masm.bind(&done);
+}
+
+static bool
+GenerateSetDenseElement(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ JSObject* obj, const Value& idval, bool guardHoles, Register object,
+ TypedOrValueRegister index, const ConstantOrRegister& value,
+ Register tempToUnboxIndex, Register temp,
+ bool needsTypeBarrier, bool checkTypeset)
+{
+ MOZ_ASSERT(obj->isNative());
+ MOZ_ASSERT(idval.isInt32());
+
+ Label failures;
+
+ // Guard object is a dense array.
+ Shape* shape = obj->as<NativeObject>().lastProperty();
+ if (!shape)
+ return false;
+ masm.branchTestObjShape(Assembler::NotEqual, object, shape, &failures);
+
+ // Guard that the incoming value is in the type set for the property
+ // if a type barrier is required.
+ if (needsTypeBarrier) {
+ masm.branchTestObjGroup(Assembler::NotEqual, object, obj->group(), &failures);
+ if (checkTypeset)
+ CheckTypeSetForWrite(masm, obj, JSID_VOID, temp, value, &failures);
+ }
+
+ // Ensure the index is an int32 value.
+ Register indexReg;
+ if (index.hasValue()) {
+ ValueOperand val = index.valueReg();
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+
+ indexReg = masm.extractInt32(val, tempToUnboxIndex);
+ } else {
+ MOZ_ASSERT(!index.typedReg().isFloat());
+ indexReg = index.typedReg().gpr();
+ }
+
+ {
+ // Load obj->elements.
+ Register elements = temp;
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);
+
+ // Compute the location of the element.
+ BaseObjectElementIndex target(elements, indexReg);
+
+ Label storeElement;
+
+ // If TI cannot help us deal with HOLES by preventing indexed properties
+ // on the prototype chain, we have to be very careful to check for ourselves
+ // to avoid stomping on what should be a setter call. Start by only allowing things
+ // within the initialized length.
+ if (guardHoles) {
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, indexReg, &failures);
+ } else {
+ // Guard that we can increase the initialized length.
+ Address capacity(elements, ObjectElements::offsetOfCapacity());
+ masm.branch32(Assembler::BelowOrEqual, capacity, indexReg, &failures);
+
+ // Guard on the initialized length.
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::Below, initLength, indexReg, &failures);
+
+ // if (initLength == index)
+ Label inBounds;
+ masm.branch32(Assembler::NotEqual, initLength, indexReg, &inBounds);
+ {
+                // Increase the initialized length.
+ Register newLength = indexReg;
+ masm.add32(Imm32(1), newLength);
+ masm.store32(newLength, initLength);
+
+ // Increase length if needed.
+ Label bumpedLength;
+ Address length(elements, ObjectElements::offsetOfLength());
+ masm.branch32(Assembler::AboveOrEqual, length, indexReg, &bumpedLength);
+ masm.store32(newLength, length);
+ masm.bind(&bumpedLength);
+
+ // Restore the index.
+ masm.add32(Imm32(-1), newLength);
+ masm.jump(&storeElement);
+ }
+ // else
+ masm.bind(&inBounds);
+ }
+
+ if (cx->zone()->needsIncrementalBarrier())
+ masm.callPreBarrier(target, MIRType::Value);
+
+ // Store the value.
+ if (guardHoles)
+ masm.branchTestMagic(Assembler::Equal, target, &failures);
+ else
+ masm.bind(&storeElement);
+ StoreDenseElement(masm, value, elements, target);
+ }
+ attacher.jumpRejoin(masm);
+
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachDenseElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, const Value& idval, bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(canAttachStub());
+
+ if (hasDenseStub())
+ return true;
+
+ bool checkTypeset = false;
+ if (!IsDenseElementSetInlineable(obj, idval, value(), needsTypeBarrier(), &checkTypeset))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+ if (!GenerateSetDenseElement(cx, masm, attacher, obj, idval,
+ guardHoles(), object(), id().reg(),
+ value(), tempToUnboxIndex(), temp(),
+ needsTypeBarrier(), checkTypeset))
+ {
+ return false;
+ }
+
+ setHasDenseStub();
+ const char* message = guardHoles() ? "dense array (holes)" : "dense array";
+ return linkAndAttachStub(cx, masm, attacher, ion, message,
+ JS::TrackedOutcome::ICSetElemStub_Dense);
+}
+
+static bool
+GenerateSetTypedArrayElement(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
+ HandleObject tarr, Register object, TypedOrValueRegister index,
+ const ConstantOrRegister& value, Register tempUnbox, Register temp,
+ FloatRegister tempDouble, FloatRegister tempFloat32)
+{
+ Label failures, done, popObjectAndFail;
+
+ // Guard on the shape.
+ Shape* shape = tarr->as<TypedArrayObject>().lastProperty();
+ if (!shape)
+ return false;
+ masm.branchTestObjShape(Assembler::NotEqual, object, shape, &failures);
+
+ // Ensure the index is an int32.
+ Register indexReg;
+ if (index.hasValue()) {
+ ValueOperand val = index.valueReg();
+ masm.branchTestInt32(Assembler::NotEqual, val, &failures);
+
+ indexReg = masm.extractInt32(val, tempUnbox);
+ } else {
+ MOZ_ASSERT(!index.typedReg().isFloat());
+ indexReg = index.typedReg().gpr();
+ }
+
+ // Guard on the length.
+ Address length(object, TypedArrayObject::lengthOffset());
+ masm.unboxInt32(length, temp);
+ masm.branch32(Assembler::BelowOrEqual, temp, indexReg, &done);
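+    // Out-of-bounds writes branch to |done| below and are treated as no-ops.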
+
+ // Load the elements vector.
+ Register elements = temp;
+ masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), elements);
+
+ // Set the value.
+ Scalar::Type arrayType = tarr->as<TypedArrayObject>().type();
+ int width = Scalar::byteSize(arrayType);
+ BaseIndex target(elements, indexReg, ScaleFromElemWidth(width));
+
+ if (arrayType == Scalar::Float32) {
+ MOZ_ASSERT_IF(hasUnaliasedDouble(), tempFloat32 != InvalidFloatReg);
+ FloatRegister tempFloat = hasUnaliasedDouble() ? tempFloat32 : tempDouble;
+ if (!masm.convertConstantOrRegisterToFloat(cx, value, tempFloat, &failures))
+ return false;
+ masm.storeToTypedFloatArray(arrayType, tempFloat, target);
+ } else if (arrayType == Scalar::Float64) {
+ if (!masm.convertConstantOrRegisterToDouble(cx, value, tempDouble, &failures))
+ return false;
+ masm.storeToTypedFloatArray(arrayType, tempDouble, target);
+ } else {
+ // On x86 we only have 6 registers available to use, so reuse the object
+ // register to compute the intermediate value to store and restore it
+ // afterwards.
+ masm.push(object);
+
+ if (arrayType == Scalar::Uint8Clamped) {
+ if (!masm.clampConstantOrRegisterToUint8(cx, value, tempDouble, object,
+ &popObjectAndFail))
+ {
+ return false;
+ }
+ } else {
+ if (!masm.truncateConstantOrRegisterToInt32(cx, value, tempDouble, object,
+ &popObjectAndFail))
+ {
+ return false;
+ }
+ }
+ masm.storeToTypedIntArray(arrayType, object, target);
+
+ masm.pop(object);
+ }
+
+ // Out-of-bound writes jump here as they are no-ops.
+ masm.bind(&done);
+ attacher.jumpRejoin(masm);
+
+ if (popObjectAndFail.used()) {
+ masm.bind(&popObjectAndFail);
+ masm.pop(object);
+ }
+
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+ return true;
+}
+
+bool
+SetPropertyIC::tryAttachTypedArrayElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, HandleValue val,
+ bool* emitted)
+{
+ MOZ_ASSERT(!*emitted);
+ MOZ_ASSERT(canAttachStub());
+
+ if (!IsTypedArrayElementSetInlineable(obj, idval, val))
+ return true;
+
+ *emitted = true;
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+ if (!GenerateSetTypedArrayElement(cx, masm, attacher, obj,
+ object(), id().reg(), value(),
+ tempToUnboxIndex(), temp(), tempDouble(), tempFloat32()))
+ {
+ return false;
+ }
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "typed array",
+ JS::TrackedOutcome::ICSetElemStub_TypedArray);
+}
+
+bool
+BindNameIC::attachGlobal(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain)
+{
+ MOZ_ASSERT(envChain->is<GlobalObject>());
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ // Guard on the env chain.
+ attacher.branchNextStub(masm, Assembler::NotEqual, environmentChainReg(),
+ ImmGCPtr(envChain));
+ masm.movePtr(ImmGCPtr(envChain), outputReg());
+
+ attacher.jumpRejoin(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "global");
+}
+
+static inline void
+GenerateEnvironmentChainGuard(MacroAssembler& masm, JSObject* envObj,
+ Register envObjReg, Shape* shape, Label* failures)
+{
+ if (envObj->is<CallObject>()) {
+ // We can skip a guard on the call object if the script's bindings are
+ // guaranteed to be immutable (and thus cannot introduce shadowing
+ // variables).
+ CallObject* callObj = &envObj->as<CallObject>();
+ JSFunction* fun = &callObj->callee();
+        // The function might have been relazified under rare conditions.
+        // In that case, we pessimistically create the guard, as we'd
+        // need to root various pointers to delazify it.
+ if (fun->hasScript()) {
+ JSScript* script = fun->nonLazyScript();
+ if (!script->funHasExtensibleScope())
+ return;
+ }
+ } else if (envObj->is<GlobalObject>()) {
+ // If this is the last object on the scope walk, and the property we've
+ // found is not configurable, then we don't need a shape guard because
+ // the shape cannot be removed.
+ if (shape && !shape->configurable())
+ return;
+ }
+
+ Address shapeAddr(envObjReg, ShapedObject::offsetOfShape());
+ masm.branchPtr(Assembler::NotEqual, shapeAddr,
+ ImmGCPtr(envObj->as<NativeObject>().lastProperty()), failures);
+}
+
+static void
+GenerateEnvironmentChainGuards(MacroAssembler& masm, JSObject* envChain, JSObject* holder,
+ Register outputReg, Label* failures, bool skipLastGuard = false)
+{
+ JSObject* tobj = envChain;
+
+ // Walk up the env chain. Note that IsCacheableEnvironmentChain guarantees the
+ // |tobj == holder| condition terminates the loop.
+ while (true) {
+ MOZ_ASSERT(IsCacheableEnvironment(tobj) || tobj->is<GlobalObject>());
+
+ if (skipLastGuard && tobj == holder)
+ break;
+
+ GenerateEnvironmentChainGuard(masm, tobj, outputReg, nullptr, failures);
+
+ if (tobj == holder)
+ break;
+
+ // Load the next link.
+ tobj = &tobj->as<EnvironmentObject>().enclosingEnvironment();
+ masm.extractObject(Address(outputReg, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ outputReg);
+ }
+}
+
+bool
+BindNameIC::attachNonGlobal(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject holder)
+{
+ MOZ_ASSERT(IsCacheableEnvironment(envChain));
+
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ // Guard on the shape of the env chain.
+ Label failures;
+ attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
+ Address(environmentChainReg(), ShapedObject::offsetOfShape()),
+ ImmGCPtr(envChain->as<NativeObject>().lastProperty()),
+ holder != envChain ? &failures : nullptr);
+
+ if (holder != envChain) {
+ JSObject* parent = &envChain->as<EnvironmentObject>().enclosingEnvironment();
+ masm.extractObject(Address(environmentChainReg(),
+ EnvironmentObject::offsetOfEnclosingEnvironment()),
+ outputReg());
+
+ GenerateEnvironmentChainGuards(masm, parent, holder, outputReg(), &failures);
+ } else {
+ masm.movePtr(environmentChainReg(), outputReg());
+ }
+
+ // At this point outputReg holds the object on which the property
+ // was found, so we're done.
+ attacher.jumpRejoin(masm);
+
+ // All failures flow to here, so there is a common point to patch.
+ if (holder != envChain) {
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+ }
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "non-global");
+}
+
+static bool
+IsCacheableNonGlobalEnvironmentChain(JSObject* envChain, JSObject* holder)
+{
+ while (true) {
+ if (!IsCacheableEnvironment(envChain)) {
+ JitSpew(JitSpew_IonIC, "Non-cacheable object on env chain");
+ return false;
+ }
+
+ if (envChain == holder)
+ return true;
+
+ envChain = &envChain->as<EnvironmentObject>().enclosingEnvironment();
+ if (!envChain) {
+ JitSpew(JitSpew_IonIC, "env chain indirect hit");
+ return false;
+ }
+ }
+
+ MOZ_CRASH("Invalid env chain");
+}
+
+JSObject*
+BindNameIC::update(JSContext* cx, HandleScript outerScript, size_t cacheIndex,
+ HandleObject envChain)
+{
+ IonScript* ion = outerScript->ionScript();
+ BindNameIC& cache = ion->getCache(cacheIndex).toBindName();
+ HandlePropertyName name = cache.name();
+
+ RootedObject holder(cx);
+ if (!LookupNameUnqualified(cx, name, envChain, &holder))
+ return nullptr;
+
+ // Stop generating new stubs once we hit the stub count limit, see
+ // GetPropertyCache.
+ if (cache.canAttachStub()) {
+ if (envChain->is<GlobalObject>()) {
+ if (!cache.attachGlobal(cx, outerScript, ion, envChain))
+ return nullptr;
+ } else if (IsCacheableNonGlobalEnvironmentChain(envChain, holder)) {
+ if (!cache.attachNonGlobal(cx, outerScript, ion, envChain, holder))
+ return nullptr;
+ } else {
+ JitSpew(JitSpew_IonIC, "BINDNAME uncacheable env chain");
+ }
+ }
+
+ return holder;
+}
+
+bool
+NameIC::attachReadSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject holderBase,
+ HandleNativeObject holder, HandleShape shape)
+{
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ Label failures;
+ StubAttacher attacher(*this);
+
+ Register scratchReg = outputReg().valueReg().scratchReg();
+
+ // Don't guard the base of the proto chain the name was found on. It will be guarded
+ // by GenerateReadSlot().
+ masm.mov(environmentChainReg(), scratchReg);
+ GenerateEnvironmentChainGuards(masm, envChain, holderBase, scratchReg, &failures,
+ /* skipLastGuard = */true);
+
+    // GenerateEnvironmentChainGuards leaves the last env chain object in
+    // scratchReg, even though it doesn't generate the extra guard.
+ //
+ // NAME ops must do their own TDZ checks.
+ GenerateReadSlot(cx, ion, masm, attacher, CheckTDZ, holderBase, holder, shape, scratchReg,
+ outputReg(), failures.used() ? &failures : nullptr);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "generic",
+ JS::TrackedOutcome::ICNameStub_ReadSlot);
+}
+
+static bool
+IsCacheableEnvironmentChain(JSObject* envChain, JSObject* obj)
+{
+ JSObject* obj2 = envChain;
+ while (obj2) {
+ if (!IsCacheableEnvironment(obj2) && !obj2->is<GlobalObject>())
+ return false;
+
+ // Stop once we hit the global or target obj.
+ if (obj2->is<GlobalObject>() || obj2 == obj)
+ break;
+
+ obj2 = obj2->enclosingEnvironment();
+ }
+
+ return obj == obj2;
+}
+
+static bool
+IsCacheableNameReadSlot(HandleObject envChain, HandleObject obj,
+ HandleObject holder, HandleShape shape, jsbytecode* pc,
+ const TypedOrValueRegister& output)
+{
+ if (!shape)
+ return false;
+ if (!obj->isNative())
+ return false;
+
+ if (obj->is<GlobalObject>()) {
+ // Support only simple property lookups.
+ if (!IsCacheableGetPropReadSlotForIonOrCacheIR(obj, holder, shape) &&
+ !IsCacheableNoProperty(obj, holder, shape, pc, output))
+ return false;
+ } else if (obj->is<ModuleEnvironmentObject>()) {
+ // We don't yet support lookups in a module environment.
+ return false;
+ } else if (obj->is<CallObject>()) {
+ MOZ_ASSERT(obj == holder);
+ if (!shape->hasDefaultGetter())
+ return false;
+ } else {
+ // We don't yet support lookups on Block or DeclEnv objects.
+ return false;
+ }
+
+ return IsCacheableEnvironmentChain(envChain, obj);
+}
+
+bool
+NameIC::attachCallGetter(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject obj, HandleObject holder,
+ HandleShape shape, void* returnAddr)
+{
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ StubAttacher attacher(*this);
+
+ Label failures;
+ Register scratchReg = outputReg().valueReg().scratchReg();
+
+ // Don't guard the base of the proto chain the name was found on. It will be guarded
+ // by GenerateCallGetter().
+ masm.mov(environmentChainReg(), scratchReg);
+ GenerateEnvironmentChainGuards(masm, envChain, obj, scratchReg, &failures,
+ /* skipLastGuard = */true);
+
+    // GenerateEnvironmentChainGuards leaves the last env chain object in
+    // scratchReg, even though it doesn't generate the extra guard.
+ if (!GenerateCallGetter(cx, ion, masm, attacher, obj, holder, shape, liveRegs_,
+ scratchReg, outputReg(), returnAddr,
+ failures.used() ? &failures : nullptr))
+ {
+ return false;
+ }
+
+ const char* attachKind = "name getter";
+ return linkAndAttachStub(cx, masm, attacher, ion, attachKind,
+ JS::TrackedOutcome::ICNameStub_CallGetter);
+}
+
+static bool
+IsCacheableNameCallGetter(HandleObject envChain, HandleObject obj, HandleObject holder,
+ HandleShape shape)
+{
+ if (!shape)
+ return false;
+ if (!obj->is<GlobalObject>())
+ return false;
+
+ if (!IsCacheableEnvironmentChain(envChain, obj))
+ return false;
+
+ return IsCacheableGetPropCallNative(obj, holder, shape) ||
+ IsCacheableGetPropCallPropertyOp(obj, holder, shape) ||
+ IsCacheableGetPropCallScripted(obj, holder, shape);
+}
+
+bool
+NameIC::attachTypeOfNoProperty(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain)
+{
+ MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
+ Label failures;
+ StubAttacher attacher(*this);
+
+ Register scratchReg = outputReg().valueReg().scratchReg();
+
+ masm.movePtr(environmentChainReg(), scratchReg);
+
+ // Generate env chain guards.
+ // Since the property was not defined on any object, iterate until reaching the global.
+ JSObject* tobj = envChain;
+ while (true) {
+ GenerateEnvironmentChainGuard(masm, tobj, scratchReg, nullptr, &failures);
+
+ if (tobj->is<GlobalObject>())
+ break;
+
+ // Load the next link.
+ tobj = &tobj->as<EnvironmentObject>().enclosingEnvironment();
+ masm.extractObject(Address(scratchReg, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ scratchReg);
+ }
+
+ masm.moveValue(UndefinedValue(), outputReg().valueReg());
+ attacher.jumpRejoin(masm);
+
+ masm.bind(&failures);
+ attacher.jumpNextStub(masm);
+
+ return linkAndAttachStub(cx, masm, attacher, ion, "generic",
+ JS::TrackedOutcome::ICNameStub_TypeOfNoProperty);
+}
+
+static bool
+IsCacheableNameNoProperty(HandleObject envChain, HandleObject obj,
+ HandleObject holder, HandleShape shape, jsbytecode* pc,
+ NameIC& cache)
+{
+ if (cache.isTypeOf() && !shape) {
+ MOZ_ASSERT(!obj);
+ MOZ_ASSERT(!holder);
+ MOZ_ASSERT(envChain);
+
+ // Assert those extra things checked by IsCacheableNoProperty().
+ MOZ_ASSERT(cache.outputReg().hasValue());
+ MOZ_ASSERT(pc != nullptr);
+
+ return true;
+ }
+
+ return false;
+}
+
+bool
+NameIC::update(JSContext* cx, HandleScript outerScript, size_t cacheIndex, HandleObject envChain,
+ MutableHandleValue vp)
+{
+ IonScript* ion = outerScript->ionScript();
+
+ NameIC& cache = ion->getCache(cacheIndex).toName();
+ RootedPropertyName name(cx, cache.name());
+
+ RootedScript script(cx);
+ jsbytecode* pc;
+ cache.getScriptedLocation(&script, &pc);
+
+ RootedObject obj(cx);
+ RootedObject holder(cx);
+ RootedShape shape(cx);
+ if (!LookupName(cx, name, envChain, &obj, &holder, &shape))
+ return false;
+
+ // Look first. Don't generate cache entries if the lookup fails.
+ if (cache.isTypeOf()) {
+ if (!FetchName<true>(cx, obj, holder, name, shape, vp))
+ return false;
+ } else {
+ if (!FetchName<false>(cx, obj, holder, name, shape, vp))
+ return false;
+ }
+
+ if (cache.canAttachStub()) {
+ if (IsCacheableNameReadSlot(envChain, obj, holder, shape, pc, cache.outputReg())) {
+ if (!cache.attachReadSlot(cx, outerScript, ion, envChain, obj,
+ holder.as<NativeObject>(), shape))
+ {
+ return false;
+ }
+ } else if (IsCacheableNameCallGetter(envChain, obj, holder, shape)) {
+ void* returnAddr = GetReturnAddressToIonCode(cx);
+ if (!cache.attachCallGetter(cx, outerScript, ion, envChain, obj, holder, shape,
+ returnAddr))
+ {
+ return false;
+ }
+ } else if (IsCacheableNameNoProperty(envChain, obj, holder, shape, pc, cache)) {
+ if (!cache.attachTypeOfNoProperty(cx, outerScript, ion, envChain))
+ return false;
+ }
+ }
+
+ // Monitor changes to cache entry.
+ TypeScript::Monitor(cx, script, pc, vp);
+
+ return true;
+}
diff --git a/js/src/jit/IonCaches.h b/js/src/jit/IonCaches.h
new file mode 100644
index 000000000..173e06c6b
--- /dev/null
+++ b/js/src/jit/IonCaches.h
@@ -0,0 +1,848 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonCaches_h
+#define jit_IonCaches_h
+
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Assembler-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+#include "jit/JitCompartment.h"
+#include "jit/Registers.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/TrackedOptimizationInfo.h"
+
+#include "vm/TypedArrayCommon.h"
+
+namespace js {
+namespace jit {
+
+class LInstruction;
+
+#define IONCACHE_KIND_LIST(_) \
+ _(GetProperty) \
+ _(SetProperty) \
+ _(BindName) \
+ _(Name)
+
+// Forward declarations of Cache kinds.
+#define FORWARD_DECLARE(kind) class kind##IC;
+IONCACHE_KIND_LIST(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+class IonCacheVisitor
+{
+ public:
+#define VISIT_INS(op) \
+ virtual void visit##op##IC(CodeGenerator* codegen) { \
+ MOZ_CRASH("NYI: " #op "IC"); \
+ }
+
+ IONCACHE_KIND_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+// Common structure encoding the state of a polymorphic inline cache contained
+// in the code for an IonScript. IonCaches are used for polymorphic operations
+// where multiple implementations may be required.
+//
+// Roughly speaking, the cache initially jumps to an out of line fragment
+// which invokes a cache function to perform the operation. The cache function
+// may generate a stub to perform the operation in certain cases (e.g. a
+// particular shape for an input object) and attach the stub to existing
+// stubs, forming a daisy chain of tests for how to perform the operation in
+// different circumstances.
+//
+// Eventually, if too many stubs are generated the cache function may disable
+// the cache, by generating a stub to make a call and perform the operation
+// within the VM.
+//
+// The caches initially generate a patchable jump to an out of line call
+// to the cache function. Stubs are attached by appending: when attaching a
+// new stub, we patch any failure conditions in the last generated stub to
+// jump to the new stub. Failure conditions in the new stub jump to the cache
+// function, which may generate new stubs.
+//
+// Control flow Pointers
+// =======# ----. .---->
+// # | |
+// #======> \-----/
+//
+// Initial state:
+//
+// JIT Code
+// +--------+ .---------------.
+// | | | |
+// |========| v +----------+ |
+// |== IC ==|====>| Cache Fn | |
+// |========| +----------+ |
+// | |<=# # |
+// | | #=======# |
+// +--------+ Rejoin path |
+// |________ |
+// | |
+// IC | |
+// Entry | |
+// +------------+ |
+// | lastJump_ |---------------/
+// +------------+
+// | ... |
+// +------------+
+//
+// Attaching stubs:
+//
+// Patch the jump pointed to by lastJump_ to jump to the new stub. Update
+// lastJump_ to be the new stub's failure jump. The failure jump of the new
+// stub goes to the fallback label, which is the cache function. In this
+// fashion, new stubs are _appended_ to the chain of stubs, as lastJump_
+// points to the _tail_ of the stub chain.
+//
+// JIT Code
+// +--------+ #=======================#
+// | | # v
+// |========| # +----------+ +------+
+// |== IC ==|=# | Cache Fn |<====| Stub |
+// |========| +----------+ ^ +------+
+// | |<=# # | #
+// | | #======#=========|=====#
+// +--------+ Rejoin path |
+// |________ |
+// | |
+// IC | |
+// Entry | |
+// +------------+ |
+// | lastJump_ |---------------/
+// +------------+
+// | ... |
+// +------------+
+//
+// While calls may be made to the cache function and other VM functions, the
+// cache may still be treated as pure during optimization passes, such that
+// LICM and GVN may be performed on operations around the cache as if the
+// operation cannot reenter scripted code through an Invoke() or otherwise have
+// unexpected behavior. This restricts the sorts of stubs which the cache can
+// generate or the behaviors which called functions can have, and if a called
+// function performs a possibly impure operation then the operation will be
+// marked as such and the calling script will be recompiled.
+//
+// Similarly, despite the presence of functions and multiple stubs generated
+// for a cache, the cache itself may be marked as idempotent and become hoisted
+// or coalesced by LICM or GVN. This also constrains the stubs which can be
+// generated for the cache.
+//
+// * IonCache usage
+//
+// IonCache is the base structure of an inline cache, which generates code stubs
+// dynamically and attaches them to an IonScript.
+//
+// A cache must at least provide a static update function, which will usually take
+// a JSContext*, followed by the cache index. The remaining arguments of the
+// update function usually correspond to the register inputs of the cache,
+// as it must perform the same operation as any of the stubs that it might
+// produce. The update function call is handled by the visit function of the
+// CodeGenerator corresponding to this IC.
+//
+// The CodeGenerator visit function, as opposed to other visit functions, has
+// two arguments. The first one is the OutOfLineUpdateCache which stores the LIR
+// instruction. The second one is the IC object. This function is called
+// once the IC is registered with the addCache function of CodeGeneratorShared.
+//
+// To register a cache, you must call the addCache function as follows:
+//
+// MyCodeIC cache(inputReg1, inputValueReg2, outputReg);
+// if (!addCache(lir, allocateCache(cache)))
+// return false;
+//
+// Once the cache is allocated with the allocateCache function, any modification
+// made to the cache will be ignored.
+//
+// The addCache function will produce a patchable jump at the location where
+// it is called. This jump will execute generated stubs and fall back to the code
+// of the visitMyCodeIC function if no stub matches.
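+//
+// For illustration only (MyCodeIC, its registers, and the exact pushArg order
+// are hypothetical; see the GetProperty/SetProperty visit functions in
+// CodeGenerator.cpp for the real pattern), a visit function is roughly:
+//
+//    void
+//    CodeGenerator::visitMyCodeIC(OutOfLineUpdateCache* ool, MyCodeIC* ic)
+//    {
+//        LInstruction* lir = ool->lir();
+//        saveLive(lir);
+//
+//        // Push the update function's arguments and call into the VM.
+//        pushArg(ic->input());
+//        pushArg(Imm32(ool->getCacheIndex()));
+//        callVM(MyCodeIC::UpdateInfo, lir);
+//
+//        // Store the returned value and rejoin the main code path.
+//        StoreValueTo(ic->output()).generate(this);
+//        restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());
+//        masm.jump(ool->rejoin());
+//    }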
+//
+// Warning: As the addCache function falls back on a VM call, calls to
+// addCache should not be on the same path as another VM call or as another
+// addCache, as this is not supported by the invalidation procedure.
+class IonCache
+{
+ public:
+ class StubAttacher;
+
+ enum Kind {
+# define DEFINE_CACHEKINDS(ickind) Cache_##ickind,
+ IONCACHE_KIND_LIST(DEFINE_CACHEKINDS)
+# undef DEFINE_CACHEKINDS
+ Cache_Invalid
+ };
+
+ // Cache testing and cast.
+# define CACHEKIND_CASTS(ickind) \
+ bool is##ickind() const { \
+ return kind() == Cache_##ickind; \
+ } \
+ inline ickind##IC& to##ickind(); \
+ inline const ickind##IC& to##ickind() const;
+ IONCACHE_KIND_LIST(CACHEKIND_CASTS)
+# undef CACHEKIND_CASTS
+
+ virtual Kind kind() const = 0;
+
+ virtual void accept(CodeGenerator* codegen, IonCacheVisitor* visitor) = 0;
+
+ public:
+
+ static const char* CacheName(Kind kind);
+
+ protected:
+ bool pure_ : 1;
+ bool idempotent_ : 1;
+ bool disabled_ : 1;
+ size_t stubCount_ : 5;
+
+ CodeLocationLabel fallbackLabel_;
+
+ // Location of this operation, nullptr for idempotent caches.
+ JSScript* script_;
+ jsbytecode* pc_;
+
+ // Location to use when updating profiler pseudostack when leaving this
+ // IC code to enter a callee.
+ jsbytecode* profilerLeavePc_;
+
+ CodeLocationJump initialJump_;
+ CodeLocationJump lastJump_;
+ CodeLocationLabel rejoinLabel_;
+
+ private:
+ static const size_t MAX_STUBS;
+ void incrementStubCount() {
+ // The IC should stop generating stubs before wrapping stubCount.
+ stubCount_++;
+ MOZ_ASSERT(stubCount_);
+ }
+
+ public:
+
+ IonCache()
+ : pure_(false),
+ idempotent_(false),
+ disabled_(false),
+ stubCount_(0),
+ fallbackLabel_(),
+ script_(nullptr),
+ pc_(nullptr),
+ profilerLeavePc_(nullptr),
+ initialJump_(),
+ lastJump_(),
+ rejoinLabel_()
+ {
+ }
+
+ void disable();
+ inline bool isDisabled() const {
+ return disabled_;
+ }
+
+ // Set the initial 'out-of-line' jump state of the cache. The fallbackLabel is
+ // the location of the out-of-line update (slow) path. This location will
+ // be set to the exitJump of the last generated stub.
+ void setFallbackLabel(CodeOffset fallbackLabel) {
+ fallbackLabel_ = fallbackLabel;
+ }
+
+ void setProfilerLeavePC(jsbytecode* pc) {
+ MOZ_ASSERT(pc != nullptr);
+ profilerLeavePc_ = pc;
+ }
+
+ // Get the address at which IC rejoins the mainline jitcode.
+ void* rejoinAddress() const {
+ return rejoinLabel_.raw();
+ }
+
+ void emitInitialJump(MacroAssembler& masm, RepatchLabel& entry);
+ void updateBaseAddress(JitCode* code, MacroAssembler& masm);
+
+ // Reset the cache around garbage collection.
+ virtual void reset(ReprotectCode reprotect);
+
+ bool canAttachStub() const {
+ return stubCount_ < MAX_STUBS;
+ }
+ bool empty() const {
+ return stubCount_ == 0;
+ }
+
+ enum LinkStatus {
+ LINK_ERROR,
+ CACHE_FLUSHED,
+ LINK_GOOD
+ };
+
+    // Use the Linker to link the generated code and check if any
+    // monitoring/allocation caused an invalidation of the running ion script;
+    // if so, this function returns CACHE_FLUSHED. In case of an allocation
+    // issue, this function returns LINK_ERROR.
+ LinkStatus linkCode(JSContext* cx, MacroAssembler& masm, StubAttacher& attacher, IonScript* ion,
+ JitCode** code);
+
+ // Fixup variables and update jumps in the list of stubs. Increment the
+ // number of attached stubs accordingly.
+ void attachStub(MacroAssembler& masm, StubAttacher& attacher, CodeLocationJump lastJump,
+ Handle<JitCode*> code);
+
+    // Combines both linkCode and attachStub into one function. In addition, it
+    // produces spew augmented with the attachKind string.
+ MOZ_MUST_USE bool linkAndAttachStub(JSContext* cx, MacroAssembler& masm, StubAttacher& attacher,
+ IonScript* ion, const char* attachKind,
+ JS::TrackedOutcome = JS::TrackedOutcome::ICOptStub_GenericSuccess);
+
+#ifdef DEBUG
+ bool isAllocated() {
+ return fallbackLabel_.isSet();
+ }
+#endif
+
+ bool pure() const {
+ return pure_;
+ }
+ bool idempotent() const {
+ return idempotent_;
+ }
+ void setIdempotent() {
+ MOZ_ASSERT(!idempotent_);
+ MOZ_ASSERT(!script_);
+ MOZ_ASSERT(!pc_);
+ idempotent_ = true;
+ }
+
+ void setScriptedLocation(JSScript* script, jsbytecode* pc) {
+ MOZ_ASSERT(!idempotent_);
+ script_ = script;
+ pc_ = pc;
+ }
+
+ void getScriptedLocation(MutableHandleScript pscript, jsbytecode** ppc) const {
+ pscript.set(script_);
+ *ppc = pc_;
+ }
+
+ jsbytecode* pc() const {
+ MOZ_ASSERT(pc_);
+ return pc_;
+ }
+
+ void trace(JSTracer* trc);
+};
+
+// Define the cache kind and pre-declare data structures used for calling inline
+// caches.
+#define CACHE_HEADER(ickind) \
+ Kind kind() const { \
+ return IonCache::Cache_##ickind; \
+ } \
+ \
+ void accept(CodeGenerator* codegen, IonCacheVisitor* visitor) { \
+ visitor->visit##ickind##IC(codegen); \
+ } \
+ \
+ static const VMFunction UpdateInfo;
+
+// Subclasses of IonCache for the various kinds of caches. These do not define
+// new data members; all caches must be of the same size.
+
+// Helper for idempotent GetPropertyIC location tracking. Declared externally
+// to be forward declarable.
+//
+// Since all the scripts stored in CacheLocations are guaranteed to have been
+// Ion compiled, and are kept alive by function objects in jitcode, and since
+// the CacheLocations only have the lifespan of the jitcode, there is no need
+// to trace or mark any of the scripts. Since JSScripts are always allocated
+// tenured, and never moved, we can keep raw pointers, and there is no need
+// for GCPtrScripts here.
+struct CacheLocation {
+ jsbytecode* pc;
+ JSScript* script;
+
+ CacheLocation(jsbytecode* pcin, JSScript* scriptin)
+ : pc(pcin), script(scriptin)
+ { }
+};
+
+class GetPropertyIC : public IonCache
+{
+ protected:
+ // Registers live after the cache, excluding output registers. The initial
+ // value of these registers must be preserved by the cache.
+ LiveRegisterSet liveRegs_;
+
+ Register object_;
+ ConstantOrRegister id_;
+ TypedOrValueRegister output_;
+
+ // Only valid if idempotent
+ size_t locationsIndex_;
+ size_t numLocations_;
+
+ static const size_t MAX_FAILED_UPDATES = 16;
+ uint16_t failedUpdates_;
+
+ bool monitoredResult_ : 1;
+ bool allowDoubleResult_ : 1;
+ bool hasTypedArrayLengthStub_ : 1;
+ bool hasMappedArgumentsLengthStub_ : 1;
+ bool hasUnmappedArgumentsLengthStub_ : 1;
+ bool hasMappedArgumentsElementStub_ : 1;
+ bool hasUnmappedArgumentsElementStub_ : 1;
+ bool hasGenericProxyStub_ : 1;
+ bool hasDenseStub_ : 1;
+
+ void emitIdGuard(MacroAssembler& masm, jsid id, Label* fail);
+
+ public:
+ GetPropertyIC(LiveRegisterSet liveRegs,
+ Register object, const ConstantOrRegister& id,
+ TypedOrValueRegister output,
+ bool monitoredResult, bool allowDoubleResult)
+ : liveRegs_(liveRegs),
+ object_(object),
+ id_(id),
+ output_(output),
+ locationsIndex_(0),
+ numLocations_(0),
+ failedUpdates_(0),
+ monitoredResult_(monitoredResult),
+ allowDoubleResult_(allowDoubleResult),
+ hasTypedArrayLengthStub_(false),
+ hasMappedArgumentsLengthStub_(false),
+ hasUnmappedArgumentsLengthStub_(false),
+ hasMappedArgumentsElementStub_(false),
+ hasUnmappedArgumentsElementStub_(false),
+ hasGenericProxyStub_(false),
+ hasDenseStub_(false)
+ {
+ }
+
+ CACHE_HEADER(GetProperty)
+
+ void reset(ReprotectCode reprotect);
+
+ Register object() const {
+ return object_;
+ }
+ ConstantOrRegister id() const {
+ return id_;
+ }
+ TypedOrValueRegister output() const {
+ return output_;
+ }
+ bool monitoredResult() const {
+ return monitoredResult_;
+ }
+ bool hasTypedArrayLengthStub(HandleObject obj) const {
+ return hasTypedArrayLengthStub_;
+ }
+ bool hasArgumentsLengthStub(bool mapped) const {
+ return mapped ? hasMappedArgumentsLengthStub_ : hasUnmappedArgumentsLengthStub_;
+ }
+ bool hasArgumentsElementStub(bool mapped) const {
+ return mapped ? hasMappedArgumentsElementStub_ : hasUnmappedArgumentsElementStub_;
+ }
+ bool hasGenericProxyStub() const {
+ return hasGenericProxyStub_;
+ }
+
+ bool hasDenseStub() const {
+ return hasDenseStub_;
+ }
+ void setHasDenseStub() {
+ MOZ_ASSERT(!hasDenseStub());
+ hasDenseStub_ = true;
+ }
+
+ void setHasTypedArrayLengthStub(HandleObject obj) {
+ MOZ_ASSERT(obj->is<TypedArrayObject>());
+ MOZ_ASSERT(!hasTypedArrayLengthStub_);
+ hasTypedArrayLengthStub_ = true;
+ }
+
+ void setLocationInfo(size_t locationsIndex, size_t numLocations) {
+ MOZ_ASSERT(idempotent());
+ MOZ_ASSERT(!numLocations_);
+ MOZ_ASSERT(numLocations);
+ locationsIndex_ = locationsIndex;
+ numLocations_ = numLocations;
+ }
+ void getLocationInfo(uint32_t* index, uint32_t* num) const {
+ MOZ_ASSERT(idempotent());
+ *index = locationsIndex_;
+ *num = numLocations_;
+ }
+
+ enum NativeGetPropCacheability {
+ CanAttachNone,
+ CanAttachReadSlot,
+ CanAttachArrayLength,
+ CanAttachCallGetter
+ };
+
+ // Helpers for CanAttachNativeGetProp
+ bool allowArrayLength(JSContext* cx) const;
+ bool allowGetters() const {
+ return monitoredResult() && !idempotent();
+ }
+
+ void maybeDisable(bool emitted);
+
+ // Attach the proper stub, if possible
+ MOZ_MUST_USE bool tryAttachStub(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachGenericProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachDOMProxyShadowed(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachDOMProxyUnshadowed(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ bool resetNeeded, void* returnAddr,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachNative(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachUnboxed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, void* returnAddr,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachUnboxedExpando(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachUnboxedArrayLength(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachTypedArrayLength(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachArgumentsLength(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachArgumentsElement(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleValue idval,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachDenseElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, bool* emitted);
+
+ static bool canAttachDenseElementHole(JSObject* obj, HandleValue idval,
+ TypedOrValueRegister output);
+ MOZ_MUST_USE bool tryAttachDenseElementHole(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj,
+ HandleValue idval, bool* emitted);
+
+ static bool canAttachTypedOrUnboxedArrayElement(JSObject* obj, const Value& idval,
+ TypedOrValueRegister output);
+
+ MOZ_MUST_USE bool tryAttachTypedOrUnboxedArrayElement(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj,
+ HandleValue idval, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachModuleNamespace(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr, bool* emitted);
+
+ static MOZ_MUST_USE bool update(JSContext* cx, HandleScript outerScript, size_t cacheIndex,
+ HandleObject obj, HandleValue id, MutableHandleValue vp);
+};
+
+class SetPropertyIC : public IonCache
+{
+ protected:
+ // Registers live after the cache, excluding output registers. The initial
+ // value of these registers must be preserved by the cache.
+ LiveRegisterSet liveRegs_;
+
+ Register object_;
+ Register temp_;
+ Register tempToUnboxIndex_;
+ FloatRegister tempDouble_;
+ FloatRegister tempFloat32_;
+ ConstantOrRegister id_;
+ ConstantOrRegister value_;
+ bool strict_ : 1;
+ bool needsTypeBarrier_ : 1;
+ bool guardHoles_ : 1;
+
+ bool hasGenericProxyStub_ : 1;
+ bool hasDenseStub_ : 1;
+
+ void emitIdGuard(MacroAssembler& masm, jsid id, Label* fail);
+
+ public:
+ SetPropertyIC(LiveRegisterSet liveRegs, Register object, Register temp, Register tempToUnboxIndex,
+ FloatRegister tempDouble, FloatRegister tempFloat32,
+ const ConstantOrRegister& id, const ConstantOrRegister& value,
+ bool strict, bool needsTypeBarrier, bool guardHoles)
+ : liveRegs_(liveRegs),
+ object_(object),
+ temp_(temp),
+ tempToUnboxIndex_(tempToUnboxIndex),
+ tempDouble_(tempDouble),
+ tempFloat32_(tempFloat32),
+ id_(id),
+ value_(value),
+ strict_(strict),
+ needsTypeBarrier_(needsTypeBarrier),
+ guardHoles_(guardHoles),
+ hasGenericProxyStub_(false),
+ hasDenseStub_(false)
+ {
+ }
+
+ CACHE_HEADER(SetProperty)
+
+ void reset(ReprotectCode reprotect);
+
+ Register object() const {
+ return object_;
+ }
+ Register temp() const {
+ return temp_;
+ }
+ Register tempToUnboxIndex() const {
+ return tempToUnboxIndex_;
+ }
+ FloatRegister tempDouble() const {
+ return tempDouble_;
+ }
+ FloatRegister tempFloat32() const {
+ return tempFloat32_;
+ }
+ ConstantOrRegister id() const {
+ return id_;
+ }
+ ConstantOrRegister value() const {
+ return value_;
+ }
+ bool strict() const {
+ return strict_;
+ }
+ bool needsTypeBarrier() const {
+ return needsTypeBarrier_;
+ }
+ bool guardHoles() const {
+ return guardHoles_;
+ }
+ bool hasGenericProxyStub() const {
+ return hasGenericProxyStub_;
+ }
+
+ bool hasDenseStub() const {
+ return hasDenseStub_;
+ }
+ void setHasDenseStub() {
+ MOZ_ASSERT(!hasDenseStub());
+ hasDenseStub_ = true;
+ }
+
+ enum NativeSetPropCacheability {
+ CanAttachNone,
+ CanAttachSetSlot,
+ MaybeCanAttachAddSlot,
+ CanAttachCallSetter
+ };
+
+ MOZ_MUST_USE bool attachSetSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleShape shape, bool checkTypeset);
+
+ MOZ_MUST_USE bool attachCallSetter(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleObject holder, HandleShape shape,
+ void* returnAddr);
+
+ MOZ_MUST_USE bool attachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, HandleShape oldShape,
+ HandleObjectGroup oldGroup, bool checkTypeset);
+
+ MOZ_MUST_USE bool attachGenericProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleId id, void* returnAddr);
+
+ MOZ_MUST_USE bool attachDOMProxyShadowed(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr);
+
+ MOZ_MUST_USE bool attachDOMProxyUnshadowed(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ void* returnAddr);
+
+ static MOZ_MUST_USE bool update(JSContext* cx, HandleScript outerScript, size_t cacheIndex,
+ HandleObject obj, HandleValue idval, HandleValue value);
+
+ MOZ_MUST_USE bool tryAttachNative(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted,
+ bool* tryNativeAddSlot);
+
+ MOZ_MUST_USE bool tryAttachUnboxed(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachUnboxedExpando(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj, HandleId id,
+ bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachStub(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleValue idval, HandleValue value,
+ MutableHandleId id, bool* emitted, bool* tryNativeAddSlot);
+
+ MOZ_MUST_USE bool tryAttachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, HandleId id, HandleObjectGroup oldGroup,
+ HandleShape oldShape, bool tryNativeAddSlot, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachDenseElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject obj, const Value& idval, bool* emitted);
+
+ MOZ_MUST_USE bool tryAttachTypedArrayElement(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject obj,
+ HandleValue idval, HandleValue val, bool* emitted);
+};
+
+class BindNameIC : public IonCache
+{
+ protected:
+ Register environmentChain_;
+ PropertyName* name_;
+ Register output_;
+
+ public:
+ BindNameIC(Register envChain, PropertyName* name, Register output)
+ : environmentChain_(envChain),
+ name_(name),
+ output_(output)
+ {
+ }
+
+ CACHE_HEADER(BindName)
+
+ Register environmentChainReg() const {
+ return environmentChain_;
+ }
+ HandlePropertyName name() const {
+ return HandlePropertyName::fromMarkedLocation(&name_);
+ }
+ Register outputReg() const {
+ return output_;
+ }
+
+ MOZ_MUST_USE bool attachGlobal(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain);
+
+ MOZ_MUST_USE bool attachNonGlobal(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject holder);
+
+ static JSObject*
+ update(JSContext* cx, HandleScript outerScript, size_t cacheIndex, HandleObject envChain);
+};
+
+class NameIC : public IonCache
+{
+ protected:
+ // Registers live after the cache, excluding output registers. The initial
+ // value of these registers must be preserved by the cache.
+ LiveRegisterSet liveRegs_;
+
+ bool typeOf_;
+ Register environmentChain_;
+ PropertyName* name_;
+ TypedOrValueRegister output_;
+
+ public:
+ NameIC(LiveRegisterSet liveRegs, bool typeOf,
+ Register envChain, PropertyName* name,
+ TypedOrValueRegister output)
+ : liveRegs_(liveRegs),
+ typeOf_(typeOf),
+ environmentChain_(envChain),
+ name_(name),
+ output_(output)
+ {
+ }
+
+ CACHE_HEADER(Name)
+
+ Register environmentChainReg() const {
+ return environmentChain_;
+ }
+ HandlePropertyName name() const {
+ return HandlePropertyName::fromMarkedLocation(&name_);
+ }
+ TypedOrValueRegister outputReg() const {
+ return output_;
+ }
+ bool isTypeOf() const {
+ return typeOf_;
+ }
+
+ MOZ_MUST_USE bool attachReadSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject holderBase,
+ HandleNativeObject holder, HandleShape shape);
+
+ MOZ_MUST_USE bool attachCallGetter(JSContext* cx, HandleScript outerScript, IonScript* ion,
+ HandleObject envChain, HandleObject obj,
+ HandleObject holder, HandleShape shape,
+ void* returnAddr);
+
+ MOZ_MUST_USE bool attachTypeOfNoProperty(JSContext* cx, HandleScript outerScript,
+ IonScript* ion, HandleObject envChain);
+
+ static MOZ_MUST_USE bool
+ update(JSContext* cx, HandleScript outerScript, size_t cacheIndex, HandleObject envChain,
+ MutableHandleValue vp);
+};
+
+#undef CACHE_HEADER
+
+// Implement cache casts now that the compiler can see the inheritance.
+#define CACHE_CASTS(ickind) \
+ ickind##IC& IonCache::to##ickind() \
+ { \
+ MOZ_ASSERT(is##ickind()); \
+ return *static_cast<ickind##IC*>(this); \
+ } \
+ const ickind##IC& IonCache::to##ickind() const \
+ { \
+ MOZ_ASSERT(is##ickind()); \
+ return *static_cast<const ickind##IC*>(this); \
+ }
+IONCACHE_KIND_LIST(CACHE_CASTS)
+#undef CACHE_CASTS
+
+bool IsCacheableProtoChainForIonOrCacheIR(JSObject* obj, JSObject* holder);
+bool IsCacheableGetPropReadSlotForIonOrCacheIR(JSObject* obj, JSObject* holder, Shape* shape);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonCaches_h */
diff --git a/js/src/jit/IonCode.h b/js/src/jit/IonCode.h
new file mode 100644
index 000000000..c581aa62e
--- /dev/null
+++ b/js/src/jit/IonCode.h
@@ -0,0 +1,825 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonCode_h
+#define jit_IonCode_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+
+#include "jstypes.h"
+
+#include "gc/Heap.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/ICStubSpace.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/IonTypes.h"
+#include "js/UbiNode.h"
+#include "vm/TraceLogging.h"
+#include "vm/TypeInference.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+class PatchableBackedge;
+class IonBuilder;
+class IonICEntry;
+
+typedef Vector<JSObject*, 4, JitAllocPolicy> ObjectVector;
+typedef Vector<TraceLoggerEvent, 0, SystemAllocPolicy> TraceLoggerEventVector;
+
+class JitCode : public gc::TenuredCell
+{
+ protected:
+ uint8_t* code_;
+ ExecutablePool* pool_;
+ uint32_t bufferSize_; // Total buffer size. Does not include headerSize_.
+ uint32_t insnSize_; // Instruction stream size.
+ uint32_t dataSize_; // Size of the read-only data area.
+ uint32_t jumpRelocTableBytes_; // Size of the jump relocation table.
+ uint32_t dataRelocTableBytes_; // Size of the data relocation table.
+ uint32_t preBarrierTableBytes_; // Size of the prebarrier table.
+ uint8_t headerSize_ : 5; // Number of bytes allocated before codeStart.
+ uint8_t kind_ : 3; // jit::CodeKind, for the memory reporters.
+ bool invalidated_ : 1; // Whether the code object has been invalidated.
+ // This is necessary to prevent GC tracing.
+ bool hasBytecodeMap_ : 1; // Whether the code object has been registered with
+ // native=>bytecode mapping tables.
+
+#if JS_BITS_PER_WORD == 32
+ // Ensure JitCode is gc::Cell aligned.
+ uint32_t padding_;
+#endif
+
+ JitCode()
+ : code_(nullptr),
+ pool_(nullptr)
+ { }
+ JitCode(uint8_t* code, uint32_t bufferSize, uint32_t headerSize, ExecutablePool* pool,
+ CodeKind kind)
+ : code_(code),
+ pool_(pool),
+ bufferSize_(bufferSize),
+ insnSize_(0),
+ dataSize_(0),
+ jumpRelocTableBytes_(0),
+ dataRelocTableBytes_(0),
+ preBarrierTableBytes_(0),
+ headerSize_(headerSize),
+ kind_(kind),
+ invalidated_(false),
+ hasBytecodeMap_(false)
+ {
+ MOZ_ASSERT(CodeKind(kind_) == kind);
+ MOZ_ASSERT(headerSize_ == headerSize);
+ }
+
+ uint32_t dataOffset() const {
+ return insnSize_;
+ }
+ uint32_t jumpRelocTableOffset() const {
+ return dataOffset() + dataSize_;
+ }
+ uint32_t dataRelocTableOffset() const {
+ return jumpRelocTableOffset() + jumpRelocTableBytes_;
+ }
+ uint32_t preBarrierTableOffset() const {
+ return dataRelocTableOffset() + dataRelocTableBytes_;
+ }
+
+ public:
+ uint8_t* raw() const {
+ return code_;
+ }
+ uint8_t* rawEnd() const {
+ return code_ + insnSize_;
+ }
+ bool containsNativePC(const void* addr) const {
+ const uint8_t* addr_u8 = (const uint8_t*) addr;
+ return raw() <= addr_u8 && addr_u8 < rawEnd();
+ }
+ size_t instructionsSize() const {
+ return insnSize_;
+ }
+ size_t bufferSize() const {
+ return bufferSize_;
+ }
+ size_t headerSize() const {
+ return headerSize_;
+ }
+
+ void traceChildren(JSTracer* trc);
+ void finalize(FreeOp* fop);
+ void setInvalidated() {
+ invalidated_ = true;
+ }
+
+ void setHasBytecodeMap() {
+ hasBytecodeMap_ = true;
+ }
+
+ void togglePreBarriers(bool enabled, ReprotectCode reprotect);
+
+ // If this JitCode object has been, effectively, corrupted due to
+ // invalidation patching, then we have to remember this so we don't try and
+ // trace relocation entries that may now be corrupt.
+ bool invalidated() const {
+ return !!invalidated_;
+ }
+
+ template <typename T> T as() const {
+ return JS_DATA_TO_FUNC_PTR(T, raw());
+ }
+
+ void copyFrom(MacroAssembler& masm);
+
+ static JitCode* FromExecutable(uint8_t* buffer) {
+ JitCode* code = *(JitCode**)(buffer - sizeof(JitCode*));
+ MOZ_ASSERT(code->raw() == buffer);
+ return code;
+ }
+
+ static size_t offsetOfCode() {
+ return offsetof(JitCode, code_);
+ }
+
+ uint8_t* jumpRelocTable() {
+ return code_ + jumpRelocTableOffset();
+ }
+
+ // Allocates a new JitCode object which will be managed by the GC. If no
+ // object can be allocated, nullptr is returned. On failure, |pool| is
+ // automatically released, so the code may be freed.
+ template <AllowGC allowGC>
+ static JitCode* New(JSContext* cx, uint8_t* code, uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind);
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::JitCode;
+};
+
+class SnapshotWriter;
+class RecoverWriter;
+class SafepointWriter;
+class SafepointIndex;
+class OsiIndex;
+class IonCache;
+struct PatchableBackedgeInfo;
+struct CacheLocation;
+
+// An IonScript attaches Ion-generated information to a JSScript.
+struct IonScript
+{
+ private:
+ // Code pointer containing the actual method.
+ PreBarrieredJitCode method_;
+
+ // Deoptimization table used by this method.
+ PreBarrieredJitCode deoptTable_;
+
+ // Entrypoint for OSR, or nullptr.
+ jsbytecode* osrPc_;
+
+ // Offset to OSR entrypoint from method_->raw(), or 0.
+ uint32_t osrEntryOffset_;
+
+ // Offset to entrypoint skipping type arg check from method_->raw().
+ uint32_t skipArgCheckEntryOffset_;
+
+ // Offset of the invalidation epilogue (which pushes this IonScript
+ // and calls the invalidation thunk).
+ uint32_t invalidateEpilogueOffset_;
+
+ // The offset immediately after the IonScript immediate.
+ // NOTE: technically a constant delta from
+ // |invalidateEpilogueOffset_|, so we could hard-code this
+ // per-platform if we want.
+ uint32_t invalidateEpilogueDataOffset_;
+
+ // Number of times this script bailed out without invalidation.
+ uint32_t numBailouts_;
+
+ // Flag set if IonScript was compiled with profiling enabled.
+ bool hasProfilingInstrumentation_;
+
+ // Flag for if this script is getting recompiled.
+ uint32_t recompiling_;
+
+ // Any kind of data needed by the runtime, these can be either cache
+ // information or profiling info.
+ uint32_t runtimeData_;
+ uint32_t runtimeSize_;
+
+    // State for polymorphic caches in the compiled code. All caches are stored
+    // in the runtimeData buffer and indexed by the cacheIndex, which gives a
+    // relative offset into the runtimeData array.
+ uint32_t cacheIndex_;
+ uint32_t cacheEntries_;
+
+ // Map code displacement to safepoint / OSI-patch-delta.
+ uint32_t safepointIndexOffset_;
+ uint32_t safepointIndexEntries_;
+
+ // Offset to and length of the safepoint table in bytes.
+ uint32_t safepointsStart_;
+ uint32_t safepointsSize_;
+
+ // Number of bytes this function reserves on the stack.
+ uint32_t frameSlots_;
+
+    // Number of bytes passed in as formal arguments or |this|.
+ uint32_t argumentSlots_;
+
+ // Frame size is the value that can be added to the StackPointer along
+ // with the frame prefix to get a valid JitFrameLayout.
+ uint32_t frameSize_;
+
+ // Table mapping bailout IDs to snapshot offsets.
+ uint32_t bailoutTable_;
+ uint32_t bailoutEntries_;
+
+ // Map OSI-point displacement to snapshot.
+ uint32_t osiIndexOffset_;
+ uint32_t osiIndexEntries_;
+
+ // Offset from the start of the code buffer to its snapshot buffer.
+ uint32_t snapshots_;
+ uint32_t snapshotsListSize_;
+ uint32_t snapshotsRVATableSize_;
+
+ // List of instructions needed to recover stack frames.
+ uint32_t recovers_;
+ uint32_t recoversSize_;
+
+ // Constant table for constants stored in snapshots.
+ uint32_t constantTable_;
+ uint32_t constantEntries_;
+
+ // List of patchable backedges which are threaded into the runtime's list.
+ uint32_t backedgeList_;
+ uint32_t backedgeEntries_;
+
+ // List of entries to the shared stub.
+ uint32_t sharedStubList_;
+ uint32_t sharedStubEntries_;
+
+ // Number of references from invalidation records.
+ uint32_t invalidationCount_;
+
+ // Identifier of the compilation which produced this code.
+ RecompileInfo recompileInfo_;
+
+ // The optimization level this script was compiled in.
+ OptimizationLevel optimizationLevel_;
+
+ // Number of times we tried to enter this script via OSR but failed due to
+ // a LOOPENTRY pc other than osrPc_.
+ uint32_t osrPcMismatchCounter_;
+
+ // Allocated space for fallback stubs.
+ FallbackICStubSpace fallbackStubSpace_;
+
+ // TraceLogger events that are baked into the IonScript.
+ TraceLoggerEventVector traceLoggerEvents_;
+
+ private:
+ inline uint8_t* bottomBuffer() {
+ return reinterpret_cast<uint8_t*>(this);
+ }
+ inline const uint8_t* bottomBuffer() const {
+ return reinterpret_cast<const uint8_t*>(this);
+ }
+
+ public:
+
+ SnapshotOffset* bailoutTable() {
+ return (SnapshotOffset*) &bottomBuffer()[bailoutTable_];
+ }
+ PreBarrieredValue* constants() {
+ return (PreBarrieredValue*) &bottomBuffer()[constantTable_];
+ }
+ const SafepointIndex* safepointIndices() const {
+ return const_cast<IonScript*>(this)->safepointIndices();
+ }
+ SafepointIndex* safepointIndices() {
+ return (SafepointIndex*) &bottomBuffer()[safepointIndexOffset_];
+ }
+ const OsiIndex* osiIndices() const {
+ return const_cast<IonScript*>(this)->osiIndices();
+ }
+ OsiIndex* osiIndices() {
+ return (OsiIndex*) &bottomBuffer()[osiIndexOffset_];
+ }
+ uint32_t* cacheIndex() {
+ return (uint32_t*) &bottomBuffer()[cacheIndex_];
+ }
+ uint8_t* runtimeData() {
+ return &bottomBuffer()[runtimeData_];
+ }
+ PatchableBackedge* backedgeList() {
+ return (PatchableBackedge*) &bottomBuffer()[backedgeList_];
+ }
+
+ private:
+ void trace(JSTracer* trc);
+
+ public:
+ // Do not call directly, use IonScript::New. This is public for cx->new_.
+ IonScript();
+
+ ~IonScript() {
+ // The contents of the fallback stub space are removed and freed
+ // separately after the next minor GC. See IonScript::Destroy.
+ MOZ_ASSERT(fallbackStubSpace_.isEmpty());
+ }
+
+ static IonScript* New(JSContext* cx, RecompileInfo recompileInfo,
+ uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize,
+ size_t snapshotsListSize, size_t snapshotsRVATableSize,
+ size_t recoversSize, size_t bailoutEntries,
+ size_t constants, size_t safepointIndexEntries,
+ size_t osiIndexEntries, size_t cacheEntries,
+ size_t runtimeSize, size_t safepointsSize,
+ size_t backedgeEntries, size_t sharedStubEntries,
+ OptimizationLevel optimizationLevel);
+ static void Trace(JSTracer* trc, IonScript* script);
+ static void Destroy(FreeOp* fop, IonScript* script);
+
+ static inline size_t offsetOfMethod() {
+ return offsetof(IonScript, method_);
+ }
+ static inline size_t offsetOfOsrEntryOffset() {
+ return offsetof(IonScript, osrEntryOffset_);
+ }
+ static inline size_t offsetOfSkipArgCheckEntryOffset() {
+ return offsetof(IonScript, skipArgCheckEntryOffset_);
+ }
+ static inline size_t offsetOfInvalidationCount() {
+ return offsetof(IonScript, invalidationCount_);
+ }
+ static inline size_t offsetOfRecompiling() {
+ return offsetof(IonScript, recompiling_);
+ }
+
+ public:
+ JitCode* method() const {
+ return method_;
+ }
+ void setMethod(JitCode* code) {
+ MOZ_ASSERT(!invalidated());
+ method_ = code;
+ }
+ void setDeoptTable(JitCode* code) {
+ deoptTable_ = code;
+ }
+ void setOsrPc(jsbytecode* osrPc) {
+ osrPc_ = osrPc;
+ }
+ jsbytecode* osrPc() const {
+ return osrPc_;
+ }
+ void setOsrEntryOffset(uint32_t offset) {
+ MOZ_ASSERT(!osrEntryOffset_);
+ osrEntryOffset_ = offset;
+ }
+ uint32_t osrEntryOffset() const {
+ return osrEntryOffset_;
+ }
+ void setSkipArgCheckEntryOffset(uint32_t offset) {
+ MOZ_ASSERT(!skipArgCheckEntryOffset_);
+ skipArgCheckEntryOffset_ = offset;
+ }
+ uint32_t getSkipArgCheckEntryOffset() const {
+ return skipArgCheckEntryOffset_;
+ }
+ bool containsCodeAddress(uint8_t* addr) const {
+ return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
+ }
+ bool containsReturnAddress(uint8_t* addr) const {
+ // This accounts for an off by one error caused by the return address of a
+ // bailout sitting outside the range of the containing function.
+ return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
+ }
+ void setInvalidationEpilogueOffset(uint32_t offset) {
+ MOZ_ASSERT(!invalidateEpilogueOffset_);
+ invalidateEpilogueOffset_ = offset;
+ }
+ uint32_t invalidateEpilogueOffset() const {
+ MOZ_ASSERT(invalidateEpilogueOffset_);
+ return invalidateEpilogueOffset_;
+ }
+ void setInvalidationEpilogueDataOffset(uint32_t offset) {
+ MOZ_ASSERT(!invalidateEpilogueDataOffset_);
+ invalidateEpilogueDataOffset_ = offset;
+ }
+ uint32_t invalidateEpilogueDataOffset() const {
+ MOZ_ASSERT(invalidateEpilogueDataOffset_);
+ return invalidateEpilogueDataOffset_;
+ }
+ void incNumBailouts() {
+ numBailouts_++;
+ }
+ bool bailoutExpected() const {
+ return numBailouts_ >= JitOptions.frequentBailoutThreshold;
+ }
+ void setHasProfilingInstrumentation() {
+ hasProfilingInstrumentation_ = true;
+ }
+ void clearHasProfilingInstrumentation() {
+ hasProfilingInstrumentation_ = false;
+ }
+ bool hasProfilingInstrumentation() const {
+ return hasProfilingInstrumentation_;
+ }
+ MOZ_MUST_USE bool addTraceLoggerEvent(TraceLoggerEvent& event) {
+ MOZ_ASSERT(event.hasPayload());
+ return traceLoggerEvents_.append(Move(event));
+ }
+ const uint8_t* snapshots() const {
+ return reinterpret_cast<const uint8_t*>(this) + snapshots_;
+ }
+ size_t snapshotsListSize() const {
+ return snapshotsListSize_;
+ }
+ size_t snapshotsRVATableSize() const {
+ return snapshotsRVATableSize_;
+ }
+ const uint8_t* recovers() const {
+ return reinterpret_cast<const uint8_t*>(this) + recovers_;
+ }
+ size_t recoversSize() const {
+ return recoversSize_;
+ }
+ const uint8_t* safepoints() const {
+ return reinterpret_cast<const uint8_t*>(this) + safepointsStart_;
+ }
+ size_t safepointsSize() const {
+ return safepointsSize_;
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this);
+ }
+ PreBarrieredValue& getConstant(size_t index) {
+ MOZ_ASSERT(index < numConstants());
+ return constants()[index];
+ }
+ size_t numConstants() const {
+ return constantEntries_;
+ }
+ uint32_t frameSlots() const {
+ return frameSlots_;
+ }
+ uint32_t argumentSlots() const {
+ return argumentSlots_;
+ }
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ SnapshotOffset bailoutToSnapshot(uint32_t bailoutId) {
+ MOZ_ASSERT(bailoutId < bailoutEntries_);
+ return bailoutTable()[bailoutId];
+ }
+ const SafepointIndex* getSafepointIndex(uint32_t disp) const;
+ const SafepointIndex* getSafepointIndex(uint8_t* retAddr) const {
+ MOZ_ASSERT(containsCodeAddress(retAddr));
+ return getSafepointIndex(retAddr - method()->raw());
+ }
+ const OsiIndex* getOsiIndex(uint32_t disp) const;
+ const OsiIndex* getOsiIndex(uint8_t* retAddr) const;
+ inline IonCache& getCacheFromIndex(uint32_t index) {
+ MOZ_ASSERT(index < cacheEntries_);
+ uint32_t offset = cacheIndex()[index];
+ return getCache(offset);
+ }
+ inline IonCache& getCache(uint32_t offset) {
+ MOZ_ASSERT(offset < runtimeSize_);
+ return *(IonCache*) &runtimeData()[offset];
+ }
+ size_t numCaches() const {
+ return cacheEntries_;
+ }
+ IonICEntry* sharedStubList() {
+ return (IonICEntry*) &bottomBuffer()[sharedStubList_];
+ }
+ size_t numSharedStubs() const {
+ return sharedStubEntries_;
+ }
+ size_t runtimeSize() const {
+ return runtimeSize_;
+ }
+ CacheLocation* getCacheLocs(uint32_t locIndex) {
+ MOZ_ASSERT(locIndex < runtimeSize_);
+ return (CacheLocation*) &runtimeData()[locIndex];
+ }
+ void toggleBarriers(bool enabled, ReprotectCode reprotect = Reprotect);
+ void purgeCaches();
+ void unlinkFromRuntime(FreeOp* fop);
+ void copySnapshots(const SnapshotWriter* writer);
+ void copyRecovers(const RecoverWriter* writer);
+ void copyBailoutTable(const SnapshotOffset* table);
+ void copyConstants(const Value* vp);
+ void copySafepointIndices(const SafepointIndex* firstSafepointIndex, MacroAssembler& masm);
+ void copyOsiIndices(const OsiIndex* firstOsiIndex, MacroAssembler& masm);
+ void copyRuntimeData(const uint8_t* data);
+ void copyCacheEntries(const uint32_t* caches, MacroAssembler& masm);
+ void copySafepoints(const SafepointWriter* writer);
+ void copyPatchableBackedges(JSContext* cx, JitCode* code,
+ PatchableBackedgeInfo* backedges,
+ MacroAssembler& masm);
+
+ bool invalidated() const {
+ return invalidationCount_ != 0;
+ }
+
+ // Invalidate the current compilation.
+ void invalidate(JSContext* cx, bool resetUses, const char* reason);
+
+ size_t invalidationCount() const {
+ return invalidationCount_;
+ }
+ void incrementInvalidationCount() {
+ invalidationCount_++;
+ }
+ void decrementInvalidationCount(FreeOp* fop) {
+ MOZ_ASSERT(invalidationCount_);
+ invalidationCount_--;
+ if (!invalidationCount_)
+ Destroy(fop, this);
+ }
+ const RecompileInfo& recompileInfo() const {
+ return recompileInfo_;
+ }
+ RecompileInfo& recompileInfoRef() {
+ return recompileInfo_;
+ }
+ OptimizationLevel optimizationLevel() const {
+ return optimizationLevel_;
+ }
+ uint32_t incrOsrPcMismatchCounter() {
+ return ++osrPcMismatchCounter_;
+ }
+ void resetOsrPcMismatchCounter() {
+ osrPcMismatchCounter_ = 0;
+ }
+
+ void setRecompiling() {
+ recompiling_ = true;
+ }
+
+ bool isRecompiling() const {
+ return recompiling_;
+ }
+
+ void clearRecompiling() {
+ recompiling_ = false;
+ }
+
+ FallbackICStubSpace* fallbackStubSpace() {
+ return &fallbackStubSpace_;
+ }
+ void adoptFallbackStubs(FallbackICStubSpace* stubSpace);
+ void purgeOptimizedStubs(Zone* zone);
+
+ enum ShouldIncreaseAge {
+ IncreaseAge = true,
+ KeepAge = false
+ };
+
+ static void writeBarrierPre(Zone* zone, IonScript* ionScript);
+};
+
+// Execution information for a basic block which may persist after the
+// accompanying IonScript is destroyed, for use during profiling.
+struct IonBlockCounts
+{
+ private:
+ uint32_t id_;
+
+    // Approximate bytecode offset in the outer (not inlined) script this
+    // block was generated from.
+ uint32_t offset_;
+
+ // File and line of the inner script this block was generated from.
+ char* description_;
+
+ // ids for successors of this block.
+ uint32_t numSuccessors_;
+ uint32_t* successors_;
+
+ // Hit count for this block.
+ uint64_t hitCount_;
+
+ // Text information about the code generated for this block.
+ char* code_;
+
+ public:
+
+ MOZ_MUST_USE bool init(uint32_t id, uint32_t offset, char* description,
+ uint32_t numSuccessors) {
+ id_ = id;
+ offset_ = offset;
+ description_ = description;
+ numSuccessors_ = numSuccessors;
+ if (numSuccessors) {
+ successors_ = js_pod_calloc<uint32_t>(numSuccessors);
+ if (!successors_)
+ return false;
+ }
+ return true;
+ }
+
+ void destroy() {
+ js_free(description_);
+ js_free(successors_);
+ js_free(code_);
+ }
+
+ uint32_t id() const {
+ return id_;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+
+ const char* description() const {
+ return description_;
+ }
+
+ size_t numSuccessors() const {
+ return numSuccessors_;
+ }
+
+ void setSuccessor(size_t i, uint32_t id) {
+ MOZ_ASSERT(i < numSuccessors_);
+ successors_[i] = id;
+ }
+
+ uint32_t successor(size_t i) const {
+ MOZ_ASSERT(i < numSuccessors_);
+ return successors_[i];
+ }
+
+ uint64_t* addressOfHitCount() {
+ return &hitCount_;
+ }
+
+ uint64_t hitCount() const {
+ return hitCount_;
+ }
+
+ void setCode(const char* code) {
+ char* ncode = js_pod_malloc<char>(strlen(code) + 1);
+ if (ncode) {
+ strcpy(ncode, code);
+ code_ = ncode;
+ }
+ }
+
+ const char* code() const {
+ return code_;
+ }
+};
+
+// Execution information for a compiled script which may persist after the
+// IonScript is destroyed, for use during profiling.
+struct IonScriptCounts
+{
+ private:
+ // Any previous invalidated compilation(s) for the script.
+ IonScriptCounts* previous_;
+
+ // Information about basic blocks in this script.
+ size_t numBlocks_;
+ IonBlockCounts* blocks_;
+
+ public:
+
+ IonScriptCounts() {
+ mozilla::PodZero(this);
+ }
+
+ ~IonScriptCounts() {
+ for (size_t i = 0; i < numBlocks_; i++)
+ blocks_[i].destroy();
+ js_free(blocks_);
+ // The list can be long in some corner cases (bug 1140084), so
+ // unroll the recursion.
+ IonScriptCounts* victims = previous_;
+ while (victims) {
+ IonScriptCounts* victim = victims;
+ victims = victim->previous_;
+ victim->previous_ = nullptr;
+ js_delete(victim);
+ }
+ }
+
+ MOZ_MUST_USE bool init(size_t numBlocks) {
+ blocks_ = js_pod_calloc<IonBlockCounts>(numBlocks);
+ if (!blocks_)
+ return false;
+
+ numBlocks_ = numBlocks;
+ return true;
+ }
+
+ size_t numBlocks() const {
+ return numBlocks_;
+ }
+
+ IonBlockCounts& block(size_t i) {
+ MOZ_ASSERT(i < numBlocks_);
+ return blocks_[i];
+ }
+
+ void setPrevious(IonScriptCounts* previous) {
+ previous_ = previous;
+ }
+
+ IonScriptCounts* previous() const {
+ return previous_;
+ }
+};
+
+struct VMFunction;
+
+struct AutoFlushICache
+{
+ private:
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ uintptr_t start_;
+ uintptr_t stop_;
+ const char* name_;
+ bool inhibit_;
+ AutoFlushICache* prev_;
+#endif
+
+ public:
+ static void setRange(uintptr_t p, size_t len);
+ static void flush(uintptr_t p, size_t len);
+ static void setInhibit();
+ ~AutoFlushICache();
+ explicit AutoFlushICache(const char* nonce, bool inhibit=false);
+};
+
+} // namespace jit
+
+namespace gc {
+
+inline bool
+IsMarked(JSRuntime* rt, const jit::VMFunction*)
+{
+    // VMFunctions are only static objects which are used by WeakMaps as keys.
+    // They are considered root objects which are always marked.
+ return true;
+}
+
+} // namespace gc
+
+} // namespace js
+
+// JS::ubi::Nodes can point to js::jit::JitCode instances; they're js::gc::Cell
+// instances with no associated compartment.
+namespace JS {
+namespace ubi {
+template<>
+class Concrete<js::jit::JitCode> : TracerConcrete<js::jit::JitCode> {
+ protected:
+ explicit Concrete(js::jit::JitCode *ptr) : TracerConcrete<js::jit::JitCode>(ptr) { }
+
+ public:
+ static void construct(void *storage, js::jit::JitCode *ptr) { new (storage) Concrete(ptr); }
+
+ CoarseType coarseType() const final { return CoarseType::Script; }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override {
+ Size size = js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+ size += get().bufferSize();
+ size += get().headerSize();
+ return size;
+ }
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+
+template <>
+struct DeletePolicy<js::jit::IonScript>
+{
+ explicit DeletePolicy(JSRuntime* rt) : rt_(rt) {}
+ void operator()(const js::jit::IonScript* script);
+
+ private:
+ JSRuntime* rt_;
+};
+
+} // namespace JS
+
+#endif /* jit_IonCode_h */
diff --git a/js/src/jit/IonInstrumentation.h b/js/src/jit/IonInstrumentation.h
new file mode 100644
index 000000000..3163b2263
--- /dev/null
+++ b/js/src/jit/IonInstrumentation.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonInstrumentation_h
+#define jit_IonInstrumentation_h
+
+namespace js {
+
+class SPSProfiler;
+
+namespace jit {
+
+class MacroAssembler;
+
+typedef SPSInstrumentation<MacroAssembler, Register> BaseInstrumentation;
+
+class IonInstrumentation : public BaseInstrumentation
+{
+ public:
+ IonInstrumentation(SPSProfiler* profiler, jsbytecode** pc)
+ : BaseInstrumentation(profiler)
+ {
+ MOZ_ASSERT(pc != nullptr);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonInstrumentation_h */
diff --git a/js/src/jit/IonOptimizationLevels.cpp b/js/src/jit/IonOptimizationLevels.cpp
new file mode 100644
index 000000000..ece02fdc0
--- /dev/null
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -0,0 +1,178 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonOptimizationLevels.h"
+
+#include "jsscript.h"
+
+#include "jit/Ion.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+OptimizationLevelInfo IonOptimizations;
+
+void
+OptimizationInfo::initNormalOptimizationInfo()
+{
+ level_ = OptimizationLevel::Normal;
+
+ autoTruncate_ = true;
+ eaa_ = true;
+ eagerSimdUnbox_ = true;
+ edgeCaseAnalysis_ = true;
+ eliminateRedundantChecks_ = true;
+ inlineInterpreted_ = true;
+ inlineNative_ = true;
+ licm_ = true;
+ loopUnrolling_ = true;
+ gvn_ = true;
+ rangeAnalysis_ = true;
+ reordering_ = true;
+ sincos_ = true;
+ sink_ = true;
+
+ registerAllocator_ = RegisterAllocator_Backtracking;
+
+ inlineMaxBytecodePerCallSiteMainThread_ = 550;
+ inlineMaxBytecodePerCallSiteOffThread_ = 1100;
+ inlineMaxCalleeInlinedBytecodeLength_ = 3550;
+ inlineMaxTotalBytecodeLength_ = 85000;
+ inliningMaxCallerBytecodeLength_ = 1600;
+ maxInlineDepth_ = 3;
+ scalarReplacement_ = true;
+ smallFunctionMaxInlineDepth_ = 10;
+ compilerWarmUpThreshold_ = CompilerWarmupThreshold;
+ compilerSmallFunctionWarmUpThreshold_ = CompilerSmallFunctionWarmupThreshold;
+ inliningWarmUpThresholdFactor_ = 0.125;
+ inliningRecompileThresholdFactor_ = 4;
+}
+
+void
+OptimizationInfo::initWasmOptimizationInfo()
+{
+    // The Wasm optimization level disables some passes that don't work well
+    // with wasm.
+
+    // Take normal option values for unspecified options.
+ initNormalOptimizationInfo();
+
+ level_ = OptimizationLevel::Wasm;
+
+ ama_ = true;
+ autoTruncate_ = false;
+ eagerSimdUnbox_ = false; // wasm has no boxing / unboxing.
+ edgeCaseAnalysis_ = false;
+ eliminateRedundantChecks_ = false;
+ scalarReplacement_ = false; // wasm has no objects.
+ sincos_ = false;
+ sink_ = false;
+}
+
+uint32_t
+OptimizationInfo::compilerWarmUpThreshold(JSScript* script, jsbytecode* pc) const
+{
+ MOZ_ASSERT(pc == nullptr || pc == script->code() || JSOp(*pc) == JSOP_LOOPENTRY);
+
+ if (pc == script->code())
+ pc = nullptr;
+
+ uint32_t warmUpThreshold = compilerWarmUpThreshold_;
+ if (JitOptions.forcedDefaultIonWarmUpThreshold.isSome())
+ warmUpThreshold = JitOptions.forcedDefaultIonWarmUpThreshold.ref();
+
+ if (JitOptions.isSmallFunction(script)) {
+ warmUpThreshold = compilerSmallFunctionWarmUpThreshold_;
+ if (JitOptions.forcedDefaultIonSmallFunctionWarmUpThreshold.isSome())
+ warmUpThreshold = JitOptions.forcedDefaultIonSmallFunctionWarmUpThreshold.ref();
+ }
+
+ // If the script is too large to compile on the main thread, we can still
+ // compile it off thread. In these cases, increase the warm-up counter
+ // threshold to improve the compilation's type information and hopefully
+ // avoid later recompilation.
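+    //
+    // For example (hypothetical numbers): with a base threshold of 1000 and a
+    // script twice MAX_MAIN_THREAD_SCRIPT_SIZE, the scaling below doubles the
+    // threshold to 2000; the same proportional scaling applies to the number
+    // of locals and arguments.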
+
+ if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE)
+ warmUpThreshold *= (script->length() / (double) MAX_MAIN_THREAD_SCRIPT_SIZE);
+
+ uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
+ if (numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
+ warmUpThreshold *= (numLocalsAndArgs / (double) MAX_MAIN_THREAD_LOCALS_AND_ARGS);
+
+ if (!pc || JitOptions.eagerCompilation)
+ return warmUpThreshold;
+
+ // It's more efficient to enter outer loops, rather than inner loops, via OSR.
+ // To accomplish this, we use a slightly higher threshold for inner loops.
+ // Note that the loop depth is always > 0 so we will prefer non-OSR over OSR.
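+    // For example, with a warm-up threshold of 1000, a loop entry at depth 2
+    // requires 1000 + 2 * 100 = 1200 warm-up hits before OSR is attempted.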
+ uint32_t loopDepth = LoopEntryDepthHint(pc);
+ MOZ_ASSERT(loopDepth > 0);
+ return warmUpThreshold + loopDepth * 100;
+}
+
+OptimizationLevelInfo::OptimizationLevelInfo()
+{
+ infos_[OptimizationLevel::Normal].initNormalOptimizationInfo();
+ infos_[OptimizationLevel::Wasm].initWasmOptimizationInfo();
+
+#ifdef DEBUG
+ OptimizationLevel level = firstLevel();
+ while (!isLastLevel(level)) {
+ OptimizationLevel next = nextLevel(level);
+ MOZ_ASSERT_IF(level != OptimizationLevel::DontCompile, level < next);
+ level = next;
+ }
+#endif
+}
+
+OptimizationLevel
+OptimizationLevelInfo::nextLevel(OptimizationLevel level) const
+{
+ MOZ_ASSERT(!isLastLevel(level));
+ switch (level) {
+ case OptimizationLevel::DontCompile:
+ return OptimizationLevel::Normal;
+ case OptimizationLevel::Normal:
+ case OptimizationLevel::Wasm:
+ case OptimizationLevel::Count:;
+ }
+ MOZ_CRASH("Unknown optimization level.");
+}
+
+OptimizationLevel
+OptimizationLevelInfo::firstLevel() const
+{
+ return nextLevel(OptimizationLevel::DontCompile);
+}
+
+bool
+OptimizationLevelInfo::isLastLevel(OptimizationLevel level) const
+{
+ return level == OptimizationLevel::Normal;
+}
+
+OptimizationLevel
+OptimizationLevelInfo::levelForScript(JSScript* script, jsbytecode* pc) const
+{
+ OptimizationLevel prev = OptimizationLevel::DontCompile;
+
+ while (!isLastLevel(prev)) {
+ OptimizationLevel level = nextLevel(prev);
+ const OptimizationInfo* info = get(level);
+ if (script->getWarmUpCount() < info->compilerWarmUpThreshold(script, pc))
+ return prev;
+
+ prev = level;
+ }
+
+ return prev;
+}
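+
+// Example (illustrative): Ion consults this table when deciding whether a
+// script is warm enough to compile, roughly as follows:
+//   OptimizationLevel level = IonOptimizations.levelForScript(script, pc);
+//   if (level == OptimizationLevel::DontCompile)
+//       ;   // not warm enough yet; keep running in Baseline/the interpreter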
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
new file mode 100644
index 000000000..c38eb69a2
--- /dev/null
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -0,0 +1,302 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonOptimizationLevels_h
+#define jit_IonOptimizationLevels_h
+
+#include "mozilla/EnumeratedArray.h"
+
+#include "jsbytecode.h"
+#include "jstypes.h"
+
+#include "jit/JitOptions.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace jit {
+
+enum class OptimizationLevel : uint8_t
+{
+ Normal,
+ Wasm,
+ Count,
+ DontCompile
+};
+
+#ifdef JS_JITSPEW
+inline const char*
+OptimizationLevelString(OptimizationLevel level)
+{
+ switch (level) {
+ case OptimizationLevel::DontCompile:
+ return "Optimization_DontCompile";
+ case OptimizationLevel::Normal:
+ return "Optimization_Normal";
+ case OptimizationLevel::Wasm:
+ return "Optimization_Wasm";
+ case OptimizationLevel::Count:;
+ }
+ MOZ_CRASH("Invalid OptimizationLevel");
+}
+#endif
+
+class OptimizationInfo
+{
+ public:
+ OptimizationLevel level_;
+
+ // Toggles whether Effective Address Analysis is performed.
+ bool eaa_;
+
+ // Toggles whether Alignment Mask Analysis is performed.
+ bool ama_;
+
+ // Toggles whether Edge Case Analysis is used.
+ bool edgeCaseAnalysis_;
+
+ // Toggles whether redundant checks get removed.
+ bool eliminateRedundantChecks_;
+
+ // Toggles whether interpreted scripts get inlined.
+ bool inlineInterpreted_;
+
+ // Toggles whether native scripts get inlined.
+ bool inlineNative_;
+
+ // Toggles whether eager unboxing of SIMD is used.
+ bool eagerSimdUnbox_;
+
+ // Toggles whether global value numbering is used.
+ bool gvn_;
+
+ // Toggles whether loop invariant code motion is performed.
+ bool licm_;
+
+ // Toggles whether Range Analysis is used.
+ bool rangeAnalysis_;
+
+ // Toggles whether loop unrolling is performed.
+ bool loopUnrolling_;
+
+ // Toggles whether instruction reordering is performed.
+ bool reordering_;
+
+ // Toggles whether Truncation based on Range Analysis is used.
+ bool autoTruncate_;
+
+ // Toggles whether sincos is used.
+ bool sincos_;
+
+ // Toggles whether sink is used.
+ bool sink_;
+
+ // Describes which register allocator to use.
+ IonRegisterAllocator registerAllocator_;
+
+ // The maximum total bytecode size of an inline call site. We use a lower
+ // value if off-thread compilation is not available, to avoid stalling the
+ // main thread.
+ uint32_t inlineMaxBytecodePerCallSiteOffThread_;
+ uint32_t inlineMaxBytecodePerCallSiteMainThread_;
+
+ // The maximum value we allow for baselineScript->inlinedBytecodeLength_
+ // when inlining.
+ uint16_t inlineMaxCalleeInlinedBytecodeLength_;
+
+ // The maximum bytecode length we'll inline in a single compilation.
+ uint32_t inlineMaxTotalBytecodeLength_;
+
+ // The maximum bytecode length the caller may have
+ // before we stop inlining large functions in that caller.
+ uint32_t inliningMaxCallerBytecodeLength_;
+
+ // The maximum inlining depth.
+ uint32_t maxInlineDepth_;
+
+ // Toggles whether scalar replacement is used.
+ bool scalarReplacement_;
+
+ // The maximum inlining depth for small functions.
+ //
+ // Inlining small functions has almost no compilation overhead
+ // and removes the call overhead that would otherwise be needed.
+ // The value is currently very low; its main purpose is to make sure
+ // we don't blow out the stack.
+ uint32_t smallFunctionMaxInlineDepth_;
+
+ // How many invocations or loop iterations are needed before functions
+ // are compiled.
+ uint32_t compilerWarmUpThreshold_;
+
+ // Default compiler warmup threshold, unless it is overridden.
+ static const uint32_t CompilerWarmupThreshold = 1000;
+
+ // How many invocations or loop iterations are needed before small functions
+ // are compiled.
+ uint32_t compilerSmallFunctionWarmUpThreshold_;
+
+ // Default small function compiler warmup threshold, unless it is overridden.
+ static const uint32_t CompilerSmallFunctionWarmupThreshold = 100;
+
+ // How many invocations or loop iterations are needed before calls
+ // are inlined, as a fraction of compilerWarmUpThreshold.
+ double inliningWarmUpThresholdFactor_;
+
+ // How many invocations or loop iterations are needed before a function
+ // is hot enough to recompile the outerScript to inline that function,
+ // as a multiple of inliningWarmUpThreshold.
+ uint32_t inliningRecompileThresholdFactor_;
+
+ OptimizationInfo()
+ { }
+
+ void initNormalOptimizationInfo();
+ void initWasmOptimizationInfo();
+
+ OptimizationLevel level() const {
+ return level_;
+ }
+
+ bool inlineInterpreted() const {
+ return inlineInterpreted_ && !JitOptions.disableInlining;
+ }
+
+ bool inlineNative() const {
+ return inlineNative_ && !JitOptions.disableInlining;
+ }
+
+ uint32_t compilerWarmUpThreshold(JSScript* script, jsbytecode* pc = nullptr) const;
+
+ bool eagerSimdUnboxEnabled() const {
+ return eagerSimdUnbox_ && !JitOptions.disableEagerSimdUnbox;
+ }
+
+ bool gvnEnabled() const {
+ return gvn_ && !JitOptions.disableGvn;
+ }
+
+ bool licmEnabled() const {
+ return licm_ && !JitOptions.disableLicm;
+ }
+
+ bool rangeAnalysisEnabled() const {
+ return rangeAnalysis_ && !JitOptions.disableRangeAnalysis;
+ }
+
+ bool loopUnrollingEnabled() const {
+ return loopUnrolling_ && !JitOptions.disableLoopUnrolling;
+ }
+
+ bool instructionReorderingEnabled() const {
+ return reordering_ && !JitOptions.disableInstructionReordering;
+ }
+
+ bool autoTruncateEnabled() const {
+ return autoTruncate_ && rangeAnalysisEnabled();
+ }
+
+ bool sincosEnabled() const {
+ return sincos_ && !JitOptions.disableSincos;
+ }
+
+ bool sinkEnabled() const {
+ return sink_ && !JitOptions.disableSink;
+ }
+
+ bool eaaEnabled() const {
+ return eaa_ && !JitOptions.disableEaa;
+ }
+
+ bool amaEnabled() const {
+ return ama_ && !JitOptions.disableAma;
+ }
+
+ bool edgeCaseAnalysisEnabled() const {
+ return edgeCaseAnalysis_ && !JitOptions.disableEdgeCaseAnalysis;
+ }
+
+ bool eliminateRedundantChecksEnabled() const {
+ return eliminateRedundantChecks_;
+ }
+
+ bool flowAliasAnalysisEnabled() const {
+ return !JitOptions.disableFlowAA;
+ }
+
+ IonRegisterAllocator registerAllocator() const {
+ if (JitOptions.forcedRegisterAllocator.isSome())
+ return JitOptions.forcedRegisterAllocator.ref();
+ return registerAllocator_;
+ }
+
+ bool scalarReplacementEnabled() const {
+ return scalarReplacement_ && !JitOptions.disableScalarReplacement;
+ }
+
+ uint32_t smallFunctionMaxInlineDepth() const {
+ return smallFunctionMaxInlineDepth_;
+ }
+
+ bool isSmallFunction(JSScript* script) const;
+
+ uint32_t maxInlineDepth() const {
+ return maxInlineDepth_;
+ }
+
+ uint32_t inlineMaxBytecodePerCallSite(bool offThread) const {
+ return (offThread || !JitOptions.limitScriptSize)
+ ? inlineMaxBytecodePerCallSiteOffThread_
+ : inlineMaxBytecodePerCallSiteMainThread_;
+ }
+
+ uint16_t inlineMaxCalleeInlinedBytecodeLength() const {
+ return inlineMaxCalleeInlinedBytecodeLength_;
+ }
+
+ uint32_t inlineMaxTotalBytecodeLength() const {
+ return inlineMaxTotalBytecodeLength_;
+ }
+
+ uint32_t inliningMaxCallerBytecodeLength() const {
+ return inliningMaxCallerBytecodeLength_;
+ }
+
+ uint32_t inliningWarmUpThreshold() const {
+ uint32_t compilerWarmUpThreshold = compilerWarmUpThreshold_;
+ if (JitOptions.forcedDefaultIonWarmUpThreshold.isSome())
+ compilerWarmUpThreshold = JitOptions.forcedDefaultIonWarmUpThreshold.ref();
+ return compilerWarmUpThreshold * inliningWarmUpThresholdFactor_;
+ }
+
+ uint32_t inliningRecompileThreshold() const {
+ return inliningWarmUpThreshold() * inliningRecompileThresholdFactor_;
+ }
+};
+
+class OptimizationLevelInfo
+{
+ private:
+ mozilla::EnumeratedArray<OptimizationLevel, OptimizationLevel::Count, OptimizationInfo> infos_;
+
+ public:
+ OptimizationLevelInfo();
+
+ const OptimizationInfo* get(OptimizationLevel level) const {
+ return &infos_[level];
+ }
+
+ OptimizationLevel nextLevel(OptimizationLevel level) const;
+ OptimizationLevel firstLevel() const;
+ bool isLastLevel(OptimizationLevel level) const;
+ OptimizationLevel levelForScript(JSScript* script, jsbytecode* pc = nullptr) const;
+};
+
+extern OptimizationLevelInfo IonOptimizations;
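+
+// Example (illustrative): optimization passes query their tuning knobs
+// through this shared table, e.g.
+//   const OptimizationInfo* info = IonOptimizations.get(OptimizationLevel::Normal);
+//   if (info->gvnEnabled() && info->licmEnabled()) { /* run GVN and LICM */ }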
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonOptimizationLevels_h */
diff --git a/js/src/jit/IonTypes.h b/js/src/jit/IonTypes.h
new file mode 100644
index 000000000..1236e5fe5
--- /dev/null
+++ b/js/src/jit/IonTypes.h
@@ -0,0 +1,875 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonTypes_h
+#define jit_IonTypes_h
+
+#include "mozilla/HashFunctions.h"
+
+#include <algorithm>
+
+#include "jsfriendapi.h"
+#include "jstypes.h"
+
+#include "js/GCAPI.h"
+#include "js/Value.h"
+#include "vm/String.h"
+
+namespace js {
+namespace jit {
+
+typedef uint32_t RecoverOffset;
+typedef uint32_t SnapshotOffset;
+typedef uint32_t BailoutId;
+
+// The maximum size of any buffer associated with an assembler or code object.
+// This is chosen to not overflow a signed integer, leaving room for an extra
+// bit on offsets.
+static const uint32_t MAX_BUFFER_SIZE = (1 << 30) - 1;
+
+// Maximum number of scripted arg slots.
+static const uint32_t SNAPSHOT_MAX_NARGS = 127;
+
+static const SnapshotOffset INVALID_RECOVER_OFFSET = uint32_t(-1);
+static const SnapshotOffset INVALID_SNAPSHOT_OFFSET = uint32_t(-1);
+
+// Different kinds of bailouts. When extending this enum, make sure to check
+// the bits reserved for bailout kinds in Bailouts.h
+enum BailoutKind
+{
+ // Normal bailouts, that don't need to be handled specially when restarting
+ // in baseline.
+
+ // An inevitable bailout (MBail instruction or type barrier that always bails)
+ Bailout_Inevitable,
+
+ // Bailing out during a VM call. Many possible causes that are hard
+ // to distinguish statically at snapshot construction time.
+ // We just lump them together.
+ Bailout_DuringVMCall,
+
+ // Call to a non-JSFunction (problem for |apply|)
+ Bailout_NonJSFunctionCallee,
+
+ // Dynamic scope chain lookup produced |undefined|
+ Bailout_DynamicNameNotFound,
+
+ // Input string contains 'arguments' or 'eval'
+ Bailout_StringArgumentsEval,
+
+ // Bailout on overflow, but don't immediately invalidate.
+ // Used for abs, sub and LoadUnboxedScalar (when loading a uint32 that
+ // doesn't fit in an int32).
+ Bailout_Overflow,
+
+ // floor, ceiling and round bail if the input is NaN, if the output would be
+ // -0, or if it doesn't fit in the int32 range.
+ Bailout_Round,
+
+ // Non-primitive value used as input for ToDouble, ToInt32, ToString, etc.
+ // For ToInt32, can also mean that input can't be converted without precision
+ // loss (e.g. 5.5).
+ Bailout_NonPrimitiveInput,
+
+ // For ToInt32, would lose precision when converting (e.g. 5.5).
+ Bailout_PrecisionLoss,
+
+ // We tripped a type barrier (object was not in the expected TypeSet)
+ Bailout_TypeBarrierO,
+ // We tripped a type barrier (value was not in the expected TypeSet)
+ Bailout_TypeBarrierV,
+ // We tripped a type monitor (wrote an unexpected type in a property)
+ Bailout_MonitorTypes,
+
+ // We hit a hole in an array.
+ Bailout_Hole,
+
+ // Array access with negative index
+ Bailout_NegativeIndex,
+
+ // Pretty specific case:
+ // - need a type barrier on a property write
+ // - all but one of the observed types have property types that reflect the value
+ // - we need to guard that we're not given an object of that one other type
+ // also used for the unused GuardClass instruction
+ Bailout_ObjectIdentityOrTypeGuard,
+
+ // Unbox expects a given type, bails out if it doesn't get it.
+ Bailout_NonInt32Input,
+ Bailout_NonNumericInput, // unboxing a double works with int32 too
+ Bailout_NonBooleanInput,
+ Bailout_NonObjectInput,
+ Bailout_NonStringInput,
+ Bailout_NonSymbolInput,
+
+ // SIMD Unbox expects a given type, bails out if it doesn't match.
+ Bailout_UnexpectedSimdInput,
+
+ // Atomic operations require shared memory, bail out if the typed array
+ // maps unshared memory.
+ Bailout_NonSharedTypedArrayInput,
+
+ // We hit a |debugger;| statement.
+ Bailout_Debugger,
+
+ // |this| used uninitialized in a derived constructor
+ Bailout_UninitializedThis,
+
+ // Derived constructors must return object or undefined
+ Bailout_BadDerivedConstructorReturn,
+
+ // We hit this code for the first time.
+ Bailout_FirstExecution,
+
+ // END Normal bailouts
+
+ // Bailouts caused by invalid assumptions based on Baseline code.
+ // Causes immediate invalidation.
+
+ // Like Bailout_Overflow, but causes immediate invalidation.
+ Bailout_OverflowInvalidate,
+
+ // Like NonStringInput, but should cause immediate invalidation.
+ // Used for jsop_iternext.
+ Bailout_NonStringInputInvalidate,
+
+ // Used for integer division, multiplication and modulo.
+ // If there's a remainder, bails to return a double.
+ // Can also signal overflow or result of -0.
+ // Can also signal division by 0 (returns inf, a double).
+ Bailout_DoubleOutput,
+
+ // END Invalid assumptions bailouts
+
+
+ // A bailout at the very start of a function indicates that there may be
+ // a type mismatch in the arguments that necessitates a reflow.
+ Bailout_ArgumentCheck,
+
+ // A bailout triggered by a bounds-check failure.
+ Bailout_BoundsCheck,
+ // A bailout triggered by a typed object whose backing buffer was detached.
+ Bailout_Detached,
+
+ // A shape guard based on TI information failed.
+ // (We saw an object whose shape does not match that / any of those observed
+ // by the baseline IC.)
+ Bailout_ShapeGuard,
+
+ // When we're trying to use an uninitialized lexical.
+ Bailout_UninitializedLexical,
+
+ // A bailout to baseline from Ion on exception to handle Debugger hooks.
+ Bailout_IonExceptionDebugMode
+};
+
+inline const char*
+BailoutKindString(BailoutKind kind)
+{
+ switch (kind) {
+ // Normal bailouts.
+ case Bailout_Inevitable:
+ return "Bailout_Inevitable";
+ case Bailout_DuringVMCall:
+ return "Bailout_DuringVMCall";
+ case Bailout_NonJSFunctionCallee:
+ return "Bailout_NonJSFunctionCallee";
+ case Bailout_DynamicNameNotFound:
+ return "Bailout_DynamicNameNotFound";
+ case Bailout_StringArgumentsEval:
+ return "Bailout_StringArgumentsEval";
+ case Bailout_Overflow:
+ return "Bailout_Overflow";
+ case Bailout_Round:
+ return "Bailout_Round";
+ case Bailout_NonPrimitiveInput:
+ return "Bailout_NonPrimitiveInput";
+ case Bailout_PrecisionLoss:
+ return "Bailout_PrecisionLoss";
+ case Bailout_TypeBarrierO:
+ return "Bailout_TypeBarrierO";
+ case Bailout_TypeBarrierV:
+ return "Bailout_TypeBarrierV";
+ case Bailout_MonitorTypes:
+ return "Bailout_MonitorTypes";
+ case Bailout_Hole:
+ return "Bailout_Hole";
+ case Bailout_NegativeIndex:
+ return "Bailout_NegativeIndex";
+ case Bailout_ObjectIdentityOrTypeGuard:
+ return "Bailout_ObjectIdentityOrTypeGuard";
+ case Bailout_NonInt32Input:
+ return "Bailout_NonInt32Input";
+ case Bailout_NonNumericInput:
+ return "Bailout_NonNumericInput";
+ case Bailout_NonBooleanInput:
+ return "Bailout_NonBooleanInput";
+ case Bailout_NonObjectInput:
+ return "Bailout_NonObjectInput";
+ case Bailout_NonStringInput:
+ return "Bailout_NonStringInput";
+ case Bailout_NonSymbolInput:
+ return "Bailout_NonSymbolInput";
+ case Bailout_UnexpectedSimdInput:
+ return "Bailout_UnexpectedSimdInput";
+ case Bailout_NonSharedTypedArrayInput:
+ return "Bailout_NonSharedTypedArrayInput";
+ case Bailout_Debugger:
+ return "Bailout_Debugger";
+ case Bailout_UninitializedThis:
+ return "Bailout_UninitializedThis";
+ case Bailout_BadDerivedConstructorReturn:
+ return "Bailout_BadDerivedConstructorReturn";
+ case Bailout_FirstExecution:
+ return "Bailout_FirstExecution";
+
+ // Bailouts caused by invalid assumptions.
+ case Bailout_OverflowInvalidate:
+ return "Bailout_OverflowInvalidate";
+ case Bailout_NonStringInputInvalidate:
+ return "Bailout_NonStringInputInvalidate";
+ case Bailout_DoubleOutput:
+ return "Bailout_DoubleOutput";
+
+ // Other bailouts.
+ case Bailout_ArgumentCheck:
+ return "Bailout_ArgumentCheck";
+ case Bailout_BoundsCheck:
+ return "Bailout_BoundsCheck";
+ case Bailout_Detached:
+ return "Bailout_Detached";
+ case Bailout_ShapeGuard:
+ return "Bailout_ShapeGuard";
+ case Bailout_UninitializedLexical:
+ return "Bailout_UninitializedLexical";
+ case Bailout_IonExceptionDebugMode:
+ return "Bailout_IonExceptionDebugMode";
+ default:
+ MOZ_CRASH("Invalid BailoutKind");
+ }
+}
+
+static const uint32_t ELEMENT_TYPE_BITS = 5;
+static const uint32_t ELEMENT_TYPE_SHIFT = 0;
+static const uint32_t ELEMENT_TYPE_MASK = (1 << ELEMENT_TYPE_BITS) - 1;
+static const uint32_t VECTOR_SCALE_BITS = 3;
+static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
+static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
+
+class SimdConstant {
+ public:
+ enum Type {
+ Int8x16,
+ Int16x8,
+ Int32x4,
+ Float32x4,
+ Undefined = -1
+ };
+
+ typedef int8_t I8x16[16];
+ typedef int16_t I16x8[8];
+ typedef int32_t I32x4[4];
+ typedef float F32x4[4];
+
+ private:
+ Type type_;
+ union {
+ I8x16 i8x16;
+ I16x8 i16x8;
+ I32x4 i32x4;
+ F32x4 f32x4;
+ } u;
+
+ bool defined() const {
+ return type_ != Undefined;
+ }
+
+ public:
+ // Doesn't have a default constructor, as it would prevent it from being
+ // included in unions.
+
+ static SimdConstant CreateX16(const int8_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int8x16;
+ memcpy(cst.u.i8x16, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX16(int8_t v) {
+ SimdConstant cst;
+ cst.type_ = Int8x16;
+ std::fill_n(cst.u.i8x16, 16, v);
+ return cst;
+ }
+ static SimdConstant CreateX8(const int16_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int16x8;
+ memcpy(cst.u.i16x8, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX8(int16_t v) {
+ SimdConstant cst;
+ cst.type_ = Int16x8;
+ std::fill_n(cst.u.i16x8, 8, v);
+ return cst;
+ }
+ static SimdConstant CreateX4(const int32_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int32x4;
+ memcpy(cst.u.i32x4, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX4(int32_t v) {
+ SimdConstant cst;
+ cst.type_ = Int32x4;
+ std::fill_n(cst.u.i32x4, 4, v);
+ return cst;
+ }
+ static SimdConstant CreateX4(const float* array) {
+ SimdConstant cst;
+ cst.type_ = Float32x4;
+ memcpy(cst.u.f32x4, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX4(float v) {
+ SimdConstant cst;
+ cst.type_ = Float32x4;
+ std::fill_n(cst.u.f32x4, 4, v);
+ return cst;
+ }
+
+ // Overloads for use by templates.
+ static SimdConstant CreateSimd128(const int8_t* array) { return CreateX16(array); }
+ static SimdConstant CreateSimd128(const int16_t* array) { return CreateX8(array); }
+ static SimdConstant CreateSimd128(const int32_t* array) { return CreateX4(array); }
+ static SimdConstant CreateSimd128(const float* array) { return CreateX4(array); }
+
+ Type type() const {
+ MOZ_ASSERT(defined());
+ return type_;
+ }
+
+ // Get the raw bytes of the constant.
+ const void* bytes() const {
+ return u.i8x16;
+ }
+
+ const I8x16& asInt8x16() const {
+ MOZ_ASSERT(defined() && type_ == Int8x16);
+ return u.i8x16;
+ }
+
+ const I16x8& asInt16x8() const {
+ MOZ_ASSERT(defined() && type_ == Int16x8);
+ return u.i16x8;
+ }
+
+ const I32x4& asInt32x4() const {
+ MOZ_ASSERT(defined() && type_ == Int32x4);
+ return u.i32x4;
+ }
+
+ const F32x4& asFloat32x4() const {
+ MOZ_ASSERT(defined() && type_ == Float32x4);
+ return u.f32x4;
+ }
+
+ bool operator==(const SimdConstant& rhs) const {
+ MOZ_ASSERT(defined() && rhs.defined());
+ if (type() != rhs.type())
+ return false;
+ // Takes negative zero into account, as it's a bit comparison.
+ return memcmp(&u, &rhs.u, sizeof(u)) == 0;
+ }
+ bool operator!=(const SimdConstant& rhs) const {
+ return !operator==(rhs);
+ }
+
+ // SimdConstant is a HashPolicy
+ typedef SimdConstant Lookup;
+ static HashNumber hash(const SimdConstant& val) {
+ uint32_t hash = mozilla::HashBytes(&val.u, sizeof(val.u));
+ return mozilla::AddToHash(hash, val.type_);
+ }
+ static bool match(const SimdConstant& lhs, const SimdConstant& rhs) {
+ return lhs == rhs;
+ }
+};
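+
+// Example (illustrative): building and comparing 128-bit constants:
+//   int32_t lanes[4] = { 1, 2, 3, 4 };
+//   SimdConstant a = SimdConstant::CreateX4(lanes);
+//   SimdConstant b = SimdConstant::SplatX4(int32_t(0));
+//   MOZ_ASSERT(a != b);   // bitwise comparison of the 16 payload bytes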
+
+// The ordering of this enumeration is important: Anything < Value is a
+// specialized type. Furthermore, anything < String has trivial conversion to
+// a number.
+enum class MIRType
+{
+ Undefined,
+ Null,
+ Boolean,
+ Int32,
+ Int64,
+ Double,
+ Float32,
+ // Types above have trivial conversion to a number.
+ String,
+ Symbol,
+ // Types above are primitive (including undefined and null).
+ Object,
+ MagicOptimizedArguments, // JS_OPTIMIZED_ARGUMENTS magic value.
+ MagicOptimizedOut, // JS_OPTIMIZED_OUT magic value.
+ MagicHole, // JS_ELEMENTS_HOLE magic value.
+ MagicIsConstructing, // JS_IS_CONSTRUCTING magic value.
+ MagicUninitializedLexical, // JS_UNINITIALIZED_LEXICAL magic value.
+ // Types above are specialized.
+ Value,
+ SinCosDouble, // Optimizing a sin/cos to sincos.
+ ObjectOrNull,
+ None, // Invalid, used as a placeholder.
+ Slots, // A slots vector
+ Elements, // An elements vector
+ Pointer, // An opaque pointer that receives no special treatment
+ Shape, // A Shape pointer.
+ ObjectGroup, // An ObjectGroup pointer.
+ Last = ObjectGroup,
+ // Representing both SIMD.IntBxN and SIMD.UintBxN.
+ Int8x16 = Int32 | (4 << VECTOR_SCALE_SHIFT),
+ Int16x8 = Int32 | (3 << VECTOR_SCALE_SHIFT),
+ Int32x4 = Int32 | (2 << VECTOR_SCALE_SHIFT),
+ Float32x4 = Float32 | (2 << VECTOR_SCALE_SHIFT),
+ Bool8x16 = Boolean | (4 << VECTOR_SCALE_SHIFT),
+ Bool16x8 = Boolean | (3 << VECTOR_SCALE_SHIFT),
+ Bool32x4 = Boolean | (2 << VECTOR_SCALE_SHIFT),
+ Doublex2 = Double | (1 << VECTOR_SCALE_SHIFT)
+};
+
+static inline bool
+IsSimdType(MIRType type)
+{
+ return ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
+}
+
+// Returns the number of vector elements (here called the "length") for a given
+// SIMD kind. It is the Y part of the name "Foo x Y".
+static inline unsigned
+SimdTypeToLength(MIRType type)
+{
+ MOZ_ASSERT(IsSimdType(type));
+ return 1 << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
+}
+
+// Get the type of the individual lanes in a SIMD type.
+// For example, Int32x4 -> Int32, Float32x4 -> Float32 etc.
+static inline MIRType
+SimdTypeToLaneType(MIRType type)
+{
+ MOZ_ASSERT(IsSimdType(type));
+ static_assert(unsigned(MIRType::Last) <= ELEMENT_TYPE_MASK,
+ "ELEMENT_TYPE_MASK should be larger than the last MIRType");
+ return MIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
+}
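+
+// Worked example (illustrative): MIRType::Int32x4 is encoded as
+// Int32 | (2 << VECTOR_SCALE_SHIFT), so SimdTypeToLength(MIRType::Int32x4)
+// returns 1 << 2 == 4 and SimdTypeToLaneType(MIRType::Int32x4) returns
+// MIRType::Int32.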
+
+// Get the type expected when inserting a lane into a SIMD type.
+// This is the argument type expected by the MSimdValue constructors as well as
+// MSimdSplat and MSimdInsertElement.
+static inline MIRType
+SimdTypeToLaneArgumentType(MIRType type)
+{
+ MIRType laneType = SimdTypeToLaneType(type);
+
+ // Boolean lanes should be pre-converted to an Int32 with the values 0 or -1.
+ // All other lane types are inserted directly.
+ return laneType == MIRType::Boolean ? MIRType::Int32 : laneType;
+}
+
+static inline MIRType
+MIRTypeFromValueType(JSValueType type)
+{
+ // This function does not deal with magic types. Magic constants should be
+ // filtered out in MIRTypeFromValue.
+ switch (type) {
+ case JSVAL_TYPE_DOUBLE:
+ return MIRType::Double;
+ case JSVAL_TYPE_INT32:
+ return MIRType::Int32;
+ case JSVAL_TYPE_UNDEFINED:
+ return MIRType::Undefined;
+ case JSVAL_TYPE_STRING:
+ return MIRType::String;
+ case JSVAL_TYPE_SYMBOL:
+ return MIRType::Symbol;
+ case JSVAL_TYPE_BOOLEAN:
+ return MIRType::Boolean;
+ case JSVAL_TYPE_NULL:
+ return MIRType::Null;
+ case JSVAL_TYPE_OBJECT:
+ return MIRType::Object;
+ case JSVAL_TYPE_UNKNOWN:
+ return MIRType::Value;
+ default:
+ MOZ_CRASH("unexpected jsval type");
+ }
+}
+
+static inline JSValueType
+ValueTypeFromMIRType(MIRType type)
+{
+ switch (type) {
+ case MIRType::Undefined:
+ return JSVAL_TYPE_UNDEFINED;
+ case MIRType::Null:
+ return JSVAL_TYPE_NULL;
+ case MIRType::Boolean:
+ return JSVAL_TYPE_BOOLEAN;
+ case MIRType::Int32:
+ return JSVAL_TYPE_INT32;
+ case MIRType::Float32: // Fall through, there's no JSVAL for Float32
+ case MIRType::Double:
+ return JSVAL_TYPE_DOUBLE;
+ case MIRType::String:
+ return JSVAL_TYPE_STRING;
+ case MIRType::Symbol:
+ return JSVAL_TYPE_SYMBOL;
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicHole:
+ case MIRType::MagicIsConstructing:
+ case MIRType::MagicUninitializedLexical:
+ return JSVAL_TYPE_MAGIC;
+ default:
+ MOZ_ASSERT(type == MIRType::Object);
+ return JSVAL_TYPE_OBJECT;
+ }
+}
+
+static inline JSValueTag
+MIRTypeToTag(MIRType type)
+{
+ return JSVAL_TYPE_TO_TAG(ValueTypeFromMIRType(type));
+}
+
+static inline const char*
+StringFromMIRType(MIRType type)
+{
+ switch (type) {
+ case MIRType::Undefined:
+ return "Undefined";
+ case MIRType::Null:
+ return "Null";
+ case MIRType::Boolean:
+ return "Bool";
+ case MIRType::Int32:
+ return "Int32";
+ case MIRType::Int64:
+ return "Int64";
+ case MIRType::Double:
+ return "Double";
+ case MIRType::Float32:
+ return "Float32";
+ case MIRType::String:
+ return "String";
+ case MIRType::Symbol:
+ return "Symbol";
+ case MIRType::Object:
+ return "Object";
+ case MIRType::MagicOptimizedArguments:
+ return "MagicOptimizedArguments";
+ case MIRType::MagicOptimizedOut:
+ return "MagicOptimizedOut";
+ case MIRType::MagicHole:
+ return "MagicHole";
+ case MIRType::MagicIsConstructing:
+ return "MagicIsConstructing";
+ case MIRType::MagicUninitializedLexical:
+ return "MagicUninitializedLexical";
+ case MIRType::Value:
+ return "Value";
+ case MIRType::SinCosDouble:
+ return "SinCosDouble";
+ case MIRType::ObjectOrNull:
+ return "ObjectOrNull";
+ case MIRType::None:
+ return "None";
+ case MIRType::Slots:
+ return "Slots";
+ case MIRType::Elements:
+ return "Elements";
+ case MIRType::Pointer:
+ return "Pointer";
+ case MIRType::Shape:
+ return "Shape";
+ case MIRType::ObjectGroup:
+ return "ObjectGroup";
+ case MIRType::Int32x4:
+ return "Int32x4";
+ case MIRType::Int16x8:
+ return "Int16x8";
+ case MIRType::Int8x16:
+ return "Int8x16";
+ case MIRType::Float32x4:
+ return "Float32x4";
+ case MIRType::Bool32x4:
+ return "Bool32x4";
+ case MIRType::Bool16x8:
+ return "Bool16x8";
+ case MIRType::Bool8x16:
+ return "Bool8x16";
+ case MIRType::Doublex2:
+ return "Doublex2";
+ }
+ MOZ_CRASH("Unknown MIRType.");
+}
+
+static inline bool
+IsIntType(MIRType type)
+{
+ return type == MIRType::Int32 ||
+ type == MIRType::Int64;
+}
+
+static inline bool
+IsNumberType(MIRType type)
+{
+ return type == MIRType::Int32 ||
+ type == MIRType::Double ||
+ type == MIRType::Float32 ||
+ type == MIRType::Int64;
+}
+
+static inline bool
+IsTypeRepresentableAsDouble(MIRType type)
+{
+ return type == MIRType::Int32 ||
+ type == MIRType::Double ||
+ type == MIRType::Float32;
+}
+
+static inline bool
+IsFloatType(MIRType type)
+{
+ return type == MIRType::Int32 || type == MIRType::Float32;
+}
+
+static inline bool
+IsFloatingPointType(MIRType type)
+{
+ return type == MIRType::Double || type == MIRType::Float32;
+}
+
+static inline bool
+IsNullOrUndefined(MIRType type)
+{
+ return type == MIRType::Null || type == MIRType::Undefined;
+}
+
+static inline bool
+IsFloatingPointSimdType(MIRType type)
+{
+ return type == MIRType::Float32x4;
+}
+
+static inline bool
+IsIntegerSimdType(MIRType type)
+{
+ return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Int32;
+}
+
+static inline bool
+IsBooleanSimdType(MIRType type)
+{
+ return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Boolean;
+}
+
+static inline bool
+IsMagicType(MIRType type)
+{
+ return type == MIRType::MagicHole ||
+ type == MIRType::MagicOptimizedOut ||
+ type == MIRType::MagicIsConstructing ||
+ type == MIRType::MagicOptimizedArguments ||
+ type == MIRType::MagicUninitializedLexical;
+}
+
+static inline MIRType
+ScalarTypeToMIRType(Scalar::Type type)
+{
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ return MIRType::Int32;
+ case Scalar::Int64:
+ return MIRType::Int64;
+ case Scalar::Float32:
+ return MIRType::Float32;
+ case Scalar::Float64:
+ return MIRType::Double;
+ case Scalar::Float32x4:
+ return MIRType::Float32x4;
+ case Scalar::Int8x16:
+ return MIRType::Int8x16;
+ case Scalar::Int16x8:
+ return MIRType::Int16x8;
+ case Scalar::Int32x4:
+ return MIRType::Int32x4;
+ case Scalar::MaxTypedArrayViewType:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD kind");
+}
+
+static inline unsigned
+ScalarTypeToLength(Scalar::Type type)
+{
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Int64:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Uint8Clamped:
+ return 1;
+ case Scalar::Float32x4:
+ case Scalar::Int32x4:
+ return 4;
+ case Scalar::Int16x8:
+ return 8;
+ case Scalar::Int8x16:
+ return 16;
+ case Scalar::MaxTypedArrayViewType:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD kind");
+}
+
+static inline const char*
+PropertyNameToExtraName(PropertyName* name)
+{
+ JS::AutoCheckCannotGC nogc;
+ if (!name->hasLatin1Chars())
+ return nullptr;
+ return reinterpret_cast<const char *>(name->latin1Chars(nogc));
+}
+
+#ifdef DEBUG
+
+// Track the pipeline of opcodes which has produced a snapshot.
+#define TRACK_SNAPSHOTS 1
+
+// Make sure registers are not modified between an instruction and
+// its OsiPoint.
+#define CHECK_OSIPOINT_REGISTERS 1
+
+#endif // DEBUG
+
+enum {
+ ArgType_General = 0x1,
+ ArgType_Double = 0x2,
+ ArgType_Float32 = 0x3,
+ ArgType_Int64 = 0x4,
+
+ RetType_Shift = 0x0,
+ ArgType_Shift = 0x3,
+ ArgType_Mask = 0x7
+};
+
+enum ABIFunctionType
+{
+ // VM functions that take 0-9 non-double arguments
+ // and return a non-double value.
+ Args_General0 = ArgType_General << RetType_Shift,
+ Args_General1 = Args_General0 | (ArgType_General << (ArgType_Shift * 1)),
+ Args_General2 = Args_General1 | (ArgType_General << (ArgType_Shift * 2)),
+ Args_General3 = Args_General2 | (ArgType_General << (ArgType_Shift * 3)),
+ Args_General4 = Args_General3 | (ArgType_General << (ArgType_Shift * 4)),
+ Args_General5 = Args_General4 | (ArgType_General << (ArgType_Shift * 5)),
+ Args_General6 = Args_General5 | (ArgType_General << (ArgType_Shift * 6)),
+ Args_General7 = Args_General6 | (ArgType_General << (ArgType_Shift * 7)),
+ Args_General8 = Args_General7 | (ArgType_General << (ArgType_Shift * 8)),
+
+ // int64 f(double)
+ Args_Int64_Double = (ArgType_Int64 << RetType_Shift) | (ArgType_Double << ArgType_Shift),
+
+ // double f()
+ Args_Double_None = ArgType_Double << RetType_Shift,
+
+ // int f(double)
+ Args_Int_Double = Args_General0 | (ArgType_Double << ArgType_Shift),
+
+ // float f(float)
+ Args_Float32_Float32 = (ArgType_Float32 << RetType_Shift) | (ArgType_Float32 << ArgType_Shift),
+
+ // double f(double)
+ Args_Double_Double = Args_Double_None | (ArgType_Double << ArgType_Shift),
+
+ // double f(int)
+ Args_Double_Int = Args_Double_None | (ArgType_General << ArgType_Shift),
+
+ // double f(int, int)
+ Args_Double_IntInt = Args_Double_Int | (ArgType_General << (ArgType_Shift * 2)),
+
+ // double f(double, int)
+ Args_Double_DoubleInt = Args_Double_None |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_Double << (ArgType_Shift * 2)),
+
+ // double f(double, double)
+ Args_Double_DoubleDouble = Args_Double_Double | (ArgType_Double << (ArgType_Shift * 2)),
+
+ // double f(int, double)
+ Args_Double_IntDouble = Args_Double_None |
+ (ArgType_Double << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)),
+
+ // int f(int, double)
+ Args_Int_IntDouble = Args_General0 |
+ (ArgType_Double << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)),
+
+ // double f(double, double, double)
+ Args_Double_DoubleDoubleDouble = Args_Double_DoubleDouble | (ArgType_Double << (ArgType_Shift * 3)),
+
+ // double f(double, double, double, double)
+ Args_Double_DoubleDoubleDoubleDouble = Args_Double_DoubleDoubleDouble | (ArgType_Double << (ArgType_Shift * 4)),
+
+ // int f(double, int, int)
+ Args_Int_DoubleIntInt = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_Double << (ArgType_Shift * 3)),
+
+ // int f(int, double, int, int)
+ Args_Int_IntDoubleIntInt = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_Double << (ArgType_Shift * 3)) |
+ (ArgType_General << (ArgType_Shift * 4))
+
+};
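+
+// Worked example (illustrative): the return type occupies bits [2:0] and each
+// argument occupies the next 3-bit field, with the last parameter in the
+// lowest argument field. For Args_Double_IntDouble, i.e. double f(int, double):
+//   (ArgType_Double << RetType_Shift)           // return type, bits [2:0]
+//   | (ArgType_Double << (ArgType_Shift * 1))   // trailing double argument
+//   | (ArgType_General << (ArgType_Shift * 2))  // leading int argument
+//   == 0x2 | 0x10 | 0x40 == 0x52.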
+
+enum class BarrierKind : uint32_t {
+ // No barrier is needed.
+ NoBarrier,
+
+ // The barrier only has to check the value's type tag is in the TypeSet.
+ // Specific object types don't have to be checked.
+ TypeTagOnly,
+
+ // Check if the value is in the TypeSet, including the object type if it's
+ // an object.
+ TypeSet
+};
+
+enum ReprotectCode { Reprotect = true, DontReprotect = false };
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonTypes_h */
diff --git a/js/src/jit/JSONSpewer.cpp b/js/src/jit/JSONSpewer.cpp
new file mode 100644
index 000000000..39d211806
--- /dev/null
+++ b/js/src/jit/JSONSpewer.cpp
@@ -0,0 +1,410 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_JITSPEW
+
+#include "jit/JSONSpewer.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include <stdarg.h>
+
+#include "jit/BacktrackingAllocator.h"
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/RangeAnalysis.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+JSONSpewer::indent()
+{
+ MOZ_ASSERT(indentLevel_ >= 0);
+ out_.printf("\n");
+ for (int i = 0; i < indentLevel_; i++)
+ out_.printf(" ");
+}
+
+void
+JSONSpewer::property(const char* name)
+{
+ if (!first_)
+ out_.printf(",");
+ indent();
+ out_.printf("\"%s\":", name);
+ first_ = false;
+}
+
+void
+JSONSpewer::beginObject()
+{
+ if (!first_) {
+ out_.printf(",");
+ indent();
+ }
+ out_.printf("{");
+ indentLevel_++;
+ first_ = true;
+}
+
+void
+JSONSpewer::beginObjectProperty(const char* name)
+{
+ property(name);
+ out_.printf("{");
+ indentLevel_++;
+ first_ = true;
+}
+
+void
+JSONSpewer::beginListProperty(const char* name)
+{
+ property(name);
+ out_.printf("[");
+ first_ = true;
+}
+
+void
+JSONSpewer::beginStringProperty(const char* name)
+{
+ property(name);
+ out_.printf("\"");
+}
+
+void
+JSONSpewer::endStringProperty()
+{
+ out_.printf("\"");
+}
+
+void
+JSONSpewer::stringProperty(const char* name, const char* format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+
+ beginStringProperty(name);
+ out_.vprintf(format, ap);
+ endStringProperty();
+
+ va_end(ap);
+}
+
+void
+JSONSpewer::stringValue(const char* format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+
+ if (!first_)
+ out_.printf(",");
+ out_.printf("\"");
+ out_.vprintf(format, ap);
+ out_.printf("\"");
+
+ va_end(ap);
+ first_ = false;
+}
+
+void
+JSONSpewer::integerProperty(const char* name, int value)
+{
+ property(name);
+ out_.printf("%d", value);
+}
+
+void
+JSONSpewer::integerValue(int value)
+{
+ if (!first_)
+ out_.printf(",");
+ out_.printf("%d", value);
+ first_ = false;
+}
+
+void
+JSONSpewer::endObject()
+{
+ indentLevel_--;
+ indent();
+ out_.printf("}");
+ first_ = false;
+}
+
+void
+JSONSpewer::endList()
+{
+ out_.printf("]");
+ first_ = false;
+}
+
+void
+JSONSpewer::beginFunction(JSScript* script)
+{
+ beginObject();
+ if (script)
+ stringProperty("name", "%s:%" PRIuSIZE, script->filename(), script->lineno());
+ else
+ stringProperty("name", "wasm compilation");
+ beginListProperty("passes");
+}
+
+void
+JSONSpewer::beginPass(const char* pass)
+{
+ beginObject();
+ stringProperty("name", "%s", pass);
+}
+
+void
+JSONSpewer::spewMResumePoint(MResumePoint* rp)
+{
+ if (!rp)
+ return;
+
+ beginObjectProperty("resumePoint");
+
+ if (rp->caller())
+ integerProperty("caller", rp->caller()->block()->id());
+
+ property("mode");
+ switch (rp->mode()) {
+ case MResumePoint::ResumeAt:
+ out_.printf("\"At\"");
+ break;
+ case MResumePoint::ResumeAfter:
+ out_.printf("\"After\"");
+ break;
+ case MResumePoint::Outer:
+ out_.printf("\"Outer\"");
+ break;
+ }
+
+ beginListProperty("operands");
+ for (MResumePoint* iter = rp; iter; iter = iter->caller()) {
+ for (int i = iter->numOperands() - 1; i >= 0; i--)
+ integerValue(iter->getOperand(i)->id());
+ if (iter->caller())
+ stringValue("|");
+ }
+ endList();
+
+ endObject();
+}
+
+void
+JSONSpewer::spewMDef(MDefinition* def)
+{
+ beginObject();
+
+ integerProperty("id", def->id());
+
+ property("opcode");
+ out_.printf("\"");
+ def->printOpcode(out_);
+ out_.printf("\"");
+
+ beginListProperty("attributes");
+#define OUTPUT_ATTRIBUTE(X) do{ if(def->is##X()) stringValue(#X); } while(0);
+ MIR_FLAG_LIST(OUTPUT_ATTRIBUTE);
+#undef OUTPUT_ATTRIBUTE
+ endList();
+
+ beginListProperty("inputs");
+ for (size_t i = 0, e = def->numOperands(); i < e; i++)
+ integerValue(def->getOperand(i)->id());
+ endList();
+
+ beginListProperty("uses");
+ for (MUseDefIterator use(def); use; use++)
+ integerValue(use.def()->id());
+ endList();
+
+ if (!def->isLowered()) {
+ beginListProperty("memInputs");
+ if (def->dependency())
+ integerValue(def->dependency()->id());
+ endList();
+ }
+
+ bool isTruncated = false;
+ if (def->isAdd() || def->isSub() || def->isMod() || def->isMul() || def->isDiv())
+ isTruncated = static_cast<MBinaryArithInstruction*>(def)->isTruncated();
+
+ if (def->type() != MIRType::None && def->range()) {
+ beginStringProperty("type");
+ def->range()->dump(out_);
+ out_.printf(" : %s%s", StringFromMIRType(def->type()), (isTruncated ? " (t)" : ""));
+ endStringProperty();
+ } else {
+ stringProperty("type", "%s%s", StringFromMIRType(def->type()), (isTruncated ? " (t)" : ""));
+ }
+
+ if (def->isInstruction()) {
+ if (MResumePoint* rp = def->toInstruction()->resumePoint())
+ spewMResumePoint(rp);
+ }
+
+ endObject();
+}
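+
+// Illustrative sketch of the JSON emitted per definition (exact contents vary
+// with the instruction), e.g.:
+//   {"id":5, "opcode":"add", "attributes":[...], "inputs":[3,4],
+//    "uses":[7], "memInputs":[], "type":"Int32"}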
+
+void
+JSONSpewer::spewMIR(MIRGraph* mir)
+{
+ beginObjectProperty("mir");
+ beginListProperty("blocks");
+
+ for (MBasicBlockIterator block(mir->begin()); block != mir->end(); block++) {
+ beginObject();
+
+ integerProperty("number", block->id());
+ if (block->getHitState() == MBasicBlock::HitState::Count)
+ integerProperty("count", block->getHitCount());
+
+ beginListProperty("attributes");
+ if (block->isLoopBackedge())
+ stringValue("backedge");
+ if (block->isLoopHeader())
+ stringValue("loopheader");
+ if (block->isSplitEdge())
+ stringValue("splitedge");
+ endList();
+
+ beginListProperty("predecessors");
+ for (size_t i = 0; i < block->numPredecessors(); i++)
+ integerValue(block->getPredecessor(i)->id());
+ endList();
+
+ beginListProperty("successors");
+ for (size_t i = 0; i < block->numSuccessors(); i++)
+ integerValue(block->getSuccessor(i)->id());
+ endList();
+
+ beginListProperty("instructions");
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++)
+ spewMDef(*phi);
+ for (MInstructionIterator i(block->begin()); i != block->end(); i++)
+ spewMDef(*i);
+ endList();
+
+ spewMResumePoint(block->entryResumePoint());
+
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void
+JSONSpewer::spewLIns(LNode* ins)
+{
+ beginObject();
+
+ integerProperty("id", ins->id());
+
+ property("opcode");
+ out_.printf("\"");
+ ins->dump(out_);
+ out_.printf("\"");
+
+ beginListProperty("defs");
+ for (size_t i = 0; i < ins->numDefs(); i++)
+ integerValue(ins->getDef(i)->virtualRegister());
+ endList();
+
+ endObject();
+}
+
+void
+JSONSpewer::spewLIR(MIRGraph* mir)
+{
+ beginObjectProperty("lir");
+ beginListProperty("blocks");
+
+ for (MBasicBlockIterator i(mir->begin()); i != mir->end(); i++) {
+ LBlock* block = i->lir();
+ if (!block)
+ continue;
+
+ beginObject();
+ integerProperty("number", i->id());
+
+ beginListProperty("instructions");
+ for (size_t p = 0; p < block->numPhis(); p++)
+ spewLIns(block->getPhi(p));
+ for (LInstructionIterator ins(block->begin()); ins != block->end(); ins++)
+ spewLIns(*ins);
+ endList();
+
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void
+JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
+{
+ beginObjectProperty("ranges");
+ beginListProperty("blocks");
+
+ for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
+ beginObject();
+ integerProperty("number", bno);
+ beginListProperty("vregs");
+
+ LBlock* lir = regalloc->graph.getBlock(bno);
+ for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
+ for (size_t k = 0; k < ins->numDefs(); k++) {
+ uint32_t id = ins->getDef(k)->virtualRegister();
+ VirtualRegister* vreg = &regalloc->vregs[id];
+
+ beginObject();
+ integerProperty("vreg", id);
+ beginListProperty("ranges");
+
+ for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ beginObject();
+ property("allocation");
+ out_.printf("\"%s\"", range->bundle()->allocation().toString().get());
+ integerProperty("start", range->from().bits());
+ integerProperty("end", range->to().bits());
+ endObject();
+ }
+
+ endList();
+ endObject();
+ }
+ }
+
+ endList();
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void
+JSONSpewer::endPass()
+{
+ endObject();
+}
+
+void
+JSONSpewer::endFunction()
+{
+ endList();
+ endObject();
+}
+
+#endif /* JS_JITSPEW */
diff --git a/js/src/jit/JSONSpewer.h b/js/src/jit/JSONSpewer.h
new file mode 100644
index 000000000..02f449c7a
--- /dev/null
+++ b/js/src/jit/JSONSpewer.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JSONSpewer_h
+#define jit_JSONSpewer_h
+
+#ifdef JS_JITSPEW
+
+#include <stdio.h>
+
+#include "js/TypeDecls.h"
+#include "vm/Printer.h"
+
+namespace js {
+namespace jit {
+
+class BacktrackingAllocator;
+class MDefinition;
+class MIRGraph;
+class MResumePoint;
+class LNode;
+
+class JSONSpewer
+{
+ private:
+ int indentLevel_;
+ bool first_;
+ GenericPrinter& out_;
+
+ void indent();
+
+ void property(const char* name);
+ void beginObject();
+ void beginObjectProperty(const char* name);
+ void beginListProperty(const char* name);
+ void stringValue(const char* format, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void stringProperty(const char* name, const char* format, ...) MOZ_FORMAT_PRINTF(3, 4);
+ void beginStringProperty(const char* name);
+ void endStringProperty();
+ void integerValue(int value);
+ void integerProperty(const char* name, int value);
+ void endObject();
+ void endList();
+
+ public:
+ explicit JSONSpewer(GenericPrinter& out)
+ : indentLevel_(0),
+ first_(true),
+ out_(out)
+ { }
+
+ void beginFunction(JSScript* script);
+ void beginPass(const char* pass);
+ void spewMDef(MDefinition* def);
+ void spewMResumePoint(MResumePoint* rp);
+ void spewMIR(MIRGraph* mir);
+ void spewLIns(LNode* ins);
+ void spewLIR(MIRGraph* mir);
+ void spewRanges(BacktrackingAllocator* regalloc);
+ void endPass();
+ void endFunction();
+};
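+
+// Example (illustrative): given a GenericPrinter |out| and a compilation's
+// script and MIR graph, a typical spewing sequence looks like:
+//   JSONSpewer spewer(out);
+//   spewer.beginFunction(script);
+//   spewer.beginPass("Alias analysis");
+//   spewer.spewMIR(mir);
+//   spewer.endPass();
+//   spewer.endFunction();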
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_JITSPEW */
+
+#endif /* jit_JSONSpewer_h */
diff --git a/js/src/jit/JitAllocPolicy.h b/js/src/jit/JitAllocPolicy.h
new file mode 100644
index 000000000..bf1629290
--- /dev/null
+++ b/js/src/jit/JitAllocPolicy.h
@@ -0,0 +1,210 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitAllocPolicy_h
+#define jit_JitAllocPolicy_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/GuardObjects.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jscntxt.h"
+
+#include "ds/LifoAlloc.h"
+#include "jit/InlineList.h"
+#include "jit/Ion.h"
+
+namespace js {
+namespace jit {
+
+class TempAllocator
+{
+ LifoAllocScope lifoScope_;
+
+ public:
+ // Most infallible JIT allocations are small, so we use a ballast of 16
+ // KiB. And with a ballast of 16 KiB, a chunk size of 32 KiB works well,
+ // because TempAllocators with a peak allocation size of less than 16 KiB
+ // (which is most of them) only have to allocate a single chunk.
+ static const size_t BallastSize; // 16 KiB
+ static const size_t PreferredLifoChunkSize; // 32 KiB
+
+ explicit TempAllocator(LifoAlloc* lifoAlloc)
+ : lifoScope_(lifoAlloc)
+ {
+ lifoAlloc->setAsInfallibleByDefault();
+ }
+
+ void* allocateInfallible(size_t bytes)
+ {
+ return lifoScope_.alloc().allocInfallible(bytes);
+ }
+
+ MOZ_MUST_USE void* allocate(size_t bytes)
+ {
+ LifoAlloc::AutoFallibleScope fallibleAllocator(lifoAlloc());
+ void* p = lifoScope_.alloc().alloc(bytes);
+ if (!ensureBallast())
+ return nullptr;
+ return p;
+ }
+
+ template <typename T>
+ MOZ_MUST_USE T* allocateArray(size_t n)
+ {
+ LifoAlloc::AutoFallibleScope fallibleAllocator(lifoAlloc());
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(n, &bytes)))
+ return nullptr;
+ T* p = static_cast<T*>(lifoScope_.alloc().alloc(bytes));
+ if (MOZ_UNLIKELY(!ensureBallast()))
+ return nullptr;
+ return p;
+ }
+
+ // View this allocator as a fallible allocator.
+ struct Fallible { TempAllocator& alloc; };
+ Fallible fallible() { return { *this }; }
+
+ LifoAlloc* lifoAlloc() {
+ return &lifoScope_.alloc();
+ }
+
+ MOZ_MUST_USE bool ensureBallast() {
+ JS_OOM_POSSIBLY_FAIL_BOOL();
+ return lifoScope_.alloc().ensureUnusedApproximate(BallastSize);
+ }
+};
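+
+// Example (illustrative): fallible vs. infallible allocation from the
+// context's LifoAlloc:
+//   TempAllocator alloc(&cx->tempLifoAlloc());
+//   void* p = alloc.allocate(128);           // may return nullptr on OOM
+//   void* q = alloc.allocateInfallible(64);  // never returns nullptr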
+
+class JitAllocPolicy
+{
+ TempAllocator& alloc_;
+
+ public:
+ MOZ_IMPLICIT JitAllocPolicy(TempAllocator& alloc)
+ : alloc_(alloc)
+ {}
+ template <typename T>
+ T* maybe_pod_malloc(size_t numElems) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes)))
+ return nullptr;
+ return static_cast<T*>(alloc_.allocate(bytes));
+ }
+ template <typename T>
+ T* maybe_pod_calloc(size_t numElems) {
+ T* p = maybe_pod_malloc<T>(numElems);
+ if (MOZ_LIKELY(p))
+ memset(p, 0, numElems * sizeof(T));
+ return p;
+ }
+ template <typename T>
+ T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ T* n = pod_malloc<T>(newSize);
+ if (MOZ_UNLIKELY(!n))
+ return n;
+ MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
+ memcpy(n, p, Min(oldSize * sizeof(T), newSize * sizeof(T)));
+ return n;
+ }
+ template <typename T>
+ T* pod_malloc(size_t numElems) {
+ return maybe_pod_malloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_calloc(size_t numElems) {
+ return maybe_pod_calloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_realloc(T* ptr, size_t oldSize, size_t newSize) {
+ return maybe_pod_realloc<T>(ptr, oldSize, newSize);
+ }
+ void free_(void* p) {
+ }
+ void reportAllocOverflow() const {
+ }
+ MOZ_MUST_USE bool checkSimulatedOOM() const {
+ return !js::oom::ShouldFailWithOOM();
+ }
+};
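+
+// Example (illustrative): JitAllocPolicy is meant to be the allocation policy
+// of containers that live only for the duration of one compilation, e.g.
+//   js::Vector<MDefinition*, 8, JitAllocPolicy> worklist(alloc);
+// where |alloc| is the compilation's TempAllocator.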
+
+class AutoJitContextAlloc
+{
+ TempAllocator tempAlloc_;
+ JitContext* jcx_;
+ TempAllocator* prevAlloc_;
+
+ public:
+ explicit AutoJitContextAlloc(JSContext* cx)
+ : tempAlloc_(&cx->tempLifoAlloc()),
+ jcx_(GetJitContext()),
+ prevAlloc_(jcx_->temp)
+ {
+ jcx_->temp = &tempAlloc_;
+ }
+
+ ~AutoJitContextAlloc() {
+ MOZ_ASSERT(jcx_->temp == &tempAlloc_);
+ jcx_->temp = prevAlloc_;
+ }
+};
+
+struct TempObject
+{
+ inline void* operator new(size_t nbytes, TempAllocator::Fallible view) throw() {
+ return view.alloc.allocate(nbytes);
+ }
+ inline void* operator new(size_t nbytes, TempAllocator& alloc) {
+ return alloc.allocateInfallible(nbytes);
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, T* pos) {
+ static_assert(mozilla::IsConvertible<T*, TempObject*>::value,
+ "Placement new argument type must inherit from TempObject");
+ return pos;
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, mozilla::NotNullTag, T* pos) {
+ static_assert(mozilla::IsConvertible<T*, TempObject*>::value,
+ "Placement new argument type must inherit from TempObject");
+ MOZ_ASSERT(pos);
+ return pos;
+ }
+};
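+
+// Example (illustrative, with a hypothetical TempObject subclass MyNode):
+//   MyNode* a = new (alloc) MyNode();             // infallible placement new
+//   MyNode* b = new (alloc.fallible()) MyNode();  // returns nullptr on OOM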
+
+template <typename T>
+class TempObjectPool
+{
+ TempAllocator* alloc_;
+ InlineForwardList<T> freed_;
+
+ public:
+ TempObjectPool()
+ : alloc_(nullptr)
+ {}
+ void setAllocator(TempAllocator& alloc) {
+ MOZ_ASSERT(freed_.empty());
+ alloc_ = &alloc;
+ }
+ T* allocate() {
+ MOZ_ASSERT(alloc_);
+ if (freed_.empty())
+ return new(alloc_->fallible()) T();
+ return freed_.popFront();
+ }
+ void free(T* obj) {
+ freed_.pushFront(obj);
+ }
+ void clear() {
+ freed_.clear();
+ }
+};
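+
+// Example (illustrative, reusing the hypothetical MyNode from above):
+//   TempObjectPool<MyNode> pool;
+//   pool.setAllocator(alloc);
+//   MyNode* n = pool.allocate();   // recycles a freed node or allocates one
+//   pool.free(n);                  // handed back by a later allocate() call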
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitAllocPolicy_h */
diff --git a/js/src/jit/JitCommon.h b/js/src/jit/JitCommon.h
new file mode 100644
index 000000000..043b1463b
--- /dev/null
+++ b/js/src/jit/JitCommon.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitCommon_h
+#define jit_JitCommon_h
+
+// Various macros used by all JITs.
+
+#if defined(JS_SIMULATOR_ARM)
+#include "jit/arm/Simulator-arm.h"
+#elif defined(JS_SIMULATOR_ARM64)
+# include "jit/arm64/vixl/Simulator-vixl.h"
+#elif defined(JS_SIMULATOR_MIPS32)
+#include "jit/mips32/Simulator-mips32.h"
+#elif defined(JS_SIMULATOR_MIPS64)
+#include "jit/mips64/Simulator-mips64.h"
+#endif
+
+#ifdef JS_SIMULATOR
+// Call into cross-jitted code by following the ABI of the simulated architecture.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (js::jit::Simulator::Current()->call( \
+ JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 8, p0, p1, p2, p3, p4, p5, p6, p7))
+
+#define CALL_GENERATED_1(entry, p0) \
+ (js::jit::Simulator::Current()->call( \
+ JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 1, p0))
+
+#define CALL_GENERATED_2(entry, p0, p1) \
+ (js::jit::Simulator::Current()->call( \
+ JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 2, p0, p1))
+
+#define CALL_GENERATED_3(entry, p0, p1, p2) \
+ (js::jit::Simulator::Current()->call( \
+ JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 3, p0, p1, p2))
+
+#else
+
+// Call into jitted code by following the ABI of the native architecture.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
+
+#define CALL_GENERATED_1(entry, p0) entry(p0)
+#define CALL_GENERATED_2(entry, p0, p1) entry(p0, p1)
+#define CALL_GENERATED_3(entry, p0, p1, p2) entry(p0, p1, p2)
+
+#endif
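+
+// Example (illustrative): callers use these macros so the same code works in
+// both simulator and native builds. Given a hypothetical generated entry
+// point of type int (*)(int, int):
+//   int result = CALL_GENERATED_2(entry, 1, 2);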
+
+#endif // jit_JitCommon_h
diff --git a/js/src/jit/JitCompartment.h b/js/src/jit/JitCompartment.h
new file mode 100644
index 000000000..48f878e13
--- /dev/null
+++ b/js/src/jit/JitCompartment.h
@@ -0,0 +1,667 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitCompartment_h
+#define jit_JitCompartment_h
+
+#include "mozilla/Array.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "builtin/TypedObject.h"
+#include "jit/CompileInfo.h"
+#include "jit/ICStubSpace.h"
+#include "jit/IonCode.h"
+#include "jit/JitFrames.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/GCHashTable.h"
+#include "js/Value.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+class FrameSizeClass;
+
+enum EnterJitType {
+ EnterJitBaseline = 0,
+ EnterJitOptimized = 1
+};
+
+struct EnterJitData
+{
+ explicit EnterJitData(JSContext* cx)
+ : envChain(cx),
+ result(cx)
+ {}
+
+ uint8_t* jitcode;
+ InterpreterFrame* osrFrame;
+
+ void* calleeToken;
+
+ Value* maxArgv;
+ unsigned maxArgc;
+ unsigned numActualArgs;
+ unsigned osrNumStackValues;
+
+ RootedObject envChain;
+ RootedValue result;
+
+ bool constructing;
+};
+
+typedef void (*EnterJitCode)(void* code, unsigned argc, Value* argv, InterpreterFrame* fp,
+ CalleeToken calleeToken, JSObject* envChain,
+ size_t numStackValues, Value* vp);
+
+class JitcodeGlobalTable;
+
+// Information about a loop backedge in the runtime, which can be set to
+// point to either the loop header or to an OOL interrupt checking stub,
+// if signal handlers are being used to implement interrupts.
+class PatchableBackedge : public InlineListNode<PatchableBackedge>
+{
+ friend class JitRuntime;
+
+ CodeLocationJump backedge;
+ CodeLocationLabel loopHeader;
+ CodeLocationLabel interruptCheck;
+
+ public:
+ PatchableBackedge(CodeLocationJump backedge,
+ CodeLocationLabel loopHeader,
+ CodeLocationLabel interruptCheck)
+ : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
+ {}
+};
+
+class JitRuntime
+{
+ public:
+ enum BackedgeTarget {
+ BackedgeLoopHeader,
+ BackedgeInterruptCheck
+ };
+
+ private:
+ friend class JitCompartment;
+
+ // Executable allocator for all code except wasm code and Ion code with
+ // patchable backedges (see below).
+ ExecutableAllocator execAlloc_;
+
+ // Executable allocator for Ion scripts with patchable backedges.
+ ExecutableAllocator backedgeExecAlloc_;
+
+ // Shared exception-handler tail.
+ JitCode* exceptionTail_;
+
+ // Shared post-bailout-handler tail.
+ JitCode* bailoutTail_;
+
+ // Shared profiler exit frame tail.
+ JitCode* profilerExitFrameTail_;
+
+ // Trampoline for entering JIT code. Contains OSR prologue.
+ JitCode* enterJIT_;
+
+ // Trampoline for entering baseline JIT code.
+ JitCode* enterBaselineJIT_;
+
+ // Vector mapping frame class sizes to bailout tables.
+ Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;
+
+ // Generic bailout table; used if the bailout table overflows.
+ JitCode* bailoutHandler_;
+
+ // Argument-rectifying thunk, in the case of insufficient arguments passed
+ // to a function call site.
+ JitCode* argumentsRectifier_;
+ void* argumentsRectifierReturnAddr_;
+
+ // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
+ JitCode* invalidator_;
+
+ // Thunk that calls the GC pre barrier.
+ JitCode* valuePreBarrier_;
+ JitCode* stringPreBarrier_;
+ JitCode* objectPreBarrier_;
+ JitCode* shapePreBarrier_;
+ JitCode* objectGroupPreBarrier_;
+
+ // Thunk to call malloc/free.
+ JitCode* mallocStub_;
+ JitCode* freeStub_;
+
+ // Thunk called to finish compilation of an IonScript.
+ JitCode* lazyLinkStub_;
+
+ // Thunk used by the debugger for breakpoint and step mode.
+ JitCode* debugTrapHandler_;
+
+ // Thunk used to fix up on-stack recompile of baseline scripts.
+ JitCode* baselineDebugModeOSRHandler_;
+ void* baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
+
+ // Map VMFunction addresses to the JitCode of the wrapper.
+ using VMWrapperMap = HashMap<const VMFunction*, JitCode*>;
+ VMWrapperMap* functionWrappers_;
+
+ // Buffer for OSR from baseline to Ion. To avoid holding on to this for
+ // too long, it's also freed in JitCompartment::mark and in EnterBaseline
+ // (after returning from JIT code).
+ uint8_t* osrTempData_;
+
+ // If true, the signal handler to interrupt Ion code should not attempt to
+ // patch backedges, as we're busy modifying data structures.
+ mozilla::Atomic<bool> preventBackedgePatching_;
+
+ // Whether patchable backedges currently jump to the loop header or the
+ // interrupt check.
+ BackedgeTarget backedgeTarget_;
+
+ // List of all backedges in all Ion code. The backedge list is accessed
+ // asynchronously when the main thread is paused and preventBackedgePatching_
+ // is false. Thus, the list must only be mutated while preventBackedgePatching_
+ // is true.
+ InlineList<PatchableBackedge> backedgeList_;
+
+ // In certain cases, we want to optimize certain opcodes to typed instructions,
+ // to avoid carrying an extra register to feed into an unbox. Unfortunately,
+ // that's not always possible. For example, a GetPropertyCacheT could return a
+ // typed double, but if it takes its out-of-line path, it could return an
+ // object, and trigger invalidation. The invalidation bailout will consider the
+ // return value to be a double, and create a garbage Value.
+ //
+ // To allow the GetPropertyCacheT optimization, we give GetPropertyCache the
+ // ability to override the return value at the top of the stack - the value
+ // that will be temporarily corrupt. This special override value is set only
+ // in callVM() targets that are about to return *and* have invalidated their
+ // callee.
+ js::Value ionReturnOverride_;
+
+ // Global table of jitcode native address => bytecode address mappings.
+ JitcodeGlobalTable* jitcodeGlobalTable_;
+
+ private:
+ JitCode* generateLazyLinkStub(JSContext* cx);
+ JitCode* generateProfilerExitFrameTailStub(JSContext* cx);
+ JitCode* generateExceptionTailStub(JSContext* cx, void* handler);
+ JitCode* generateBailoutTailStub(JSContext* cx);
+ JitCode* generateEnterJIT(JSContext* cx, EnterJitType type);
+ JitCode* generateArgumentsRectifier(JSContext* cx, void** returnAddrOut);
+ JitCode* generateBailoutTable(JSContext* cx, uint32_t frameClass);
+ JitCode* generateBailoutHandler(JSContext* cx);
+ JitCode* generateInvalidator(JSContext* cx);
+ JitCode* generatePreBarrier(JSContext* cx, MIRType type);
+ JitCode* generateMallocStub(JSContext* cx);
+ JitCode* generateFreeStub(JSContext* cx);
+ JitCode* generateDebugTrapHandler(JSContext* cx);
+ JitCode* generateBaselineDebugModeOSRHandler(JSContext* cx, uint32_t* noFrameRegPopOffsetOut);
+ JitCode* generateVMWrapper(JSContext* cx, const VMFunction& f);
+
+ bool generateTLEventVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f, bool enter);
+
+ inline bool generateTLEnterVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f) {
+ return generateTLEventVM(cx, masm, f, /* enter = */ true);
+ }
+ inline bool generateTLExitVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f) {
+ return generateTLEventVM(cx, masm, f, /* enter = */ false);
+ }
+
+ public:
+ explicit JitRuntime(JSRuntime* rt);
+ ~JitRuntime();
+ MOZ_MUST_USE bool initialize(JSContext* cx, js::AutoLockForExclusiveAccess& lock);
+
+ uint8_t* allocateOsrTempData(size_t size);
+ void freeOsrTempData();
+
+ static void Mark(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
+ static void MarkJitcodeGlobalTableUnconditionally(JSTracer* trc);
+ static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(JSTracer* trc);
+ static void SweepJitcodeGlobalTable(JSRuntime* rt);
+
+ ExecutableAllocator& execAlloc() {
+ return execAlloc_;
+ }
+ ExecutableAllocator& backedgeExecAlloc() {
+ return backedgeExecAlloc_;
+ }
+
+ class AutoPreventBackedgePatching
+ {
+ mozilla::DebugOnly<JSRuntime*> rt_;
+ JitRuntime* jrt_;
+ bool prev_;
+
+ public:
+ // This two-arg constructor is provided for JSRuntime::createJitRuntime,
+ // where we have a JitRuntime but have not yet set rt->jitRuntime_.
+ AutoPreventBackedgePatching(JSRuntime* rt, JitRuntime* jrt)
+ : rt_(rt),
+ jrt_(jrt),
+ prev_(false) // silence GCC warning
+ {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ if (jrt_) {
+ prev_ = jrt_->preventBackedgePatching_;
+ jrt_->preventBackedgePatching_ = true;
+ }
+ }
+ explicit AutoPreventBackedgePatching(JSRuntime* rt)
+ : AutoPreventBackedgePatching(rt, rt->jitRuntime())
+ {}
+ ~AutoPreventBackedgePatching() {
+ MOZ_ASSERT(jrt_ == rt_->jitRuntime());
+ if (jrt_) {
+ MOZ_ASSERT(jrt_->preventBackedgePatching_);
+ jrt_->preventBackedgePatching_ = prev_;
+ }
+ }
+ };
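+
+ // Illustrative sketch (not part of the original header): callers that
+ // mutate the backedge list are expected to hold the RAII guard above,
+ // roughly as follows. The surrounding function is hypothetical.
+ //
+ //     void registerBackedge(JSRuntime* rt, PatchableBackedge* backedge) {
+ //         JitRuntime::AutoPreventBackedgePatching apbp(rt);
+ //         rt->jitRuntime()->addPatchableBackedge(backedge);
+ //     } // patching is re-enabled when apbp goes out of scope
+ //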
+
+ bool preventBackedgePatching() const {
+ return preventBackedgePatching_;
+ }
+ BackedgeTarget backedgeTarget() const {
+ return backedgeTarget_;
+ }
+ void addPatchableBackedge(PatchableBackedge* backedge) {
+ MOZ_ASSERT(preventBackedgePatching_);
+ backedgeList_.pushFront(backedge);
+ }
+ void removePatchableBackedge(PatchableBackedge* backedge) {
+ MOZ_ASSERT(preventBackedgePatching_);
+ backedgeList_.remove(backedge);
+ }
+
+ void patchIonBackedges(JSRuntime* rt, BackedgeTarget target);
+
+ JitCode* getVMWrapper(const VMFunction& f) const;
+ JitCode* debugTrapHandler(JSContext* cx);
+ JitCode* getBaselineDebugModeOSRHandler(JSContext* cx);
+ void* getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg);
+
+ JitCode* getGenericBailoutHandler() const {
+ return bailoutHandler_;
+ }
+
+ JitCode* getExceptionTail() const {
+ return exceptionTail_;
+ }
+
+ JitCode* getBailoutTail() const {
+ return bailoutTail_;
+ }
+
+ JitCode* getProfilerExitFrameTail() const {
+ return profilerExitFrameTail_;
+ }
+
+ JitCode* getBailoutTable(const FrameSizeClass& frameClass) const;
+
+ JitCode* getArgumentsRectifier() const {
+ return argumentsRectifier_;
+ }
+
+ void* getArgumentsRectifierReturnAddr() const {
+ return argumentsRectifierReturnAddr_;
+ }
+
+ JitCode* getInvalidationThunk() const {
+ return invalidator_;
+ }
+
+ EnterJitCode enterIon() const {
+ return enterJIT_->as<EnterJitCode>();
+ }
+
+ EnterJitCode enterBaseline() const {
+ return enterBaselineJIT_->as<EnterJitCode>();
+ }
+
+ JitCode* preBarrier(MIRType type) const {
+ switch (type) {
+ case MIRType::Value: return valuePreBarrier_;
+ case MIRType::String: return stringPreBarrier_;
+ case MIRType::Object: return objectPreBarrier_;
+ case MIRType::Shape: return shapePreBarrier_;
+ case MIRType::ObjectGroup: return objectGroupPreBarrier_;
+ default: MOZ_CRASH();
+ }
+ }
+
+ JitCode* mallocStub() const {
+ return mallocStub_;
+ }
+
+ JitCode* freeStub() const {
+ return freeStub_;
+ }
+
+ JitCode* lazyLinkStub() const {
+ return lazyLinkStub_;
+ }
+
+ bool hasIonReturnOverride() const {
+ return !ionReturnOverride_.isMagic(JS_ARG_POISON);
+ }
+ js::Value takeIonReturnOverride() {
+ js::Value v = ionReturnOverride_;
+ ionReturnOverride_ = js::MagicValue(JS_ARG_POISON);
+ return v;
+ }
+ void setIonReturnOverride(const js::Value& v) {
+ MOZ_ASSERT(!hasIonReturnOverride());
+ MOZ_ASSERT(!v.isMagic());
+ ionReturnOverride_ = v;
+ }
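+
+ // Illustrative sketch (not part of the original header): the expected
+ // handshake is that an invalidating callVM() target stores the corrected
+ // value and the bailout path consumes it. |jrt| stands for the JitRuntime
+ // and is hypothetical here.
+ //
+ //     jrt->setIonReturnOverride(correctedValue);   // in the callVM target
+ //     ...
+ //     if (jrt->hasIonReturnOverride())             // in the bailout path
+ //         result = jrt->takeIonReturnOverride();
+ //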
+
+ bool hasJitcodeGlobalTable() const {
+ return jitcodeGlobalTable_ != nullptr;
+ }
+
+ JitcodeGlobalTable* getJitcodeGlobalTable() {
+ MOZ_ASSERT(hasJitcodeGlobalTable());
+ return jitcodeGlobalTable_;
+ }
+
+ bool isProfilerInstrumentationEnabled(JSRuntime* rt) {
+ return rt->spsProfiler.enabled();
+ }
+
+ bool isOptimizationTrackingEnabled(JSRuntime* rt) {
+ return isProfilerInstrumentationEnabled(rt);
+ }
+};
+
+class JitZone
+{
+ // Allocated space for optimized baseline stubs.
+ OptimizedICStubSpace optimizedStubSpace_;
+
+ public:
+ OptimizedICStubSpace* optimizedStubSpace() {
+ return &optimizedStubSpace_;
+ }
+};
+
+enum class CacheKind;
+class CacheIRStubInfo;
+
+struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
+ struct Lookup {
+ CacheKind kind;
+ const uint8_t* code;
+ uint32_t length;
+
+ Lookup(CacheKind kind, const uint8_t* code, uint32_t length)
+ : kind(kind), code(code), length(length)
+ {}
+ };
+
+ static HashNumber hash(const Lookup& l);
+ static bool match(const CacheIRStubKey& entry, const Lookup& l);
+
+ UniquePtr<CacheIRStubInfo, JS::FreePolicy> stubInfo;
+
+ explicit CacheIRStubKey(CacheIRStubInfo* info) : stubInfo(info) {}
+ CacheIRStubKey(CacheIRStubKey&& other) : stubInfo(Move(other.stubInfo)) { }
+
+ void operator=(CacheIRStubKey&& other) {
+ stubInfo = Move(other.stubInfo);
+ }
+};
+
+class JitCompartment
+{
+ friend class JitActivation;
+
+ template<typename Key>
+ struct IcStubCodeMapGCPolicy {
+ static bool needsSweep(Key*, ReadBarrieredJitCode* value) {
+ return IsAboutToBeFinalized(value);
+ }
+ };
+
+ // Map ICStub keys to ICStub shared code objects.
+ using ICStubCodeMap = GCHashMap<uint32_t,
+ ReadBarrieredJitCode,
+ DefaultHasher<uint32_t>,
+ RuntimeAllocPolicy,
+ IcStubCodeMapGCPolicy<uint32_t>>;
+ ICStubCodeMap* stubCodes_;
+
+ // Map CacheIR stub keys to shared stub code objects.
+ using CacheIRStubCodeMap = GCHashMap<CacheIRStubKey,
+ ReadBarrieredJitCode,
+ CacheIRStubKey,
+ RuntimeAllocPolicy,
+ IcStubCodeMapGCPolicy<CacheIRStubKey>>;
+ CacheIRStubCodeMap* cacheIRStubCodes_;
+
+ // Keep track of the offset into various baseline stubs' code at the
+ // return point from a called script.
+ void* baselineCallReturnAddrs_[2];
+ void* baselineGetPropReturnAddr_;
+ void* baselineSetPropReturnAddr_;
+
+ // Stubs to concatenate two strings inline, or perform RegExp calls inline.
+ // These bake in zone- and compartment-specific pointers and can't be stored
+ // in JitRuntime. These are weak pointers, but are not declared as
+ // ReadBarriered since they are only read during Ion compilation, which may
+ // occur off thread; the necessary barriers are captured during
+ // CodeGenerator::link.
+ JitCode* stringConcatStub_;
+ JitCode* regExpMatcherStub_;
+ JitCode* regExpSearcherStub_;
+ JitCode* regExpTesterStub_;
+
+ mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject> simdTemplateObjects_;
+
+ JitCode* generateStringConcatStub(JSContext* cx);
+ JitCode* generateRegExpMatcherStub(JSContext* cx);
+ JitCode* generateRegExpSearcherStub(JSContext* cx);
+ JitCode* generateRegExpTesterStub(JSContext* cx);
+
+ public:
+ JSObject* getSimdTemplateObjectFor(JSContext* cx, Handle<SimdTypeDescr*> descr) {
+ ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
+ if (!tpl)
+ tpl.set(TypedObject::createZeroed(cx, descr, 0, gc::TenuredHeap));
+ return tpl.get();
+ }
+
+ JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
+ const ReadBarrieredObject& tpl = simdTemplateObjects_[type];
+
+ // This function is used by the Eager Simd Unbox phase, so we cannot use
+ // the read barrier. For more information, see the comment above
+ // CodeGenerator::simdRefreshTemplatesDuringLink_.
+ return tpl.unbarrieredGet();
+ }
+
+ // This function is used to call the read barrier, to mark the SIMD template
+ // type as used. This function can only be called from the main thread.
+ void registerSimdTemplateObjectFor(SimdType type) {
+ ReadBarrieredObject& tpl = simdTemplateObjects_[type];
+ MOZ_ASSERT(tpl.unbarrieredGet());
+ tpl.get();
+ }
+
+ JitCode* getStubCode(uint32_t key) {
+ ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
+ if (p)
+ return p->value();
+ return nullptr;
+ }
+ MOZ_MUST_USE bool putStubCode(JSContext* cx, uint32_t key, Handle<JitCode*> stubCode) {
+ MOZ_ASSERT(stubCode);
+ if (!stubCodes_->putNew(key, stubCode.get())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+ JitCode* getCacheIRStubCode(const CacheIRStubKey::Lookup& key, CacheIRStubInfo** stubInfo) {
+ CacheIRStubCodeMap::Ptr p = cacheIRStubCodes_->lookup(key);
+ if (p) {
+ *stubInfo = p->key().stubInfo.get();
+ return p->value();
+ }
+ *stubInfo = nullptr;
+ return nullptr;
+ }
+ MOZ_MUST_USE bool putCacheIRStubCode(const CacheIRStubKey::Lookup& lookup, CacheIRStubKey& key,
+ JitCode* stubCode)
+ {
+ CacheIRStubCodeMap::AddPtr p = cacheIRStubCodes_->lookupForAdd(lookup);
+ MOZ_ASSERT(!p);
+ return cacheIRStubCodes_->add(p, Move(key), stubCode);
+ }
+ void initBaselineCallReturnAddr(void* addr, bool constructing) {
+ MOZ_ASSERT(baselineCallReturnAddrs_[constructing] == nullptr);
+ baselineCallReturnAddrs_[constructing] = addr;
+ }
+ void* baselineCallReturnAddr(bool constructing) {
+ MOZ_ASSERT(baselineCallReturnAddrs_[constructing] != nullptr);
+ return baselineCallReturnAddrs_[constructing];
+ }
+ void initBaselineGetPropReturnAddr(void* addr) {
+ MOZ_ASSERT(baselineGetPropReturnAddr_ == nullptr);
+ baselineGetPropReturnAddr_ = addr;
+ }
+ void* baselineGetPropReturnAddr() {
+ MOZ_ASSERT(baselineGetPropReturnAddr_ != nullptr);
+ return baselineGetPropReturnAddr_;
+ }
+ void initBaselineSetPropReturnAddr(void* addr) {
+ MOZ_ASSERT(baselineSetPropReturnAddr_ == nullptr);
+ baselineSetPropReturnAddr_ = addr;
+ }
+ void* baselineSetPropReturnAddr() {
+ MOZ_ASSERT(baselineSetPropReturnAddr_ != nullptr);
+ return baselineSetPropReturnAddr_;
+ }
+
+ void toggleBarriers(bool enabled);
+
+ public:
+ JitCompartment();
+ ~JitCompartment();
+
+ MOZ_MUST_USE bool initialize(JSContext* cx);
+
+ // Initialize code stubs only used by Ion, not Baseline.
+ MOZ_MUST_USE bool ensureIonStubsExist(JSContext* cx);
+
+ void mark(JSTracer* trc, JSCompartment* compartment);
+ void sweep(FreeOp* fop, JSCompartment* compartment);
+
+ JitCode* stringConcatStubNoBarrier() const {
+ return stringConcatStub_;
+ }
+
+ JitCode* regExpMatcherStubNoBarrier() const {
+ return regExpMatcherStub_;
+ }
+
+ MOZ_MUST_USE bool ensureRegExpMatcherStubExists(JSContext* cx) {
+ if (regExpMatcherStub_)
+ return true;
+ regExpMatcherStub_ = generateRegExpMatcherStub(cx);
+ return regExpMatcherStub_ != nullptr;
+ }
+
+ JitCode* regExpSearcherStubNoBarrier() const {
+ return regExpSearcherStub_;
+ }
+
+ MOZ_MUST_USE bool ensureRegExpSearcherStubExists(JSContext* cx) {
+ if (regExpSearcherStub_)
+ return true;
+ regExpSearcherStub_ = generateRegExpSearcherStub(cx);
+ return regExpSearcherStub_ != nullptr;
+ }
+
+ JitCode* regExpTesterStubNoBarrier() const {
+ return regExpTesterStub_;
+ }
+
+ MOZ_MUST_USE bool ensureRegExpTesterStubExists(JSContext* cx) {
+ if (regExpTesterStub_)
+ return true;
+ regExpTesterStub_ = generateRegExpTesterStub(cx);
+ return regExpTesterStub_ != nullptr;
+ }
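+
+ // Illustrative sketch (not part of the original header): these regexp
+ // stubs are created lazily, so Ion compilation paths that need one are
+ // expected to call the matching ensure*StubExists() first, assuming the
+ // usual JSCompartment::jitCompartment() accessor:
+ //
+ //     if (!cx->compartment()->jitCompartment()->ensureRegExpMatcherStubExists(cx))
+ //         return false;   // OOM while generating the stub
+ //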
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// Called from JSCompartment::discardJitCode().
+void InvalidateAll(FreeOp* fop, JS::Zone* zone);
+void FinishInvalidation(FreeOp* fop, JSScript* script);
+
+// On Windows systems, very large frames need to be touched incrementally.
+// The following constant defines the minimum increment of the touch.
+#ifdef XP_WIN
+const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
+#endif
+
+// If NON_WRITABLE_JIT_CODE is enabled, this class will ensure
+// JIT code is writable (has RW permissions) in its scope.
+// Otherwise it's a no-op.
+class MOZ_STACK_CLASS AutoWritableJitCode
+{
+ // Backedge patching from the signal handler will change memory protection
+ // flags, so don't allow it in an AutoWritableJitCode scope.
+ JitRuntime::AutoPreventBackedgePatching preventPatching_;
+ JSRuntime* rt_;
+ void* addr_;
+ size_t size_;
+
+ public:
+ AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
+ : preventPatching_(rt), rt_(rt), addr_(addr), size_(size)
+ {
+ rt_->toggleAutoWritableJitCodeActive(true);
+ if (!ExecutableAllocator::makeWritable(addr_, size_))
+ MOZ_CRASH();
+ }
+ AutoWritableJitCode(void* addr, size_t size)
+ : AutoWritableJitCode(TlsPerThreadData.get()->runtimeFromMainThread(), addr, size)
+ {}
+ explicit AutoWritableJitCode(JitCode* code)
+ : AutoWritableJitCode(code->runtimeFromMainThread(), code->raw(), code->bufferSize())
+ {}
+ ~AutoWritableJitCode() {
+ if (!ExecutableAllocator::makeExecutable(addr_, size_))
+ MOZ_CRASH();
+ rt_->toggleAutoWritableJitCodeActive(false);
+ }
+};
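+
+// Illustrative sketch (not part of the original header): code patching is
+// expected to happen inside an AutoWritableJitCode scope, for example:
+//
+//     {
+//         AutoWritableJitCode awjc(code);   // makes code->raw() writable
+//         // ... patch instructions in code->raw() ...
+//     }   // destructor restores executable permissions
+//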
+
+class MOZ_STACK_CLASS MaybeAutoWritableJitCode
+{
+ mozilla::Maybe<AutoWritableJitCode> awjc_;
+
+ public:
+ MaybeAutoWritableJitCode(void* addr, size_t size, ReprotectCode reprotect) {
+ if (reprotect)
+ awjc_.emplace(addr, size);
+ }
+ MaybeAutoWritableJitCode(JitCode* code, ReprotectCode reprotect) {
+ if (reprotect)
+ awjc_.emplace(code);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitCompartment_h */
diff --git a/js/src/jit/JitFrameIterator-inl.h b/js/src/jit/JitFrameIterator-inl.h
new file mode 100644
index 000000000..af688b9b1
--- /dev/null
+++ b/js/src/jit/JitFrameIterator-inl.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrameIterator_inl_h
+#define jit_JitFrameIterator_inl_h
+
+#include "jit/JitFrameIterator.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+
+namespace js {
+namespace jit {
+
+inline JitFrameLayout*
+JitProfilingFrameIterator::framePtr()
+{
+ MOZ_ASSERT(!done());
+ return (JitFrameLayout*) fp_;
+}
+
+inline JSScript*
+JitProfilingFrameIterator::frameScript()
+{
+ return ScriptFromCalleeToken(framePtr()->calleeToken());
+}
+
+inline BaselineFrame*
+JitFrameIterator::baselineFrame() const
+{
+ MOZ_ASSERT(isBaselineJS());
+ return (BaselineFrame*)(fp() - BaselineFrame::FramePointerOffset - BaselineFrame::Size());
+}
+
+template <typename T>
+bool
+JitFrameIterator::isExitFrameLayout() const
+{
+ if (!isExitFrame())
+ return false;
+ return exitFrame()->is<T>();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitFrameIterator_inl_h */
diff --git a/js/src/jit/JitFrameIterator.h b/js/src/jit/JitFrameIterator.h
new file mode 100644
index 000000000..ba5efef6a
--- /dev/null
+++ b/js/src/jit/JitFrameIterator.h
@@ -0,0 +1,864 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrameIterator_h
+#define jit_JitFrameIterator_h
+
+#include "jsfun.h"
+#include "jsscript.h"
+#include "jstypes.h"
+
+#include "jit/IonCode.h"
+#include "jit/Snapshots.h"
+
+#include "js/ProfilingFrameIterator.h"
+
+namespace js {
+ class ActivationIterator;
+} // namespace js
+
+namespace js {
+namespace jit {
+
+typedef void * CalleeToken;
+
+enum FrameType
+{
+ // A JS frame is analogous to a js::InterpreterFrame, representing one scripted
+ // function activation. IonJS frames are used by the optimizing compiler.
+ JitFrame_IonJS,
+
+ // JS frame used by the baseline JIT.
+ JitFrame_BaselineJS,
+
+ // Frame pushed for JIT stubs that make non-tail calls, so that the
+ // return address -> ICEntry mapping works.
+ JitFrame_BaselineStub,
+ JitFrame_IonStub,
+
+ // The entry frame is the initial prologue block transitioning from the VM
+ // into the Ion world.
+ JitFrame_Entry,
+
+ // A rectifier frame sits in between two JS frames, adapting argc != nargs
+ // mismatches in calls.
+ JitFrame_Rectifier,
+
+ // Ion IC calling a scripted getter/setter.
+ JitFrame_IonAccessorIC,
+
+ // An exit frame is necessary for transitioning from a JS frame into C++.
+ // From within C++, an exit frame is always the last frame in any
+ // JitActivation.
+ JitFrame_Exit,
+
+ // A bailout frame is a special IonJS jit frame after a bailout, and before
+ // the reconstruction of the BaselineJS frame. From within C++, a bailout
+ // frame is always the last frame in a JitActivation iff the bailout frame
+ // information is recorded on the JitActivation.
+ JitFrame_Bailout,
+};
+
+enum ReadFrameArgsBehavior {
+ // Only read formals (i.e. [0 ... callee()->nargs])
+ ReadFrame_Formals,
+
+ // Only read overflown args (i.e. [callee()->nargs ... numActuals()])
+ ReadFrame_Overflown,
+
+ // Read all args (i.e. [0 ... numActuals()])
+ ReadFrame_Actuals
+};
+
+class CommonFrameLayout;
+class JitFrameLayout;
+class ExitFrameLayout;
+
+class BaselineFrame;
+
+class JitActivation;
+
+// Iterate over the JIT stack to assert that all invariants are respected.
+// - Check that all entry frames are aligned on JitStackAlignment.
+// - Check that all rectifier frames keep the JitStackAlignment.
+void AssertJitStackInvariants(JSContext* cx);
+
+class JitFrameIterator
+{
+ protected:
+ uint8_t* current_;
+ FrameType type_;
+ uint8_t* returnAddressToFp_;
+ size_t frameSize_;
+
+ private:
+ mutable const SafepointIndex* cachedSafepointIndex_;
+ const JitActivation* activation_;
+
+ void dumpBaseline() const;
+
+ public:
+ explicit JitFrameIterator();
+ explicit JitFrameIterator(JSContext* cx);
+ explicit JitFrameIterator(const ActivationIterator& activations);
+
+ // Current frame information.
+ FrameType type() const {
+ return type_;
+ }
+ uint8_t* fp() const {
+ return current_;
+ }
+ const JitActivation* activation() const {
+ return activation_;
+ }
+
+ CommonFrameLayout* current() const {
+ return (CommonFrameLayout*)current_;
+ }
+
+ inline uint8_t* returnAddress() const;
+
+ // Returns a pointer to the JitFrameLayout; the iterator is assumed to be
+ // settled on a scripted frame.
+ JitFrameLayout* jsFrame() const;
+
+ inline ExitFrameLayout* exitFrame() const;
+
+ // Returns whether the JS frame has been invalidated and, if so,
+ // places the invalidated Ion script in |ionScript|.
+ bool checkInvalidation(IonScript** ionScript) const;
+ bool checkInvalidation() const;
+
+ bool isExitFrame() const {
+ return type_ == JitFrame_Exit;
+ }
+ bool isScripted() const {
+ return type_ == JitFrame_BaselineJS || type_ == JitFrame_IonJS || type_ == JitFrame_Bailout;
+ }
+ bool isBaselineJS() const {
+ return type_ == JitFrame_BaselineJS;
+ }
+ bool isIonScripted() const {
+ return type_ == JitFrame_IonJS || type_ == JitFrame_Bailout;
+ }
+ bool isIonJS() const {
+ return type_ == JitFrame_IonJS;
+ }
+ bool isIonStub() const {
+ return type_ == JitFrame_IonStub;
+ }
+ bool isIonAccessorIC() const {
+ return type_ == JitFrame_IonAccessorIC;
+ }
+ bool isBailoutJS() const {
+ return type_ == JitFrame_Bailout;
+ }
+ bool isBaselineStub() const {
+ return type_ == JitFrame_BaselineStub;
+ }
+ bool isRectifier() const {
+ return type_ == JitFrame_Rectifier;
+ }
+ bool isBareExit() const;
+ template <typename T> bool isExitFrameLayout() const;
+
+ bool isEntry() const {
+ return type_ == JitFrame_Entry;
+ }
+ bool isFunctionFrame() const;
+
+ bool isConstructing() const;
+
+ void* calleeToken() const;
+ JSFunction* callee() const;
+ JSFunction* maybeCallee() const;
+ unsigned numActualArgs() const;
+ JSScript* script() const;
+ void baselineScriptAndPc(JSScript** scriptRes, jsbytecode** pcRes) const;
+ Value* actualArgs() const;
+
+ // Returns the return address of the frame above this one (that is, the
+ // return address that returns into the current frame).
+ uint8_t* returnAddressToFp() const {
+ return returnAddressToFp_;
+ }
+
+ // Previous frame information extracted from the current frame.
+ inline size_t prevFrameLocalSize() const;
+ inline FrameType prevType() const;
+ uint8_t* prevFp() const;
+
+ // Returns the stack space used by the current frame, in bytes. This does
+ // not include the size of its fixed header.
+ size_t frameSize() const {
+ MOZ_ASSERT(!isExitFrame());
+ return frameSize_;
+ }
+
+ // Functions used to iterate over frames. When prevType is JitFrame_Entry,
+ // the current frame is the last frame.
+ inline bool done() const {
+ return type_ == JitFrame_Entry;
+ }
+ JitFrameIterator& operator++();
+
+ // Returns the IonScript associated with this JS frame.
+ IonScript* ionScript() const;
+
+ // Returns the IonScript associated with this JS frame; the frame must
+ // not be invalidated.
+ IonScript* ionScriptFromCalleeToken() const;
+
+ // Returns the Safepoint associated with this JS frame. Incurs a lookup
+ // overhead.
+ const SafepointIndex* safepoint() const;
+
+ // Returns the OSI index associated with this JS frame. Incurs a lookup
+ // overhead.
+ const OsiIndex* osiIndex() const;
+
+ // Returns the Snapshot offset associated with this JS frame. Incurs a
+ // lookup overhead.
+ SnapshotOffset snapshotOffset() const;
+
+ uintptr_t* spillBase() const;
+ MachineState machineState() const;
+
+ template <class Op>
+ void unaliasedForEachActual(Op op, ReadFrameArgsBehavior behavior) const {
+ MOZ_ASSERT(isBaselineJS());
+
+ unsigned nactual = numActualArgs();
+ unsigned start, end;
+ switch (behavior) {
+ case ReadFrame_Formals:
+ start = 0;
+ end = callee()->nargs();
+ break;
+ case ReadFrame_Overflown:
+ start = callee()->nargs();
+ end = nactual;
+ break;
+ case ReadFrame_Actuals:
+ start = 0;
+ end = nactual;
+ }
+
+ Value* argv = actualArgs();
+ for (unsigned i = start; i < end; i++)
+ op(argv[i]);
+ }
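+
+ // Illustrative sketch (not part of the original header): Op only needs an
+ // operator()(const Value&). Because |op| is taken by value, a stateful
+ // functor should carry a pointer to its state, as in this hypothetical
+ // argument counter:
+ //
+ //     struct CountActuals {
+ //         size_t* count;
+ //         void operator()(const Value&) { (*count)++; }
+ //     };
+ //
+ //     size_t n = 0;
+ //     CountActuals countOp{&n};
+ //     iter.unaliasedForEachActual(countOp, ReadFrame_Actuals);
+ //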
+
+ void dump() const;
+
+ inline BaselineFrame* baselineFrame() const;
+
+ // This function isn't used, but we keep it here (debug-only) because it is
+ // helpful when chasing issues with the jitcode map.
+#ifdef DEBUG
+ bool verifyReturnAddressUsingNativeToBytecodeMap();
+#else
+ inline bool verifyReturnAddressUsingNativeToBytecodeMap() { return true; }
+#endif
+};
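+
+// Illustrative sketch (not part of the original header): the iterator is
+// typically driven from the innermost (exit) frame up to the entry frame,
+// dispatching on the FrameType predicates above, roughly like this:
+//
+//     for (JitFrameIterator it(cx); !it.done(); ++it) {
+//         if (it.isScripted()) {
+//             JSScript* script = it.script();
+//             // ... inspect script, it.type(), it.returnAddressToFp(), ...
+//         }
+//     }
+//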
+
+class JitcodeGlobalTable;
+
+class JitProfilingFrameIterator
+{
+ uint8_t* fp_;
+ FrameType type_;
+ void* returnAddressToFp_;
+
+ inline JitFrameLayout* framePtr();
+ inline JSScript* frameScript();
+ MOZ_MUST_USE bool tryInitWithPC(void* pc);
+ MOZ_MUST_USE bool tryInitWithTable(JitcodeGlobalTable* table, void* pc, JSRuntime* rt,
+ bool forLastCallSite);
+ void fixBaselineReturnAddress();
+
+ void moveToNextFrame(CommonFrameLayout* frame);
+
+ public:
+ JitProfilingFrameIterator(JSRuntime* rt,
+ const JS::ProfilingFrameIterator::RegisterState& state);
+ explicit JitProfilingFrameIterator(void* exitFrame);
+
+ void operator++();
+ bool done() const { return fp_ == nullptr; }
+
+ void* fp() const { MOZ_ASSERT(!done()); return fp_; }
+ void* stackAddress() const { return fp(); }
+ FrameType frameType() const { MOZ_ASSERT(!done()); return type_; }
+ void* returnAddressToFp() const { MOZ_ASSERT(!done()); return returnAddressToFp_; }
+};
+
+class RInstructionResults
+{
+ // Vector of results of recover instructions.
+ typedef mozilla::Vector<HeapPtr<Value>, 1, SystemAllocPolicy> Values;
+ UniquePtr<Values> results_;
+
+ // The frame pointer is used as a key to check if the current frame already
+ // bailed out.
+ JitFrameLayout* fp_;
+
+ // Record if we tried and succeeded at allocating and filling the vector of
+ // recover instruction results, if needed. This flag is needed in order to
+ // avoid evaluating the recover instruction twice.
+ bool initialized_;
+
+ public:
+ explicit RInstructionResults(JitFrameLayout* fp);
+ RInstructionResults(RInstructionResults&& src);
+
+ RInstructionResults& operator=(RInstructionResults&& rhs);
+
+ ~RInstructionResults();
+
+ MOZ_MUST_USE bool init(JSContext* cx, uint32_t numResults);
+ bool isInitialized() const;
+#ifdef DEBUG
+ size_t length() const;
+#endif
+
+ JitFrameLayout* frame() const;
+
+ HeapPtr<Value>& operator[](size_t index);
+
+ void trace(JSTracer* trc);
+};
+
+struct MaybeReadFallback
+{
+ enum NoGCValue {
+ NoGC_UndefinedValue,
+ NoGC_MagicOptimizedOut
+ };
+
+ enum FallbackConsequence {
+ Fallback_Invalidate,
+ Fallback_DoNothing
+ };
+
+ JSContext* maybeCx;
+ JitActivation* activation;
+ const JitFrameIterator* frame;
+ const NoGCValue unreadablePlaceholder_;
+ const FallbackConsequence consequence;
+
+ explicit MaybeReadFallback(const Value& placeholder = UndefinedValue())
+ : maybeCx(nullptr),
+ activation(nullptr),
+ frame(nullptr),
+ unreadablePlaceholder_(noGCPlaceholder(placeholder)),
+ consequence(Fallback_Invalidate)
+ {
+ }
+
+ MaybeReadFallback(JSContext* cx, JitActivation* activation, const JitFrameIterator* frame,
+ FallbackConsequence consequence = Fallback_Invalidate)
+ : maybeCx(cx),
+ activation(activation),
+ frame(frame),
+ unreadablePlaceholder_(NoGC_UndefinedValue),
+ consequence(consequence)
+ {
+ }
+
+ bool canRecoverResults() { return maybeCx; }
+
+ Value unreadablePlaceholder() const {
+ if (unreadablePlaceholder_ == NoGC_MagicOptimizedOut)
+ return MagicValue(JS_OPTIMIZED_OUT);
+ return UndefinedValue();
+ }
+
+ NoGCValue noGCPlaceholder(const Value& v) const {
+ if (v.isMagic(JS_OPTIMIZED_OUT))
+ return NoGC_MagicOptimizedOut;
+ return NoGC_UndefinedValue;
+ }
+};
+
+
+class RResumePoint;
+class RSimdBox;
+
+// Reads frame information in snapshot-encoding order (that is, outermost frame
+// to innermost frame).
+class SnapshotIterator
+{
+ protected:
+ SnapshotReader snapshot_;
+ RecoverReader recover_;
+ JitFrameLayout* fp_;
+ const MachineState* machine_;
+ IonScript* ionScript_;
+ RInstructionResults* instructionResults_;
+
+ enum ReadMethod {
+ // Read the normal value.
+ RM_Normal = 1 << 0,
+
+ // Read the default value, or the normal value if there is no default.
+ RM_AlwaysDefault = 1 << 1,
+
+ // Try to read the normal value if it is readable, otherwise default to
+ // the Default value.
+ RM_NormalOrDefault = RM_Normal | RM_AlwaysDefault,
+ };
+
+ private:
+ // Read a spilled register from the machine state.
+ bool hasRegister(Register reg) const {
+ return machine_->has(reg);
+ }
+ uintptr_t fromRegister(Register reg) const {
+ return machine_->read(reg);
+ }
+
+ bool hasRegister(FloatRegister reg) const {
+ return machine_->has(reg);
+ }
+ double fromRegister(FloatRegister reg) const {
+ return machine_->read(reg);
+ }
+
+ // Read an uintptr_t from the stack.
+ bool hasStack(int32_t offset) const {
+ return true;
+ }
+ uintptr_t fromStack(int32_t offset) const;
+
+ bool hasInstructionResult(uint32_t index) const {
+ return instructionResults_;
+ }
+ bool hasInstructionResults() const {
+ return instructionResults_;
+ }
+ Value fromInstructionResult(uint32_t index) const;
+
+ Value allocationValue(const RValueAllocation& a, ReadMethod rm = RM_Normal);
+ MOZ_MUST_USE bool allocationReadable(const RValueAllocation& a, ReadMethod rm = RM_Normal);
+ void writeAllocationValuePayload(const RValueAllocation& a, const Value& v);
+ void warnUnreadableAllocation();
+
+ private:
+ friend class RSimdBox;
+ const FloatRegisters::RegisterContent* floatAllocationPointer(const RValueAllocation& a) const;
+
+ public:
+ // Handle iterating over RValueAllocations of the snapshots.
+ inline RValueAllocation readAllocation() {
+ MOZ_ASSERT(moreAllocations());
+ return snapshot_.readAllocation();
+ }
+ Value skip() {
+ snapshot_.skipAllocation();
+ return UndefinedValue();
+ }
+
+ const RResumePoint* resumePoint() const;
+ const RInstruction* instruction() const {
+ return recover_.instruction();
+ }
+
+ uint32_t numAllocations() const;
+ inline bool moreAllocations() const {
+ return snapshot_.numAllocationsRead() < numAllocations();
+ }
+
+ int32_t readOuterNumActualArgs() const;
+
+ // Used by recover instruction to store the value back into the instruction
+ // results array.
+ void storeInstructionResult(const Value& v);
+
+ public:
+ // Exhibits frame properties contained in the snapshot.
+ uint32_t pcOffset() const;
+ inline MOZ_MUST_USE bool resumeAfter() const {
+ // Inline frames are inlined at calls, which are considered as being
+ // resumed on the call, since Baseline will push the pc once we return
+ // from the call.
+ if (moreFrames())
+ return false;
+ return recover_.resumeAfter();
+ }
+ inline BailoutKind bailoutKind() const {
+ return snapshot_.bailoutKind();
+ }
+
+ public:
+ // Read the next instruction available and get ready to either skip it or
+ // evaluate it.
+ inline void nextInstruction() {
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == numAllocations());
+ recover_.nextInstruction();
+ snapshot_.resetNumAllocationsRead();
+ }
+
+ // Skip an Instruction by walking to the next instruction and by skipping
+ // all the allocations corresponding to this instruction.
+ void skipInstruction();
+
+ inline bool moreInstructions() const {
+ return recover_.moreInstructions();
+ }
+
+ protected:
+ // Register a vector used for storing the results of the evaluation of
+ // recover instructions. This vector should be registered before the
+ // beginning of the iteration. This function is in charge of allocating
+ // enough space for all instruction results, and returns false iff it fails.
+ MOZ_MUST_USE bool initInstructionResults(MaybeReadFallback& fallback);
+
+ // This function is used internally for computing the result of the recover
+ // instructions.
+ MOZ_MUST_USE bool computeInstructionResults(JSContext* cx, RInstructionResults* results) const;
+
+ public:
+ // Handle iterating over frames of the snapshots.
+ void nextFrame();
+ void settleOnFrame();
+
+ inline bool moreFrames() const {
+ // The last instruction is recovering the innermost frame, so as long as
+ // there are more instructions there are necessarily more frames.
+ return moreInstructions();
+ }
+
+ public:
+ // Connect all information about the current script in order to recover
+ // the contents of baseline frames.
+
+ SnapshotIterator(const JitFrameIterator& iter, const MachineState* machineState);
+ SnapshotIterator();
+
+ Value read() {
+ return allocationValue(readAllocation());
+ }
+
+ // Read the |Normal| value unless it is not available and the snapshot
+ // provides a |Default| value. This is useful to avoid invalidating the
+ // frame when we are only interested in a few properties which are provided
+ // by the |Default| value.
+ Value readWithDefault(RValueAllocation* alloc) {
+ *alloc = RValueAllocation();
+ RValueAllocation a = readAllocation();
+ if (allocationReadable(a))
+ return allocationValue(a);
+
+ *alloc = a;
+ return allocationValue(a, RM_AlwaysDefault);
+ }
+
+ Value maybeRead(const RValueAllocation& a, MaybeReadFallback& fallback);
+ Value maybeRead(MaybeReadFallback& fallback) {
+ RValueAllocation a = readAllocation();
+ return maybeRead(a, fallback);
+ }
+
+ void traceAllocation(JSTracer* trc);
+
+ template <class Op>
+ void readFunctionFrameArgs(Op& op, ArgumentsObject** argsObj, Value* thisv,
+ unsigned start, unsigned end, JSScript* script,
+ MaybeReadFallback& fallback)
+ {
+ // Assumes that the common frame arguments have already been read.
+ if (script->argumentsHasVarBinding()) {
+ if (argsObj) {
+ Value v = read();
+ if (v.isObject())
+ *argsObj = &v.toObject().as<ArgumentsObject>();
+ } else {
+ skip();
+ }
+ }
+
+ if (thisv)
+ *thisv = maybeRead(fallback);
+ else
+ skip();
+
+ unsigned i = 0;
+ if (end < start)
+ i = start;
+
+ for (; i < start; i++)
+ skip();
+ for (; i < end; i++) {
+ // We are not always able to read values from the snapshots; some values,
+ // such as non-GC things, may still be live in registers and cause an
+ // error while reading the machine state.
+ Value v = maybeRead(fallback);
+ op(v);
+ }
+ }
+
+ // Iterate over all the allocations and return only the value of the
+ // allocation located at one index.
+ Value maybeReadAllocByIndex(size_t index);
+
+#ifdef TRACK_SNAPSHOTS
+ void spewBailingFrom() const {
+ snapshot_.spewBailingFrom();
+ }
+#endif
+};
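+
+// Illustrative sketch (not part of the original header): when a value may be
+// unreadable (e.g. still live in a register), callers pair the iterator with
+// a MaybeReadFallback rather than calling read() directly. The frame and
+// machine-state setup shown here is hypothetical.
+//
+//     SnapshotIterator si(jitIter, &machineState);
+//     MaybeReadFallback fallback(cx, activation, &jitIter);
+//     while (si.moreAllocations()) {
+//         Value v = si.maybeRead(fallback);  // may invalidate, per consequence
+//         // ... use v ...
+//     }
+//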
+
+// Reads frame information in callstack order (that is, innermost frame to
+// outermost frame).
+class InlineFrameIterator
+{
+ const JitFrameIterator* frame_;
+ SnapshotIterator start_;
+ SnapshotIterator si_;
+ uint32_t framesRead_;
+
+ // When the inline-frame-iterator is created, this variable is initialized
+ // to UINT32_MAX. The first iteration of findNextFrame, which settles on
+ // the innermost frame, then updates this counter to the number of
+ // frames contained in the recover buffer.
+ uint32_t frameCount_;
+
+ // The |calleeTemplate_| field contains either the effective JSFunction or
+ // the template from which it is supposed to be cloned. The |calleeRVA_| is
+ // an Invalid value allocation if the |calleeTemplate_| field is the
+ // effective JSFunction, and not its template. On the other hand, any other
+ // value allocation implies that |calleeTemplate_| is the template JSFunction
+ // from which the effective one would be derived and cached by the Recover
+ // instruction result.
+ RootedFunction calleeTemplate_;
+ RValueAllocation calleeRVA_;
+
+ RootedScript script_;
+ jsbytecode* pc_;
+ uint32_t numActualArgs_;
+
+ // Register state, used by all snapshot iterators.
+ MachineState machine_;
+
+ struct Nop {
+ void operator()(const Value& v) { }
+ };
+
+ private:
+ void findNextFrame();
+ JSObject* computeEnvironmentChain(const Value& envChainValue, MaybeReadFallback& fallback,
+ bool* hasInitialEnv = nullptr) const;
+
+ public:
+ InlineFrameIterator(JSContext* cx, const JitFrameIterator* iter);
+ InlineFrameIterator(JSRuntime* rt, const JitFrameIterator* iter);
+ InlineFrameIterator(JSContext* cx, const InlineFrameIterator* iter);
+
+ bool more() const {
+ return frame_ && framesRead_ < frameCount_;
+ }
+
+ // Due to optimizations, we are not always capable of reading the callee of
+ // inlined frames without invalidating the IonCode. This function might
+ // return either the effective callee or the template JSFunction which might
+ // be used to create it.
+ //
+ // As such, |calleeTemplate()| can be used to read most of the metadata
+ // which is conserved across clones.
+ JSFunction* calleeTemplate() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return calleeTemplate_;
+ }
+ JSFunction* maybeCalleeTemplate() const {
+ return calleeTemplate_;
+ }
+
+ JSFunction* callee(MaybeReadFallback& fallback) const;
+
+ unsigned numActualArgs() const {
+ // The number of actual arguments of inline frames is recovered by the
+ // iteration process. It is recovered from the bytecode because this
+ // property still holds for inlined frames. This property does not
+ // hold for the parent frame because it can have optimized a call to
+ // js_fun_call or js_fun_apply.
+ if (more())
+ return numActualArgs_;
+
+ return frame_->numActualArgs();
+ }
+
+ template <class ArgOp, class LocalOp>
+ void readFrameArgsAndLocals(JSContext* cx, ArgOp& argOp, LocalOp& localOp,
+ JSObject** envChain, bool* hasInitialEnv,
+ Value* rval, ArgumentsObject** argsObj,
+ Value* thisv, Value* newTarget,
+ ReadFrameArgsBehavior behavior,
+ MaybeReadFallback& fallback) const
+ {
+ SnapshotIterator s(si_);
+
+ // Read the env chain.
+ if (envChain) {
+ Value envChainValue = s.maybeRead(fallback);
+ *envChain = computeEnvironmentChain(envChainValue, fallback, hasInitialEnv);
+ } else {
+ s.skip();
+ }
+
+ // Read return value.
+ if (rval)
+ *rval = s.maybeRead(fallback);
+ else
+ s.skip();
+
+ if (newTarget) {
+ // For now, only support reading new.target when we are reading
+ // overflown arguments.
+ MOZ_ASSERT(behavior != ReadFrame_Formals);
+ newTarget->setUndefined();
+ }
+
+ // Read arguments, which only function frames have.
+ if (isFunctionFrame()) {
+ unsigned nactual = numActualArgs();
+ unsigned nformal = calleeTemplate()->nargs();
+
+ // Get the non-overflown arguments, which are taken from the inlined
+ // frame because it will have the updated values when JSOP_SETARG is
+ // done.
+ if (behavior != ReadFrame_Overflown)
+ s.readFunctionFrameArgs(argOp, argsObj, thisv, 0, nformal, script(), fallback);
+
+ if (behavior != ReadFrame_Formals) {
+ if (more()) {
+ // There is still a parent frame of this inlined frame. All
+ // arguments (also the overflown) are the last pushed values
+ // in the parent frame. To get the overflown arguments, we
+ // need to take them from there.
+
+ // The overflown arguments are not available in current frame.
+ // They are the last pushed arguments in the parent frame of
+ // this inlined frame.
+ InlineFrameIterator it(cx, this);
+ ++it;
+ unsigned argsObjAdj = it.script()->argumentsHasVarBinding() ? 1 : 0;
+ bool hasNewTarget = isConstructing();
+ SnapshotIterator parent_s(it.snapshotIterator());
+
+ // Skip over all slots until we get to the last slots
+ // (= argument slots of the callee); the +3 is for [this], [returnvalue],
+ // and [envchain], and maybe +1 for [argsObj].
+ MOZ_ASSERT(parent_s.numAllocations() >= nactual + 3 + argsObjAdj + hasNewTarget);
+ unsigned skip = parent_s.numAllocations() - nactual - 3 - argsObjAdj - hasNewTarget;
+ for (unsigned j = 0; j < skip; j++)
+ parent_s.skip();
+
+ // Get the overflown arguments
+ MaybeReadFallback unusedFallback;
+ parent_s.skip(); // env chain
+ parent_s.skip(); // return value
+ parent_s.readFunctionFrameArgs(argOp, nullptr, nullptr,
+ nformal, nactual, it.script(),
+ fallback);
+ if (newTarget && isConstructing())
+ *newTarget = parent_s.maybeRead(fallback);
+ } else {
+ // There is no parent frame to this inlined frame, we can read
+ // from the frame's Value vector directly.
+ Value* argv = frame_->actualArgs();
+ for (unsigned i = nformal; i < nactual; i++)
+ argOp(argv[i]);
+ if (newTarget && isConstructing())
+ *newTarget = argv[nactual];
+ }
+ }
+ }
+
+ // At this point we've read all the formals in s, and can read the
+ // locals.
+ for (unsigned i = 0; i < script()->nfixed(); i++)
+ localOp(s.maybeRead(fallback));
+ }
+
+ template <class Op>
+ void unaliasedForEachActual(JSContext* cx, Op op,
+ ReadFrameArgsBehavior behavior,
+ MaybeReadFallback& fallback) const
+ {
+ Nop nop;
+ readFrameArgsAndLocals(cx, op, nop, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, behavior, fallback);
+ }
+
+ JSScript* script() const {
+ return script_;
+ }
+ jsbytecode* pc() const {
+ return pc_;
+ }
+ SnapshotIterator snapshotIterator() const {
+ return si_;
+ }
+ bool isFunctionFrame() const;
+ bool isConstructing() const;
+
+ JSObject* environmentChain(MaybeReadFallback& fallback) const {
+ SnapshotIterator s(si_);
+
+ // envChain
+ Value v = s.maybeRead(fallback);
+ return computeEnvironmentChain(v, fallback);
+ }
+
+ Value thisArgument(MaybeReadFallback& fallback) const {
+ SnapshotIterator s(si_);
+
+ // envChain
+ s.skip();
+
+ // return value
+ s.skip();
+
+ // Arguments object.
+ if (script()->argumentsHasVarBinding())
+ s.skip();
+
+ return s.maybeRead(fallback);
+ }
+
+ InlineFrameIterator& operator++() {
+ findNextFrame();
+ return *this;
+ }
+
+ void dump() const;
+
+ void resetOn(const JitFrameIterator* iter);
+
+ const JitFrameIterator& frame() const {
+ return *frame_;
+ }
+
+ // Inline frame number, 0 for the outermost (non-inlined) frame.
+ size_t frameNo() const {
+ return frameCount() - framesRead_;
+ }
+ size_t frameCount() const {
+ MOZ_ASSERT(frameCount_ != UINT32_MAX);
+ return frameCount_;
+ }
+
+ private:
+ InlineFrameIterator() = delete;
+ InlineFrameIterator(const InlineFrameIterator& iter) = delete;
+};
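+
+// Illustrative sketch (not part of the original header): inline frames are
+// visited innermost-first by repeatedly calling operator++ while more() is
+// true; the outer JitFrameIterator shown here is hypothetical.
+//
+//     InlineFrameIterator ifi(cx, &jitIter);
+//     while (true) {
+//         JSScript* script = ifi.script();
+//         // ... inspect script, ifi.pc(), ifi.frameNo(), ...
+//         if (!ifi.more())
+//             break;
+//         ++ifi;
+//     }
+//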
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitFrameIterator_h */
diff --git a/js/src/jit/JitFrames-inl.h b/js/src/jit/JitFrames-inl.h
new file mode 100644
index 000000000..3b7dfa378
--- /dev/null
+++ b/js/src/jit/JitFrames-inl.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrames_inl_h
+#define jit_JitFrames_inl_h
+
+#include "jit/JitFrames.h"
+
+#include "jit/JitFrameIterator.h"
+#include "jit/LIR.h"
+
+#include "jit/JitFrameIterator-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void
+SafepointIndex::resolve()
+{
+ MOZ_ASSERT(!resolved);
+ safepointOffset_ = safepoint_->offset();
+#ifdef DEBUG
+ resolved = true;
+#endif
+}
+
+inline uint8_t*
+JitFrameIterator::returnAddress() const
+{
+ CommonFrameLayout* current = (CommonFrameLayout*) current_;
+ return current->returnAddress();
+}
+
+inline size_t
+JitFrameIterator::prevFrameLocalSize() const
+{
+ CommonFrameLayout* current = (CommonFrameLayout*) current_;
+ return current->prevFrameLocalSize();
+}
+
+inline FrameType
+JitFrameIterator::prevType() const
+{
+ CommonFrameLayout* current = (CommonFrameLayout*) current_;
+ return current->prevType();
+}
+
+inline ExitFrameLayout*
+JitFrameIterator::exitFrame() const
+{
+ MOZ_ASSERT(isExitFrame());
+ return (ExitFrameLayout*) fp();
+}
+
+inline BaselineFrame*
+GetTopBaselineFrame(JSContext* cx)
+{
+ JitFrameIterator iter(cx);
+ MOZ_ASSERT(iter.type() == JitFrame_Exit);
+ ++iter;
+ if (iter.isBaselineStub())
+ ++iter;
+ MOZ_ASSERT(iter.isBaselineJS());
+ return iter.baselineFrame();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitFrames_inl_h */
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
new file mode 100644
index 000000000..646442b4c
--- /dev/null
+++ b/js/src/jit/JitFrames.cpp
@@ -0,0 +1,3158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitFrames-inl.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jsfun.h"
+#include "jsobj.h"
+#include "jsscript.h"
+#include "jsutil.h"
+
+#include "gc/Marking.h"
+#include "jit/BaselineDebugModeOSR.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PcScriptCache.h"
+#include "jit/Recover.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+#include "jit/VMFunctions.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/Debugger.h"
+#include "vm/Interpreter.h"
+#include "vm/SPSProfiler.h"
+#include "vm/TraceLogging.h"
+#include "vm/TypeInference.h"
+
+#include "jsscriptinlines.h"
+#include "gc/Nursery-inl.h"
+#include "jit/JitFrameIterator-inl.h"
+#include "vm/Debugger-inl.h"
+#include "vm/Probes-inl.h"
+#include "vm/TypeInference-inl.h"
+
+namespace js {
+namespace jit {
+
+// Given a slot index, returns the offset, in bytes, of that slot from a
+// JitFrameLayout. Slot distances are uniform across architectures; however,
+// the distance does depend on the size of the frame header.
+static inline int32_t
+OffsetOfFrameSlot(int32_t slot)
+{
+ return -slot;
+}
+
+static inline uint8_t*
+AddressOfFrameSlot(JitFrameLayout* fp, int32_t slot)
+{
+ return (uint8_t*) fp + OffsetOfFrameSlot(slot);
+}
+
+static inline uintptr_t
+ReadFrameSlot(JitFrameLayout* fp, int32_t slot)
+{
+ return *(uintptr_t*) AddressOfFrameSlot(fp, slot);
+}
+
+static inline void
+WriteFrameSlot(JitFrameLayout* fp, int32_t slot, uintptr_t value)
+{
+ *(uintptr_t*) AddressOfFrameSlot(fp, slot) = value;
+}
+
+static inline double
+ReadFrameDoubleSlot(JitFrameLayout* fp, int32_t slot)
+{
+ return *(double*) AddressOfFrameSlot(fp, slot);
+}
+
+static inline float
+ReadFrameFloat32Slot(JitFrameLayout* fp, int32_t slot)
+{
+ return *(float*) AddressOfFrameSlot(fp, slot);
+}
+
+static inline int32_t
+ReadFrameInt32Slot(JitFrameLayout* fp, int32_t slot)
+{
+ return *(int32_t*) AddressOfFrameSlot(fp, slot);
+}
+
+static inline bool
+ReadFrameBooleanSlot(JitFrameLayout* fp, int32_t slot)
+{
+ return *(bool*) AddressOfFrameSlot(fp, slot);
+}
+
+JitFrameIterator::JitFrameIterator()
+ : current_(nullptr),
+ type_(JitFrame_Exit),
+ returnAddressToFp_(nullptr),
+ frameSize_(0),
+ cachedSafepointIndex_(nullptr),
+ activation_(nullptr)
+{
+}
+
+JitFrameIterator::JitFrameIterator(JSContext* cx)
+ : current_(cx->runtime()->jitTop),
+ type_(JitFrame_Exit),
+ returnAddressToFp_(nullptr),
+ frameSize_(0),
+ cachedSafepointIndex_(nullptr),
+ activation_(cx->runtime()->activation()->asJit())
+{
+ if (activation_->bailoutData()) {
+ current_ = activation_->bailoutData()->fp();
+ frameSize_ = activation_->bailoutData()->topFrameSize();
+ type_ = JitFrame_Bailout;
+ }
+}
+
+JitFrameIterator::JitFrameIterator(const ActivationIterator& activations)
+ : current_(activations.jitTop()),
+ type_(JitFrame_Exit),
+ returnAddressToFp_(nullptr),
+ frameSize_(0),
+ cachedSafepointIndex_(nullptr),
+ activation_(activations->asJit())
+{
+ if (activation_->bailoutData()) {
+ current_ = activation_->bailoutData()->fp();
+ frameSize_ = activation_->bailoutData()->topFrameSize();
+ type_ = JitFrame_Bailout;
+ }
+}
+
+bool
+JitFrameIterator::checkInvalidation() const
+{
+ IonScript* dummy;
+ return checkInvalidation(&dummy);
+}
+
+bool
+JitFrameIterator::checkInvalidation(IonScript** ionScriptOut) const
+{
+ JSScript* script = this->script();
+ if (isBailoutJS()) {
+ *ionScriptOut = activation_->bailoutData()->ionScript();
+ return !script->hasIonScript() || script->ionScript() != *ionScriptOut;
+ }
+
+ uint8_t* returnAddr = returnAddressToFp();
+ // N.B. the current IonScript is not the same as the frame's
+ // IonScript if the frame has since been invalidated.
+ bool invalidated = !script->hasIonScript() ||
+ !script->ionScript()->containsReturnAddress(returnAddr);
+ if (!invalidated)
+ return false;
+
+ int32_t invalidationDataOffset = ((int32_t*) returnAddr)[-1];
+ uint8_t* ionScriptDataOffset = returnAddr + invalidationDataOffset;
+ IonScript* ionScript = (IonScript*) Assembler::GetPointer(ionScriptDataOffset);
+ MOZ_ASSERT(ionScript->containsReturnAddress(returnAddr));
+ *ionScriptOut = ionScript;
+ return true;
+}
+
+CalleeToken
+JitFrameIterator::calleeToken() const
+{
+ return ((JitFrameLayout*) current_)->calleeToken();
+}
+
+JSFunction*
+JitFrameIterator::callee() const
+{
+ MOZ_ASSERT(isScripted());
+ MOZ_ASSERT(isFunctionFrame());
+ return CalleeTokenToFunction(calleeToken());
+}
+
+JSFunction*
+JitFrameIterator::maybeCallee() const
+{
+ if (isScripted() && (isFunctionFrame()))
+ return callee();
+ return nullptr;
+}
+
+bool
+JitFrameIterator::isBareExit() const
+{
+ if (type_ != JitFrame_Exit)
+ return false;
+ return exitFrame()->isBareExit();
+}
+
+bool
+JitFrameIterator::isFunctionFrame() const
+{
+ return CalleeTokenIsFunction(calleeToken());
+}
+
+JSScript*
+JitFrameIterator::script() const
+{
+ MOZ_ASSERT(isScripted());
+ if (isBaselineJS())
+ return baselineFrame()->script();
+ JSScript* script = ScriptFromCalleeToken(calleeToken());
+ MOZ_ASSERT(script);
+ return script;
+}
+
+void
+JitFrameIterator::baselineScriptAndPc(JSScript** scriptRes, jsbytecode** pcRes) const
+{
+ MOZ_ASSERT(isBaselineJS());
+ JSScript* script = this->script();
+ if (scriptRes)
+ *scriptRes = script;
+
+ MOZ_ASSERT(pcRes);
+
+ // Use the frame's override pc, if we have one. This should only happen
+ // when we're in FinishBailoutToBaseline, handling an exception or toggling
+ // debug mode.
+ if (jsbytecode* overridePc = baselineFrame()->maybeOverridePc()) {
+ *pcRes = overridePc;
+ return;
+ }
+
+ // Else, there must be an ICEntry for the current return address.
+ uint8_t* retAddr = returnAddressToFp();
+ ICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr);
+ *pcRes = icEntry.pc(script);
+}
+
+Value*
+JitFrameIterator::actualArgs() const
+{
+ return jsFrame()->argv() + 1;
+}
+
+uint8_t*
+JitFrameIterator::prevFp() const
+{
+ return current_ + current()->prevFrameLocalSize() + current()->headerSize();
+}
+
+JitFrameIterator&
+JitFrameIterator::operator++()
+{
+ MOZ_ASSERT(type_ != JitFrame_Entry);
+
+ frameSize_ = prevFrameLocalSize();
+ cachedSafepointIndex_ = nullptr;
+
+ // If the next frame is the entry frame, just exit. Don't update current_,
+ // since the entry and first frames overlap.
+ if (current()->prevType() == JitFrame_Entry) {
+ type_ = JitFrame_Entry;
+ return *this;
+ }
+
+ type_ = current()->prevType();
+ returnAddressToFp_ = current()->returnAddress();
+ current_ = prevFp();
+
+ return *this;
+}
+
+uintptr_t*
+JitFrameIterator::spillBase() const
+{
+ MOZ_ASSERT(isIonJS());
+
+ // Get the base address to where safepoint registers are spilled.
+ // Out-of-line calls do not unwind the extra padding space used to
+ // aggregate bailout tables, so we use frameSize instead of frameLocals,
+ // which would only account for local stack slots.
+ return reinterpret_cast<uintptr_t*>(fp() - ionScript()->frameSize());
+}
+
+MachineState
+JitFrameIterator::machineState() const
+{
+ MOZ_ASSERT(isIonScripted());
+
+ // The MachineState is used by GCs for marking call-sites.
+ if (MOZ_UNLIKELY(isBailoutJS()))
+ return *activation_->bailoutData()->machineState();
+
+ SafepointReader reader(ionScript(), safepoint());
+ uintptr_t* spill = spillBase();
+ MachineState machine;
+
+ for (GeneralRegisterBackwardIterator iter(reader.allGprSpills()); iter.more(); ++iter)
+ machine.setRegisterLocation(*iter, --spill);
+
+ uint8_t* spillAlign = alignDoubleSpillWithOffset(reinterpret_cast<uint8_t*>(spill), 0);
+
+ char* floatSpill = reinterpret_cast<char*>(spillAlign);
+ FloatRegisterSet fregs = reader.allFloatSpills().set();
+ fregs = fregs.reduceSetForPush();
+ for (FloatRegisterBackwardIterator iter(fregs); iter.more(); ++iter) {
+ floatSpill -= (*iter).size();
+ for (uint32_t a = 0; a < (*iter).numAlignedAliased(); a++) {
+ // Only say that registers that actually start here start here.
+ // e.g. d0 should not start at s1, only at s0.
+ FloatRegister ftmp;
+ (*iter).alignedAliased(a, &ftmp);
+ machine.setRegisterLocation(ftmp, (double*)floatSpill);
+ }
+ }
+
+ return machine;
+}
+
+static uint32_t
+NumArgAndLocalSlots(const InlineFrameIterator& frame)
+{
+ JSScript* script = frame.script();
+ return CountArgSlots(script, frame.maybeCalleeTemplate()) + script->nfixed();
+}
+
+static void
+CloseLiveIteratorIon(JSContext* cx, const InlineFrameIterator& frame, uint32_t stackSlot)
+{
+ SnapshotIterator si = frame.snapshotIterator();
+
+ // Skip stack slots until we reach the iterator object.
+ uint32_t skipSlots = NumArgAndLocalSlots(frame) + stackSlot - 1;
+
+ for (unsigned i = 0; i < skipSlots; i++)
+ si.skip();
+
+ Value v = si.read();
+ RootedObject obj(cx, &v.toObject());
+
+ if (cx->isExceptionPending())
+ UnwindIteratorForException(cx, obj);
+ else
+ UnwindIteratorForUncatchableException(cx, obj);
+}
+
+class IonFrameStackDepthOp
+{
+ uint32_t depth_;
+
+ public:
+ explicit IonFrameStackDepthOp(const InlineFrameIterator& frame) {
+ uint32_t base = NumArgAndLocalSlots(frame);
+ SnapshotIterator si = frame.snapshotIterator();
+ MOZ_ASSERT(si.numAllocations() >= base);
+ depth_ = si.numAllocations() - base;
+ }
+
+ uint32_t operator()() { return depth_; }
+};
+
+class TryNoteIterIon : public TryNoteIter<IonFrameStackDepthOp>
+{
+ public:
+ TryNoteIterIon(JSContext* cx, const InlineFrameIterator& frame)
+ : TryNoteIter(cx, frame.script(), frame.pc(), IonFrameStackDepthOp(frame))
+ { }
+};
+
+static void
+HandleExceptionIon(JSContext* cx, const InlineFrameIterator& frame, ResumeFromException* rfe,
+ bool* overrecursed)
+{
+ if (cx->compartment()->isDebuggee()) {
+ // We need to bail when there is a catchable exception, and we are the
+ // debuggee of a Debugger with a live onExceptionUnwind hook, or if a
+ // Debugger has observed this frame (e.g., for onPop).
+ bool shouldBail = Debugger::hasLiveHook(cx->global(), Debugger::OnExceptionUnwind);
+ RematerializedFrame* rematFrame = nullptr;
+ if (!shouldBail) {
+ JitActivation* act = cx->runtime()->activation()->asJit();
+ rematFrame = act->lookupRematerializedFrame(frame.frame().fp(), frame.frameNo());
+ shouldBail = rematFrame && rematFrame->isDebuggee();
+ }
+
+ if (shouldBail) {
+ // If we have an exception from within Ion and the debugger is active,
+ // we do the following:
+ //
+ // 1. Bailout to baseline to reconstruct a baseline frame.
+ // 2. Resume immediately into the exception tail afterwards, and
+ // handle the exception again with the top frame now a baseline
+ // frame.
+ //
+ // An empty exception info denotes that we're propagating an Ion
+ // exception due to debug mode, which BailoutIonToBaseline needs to
+ // know. This is because we might not be able to fully reconstruct up
+ // to the stack depth at the snapshot, as we could've thrown in the
+ // middle of a call.
+ ExceptionBailoutInfo propagateInfo;
+ uint32_t retval = ExceptionHandlerBailout(cx, frame, rfe, propagateInfo, overrecursed);
+ if (retval == BAILOUT_RETURN_OK)
+ return;
+ }
+
+ MOZ_ASSERT_IF(rematFrame, !Debugger::inFrameMaps(rematFrame));
+ }
+
+ RootedScript script(cx, frame.script());
+ if (!script->hasTrynotes())
+ return;
+
+ for (TryNoteIterIon tni(cx, frame); !tni.done(); ++tni) {
+ JSTryNote* tn = *tni;
+
+ switch (tn->kind) {
+ case JSTRY_FOR_IN: {
+ MOZ_ASSERT(JSOp(*(script->main() + tn->start + tn->length)) == JSOP_ENDITER);
+ MOZ_ASSERT(tn->stackDepth > 0);
+
+ uint32_t localSlot = tn->stackDepth;
+ CloseLiveIteratorIon(cx, frame, localSlot);
+ break;
+ }
+
+ case JSTRY_FOR_OF:
+ case JSTRY_LOOP:
+ break;
+
+ case JSTRY_CATCH:
+ if (cx->isExceptionPending()) {
+ // Ion can compile try-catch, but bailing out to catch
+ // exceptions is slow. Reset the warm-up counter so that if we
+ // catch many exceptions we won't Ion-compile the script.
+ script->resetWarmUpCounter();
+
+ // Bailout at the start of the catch block.
+ jsbytecode* catchPC = script->main() + tn->start + tn->length;
+ ExceptionBailoutInfo excInfo(frame.frameNo(), catchPC, tn->stackDepth);
+ uint32_t retval = ExceptionHandlerBailout(cx, frame, rfe, excInfo, overrecursed);
+ if (retval == BAILOUT_RETURN_OK)
+ return;
+
+ // Error on bailout clears pending exception.
+ MOZ_ASSERT(!cx->isExceptionPending());
+ }
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected try note");
+ }
+ }
+}
+
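+// Run the debug epilogue when leaving a Baseline frame; if the debugger forces
+// a return, set up |rfe| so that we resume as a forced return.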
+static void
+OnLeaveBaselineFrame(JSContext* cx, const JitFrameIterator& frame, jsbytecode* pc,
+ ResumeFromException* rfe, bool frameOk)
+{
+ BaselineFrame* baselineFrame = frame.baselineFrame();
+ if (jit::DebugEpilogue(cx, baselineFrame, pc, frameOk)) {
+ rfe->kind = ResumeFromException::RESUME_FORCED_RETURN;
+ rfe->framePointer = frame.fp() - BaselineFrame::FramePointerOffset;
+ rfe->stackPointer = reinterpret_cast<uint8_t*>(baselineFrame);
+ }
+}
+
+static inline void
+ForcedReturn(JSContext* cx, const JitFrameIterator& frame, jsbytecode* pc,
+ ResumeFromException* rfe)
+{
+ OnLeaveBaselineFrame(cx, frame, pc, rfe, true);
+}
+
+static inline void
+BaselineFrameAndStackPointersFromTryNote(JSTryNote* tn, const JitFrameIterator& frame,
+ uint8_t** framePointer, uint8_t** stackPointer)
+{
+ JSScript* script = frame.baselineFrame()->script();
+ *framePointer = frame.fp() - BaselineFrame::FramePointerOffset;
+ *stackPointer = *framePointer - BaselineFrame::Size() -
+ (script->nfixed() + tn->stackDepth) * sizeof(Value);
+}
+
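+// Prepare to resume at the given try note: unwind the environment chain,
+// compute the frame and stack pointers for the resume point, and set *pc to
+// the end of the try body.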
+static void
+SettleOnTryNote(JSContext* cx, JSTryNote* tn, const JitFrameIterator& frame,
+ EnvironmentIter& ei, ResumeFromException* rfe, jsbytecode** pc)
+{
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ // Unwind environment chain (pop block objects).
+ if (cx->isExceptionPending())
+ UnwindEnvironment(cx, ei, UnwindEnvironmentToTryPc(script, tn));
+
+ // Compute base pointer and stack pointer.
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &rfe->framePointer, &rfe->stackPointer);
+
+ // Compute the pc.
+ *pc = script->main() + tn->start + tn->length;
+}
+
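+// RAII class flagging a Baseline frame as handling an exception and giving it
+// an override pc for the duration of exception handling.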
+struct AutoBaselineHandlingException
+{
+ BaselineFrame* frame;
+ AutoBaselineHandlingException(BaselineFrame* frame, jsbytecode* pc)
+ : frame(frame)
+ {
+ frame->setIsHandlingException();
+ frame->setOverridePc(pc);
+ }
+ ~AutoBaselineHandlingException() {
+ frame->unsetIsHandlingException();
+ frame->clearOverridePc();
+ }
+};
+
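+// Functor computing the current expression stack depth of a Baseline frame,
+// for use by TryNoteIterBaseline.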
+class BaselineFrameStackDepthOp
+{
+ BaselineFrame* frame_;
+ public:
+ explicit BaselineFrameStackDepthOp(BaselineFrame* frame)
+ : frame_(frame)
+ { }
+ uint32_t operator()() {
+ MOZ_ASSERT(frame_->numValueSlots() >= frame_->script()->nfixed());
+ return frame_->numValueSlots() - frame_->script()->nfixed();
+ }
+};
+
+class TryNoteIterBaseline : public TryNoteIter<BaselineFrameStackDepthOp>
+{
+ public:
+ TryNoteIterBaseline(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+ : TryNoteIter(cx, frame->script(), pc, BaselineFrameStackDepthOp(frame))
+ { }
+};
+
+// Close all live for-in iterators on a BaselineFrame due to exception
+// unwinding. The pc parameter is used to find the try notes that are
+// currently in scope.
+static void
+CloseLiveIteratorsBaselineForUncatchableException(JSContext* cx, const JitFrameIterator& frame,
+ jsbytecode* pc)
+{
+ for (TryNoteIterBaseline tni(cx, frame.baselineFrame(), pc); !tni.done(); ++tni) {
+ JSTryNote* tn = *tni;
+
+ if (tn->kind == JSTRY_FOR_IN) {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &framePointer, &stackPointer);
+ Value iterValue(*(Value*) stackPointer);
+ RootedObject iterObject(cx, &iterValue.toObject());
+ UnwindIteratorForUncatchableException(cx, iterObject);
+ }
+ }
+}
+
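+// Search the try notes of a Baseline frame for one that can handle the
+// pending exception (catch or finally) and set up |rfe| accordingly. Returns
+// false when exception handling must start over, e.g. when closing a for-in
+// iterator itself throws.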
+static bool
+ProcessTryNotesBaseline(JSContext* cx, const JitFrameIterator& frame, EnvironmentIter& ei,
+ ResumeFromException* rfe, jsbytecode** pc)
+{
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ for (TryNoteIterBaseline tni(cx, frame.baselineFrame(), *pc); !tni.done(); ++tni) {
+ JSTryNote* tn = *tni;
+
+ MOZ_ASSERT(cx->isExceptionPending());
+ switch (tn->kind) {
+ case JSTRY_CATCH: {
+ // If we're closing a legacy generator, we have to skip catch
+ // blocks.
+ if (cx->isClosingGenerator())
+ continue;
+
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+
+ // Ion can compile try-catch, but bailing out to catch
+ // exceptions is slow. Reset the warm-up counter so that if we
+ // catch many exceptions we won't Ion-compile the script.
+ script->resetWarmUpCounter();
+
+ // Resume at the start of the catch block.
+ rfe->kind = ResumeFromException::RESUME_CATCH;
+ rfe->target = script->baselineScript()->nativeCodeForPC(script, *pc);
+ return true;
+ }
+
+ case JSTRY_FINALLY: {
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+ rfe->kind = ResumeFromException::RESUME_FINALLY;
+ rfe->target = script->baselineScript()->nativeCodeForPC(script, *pc);
+ // Drop the exception instead of leaking cross compartment data.
+ if (!cx->getPendingException(MutableHandleValue::fromMarkedLocation(&rfe->exception)))
+ rfe->exception = UndefinedValue();
+ cx->clearPendingException();
+ return true;
+ }
+
+ case JSTRY_FOR_IN: {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &framePointer, &stackPointer);
+ Value iterValue(*(Value*) stackPointer);
+ RootedObject iterObject(cx, &iterValue.toObject());
+ if (!UnwindIteratorForException(cx, iterObject)) {
+ // See comment in the JSTRY_FOR_IN case in Interpreter.cpp's
+ // ProcessTryNotes.
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+ MOZ_ASSERT(**pc == JSOP_ENDITER);
+ return false;
+ }
+ break;
+ }
+
+ case JSTRY_FOR_OF:
+ case JSTRY_LOOP:
+ break;
+
+ default:
+ MOZ_CRASH("Invalid try note");
+ }
+ }
+ return true;
+}
+
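+// Handle an exception (or propagated forced return) in a Baseline frame: run
+// the debugger's onExceptionUnwind hook, search the try notes, and otherwise
+// leave the frame.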
+static void
+HandleExceptionBaseline(JSContext* cx, const JitFrameIterator& frame, ResumeFromException* rfe,
+ jsbytecode* pc)
+{
+ MOZ_ASSERT(frame.isBaselineJS());
+
+ bool frameOk = false;
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ if (script->hasScriptCounts()) {
+ PCCounts* counts = script->getThrowCounts(pc);
+ // If we failed to allocate, then skip the increment and continue to
+ // handle the exception.
+ if (counts)
+ counts->numExec()++;
+ }
+
+ // We may be propagating a forced return from the interrupt
+ // callback, which cannot easily force a return.
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ ForcedReturn(cx, frame, pc, rfe);
+ return;
+ }
+
+ again:
+ if (cx->isExceptionPending()) {
+ if (!cx->isClosingGenerator()) {
+ switch (Debugger::onExceptionUnwind(cx, frame.baselineFrame())) {
+ case JSTRAP_ERROR:
+ // Uncatchable exception.
+ MOZ_ASSERT(!cx->isExceptionPending());
+ goto again;
+
+ case JSTRAP_CONTINUE:
+ case JSTRAP_THROW:
+ MOZ_ASSERT(cx->isExceptionPending());
+ break;
+
+ case JSTRAP_RETURN:
+ if (script->hasTrynotes())
+ CloseLiveIteratorsBaselineForUncatchableException(cx, frame, pc);
+ ForcedReturn(cx, frame, pc, rfe);
+ return;
+
+ default:
+ MOZ_CRASH("Invalid trap status");
+ }
+ }
+
+ if (script->hasTrynotes()) {
+ EnvironmentIter ei(cx, frame.baselineFrame(), pc);
+ if (!ProcessTryNotesBaseline(cx, frame, ei, rfe, &pc))
+ goto again;
+ if (rfe->kind != ResumeFromException::RESUME_ENTRY_FRAME) {
+ // No need to increment the PCCounts execution counter here, as the
+ // interpreter increments any PCCounts if present.
+ MOZ_ASSERT_IF(script->hasScriptCounts(), script->maybeGetPCCounts(pc));
+ return;
+ }
+ }
+
+ frameOk = HandleClosingGeneratorReturn(cx, frame.baselineFrame(), frameOk);
+ frameOk = Debugger::onLeaveFrame(cx, frame.baselineFrame(), pc, frameOk);
+ } else if (script->hasTrynotes()) {
+ CloseLiveIteratorsBaselineForUncatchableException(cx, frame, pc);
+ }
+
+ OnLeaveBaselineFrame(cx, frame, pc, rfe, frameOk);
+}
+
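+// RAII class freeing a Baseline frame's debug mode OSR info once exception
+// handling leaves the frame.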
+struct AutoDeleteDebugModeOSRInfo
+{
+ BaselineFrame* frame;
+ explicit AutoDeleteDebugModeOSRInfo(BaselineFrame* frame) : frame(frame) { MOZ_ASSERT(frame); }
+ ~AutoDeleteDebugModeOSRInfo() { frame->deleteDebugModeOSRInfo(); }
+};
+
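+// RAII class which, when HandleException returns, resets the JitActivation's
+// last profiling frame to the frame we resume into (or to null when unwinding
+// all the way to the entry frame).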
+struct AutoResetLastProfilerFrameOnReturnFromException
+{
+ JSContext* cx;
+ ResumeFromException* rfe;
+
+ AutoResetLastProfilerFrameOnReturnFromException(JSContext* cx, ResumeFromException* rfe)
+ : cx(cx), rfe(rfe) {}
+
+ ~AutoResetLastProfilerFrameOnReturnFromException() {
+ if (!cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+ return;
+
+ MOZ_ASSERT(cx->runtime()->jitActivation == cx->runtime()->profilingActivation());
+
+ void* lastProfilingFrame = getLastProfilingFrame();
+ cx->runtime()->jitActivation->setLastProfilingFrame(lastProfilingFrame);
+ }
+
+ void* getLastProfilingFrame() {
+ switch (rfe->kind) {
+ case ResumeFromException::RESUME_ENTRY_FRAME:
+ return nullptr;
+
+ // The following all return into baseline frames.
+ case ResumeFromException::RESUME_CATCH:
+ case ResumeFromException::RESUME_FINALLY:
+ case ResumeFromException::RESUME_FORCED_RETURN:
+ return rfe->framePointer + BaselineFrame::FramePointerOffset;
+
+ // When resuming into a bailed-out ion frame, use the bailout info to
+ // find the frame we are resuming into.
+ case ResumeFromException::RESUME_BAILOUT:
+ return rfe->bailoutInfo->incomingStack;
+ }
+
+ MOZ_CRASH("Invalid ResumeFromException type!");
+ return nullptr;
+ }
+};
+
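+// Top-level JIT exception handler: walk the JIT frames of the innermost
+// activation, give Ion and Baseline frames a chance to handle the exception,
+// and fill in *rfe with how and where execution should resume.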
+void
+HandleException(ResumeFromException* rfe)
+{
+ JSContext* cx = GetJSContextFromMainThread();
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+
+ AutoResetLastProfilerFrameOnReturnFromException profFrameReset(cx, rfe);
+
+ rfe->kind = ResumeFromException::RESUME_ENTRY_FRAME;
+
+ JitSpew(JitSpew_IonInvalidate, "handling exception");
+
+ // Clear any Ion return override that's been set.
+ // This may happen if a callVM function causes an invalidation (setting the
+ // override), and then fails, bypassing the bailout handlers that would
+ // otherwise clear the return override.
+ if (cx->runtime()->jitRuntime()->hasIonReturnOverride())
+ cx->runtime()->jitRuntime()->takeIonReturnOverride();
+
+ JitActivation* activation = cx->runtime()->activation()->asJit();
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters)
+ activation->setCheckRegs(false);
+#endif
+
+ // The Debugger onExceptionUnwind hook (reachable via
+ // HandleExceptionBaseline below) may cause on-stack recompilation of
+ // baseline scripts, which may patch return addresses on the stack. Since
+ // JitFrameIterators cache the previous frame's return address when
+ // iterating, we need a variant here that is automatically updated should
+ // on-stack recompilation occur.
+ DebugModeOSRVolatileJitFrameIterator iter(cx);
+ while (!iter.isEntry()) {
+ bool overrecursed = false;
+ if (iter.isIonJS()) {
+ // Search each inlined frame for live iterator objects, and close
+ // them.
+ InlineFrameIterator frames(cx, &iter);
+
+ // Invalidation state will be the same for all inlined scripts in the frame.
+ IonScript* ionScript = nullptr;
+ bool invalidated = iter.checkInvalidation(&ionScript);
+
+#ifdef JS_TRACE_LOGGING
+ if (logger && cx->compartment()->isDebuggee() && logger->enabled()) {
+ logger->disable(/* force = */ true,
+ "Forcefully disabled tracelogger, due to "
+ "throwing an exception with an active Debugger "
+ "in IonMonkey.");
+ }
+#endif
+
+ for (;;) {
+ HandleExceptionIon(cx, frames, rfe, &overrecursed);
+
+ if (rfe->kind == ResumeFromException::RESUME_BAILOUT) {
+ if (invalidated)
+ ionScript->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
+ return;
+ }
+
+ MOZ_ASSERT(rfe->kind == ResumeFromException::RESUME_ENTRY_FRAME);
+
+ // When profiling, each popped frame needs a notification that its
+ // function has exited, so invoke the exit-script probe.
+
+ JSScript* script = frames.script();
+ probes::ExitScript(cx, script, script->functionNonDelazifying(),
+ /* popSPSFrame = */ false);
+ if (!frames.more()) {
+ TraceLogStopEvent(logger, TraceLogger_IonMonkey);
+ TraceLogStopEvent(logger, TraceLogger_Scripts);
+ break;
+ }
+ ++frames;
+ }
+
+ activation->removeIonFrameRecovery(iter.jsFrame());
+ if (invalidated)
+ ionScript->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
+
+ } else if (iter.isBaselineJS()) {
+ // Set a flag on the frame to signal to DebugModeOSR that we're
+ // handling an exception. Also ensure the frame has an override
+ // pc. We clear the frame's override pc when we leave this block,
+ // this is fine because we're either:
+ //
+ // (1) Going to enter a catch or finally block. We don't want to
+ // keep the old pc when we're executing JIT code.
+ // (2) Going to pop the frame, either here or a forced return.
+ // In this case nothing will observe the frame's pc.
+ // (3) Performing an exception bailout. In this case
+ // FinishBailoutToBaseline will set the pc to the resume pc
+ // and clear it before it returns to JIT code.
+ jsbytecode* pc;
+ iter.baselineScriptAndPc(nullptr, &pc);
+ AutoBaselineHandlingException handlingException(iter.baselineFrame(), pc);
+
+ HandleExceptionBaseline(cx, iter, rfe, pc);
+
+ // If we are propagating an exception through a frame with
+ // on-stack recompile info, we should free the allocated
+ // RecompileInfo struct before we leave this block, as we will not
+ // be returning to the recompile handler.
+ AutoDeleteDebugModeOSRInfo deleteDebugModeOSRInfo(iter.baselineFrame());
+
+ if (rfe->kind != ResumeFromException::RESUME_ENTRY_FRAME &&
+ rfe->kind != ResumeFromException::RESUME_FORCED_RETURN)
+ {
+ return;
+ }
+
+ TraceLogStopEvent(logger, TraceLogger_Baseline);
+ TraceLogStopEvent(logger, TraceLogger_Scripts);
+
+ // Unwind profiler pseudo-stack
+ JSScript* script = iter.script();
+ probes::ExitScript(cx, script, script->functionNonDelazifying(),
+ /* popSPSFrame = */ false);
+
+ if (rfe->kind == ResumeFromException::RESUME_FORCED_RETURN)
+ return;
+ }
+
+ JitFrameLayout* current = iter.isScripted() ? iter.jsFrame() : nullptr;
+
+ ++iter;
+
+ if (current) {
+ // Unwind the frame by updating jitTop. This is necessary so that
+ // (1) debugger exception unwind and leave frame hooks don't see this
+ // frame when they use ScriptFrameIter, and (2) ScriptFrameIter does
+ // not crash when accessing an IonScript that's destroyed by the
+ // ionScript->decref call.
+ EnsureBareExitFrame(cx, current);
+ }
+
+ if (overrecursed) {
+ // We hit an overrecursion error during bailout. Report it now.
+ ReportOverRecursed(cx);
+ }
+ }
+
+ rfe->stackPointer = iter.fp();
+}
+
+// Turns a JitFrameLayout into an ExitFrameLayout. Note that it has to be a
+// bare exit frame so it's ignored by MarkJitExitFrame.
+void
+EnsureBareExitFrame(JSContext* cx, JitFrameLayout* frame)
+{
+ ExitFrameLayout* exitFrame = reinterpret_cast<ExitFrameLayout*>(frame);
+
+ if (cx->runtime()->jitTop == (uint8_t*)frame) {
+ // If we already called this function for the current frame, do
+ // nothing.
+ MOZ_ASSERT(exitFrame->isBareExit());
+ return;
+ }
+
+#ifdef DEBUG
+ JitFrameIterator iter(cx);
+ while (!iter.isScripted())
+ ++iter;
+ MOZ_ASSERT(iter.current() == frame, "|frame| must be the top JS frame");
+
+ MOZ_ASSERT((uint8_t*)exitFrame->footer() >= cx->runtime()->jitTop,
+ "Must have space for ExitFooterFrame before jitTop");
+#endif
+
+ cx->runtime()->jitTop = (uint8_t*)frame;
+ *exitFrame->footer()->addressOfJitCode() = ExitFrameLayout::BareToken();
+ MOZ_ASSERT(exitFrame->isBareExit());
+}
+
+CalleeToken
+MarkCalleeToken(JSTracer* trc, CalleeToken token)
+{
+ switch (CalleeTokenTag tag = GetCalleeTokenTag(token)) {
+ case CalleeToken_Function:
+ case CalleeToken_FunctionConstructing:
+ {
+ JSFunction* fun = CalleeTokenToFunction(token);
+ TraceRoot(trc, &fun, "jit-callee");
+ return CalleeToToken(fun, tag == CalleeToken_FunctionConstructing);
+ }
+ case CalleeToken_Script:
+ {
+ JSScript* script = CalleeTokenToScript(token);
+ TraceRoot(trc, &script, "jit-script");
+ return CalleeToToken(script);
+ }
+ default:
+ MOZ_CRASH("unknown callee token type");
+ }
+}
+
+uintptr_t*
+JitFrameLayout::slotRef(SafepointSlotEntry where)
+{
+ if (where.stack)
+ return (uintptr_t*)((uint8_t*)this - where.slot);
+ return (uintptr_t*)((uint8_t*)argv() + where.slot);
+}
+
+#ifdef JS_NUNBOX32
+static inline uintptr_t
+ReadAllocation(const JitFrameIterator& frame, const LAllocation* a)
+{
+ if (a->isGeneralReg()) {
+ Register reg = a->toGeneralReg()->reg();
+ return frame.machineState().read(reg);
+ }
+ return *frame.jsFrame()->slotRef(SafepointSlotEntry(a));
+}
+#endif
+
+static void
+MarkThisAndArguments(JSTracer* trc, const JitFrameIterator& frame)
+{
+ // Mark |this| and any extra actual arguments for an Ion frame. Marking of
+ // formal arguments is taken care of by the frame's safepoint/snapshot,
+ // except when the script might have lazy arguments or rest, in which case
+ // we mark them as well. We also have to mark formals if we have a LazyLink
+ // frame.
+
+ JitFrameLayout* layout = frame.isExitFrameLayout<LazyLinkExitFrameLayout>()
+ ? frame.exitFrame()->as<LazyLinkExitFrameLayout>()->jsFrame()
+ : frame.jsFrame();
+
+ if (!CalleeTokenIsFunction(layout->calleeToken()))
+ return;
+
+ size_t nargs = layout->numActualArgs();
+ size_t nformals = 0;
+
+ JSFunction* fun = CalleeTokenToFunction(layout->calleeToken());
+ if (!frame.isExitFrameLayout<LazyLinkExitFrameLayout>() &&
+ !fun->nonLazyScript()->mayReadFrameArgsDirectly())
+ {
+ nformals = fun->nargs();
+ }
+
+ size_t newTargetOffset = Max(nargs, fun->nargs());
+
+ Value* argv = layout->argv();
+
+ // Trace |this|.
+ TraceRoot(trc, argv, "ion-thisv");
+
+ // Trace actual arguments beyond the formals. Note + 1 for thisv.
+ for (size_t i = nformals + 1; i < nargs + 1; i++)
+ TraceRoot(trc, &argv[i], "ion-argv");
+
+ // Always mark the new.target from the frame. It's not in the snapshots.
+ // +1 to pass |this|
+ if (CalleeTokenIsConstructing(layout->calleeToken()))
+ TraceRoot(trc, &argv[1 + newTargetOffset], "ion-newTarget");
+}
+
+#ifdef JS_NUNBOX32
+static inline void
+WriteAllocation(const JitFrameIterator& frame, const LAllocation* a, uintptr_t value)
+{
+ if (a->isGeneralReg()) {
+ Register reg = a->toGeneralReg()->reg();
+ frame.machineState().write(reg, value);
+ } else {
+ *frame.jsFrame()->slotRef(SafepointSlotEntry(a)) = value;
+ }
+}
+#endif
+
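+// Trace the GC things held by an Ion JS frame: the callee token, |this| and
+// any extra actual arguments, and the GC/Value slots and spilled registers
+// described by the frame's safepoint.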
+static void
+MarkIonJSFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ layout->replaceCalleeToken(MarkCalleeToken(trc, layout->calleeToken()));
+
+ IonScript* ionScript = nullptr;
+ if (frame.checkInvalidation(&ionScript)) {
+ // This frame has been invalidated, meaning that its IonScript is no
+ // longer reachable through the callee token (JSFunction/JSScript->ion
+ // is now nullptr or recompiled). Manually trace it here.
+ IonScript::Trace(trc, ionScript);
+ } else {
+ ionScript = frame.ionScriptFromCalleeToken();
+ }
+
+ MarkThisAndArguments(trc, frame);
+
+ const SafepointIndex* si = ionScript->getSafepointIndex(frame.returnAddressToFp());
+
+ SafepointReader safepoint(ionScript, si);
+
+ // Scan through slots which contain pointers (or on punboxing systems,
+ // actual values).
+ SafepointSlotEntry entry;
+
+ while (safepoint.getGcSlot(&entry)) {
+ uintptr_t* ref = layout->slotRef(entry);
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(ref), "ion-gc-slot");
+ }
+
+ while (safepoint.getValueSlot(&entry)) {
+ Value* v = (Value*)layout->slotRef(entry);
+ TraceRoot(trc, v, "ion-gc-slot");
+ }
+
+ uintptr_t* spill = frame.spillBase();
+ LiveGeneralRegisterSet gcRegs = safepoint.gcSpills();
+ LiveGeneralRegisterSet valueRegs = safepoint.valueSpills();
+ for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); ++iter) {
+ --spill;
+ if (gcRegs.has(*iter))
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(spill), "ion-gc-spill");
+ else if (valueRegs.has(*iter))
+ TraceRoot(trc, reinterpret_cast<Value*>(spill), "ion-value-spill");
+ }
+
+#ifdef JS_NUNBOX32
+ LAllocation type, payload;
+ while (safepoint.getNunboxSlot(&type, &payload)) {
+ JSValueTag tag = JSValueTag(ReadAllocation(frame, &type));
+ uintptr_t rawPayload = ReadAllocation(frame, &payload);
+
+ Value v = Value::fromTagAndPayload(tag, rawPayload);
+ TraceRoot(trc, &v, "ion-torn-value");
+
+ if (v != Value::fromTagAndPayload(tag, rawPayload)) {
+ // GC moved the value, replace the stored payload.
+ rawPayload = *v.payloadUIntPtr();
+ WriteAllocation(frame, &payload, rawPayload);
+ }
+ }
+#endif
+}
+
+static void
+MarkBailoutFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ layout->replaceCalleeToken(MarkCalleeToken(trc, layout->calleeToken()));
+
+ // We have to mark the list of actual arguments, as only formal arguments
+ // are represented in the Snapshot.
+ MarkThisAndArguments(trc, frame);
+
+ // Under a bailout, we do not have a Safepoint to iterate over only the
+ // GC things. Thus we use a SnapshotIterator to trace all the locations
+ // which would be used to reconstruct the Baseline frame.
+ //
+ // Note that at the time where this function is called, we have not yet
+ // started to reconstruct baseline frames.
+
+ // The vector of recover instructions is already traced as part of the
+ // JitActivation.
+ SnapshotIterator snapIter(frame, frame.activation()->bailoutData()->machineState());
+
+ // For each instruction, we read the allocations without evaluating the
+ // recover instruction, nor reconstructing the frame. We are only looking at
+ // tracing readable allocations.
+ while (true) {
+ while (snapIter.moreAllocations())
+ snapIter.traceAllocation(trc);
+
+ if (!snapIter.moreInstructions())
+ break;
+ snapIter.nextInstruction();
+ }
+}
+
+void
+UpdateIonJSFrameForMinorGC(JSTracer* trc, const JitFrameIterator& frame)
+{
+ // Minor GCs may move slots/elements allocated in the nursery. Update
+ // any slots/elements pointers stored in this frame.
+
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ IonScript* ionScript = nullptr;
+ if (frame.checkInvalidation(&ionScript)) {
+ // This frame has been invalidated, meaning that its IonScript is no
+ // longer reachable through the callee token (JSFunction/JSScript->ion
+ // is now nullptr or recompiled).
+ } else {
+ ionScript = frame.ionScriptFromCalleeToken();
+ }
+
+ Nursery& nursery = trc->runtime()->gc.nursery;
+
+ const SafepointIndex* si = ionScript->getSafepointIndex(frame.returnAddressToFp());
+ SafepointReader safepoint(ionScript, si);
+
+ LiveGeneralRegisterSet slotsRegs = safepoint.slotsOrElementsSpills();
+ uintptr_t* spill = frame.spillBase();
+ for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); ++iter) {
+ --spill;
+ if (slotsRegs.has(*iter))
+ nursery.forwardBufferPointer(reinterpret_cast<HeapSlot**>(spill));
+ }
+
+ // Skip to the right place in the safepoint
+ SafepointSlotEntry entry;
+ while (safepoint.getGcSlot(&entry));
+ while (safepoint.getValueSlot(&entry));
+#ifdef JS_NUNBOX32
+ LAllocation type, payload;
+ while (safepoint.getNunboxSlot(&type, &payload));
+#endif
+
+ while (safepoint.getSlotsOrElementsSlot(&entry)) {
+ HeapSlot** slots = reinterpret_cast<HeapSlot**>(layout->slotRef(entry));
+ nursery.forwardBufferPointer(slots);
+ }
+}
+
+static void
+MarkJitStubFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ // Mark the ICStub pointer stored in the stub frame. This is necessary
+ // so that we don't destroy the stub code after unlinking the stub.
+
+ MOZ_ASSERT(frame.type() == JitFrame_IonStub || frame.type() == JitFrame_BaselineStub);
+ JitStubFrameLayout* layout = (JitStubFrameLayout*)frame.fp();
+
+ if (ICStub* stub = layout->maybeStubPtr()) {
+ MOZ_ASSERT(ICStub::CanMakeCalls(stub->kind()));
+ stub->trace(trc);
+ }
+}
+
+static void
+MarkIonAccessorICFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ MOZ_ASSERT(frame.type() == JitFrame_IonAccessorIC);
+ IonAccessorICFrameLayout* layout = (IonAccessorICFrameLayout*)frame.fp();
+ TraceRoot(trc, layout->stubCode(), "ion-ic-accessor-code");
+}
+
+#ifdef JS_CODEGEN_MIPS32
+uint8_t*
+alignDoubleSpillWithOffset(uint8_t* pointer, int32_t offset)
+{
+ uint32_t address = reinterpret_cast<uint32_t>(pointer);
+ address = (address - offset) & ~(ABIStackAlignment - 1);
+ return reinterpret_cast<uint8_t*>(address);
+}
+
+static void
+MarkJitExitFrameCopiedArguments(JSTracer* trc, const VMFunction* f, ExitFooterFrame* footer)
+{
+ uint8_t* doubleArgs = reinterpret_cast<uint8_t*>(footer);
+ doubleArgs = alignDoubleSpillWithOffset(doubleArgs, sizeof(intptr_t));
+ if (f->outParam == Type_Handle)
+ doubleArgs -= sizeof(Value);
+ doubleArgs -= f->doubleByRefArgs() * sizeof(double);
+
+ for (uint32_t explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) {
+ if (f->argProperties(explicitArg) == VMFunction::DoubleByRef) {
+ // Arguments with double size can only have RootValue type.
+ if (f->argRootType(explicitArg) == VMFunction::RootValue)
+ TraceRoot(trc, reinterpret_cast<Value*>(doubleArgs), "ion-vm-args");
+ else
+ MOZ_ASSERT(f->argRootType(explicitArg) == VMFunction::RootNone);
+ doubleArgs += sizeof(double);
+ }
+ }
+}
+#else
+static void
+MarkJitExitFrameCopiedArguments(JSTracer* trc, const VMFunction* f, ExitFooterFrame* footer)
+{
+ // This is a no-op on other platforms.
+}
+#endif
+
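+// Trace the GC things held by an exit frame, dispatching on the exit frame
+// layout (native calls, OOL IC stubs, DOM calls, lazy link frames, and plain
+// VM calls).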
+static void
+MarkJitExitFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ ExitFooterFrame* footer = frame.exitFrame()->footer();
+
+ // Mark the code handling the exit path. This is needed because invalidated
+ // scripts are no longer marked: their data is erased by the invalidation
+ // and their relocation data is no longer reliable. So the VM wrapper or
+ // the invalidation code could be GC'd if no JitCode kept a reference to
+ // them.
+ MOZ_ASSERT(uintptr_t(footer->jitCode()) != uintptr_t(-1));
+
+ // This corresponds to the case where we have built a fake exit frame that
+ // handles a native function call. We need to mark the argument vector of
+ // the function call, and also new.target if it was a constructing call.
+ if (frame.isExitFrameLayout<NativeExitFrameLayout>()) {
+ NativeExitFrameLayout* native = frame.exitFrame()->as<NativeExitFrameLayout>();
+ size_t len = native->argc() + 2;
+ Value* vp = native->vp();
+ TraceRootRange(trc, len, vp, "ion-native-args");
+ if (frame.isExitFrameLayout<ConstructNativeExitFrameLayout>())
+ TraceRoot(trc, vp + len, "ion-native-new-target");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonOOLNativeExitFrameLayout>()) {
+ IonOOLNativeExitFrameLayout* oolnative =
+ frame.exitFrame()->as<IonOOLNativeExitFrameLayout>();
+ TraceRoot(trc, oolnative->stubCode(), "ion-ool-native-code");
+ TraceRoot(trc, oolnative->vp(), "ion-ool-native-vp");
+ size_t len = oolnative->argc() + 1;
+ TraceRootRange(trc, len, oolnative->thisp(), "ion-ool-native-thisargs");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonOOLPropertyOpExitFrameLayout>() ||
+ frame.isExitFrameLayout<IonOOLSetterOpExitFrameLayout>())
+ {
+ // A SetterOp frame is a different size, but that's the only relevant
+ // difference between the two. The fields that need marking are all in
+ // the common base class.
+ IonOOLPropertyOpExitFrameLayout* oolgetter =
+ frame.isExitFrameLayout<IonOOLPropertyOpExitFrameLayout>()
+ ? frame.exitFrame()->as<IonOOLPropertyOpExitFrameLayout>()
+ : frame.exitFrame()->as<IonOOLSetterOpExitFrameLayout>();
+ TraceRoot(trc, oolgetter->stubCode(), "ion-ool-property-op-code");
+ TraceRoot(trc, oolgetter->vp(), "ion-ool-property-op-vp");
+ TraceRoot(trc, oolgetter->id(), "ion-ool-property-op-id");
+ TraceRoot(trc, oolgetter->obj(), "ion-ool-property-op-obj");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonOOLProxyExitFrameLayout>()) {
+ IonOOLProxyExitFrameLayout* oolproxy = frame.exitFrame()->as<IonOOLProxyExitFrameLayout>();
+ TraceRoot(trc, oolproxy->stubCode(), "ion-ool-proxy-code");
+ TraceRoot(trc, oolproxy->vp(), "ion-ool-proxy-vp");
+ TraceRoot(trc, oolproxy->id(), "ion-ool-proxy-id");
+ TraceRoot(trc, oolproxy->proxy(), "ion-ool-proxy-proxy");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonDOMExitFrameLayout>()) {
+ IonDOMExitFrameLayout* dom = frame.exitFrame()->as<IonDOMExitFrameLayout>();
+ TraceRoot(trc, dom->thisObjAddress(), "ion-dom-args");
+ if (dom->isMethodFrame()) {
+ IonDOMMethodExitFrameLayout* method =
+ reinterpret_cast<IonDOMMethodExitFrameLayout*>(dom);
+ size_t len = method->argc() + 2;
+ Value* vp = method->vp();
+ TraceRootRange(trc, len, vp, "ion-dom-args");
+ } else {
+ TraceRoot(trc, dom->vp(), "ion-dom-args");
+ }
+ return;
+ }
+
+ if (frame.isExitFrameLayout<LazyLinkExitFrameLayout>()) {
+ LazyLinkExitFrameLayout* ll = frame.exitFrame()->as<LazyLinkExitFrameLayout>();
+ JitFrameLayout* layout = ll->jsFrame();
+
+ TraceRoot(trc, ll->stubCode(), "lazy-link-code");
+ layout->replaceCalleeToken(MarkCalleeToken(trc, layout->calleeToken()));
+ MarkThisAndArguments(trc, frame);
+ return;
+ }
+
+ if (frame.isBareExit()) {
+ // Nothing to mark. Fake exit frame pushed for VM functions with
+ // nothing to mark on the stack.
+ return;
+ }
+
+ TraceRoot(trc, footer->addressOfJitCode(), "ion-exit-code");
+
+ const VMFunction* f = footer->function();
+ if (f == nullptr)
+ return;
+
+ // Mark arguments of the VM wrapper.
+ uint8_t* argBase = frame.exitFrame()->argBase();
+ for (uint32_t explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) {
+ switch (f->argRootType(explicitArg)) {
+ case VMFunction::RootNone:
+ break;
+ case VMFunction::RootObject: {
+ // Sometimes we can bake in HandleObjects to nullptr.
+ JSObject** pobj = reinterpret_cast<JSObject**>(argBase);
+ if (*pobj)
+ TraceRoot(trc, pobj, "ion-vm-args");
+ break;
+ }
+ case VMFunction::RootString:
+ case VMFunction::RootPropertyName:
+ TraceRoot(trc, reinterpret_cast<JSString**>(argBase), "ion-vm-args");
+ break;
+ case VMFunction::RootFunction:
+ TraceRoot(trc, reinterpret_cast<JSFunction**>(argBase), "ion-vm-args");
+ break;
+ case VMFunction::RootValue:
+ TraceRoot(trc, reinterpret_cast<Value*>(argBase), "ion-vm-args");
+ break;
+ case VMFunction::RootCell:
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(argBase), "ion-vm-args");
+ break;
+ }
+
+ switch (f->argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ case VMFunction::WordByRef:
+ argBase += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ case VMFunction::DoubleByRef:
+ argBase += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ if (f->outParam == Type_Handle) {
+ switch (f->outParamRootType) {
+ case VMFunction::RootNone:
+ MOZ_CRASH("Handle outparam must have root type");
+ case VMFunction::RootObject:
+ TraceRoot(trc, footer->outParam<JSObject*>(), "ion-vm-out");
+ break;
+ case VMFunction::RootString:
+ case VMFunction::RootPropertyName:
+ TraceRoot(trc, footer->outParam<JSString*>(), "ion-vm-out");
+ break;
+ case VMFunction::RootFunction:
+ TraceRoot(trc, footer->outParam<JSFunction*>(), "ion-vm-out");
+ break;
+ case VMFunction::RootValue:
+ TraceRoot(trc, footer->outParam<Value>(), "ion-vm-outvp");
+ break;
+ case VMFunction::RootCell:
+ TraceGenericPointerRoot(trc, footer->outParam<gc::Cell*>(), "ion-vm-out");
+ break;
+ }
+ }
+
+ MarkJitExitFrameCopiedArguments(trc, f, footer);
+}
+
+static void
+MarkRectifierFrame(JSTracer* trc, const JitFrameIterator& frame)
+{
+ // Mark thisv.
+ //
+ // Baseline JIT code generated as part of the ICCall_Fallback stub may use
+ // it if we're calling a constructor that returns a primitive value.
+ RectifierFrameLayout* layout = (RectifierFrameLayout*)frame.fp();
+ TraceRoot(trc, &layout->argv()[0], "ion-thisv");
+}
+
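+// Trace the GC things held by every frame of a single Jit activation, as well
+// as its rematerialized frames and Ion recovery results.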
+static void
+MarkJitActivation(JSTracer* trc, const JitActivationIterator& activations)
+{
+ JitActivation* activation = activations->asJit();
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters) {
+ // GC can modify spilled registers, breaking our register checks.
+ // To handle this, we disable these checks for the current VM call
+ // when a GC happens.
+ activation->setCheckRegs(false);
+ }
+#endif
+
+ activation->markRematerializedFrames(trc);
+ activation->markIonRecovery(trc);
+
+ for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
+ switch (frames.type()) {
+ case JitFrame_Exit:
+ MarkJitExitFrame(trc, frames);
+ break;
+ case JitFrame_BaselineJS:
+ frames.baselineFrame()->trace(trc, frames);
+ break;
+ case JitFrame_IonJS:
+ MarkIonJSFrame(trc, frames);
+ break;
+ case JitFrame_BaselineStub:
+ case JitFrame_IonStub:
+ MarkJitStubFrame(trc, frames);
+ break;
+ case JitFrame_Bailout:
+ MarkBailoutFrame(trc, frames);
+ break;
+ case JitFrame_Rectifier:
+ MarkRectifierFrame(trc, frames);
+ break;
+ case JitFrame_IonAccessorIC:
+ MarkIonAccessorICFrame(trc, frames);
+ break;
+ default:
+ MOZ_CRASH("unexpected frame type");
+ }
+ }
+}
+
+void
+MarkJitActivations(JSRuntime* rt, JSTracer* trc)
+{
+ for (JitActivationIterator activations(rt); !activations.done(); ++activations)
+ MarkJitActivation(trc, activations);
+}
+
+JSCompartment*
+TopmostIonActivationCompartment(JSRuntime* rt)
+{
+ for (JitActivationIterator activations(rt); !activations.done(); ++activations) {
+ for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
+ if (frames.type() == JitFrame_IonJS)
+ return activations.activation()->compartment();
+ }
+ }
+ return nullptr;
+}
+
+void UpdateJitActivationsForMinorGC(JSRuntime* rt, JSTracer* trc)
+{
+ MOZ_ASSERT(trc->runtime()->isHeapMinorCollecting());
+ for (JitActivationIterator activations(rt); !activations.done(); ++activations) {
+ for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
+ if (frames.type() == JitFrame_IonJS)
+ UpdateIonJSFrameForMinorGC(trc, frames);
+ }
+ }
+}
+
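+// Recover the script and pc of the innermost (possibly inlined) JS frame of
+// the topmost Jit activation, consulting the PcScriptCache keyed on the
+// return address when possible.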
+void
+GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes)
+{
+ JitSpew(JitSpew_IonSnapshots, "Recover PC & Script from the last frame.");
+
+ // Recover the return address so that we can look it up in the
+ // PcScriptCache, as script/pc computation is expensive.
+ JSRuntime* rt = cx->runtime();
+ JitActivationIterator iter(rt);
+ JitFrameIterator it(iter);
+ uint8_t* retAddr;
+ if (it.isExitFrame()) {
+ ++it;
+
+ // Skip rectifier frames.
+ if (it.isRectifier()) {
+ ++it;
+ MOZ_ASSERT(it.isBaselineStub() || it.isBaselineJS() || it.isIonJS());
+ }
+
+ // Skip Baseline/Ion stub and accessor IC frames.
+ if (it.isBaselineStub()) {
+ ++it;
+ MOZ_ASSERT(it.isBaselineJS());
+ } else if (it.isIonStub() || it.isIonAccessorIC()) {
+ ++it;
+ MOZ_ASSERT(it.isIonJS());
+ }
+
+ MOZ_ASSERT(it.isBaselineJS() || it.isIonJS());
+
+ // Don't use the return address if the BaselineFrame has an override pc.
+ // The override pc is cheap to get, so we won't benefit from the cache,
+ // and the override pc could change without the return address changing.
+ // Moreover, sometimes when an override pc is present during exception
+ // handling, the return address is set to nullptr as a sanity check,
+ // since we do not return to the frame that threw the exception.
+ if (!it.isBaselineJS() || !it.baselineFrame()->hasOverridePc()) {
+ retAddr = it.returnAddressToFp();
+ MOZ_ASSERT(retAddr);
+ } else {
+ retAddr = nullptr;
+ }
+ } else {
+ MOZ_ASSERT(it.isBailoutJS());
+ retAddr = it.returnAddress();
+ }
+
+ uint32_t hash;
+ if (retAddr) {
+ hash = PcScriptCache::Hash(retAddr);
+
+ // Lazily initialize the cache. The allocation may safely fail and will not GC.
+ if (MOZ_UNLIKELY(rt->ionPcScriptCache == nullptr)) {
+ rt->ionPcScriptCache = (PcScriptCache*)js_malloc(sizeof(struct PcScriptCache));
+ if (rt->ionPcScriptCache)
+ rt->ionPcScriptCache->clear(rt->gc.gcNumber());
+ }
+
+ if (rt->ionPcScriptCache && rt->ionPcScriptCache->get(rt, hash, retAddr, scriptRes, pcRes))
+ return;
+ }
+
+ // Lookup failed: undertake expensive process to recover the innermost inlined frame.
+ jsbytecode* pc = nullptr;
+ if (it.isIonJS() || it.isBailoutJS()) {
+ InlineFrameIterator ifi(cx, &it);
+ *scriptRes = ifi.script();
+ pc = ifi.pc();
+ } else {
+ MOZ_ASSERT(it.isBaselineJS());
+ it.baselineScriptAndPc(scriptRes, &pc);
+ }
+
+ if (pcRes)
+ *pcRes = pc;
+
+ // Add entry to cache.
+ if (retAddr && rt->ionPcScriptCache)
+ rt->ionPcScriptCache->add(hash, retAddr, pc, *scriptRes);
+}
+
+uint32_t
+OsiIndex::returnPointDisplacement() const
+{
+ // In general, pointer arithmetic on code is bad, but in this case we are
+ // computing the return address of a call instruction, so stepping over
+ // constant pools would be wrong.
+ return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize();
+}
+
+RInstructionResults::RInstructionResults(JitFrameLayout* fp)
+ : results_(nullptr),
+ fp_(fp),
+ initialized_(false)
+{
+}
+
+RInstructionResults::RInstructionResults(RInstructionResults&& src)
+ : results_(mozilla::Move(src.results_)),
+ fp_(src.fp_),
+ initialized_(src.initialized_)
+{
+ src.initialized_ = false;
+}
+
+RInstructionResults&
+RInstructionResults::operator=(RInstructionResults&& rhs)
+{
+ MOZ_ASSERT(&rhs != this, "self-moves are prohibited");
+ this->~RInstructionResults();
+ new(this) RInstructionResults(mozilla::Move(rhs));
+ return *this;
+}
+
+RInstructionResults::~RInstructionResults()
+{
+ // results_ is freed by the UniquePtr.
+}
+
+bool
+RInstructionResults::init(JSContext* cx, uint32_t numResults)
+{
+ if (numResults) {
+ results_ = cx->make_unique<Values>();
+ if (!results_ || !results_->growBy(numResults))
+ return false;
+
+ Value guard = MagicValue(JS_ION_BAILOUT);
+ for (size_t i = 0; i < numResults; i++)
+ (*results_)[i].init(guard);
+ }
+
+ initialized_ = true;
+ return true;
+}
+
+bool
+RInstructionResults::isInitialized() const
+{
+ return initialized_;
+}
+
+#ifdef DEBUG
+size_t
+RInstructionResults::length() const
+{
+ return results_->length();
+}
+#endif
+
+JitFrameLayout*
+RInstructionResults::frame() const
+{
+ MOZ_ASSERT(fp_);
+ return fp_;
+}
+
+HeapPtr<Value>&
+RInstructionResults::operator [](size_t index)
+{
+ return (*results_)[index];
+}
+
+void
+RInstructionResults::trace(JSTracer* trc)
+{
+ // Note: The vector necessarily exists, otherwise this object would not
+ // have been stored on the activation from which the trace function is
+ // called.
+ TraceRange(trc, results_->length(), results_->begin(), "ion-recover-results");
+}
+
+
+SnapshotIterator::SnapshotIterator(const JitFrameIterator& iter, const MachineState* machineState)
+ : snapshot_(iter.ionScript()->snapshots(),
+ iter.snapshotOffset(),
+ iter.ionScript()->snapshotsRVATableSize(),
+ iter.ionScript()->snapshotsListSize()),
+ recover_(snapshot_,
+ iter.ionScript()->recovers(),
+ iter.ionScript()->recoversSize()),
+ fp_(iter.jsFrame()),
+ machine_(machineState),
+ ionScript_(iter.ionScript()),
+ instructionResults_(nullptr)
+{
+}
+
+SnapshotIterator::SnapshotIterator()
+ : snapshot_(nullptr, 0, 0, 0),
+ recover_(snapshot_, nullptr, 0),
+ fp_(nullptr),
+ ionScript_(nullptr),
+ instructionResults_(nullptr)
+{
+}
+
+int32_t
+SnapshotIterator::readOuterNumActualArgs() const
+{
+ return fp_->numActualArgs();
+}
+
+uintptr_t
+SnapshotIterator::fromStack(int32_t offset) const
+{
+ return ReadFrameSlot(fp_, offset);
+}
+
+static Value
+FromObjectPayload(uintptr_t payload)
+{
+ // Note: Both MIRType::Object and MIRType::ObjectOrNull are encoded in
+ // snapshots using JSVAL_TYPE_OBJECT.
+ return ObjectOrNullValue(reinterpret_cast<JSObject*>(payload));
+}
+
+static Value
+FromStringPayload(uintptr_t payload)
+{
+ return StringValue(reinterpret_cast<JSString*>(payload));
+}
+
+static Value
+FromSymbolPayload(uintptr_t payload)
+{
+ return SymbolValue(reinterpret_cast<JS::Symbol*>(payload));
+}
+
+static Value
+FromTypedPayload(JSValueType type, uintptr_t payload)
+{
+ switch (type) {
+ case JSVAL_TYPE_INT32:
+ return Int32Value(payload);
+ case JSVAL_TYPE_BOOLEAN:
+ return BooleanValue(!!payload);
+ case JSVAL_TYPE_STRING:
+ return FromStringPayload(payload);
+ case JSVAL_TYPE_SYMBOL:
+ return FromSymbolPayload(payload);
+ case JSVAL_TYPE_OBJECT:
+ return FromObjectPayload(payload);
+ default:
+ MOZ_CRASH("unexpected type - needs payload");
+ }
+}
+
+bool
+SnapshotIterator::allocationReadable(const RValueAllocation& alloc, ReadMethod rm)
+{
+ // If we have to recover stores, and if we are not interested in the
+ // default value of the instruction, then we have to check if the recover
+ // instruction results are available.
+ if (alloc.needSideEffect() && !(rm & RM_AlwaysDefault)) {
+ if (!hasInstructionResults())
+ return false;
+ }
+
+ switch (alloc.mode()) {
+ case RValueAllocation::DOUBLE_REG:
+ return hasRegister(alloc.fpuReg());
+
+ case RValueAllocation::TYPED_REG:
+ return hasRegister(alloc.reg2());
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG:
+ return hasRegister(alloc.reg()) && hasRegister(alloc.reg2());
+ case RValueAllocation::UNTYPED_REG_STACK:
+ return hasRegister(alloc.reg()) && hasStack(alloc.stackOffset2());
+ case RValueAllocation::UNTYPED_STACK_REG:
+ return hasStack(alloc.stackOffset()) && hasRegister(alloc.reg2());
+ case RValueAllocation::UNTYPED_STACK_STACK:
+ return hasStack(alloc.stackOffset()) && hasStack(alloc.stackOffset2());
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG:
+ return hasRegister(alloc.reg());
+ case RValueAllocation::UNTYPED_STACK:
+ return hasStack(alloc.stackOffset());
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ return hasInstructionResult(alloc.index());
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ return rm & RM_AlwaysDefault || hasInstructionResult(alloc.index());
+
+ default:
+ return true;
+ }
+}
+
+Value
+SnapshotIterator::allocationValue(const RValueAllocation& alloc, ReadMethod rm)
+{
+ switch (alloc.mode()) {
+ case RValueAllocation::CONSTANT:
+ return ionScript_->getConstant(alloc.index());
+
+ case RValueAllocation::CST_UNDEFINED:
+ return UndefinedValue();
+
+ case RValueAllocation::CST_NULL:
+ return NullValue();
+
+ case RValueAllocation::DOUBLE_REG:
+ return DoubleValue(fromRegister(alloc.fpuReg()));
+
+ case RValueAllocation::ANY_FLOAT_REG:
+ {
+ union {
+ double d;
+ float f;
+ } pun;
+ MOZ_ASSERT(alloc.fpuReg().isSingle());
+ pun.d = fromRegister(alloc.fpuReg());
+ // The register contains the encoding of a float32. We just read
+ // the bits without making any conversion.
+ return Float32Value(pun.f);
+ }
+
+ case RValueAllocation::ANY_FLOAT_STACK:
+ return Float32Value(ReadFrameFloat32Slot(fp_, alloc.stackOffset()));
+
+ case RValueAllocation::TYPED_REG:
+ return FromTypedPayload(alloc.knownType(), fromRegister(alloc.reg2()));
+
+ case RValueAllocation::TYPED_STACK:
+ {
+ switch (alloc.knownType()) {
+ case JSVAL_TYPE_DOUBLE:
+ return DoubleValue(ReadFrameDoubleSlot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_INT32:
+ return Int32Value(ReadFrameInt32Slot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_BOOLEAN:
+ return BooleanValue(ReadFrameBooleanSlot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_STRING:
+ return FromStringPayload(fromStack(alloc.stackOffset2()));
+ case JSVAL_TYPE_SYMBOL:
+ return FromSymbolPayload(fromStack(alloc.stackOffset2()));
+ case JSVAL_TYPE_OBJECT:
+ return FromObjectPayload(fromStack(alloc.stackOffset2()));
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ }
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG:
+ {
+ return Value::fromTagAndPayload(JSValueTag(fromRegister(alloc.reg())),
+ fromRegister(alloc.reg2()));
+ }
+
+ case RValueAllocation::UNTYPED_REG_STACK:
+ {
+ return Value::fromTagAndPayload(JSValueTag(fromRegister(alloc.reg())),
+ fromStack(alloc.stackOffset2()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK_REG:
+ {
+ return Value::fromTagAndPayload(JSValueTag(fromStack(alloc.stackOffset())),
+ fromRegister(alloc.reg2()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK_STACK:
+ {
+ return Value::fromTagAndPayload(JSValueTag(fromStack(alloc.stackOffset())),
+ fromStack(alloc.stackOffset2()));
+ }
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG:
+ {
+ return Value::fromRawBits(fromRegister(alloc.reg()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK:
+ {
+ return Value::fromRawBits(fromStack(alloc.stackOffset()));
+ }
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ return fromInstructionResult(alloc.index());
+
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ if (rm & RM_Normal && hasInstructionResult(alloc.index()))
+ return fromInstructionResult(alloc.index());
+ MOZ_ASSERT(rm & RM_AlwaysDefault);
+ return ionScript_->getConstant(alloc.index2());
+
+ default:
+ MOZ_CRASH("huh?");
+ }
+}
+
+const FloatRegisters::RegisterContent*
+SnapshotIterator::floatAllocationPointer(const RValueAllocation& alloc) const
+{
+ switch (alloc.mode()) {
+ case RValueAllocation::ANY_FLOAT_REG:
+ return machine_->address(alloc.fpuReg());
+
+ case RValueAllocation::ANY_FLOAT_STACK:
+ return (FloatRegisters::RegisterContent*) AddressOfFrameSlot(fp_, alloc.stackOffset());
+
+ default:
+ MOZ_CRASH("Not a float allocation.");
+ }
+}
+
+Value
+SnapshotIterator::maybeRead(const RValueAllocation& a, MaybeReadFallback& fallback)
+{
+ if (allocationReadable(a))
+ return allocationValue(a);
+
+ if (fallback.canRecoverResults()) {
+ // Code paths which are calling maybeRead are not always capable of
+ // returning an error code, as these code paths used to be infallible.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!initInstructionResults(fallback))
+ oomUnsafe.crash("js::jit::SnapshotIterator::maybeRead");
+
+ if (allocationReadable(a))
+ return allocationValue(a);
+
+ MOZ_ASSERT_UNREACHABLE("All allocations should be readable.");
+ }
+
+ return fallback.unreadablePlaceholder();
+}
+
+void
+SnapshotIterator::writeAllocationValuePayload(const RValueAllocation& alloc, const Value& v)
+{
+ uintptr_t payload = *v.payloadUIntPtr();
+#if defined(JS_PUNBOX64)
+ // Do not write back the tag, as this would trigger an assertion when we
+ // reconstruct the JS Value while marking again or when bailing out.
+ payload &= JSVAL_PAYLOAD_MASK;
+#endif
+
+ switch (alloc.mode()) {
+ case RValueAllocation::CONSTANT:
+ ionScript_->getConstant(alloc.index()) = v;
+ break;
+
+ case RValueAllocation::CST_UNDEFINED:
+ case RValueAllocation::CST_NULL:
+ case RValueAllocation::DOUBLE_REG:
+ case RValueAllocation::ANY_FLOAT_REG:
+ case RValueAllocation::ANY_FLOAT_STACK:
+ MOZ_CRASH("Not a GC thing: Unexpected write");
+ break;
+
+ case RValueAllocation::TYPED_REG:
+ machine_->write(alloc.reg2(), payload);
+ break;
+
+ case RValueAllocation::TYPED_STACK:
+ switch (alloc.knownType()) {
+ default:
+ MOZ_CRASH("Not a GC thing: Unexpected write");
+ break;
+ case JSVAL_TYPE_STRING:
+ case JSVAL_TYPE_SYMBOL:
+ case JSVAL_TYPE_OBJECT:
+ WriteFrameSlot(fp_, alloc.stackOffset2(), payload);
+ break;
+ }
+ break;
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG:
+ case RValueAllocation::UNTYPED_STACK_REG:
+ machine_->write(alloc.reg2(), payload);
+ break;
+
+ case RValueAllocation::UNTYPED_REG_STACK:
+ case RValueAllocation::UNTYPED_STACK_STACK:
+ WriteFrameSlot(fp_, alloc.stackOffset2(), payload);
+ break;
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG:
+ machine_->write(alloc.reg(), v.asRawBits());
+ break;
+
+ case RValueAllocation::UNTYPED_STACK:
+ WriteFrameSlot(fp_, alloc.stackOffset(), v.asRawBits());
+ break;
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ MOZ_CRASH("Recover instructions are handled by the JitActivation.");
+ break;
+
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ // Assume that we are always going to be writing to the default value
+ // while tracing.
+ ionScript_->getConstant(alloc.index2()) = v;
+ break;
+
+ default:
+ MOZ_CRASH("huh?");
+ }
+}
+
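+// Trace the GC thing (if any) stored in the next allocation of the snapshot,
+// writing the payload back if the GC moved it.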
+void
+SnapshotIterator::traceAllocation(JSTracer* trc)
+{
+ RValueAllocation alloc = readAllocation();
+ if (!allocationReadable(alloc, RM_AlwaysDefault))
+ return;
+
+ Value v = allocationValue(alloc, RM_AlwaysDefault);
+ if (!v.isMarkable())
+ return;
+
+ Value copy = v;
+ TraceRoot(trc, &v, "ion-typed-reg");
+ if (v != copy) {
+ MOZ_ASSERT(SameType(v, copy));
+ writeAllocationValuePayload(alloc, v);
+ }
+}
+
+const RResumePoint*
+SnapshotIterator::resumePoint() const
+{
+ return instruction()->toResumePoint();
+}
+
+uint32_t
+SnapshotIterator::numAllocations() const
+{
+ return instruction()->numOperands();
+}
+
+uint32_t
+SnapshotIterator::pcOffset() const
+{
+ return resumePoint()->pcOffset();
+}
+
+void
+SnapshotIterator::skipInstruction()
+{
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == 0);
+ size_t numOperands = instruction()->numOperands();
+ for (size_t i = 0; i < numOperands; i++)
+ skip();
+ nextInstruction();
+}
+
+bool
+SnapshotIterator::initInstructionResults(MaybeReadFallback& fallback)
+{
+ MOZ_ASSERT(fallback.canRecoverResults());
+ JSContext* cx = fallback.maybeCx;
+
+ // If there is only one resume point in the list of instructions, then there
+ // is no instruction to recover, and thus no need to register any results.
+ if (recover_.numInstructions() == 1)
+ return true;
+
+ JitFrameLayout* fp = fallback.frame->jsFrame();
+ RInstructionResults* results = fallback.activation->maybeIonFrameRecovery(fp);
+ if (!results) {
+ AutoCompartment ac(cx, fallback.frame->script()->compartment());
+
+ // We do not have the results yet, which means that an observable stack
+ // slot is requested. As we do not want to bail out every time for the
+ // same reason, we need to recompile without optimizing away the
+ // observable stack slots. The script will later be recompiled to have
+ // support for Arguments objects.
+ if (fallback.consequence == MaybeReadFallback::Fallback_Invalidate)
+ ionScript_->invalidate(cx, /* resetUses = */ false, "Observe recovered instruction.");
+
+ // Register the list of results on the activation. We need to do this
+ // before we initialize the list, so that if any recover instruction
+ // causes a GC, the results are properly traced by the activation.
+ RInstructionResults tmp(fallback.frame->jsFrame());
+ if (!fallback.activation->registerIonFrameRecovery(mozilla::Move(tmp)))
+ return false;
+
+ results = fallback.activation->maybeIonFrameRecovery(fp);
+
+ // Start a new snapshot at the beginning of the JitFrameIterator. This
+ // SnapshotIterator is used for evaluating the content of all recover
+ // instructions. The result is then saved on the JitActivation.
+ MachineState machine = fallback.frame->machineState();
+ SnapshotIterator s(*fallback.frame, &machine);
+ if (!s.computeInstructionResults(cx, results)) {
+ // If the evaluation failed because of OOMs, then we discard the
+ // current set of results that we collected so far.
+ fallback.activation->removeIonFrameRecovery(fp);
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(results->isInitialized());
+ MOZ_ASSERT(results->length() == recover_.numInstructions() - 1);
+ instructionResults_ = results;
+ return true;
+}
+
+bool
+SnapshotIterator::computeInstructionResults(JSContext* cx, RInstructionResults* results) const
+{
+ MOZ_ASSERT(!results->isInitialized());
+ MOZ_ASSERT(recover_.numInstructionsRead() == 1);
+
+ // The last instruction will always be a resume point.
+ size_t numResults = recover_.numInstructions() - 1;
+ if (!results->isInitialized()) {
+ if (!results->init(cx, numResults))
+ return false;
+
+ // No need to iterate over the only resume point.
+ if (!numResults) {
+ MOZ_ASSERT(results->isInitialized());
+ return true;
+ }
+
+ // Use AutoEnterAnalysis to avoid invoking the object metadata callback,
+ // which could try to walk the stack while bailing out.
+ AutoEnterAnalysis enter(cx);
+
+ // Fill with the results of recover instructions.
+ SnapshotIterator s(*this);
+ s.instructionResults_ = results;
+ while (s.moreInstructions()) {
+ // Skip resume points and only interpret recover instructions.
+ if (s.instruction()->isResumePoint()) {
+ s.skipInstruction();
+ continue;
+ }
+
+ if (!s.instruction()->recover(cx, s))
+ return false;
+ s.nextInstruction();
+ }
+ }
+
+ MOZ_ASSERT(results->isInitialized());
+ return true;
+}
+
+void
+SnapshotIterator::storeInstructionResult(const Value& v)
+{
+ uint32_t currIns = recover_.numInstructionsRead() - 1;
+ MOZ_ASSERT((*instructionResults_)[currIns].isMagic(JS_ION_BAILOUT));
+ (*instructionResults_)[currIns] = v;
+}
+
+Value
+SnapshotIterator::fromInstructionResult(uint32_t index) const
+{
+ MOZ_ASSERT(!(*instructionResults_)[index].isMagic(JS_ION_BAILOUT));
+ return (*instructionResults_)[index];
+}
+
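+// Skip instructions until the iterator is positioned on the resume point of
+// the current frame.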
+void
+SnapshotIterator::settleOnFrame()
+{
+ // Check that the current instruction can still be used.
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == 0);
+ while (!instruction()->isResumePoint())
+ skipInstruction();
+}
+
+void
+SnapshotIterator::nextFrame()
+{
+ nextInstruction();
+ settleOnFrame();
+}
+
+Value
+SnapshotIterator::maybeReadAllocByIndex(size_t index)
+{
+ while (index--) {
+ MOZ_ASSERT(moreAllocations());
+ skip();
+ }
+
+ Value s;
+ {
+ // This MaybeReadFallback method cannot GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ MaybeReadFallback fallback(UndefinedValue());
+ s = maybeRead(fallback);
+ }
+
+ while (moreAllocations())
+ skip();
+
+ return s;
+}
+
+JitFrameLayout*
+JitFrameIterator::jsFrame() const
+{
+ MOZ_ASSERT(isScripted());
+ if (isBailoutJS())
+ return (JitFrameLayout*) activation_->bailoutData()->fp();
+
+ return (JitFrameLayout*) fp();
+}
+
+IonScript*
+JitFrameIterator::ionScript() const
+{
+ MOZ_ASSERT(isIonScripted());
+ if (isBailoutJS())
+ return activation_->bailoutData()->ionScript();
+
+ IonScript* ionScript = nullptr;
+ if (checkInvalidation(&ionScript))
+ return ionScript;
+ return ionScriptFromCalleeToken();
+}
+
+IonScript*
+JitFrameIterator::ionScriptFromCalleeToken() const
+{
+ MOZ_ASSERT(isIonJS());
+ MOZ_ASSERT(!checkInvalidation());
+ return script()->ionScript();
+}
+
+const SafepointIndex*
+JitFrameIterator::safepoint() const
+{
+ MOZ_ASSERT(isIonJS());
+ if (!cachedSafepointIndex_)
+ cachedSafepointIndex_ = ionScript()->getSafepointIndex(returnAddressToFp());
+ return cachedSafepointIndex_;
+}
+
+SnapshotOffset
+JitFrameIterator::snapshotOffset() const
+{
+ MOZ_ASSERT(isIonScripted());
+ if (isBailoutJS())
+ return activation_->bailoutData()->snapshotOffset();
+ return osiIndex()->snapshotOffset();
+}
+
+const OsiIndex*
+JitFrameIterator::osiIndex() const
+{
+ MOZ_ASSERT(isIonJS());
+ SafepointReader reader(ionScript(), safepoint());
+ return ionScript()->getOsiIndex(reader.osiReturnPointOffset());
+}
+
+InlineFrameIterator::InlineFrameIterator(JSContext* cx, const JitFrameIterator* iter)
+ : calleeTemplate_(cx),
+ calleeRVA_(),
+ script_(cx)
+{
+ resetOn(iter);
+}
+
+InlineFrameIterator::InlineFrameIterator(JSRuntime* rt, const JitFrameIterator* iter)
+ : calleeTemplate_(rt->contextFromMainThread()),
+ calleeRVA_(),
+ script_(rt->contextFromMainThread())
+{
+ resetOn(iter);
+}
+
+InlineFrameIterator::InlineFrameIterator(JSContext* cx, const InlineFrameIterator* iter)
+ : frame_(iter ? iter->frame_ : nullptr),
+ framesRead_(0),
+ frameCount_(iter ? iter->frameCount_ : UINT32_MAX),
+ calleeTemplate_(cx),
+ calleeRVA_(),
+ script_(cx)
+{
+ if (frame_) {
+ machine_ = iter->machine_;
+ start_ = SnapshotIterator(*frame_, &machine_);
+
+ // findNextFrame will iterate to the next frame and initialize everything.
+ // Therefore, to settle on the same frame, we report one less frame as read.
+ framesRead_ = iter->framesRead_ - 1;
+ findNextFrame();
+ }
+}
+
+void
+InlineFrameIterator::resetOn(const JitFrameIterator* iter)
+{
+ frame_ = iter;
+ framesRead_ = 0;
+ frameCount_ = UINT32_MAX;
+
+ if (iter) {
+ machine_ = iter->machineState();
+ start_ = SnapshotIterator(*iter, &machine_);
+ findNextFrame();
+ }
+}
+
+void
+InlineFrameIterator::findNextFrame()
+{
+ MOZ_ASSERT(more());
+
+ si_ = start_;
+
+ // Read the initial frame out of the C stack.
+ calleeTemplate_ = frame_->maybeCallee();
+ calleeRVA_ = RValueAllocation();
+ script_ = frame_->script();
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ // Settle on the outermost frame without evaluating any instructions before
+ // looking for a pc.
+ si_.settleOnFrame();
+
+ pc_ = script_->offsetToPC(si_.pcOffset());
+ numActualArgs_ = 0xbadbad;
+
+ // This unfortunately is O(n*m), because we must skip over outer frames
+ // before reading inner ones.
+
+ // The first time (frameCount_ == UINT32_MAX) we do not know the number of
+ // frames that we are going to inspect. So we iterate until there are no
+ // more frames, to settle on the innermost frame and to count the number
+ // of frames.
+ size_t remaining = (frameCount_ != UINT32_MAX) ? frameNo() - 1 : SIZE_MAX;
+
+ size_t i = 1;
+ for (; i <= remaining && si_.moreFrames(); i++) {
+ MOZ_ASSERT(IsIonInlinablePC(pc_));
+
+ // Recover the number of actual arguments from the script.
+ if (JSOp(*pc_) != JSOP_FUNAPPLY)
+ numActualArgs_ = GET_ARGC(pc_);
+ if (JSOp(*pc_) == JSOP_FUNCALL) {
+ MOZ_ASSERT(GET_ARGC(pc_) > 0);
+ numActualArgs_ = GET_ARGC(pc_) - 1;
+ } else if (IsGetPropPC(pc_)) {
+ numActualArgs_ = 0;
+ } else if (IsSetPropPC(pc_)) {
+ numActualArgs_ = 1;
+ }
+
+ if (numActualArgs_ == 0xbadbad)
+ MOZ_CRASH("Couldn't deduce the number of arguments of an ionmonkey frame");
+
+ // Skip over non-argument slots, as well as |this|.
+ bool skipNewTarget = JSOp(*pc_) == JSOP_NEW;
+ unsigned skipCount = (si_.numAllocations() - 1) - numActualArgs_ - 1 - skipNewTarget;
+ for (unsigned j = 0; j < skipCount; j++)
+ si_.skip();
+
+ // This value should correspond to the function which is being inlined.
+ // The value must be readable to iterate over the inline frame. Most of
+ // the time, these functions are stored as JSFunction constants, in
+ // registers holding the JSFunction pointer, or in recover instructions
+ // with a default value.
+ Value funval = si_.readWithDefault(&calleeRVA_);
+
+ // Skip extra value allocations.
+ while (si_.moreAllocations())
+ si_.skip();
+
+ si_.nextFrame();
+
+ calleeTemplate_ = &funval.toObject().as<JSFunction>();
+
+ // Inlined functions may be clones that still point to the lazy script
+ // of the executed script. The actual script exists, though; just make
+ // sure the function points to it.
+ script_ = calleeTemplate_->existingScript();
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ pc_ = script_->offsetToPC(si_.pcOffset());
+ }
+
+ // The first time through, we do not know the number of frames; we only
+ // settle on the last frame and update the frame count based on the number
+ // of iterations we have done.
+ if (frameCount_ == UINT32_MAX) {
+ MOZ_ASSERT(!si_.moreFrames());
+ frameCount_ = i;
+ }
+
+ framesRead_++;
+}
+
+JSFunction*
+InlineFrameIterator::callee(MaybeReadFallback& fallback) const
+{
+ MOZ_ASSERT(isFunctionFrame());
+ if (calleeRVA_.mode() == RValueAllocation::INVALID || !fallback.canRecoverResults())
+ return calleeTemplate_;
+
+ SnapshotIterator s(si_);
+ // :TODO: Handle allocation failures from recover instruction.
+ Value funval = s.maybeRead(calleeRVA_, fallback);
+ return &funval.toObject().as<JSFunction>();
+}
+
+JSObject*
+InlineFrameIterator::computeEnvironmentChain(const Value& envChainValue,
+ MaybeReadFallback& fallback,
+ bool* hasInitialEnv) const
+{
+ if (envChainValue.isObject()) {
+ if (hasInitialEnv) {
+ if (fallback.canRecoverResults()) {
+ RootedObject obj(fallback.maybeCx, &envChainValue.toObject());
+ *hasInitialEnv = isFunctionFrame() &&
+ callee(fallback)->needsFunctionEnvironmentObjects();
+ return obj;
+ } else {
+ JS::AutoSuppressGCAnalysis nogc; // If we cannot recover then we cannot GC.
+ *hasInitialEnv = isFunctionFrame() &&
+ callee(fallback)->needsFunctionEnvironmentObjects();
+ }
+ }
+
+ return &envChainValue.toObject();
+ }
+
+ // Note we can hit this case even for functions with a CallObject, if we
+ // are walking the frame during the function prologue, before the env
+ // chain has been initialized.
+ if (isFunctionFrame())
+ return callee(fallback)->environment();
+
+ // Ion does not handle non-function scripts that have anything other than
+ // the global on their env chain.
+ MOZ_ASSERT(!script()->isForEval());
+ MOZ_ASSERT(!script()->hasNonSyntacticScope());
+ return &script()->global().lexicalEnvironment();
+}
+
+bool
+InlineFrameIterator::isFunctionFrame() const
+{
+ return !!calleeTemplate_;
+}
+
+MachineState
+MachineState::FromBailout(RegisterDump::GPRArray& regs, RegisterDump::FPUArray& fpregs)
+{
+ MachineState machine;
+
+ for (unsigned i = 0; i < Registers::Total; i++)
+ machine.setRegisterLocation(Register::FromCode(i), &regs[i].r);
+#ifdef JS_CODEGEN_ARM
+ float* fbase = (float*)&fpregs[0];
+ for (unsigned i = 0; i < FloatRegisters::TotalDouble; i++)
+ machine.setRegisterLocation(FloatRegister(i, FloatRegister::Double), &fpregs[i].d);
+ for (unsigned i = 0; i < FloatRegisters::TotalSingle; i++)
+ machine.setRegisterLocation(FloatRegister(i, FloatRegister::Single), (double*)&fbase[i]);
+#elif defined(JS_CODEGEN_MIPS32)
+ float* fbase = (float*)&fpregs[0];
+ for (unsigned i = 0; i < FloatRegisters::TotalDouble; i++) {
+ machine.setRegisterLocation(FloatRegister::FromIndex(i, FloatRegister::Double),
+ &fpregs[i].d);
+ }
+ for (unsigned i = 0; i < FloatRegisters::TotalSingle; i++) {
+ machine.setRegisterLocation(FloatRegister::FromIndex(i, FloatRegister::Single),
+ (double*)&fbase[i]);
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
+ }
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Simd128), &fpregs[i]);
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
+ machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
+ }
+
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Unknown architecture!"
+#endif
+ return machine;
+}
+
+bool
+InlineFrameIterator::isConstructing() const
+{
+ // Skip the current frame and look at the caller's.
+ if (more()) {
+ InlineFrameIterator parent(GetJSContextFromMainThread(), this);
+ ++parent;
+
+ // Inlined Getters and Setters are never constructing.
+ if (IsGetPropPC(parent.pc()) || IsSetPropPC(parent.pc()))
+ return false;
+
+ // In the case of a JS frame, look up the pc from the snapshot.
+ MOZ_ASSERT(IsCallPC(parent.pc()));
+
+ return (JSOp)*parent.pc() == JSOP_NEW;
+ }
+
+ return frame_->isConstructing();
+}
+
+bool
+JitFrameIterator::isConstructing() const
+{
+ return CalleeTokenIsConstructing(calleeToken());
+}
+
+unsigned
+JitFrameIterator::numActualArgs() const
+{
+ if (isScripted())
+ return jsFrame()->numActualArgs();
+
+ MOZ_ASSERT(isExitFrameLayout<NativeExitFrameLayout>());
+ return exitFrame()->as<NativeExitFrameLayout>()->argc();
+}
+
+void
+SnapshotIterator::warnUnreadableAllocation()
+{
+ fprintf(stderr, "Warning! Tried to access unreadable value allocation (possible f.arguments).\n");
+}
+
+struct DumpOp {
+ explicit DumpOp(unsigned int i) : i_(i) {}
+
+ unsigned int i_;
+ void operator()(const Value& v) {
+ fprintf(stderr, " actual (arg %d): ", i_);
+#ifdef DEBUG
+ DumpValue(v);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ i_++;
+ }
+};
+
+void
+JitFrameIterator::dumpBaseline() const
+{
+ MOZ_ASSERT(isBaselineJS());
+
+ fprintf(stderr, " JS Baseline frame\n");
+ if (isFunctionFrame()) {
+ fprintf(stderr, " callee fun: ");
+#ifdef DEBUG
+ DumpObject(callee());
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %" PRIuSIZE "\n",
+ script()->filename(), script()->lineno());
+
+ JSContext* cx = GetJSContextFromMainThread();
+ RootedScript script(cx);
+ jsbytecode* pc;
+ baselineScriptAndPc(script.address(), &pc);
+
+ fprintf(stderr, " script = %p, pc = %p (offset %u)\n", (void*)script, pc, uint32_t(script->pcToOffset(pc)));
+ fprintf(stderr, " current op: %s\n", CodeName[*pc]);
+
+ fprintf(stderr, " actual args: %d\n", numActualArgs());
+
+ BaselineFrame* frame = baselineFrame();
+
+ for (unsigned i = 0; i < frame->numValueSlots(); i++) {
+ fprintf(stderr, " slot %u: ", i);
+#ifdef DEBUG
+ Value* v = frame->valueSlot(i);
+ DumpValue(*v);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+}
+
+void
+InlineFrameIterator::dump() const
+{
+ MaybeReadFallback fallback(UndefinedValue());
+
+ if (more())
+ fprintf(stderr, " JS frame (inlined)\n");
+ else
+ fprintf(stderr, " JS frame\n");
+
+ bool isFunction = false;
+ if (isFunctionFrame()) {
+ isFunction = true;
+ fprintf(stderr, " callee fun: ");
+#ifdef DEBUG
+ DumpObject(callee(fallback));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %" PRIuSIZE "\n",
+ script()->filename(), script()->lineno());
+
+ fprintf(stderr, " script = %p, pc = %p\n", (void*) script(), pc());
+ fprintf(stderr, " current op: %s\n", CodeName[*pc()]);
+
+ if (!more()) {
+ numActualArgs();
+ }
+
+ SnapshotIterator si = snapshotIterator();
+ fprintf(stderr, " slots: %u\n", si.numAllocations() - 1);
+ for (unsigned i = 0; i < si.numAllocations() - 1; i++) {
+ if (isFunction) {
+ if (i == 0)
+ fprintf(stderr, " env chain: ");
+ else if (i == 1)
+ fprintf(stderr, " this: ");
+ else if (i - 2 < calleeTemplate()->nargs())
+ fprintf(stderr, " formal (arg %d): ", i - 2);
+ else {
+ if (i - 2 == calleeTemplate()->nargs() && numActualArgs() > calleeTemplate()->nargs()) {
+ DumpOp d(calleeTemplate()->nargs());
+ unaliasedForEachActual(GetJSContextFromMainThread(), d, ReadFrame_Overflown, fallback);
+ }
+
+ fprintf(stderr, " slot %d: ", int(i - 2 - calleeTemplate()->nargs()));
+ }
+ } else
+ fprintf(stderr, " slot %u: ", i);
+#ifdef DEBUG
+ DumpValue(si.maybeRead(fallback));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ fputc('\n', stderr);
+}
+
+void
+JitFrameIterator::dump() const
+{
+ switch (type_) {
+ case JitFrame_Entry:
+ fprintf(stderr, " Entry frame\n");
+ fprintf(stderr, " Frame size: %u\n", unsigned(current()->prevFrameLocalSize()));
+ break;
+ case JitFrame_BaselineJS:
+ dumpBaseline();
+ break;
+ case JitFrame_BaselineStub:
+ fprintf(stderr, " Baseline stub frame\n");
+ fprintf(stderr, " Frame size: %u\n", unsigned(current()->prevFrameLocalSize()));
+ break;
+ case JitFrame_Bailout:
+ case JitFrame_IonJS:
+ {
+ InlineFrameIterator frames(GetJSContextFromMainThread(), this);
+ for (;;) {
+ frames.dump();
+ if (!frames.more())
+ break;
+ ++frames;
+ }
+ break;
+ }
+ case JitFrame_IonStub:
+ fprintf(stderr, " Ion stub frame\n");
+ fprintf(stderr, " Frame size: %u\n", unsigned(current()->prevFrameLocalSize()));
+ break;
+ case JitFrame_Rectifier:
+ fprintf(stderr, " Rectifier frame\n");
+ fprintf(stderr, " Frame size: %u\n", unsigned(current()->prevFrameLocalSize()));
+ break;
+ case JitFrame_IonAccessorIC:
+ fprintf(stderr, " Ion scripted accessor IC\n");
+ fprintf(stderr, " Frame size: %u\n", unsigned(current()->prevFrameLocalSize()));
+ break;
+ case JitFrame_Exit:
+ fprintf(stderr, " Exit frame\n");
+ break;
+ };
+ fputc('\n', stderr);
+}
+
+#ifdef DEBUG
+bool
+JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()
+{
+ MOZ_ASSERT(returnAddressToFp_ != nullptr);
+
+ // Only handle Ion frames for now.
+ if (type_ != JitFrame_IonJS && type_ != JitFrame_BaselineJS)
+ return true;
+
+ JSRuntime* rt = js::TlsPerThreadData.get()->runtimeIfOnOwnerThread();
+
+ // Don't verify on non-main-thread.
+ if (!rt)
+ return true;
+
+ // Don't verify if sampling is being suppressed.
+ if (!rt->isProfilerSamplingEnabled())
+ return true;
+
+ if (rt->isHeapMinorCollecting())
+ return true;
+
+ JitRuntime* jitrt = rt->jitRuntime();
+
+ // Look up and print bytecode info for the native address.
+ const JitcodeGlobalEntry* entry = jitrt->getJitcodeGlobalTable()->lookup(returnAddressToFp_);
+ if (!entry)
+ return true;
+
+ JitSpew(JitSpew_Profiling, "Found nativeToBytecode entry for %p: %p - %p",
+ returnAddressToFp_, entry->nativeStartAddr(), entry->nativeEndAddr());
+
+ JitcodeGlobalEntry::BytecodeLocationVector location;
+ uint32_t depth = UINT32_MAX;
+ if (!entry->callStackAtAddr(rt, returnAddressToFp_, location, &depth))
+ return false;
+ MOZ_ASSERT(depth > 0 && depth != UINT32_MAX);
+ MOZ_ASSERT(location.length() == depth);
+
+ JitSpew(JitSpew_Profiling, "Found bytecode location of depth %d:", depth);
+ for (size_t i = 0; i < location.length(); i++) {
+ JitSpew(JitSpew_Profiling, " %s:%" PRIuSIZE " - %" PRIuSIZE,
+ location[i].script->filename(), location[i].script->lineno(),
+ size_t(location[i].pc - location[i].script->code()));
+ }
+
+ if (type_ == JitFrame_IonJS) {
+ // Create an InlineFrameIterator here and verify the mapped info against the iterator info.
+ InlineFrameIterator inlineFrames(GetJSContextFromMainThread(), this);
+ for (size_t idx = 0; idx < location.length(); idx++) {
+ MOZ_ASSERT(idx < location.length());
+ MOZ_ASSERT_IF(idx < location.length() - 1, inlineFrames.more());
+
+ JitSpew(JitSpew_Profiling,
+ "Match %d: ION %s:%" PRIuSIZE "(%" PRIuSIZE ") vs N2B %s:%" PRIuSIZE "(%" PRIuSIZE ")",
+ (int)idx,
+ inlineFrames.script()->filename(),
+ inlineFrames.script()->lineno(),
+ size_t(inlineFrames.pc() - inlineFrames.script()->code()),
+ location[idx].script->filename(),
+ location[idx].script->lineno(),
+ size_t(location[idx].pc - location[idx].script->code()));
+
+ MOZ_ASSERT(inlineFrames.script() == location[idx].script);
+
+ if (inlineFrames.more())
+ ++inlineFrames;
+ }
+ }
+
+ return true;
+}
+#endif // DEBUG
+
+JitProfilingFrameIterator::JitProfilingFrameIterator(
+ JSRuntime* rt, const JS::ProfilingFrameIterator::RegisterState& state)
+{
+ // If no profilingActivation is live, initialize directly to
+ // end-of-iteration state.
+ if (!rt->profilingActivation()) {
+ type_ = JitFrame_Entry;
+ fp_ = nullptr;
+ returnAddressToFp_ = nullptr;
+ return;
+ }
+
+ MOZ_ASSERT(rt->profilingActivation()->isJit());
+
+ JitActivation* act = rt->profilingActivation()->asJit();
+
+ // If the top JitActivation has a null lastProfilingFrame, assume that
+ // it's a trivially empty activation, and initialize directly
+ // to end-of-iteration state.
+ if (!act->lastProfilingFrame()) {
+ type_ = JitFrame_Entry;
+ fp_ = nullptr;
+ returnAddressToFp_ = nullptr;
+ return;
+ }
+
+ // Get the fp from the current profilingActivation
+ fp_ = (uint8_t*) act->lastProfilingFrame();
+ void* lastCallSite = act->lastProfilingCallSite();
+
+ JitcodeGlobalTable* table = rt->jitRuntime()->getJitcodeGlobalTable();
+
+ // Profiler sampling must NOT be suppressed if we are here.
+ MOZ_ASSERT(rt->isProfilerSamplingEnabled());
+
+ // Try initializing with sampler pc
+ if (tryInitWithPC(state.pc))
+ return;
+
+ // Try initializing with sampler pc using native=>bytecode table.
+ if (tryInitWithTable(table, state.pc, rt, /* forLastCallSite = */ false))
+ return;
+
+ // Try initializing with lastProfilingCallSite pc
+ if (lastCallSite) {
+ if (tryInitWithPC(lastCallSite))
+ return;
+
+ // Try initializing with lastProfilingCallSite pc using native=>bytecode table.
+ if (tryInitWithTable(table, lastCallSite, rt, /* forLastCallSite = */ true))
+ return;
+ }
+
+ MOZ_ASSERT(frameScript()->hasBaselineScript());
+
+ // If nothing matches, for now just assume we are at the start of the last frame's
+ // baseline jit code.
+ type_ = JitFrame_BaselineJS;
+ returnAddressToFp_ = frameScript()->baselineScript()->method()->raw();
+}
+
+template <typename ReturnType = CommonFrameLayout*>
+inline ReturnType
+GetPreviousRawFrame(CommonFrameLayout* frame)
+{
+ size_t prevSize = frame->prevFrameLocalSize() + frame->headerSize();
+ return ReturnType((uint8_t*)frame + prevSize);
+}
+
+JitProfilingFrameIterator::JitProfilingFrameIterator(void* exitFrame)
+{
+ // Skip the exit frame.
+ ExitFrameLayout* frame = (ExitFrameLayout*) exitFrame;
+ moveToNextFrame(frame);
+}
+
+bool
+JitProfilingFrameIterator::tryInitWithPC(void* pc)
+{
+ JSScript* callee = frameScript();
+
+ // Check for Ion first, since it's more likely for hot code.
+ if (callee->hasIonScript() && callee->ionScript()->method()->containsNativePC(pc)) {
+ type_ = JitFrame_IonJS;
+ returnAddressToFp_ = pc;
+ return true;
+ }
+
+ // Check for containment in Baseline jitcode second.
+ if (callee->hasBaselineScript() && callee->baselineScript()->method()->containsNativePC(pc)) {
+ type_ = JitFrame_BaselineJS;
+ returnAddressToFp_ = pc;
+ return true;
+ }
+
+ return false;
+}
+
+bool
+JitProfilingFrameIterator::tryInitWithTable(JitcodeGlobalTable* table, void* pc, JSRuntime* rt,
+ bool forLastCallSite)
+{
+ if (!pc)
+ return false;
+
+ const JitcodeGlobalEntry* entry = table->lookup(pc);
+ if (!entry)
+ return false;
+
+ JSScript* callee = frameScript();
+
+ MOZ_ASSERT(entry->isIon() || entry->isBaseline() || entry->isIonCache() || entry->isDummy());
+
+ // Treat dummy lookups as an empty frame sequence.
+ if (entry->isDummy()) {
+ type_ = JitFrame_Entry;
+ fp_ = nullptr;
+ returnAddressToFp_ = nullptr;
+ return true;
+ }
+
+ if (entry->isIon()) {
+ // If the looked-up callee doesn't match the frame callee, don't accept lastProfilingCallSite.
+ if (entry->ionEntry().getScript(0) != callee)
+ return false;
+
+ type_ = JitFrame_IonJS;
+ returnAddressToFp_ = pc;
+ return true;
+ }
+
+ if (entry->isBaseline()) {
+ // If the looked-up callee doesn't match the frame callee, don't accept lastProfilingCallSite.
+ if (forLastCallSite && entry->baselineEntry().script() != callee)
+ return false;
+
+ type_ = JitFrame_BaselineJS;
+ returnAddressToFp_ = pc;
+ return true;
+ }
+
+ if (entry->isIonCache()) {
+ void* ptr = entry->ionCacheEntry().rejoinAddr();
+ const JitcodeGlobalEntry& ionEntry = table->lookupInfallible(ptr);
+ MOZ_ASSERT(ionEntry.isIon());
+
+ if (ionEntry.ionEntry().getScript(0) != callee)
+ return false;
+
+ type_ = JitFrame_IonJS;
+ returnAddressToFp_ = pc;
+ return true;
+ }
+
+ return false;
+}
+
+void
+JitProfilingFrameIterator::fixBaselineReturnAddress()
+{
+ MOZ_ASSERT(type_ == JitFrame_BaselineJS);
+ BaselineFrame* bl = (BaselineFrame*)(fp_ - BaselineFrame::FramePointerOffset -
+ BaselineFrame::Size());
+
+ // Debug mode OSR for Baseline uses a "continuation fixer" and stashes the
+ // actual return address in an auxiliary structure.
+ if (BaselineDebugModeOSRInfo* info = bl->getDebugModeOSRInfo()) {
+ returnAddressToFp_ = info->resumeAddr;
+ return;
+ }
+
+ // Resuming a generator via .throw() pushes a bogus return address onto
+ // the stack. We have the actual jsbytecode* stashed on the frame itself;
+ // translate that into the Baseline code address.
+ if (jsbytecode* override = bl->maybeOverridePc()) {
+ JSScript* script = bl->script();
+ returnAddressToFp_ = script->baselineScript()->nativeCodeForPC(script, override);
+ return;
+ }
+}
+
+void
+JitProfilingFrameIterator::operator++()
+{
+ JitFrameLayout* frame = framePtr();
+ moveToNextFrame(frame);
+}
+
+void
+JitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame)
+{
+ /*
+ * fp_ points to a Baseline or Ion frame. The possible call-stack
+ * patterns occurring between this frame and a previous Ion or Baseline
+ * frame are as follows:
+ *
+ * <Baseline-Or-Ion>
+ * ^
+ * |
+ * ^--- Ion
+ * |
+ * ^--- Baseline Stub <---- Baseline
+ * |
+ * ^--- Argument Rectifier
+ * | ^
+ * | |
+ * | ^--- Ion
+ * | |
+ * | ^--- Baseline Stub <---- Baseline
+ * |
+ * ^--- Entry Frame (From C++)
+ * Exit Frame (From previous JitActivation)
+ * ^
+ * |
+ * ^--- Ion
+ * |
+ * ^--- Baseline
+ * |
+ * ^--- Baseline Stub <---- Baseline
+ */
+ FrameType prevType = frame->prevType();
+
+ if (prevType == JitFrame_IonJS) {
+ returnAddressToFp_ = frame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ type_ = JitFrame_IonJS;
+ return;
+ }
+
+ if (prevType == JitFrame_BaselineJS) {
+ returnAddressToFp_ = frame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ type_ = JitFrame_BaselineJS;
+ fixBaselineReturnAddress();
+ return;
+ }
+
+ if (prevType == JitFrame_BaselineStub) {
+ BaselineStubFrameLayout* stubFrame = GetPreviousRawFrame<BaselineStubFrameLayout*>(frame);
+ MOZ_ASSERT(stubFrame->prevType() == JitFrame_BaselineJS);
+
+ returnAddressToFp_ = stubFrame->returnAddress();
+ fp_ = ((uint8_t*) stubFrame->reverseSavedFramePtr())
+ + jit::BaselineFrame::FramePointerOffset;
+ type_ = JitFrame_BaselineJS;
+ return;
+ }
+
+ if (prevType == JitFrame_Rectifier) {
+ RectifierFrameLayout* rectFrame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
+ FrameType rectPrevType = rectFrame->prevType();
+
+ if (rectPrevType == JitFrame_IonJS) {
+ returnAddressToFp_ = rectFrame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(rectFrame);
+ type_ = JitFrame_IonJS;
+ return;
+ }
+
+ if (rectPrevType == JitFrame_BaselineStub) {
+ BaselineStubFrameLayout* stubFrame =
+ GetPreviousRawFrame<BaselineStubFrameLayout*>(rectFrame);
+ returnAddressToFp_ = stubFrame->returnAddress();
+ fp_ = ((uint8_t*) stubFrame->reverseSavedFramePtr())
+ + jit::BaselineFrame::FramePointerOffset;
+ type_ = JitFrame_BaselineJS;
+ return;
+ }
+
+ MOZ_CRASH("Bad frame type prior to rectifier frame.");
+ }
+
+ if (prevType == JitFrame_IonAccessorIC) {
+ IonAccessorICFrameLayout* accessorFrame =
+ GetPreviousRawFrame<IonAccessorICFrameLayout*>(frame);
+
+ MOZ_ASSERT(accessorFrame->prevType() == JitFrame_IonJS);
+
+ returnAddressToFp_ = accessorFrame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(accessorFrame);
+ type_ = JitFrame_IonJS;
+ return;
+ }
+
+ if (prevType == JitFrame_Entry) {
+ // No previous frame, set to null to indicate that the iterator is done().
+ returnAddressToFp_ = nullptr;
+ fp_ = nullptr;
+ type_ = JitFrame_Entry;
+ return;
+ }
+
+ MOZ_CRASH("Bad frame type.");
+}
+
+JitFrameLayout*
+InvalidationBailoutStack::fp() const
+{
+ return (JitFrameLayout*) (sp() + ionScript_->frameSize());
+}
+
+void
+InvalidationBailoutStack::checkInvariants() const
+{
+#ifdef DEBUG
+ JitFrameLayout* frame = fp();
+ CalleeToken token = frame->calleeToken();
+ MOZ_ASSERT(token);
+
+ uint8_t* rawBase = ionScript()->method()->raw();
+ uint8_t* rawLimit = rawBase + ionScript()->method()->instructionsSize();
+ uint8_t* osiPoint = osiPointReturnAddress();
+ MOZ_ASSERT(rawBase <= osiPoint && osiPoint <= rawLimit);
+#endif
+}
+
+void
+AssertJitStackInvariants(JSContext* cx)
+{
+ for (JitActivationIterator activations(cx->runtime()); !activations.done(); ++activations) {
+ JitFrameIterator frames(activations);
+ size_t prevFrameSize = 0;
+ size_t frameSize = 0;
+ bool isScriptedCallee = false;
+ for (; !frames.done(); ++frames) {
+ size_t calleeFp = reinterpret_cast<size_t>(frames.fp());
+ size_t callerFp = reinterpret_cast<size_t>(frames.prevFp());
+ MOZ_ASSERT(callerFp >= calleeFp);
+ prevFrameSize = frameSize;
+ frameSize = callerFp - calleeFp;
+
+ if (frames.prevType() == JitFrame_Rectifier) {
+ MOZ_RELEASE_ASSERT(frameSize % JitStackAlignment == 0,
+ "The rectifier frame should keep the alignment");
+
+ size_t expectedFrameSize = 0
+#if defined(JS_CODEGEN_X86)
+ + sizeof(void*) /* frame pointer */
+#endif
+ + sizeof(Value) * (frames.callee()->nargs() +
+ 1 /* |this| argument */ +
+ frames.isConstructing() /* new.target */)
+ + sizeof(JitFrameLayout);
+ MOZ_RELEASE_ASSERT(frameSize >= expectedFrameSize,
+ "The frame is large enough to hold all arguments");
+ MOZ_RELEASE_ASSERT(expectedFrameSize + JitStackAlignment > frameSize,
+ "The frame size is optimal");
+ }
+
+ if (frames.isExitFrame()) {
+ // For the moment, we do not keep the JitStackAlignment
+ // alignment for exit frames.
+ frameSize -= ExitFrameLayout::Size();
+ }
+
+ if (frames.isIonJS()) {
+ // Ideally, we should not have such a requirement; instead, the
+ // alignment delta should be kept as part of the Safepoint so that we
+ // can pad accordingly when making out-of-line calls. In the meantime,
+ // keep check-points where we can guarantee that everything can
+ // properly be aligned before adding complexity.
+ MOZ_RELEASE_ASSERT(frames.ionScript()->frameSize() % JitStackAlignment == 0,
+ "Ensure that if the Ion frame is aligned, then the spill base is also aligned");
+
+ if (isScriptedCallee) {
+ MOZ_RELEASE_ASSERT(prevFrameSize % JitStackAlignment == 0,
+ "The ion frame should keep the alignment");
+ }
+ }
+
+ // The stack is dynamically aligned by baseline stubs before calling
+ // any jitted code.
+ if (frames.prevType() == JitFrame_BaselineStub && isScriptedCallee) {
+ MOZ_RELEASE_ASSERT(calleeFp % JitStackAlignment == 0,
+ "The baseline stub restores the stack alignment");
+ }
+
+ isScriptedCallee = false
+ || frames.isScripted()
+ || frames.type() == JitFrame_Rectifier;
+ }
+
+ MOZ_RELEASE_ASSERT(frames.type() == JitFrame_Entry,
+ "The first frame of a Jit activation should be an entry frame");
+ MOZ_RELEASE_ASSERT(reinterpret_cast<size_t>(frames.fp()) % JitStackAlignment == 0,
+ "The entry frame should be properly aligned");
+ }
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
new file mode 100644
index 000000000..e8faf9787
--- /dev/null
+++ b/js/src/jit/JitFrames.h
@@ -0,0 +1,1044 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrames_h
+#define jit_JitFrames_h
+
+#include <stdint.h>
+
+#include "jscntxt.h"
+#include "jsfun.h"
+
+#include "jit/JitFrameIterator.h"
+#include "jit/Safepoints.h"
+
+namespace js {
+namespace jit {
+
+enum CalleeTokenTag
+{
+ CalleeToken_Function = 0x0, // untagged
+ CalleeToken_FunctionConstructing = 0x1,
+ CalleeToken_Script = 0x2
+};
+
+static const uintptr_t CalleeTokenMask = ~uintptr_t(0x3);
+
+static inline CalleeTokenTag
+GetCalleeTokenTag(CalleeToken token)
+{
+ CalleeTokenTag tag = CalleeTokenTag(uintptr_t(token) & 0x3);
+ MOZ_ASSERT(tag <= CalleeToken_Script);
+ return tag;
+}
+static inline CalleeToken
+CalleeToToken(JSFunction* fun, bool constructing)
+{
+ CalleeTokenTag tag = constructing ? CalleeToken_FunctionConstructing : CalleeToken_Function;
+ return CalleeToken(uintptr_t(fun) | uintptr_t(tag));
+}
+static inline CalleeToken
+CalleeToToken(JSScript* script)
+{
+ return CalleeToken(uintptr_t(script) | uintptr_t(CalleeToken_Script));
+}
+static inline bool
+CalleeTokenIsFunction(CalleeToken token)
+{
+ CalleeTokenTag tag = GetCalleeTokenTag(token);
+ return tag == CalleeToken_Function || tag == CalleeToken_FunctionConstructing;
+}
+static inline bool
+CalleeTokenIsConstructing(CalleeToken token)
+{
+ return GetCalleeTokenTag(token) == CalleeToken_FunctionConstructing;
+}
+static inline JSFunction*
+CalleeTokenToFunction(CalleeToken token)
+{
+ MOZ_ASSERT(CalleeTokenIsFunction(token));
+ return (JSFunction*)(uintptr_t(token) & CalleeTokenMask);
+}
+static inline JSScript*
+CalleeTokenToScript(CalleeToken token)
+{
+ MOZ_ASSERT(GetCalleeTokenTag(token) == CalleeToken_Script);
+ return (JSScript*)(uintptr_t(token) & CalleeTokenMask);
+}
+static inline bool
+CalleeTokenIsModuleScript(CalleeToken token)
+{
+ CalleeTokenTag tag = GetCalleeTokenTag(token);
+ return tag == CalleeToken_Script && CalleeTokenToScript(token)->module();
+}
+
+static inline JSScript*
+ScriptFromCalleeToken(CalleeToken token)
+{
+ switch (GetCalleeTokenTag(token)) {
+ case CalleeToken_Script:
+ return CalleeTokenToScript(token);
+ case CalleeToken_Function:
+ case CalleeToken_FunctionConstructing:
+ return CalleeTokenToFunction(token)->nonLazyScript();
+ }
+ MOZ_CRASH("invalid callee token tag");
+}
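+
+// Illustrative sketch of how the tagging helpers above compose; |fun| is a
+// placeholder for any live JSFunction* (which is sufficiently aligned for
+// the low two tag bits to be free):
+//
+//     CalleeToken token = CalleeToToken(fun, /* constructing = */ true);
+//     MOZ_ASSERT(CalleeTokenIsFunction(token));
+//     MOZ_ASSERT(CalleeTokenIsConstructing(token));
+//     MOZ_ASSERT(CalleeTokenToFunction(token) == fun);
+//     MOZ_ASSERT(ScriptFromCalleeToken(token) == fun->nonLazyScript());
+//
+// CalleeTokenMask clears exactly those two tag bits when untagging.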
+
+// In between every two frames lies a small header describing both frames. This
+// header, minimally, contains a returnAddress word and a descriptor word. The
+// descriptor describes the size and type of the previous frame, whereas the
+// returnAddress describes the address the newer frame (the callee) will return
+// to. The exact mechanism in which frames are laid out is architecture
+// dependent.
+//
+// Two special frame types exist. Entry frames begin an ion activation, and
+// therefore there is exactly one per activation of jit::Cannon. Exit frames
+// are necessary to leave JIT code and enter C++, and thus, C++ code will
+// always begin iterating from the topmost exit frame.
+
+class LSafepoint;
+
+// Two-tuple that lets you look up the safepoint entry given the
+// displacement of a call instruction within the JIT code.
+class SafepointIndex
+{
+ // The displacement is the distance from the first byte of the JIT'd code
+ // to the return address (of the call that the safepoint was generated for).
+ uint32_t displacement_;
+
+ union {
+ LSafepoint* safepoint_;
+
+ // Offset to the start of the encoded safepoint in the safepoint stream.
+ uint32_t safepointOffset_;
+ };
+
+#ifdef DEBUG
+ bool resolved;
+#endif
+
+ public:
+ SafepointIndex(uint32_t displacement, LSafepoint* safepoint)
+ : displacement_(displacement),
+ safepoint_(safepoint)
+#ifdef DEBUG
+ , resolved(false)
+#endif
+ { }
+
+ void resolve();
+
+ LSafepoint* safepoint() {
+ MOZ_ASSERT(!resolved);
+ return safepoint_;
+ }
+ uint32_t displacement() const {
+ return displacement_;
+ }
+ uint32_t safepointOffset() const {
+ return safepointOffset_;
+ }
+ void adjustDisplacement(uint32_t offset) {
+ MOZ_ASSERT(offset >= displacement_);
+ displacement_ = offset;
+ }
+ inline SnapshotOffset snapshotOffset() const;
+ inline bool hasSnapshotOffset() const;
+};
+
+class MacroAssembler;
+// The OSI point is patched to a call instruction. Therefore, the
+// returnPoint for an OSI call is the address immediately following that
+// call instruction. The displacement of that point within the assembly
+// buffer is the |returnPointDisplacement|.
+class OsiIndex
+{
+ uint32_t callPointDisplacement_;
+ uint32_t snapshotOffset_;
+
+ public:
+ OsiIndex(uint32_t callPointDisplacement, uint32_t snapshotOffset)
+ : callPointDisplacement_(callPointDisplacement),
+ snapshotOffset_(snapshotOffset)
+ { }
+
+ uint32_t returnPointDisplacement() const;
+ uint32_t callPointDisplacement() const {
+ return callPointDisplacement_;
+ }
+ uint32_t snapshotOffset() const {
+ return snapshotOffset_;
+ }
+};
+
+// The layout of an Ion frame on the C stack is roughly:
+// argN _
+// ... \ - These are jsvals
+// arg0 /
+// -3 this _/
+// -2 callee
+// -1 descriptor
+// 0 returnAddress
+// .. locals ..
+
+// The descriptor is organized into four sections:
+// [ frame size | has cached saved frame bit | frame header size | frame type ]
+// < highest - - - - - - - - - - - - - - lowest >
+static const uintptr_t FRAMETYPE_BITS = 4;
+static const uintptr_t FRAME_HEADER_SIZE_SHIFT = FRAMETYPE_BITS;
+static const uintptr_t FRAME_HEADER_SIZE_BITS = 3;
+static const uintptr_t FRAME_HEADER_SIZE_MASK = (1 << FRAME_HEADER_SIZE_BITS) - 1;
+static const uintptr_t HASCACHEDSAVEDFRAME_BIT = 1 << (FRAMETYPE_BITS + FRAME_HEADER_SIZE_BITS);
+static const uintptr_t FRAMESIZE_SHIFT = FRAMETYPE_BITS +
+ FRAME_HEADER_SIZE_BITS +
+ 1 /* cached saved frame bit */;
+static const uintptr_t FRAMESIZE_BITS = 32 - FRAMESIZE_SHIFT;
+static const uintptr_t FRAMESIZE_MASK = (1 << FRAMESIZE_BITS) - 1;
+
+// Ion frames have a few important numbers associated with them:
+// Local depth: The number of bytes required to spill local variables.
+// Argument depth: The number of bytes required to push arguments and make
+// a function call.
+// Slack: A frame may temporarily use extra stack to resolve cycles.
+//
+// The (local + argument) depth determines the "fixed frame size". The fixed
+// frame size is the distance between the stack pointer and the frame header.
+// Thus, fixed >= (local + argument).
+//
+// In order to compress guards, we create shared jump tables that recover the
+// script from the stack and recover a snapshot pointer based on which jump was
+// taken. Thus, we create a jump table for each fixed frame size.
+//
+// Jump tables are big. To control the amount of jump tables we generate, each
+// platform chooses how to segregate stack size classes based on its
+// architecture.
+//
+// On some architectures, these jump tables are not used at all, or frame
+// size segregation is not needed. Thus, there is an option for a frame to not
+// have any frame size class, and to be totally dynamic.
+static const uint32_t NO_FRAME_SIZE_CLASS_ID = uint32_t(-1);
+
+class FrameSizeClass
+{
+ uint32_t class_;
+
+ explicit FrameSizeClass(uint32_t class_) : class_(class_)
+ { }
+
+ public:
+ FrameSizeClass()
+ { }
+
+ static FrameSizeClass None() {
+ return FrameSizeClass(NO_FRAME_SIZE_CLASS_ID);
+ }
+ static FrameSizeClass FromClass(uint32_t class_) {
+ return FrameSizeClass(class_);
+ }
+
+ // These functions are implemented in specific CodeGenerator-* files.
+ static FrameSizeClass FromDepth(uint32_t frameDepth);
+ static FrameSizeClass ClassLimit();
+ uint32_t frameSize() const;
+
+ uint32_t classId() const {
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ return class_;
+ }
+
+ bool operator ==(const FrameSizeClass& other) const {
+ return class_ == other.class_;
+ }
+ bool operator !=(const FrameSizeClass& other) const {
+ return class_ != other.class_;
+ }
+};
+
+struct BaselineBailoutInfo;
+
+// Data needed to recover from an exception.
+struct ResumeFromException
+{
+ static const uint32_t RESUME_ENTRY_FRAME = 0;
+ static const uint32_t RESUME_CATCH = 1;
+ static const uint32_t RESUME_FINALLY = 2;
+ static const uint32_t RESUME_FORCED_RETURN = 3;
+ static const uint32_t RESUME_BAILOUT = 4;
+
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ uint8_t* target;
+ uint32_t kind;
+
+ // Value to push when resuming into a |finally| block.
+ Value exception;
+
+ BaselineBailoutInfo* bailoutInfo;
+};
+
+void HandleException(ResumeFromException* rfe);
+
+void EnsureBareExitFrame(JSContext* cx, JitFrameLayout* frame);
+
+void MarkJitActivations(JSRuntime* rt, JSTracer* trc);
+
+JSCompartment*
+TopmostIonActivationCompartment(JSRuntime* rt);
+
+void UpdateJitActivationsForMinorGC(JSRuntime* rt, JSTracer* trc);
+
+static inline uint32_t
+EncodeFrameHeaderSize(size_t headerSize)
+{
+ MOZ_ASSERT((headerSize % sizeof(uintptr_t)) == 0);
+
+ uint32_t headerSizeWords = headerSize / sizeof(uintptr_t);
+ MOZ_ASSERT(headerSizeWords <= FRAME_HEADER_SIZE_MASK);
+ return headerSizeWords;
+}
+
+static inline uint32_t
+MakeFrameDescriptor(uint32_t frameSize, FrameType type, uint32_t headerSize)
+{
+ MOZ_ASSERT(headerSize < FRAMESIZE_MASK);
+ headerSize = EncodeFrameHeaderSize(headerSize);
+ return 0 | (frameSize << FRAMESIZE_SHIFT) | (headerSize << FRAME_HEADER_SIZE_SHIFT) | type;
+}
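+
+// Illustrative sketch of how a frame descriptor is typically built and later
+// decoded; the 0x40 byte frame size is an arbitrary placeholder:
+//
+//     uint32_t descriptor = MakeFrameDescriptor(0x40, JitFrame_IonJS,
+//                                               JitFrameLayout::Size());
+//
+// Once this descriptor is stored in a CommonFrameLayout (defined below), the
+// callee recovers the pieces with prevFrameLocalSize() == 0x40,
+// headerSize() == JitFrameLayout::Size() and prevType() == JitFrame_IonJS.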
+
+// Returns the JSScript associated with the topmost JIT frame.
+inline JSScript*
+GetTopJitJSScript(JSContext* cx)
+{
+ JitFrameIterator iter(cx);
+ MOZ_ASSERT(iter.type() == JitFrame_Exit);
+ ++iter;
+
+ if (iter.isBaselineStub()) {
+ ++iter;
+ MOZ_ASSERT(iter.isBaselineJS());
+ }
+
+ MOZ_ASSERT(iter.isScripted());
+ return iter.script();
+}
+
+#ifdef JS_CODEGEN_MIPS32
+uint8_t* alignDoubleSpillWithOffset(uint8_t* pointer, int32_t offset);
+#else
+inline uint8_t*
+alignDoubleSpillWithOffset(uint8_t* pointer, int32_t offset)
+{
+ // This is NO-OP on non-MIPS platforms.
+ return pointer;
+}
+#endif
+
+// Layout of the frame prefix. This assumes the stack architecture grows down.
+// If this is ever not the case, we'll have to refactor.
+class CommonFrameLayout
+{
+ uint8_t* returnAddress_;
+ uintptr_t descriptor_;
+
+ static const uintptr_t FrameTypeMask = (1 << FRAMETYPE_BITS) - 1;
+
+ public:
+ static size_t offsetOfDescriptor() {
+ return offsetof(CommonFrameLayout, descriptor_);
+ }
+ uintptr_t descriptor() const {
+ return descriptor_;
+ }
+ static size_t offsetOfReturnAddress() {
+ return offsetof(CommonFrameLayout, returnAddress_);
+ }
+ FrameType prevType() const {
+ return FrameType(descriptor_ & FrameTypeMask);
+ }
+ void changePrevType(FrameType type) {
+ descriptor_ &= ~FrameTypeMask;
+ descriptor_ |= type;
+ }
+ size_t prevFrameLocalSize() const {
+ return descriptor_ >> FRAMESIZE_SHIFT;
+ }
+ size_t headerSize() const {
+ return sizeof(uintptr_t) *
+ ((descriptor_ >> FRAME_HEADER_SIZE_SHIFT) & FRAME_HEADER_SIZE_MASK);
+ }
+ bool hasCachedSavedFrame() const {
+ return descriptor_ & HASCACHEDSAVEDFRAME_BIT;
+ }
+ void setHasCachedSavedFrame() {
+ descriptor_ |= HASCACHEDSAVEDFRAME_BIT;
+ }
+ uint8_t* returnAddress() const {
+ return returnAddress_;
+ }
+ void setReturnAddress(uint8_t* addr) {
+ returnAddress_ = addr;
+ }
+};
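+
+// A minimal sketch of how these accessors combine when walking up the stack;
+// |callee| is assumed to point at some on-stack frame. This mirrors the
+// pointer arithmetic of GetPreviousRawFrame in JitFrames.cpp.
+//
+//     CommonFrameLayout* callee = ...;
+//     size_t prevSize = callee->prevFrameLocalSize() + callee->headerSize();
+//     CommonFrameLayout* caller =
+//         reinterpret_cast<CommonFrameLayout*>((uint8_t*)callee + prevSize);
+//     // callee->prevType() then tells us how to interpret |caller|.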
+
+class JitFrameLayout : public CommonFrameLayout
+{
+ CalleeToken calleeToken_;
+ uintptr_t numActualArgs_;
+
+ public:
+ CalleeToken calleeToken() const {
+ return calleeToken_;
+ }
+ void replaceCalleeToken(CalleeToken calleeToken) {
+ calleeToken_ = calleeToken;
+ }
+
+ static size_t offsetOfCalleeToken() {
+ return offsetof(JitFrameLayout, calleeToken_);
+ }
+ static size_t offsetOfNumActualArgs() {
+ return offsetof(JitFrameLayout, numActualArgs_);
+ }
+ static size_t offsetOfThis() {
+ return sizeof(JitFrameLayout);
+ }
+ static size_t offsetOfEvalNewTarget() {
+ return sizeof(JitFrameLayout);
+ }
+ static size_t offsetOfActualArgs() {
+ return offsetOfThis() + sizeof(Value);
+ }
+ static size_t offsetOfActualArg(size_t arg) {
+ return offsetOfActualArgs() + arg * sizeof(Value);
+ }
+
+ Value thisv() {
+ MOZ_ASSERT(CalleeTokenIsFunction(calleeToken()));
+ return argv()[0];
+ }
+ Value* argv() {
+ MOZ_ASSERT(CalleeTokenIsFunction(calleeToken()));
+ return (Value*)(this + 1);
+ }
+ uintptr_t numActualArgs() const {
+ return numActualArgs_;
+ }
+
+ // Computes a reference to a stack or argument slot, where a slot is a
+ // distance from the base frame pointer, as would be used for LStackSlot
+ // or LArgument.
+ uintptr_t* slotRef(SafepointSlotEntry where);
+
+ static inline size_t Size() {
+ return sizeof(JitFrameLayout);
+ }
+};
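+
+// Sketch of how argv() and numActualArgs() map onto the stack diagram earlier
+// in this file; |layout| is a placeholder for any scripted frame's
+// JitFrameLayout*.
+//
+//     Value thisv = layout->argv()[0];          // same as layout->thisv()
+//     Value arg0  = layout->argv()[1];          // first actual argument
+//     uintptr_t n = layout->numActualArgs();    // actuals, not formals
+//     Value last  = layout->argv()[n];          // argv()[1..n] are the actuals (n >= 1 assumed)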
+
+// This is the layout of the frame used when we enter Ion code from platform ABI code.
+class EntryFrameLayout : public JitFrameLayout
+{
+ public:
+ static inline size_t Size() {
+ return sizeof(EntryFrameLayout);
+ }
+};
+
+class RectifierFrameLayout : public JitFrameLayout
+{
+ public:
+ static inline size_t Size() {
+ return sizeof(RectifierFrameLayout);
+ }
+};
+
+class IonAccessorICFrameLayout : public CommonFrameLayout
+{
+ protected:
+ // Pointer to root the stub's JitCode.
+ JitCode* stubCode_;
+
+ public:
+ JitCode** stubCode() {
+ return &stubCode_;
+ }
+ static size_t Size() {
+ return sizeof(IonAccessorICFrameLayout);
+ }
+};
+
+// GC related data used to keep alive data surrounding the Exit frame.
+class ExitFooterFrame
+{
+ const VMFunction* function_;
+ JitCode* jitCode_;
+
+ public:
+ static inline size_t Size() {
+ return sizeof(ExitFooterFrame);
+ }
+ inline JitCode* jitCode() const {
+ return jitCode_;
+ }
+ inline JitCode** addressOfJitCode() {
+ return &jitCode_;
+ }
+ inline const VMFunction* function() const {
+ return function_;
+ }
+
+ // This should only be called for function()->outParam == Type_Handle
+ template <typename T>
+ T* outParam() {
+ uint8_t* address = reinterpret_cast<uint8_t*>(this);
+ address = alignDoubleSpillWithOffset(address, sizeof(intptr_t));
+ return reinterpret_cast<T*>(address - sizeof(T));
+ }
+};
+
+class NativeExitFrameLayout;
+class IonOOLNativeExitFrameLayout;
+class IonOOLPropertyOpExitFrameLayout;
+class IonOOLProxyExitFrameLayout;
+class IonDOMExitFrameLayout;
+
+enum ExitFrameTokenValues
+{
+ CallNativeExitFrameLayoutToken = 0x0,
+ ConstructNativeExitFrameLayoutToken = 0x1,
+ IonDOMExitFrameLayoutGetterToken = 0x2,
+ IonDOMExitFrameLayoutSetterToken = 0x3,
+ IonDOMMethodExitFrameLayoutToken = 0x4,
+ IonOOLNativeExitFrameLayoutToken = 0x5,
+ IonOOLPropertyOpExitFrameLayoutToken = 0x6,
+ IonOOLSetterOpExitFrameLayoutToken = 0x7,
+ IonOOLProxyExitFrameLayoutToken = 0x8,
+ LazyLinkExitFrameLayoutToken = 0xFE,
+ ExitFrameLayoutBareToken = 0xFF
+};
+
+// This is the frame layout used when we are exiting Ion code and are about to enter platform ABI code.
+class ExitFrameLayout : public CommonFrameLayout
+{
+ inline uint8_t* top() {
+ return reinterpret_cast<uint8_t*>(this + 1);
+ }
+
+ public:
+ // Pushed for "bare" fake exit frames that have no GC things on stack to be
+ // marked.
+ static JitCode* BareToken() { return (JitCode*)ExitFrameLayoutBareToken; }
+
+ static inline size_t Size() {
+ return sizeof(ExitFrameLayout);
+ }
+ static inline size_t SizeWithFooter() {
+ return Size() + ExitFooterFrame::Size();
+ }
+
+ inline ExitFooterFrame* footer() {
+ uint8_t* sp = reinterpret_cast<uint8_t*>(this);
+ return reinterpret_cast<ExitFooterFrame*>(sp - ExitFooterFrame::Size());
+ }
+
+ // argBase targets the point which precedes the exit frame. The arguments
+ // of each VM wrapper are pushed before the exit frame. This corresponds
+ // exactly to the value of the argBase register of the generateVMWrapper
+ // function.
+ inline uint8_t* argBase() {
+ MOZ_ASSERT(footer()->jitCode() != nullptr);
+ return top();
+ }
+
+ inline bool isWrapperExit() {
+ return footer()->function() != nullptr;
+ }
+ inline bool isBareExit() {
+ return footer()->jitCode() == BareToken();
+ }
+
+ // See the various exit frame layouts below.
+ template <typename T> inline bool is() {
+ return footer()->jitCode() == T::Token();
+ }
+ template <typename T> inline T* as() {
+ MOZ_ASSERT(this->is<T>());
+ return reinterpret_cast<T*>(footer());
+ }
+};
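+
+// A usage sketch of the is<T>()/as<T>() dispatch above; |exit| stands for an
+// exit frame obtained from a frame iterator, and the branch shown is only
+// one of the specializations declared below.
+//
+//     ExitFrameLayout* exit = ...;
+//     if (exit->is<NativeExitFrameLayout>()) {
+//         NativeExitFrameLayout* native = exit->as<NativeExitFrameLayout>();
+//         uintptr_t argc = native->argc();
+//         Value result = *native->vp();
+//     } else if (exit->isBareExit()) {
+//         // Nothing on the stack needs marking for bare exits.
+//     }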
+
+// Cannot inherit implementation since we need to extend the top of
+// ExitFrameLayout.
+class NativeExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ public:
+ static inline size_t Size() {
+ return sizeof(NativeExitFrameLayout);
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(NativeExitFrameLayout, loCalleeResult_);
+ }
+ inline Value* vp() {
+ return reinterpret_cast<Value*>(&loCalleeResult_);
+ }
+ inline uintptr_t argc() const {
+ return argc_;
+ }
+};
+
+class CallNativeExitFrameLayout : public NativeExitFrameLayout
+{
+ public:
+ static JitCode* Token() { return (JitCode*)CallNativeExitFrameLayoutToken; }
+};
+
+class ConstructNativeExitFrameLayout : public NativeExitFrameLayout
+{
+ public:
+ static JitCode* Token() { return (JitCode*)ConstructNativeExitFrameLayoutToken; }
+};
+
+template<>
+inline bool
+ExitFrameLayout::is<NativeExitFrameLayout>()
+{
+ return is<CallNativeExitFrameLayout>() || is<ConstructNativeExitFrameLayout>();
+}
+
+class IonOOLNativeExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ // pointer to root the stub's JitCode
+ JitCode* stubCode_;
+
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ // Split Value for |this| and args above.
+ uint32_t loThis_;
+ uint32_t hiThis_;
+
+ public:
+ static JitCode* Token() { return (JitCode*)IonOOLNativeExitFrameLayoutToken; }
+
+ static inline size_t Size(size_t argc) {
+ // The frame accounts for the callee/result and |this|, so we only need args.
+ return sizeof(IonOOLNativeExitFrameLayout) + (argc * sizeof(Value));
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonOOLNativeExitFrameLayout, loCalleeResult_);
+ }
+
+ inline JitCode** stubCode() {
+ return &stubCode_;
+ }
+ inline Value* vp() {
+ return reinterpret_cast<Value*>(&loCalleeResult_);
+ }
+ inline Value* thisp() {
+ return reinterpret_cast<Value*>(&loThis_);
+ }
+ inline uintptr_t argc() const {
+ return argc_;
+ }
+};
+
+class IonOOLPropertyOpExitFrameLayout
+{
+ protected:
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ // Object for HandleObject
+ JSObject* obj_;
+
+ // id for HandleId
+ jsid id_;
+
+ // space for MutableHandleValue result
+ // use two uint32_t so compiler doesn't align.
+ uint32_t vp0_;
+ uint32_t vp1_;
+
+ // pointer to root the stub's JitCode
+ JitCode* stubCode_;
+
+ public:
+ static JitCode* Token() { return (JitCode*)IonOOLPropertyOpExitFrameLayoutToken; }
+
+ static inline size_t Size() {
+ return sizeof(IonOOLPropertyOpExitFrameLayout);
+ }
+
+ static size_t offsetOfObject() {
+ return offsetof(IonOOLPropertyOpExitFrameLayout, obj_);
+ }
+
+ static size_t offsetOfId() {
+ return offsetof(IonOOLPropertyOpExitFrameLayout, id_);
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonOOLPropertyOpExitFrameLayout, vp0_);
+ }
+
+ inline JitCode** stubCode() {
+ return &stubCode_;
+ }
+ inline Value* vp() {
+ return reinterpret_cast<Value*>(&vp0_);
+ }
+ inline jsid* id() {
+ return &id_;
+ }
+ inline JSObject** obj() {
+ return &obj_;
+ }
+};
+
+class IonOOLSetterOpExitFrameLayout : public IonOOLPropertyOpExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ JS::ObjectOpResult result_;
+
+ public:
+ static JitCode* Token() { return (JitCode*)IonOOLSetterOpExitFrameLayoutToken; }
+
+ static size_t offsetOfObjectOpResult() {
+ return offsetof(IonOOLSetterOpExitFrameLayout, result_);
+ }
+
+ static size_t Size() {
+ return sizeof(IonOOLSetterOpExitFrameLayout);
+ }
+};
+
+// ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp)
+// ProxyCallProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp)
+// ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp,
+// bool strict)
+class IonOOLProxyExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ // The proxy object.
+ JSObject* proxy_;
+
+ // id for HandleId
+ jsid id_;
+
+ // space for MutableHandleValue result
+ // use two uint32_t so compiler doesn't align.
+ uint32_t vp0_;
+ uint32_t vp1_;
+
+ // pointer to root the stub's JitCode
+ JitCode* stubCode_;
+
+ public:
+ static JitCode* Token() { return (JitCode*)IonOOLProxyExitFrameLayoutToken; }
+
+ static inline size_t Size() {
+ return sizeof(IonOOLProxyExitFrameLayout);
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonOOLProxyExitFrameLayout, vp0_);
+ }
+
+ inline JitCode** stubCode() {
+ return &stubCode_;
+ }
+ inline Value* vp() {
+ return reinterpret_cast<Value*>(&vp0_);
+ }
+ inline jsid* id() {
+ return &id_;
+ }
+ inline JSObject** proxy() {
+ return &proxy_;
+ }
+};
+
+class IonDOMExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ JSObject* thisObj;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ public:
+ static JitCode* GetterToken() { return (JitCode*)IonDOMExitFrameLayoutGetterToken; }
+ static JitCode* SetterToken() { return (JitCode*)IonDOMExitFrameLayoutSetterToken; }
+
+ static inline size_t Size() {
+ return sizeof(IonDOMExitFrameLayout);
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonDOMExitFrameLayout, loCalleeResult_);
+ }
+ inline Value* vp() {
+ return reinterpret_cast<Value*>(&loCalleeResult_);
+ }
+ inline JSObject** thisObjAddress() {
+ return &thisObj;
+ }
+ inline bool isMethodFrame();
+};
+
+struct IonDOMMethodExitFrameLayoutTraits;
+
+class IonDOMMethodExitFrameLayout
+{
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ // This must be the last thing pushed, so as to stay common with
+ // IonDOMExitFrameLayout.
+ JSObject* thisObj_;
+ Value* argv_;
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ friend struct IonDOMMethodExitFrameLayoutTraits;
+
+ public:
+ static JitCode* Token() { return (JitCode*)IonDOMMethodExitFrameLayoutToken; }
+
+ static inline size_t Size() {
+ return sizeof(IonDOMMethodExitFrameLayout);
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonDOMMethodExitFrameLayout, loCalleeResult_);
+ }
+
+ inline Value* vp() {
+ // The code in visitCallDOMNative depends on this static assert holding
+ JS_STATIC_ASSERT(offsetof(IonDOMMethodExitFrameLayout, loCalleeResult_) ==
+ (offsetof(IonDOMMethodExitFrameLayout, argc_) + sizeof(uintptr_t)));
+ return reinterpret_cast<Value*>(&loCalleeResult_);
+ }
+ inline JSObject** thisObjAddress() {
+ return &thisObj_;
+ }
+ inline uintptr_t argc() {
+ return argc_;
+ }
+};
+
+inline bool
+IonDOMExitFrameLayout::isMethodFrame()
+{
+ return footer_.jitCode() == IonDOMMethodExitFrameLayout::Token();
+}
+
+template <>
+inline bool
+ExitFrameLayout::is<IonDOMExitFrameLayout>()
+{
+ JitCode* code = footer()->jitCode();
+ return
+ code == IonDOMExitFrameLayout::GetterToken() ||
+ code == IonDOMExitFrameLayout::SetterToken() ||
+ code == IonDOMMethodExitFrameLayout::Token();
+}
+
+template <>
+inline IonDOMExitFrameLayout*
+ExitFrameLayout::as<IonDOMExitFrameLayout>()
+{
+ MOZ_ASSERT(is<IonDOMExitFrameLayout>());
+ return reinterpret_cast<IonDOMExitFrameLayout*>(footer());
+}
+
+struct IonDOMMethodExitFrameLayoutTraits {
+ static const size_t offsetOfArgcFromArgv =
+ offsetof(IonDOMMethodExitFrameLayout, argc_) -
+ offsetof(IonDOMMethodExitFrameLayout, argv_);
+};
+
+// Cannot inherit implementation since we need to extend the top of
+// ExitFrameLayout.
+class LazyLinkExitFrameLayout
+{
+ protected: // silence clang warning about unused private fields
+ JitCode* stubCode_;
+ ExitFooterFrame footer_;
+ JitFrameLayout exit_;
+
+ public:
+ static JitCode* Token() { return (JitCode*) LazyLinkExitFrameLayoutToken; }
+
+ static inline size_t Size() {
+ return sizeof(LazyLinkExitFrameLayout);
+ }
+
+ inline JitCode** stubCode() {
+ return &stubCode_;
+ }
+ inline JitFrameLayout* jsFrame() {
+ return &exit_;
+ }
+ static size_t offsetOfExitFrame() {
+ return offsetof(LazyLinkExitFrameLayout, exit_);
+ }
+};
+
+template <>
+inline LazyLinkExitFrameLayout*
+ExitFrameLayout::as<LazyLinkExitFrameLayout>()
+{
+ MOZ_ASSERT(is<LazyLinkExitFrameLayout>());
+ uint8_t* sp = reinterpret_cast<uint8_t*>(this);
+ sp -= LazyLinkExitFrameLayout::offsetOfExitFrame();
+ return reinterpret_cast<LazyLinkExitFrameLayout*>(sp);
+}
+
+class ICStub;
+
+class JitStubFrameLayout : public CommonFrameLayout
+{
+ // Info on the stack
+ //
+ // --------------------
+ // |JitStubFrameLayout|
+ // +------------------+
+ // | - Descriptor | => Marks end of JitFrame_IonJS
+ // | - return address |
+ // +------------------+
+ // | - StubPtr | => Pushed first in a stub, and only when the stub will do
+ // -------------------- a VM call; otherwise we cannot have a JitStubFrame.
+ // Technically it is not a member of the layout.
+
+ public:
+ static size_t Size() {
+ return sizeof(JitStubFrameLayout);
+ }
+
+ static inline int reverseOffsetOfStubPtr() {
+ return -int(sizeof(void*));
+ }
+
+ inline ICStub* maybeStubPtr() {
+ uint8_t* fp = reinterpret_cast<uint8_t*>(this);
+ return *reinterpret_cast<ICStub**>(fp + reverseOffsetOfStubPtr());
+ }
+};
+
+class BaselineStubFrameLayout : public JitStubFrameLayout
+{
+ // Info on the stack
+ //
+ // -------------------------
+ // |BaselineStubFrameLayout|
+ // +-----------------------+
+ // | - Descriptor | => Marks end of JitFrame_BaselineJS
+ // | - return address |
+ // +-----------------------+
+ // | - StubPtr | => Pushed first in a stub, and only when the stub will do
+ // +-----------------------+ a VM call; otherwise we cannot have a BaselineStubFrame.
+ // | - FramePtr | => Baseline stubs also need to push the frame pointer when
+ // ------------------------- doing a VM call.
+ // Technically these last two variables are not part of the
+ // layout.
+
+ public:
+ static inline size_t Size() {
+ return sizeof(BaselineStubFrameLayout);
+ }
+
+ static inline int reverseOffsetOfSavedFramePtr() {
+ return -int(2 * sizeof(void*));
+ }
+
+ void* reverseSavedFramePtr() {
+ uint8_t* addr = ((uint8_t*) this) + reverseOffsetOfSavedFramePtr();
+ return *(void**)addr;
+ }
+
+ inline void setStubPtr(ICStub* stub) {
+ uint8_t* fp = reinterpret_cast<uint8_t*>(this);
+ *reinterpret_cast<ICStub**>(fp + reverseOffsetOfStubPtr()) = stub;
+ }
+};
+
+// An invalidation bailout stack is at the stack pointer for the callee frame.
+class InvalidationBailoutStack
+{
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ IonScript* ionScript_;
+ uint8_t* osiPointReturnAddress_;
+
+ public:
+ uint8_t* sp() const {
+ return (uint8_t*) this + sizeof(InvalidationBailoutStack);
+ }
+ JitFrameLayout* fp() const;
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+
+ IonScript* ionScript() const {
+ return ionScript_;
+ }
+ uint8_t* osiPointReturnAddress() const {
+ return osiPointReturnAddress_;
+ }
+ static size_t offsetOfFpRegs() {
+ return offsetof(InvalidationBailoutStack, fpregs_);
+ }
+ static size_t offsetOfRegs() {
+ return offsetof(InvalidationBailoutStack, regs_);
+ }
+
+ void checkInvariants() const;
+};
+
+void
+GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes);
+
+CalleeToken
+MarkCalleeToken(JSTracer* trc, CalleeToken token);
+
+// Baseline requires one slot for this/argument type checks.
+static const uint32_t MinJITStackSize = 1;
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_JitFrames_h */
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
new file mode 100644
index 000000000..eb5a6c1c2
--- /dev/null
+++ b/js/src/jit/JitOptions.cpp
@@ -0,0 +1,298 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitOptions.h"
+#include "mozilla/TypeTraits.h"
+
+#include <cstdlib>
+#include "jsfun.h"
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+namespace js {
+namespace jit {
+
+DefaultJitOptions JitOptions;
+
+static void Warn(const char* env, const char* value)
+{
+ fprintf(stderr, "Warning: I didn't understand %s=\"%s\"\n", env, value);
+}
+
+template<typename T> struct IsBool : mozilla::FalseType {};
+template<> struct IsBool<bool> : mozilla::TrueType {};
+
+static Maybe<int>
+ParseInt(const char* str)
+{
+ char* endp;
+ int retval = strtol(str, &endp, 0);
+ if (*endp == '\0')
+ return mozilla::Some(retval);
+ return mozilla::Nothing();
+}
+
+template<typename T>
+T overrideDefault(const char* param, T dflt) {
+ char* str = getenv(param);
+ if (!str)
+ return dflt;
+ if (IsBool<T>::value) {
+ if (strcmp(str, "true") == 0 || strcmp(str, "yes") == 0)
+ return true;
+ if (strcmp(str, "false") == 0 || strcmp(str, "no") == 0)
+ return false;
+ Warn(param, str);
+ } else {
+ Maybe<int> value = ParseInt(str);
+ if (value.isSome())
+ return value.ref();
+ Warn(param, str);
+ }
+ return dflt;
+}
+
+#define SET_DEFAULT(var, dflt) var = overrideDefault("JIT_OPTION_" #var, dflt)
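+
+// The macro above means that every option below can be overridden from the
+// environment by prefixing the field name with JIT_OPTION_. For instance
+// (illustrative only; the shell binary name is just an example):
+//
+//     JIT_OPTION_disableInlining=true JIT_OPTION_baselineWarmUpThreshold=1 ./js
+//
+// Boolean options accept true/false or yes/no; numeric options go through
+// strtol with base 0, so decimal, hex (0x...) and octal forms all work.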
+DefaultJitOptions::DefaultJitOptions()
+{
+ // Whether to perform expensive graph-consistency DEBUG-only assertions.
+ // It can be useful to disable this to reduce DEBUG-compile time of large
+ // wasm programs.
+ SET_DEFAULT(checkGraphConsistency, true);
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // Emit extra code to verify live regs at the start of a VM call
+ // are not modified before its OsiPoint.
+ SET_DEFAULT(checkOsiPointRegisters, false);
+#endif
+
+ // Whether to enable extra code to perform dynamic validation of
+ // RangeAnalysis results.
+ SET_DEFAULT(checkRangeAnalysis, false);
+
+ // Toggles whether IonBuilder falls back to a call if we fail to inline.
+ SET_DEFAULT(disableInlineBacktracking, true);
+
+ // Toggles whether Alignment Mask Analysis is globally disabled.
+ SET_DEFAULT(disableAma, false);
+
+ // Toggles whether Effective Address Analysis is globally disabled.
+ SET_DEFAULT(disableEaa, false);
+
+ // Toggle whether eager simd unboxing is globally disabled.
+ SET_DEFAULT(disableEagerSimdUnbox, false);
+
+ // Toggles whether Edge Case Analysis is globally disabled.
+ SET_DEFAULT(disableEdgeCaseAnalysis, false);
+
+ // Toggles whether to use flow sensitive Alias Analysis.
+ SET_DEFAULT(disableFlowAA, true);
+
+ // Toggle whether global value numbering is globally disabled.
+ SET_DEFAULT(disableGvn, false);
+
+ // Toggles whether inlining is globally disabled.
+ SET_DEFAULT(disableInlining, false);
+
+ // Toggles whether loop invariant code motion is globally disabled.
+ SET_DEFAULT(disableLicm, false);
+
+ // Toggles whether Loop Unrolling is globally disabled.
+ SET_DEFAULT(disableLoopUnrolling, true);
+
+ // Toggle whether Profile Guided Optimization is globally disabled.
+ SET_DEFAULT(disablePgo, false);
+
+ // Toggles whether instruction reordering is globally disabled.
+ SET_DEFAULT(disableInstructionReordering, false);
+
+ // Toggles whether Range Analysis is globally disabled.
+ SET_DEFAULT(disableRangeAnalysis, false);
+
+    // Toggles whether Recover instructions are globally disabled.
+ SET_DEFAULT(disableRecoverIns, false);
+
+ // Toggle whether eager scalar replacement is globally disabled.
+ SET_DEFAULT(disableScalarReplacement, false);
+
+ // Toggles whether CacheIR stubs are used.
+ SET_DEFAULT(disableCacheIR, false);
+
+    // Toggles whether shared stubs are used in IonMonkey.
+ SET_DEFAULT(disableSharedStubs, false);
+
+ // Toggles whether sincos optimization is globally disabled.
+    // See bug 984018: macOS is currently the only platform with a fast sincos.
+#if defined(XP_MACOSX)
+    SET_DEFAULT(disableSincos, false);
+#else
+    SET_DEFAULT(disableSincos, true);
+#endif
+
+ // Toggles whether sink code motion is globally disabled.
+ SET_DEFAULT(disableSink, true);
+
+ // Whether functions are compiled immediately.
+ SET_DEFAULT(eagerCompilation, false);
+
+    // Whether IonBuilder should prefer IC generation over specialized MIR.
+ SET_DEFAULT(forceInlineCaches, false);
+
+ // Toggles whether large scripts are rejected.
+ SET_DEFAULT(limitScriptSize, true);
+
+ // Toggles whether functions may be entered at loop headers.
+ SET_DEFAULT(osr, true);
+
+ // Whether to enable extra code to perform dynamic validations.
+ SET_DEFAULT(runExtraChecks, false);
+
+ // How many invocations or loop iterations are needed before functions
+ // are compiled with the baseline compiler.
+ SET_DEFAULT(baselineWarmUpThreshold, 10);
+
+ // Number of exception bailouts (resuming into catch/finally block) before
+ // we invalidate and forbid Ion compilation.
+ SET_DEFAULT(exceptionBailoutThreshold, 10);
+
+ // Number of bailouts without invalidation before we set
+ // JSScript::hadFrequentBailouts and invalidate.
+ SET_DEFAULT(frequentBailoutThreshold, 10);
+
+ // How many actual arguments are accepted on the C stack.
+ SET_DEFAULT(maxStackArgs, 4096);
+
+ // How many times we will try to enter a script via OSR before
+ // invalidating the script.
+ SET_DEFAULT(osrPcMismatchesBeforeRecompile, 6000);
+
+    // The bytecode length limit for a small function.
+ SET_DEFAULT(smallFunctionMaxBytecodeLength_, 130);
+
+ // An artificial testing limit for the maximum supported offset of
+ // pc-relative jump and call instructions.
+ SET_DEFAULT(jumpThreshold, UINT32_MAX);
+
+    // The branch pruning heuristic is based on a scoring system, which looks
+    // at different metrics and provides a score. The score is computed as a
+    // projection where each factor defines the weight of its metric. This
+    // score is then compared against a threshold to decide whether the
+    // branch should be kept rather than removed.
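+    // Illustrative shape of that projection (the exact computation lives in
+    // the branch pruning pass):
+    //   score = hits * hitCountFactor + insts * instFactor
+    //         + blockSpan * blockSpanFactor + effectfulInsts * effectfulInstFactor
+    // where a branch whose score stays below branchPruningThreshold may be
+    // removed.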
+ SET_DEFAULT(branchPruningHitCountFactor, 1);
+ SET_DEFAULT(branchPruningInstFactor, 10);
+ SET_DEFAULT(branchPruningBlockSpanFactor, 100);
+ SET_DEFAULT(branchPruningEffectfulInstFactor, 3500);
+ SET_DEFAULT(branchPruningThreshold, 4000);
+
+    // Force how many invocations or loop iterations are needed before compiling
+    // a function with the highest IonMonkey optimization level.
+ // (i.e. OptimizationLevel_Normal)
+ const char* forcedDefaultIonWarmUpThresholdEnv = "JIT_OPTION_forcedDefaultIonWarmUpThreshold";
+ if (const char* env = getenv(forcedDefaultIonWarmUpThresholdEnv)) {
+ Maybe<int> value = ParseInt(env);
+ if (value.isSome())
+ forcedDefaultIonWarmUpThreshold.emplace(value.ref());
+ else
+ Warn(forcedDefaultIonWarmUpThresholdEnv, env);
+ }
+
+ // Same but for compiling small functions.
+ const char* forcedDefaultIonSmallFunctionWarmUpThresholdEnv =
+ "JIT_OPTION_forcedDefaultIonSmallFunctionWarmUpThreshold";
+ if (const char* env = getenv(forcedDefaultIonSmallFunctionWarmUpThresholdEnv)) {
+ Maybe<int> value = ParseInt(env);
+ if (value.isSome())
+ forcedDefaultIonSmallFunctionWarmUpThreshold.emplace(value.ref());
+ else
+ Warn(forcedDefaultIonSmallFunctionWarmUpThresholdEnv, env);
+ }
+
+    // Force which register allocator is used, instead of letting the
+    // compiler pick the default.
+ const char* forcedRegisterAllocatorEnv = "JIT_OPTION_forcedRegisterAllocator";
+ if (const char* env = getenv(forcedRegisterAllocatorEnv)) {
+ forcedRegisterAllocator = LookupRegisterAllocator(env);
+ if (!forcedRegisterAllocator.isSome())
+ Warn(forcedRegisterAllocatorEnv, env);
+ }
+
+ // Toggles whether unboxed plain objects can be created by the VM.
+ SET_DEFAULT(disableUnboxedObjects, false);
+
+ // Test whether Atomics are allowed in asm.js code.
+ SET_DEFAULT(asmJSAtomicsEnable, false);
+
+ // Test whether wasm int64 / double NaN bits testing is enabled.
+ SET_DEFAULT(wasmTestMode, false);
+
+ // Test whether wasm bounds check should always be generated.
+ SET_DEFAULT(wasmAlwaysCheckBounds, false);
+
+ // Toggles the optimization whereby offsets are folded into loads and not
+ // included in the bounds check.
+ SET_DEFAULT(wasmFoldOffsets, true);
+
+ // Determines whether we suppress using signal handlers
+ // for interrupting jit-ed code. This is used only for testing.
+ SET_DEFAULT(ionInterruptWithoutSignals, false);
+}
+
+bool
+DefaultJitOptions::isSmallFunction(JSScript* script) const
+{
+ return script->length() <= smallFunctionMaxBytecodeLength_;
+}
+
+void
+DefaultJitOptions::enableGvn(bool enable)
+{
+ disableGvn = !enable;
+}
+
+void
+DefaultJitOptions::setEagerCompilation()
+{
+ eagerCompilation = true;
+ baselineWarmUpThreshold = 0;
+ forcedDefaultIonWarmUpThreshold.reset();
+ forcedDefaultIonWarmUpThreshold.emplace(0);
+ forcedDefaultIonSmallFunctionWarmUpThreshold.reset();
+ forcedDefaultIonSmallFunctionWarmUpThreshold.emplace(0);
+}
+
+void
+DefaultJitOptions::setCompilerWarmUpThreshold(uint32_t warmUpThreshold)
+{
+ forcedDefaultIonWarmUpThreshold.reset();
+ forcedDefaultIonWarmUpThreshold.emplace(warmUpThreshold);
+ forcedDefaultIonSmallFunctionWarmUpThreshold.reset();
+ forcedDefaultIonSmallFunctionWarmUpThreshold.emplace(warmUpThreshold);
+
+ // Undo eager compilation
+ if (eagerCompilation && warmUpThreshold != 0) {
+ jit::DefaultJitOptions defaultValues;
+ eagerCompilation = false;
+ baselineWarmUpThreshold = defaultValues.baselineWarmUpThreshold;
+ }
+}
+
+void
+DefaultJitOptions::resetCompilerWarmUpThreshold()
+{
+ forcedDefaultIonWarmUpThreshold.reset();
+
+ // Undo eager compilation
+ if (eagerCompilation) {
+ jit::DefaultJitOptions defaultValues;
+ eagerCompilation = false;
+ baselineWarmUpThreshold = defaultValues.baselineWarmUpThreshold;
+ }
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
new file mode 100644
index 000000000..076980b4e
--- /dev/null
+++ b/js/src/jit/JitOptions.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitOptions_h
+#define jit_JitOptions_h
+
+#include "mozilla/Maybe.h"
+
+#include "jit/IonTypes.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// Longer scripts can only be compiled off thread, as these compilations
+// can be expensive and stall the main thread for too long.
+static const uint32_t MAX_MAIN_THREAD_SCRIPT_SIZE = 2 * 1000;
+static const uint32_t MAX_MAIN_THREAD_LOCALS_AND_ARGS = 256;
+
+// Possible register allocators which may be used.
+enum IonRegisterAllocator {
+ RegisterAllocator_Backtracking,
+ RegisterAllocator_Testbed,
+ RegisterAllocator_Stupid
+};
+
+static inline mozilla::Maybe<IonRegisterAllocator>
+LookupRegisterAllocator(const char* name)
+{
+ if (!strcmp(name, "backtracking"))
+ return mozilla::Some(RegisterAllocator_Backtracking);
+ if (!strcmp(name, "testbed"))
+ return mozilla::Some(RegisterAllocator_Testbed);
+ if (!strcmp(name, "stupid"))
+ return mozilla::Some(RegisterAllocator_Stupid);
+ return mozilla::Nothing();
+}
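+
+// LookupRegisterAllocator is used by JitOptions.cpp to honor the
+// JIT_OPTION_forcedRegisterAllocator environment variable, e.g.
+// JIT_OPTION_forcedRegisterAllocator=backtracking.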
+
+struct DefaultJitOptions
+{
+ bool checkGraphConsistency;
+#ifdef CHECK_OSIPOINT_REGISTERS
+ bool checkOsiPointRegisters;
+#endif
+ bool checkRangeAnalysis;
+ bool runExtraChecks;
+ bool disableInlineBacktracking;
+ bool disableAma;
+ bool disableEaa;
+ bool disableEagerSimdUnbox;
+ bool disableEdgeCaseAnalysis;
+ bool disableFlowAA;
+ bool disableGvn;
+ bool disableInlining;
+ bool disableLicm;
+ bool disableLoopUnrolling;
+ bool disablePgo;
+ bool disableInstructionReordering;
+ bool disableRangeAnalysis;
+ bool disableRecoverIns;
+ bool disableScalarReplacement;
+ bool disableCacheIR;
+ bool disableSharedStubs;
+ bool disableSincos;
+ bool disableSink;
+ bool eagerCompilation;
+ bool forceInlineCaches;
+ bool limitScriptSize;
+ bool osr;
+ bool asmJSAtomicsEnable;
+ bool wasmTestMode;
+ bool wasmAlwaysCheckBounds;
+ bool wasmFoldOffsets;
+ bool ionInterruptWithoutSignals;
+ uint32_t baselineWarmUpThreshold;
+ uint32_t exceptionBailoutThreshold;
+ uint32_t frequentBailoutThreshold;
+ uint32_t maxStackArgs;
+ uint32_t osrPcMismatchesBeforeRecompile;
+ uint32_t smallFunctionMaxBytecodeLength_;
+ uint32_t jumpThreshold;
+ uint32_t branchPruningHitCountFactor;
+ uint32_t branchPruningInstFactor;
+ uint32_t branchPruningBlockSpanFactor;
+ uint32_t branchPruningEffectfulInstFactor;
+ uint32_t branchPruningThreshold;
+ mozilla::Maybe<uint32_t> forcedDefaultIonWarmUpThreshold;
+ mozilla::Maybe<uint32_t> forcedDefaultIonSmallFunctionWarmUpThreshold;
+ mozilla::Maybe<IonRegisterAllocator> forcedRegisterAllocator;
+
+ // The options below affect the rest of the VM, and not just the JIT.
+ bool disableUnboxedObjects;
+
+ DefaultJitOptions();
+ bool isSmallFunction(JSScript* script) const;
+ void setEagerCompilation();
+ void setCompilerWarmUpThreshold(uint32_t warmUpThreshold);
+ void resetCompilerWarmUpThreshold();
+ void enableGvn(bool val);
+};
+
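+// Illustrative consumer-side check (optimization passes read these flags
+// directly): if (!JitOptions.disableLicm) { /* run LICM */ }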
+extern DefaultJitOptions JitOptions;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitOptions_h */
diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp
new file mode 100644
index 000000000..b939f5ea2
--- /dev/null
+++ b/js/src/jit/JitSpewer.cpp
@@ -0,0 +1,679 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_JITSPEW
+
+#include "jit/JitSpewer.h"
+
+#include "mozilla/Atomics.h"
+
+#if defined(XP_WIN)
+# include <windows.h>
+#else
+# include <unistd.h>
+#endif
+
+#include "jsprf.h"
+
+#include "jit/Ion.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+#include "threading/LockGuard.h"
+
+#include "vm/HelperThreads.h"
+#include "vm/MutexIDs.h"
+
+#ifndef JIT_SPEW_DIR
+# if defined(_WIN32)
+# define JIT_SPEW_DIR ""
+# elif defined(__ANDROID__)
+# define JIT_SPEW_DIR "/data/local/tmp/"
+# else
+# define JIT_SPEW_DIR "/tmp/"
+# endif
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+class IonSpewer
+{
+ private:
+ Mutex outputLock_;
+ Fprinter c1Output_;
+ Fprinter jsonOutput_;
+ bool firstFunction_;
+ bool asyncLogging_;
+ bool inited_;
+
+ void release();
+
+ public:
+ IonSpewer()
+ : outputLock_(mutexid::IonSpewer),
+ firstFunction_(false),
+ asyncLogging_(false),
+ inited_(false)
+ { }
+
+ // File output is terminated safely upon destruction.
+ ~IonSpewer();
+
+ bool init();
+ bool isEnabled() {
+ return inited_;
+ }
+ void setAsyncLogging(bool incremental) {
+ asyncLogging_ = incremental;
+ }
+ bool getAsyncLogging() {
+ return asyncLogging_;
+ }
+
+ void beginFunction();
+ void spewPass(GraphSpewer* gs);
+ void endFunction(GraphSpewer* gs);
+};
+
+// IonSpewer singleton.
+static IonSpewer ionspewer;
+
+static bool LoggingChecked = false;
+static_assert(JitSpew_Terminator <= 64, "Increase the size of the LoggingBits global.");
+static uint64_t LoggingBits = 0;
+static mozilla::Atomic<uint32_t, mozilla::Relaxed> filteredOutCompilations(0);
+
+static const char * const ChannelNames[] =
+{
+#define JITSPEW_CHANNEL(name) #name,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+#undef JITSPEW_CHANNEL
+};
+
+static size_t ChannelIndentLevel[] =
+{
+#define JITSPEW_CHANNEL(name) 0,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+#undef JITSPEW_CHANNEL
+};
+
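+// IONFILTER restricts spewing to the listed compilation sites. It is a
+// comma-separated list of "filename" or "filename:line" entries, for example
+// (hypothetical) IONFILTER=foo.js:10,bar.js. When a filter is set, wasm
+// output is disabled entirely.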
+static bool
+FilterContainsLocation(JSScript* function)
+{
+ static const char* filter = getenv("IONFILTER");
+
+ // If there is no filter we accept all outputs.
+ if (!filter || !filter[0])
+ return true;
+
+ // Disable wasm output when filter is set.
+ if (!function)
+ return false;
+
+ const char* filename = function->filename();
+ const size_t line = function->lineno();
+ const size_t filelen = strlen(filename);
+ const char* index = strstr(filter, filename);
+ while (index) {
+ if (index == filter || index[-1] == ',') {
+ if (index[filelen] == 0 || index[filelen] == ',')
+ return true;
+ if (index[filelen] == ':' && line != size_t(-1)) {
+ size_t read_line = strtoul(&index[filelen + 1], nullptr, 10);
+ if (read_line == line)
+ return true;
+ }
+ }
+ index = strstr(index + filelen, filename);
+ }
+ return false;
+}
+
+void
+jit::EnableIonDebugSyncLogging()
+{
+ ionspewer.init();
+ ionspewer.setAsyncLogging(false);
+ EnableChannel(JitSpew_IonSyncLogs);
+}
+
+void
+jit::EnableIonDebugAsyncLogging()
+{
+ ionspewer.init();
+ ionspewer.setAsyncLogging(true);
+}
+
+void
+IonSpewer::release()
+{
+ if (c1Output_.isInitialized())
+ c1Output_.finish();
+ if (jsonOutput_.isInitialized())
+ jsonOutput_.finish();
+ inited_ = false;
+}
+
+bool
+IonSpewer::init()
+{
+ if (inited_)
+ return true;
+
+ const size_t bufferLength = 256;
+ char c1Buffer[bufferLength];
+ char jsonBuffer[bufferLength];
+ const char *c1Filename = JIT_SPEW_DIR "/ion.cfg";
+ const char *jsonFilename = JIT_SPEW_DIR "/ion.json";
+
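+    // When ION_SPEW_BY_PID is set to a non-empty value, the process id is
+    // appended to the output file names (ion<pid>.cfg / ion<pid>.json) so
+    // that concurrent processes do not overwrite each other's logs.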
+ const char* usePid = getenv("ION_SPEW_BY_PID");
+ if (usePid && *usePid != 0) {
+#if defined(XP_WIN)
+ size_t pid = GetCurrentProcessId();
+#else
+ size_t pid = getpid();
+#endif
+
+ size_t len;
+ len = snprintf(jsonBuffer, bufferLength, JIT_SPEW_DIR "/ion%" PRIuSIZE ".json", pid);
+ if (bufferLength <= len) {
+ fprintf(stderr, "Warning: IonSpewer::init: Cannot serialize file name.");
+ return false;
+ }
+ jsonFilename = jsonBuffer;
+
+ len = snprintf(c1Buffer, bufferLength, JIT_SPEW_DIR "/ion%" PRIuSIZE ".cfg", pid);
+ if (bufferLength <= len) {
+ fprintf(stderr, "Warning: IonSpewer::init: Cannot serialize file name.");
+ return false;
+ }
+ c1Filename = c1Buffer;
+ }
+
+ if (!c1Output_.init(c1Filename) ||
+ !jsonOutput_.init(jsonFilename))
+ {
+ release();
+ return false;
+ }
+
+ jsonOutput_.printf("{\n \"functions\": [\n");
+ firstFunction_ = true;
+
+ inited_ = true;
+ return true;
+}
+
+void
+IonSpewer::beginFunction()
+{
+    // If we are doing synchronous logging, then we spew everything as we go,
+    // which is useful in case of a failure during the compilation. In that
+    // case it is recommended to disable off-main-thread compilation.
+ if (!getAsyncLogging() && !firstFunction_) {
+ LockGuard<Mutex> guard(outputLock_);
+ jsonOutput_.put(","); // separate functions
+ }
+}
+
+void
+IonSpewer::spewPass(GraphSpewer* gs)
+{
+ if (!getAsyncLogging()) {
+ LockGuard<Mutex> guard(outputLock_);
+ gs->dump(c1Output_, jsonOutput_);
+ }
+}
+
+void
+IonSpewer::endFunction(GraphSpewer* gs)
+{
+ LockGuard<Mutex> guard(outputLock_);
+ if (getAsyncLogging() && !firstFunction_)
+ jsonOutput_.put(","); // separate functions
+
+ gs->dump(c1Output_, jsonOutput_);
+ firstFunction_ = false;
+}
+
+IonSpewer::~IonSpewer()
+{
+ if (!inited_)
+ return;
+
+ jsonOutput_.printf("\n]}\n");
+ release();
+}
+
+GraphSpewer::GraphSpewer(TempAllocator *alloc)
+ : graph_(nullptr),
+ c1Printer_(alloc->lifoAlloc()),
+ jsonPrinter_(alloc->lifoAlloc()),
+ c1Spewer_(c1Printer_),
+ jsonSpewer_(jsonPrinter_)
+{
+}
+
+void
+GraphSpewer::init(MIRGraph* graph, JSScript* function)
+{
+ MOZ_ASSERT(!isSpewing());
+ if (!ionspewer.isEnabled())
+ return;
+
+ if (!FilterContainsLocation(function)) {
+        // Filter out the logs for this compilation.
+ filteredOutCompilations++;
+ MOZ_ASSERT(!isSpewing());
+ return;
+ }
+
+ graph_ = graph;
+ MOZ_ASSERT(isSpewing());
+}
+
+void
+GraphSpewer::beginFunction(JSScript* function)
+{
+ if (!isSpewing())
+ return;
+
+ c1Spewer_.beginFunction(graph_, function);
+ jsonSpewer_.beginFunction(function);
+
+ ionspewer.beginFunction();
+}
+
+void
+GraphSpewer::spewPass(const char* pass)
+{
+ if (!isSpewing())
+ return;
+
+ c1Spewer_.spewPass(pass);
+
+ jsonSpewer_.beginPass(pass);
+ jsonSpewer_.spewMIR(graph_);
+ jsonSpewer_.spewLIR(graph_);
+ jsonSpewer_.endPass();
+
+ ionspewer.spewPass(this);
+
+ // As this function is used for debugging, we ignore any of the previous
+ // failures and ensure there is enough ballast space, such that we do not
+    // exhaust it before running the next phase.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!graph_->alloc().ensureBallast())
+ oomUnsafe.crash("Could not ensure enough ballast space after spewing graph information.");
+}
+
+void
+GraphSpewer::spewPass(const char* pass, BacktrackingAllocator* ra)
+{
+ if (!isSpewing())
+ return;
+
+ c1Spewer_.spewPass(pass);
+ c1Spewer_.spewRanges(pass, ra);
+
+ jsonSpewer_.beginPass(pass);
+ jsonSpewer_.spewMIR(graph_);
+ jsonSpewer_.spewLIR(graph_);
+ jsonSpewer_.spewRanges(ra);
+ jsonSpewer_.endPass();
+
+ ionspewer.spewPass(this);
+}
+
+void
+GraphSpewer::endFunction()
+{
+ if (!ionspewer.isEnabled())
+ return;
+
+ if (!isSpewing()) {
+ MOZ_ASSERT(filteredOutCompilations != 0);
+ filteredOutCompilations--;
+ return;
+ }
+
+ c1Spewer_.endFunction();
+ jsonSpewer_.endFunction();
+
+ ionspewer.endFunction(this);
+ graph_ = nullptr;
+}
+
+void
+GraphSpewer::dump(Fprinter& c1Out, Fprinter& jsonOut)
+{
+ if (!c1Printer_.hadOutOfMemory()) {
+ c1Printer_.exportInto(c1Out);
+ c1Out.flush();
+ }
+ c1Printer_.clear();
+
+ if (!jsonPrinter_.hadOutOfMemory())
+ jsonPrinter_.exportInto(jsonOut);
+ else
+ jsonOut.put("{}");
+ jsonOut.flush();
+ jsonPrinter_.clear();
+}
+
+void
+jit::SpewBeginFunction(MIRGenerator* mir, JSScript* function)
+{
+ MIRGraph* graph = &mir->graph();
+ mir->graphSpewer().init(graph, function);
+ mir->graphSpewer().beginFunction(function);
+}
+
+AutoSpewEndFunction::~AutoSpewEndFunction()
+{
+ mir_->graphSpewer().endFunction();
+}
+
+Fprinter&
+jit::JitSpewPrinter()
+{
+ static Fprinter out;
+ return out;
+}
+
+
+static bool
+ContainsFlag(const char* str, const char* flag)
+{
+ size_t flaglen = strlen(flag);
+ const char* index = strstr(str, flag);
+ while (index) {
+ if ((index == str || index[-1] == ',') && (index[flaglen] == 0 || index[flaglen] == ','))
+ return true;
+ index = strstr(index + flaglen, flag);
+ }
+ return false;
+}
+
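+// CheckLogging parses the IONFLAGS environment variable once and enables the
+// matching spew channels, e.g. (hypothetical) IONFLAGS=scripts,inline,bailouts.
+// IONFLAGS=help prints the list of supported options and exits.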
+void
+jit::CheckLogging()
+{
+ if (LoggingChecked)
+ return;
+ LoggingChecked = true;
+ const char* env = getenv("IONFLAGS");
+ if (!env)
+ return;
+ if (strstr(env, "help")) {
+ fflush(nullptr);
+ printf(
+ "\n"
+ "usage: IONFLAGS=option,option,option,... where options can be:\n"
+ "\n"
+ " aborts Compilation abort messages\n"
+ " scripts Compiled scripts\n"
+ " mir MIR information\n"
+ " prune Prune unused branches\n"
+ " escape Escape analysis\n"
+ " alias Alias analysis\n"
+ " alias-sum Alias analysis: shows summaries for every block\n"
+ " gvn Global Value Numbering\n"
+ " licm Loop invariant code motion\n"
+ " flac Fold linear arithmetic constants\n"
+ " eaa Effective address analysis\n"
+ " sincos Replace sin/cos by sincos\n"
+ " sink Sink transformation\n"
+ " regalloc Register allocation\n"
+ " inline Inlining\n"
+ " snapshots Snapshot information\n"
+ " codegen Native code generation\n"
+ " bailouts Bailouts\n"
+ " caches Inline caches\n"
+ " osi Invalidation\n"
+ " safepoints Safepoints\n"
+ " pools Literal Pools (ARM only for now)\n"
+ " cacheflush Instruction Cache flushes (ARM only for now)\n"
+ " range Range Analysis\n"
+ " unroll Loop unrolling\n"
+ " logs C1 and JSON visualization logging\n"
+ " logs-sync Same as logs, but flushes between each pass (sync. compiled functions only).\n"
+ " profiling Profiling-related information\n"
+ " trackopts Optimization tracking information\n"
+ " dump-mir-expr Dump the MIR expressions\n"
+ " all Everything\n"
+ "\n"
+ " bl-aborts Baseline compiler abort messages\n"
+ " bl-scripts Baseline script-compilation\n"
+ " bl-op Baseline compiler detailed op-specific messages\n"
+ " bl-ic Baseline inline-cache messages\n"
+ " bl-ic-fb Baseline IC fallback stub messages\n"
+ " bl-osr Baseline IC OSR messages\n"
+ " bl-bails Baseline bailouts\n"
+ " bl-dbg-osr Baseline debug mode on stack recompile messages\n"
+ " bl-all All baseline spew\n"
+ "\n"
+ );
+ exit(0);
+ /*NOTREACHED*/
+ }
+ if (ContainsFlag(env, "aborts"))
+ EnableChannel(JitSpew_IonAbort);
+ if (ContainsFlag(env, "prune"))
+ EnableChannel(JitSpew_Prune);
+ if (ContainsFlag(env, "escape"))
+ EnableChannel(JitSpew_Escape);
+ if (ContainsFlag(env, "alias"))
+ EnableChannel(JitSpew_Alias);
+ if (ContainsFlag(env, "alias-sum"))
+ EnableChannel(JitSpew_AliasSummaries);
+ if (ContainsFlag(env, "scripts"))
+ EnableChannel(JitSpew_IonScripts);
+ if (ContainsFlag(env, "mir"))
+ EnableChannel(JitSpew_IonMIR);
+ if (ContainsFlag(env, "gvn"))
+ EnableChannel(JitSpew_GVN);
+ if (ContainsFlag(env, "range"))
+ EnableChannel(JitSpew_Range);
+ if (ContainsFlag(env, "unroll"))
+ EnableChannel(JitSpew_Unrolling);
+ if (ContainsFlag(env, "licm"))
+ EnableChannel(JitSpew_LICM);
+ if (ContainsFlag(env, "flac"))
+ EnableChannel(JitSpew_FLAC);
+ if (ContainsFlag(env, "eaa"))
+ EnableChannel(JitSpew_EAA);
+ if (ContainsFlag(env, "sincos"))
+ EnableChannel(JitSpew_Sincos);
+ if (ContainsFlag(env, "sink"))
+ EnableChannel(JitSpew_Sink);
+ if (ContainsFlag(env, "regalloc"))
+ EnableChannel(JitSpew_RegAlloc);
+ if (ContainsFlag(env, "inline"))
+ EnableChannel(JitSpew_Inlining);
+ if (ContainsFlag(env, "snapshots"))
+ EnableChannel(JitSpew_IonSnapshots);
+ if (ContainsFlag(env, "codegen"))
+ EnableChannel(JitSpew_Codegen);
+ if (ContainsFlag(env, "bailouts"))
+ EnableChannel(JitSpew_IonBailouts);
+ if (ContainsFlag(env, "osi"))
+ EnableChannel(JitSpew_IonInvalidate);
+ if (ContainsFlag(env, "caches"))
+ EnableChannel(JitSpew_IonIC);
+ if (ContainsFlag(env, "safepoints"))
+ EnableChannel(JitSpew_Safepoints);
+ if (ContainsFlag(env, "pools"))
+ EnableChannel(JitSpew_Pools);
+ if (ContainsFlag(env, "cacheflush"))
+ EnableChannel(JitSpew_CacheFlush);
+ if (ContainsFlag(env, "logs"))
+ EnableIonDebugAsyncLogging();
+ if (ContainsFlag(env, "logs-sync"))
+ EnableIonDebugSyncLogging();
+ if (ContainsFlag(env, "profiling"))
+ EnableChannel(JitSpew_Profiling);
+ if (ContainsFlag(env, "trackopts"))
+ EnableChannel(JitSpew_OptimizationTracking);
+ if (ContainsFlag(env, "dump-mir-expr"))
+ EnableChannel(JitSpew_MIRExpressions);
+ if (ContainsFlag(env, "all"))
+ LoggingBits = uint64_t(-1);
+
+ if (ContainsFlag(env, "bl-aborts"))
+ EnableChannel(JitSpew_BaselineAbort);
+ if (ContainsFlag(env, "bl-scripts"))
+ EnableChannel(JitSpew_BaselineScripts);
+ if (ContainsFlag(env, "bl-op"))
+ EnableChannel(JitSpew_BaselineOp);
+ if (ContainsFlag(env, "bl-ic"))
+ EnableChannel(JitSpew_BaselineIC);
+ if (ContainsFlag(env, "bl-ic-fb"))
+ EnableChannel(JitSpew_BaselineICFallback);
+ if (ContainsFlag(env, "bl-osr"))
+ EnableChannel(JitSpew_BaselineOSR);
+ if (ContainsFlag(env, "bl-bails"))
+ EnableChannel(JitSpew_BaselineBailouts);
+ if (ContainsFlag(env, "bl-dbg-osr"))
+ EnableChannel(JitSpew_BaselineDebugModeOSR);
+ if (ContainsFlag(env, "bl-all")) {
+ EnableChannel(JitSpew_BaselineAbort);
+ EnableChannel(JitSpew_BaselineScripts);
+ EnableChannel(JitSpew_BaselineOp);
+ EnableChannel(JitSpew_BaselineIC);
+ EnableChannel(JitSpew_BaselineICFallback);
+ EnableChannel(JitSpew_BaselineOSR);
+ EnableChannel(JitSpew_BaselineBailouts);
+ EnableChannel(JitSpew_BaselineDebugModeOSR);
+ }
+
+ JitSpewPrinter().init(stderr);
+}
+
+JitSpewIndent::JitSpewIndent(JitSpewChannel channel)
+ : channel_(channel)
+{
+ ChannelIndentLevel[channel]++;
+}
+
+JitSpewIndent::~JitSpewIndent()
+{
+ ChannelIndentLevel[channel_]--;
+}
+
+void
+jit::JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap)
+{
+ if (!JitSpewEnabled(channel))
+ return;
+
+ JitSpewHeader(channel);
+ Fprinter& out = JitSpewPrinter();
+ out.vprintf(fmt, ap);
+}
+
+void
+jit::JitSpewContVA(JitSpewChannel channel, const char* fmt, va_list ap)
+{
+ if (!JitSpewEnabled(channel))
+ return;
+
+ Fprinter& out = JitSpewPrinter();
+ out.vprintf(fmt, ap);
+}
+
+void
+jit::JitSpewFin(JitSpewChannel channel)
+{
+ if (!JitSpewEnabled(channel))
+ return;
+
+ Fprinter& out = JitSpewPrinter();
+ out.put("\n");
+}
+
+void
+jit::JitSpewVA(JitSpewChannel channel, const char* fmt, va_list ap)
+{
+ JitSpewStartVA(channel, fmt, ap);
+ JitSpewFin(channel);
+}
+
+void
+jit::JitSpew(JitSpewChannel channel, const char* fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewVA(channel, fmt, ap);
+ va_end(ap);
+}
+
+void
+jit::JitSpewDef(JitSpewChannel channel, const char* str, MDefinition* def)
+{
+ if (!JitSpewEnabled(channel))
+ return;
+
+ JitSpewHeader(channel);
+ Fprinter& out = JitSpewPrinter();
+ out.put(str);
+ def->dump(out);
+ def->dumpLocation(out);
+}
+
+void
+jit::JitSpewStart(JitSpewChannel channel, const char* fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewStartVA(channel, fmt, ap);
+ va_end(ap);
+}
+void
+jit::JitSpewCont(JitSpewChannel channel, const char* fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewContVA(channel, fmt, ap);
+ va_end(ap);
+}
+
+void
+jit::JitSpewHeader(JitSpewChannel channel)
+{
+ if (!JitSpewEnabled(channel))
+ return;
+
+ Fprinter& out = JitSpewPrinter();
+ out.printf("[%s] ", ChannelNames[channel]);
+ for (size_t i = ChannelIndentLevel[channel]; i != 0; i--)
+ out.put(" ");
+}
+
+bool
+jit::JitSpewEnabled(JitSpewChannel channel)
+{
+ MOZ_ASSERT(LoggingChecked);
+ return (LoggingBits & (uint64_t(1) << uint32_t(channel))) && !filteredOutCompilations;
+}
+
+void
+jit::EnableChannel(JitSpewChannel channel)
+{
+ MOZ_ASSERT(LoggingChecked);
+ LoggingBits |= uint64_t(1) << uint32_t(channel);
+}
+
+void
+jit::DisableChannel(JitSpewChannel channel)
+{
+ MOZ_ASSERT(LoggingChecked);
+ LoggingBits &= ~(uint64_t(1) << uint32_t(channel));
+}
+
+#endif /* JS_JITSPEW */
+
diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
new file mode 100644
index 000000000..e187f7aa7
--- /dev/null
+++ b/js/src/jit/JitSpewer.h
@@ -0,0 +1,293 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitSpewer_h
+#define jit_JitSpewer_h
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include <stdarg.h>
+
+#include "jit/C1Spewer.h"
+#include "jit/JSONSpewer.h"
+
+#include "js/RootingAPI.h"
+
+#include "vm/Printer.h"
+
+namespace js {
+namespace jit {
+
+// New channels may be added below.
+#define JITSPEW_CHANNEL_LIST(_) \
+    /* Information during branch pruning */ \
+ _(Prune) \
+ /* Information during escape analysis */\
+ _(Escape) \
+ /* Information during alias analysis */ \
+ _(Alias) \
+ /* Information during alias analysis */ \
+ _(AliasSummaries) \
+ /* Information during GVN */ \
+ _(GVN) \
+ /* Information during sincos */ \
+ _(Sincos) \
+ /* Information during sinking */ \
+ _(Sink) \
+ /* Information during Range analysis */ \
+ _(Range) \
+ /* Information during loop unrolling */ \
+ _(Unrolling) \
+ /* Information during LICM */ \
+ _(LICM) \
+ /* Info about fold linear constants */ \
+ _(FLAC) \
+ /* Effective address analysis info */ \
+ _(EAA) \
+ /* Information during regalloc */ \
+ _(RegAlloc) \
+ /* Information during inlining */ \
+ _(Inlining) \
+ /* Information during codegen */ \
+ _(Codegen) \
+ /* Debug info about safepoints */ \
+ _(Safepoints) \
+ /* Debug info about Pools*/ \
+ _(Pools) \
+ /* Profiling-related information */ \
+ _(Profiling) \
+ /* Information of tracked opt strats */ \
+ _(OptimizationTracking) \
+ /* Debug info about the I$ */ \
+ _(CacheFlush) \
+ /* Output a list of MIR expressions */ \
+ _(MIRExpressions) \
+ \
+ /* BASELINE COMPILER SPEW */ \
+ \
+ /* Aborting Script Compilation. */ \
+ _(BaselineAbort) \
+ /* Script Compilation. */ \
+ _(BaselineScripts) \
+ /* Detailed op-specific spew. */ \
+ _(BaselineOp) \
+ /* Inline caches. */ \
+ _(BaselineIC) \
+ /* Inline cache fallbacks. */ \
+ _(BaselineICFallback) \
+ /* OSR from Baseline => Ion. */ \
+ _(BaselineOSR) \
+ /* Bailouts. */ \
+ _(BaselineBailouts) \
+    /* Debug Mode On-Stack Recompile. */    \
+ _(BaselineDebugModeOSR) \
+ \
+ /* ION COMPILER SPEW */ \
+ \
+ /* Used to abort SSA construction */ \
+ _(IonAbort) \
+ /* Information about compiled scripts */\
+ _(IonScripts) \
+ /* Info about failing to log script */ \
+ _(IonSyncLogs) \
+ /* Information during MIR building */ \
+ _(IonMIR) \
+ /* Information during bailouts */ \
+ _(IonBailouts) \
+ /* Information during OSI */ \
+ _(IonInvalidate) \
+ /* Debug info about snapshots */ \
+ _(IonSnapshots) \
+ /* Generated inline cache stubs */ \
+ _(IonIC)
+
+enum JitSpewChannel {
+#define JITSPEW_CHANNEL(name) JitSpew_##name,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+#undef JITSPEW_CHANNEL
+ JitSpew_Terminator
+};
+
+class BacktrackingAllocator;
+class MDefinition;
+class MIRGenerator;
+class MIRGraph;
+class TempAllocator;
+
+// The JitSpewer is only available on debug builds.
+// None of the global functions have any effect on non-debug builds.
+static const int NULL_ID = -1;
+
+#ifdef JS_JITSPEW
+
+// Class that holds the MIR and LIR graphs of a Wasm / Ion compilation.
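+// Typical lifecycle: SpewBeginFunction() calls init() and beginFunction(),
+// each optimization pass then calls spewPass(), and AutoSpewEndFunction's
+// destructor calls endFunction() once the compilation is done.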
+class GraphSpewer
+{
+ private:
+ MIRGraph* graph_;
+ LSprinter c1Printer_;
+ LSprinter jsonPrinter_;
+ C1Spewer c1Spewer_;
+ JSONSpewer jsonSpewer_;
+
+ public:
+ explicit GraphSpewer(TempAllocator *alloc);
+
+ bool isSpewing() const {
+ return graph_;
+ }
+ void init(MIRGraph* graph, JSScript* function);
+ void beginFunction(JSScript* function);
+ void spewPass(const char* pass);
+ void spewPass(const char* pass, BacktrackingAllocator* ra);
+ void endFunction();
+
+ void dump(Fprinter& c1, Fprinter& json);
+};
+
+void SpewBeginFunction(MIRGenerator* mir, JSScript* function);
+class AutoSpewEndFunction
+{
+ private:
+ MIRGenerator* mir_;
+
+ public:
+ explicit AutoSpewEndFunction(MIRGenerator* mir)
+ : mir_(mir)
+ { }
+ ~AutoSpewEndFunction();
+};
+
+void CheckLogging();
+Fprinter& JitSpewPrinter();
+
+class JitSpewIndent
+{
+ JitSpewChannel channel_;
+
+ public:
+ explicit JitSpewIndent(JitSpewChannel channel);
+ ~JitSpewIndent();
+};
+
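+// Illustrative call (the channel and printf-style message are up to the
+// caller): JitSpew(JitSpew_IonScripts, "Analyzing script %s:%d", filename, lineno);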
+void JitSpew(JitSpewChannel channel, const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewStart(JitSpewChannel channel, const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewCont(JitSpewChannel channel, const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewFin(JitSpewChannel channel);
+void JitSpewHeader(JitSpewChannel channel);
+bool JitSpewEnabled(JitSpewChannel channel);
+void JitSpewVA(JitSpewChannel channel, const char* fmt, va_list ap);
+void JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap);
+void JitSpewContVA(JitSpewChannel channel, const char* fmt, va_list ap);
+void JitSpewDef(JitSpewChannel channel, const char* str, MDefinition* def);
+
+void EnableChannel(JitSpewChannel channel);
+void DisableChannel(JitSpewChannel channel);
+void EnableIonDebugSyncLogging();
+void EnableIonDebugAsyncLogging();
+
+#else
+
+class GraphSpewer
+{
+ public:
+ explicit GraphSpewer(TempAllocator *alloc) { }
+
+ bool isSpewing() { return false; }
+ void init(MIRGraph* graph, JSScript* function) { }
+ void beginFunction(JSScript* function) { }
+ void spewPass(const char* pass) { }
+ void spewPass(const char* pass, BacktrackingAllocator* ra) { }
+ void endFunction() { }
+
+ void dump(Fprinter& c1, Fprinter& json) { }
+};
+
+static inline void SpewBeginFunction(MIRGenerator* mir, JSScript* function)
+{ }
+
+class AutoSpewEndFunction
+{
+ public:
+ explicit AutoSpewEndFunction(MIRGenerator* mir) { }
+ ~AutoSpewEndFunction() { }
+};
+
+static inline void CheckLogging()
+{ }
+static inline Fprinter& JitSpewPrinter()
+{
+ MOZ_CRASH("No empty backend for JitSpewPrinter");
+}
+
+class JitSpewIndent
+{
+ public:
+ explicit JitSpewIndent(JitSpewChannel channel) {}
+ ~JitSpewIndent() {}
+};
+
+// The computation of some of the arguments to the spewing functions might be
+// costly, thus we use variadic macros to ignore the arguments of these
+// functions.
+static inline void JitSpewCheckArguments(JitSpewChannel channel, const char* fmt)
+{ }
+
+#define JitSpewCheckExpandedArgs(channel, fmt, ...) JitSpewCheckArguments(channel, fmt)
+#define JitSpewCheckExpandedArgs_(ArgList) JitSpewCheckExpandedArgs ArgList /* Fix MSVC issue */
+#define JitSpew(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
+#define JitSpewStart(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
+#define JitSpewCont(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
+
+static inline void JitSpewFin(JitSpewChannel channel)
+{ }
+
+static inline void JitSpewHeader(JitSpewChannel channel)
+{ }
+static inline bool JitSpewEnabled(JitSpewChannel channel)
+{ return false; }
+static inline void JitSpewVA(JitSpewChannel channel, const char* fmt, va_list ap)
+{ }
+static inline void JitSpewDef(JitSpewChannel channel, const char* str, MDefinition* def)
+{ }
+
+static inline void EnableChannel(JitSpewChannel)
+{ }
+static inline void DisableChannel(JitSpewChannel)
+{ }
+static inline void EnableIonDebugSyncLogging()
+{ }
+static inline void EnableIonDebugAsyncLogging()
+{ }
+
+#endif /* JS_JITSPEW */
+
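+// Scoped helper that temporarily silences one spew channel, e.g.
+// (illustrative) AutoDisableSpew<JitSpew_Codegen> quiet; the channel is
+// re-enabled on destruction if it was enabled beforehand.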
+template <JitSpewChannel Channel>
+class AutoDisableSpew
+{
+ mozilla::DebugOnly<bool> enabled_;
+
+ public:
+ AutoDisableSpew()
+ : enabled_(JitSpewEnabled(Channel))
+ {
+ DisableChannel(Channel);
+ }
+
+ ~AutoDisableSpew()
+ {
+#ifdef JS_JITSPEW
+ if (enabled_)
+ EnableChannel(Channel);
+#endif
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitSpewer_h */
diff --git a/js/src/jit/JitcodeMap.cpp b/js/src/jit/JitcodeMap.cpp
new file mode 100644
index 000000000..d08218520
--- /dev/null
+++ b/js/src/jit/JitcodeMap.cpp
@@ -0,0 +1,1662 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitcodeMap.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/SizePrintfMacros.h"
+#include "mozilla/Sprintf.h"
+
+#include <algorithm>
+
+#include "jsprf.h"
+
+#include "gc/Marking.h"
+#include "gc/Statistics.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitSpewer.h"
+#include "js/Vector.h"
+#include "vm/SPSProfiler.h"
+
+#include "jsscriptinlines.h"
+
+#include "vm/TypeInference-inl.h"
+
+using mozilla::Maybe;
+
+namespace js {
+namespace jit {
+
+static inline JitcodeRegionEntry
+RegionAtAddr(const JitcodeGlobalEntry::IonEntry& entry, void* ptr,
+ uint32_t* ptrOffset)
+{
+ MOZ_ASSERT(entry.containsPointer(ptr));
+ *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
+ reinterpret_cast<uint8_t*>(entry.nativeStartAddr());
+
+ uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
+ MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());
+
+ return entry.regionTable()->regionEntry(regionIdx);
+}
+
+void*
+JitcodeGlobalEntry::IonEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
+{
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+ return (void*)(((uint8_t*) nativeStartAddr()) + region.nativeOffset());
+}
+
+bool
+JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const
+{
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+ *depth = region.scriptDepth();
+
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ bool first = true;
+ while (locationIter.hasMore()) {
+ uint32_t scriptIdx, pcOffset;
+ locationIter.readNext(&scriptIdx, &pcOffset);
+ // For the first entry pushed (innermost frame), the pcOffset is obtained
+ // from the delta-run encodings.
+ if (first) {
+ pcOffset = region.findPcOffset(ptrOffset, pcOffset);
+ first = false;
+ }
+ JSScript* script = getScript(scriptIdx);
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+ if (!results.append(BytecodeLocation(script, pc)))
+ return false;
+ }
+
+ return true;
+}
+
+uint32_t
+JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ const char** results,
+ uint32_t maxResults) const
+{
+ MOZ_ASSERT(maxResults >= 1);
+
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ uint32_t count = 0;
+ while (locationIter.hasMore()) {
+ uint32_t scriptIdx, pcOffset;
+
+ locationIter.readNext(&scriptIdx, &pcOffset);
+ MOZ_ASSERT(getStr(scriptIdx));
+
+ results[count++] = getStr(scriptIdx);
+ if (count >= maxResults)
+ break;
+ }
+
+ return count;
+}
+
+void
+JitcodeGlobalEntry::IonEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const
+{
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ uint32_t scriptIdx, pcOffset;
+ locationIter.readNext(&scriptIdx, &pcOffset);
+ pcOffset = region.findPcOffset(ptrOffset, pcOffset);
+
+ *script = getScript(scriptIdx);
+ *pc = (*script)->offsetToPC(pcOffset);
+}
+
+void
+JitcodeGlobalEntry::IonEntry::destroy()
+{
+ // The region table is stored at the tail of the compacted data,
+ // which means the start of the region table is a pointer to
+ // the _middle_ of the memory space allocated for it.
+ //
+ // When freeing it, obtain the payload start pointer first.
+ if (regionTable_)
+ js_free((void*) (regionTable_->payloadStart()));
+ regionTable_ = nullptr;
+
+ // Free the scriptList strs.
+ for (uint32_t i = 0; i < scriptList_->size; i++) {
+ js_free(scriptList_->pairs[i].str);
+ scriptList_->pairs[i].str = nullptr;
+ }
+
+ // Free the script list
+ js_free(scriptList_);
+ scriptList_ = nullptr;
+
+ // The optimizations region and attempts table is in the same block of
+ // memory, the beginning of which is pointed to by
+ // optimizationsRegionTable_->payloadStart().
+ if (optsRegionTable_) {
+ MOZ_ASSERT(optsAttemptsTable_);
+ js_free((void*) optsRegionTable_->payloadStart());
+ }
+ optsRegionTable_ = nullptr;
+ optsTypesTable_ = nullptr;
+ optsAttemptsTable_ = nullptr;
+ js_delete(optsAllTypes_);
+ optsAllTypes_ = nullptr;
+}
+
+void*
+JitcodeGlobalEntry::BaselineEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
+{
+ // TODO: We can't yet normalize Baseline addresses until we unify
+ // BaselineScript's PCMappingEntries with JitcodeGlobalMap.
+ return ptr;
+}
+
+bool
+JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const
+{
+ MOZ_ASSERT(containsPointer(ptr));
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
+ jsbytecode* pc = script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
+ if (!results.append(BytecodeLocation(script_, pc)))
+ return false;
+
+ *depth = 1;
+
+ return true;
+}
+
+uint32_t
+JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ const char** results,
+ uint32_t maxResults) const
+{
+ MOZ_ASSERT(containsPointer(ptr));
+ MOZ_ASSERT(maxResults >= 1);
+
+ results[0] = str();
+ return 1;
+}
+
+void
+JitcodeGlobalEntry::BaselineEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script,
+ jsbytecode** pc) const
+{
+ uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
+ *script = script_;
+ *pc = script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
+}
+
+void
+JitcodeGlobalEntry::BaselineEntry::destroy()
+{
+ if (!str_)
+ return;
+ js_free((void*) str_);
+ str_ = nullptr;
+}
+
+static inline JitcodeGlobalEntry&
+RejoinEntry(JSRuntime* rt, const JitcodeGlobalEntry::IonCacheEntry& cache, void* ptr)
+{
+ MOZ_ASSERT(cache.containsPointer(ptr));
+
+ // There must exist an entry for the rejoin addr if this entry exists.
+ JitRuntime* jitrt = rt->jitRuntime();
+ JitcodeGlobalEntry& entry =
+ jitrt->getJitcodeGlobalTable()->lookupInfallible(cache.rejoinAddr());
+ MOZ_ASSERT(entry.isIon());
+ return entry;
+}
+
+void*
+JitcodeGlobalEntry::IonCacheEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
+{
+ return nativeStartAddr_;
+}
+
+bool
+JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const
+{
+ const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
+ return entry.callStackAtAddr(rt, rejoinAddr(), results, depth);
+}
+
+uint32_t
+JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ const char** results,
+ uint32_t maxResults) const
+{
+ const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
+ return entry.callStackAtAddr(rt, rejoinAddr(), results, maxResults);
+}
+
+void
+JitcodeGlobalEntry::IonCacheEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script,
+ jsbytecode** pc) const
+{
+ const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
+ return entry.youngestFrameLocationAtAddr(rt, rejoinAddr(), script, pc);
+}
+
+
+static int ComparePointers(const void* a, const void* b) {
+ const uint8_t* a_ptr = reinterpret_cast<const uint8_t*>(a);
+ const uint8_t* b_ptr = reinterpret_cast<const uint8_t*>(b);
+ if (a_ptr < b_ptr)
+ return -1;
+ if (a_ptr > b_ptr)
+ return 1;
+ return 0;
+}
+
+/* static */ int
+JitcodeGlobalEntry::compare(const JitcodeGlobalEntry& ent1, const JitcodeGlobalEntry& ent2)
+{
+ // Both parts of compare cannot be a query.
+ MOZ_ASSERT(!(ent1.isQuery() && ent2.isQuery()));
+
+ // Ensure no overlaps for non-query lookups.
+ MOZ_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));
+
+    // For two non-query entries, just compare the start addresses.
+ if (!ent1.isQuery() && !ent2.isQuery())
+ return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());
+
+ void* ptr = ent1.isQuery() ? ent1.nativeStartAddr() : ent2.nativeStartAddr();
+ const JitcodeGlobalEntry& ent = ent1.isQuery() ? ent2 : ent1;
+ int flip = ent1.isQuery() ? 1 : -1;
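+    // When ent1 is the query the result reads naturally; when ent2 is the
+    // query, flip inverts the sign so the result is still expressed from
+    // ent1's point of view.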
+
+ if (ent.startsBelowPointer(ptr)) {
+ if (ent.endsAbovePointer(ptr))
+ return 0;
+
+ // query ptr > entry
+ return flip * 1;
+ }
+
+ // query ptr < entry
+ return flip * -1;
+}
+
+/* static */ char*
+JitcodeGlobalEntry::createScriptString(JSContext* cx, JSScript* script, size_t* length)
+{
+ // If the script has a function, try calculating its name.
+ bool hasName = false;
+ size_t nameLength = 0;
+ UniqueChars nameStr;
+ JSFunction* func = script->functionDelazifying();
+ if (func && func->displayAtom()) {
+ nameStr = StringToNewUTF8CharsZ(cx, *func->displayAtom());
+ if (!nameStr)
+ return nullptr;
+
+ nameLength = strlen(nameStr.get());
+ hasName = true;
+ }
+
+ // Calculate filename length
+ const char* filenameStr = script->filename() ? script->filename() : "(null)";
+ size_t filenameLength = strlen(filenameStr);
+
+ // Calculate lineno length
+ bool hasLineno = false;
+ size_t linenoLength = 0;
+ char linenoStr[15];
+ if (hasName || (script->functionNonDelazifying() || script->isForEval())) {
+ linenoLength = SprintfLiteral(linenoStr, "%" PRIuSIZE, script->lineno());
+ hasLineno = true;
+ }
+
+ // Full profile string for scripts with functions is:
+ // FuncName (FileName:Lineno)
+ // Full profile string for scripts without functions is:
+ // FileName:Lineno
+ // Full profile string for scripts without functions and without linenos is:
+ // FileName
+
+ // Calculate full string length.
+ size_t fullLength = 0;
+ if (hasName) {
+ MOZ_ASSERT(hasLineno);
+ fullLength = nameLength + 2 + filenameLength + 1 + linenoLength + 1;
+ } else if (hasLineno) {
+ fullLength = filenameLength + 1 + linenoLength;
+ } else {
+ fullLength = filenameLength;
+ }
+
+ // Allocate string.
+ char* str = cx->pod_malloc<char>(fullLength + 1);
+ if (!str)
+ return nullptr;
+
+ size_t cur = 0;
+
+ // Fill string with func name if needed.
+ if (hasName) {
+ memcpy(str + cur, nameStr.get(), nameLength);
+ cur += nameLength;
+ str[cur++] = ' ';
+ str[cur++] = '(';
+ }
+
+ // Fill string with filename chars.
+ memcpy(str + cur, filenameStr, filenameLength);
+ cur += filenameLength;
+
+ // Fill lineno chars.
+ if (hasLineno) {
+ str[cur++] = ':';
+ memcpy(str + cur, linenoStr, linenoLength);
+ cur += linenoLength;
+ }
+
+ // Terminal ')' if necessary.
+ if (hasName)
+ str[cur++] = ')';
+
+ MOZ_ASSERT(cur == fullLength);
+ str[cur] = 0;
+
+ if (length)
+ *length = fullLength;
+
+ return str;
+}
+
+
+JitcodeGlobalTable::Enum::Enum(JitcodeGlobalTable& table, JSRuntime* rt)
+ : Range(table),
+ rt_(rt),
+ next_(cur_ ? cur_->tower_->next(0) : nullptr)
+{
+ for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--)
+ prevTower_[level] = nullptr;
+}
+
+void
+JitcodeGlobalTable::Enum::popFront()
+{
+ MOZ_ASSERT(!empty());
+
+ // Did not remove current entry; advance prevTower_.
+ if (cur_ != table_.freeEntries_) {
+ for (int level = cur_->tower_->height() - 1; level >= 0; level--) {
+ JitcodeGlobalEntry* prevTowerEntry = prevTower_[level];
+
+ if (prevTowerEntry) {
+ if (prevTowerEntry->tower_->next(level) == cur_)
+ prevTower_[level] = cur_;
+ } else {
+ prevTower_[level] = table_.startTower_[level];
+ }
+ }
+ }
+
+ cur_ = next_;
+ if (!empty())
+ next_ = cur_->tower_->next(0);
+}
+
+void
+JitcodeGlobalTable::Enum::removeFront()
+{
+ MOZ_ASSERT(!empty());
+ table_.releaseEntry(*cur_, prevTower_, rt_);
+}
+
+const JitcodeGlobalEntry&
+JitcodeGlobalTable::lookupForSamplerInfallible(void* ptr, JSRuntime* rt, uint32_t sampleBufferGen)
+{
+ JitcodeGlobalEntry* entry = lookupInternal(ptr);
+ MOZ_ASSERT(entry);
+
+ entry->setGeneration(sampleBufferGen);
+
+ // IonCache entries must keep their corresponding Ion entries alive.
+ if (entry->isIonCache()) {
+ JitcodeGlobalEntry& rejoinEntry = RejoinEntry(rt, entry->ionCacheEntry(), ptr);
+ rejoinEntry.setGeneration(sampleBufferGen);
+ }
+
+ // JitcodeGlobalEntries are marked at the end of the mark phase. A read
+ // barrier is not needed. Any JS frames sampled during the sweep phase of
+ // the GC must be on stack, and on-stack frames must already be marked at
+ // the beginning of the sweep phase. It's not possible to assert this here
+ // as we may not be running on the main thread when called from the gecko
+ // profiler.
+
+ return *entry;
+}
+
+JitcodeGlobalEntry*
+JitcodeGlobalTable::lookupInternal(void* ptr)
+{
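+    // searchInternal leaves in searchTower[0] the greatest entry that sorts
+    // strictly below the query (or nullptr if there is none), so the entry
+    // containing ptr, if any, is either startTower_[0] or the immediate
+    // successor of searchTower[0].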
+ JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(ptr);
+ JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
+ searchInternal(query, searchTower);
+
+ if (searchTower[0] == nullptr) {
+ // Check startTower
+ if (startTower_[0] == nullptr)
+ return nullptr;
+
+ MOZ_ASSERT(startTower_[0]->compareTo(query) >= 0);
+ int cmp = startTower_[0]->compareTo(query);
+ MOZ_ASSERT(cmp >= 0);
+ return (cmp == 0) ? startTower_[0] : nullptr;
+ }
+
+ JitcodeGlobalEntry* bottom = searchTower[0];
+ MOZ_ASSERT(bottom->compareTo(query) < 0);
+
+ JitcodeGlobalEntry* bottomNext = bottom->tower_->next(0);
+ if (bottomNext == nullptr)
+ return nullptr;
+
+ int cmp = bottomNext->compareTo(query);
+ MOZ_ASSERT(cmp >= 0);
+ return (cmp == 0) ? bottomNext : nullptr;
+}
+
+bool
+JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry& entry, JSRuntime* rt)
+{
+ MOZ_ASSERT(entry.isIon() || entry.isBaseline() || entry.isIonCache() || entry.isDummy());
+
+ JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
+ searchInternal(entry, searchTower);
+
+ // Allocate a new entry and tower.
+ JitcodeSkiplistTower* newTower = allocateTower(generateTowerHeight());
+ if (!newTower)
+ return false;
+
+ JitcodeGlobalEntry* newEntry = allocateEntry();
+ if (!newEntry)
+ return false;
+
+ *newEntry = entry;
+ newEntry->tower_ = newTower;
+
+ // Suppress profiler sampling while skiplist is being mutated.
+ AutoSuppressProfilerSampling suppressSampling(rt);
+
+ // Link up entry with forward entries taken from tower.
+ for (int level = newTower->height() - 1; level >= 0; level--) {
+ JitcodeGlobalEntry* searchTowerEntry = searchTower[level];
+ if (searchTowerEntry) {
+ MOZ_ASSERT(searchTowerEntry->compareTo(*newEntry) < 0);
+ JitcodeGlobalEntry* searchTowerNextEntry = searchTowerEntry->tower_->next(level);
+
+ MOZ_ASSERT_IF(searchTowerNextEntry, searchTowerNextEntry->compareTo(*newEntry) > 0);
+
+ newTower->setNext(level, searchTowerNextEntry);
+ searchTowerEntry->tower_->setNext(level, newEntry);
+ } else {
+ newTower->setNext(level, startTower_[level]);
+ startTower_[level] = newEntry;
+ }
+ }
+ skiplistSize_++;
+ // verifySkiplist(); - disabled for release.
+ return true;
+}
+
+void
+JitcodeGlobalTable::removeEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
+ JSRuntime* rt)
+{
+ MOZ_ASSERT(!rt->isProfilerSamplingEnabled());
+
+ // Unlink query entry.
+ for (int level = entry.tower_->height() - 1; level >= 0; level--) {
+ JitcodeGlobalEntry* prevTowerEntry = prevTower[level];
+ if (prevTowerEntry) {
+ MOZ_ASSERT(prevTowerEntry->tower_->next(level) == &entry);
+ prevTowerEntry->tower_->setNext(level, entry.tower_->next(level));
+ } else {
+ startTower_[level] = entry.tower_->next(level);
+ }
+ }
+ skiplistSize_--;
+ // verifySkiplist(); - disabled for release.
+
+ // Entry has been unlinked.
+ entry.destroy();
+ entry.tower_->addToFreeList(&(freeTowers_[entry.tower_->height() - 1]));
+ entry.tower_ = nullptr;
+ entry = JitcodeGlobalEntry();
+ entry.addToFreeList(&freeEntries_);
+}
+
+void
+JitcodeGlobalTable::releaseEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
+ JSRuntime* rt)
+{
+ mozilla::DebugOnly<uint32_t> gen = rt->profilerSampleBufferGen();
+ mozilla::DebugOnly<uint32_t> lapCount = rt->profilerSampleBufferLapCount();
+ MOZ_ASSERT_IF(gen != UINT32_MAX, !entry.isSampled(gen, lapCount));
+ removeEntry(entry, prevTower, rt);
+}
+
+void
+JitcodeGlobalTable::searchInternal(const JitcodeGlobalEntry& query, JitcodeGlobalEntry** towerOut)
+{
+ JitcodeGlobalEntry* cur = nullptr;
+ for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
+ JitcodeGlobalEntry* entry = searchAtHeight(level, cur, query);
+ MOZ_ASSERT_IF(entry == nullptr, cur == nullptr);
+ towerOut[level] = entry;
+ cur = entry;
+ }
+
+ // Validate the resulting tower.
+#ifdef DEBUG
+ for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
+ if (towerOut[level] == nullptr) {
+ // If we got NULL for a given level, then we should have gotten NULL
+ // for the level above as well.
+ MOZ_ASSERT_IF(unsigned(level) < (JitcodeSkiplistTower::MAX_HEIGHT - 1),
+ towerOut[level + 1] == nullptr);
+ continue;
+ }
+
+ JitcodeGlobalEntry* cur = towerOut[level];
+
+ // Non-null result at a given level must sort < query.
+ MOZ_ASSERT(cur->compareTo(query) < 0);
+
+        // The entry must have a tower height that accommodates level.
+ if (!cur->tower_->next(level))
+ continue;
+
+ JitcodeGlobalEntry* next = cur->tower_->next(level);
+
+        // Next entry must have a tower height that accommodates level.
+ MOZ_ASSERT(unsigned(level) < next->tower_->height());
+
+ // Next entry must sort >= query.
+ MOZ_ASSERT(next->compareTo(query) >= 0);
+ }
+#endif // DEBUG
+}
+
+JitcodeGlobalEntry*
+JitcodeGlobalTable::searchAtHeight(unsigned level, JitcodeGlobalEntry* start,
+ const JitcodeGlobalEntry& query)
+{
+ JitcodeGlobalEntry* cur = start;
+
+ // If starting with nullptr, use the start tower.
+ if (start == nullptr) {
+ cur = startTower_[level];
+ if (cur == nullptr || cur->compareTo(query) >= 0)
+ return nullptr;
+ }
+
+ // Keep skipping at |level| until we reach an entry < query whose
+ // successor is an entry >= query.
+ for (;;) {
+ JitcodeGlobalEntry* next = cur->tower_->next(level);
+ if (next == nullptr || next->compareTo(query) >= 0)
+ return cur;
+
+ cur = next;
+ }
+}
+
+unsigned
+JitcodeGlobalTable::generateTowerHeight()
+{
+    // Implementation taken from Hars L. and Petruska G.,
+ // "Pseudorandom Recursions: Small and fast Pseudorandom number generators for
+ // embedded applications."
+ rand_ ^= mozilla::RotateLeft(rand_, 5) ^ mozilla::RotateLeft(rand_, 24);
+ rand_ += 0x37798849;
+
+ // Return number of lowbit zeros in new randval.
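+    // The number of trailing zero bits of a uniformly distributed word is
+    // geometrically distributed (each extra level is half as likely), which
+    // matches the usual skiplist tower-height distribution.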
+ unsigned result = 0;
+ for (unsigned i = 0; i < 32; i++) {
+ if ((rand_ >> i) & 0x1)
+ break;
+ result++;
+ }
+ return (std::max)(1U, result);
+}
+
+JitcodeSkiplistTower*
+JitcodeGlobalTable::allocateTower(unsigned height)
+{
+ MOZ_ASSERT(height >= 1);
+ JitcodeSkiplistTower* tower = JitcodeSkiplistTower::PopFromFreeList(&freeTowers_[height - 1]);
+ if (tower)
+ return tower;
+
+ size_t size = JitcodeSkiplistTower::CalculateSize(height);
+ tower = (JitcodeSkiplistTower*) alloc_.alloc(size);
+ if (!tower)
+ return nullptr;
+
+ return new (tower) JitcodeSkiplistTower(height);
+}
+
+JitcodeGlobalEntry*
+JitcodeGlobalTable::allocateEntry()
+{
+ JitcodeGlobalEntry* entry = JitcodeGlobalEntry::PopFromFreeList(&freeEntries_);
+ if (entry)
+ return entry;
+
+ return alloc_.new_<JitcodeGlobalEntry>();
+}
+
+#ifdef DEBUG
+void
+JitcodeGlobalTable::verifySkiplist()
+{
+ JitcodeGlobalEntry* curTower[JitcodeSkiplistTower::MAX_HEIGHT];
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
+ curTower[i] = startTower_[i];
+
+ uint32_t count = 0;
+ JitcodeGlobalEntry* curEntry = startTower_[0];
+ while (curEntry) {
+ count++;
+ unsigned curHeight = curEntry->tower_->height();
+ MOZ_ASSERT(curHeight >= 1);
+
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
+ if (i < curHeight) {
+ MOZ_ASSERT(curTower[i] == curEntry);
+ JitcodeGlobalEntry* nextEntry = curEntry->tower_->next(i);
+ MOZ_ASSERT_IF(nextEntry, curEntry->compareTo(*nextEntry) < 0);
+ curTower[i] = nextEntry;
+ } else {
+ MOZ_ASSERT_IF(curTower[i], curTower[i]->compareTo(*curEntry) > 0);
+ }
+ }
+ curEntry = curEntry->tower_->next(0);
+ }
+
+ MOZ_ASSERT(count == skiplistSize_);
+}
+#endif // DEBUG
+
+void
+JitcodeGlobalTable::setAllEntriesAsExpired(JSRuntime* rt)
+{
+ AutoSuppressProfilerSampling suppressSampling(rt);
+ for (Range r(*this); !r.empty(); r.popFront())
+ r.front()->setAsExpired();
+}
+
+struct Unconditionally
+{
+ template <typename T>
+ static bool ShouldMark(JSRuntime* rt, T* thingp) { return true; }
+};
+
+void
+JitcodeGlobalTable::markUnconditionally(JSTracer* trc)
+{
+ // Mark all entries unconditionally. This is done during minor collection
+ // to account for tenuring.
+
+ MOZ_ASSERT(trc->runtime()->spsProfiler.enabled());
+
+ AutoSuppressProfilerSampling suppressSampling(trc->runtime());
+ for (Range r(*this); !r.empty(); r.popFront())
+ r.front()->mark<Unconditionally>(trc);
+}
+
+struct IfUnmarked
+{
+ template <typename T>
+ static bool ShouldMark(JSRuntime* rt, T* thingp) { return !IsMarkedUnbarriered(rt, thingp); }
+};
+
+template <>
+bool IfUnmarked::ShouldMark<TypeSet::Type>(JSRuntime* rt, TypeSet::Type* type)
+{
+ return !TypeSet::IsTypeMarked(rt, type);
+}
+
+bool
+JitcodeGlobalTable::markIteratively(JSTracer* trc)
+{
+ // JitcodeGlobalTable must keep entries that are in the sampler buffer
+ // alive. This conditionality is akin to holding the entries weakly.
+ //
+ // If this table were marked at the beginning of the mark phase, then
+    // sampling in between incremental GC slices would require a read barrier.
+    // However, invoking read barriers from the sampler
+ // is wildly unsafe. The sampler may run at any time, including during GC
+ // itself.
+ //
+ // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
+ // phase, along with weak references. The key assumption is the
+ // following. At the beginning of the sweep phase, any JS frames that the
+ // sampler may put in its buffer that are not already there at the
+ // beginning of the mark phase must have already been marked, as either 1)
+ // the frame was on-stack at the beginning of the sweep phase, or 2) the
+ // frame was pushed between incremental sweep slices. Frames of case 1)
+ // are already marked. Frames of case 2) must have been reachable to have
+ // been newly pushed, and thus are already marked.
+ //
+ // The approach above obviates the need for read barriers. The assumption
+ // above is checked in JitcodeGlobalTable::lookupForSampler.
+
+ MOZ_ASSERT(!trc->runtime()->isHeapMinorCollecting());
+
+ AutoSuppressProfilerSampling suppressSampling(trc->runtime());
+ uint32_t gen = trc->runtime()->profilerSampleBufferGen();
+ uint32_t lapCount = trc->runtime()->profilerSampleBufferLapCount();
+
+ // If the profiler is off, all entries are considered to be expired.
+ if (!trc->runtime()->spsProfiler.enabled())
+ gen = UINT32_MAX;
+
+ bool markedAny = false;
+ for (Range r(*this); !r.empty(); r.popFront()) {
+ JitcodeGlobalEntry* entry = r.front();
+
+ // If an entry is not sampled, reset its generation to the invalid
+ // generation, and conditionally mark the rest of the entry if its
+ // JitCode is not already marked. This conditional marking ensures
+ // that so long as the JitCode *may* be sampled, we keep any
+ // information that may be handed out to the sampler, like tracked
+ // types used by optimizations and scripts used for pc to line number
+ // mapping, alive as well.
+ if (!entry->isSampled(gen, lapCount)) {
+ entry->setAsExpired();
+ if (!entry->baseEntry().isJitcodeMarkedFromAnyThread(trc->runtime()))
+ continue;
+ }
+
+ // The table is runtime-wide. Not all zones may be participating in
+ // the GC.
+ if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished())
+ continue;
+
+ markedAny |= entry->mark<IfUnmarked>(trc);
+ }
+
+ return markedAny;
+}
+
+void
+JitcodeGlobalTable::sweep(JSRuntime* rt)
+{
+ AutoSuppressProfilerSampling suppressSampling(rt);
+ for (Enum e(*this, rt); !e.empty(); e.popFront()) {
+ JitcodeGlobalEntry* entry = e.front();
+
+ if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished())
+ continue;
+
+ if (entry->baseEntry().isJitcodeAboutToBeFinalized())
+ e.removeFront();
+ else
+ entry->sweepChildren(rt);
+ }
+}
+
+template <class ShouldMarkProvider>
+bool
+JitcodeGlobalEntry::BaseEntry::markJitcode(JSTracer* trc)
+{
+ if (ShouldMarkProvider::ShouldMark(trc->runtime(), &jitcode_)) {
+ TraceManuallyBarrieredEdge(trc, &jitcode_, "jitcodglobaltable-baseentry-jitcode");
+ return true;
+ }
+ return false;
+}
+
+bool
+JitcodeGlobalEntry::BaseEntry::isJitcodeMarkedFromAnyThread(JSRuntime* rt)
+{
+ return IsMarkedUnbarriered(rt, &jitcode_) ||
+ jitcode_->arena()->allocatedDuringIncremental;
+}
+
+bool
+JitcodeGlobalEntry::BaseEntry::isJitcodeAboutToBeFinalized()
+{
+ return IsAboutToBeFinalizedUnbarriered(&jitcode_);
+}
+
+template <class ShouldMarkProvider>
+bool
+JitcodeGlobalEntry::BaselineEntry::mark(JSTracer* trc)
+{
+ if (ShouldMarkProvider::ShouldMark(trc->runtime(), &script_)) {
+ TraceManuallyBarrieredEdge(trc, &script_, "jitcodeglobaltable-baselineentry-script");
+ return true;
+ }
+ return false;
+}
+
+void
+JitcodeGlobalEntry::BaselineEntry::sweepChildren()
+{
+ MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&script_));
+}
+
+bool
+JitcodeGlobalEntry::BaselineEntry::isMarkedFromAnyThread(JSRuntime* rt)
+{
+ return IsMarkedUnbarriered(rt, &script_) ||
+ script_->arena()->allocatedDuringIncremental;
+}
+
+template <class ShouldMarkProvider>
+bool
+JitcodeGlobalEntry::IonEntry::mark(JSTracer* trc)
+{
+ bool markedAny = false;
+
+ JSRuntime* rt = trc->runtime();
+ for (unsigned i = 0; i < numScripts(); i++) {
+ if (ShouldMarkProvider::ShouldMark(rt, &sizedScriptList()->pairs[i].script)) {
+ TraceManuallyBarrieredEdge(trc, &sizedScriptList()->pairs[i].script,
+ "jitcodeglobaltable-ionentry-script");
+ markedAny = true;
+ }
+ }
+
+ if (!optsAllTypes_)
+ return markedAny;
+
+ for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
+ iter != optsAllTypes_->end(); iter++)
+ {
+ if (ShouldMarkProvider::ShouldMark(rt, &iter->type)) {
+ iter->type.trace(trc);
+ markedAny = true;
+ }
+ if (iter->hasAllocationSite() && ShouldMarkProvider::ShouldMark(rt, &iter->script)) {
+ TraceManuallyBarrieredEdge(trc, &iter->script,
+ "jitcodeglobaltable-ionentry-type-addendum-script");
+ markedAny = true;
+ } else if (iter->hasConstructor() && ShouldMarkProvider::ShouldMark(rt, &iter->constructor)) {
+ TraceManuallyBarrieredEdge(trc, &iter->constructor,
+ "jitcodeglobaltable-ionentry-type-addendum-constructor");
+ markedAny = true;
+ }
+ }
+
+ return markedAny;
+}
+
+void
+JitcodeGlobalEntry::IonEntry::sweepChildren()
+{
+ for (unsigned i = 0; i < numScripts(); i++)
+ MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&sizedScriptList()->pairs[i].script));
+
+ if (!optsAllTypes_)
+ return;
+
+ for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
+ iter != optsAllTypes_->end(); iter++)
+ {
+ // Types may move under compacting GC. This method is only called on
+ // entries that are sampled, and thus are not about to be finalized.
+ MOZ_ALWAYS_FALSE(TypeSet::IsTypeAboutToBeFinalized(&iter->type));
+ if (iter->hasAllocationSite())
+ MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&iter->script));
+ else if (iter->hasConstructor())
+ MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&iter->constructor));
+ }
+}
+
+bool
+JitcodeGlobalEntry::IonEntry::isMarkedFromAnyThread(JSRuntime* rt)
+{
+ for (unsigned i = 0; i < numScripts(); i++) {
+ if (!IsMarkedUnbarriered(rt, &sizedScriptList()->pairs[i].script) &&
+ !sizedScriptList()->pairs[i].script->arena()->allocatedDuringIncremental)
+ {
+ return false;
+ }
+ }
+
+ if (!optsAllTypes_)
+ return true;
+
+ for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
+ iter != optsAllTypes_->end(); iter++)
+ {
+ if (!TypeSet::IsTypeMarked(rt, &iter->type) &&
+ !TypeSet::IsTypeAllocatedDuringIncremental(iter->type))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <class ShouldMarkProvider>
+bool
+JitcodeGlobalEntry::IonCacheEntry::mark(JSTracer* trc)
+{
+ JitcodeGlobalEntry& entry = RejoinEntry(trc->runtime(), *this, nativeStartAddr());
+ return entry.mark<ShouldMarkProvider>(trc);
+}
+
+void
+JitcodeGlobalEntry::IonCacheEntry::sweepChildren(JSRuntime* rt)
+{
+ JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
+ entry.sweepChildren(rt);
+}
+
+bool
+JitcodeGlobalEntry::IonCacheEntry::isMarkedFromAnyThread(JSRuntime* rt)
+{
+ JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
+ return entry.isMarkedFromAnyThread(rt);
+}
+
+Maybe<uint8_t>
+JitcodeGlobalEntry::IonCacheEntry::trackedOptimizationIndexAtAddr(
+ JSRuntime *rt,
+ void* ptr,
+ uint32_t* entryOffsetOut)
+{
+ MOZ_ASSERT(hasTrackedOptimizations());
+ MOZ_ASSERT(containsPointer(ptr));
+ JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
+
+ if (!entry.hasTrackedOptimizations())
+ return mozilla::Nothing();
+
+ uint32_t mainEntryOffsetOut;
+ Maybe<uint8_t> maybeIndex =
+ entry.trackedOptimizationIndexAtAddr(rt, rejoinAddr(), &mainEntryOffsetOut);
+ if (maybeIndex.isNothing())
+ return mozilla::Nothing();
+
+ // For IonCache entries, the canonical address is just the start of the entry's address range.
+ *entryOffsetOut = 0;
+ return maybeIndex;
+}
+
+void
+JitcodeGlobalEntry::IonCacheEntry::forEachOptimizationAttempt(
+ JSRuntime *rt, uint8_t index, JS::ForEachTrackedOptimizationAttemptOp& op)
+{
+ JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
+ if (!entry.hasTrackedOptimizations())
+ return;
+ entry.forEachOptimizationAttempt(rt, index, op);
+
+ // Record the outcome associated with the stub.
+ op(JS::TrackedStrategy::InlineCache_OptimizedStub, trackedOutcome_);
+}
+
+void
+JitcodeGlobalEntry::IonCacheEntry::forEachOptimizationTypeInfo(
+ JSRuntime *rt, uint8_t index,
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
+{
+ JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
+ if (!entry.hasTrackedOptimizations())
+ return;
+ entry.forEachOptimizationTypeInfo(rt, index, op);
+}
+
+/* static */ void
+JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
+ uint32_t nativeOffset, uint8_t scriptDepth)
+{
+ writer.writeUnsigned(nativeOffset);
+ writer.writeByte(scriptDepth);
+}
+
+/* static */ void
+JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
+ uint32_t* nativeOffset, uint8_t* scriptDepth)
+{
+ *nativeOffset = reader.readUnsigned();
+ *scriptDepth = reader.readByte();
+}
+
+/* static */ void
+JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
+ uint32_t scriptIdx, uint32_t pcOffset)
+{
+ writer.writeUnsigned(scriptIdx);
+ writer.writeUnsigned(pcOffset);
+}
+
+/* static */ void
+JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
+ uint32_t* scriptIdx, uint32_t* pcOffset)
+{
+ *scriptIdx = reader.readUnsigned();
+ *pcOffset = reader.readUnsigned();
+}
+
+/* static */ void
+JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
+ uint32_t nativeDelta, int32_t pcDelta)
+{
+ if (pcDelta >= 0) {
+ // 1 and 2-byte formats possible.
+
+ // NNNN-BBB0
+ if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
+ uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
+ (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal);
+ return;
+ }
+
+ // NNNN-NNNN BBBB-BB01
+ if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
+ uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
+ (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ return;
+ }
+ }
+
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
+ nativeDelta <= ENC3_NATIVE_DELTA_MAX)
+ {
+ uint32_t encVal = ENC3_MASK_VAL |
+ ((pcDelta << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
+ (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ writer.writeByte((encVal >> 16) & 0xff);
+ return;
+ }
+
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
+ nativeDelta <= ENC4_NATIVE_DELTA_MAX)
+ {
+ uint32_t encVal = ENC4_MASK_VAL |
+ ((pcDelta << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
+ (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ writer.writeByte((encVal >> 16) & 0xff);
+ writer.writeByte((encVal >> 24) & 0xff);
+ return;
+ }
+
+ // Should never get here.
+ MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
+}
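+
+// Worked example (illustrative -- the exact ENC1_* shift/mask constants live in
+// JitcodeMap.h and are assumed here to follow the NNNN-BBB0 diagram: tag bit at
+// bit 0, pc delta at bit 1, native delta at bit 4): nativeDelta=5 and pcDelta=3
+// pack into the single byte (5 << 4) | (3 << 1) | 0 = 0x56. The low tag bits
+// are what ReadDelta below uses to recognize which format was written.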
+
+/* static */ void
+JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
+ uint32_t* nativeDelta, int32_t* pcDelta)
+{
+ // NB:
+ // It's possible to get nativeDeltas with value 0 in two cases:
+ //
+ // 1. The last region's run. This is because the region table's start
+ // must be 4-byte aligned, and we must insert padding bytes to align the
+ // payload section before emitting the table.
+ //
+ // 2. A zero-offset nativeDelta with a negative pcDelta.
+ //
+ // So if nativeDelta is zero, then pcDelta must be <= 0.
+
+ // NNNN-BBB0
+ const uint32_t firstByte = reader.readByte();
+ if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
+ uint32_t encVal = firstByte;
+ *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
+ *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN BBBB-BB01
+ const uint32_t secondByte = reader.readByte();
+ if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8;
+ *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
+ *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ const uint32_t thirdByte = reader.readByte();
+ if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
+ *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;
+
+ uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
+ // Sign-extend the pcDelta field: a raw value above the field's positive
+ // maximum has its sign bit set, so fill in the high bits.
+ if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX))
+ pcDeltaU |= ~ENC3_PC_DELTA_MAX;
+ *pcDelta = pcDeltaU;
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
+ const uint32_t fourthByte = reader.readByte();
+ uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
+ *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;
+
+ uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
+ // Sign-extend the pcDelta field: a raw value above the field's positive
+ // maximum has its sign bit set, so fill in the high bits.
+ if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX))
+ pcDeltaU |= ~ENC4_PC_DELTA_MAX;
+ *pcDelta = pcDeltaU;
+
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+}
+
+/* static */ uint32_t
+JitcodeRegionEntry::ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode* entry,
+ const CodeGeneratorShared::NativeToBytecode* end)
+{
+ MOZ_ASSERT(entry < end);
+
+ // We always use the first entry, so runLength starts at 1
+ uint32_t runLength = 1;
+
+ uint32_t curNativeOffset = entry->nativeOffset.offset();
+ uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);
+
+ for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
+ // If the next run moves to a different inline site, stop the run.
+ if (nextEntry->tree != entry->tree)
+ break;
+
+ uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
+ uint32_t nextBytecodeOffset = nextEntry->tree->script()->pcToOffset(nextEntry->pc);
+ MOZ_ASSERT(nextNativeOffset >= curNativeOffset);
+
+ uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
+ int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
+
+ // If deltas are too large (very unlikely), stop the run.
+ if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta))
+ break;
+
+ runLength++;
+
+ // If the run has grown to its maximum length, stop the run.
+ if (runLength == MAX_RUN_LENGTH)
+ break;
+
+ curNativeOffset = nextNativeOffset;
+ curBytecodeOffset = nextBytecodeOffset;
+ }
+
+ return runLength;
+}
+
+struct JitcodeMapBufferWriteSpewer
+{
+#ifdef JS_JITSPEW
+ CompactBufferWriter* writer;
+ uint32_t startPos;
+
+ static const uint32_t DumpMaxBytes = 50;
+
+ explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
+ : writer(&w), startPos(writer->length())
+ {}
+
+ void spewAndAdvance(const char* name) {
+ if (writer->oom())
+ return;
+
+ uint32_t curPos = writer->length();
+ const uint8_t* start = writer->buffer() + startPos;
+ const uint8_t* end = writer->buffer() + curPos;
+ const char* MAP = "0123456789ABCDEF";
+ uint32_t bytes = end - start;
+
+ // Clamp to the spew buffer's capacity so the hex expansion below cannot
+ // write past |buffer|.
+ if (bytes > DumpMaxBytes)
+ bytes = DumpMaxBytes;
+
+ char buffer[DumpMaxBytes * 3];
+ for (uint32_t i = 0; i < bytes; i++) {
+ buffer[i*3] = MAP[(start[i] >> 4) & 0xf];
+ buffer[i*3 + 1] = MAP[(start[i] >> 0) & 0xf];
+ buffer[i*3 + 2] = ' ';
+ }
+ if (bytes >= DumpMaxBytes)
+ buffer[DumpMaxBytes*3 - 1] = '\0';
+ else
+ buffer[bytes*3 - 1] = '\0';
+
+ JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos), int(bytes), buffer);
+
+ // Move to the end of the current buffer.
+ startPos = writer->length();
+ }
+#else // !JS_JITSPEW
+ explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
+ void spewAndAdvance(const char* name) {}
+#endif // JS_JITSPEW
+};
+
+// Write a run, starting at the given NativeToBytecode entry, into the given buffer writer.
+/* static */ bool
+JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
+ JSScript** scriptList, uint32_t scriptListSize,
+ uint32_t runLength, const CodeGeneratorShared::NativeToBytecode* entry)
+{
+ MOZ_ASSERT(runLength > 0);
+ MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);
+
+ // Calculate script depth.
+ MOZ_ASSERT(entry->tree->depth() <= 0xff);
+ uint8_t scriptDepth = entry->tree->depth();
+ uint32_t regionNativeOffset = entry->nativeOffset.offset();
+
+ JitcodeMapBufferWriteSpewer spewer(writer);
+
+ // Write the head info.
+ JitSpew(JitSpew_Profiling, " Head Info: nativeOffset=%d scriptDepth=%d",
+ int(regionNativeOffset), int(scriptDepth));
+ WriteHead(writer, regionNativeOffset, scriptDepth);
+ spewer.spewAndAdvance(" ");
+
+ // Write each script/pc pair.
+ {
+ InlineScriptTree* curTree = entry->tree;
+ jsbytecode* curPc = entry->pc;
+ for (uint8_t i = 0; i < scriptDepth; i++) {
+ // Find the index of the script within the list.
+ // NB: scriptList is guaranteed to contain curTree->script()
+ uint32_t scriptIdx = 0;
+ for (; scriptIdx < scriptListSize; scriptIdx++) {
+ if (scriptList[scriptIdx] == curTree->script())
+ break;
+ }
+ MOZ_ASSERT(scriptIdx < scriptListSize);
+
+ uint32_t pcOffset = curTree->script()->pcToOffset(curPc);
+
+ JitSpew(JitSpew_Profiling, " Script/PC %d: scriptIdx=%d pcOffset=%d",
+ int(i), int(scriptIdx), int(pcOffset));
+ WriteScriptPc(writer, scriptIdx, pcOffset);
+ spewer.spewAndAdvance(" ");
+
+ MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
+ curPc = curTree->callerPc();
+ curTree = curTree->caller();
+ }
+ }
+
+ // Start writing runs.
+ uint32_t curNativeOffset = entry->nativeOffset.offset();
+ uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);
+
+ JitSpew(JitSpew_Profiling, " Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
+ int(curNativeOffset), int(curBytecodeOffset));
+
+ // Skip first entry because it is implicit in the header. Start at subsequent entry.
+ for (uint32_t i = 1; i < runLength; i++) {
+ MOZ_ASSERT(entry[i].tree == entry->tree);
+
+ uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
+ uint32_t nextBytecodeOffset = entry[i].tree->script()->pcToOffset(entry[i].pc);
+ MOZ_ASSERT(nextNativeOffset >= curNativeOffset);
+
+ uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
+ int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
+ MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));
+
+ JitSpew(JitSpew_Profiling, " RunEntry native: %d-%d [%d] bytecode: %d-%d [%d]",
+ int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
+ int(curBytecodeOffset), int(nextBytecodeOffset), int(bytecodeDelta));
+ WriteDelta(writer, nativeDelta, bytecodeDelta);
+
+ // Spew the bytecode in these ranges.
+ if (curBytecodeOffset < nextBytecodeOffset) {
+ JitSpewStart(JitSpew_Profiling, " OPS: ");
+ uint32_t curBc = curBytecodeOffset;
+ while (curBc < nextBytecodeOffset) {
+ jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
+#ifdef JS_JITSPEW
+ JSOp op = JSOp(*pc);
+ JitSpewCont(JitSpew_Profiling, "%s ", CodeName[op]);
+#endif
+ curBc += GetBytecodeLength(pc);
+ }
+ JitSpewFin(JitSpew_Profiling);
+ }
+ spewer.spewAndAdvance(" ");
+
+ curNativeOffset = nextNativeOffset;
+ curBytecodeOffset = nextBytecodeOffset;
+ }
+
+ if (writer.oom())
+ return false;
+
+ return true;
+}
+
+void
+JitcodeRegionEntry::unpack()
+{
+ CompactBufferReader reader(data_, end_);
+ ReadHead(reader, &nativeOffset_, &scriptDepth_);
+ MOZ_ASSERT(scriptDepth_ > 0);
+
+ scriptPcStack_ = reader.currentPosition();
+ // Skip past script/pc stack
+ for (unsigned i = 0; i < scriptDepth_; i++) {
+ uint32_t scriptIdx, pcOffset;
+ ReadScriptPc(reader, &scriptIdx, &pcOffset);
+ }
+
+ deltaRun_ = reader.currentPosition();
+}
+
+uint32_t
+JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const
+{
+ DeltaIterator iter = deltaIterator();
+ uint32_t curNativeOffset = nativeOffset();
+ uint32_t curPcOffset = startPcOffset;
+ while (iter.hasMore()) {
+ uint32_t nativeDelta;
+ int32_t pcDelta;
+ iter.readNext(&nativeDelta, &pcDelta);
+
+ // The start address of the next delta-run entry is counted towards
+ // the current delta-run entry, because return addresses should
+ // associate with the bytecode op prior (the call) not the op after.
+ if (queryNativeOffset <= curNativeOffset + nativeDelta)
+ break;
+ curNativeOffset += nativeDelta;
+ curPcOffset += pcDelta;
+ }
+ return curPcOffset;
+}
+
+typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;
+
+struct AutoFreeProfilingStrings {
+ ProfilingStringVector& profilingStrings_;
+ bool keep_;
+ explicit AutoFreeProfilingStrings(ProfilingStringVector& vec)
+ : profilingStrings_(vec),
+ keep_(false)
+ {}
+
+ void keepStrings() { keep_ = true; }
+
+ ~AutoFreeProfilingStrings() {
+ if (keep_)
+ return;
+ for (size_t i = 0; i < profilingStrings_.length(); i++)
+ js_free(profilingStrings_[i]);
+ }
+};
+
+bool
+JitcodeIonTable::makeIonEntry(JSContext* cx, JitCode* code,
+ uint32_t numScripts, JSScript** scripts,
+ JitcodeGlobalEntry::IonEntry& out)
+{
+ typedef JitcodeGlobalEntry::IonEntry::SizedScriptList SizedScriptList;
+
+ MOZ_ASSERT(numScripts > 0);
+
+ // Create profiling strings for script, within vector.
+ typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;
+
+ ProfilingStringVector profilingStrings;
+ if (!profilingStrings.reserve(numScripts))
+ return false;
+
+ AutoFreeProfilingStrings autoFreeProfilingStrings(profilingStrings);
+ for (uint32_t i = 0; i < numScripts; i++) {
+ char* str = JitcodeGlobalEntry::createScriptString(cx, scripts[i]);
+ if (!str)
+ return false;
+ if (!profilingStrings.append(str))
+ return false;
+ }
+
+ // Create SizedScriptList
+ void* mem = (void*)cx->pod_malloc<uint8_t>(SizedScriptList::AllocSizeFor(numScripts));
+ if (!mem)
+ return false;
+
+ // Keep allocated profiling strings on destruct.
+ autoFreeProfilingStrings.keepStrings();
+
+ SizedScriptList* scriptList = new (mem) SizedScriptList(numScripts, scripts,
+ &profilingStrings[0]);
+ out.init(code, code->raw(), code->rawEnd(), scriptList, this);
+ return true;
+}
+
+uint32_t
+JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const
+{
+ static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
+ uint32_t regions = numRegions();
+ MOZ_ASSERT(regions > 0);
+
+ // For small region lists, just search linearly.
+ if (regions <= LINEAR_SEARCH_THRESHOLD) {
+ JitcodeRegionEntry previousEntry = regionEntry(0);
+ for (uint32_t i = 1; i < regions; i++) {
+ JitcodeRegionEntry nextEntry = regionEntry(i);
+ MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());
+
+ // See note in binary-search code below about why we use '<=' here instead of
+ // '<'. Short explanation: regions are closed at their ending addresses,
+ // and open at their starting addresses.
+ if (nativeOffset <= nextEntry.nativeOffset())
+ return i-1;
+
+ previousEntry = nextEntry;
+ }
+ // If nothing found, assume it falls within last region.
+ return regions - 1;
+ }
+
+ // For larger ones, binary search the region table.
+ uint32_t idx = 0;
+ uint32_t count = regions;
+ while (count > 1) {
+ uint32_t step = count/2;
+ uint32_t mid = idx + step;
+ JitcodeRegionEntry midEntry = regionEntry(mid);
+
+ // A region memory range is closed at its ending address, not starting
+ // address. This is because the return address for calls must associate
+ // with the call's bytecode PC, not the PC of the bytecode operator after
+ // the call.
+ //
+ // So a query is < an entry if the query nativeOffset is <= the start address
+ // of the entry, and a query is >= an entry if the query nativeOffset is > the
+ // start address of an entry.
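+ //
+ // For example, if midEntry's region begins at native offset 0x40, a query
+ // at exactly 0x40 resolves to the preceding region (so a return address
+ // maps to the calling op), while a query at 0x41 resolves to midEntry's
+ // region.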
+ if (nativeOffset <= midEntry.nativeOffset()) {
+ // Target entry is below midEntry.
+ count = step;
+ } else { // if (nativeOffset > midEntry.nativeOffset())
+ // Target entry is at midEntry or above.
+ idx = mid;
+ count -= step;
+ }
+ }
+ return idx;
+}
+
+/* static */ bool
+JitcodeIonTable::WriteIonTable(CompactBufferWriter& writer,
+ JSScript** scriptList, uint32_t scriptListSize,
+ const CodeGeneratorShared::NativeToBytecode* start,
+ const CodeGeneratorShared::NativeToBytecode* end,
+ uint32_t* tableOffsetOut, uint32_t* numRegionsOut)
+{
+ MOZ_ASSERT(tableOffsetOut != nullptr);
+ MOZ_ASSERT(numRegionsOut != nullptr);
+ MOZ_ASSERT(writer.length() == 0);
+ MOZ_ASSERT(scriptListSize > 0);
+
+ JitSpew(JitSpew_Profiling, "Writing native to bytecode map for %s:%" PRIuSIZE " (%" PRIuSIZE " entries)",
+ scriptList[0]->filename(), scriptList[0]->lineno(),
+ mozilla::PointerRangeSize(start, end));
+
+ JitSpew(JitSpew_Profiling, " ScriptList of size %d", int(scriptListSize));
+ for (uint32_t i = 0; i < scriptListSize; i++) {
+ JitSpew(JitSpew_Profiling, " Script %d - %s:%" PRIuSIZE,
+ int(i), scriptList[i]->filename(), scriptList[i]->lineno());
+ }
+
+ // Write out runs first. Keep a vector tracking the positive offsets from payload
+ // start to the run.
+ const CodeGeneratorShared::NativeToBytecode* curEntry = start;
+ js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;
+
+ while (curEntry != end) {
+ // Calculate the length of the next run.
+ uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
+ MOZ_ASSERT(runLength > 0);
+ MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
+ JitSpew(JitSpew_Profiling, " Run at entry %d, length %d, buffer offset %d",
+ int(curEntry - start), int(runLength), int(writer.length()));
+
+ // Store the offset of the run.
+ if (!runOffsets.append(writer.length()))
+ return false;
+
+ // Encode the run.
+ if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize, runLength, curEntry))
+ return false;
+
+ curEntry += runLength;
+ }
+
+ // Done encoding regions. About to start table. Ensure we are aligned to 4 bytes
+ // since table is composed of uint32_t values.
+ uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
+ if (padding == sizeof(uint32_t))
+ padding = 0;
+ JitSpew(JitSpew_Profiling, " Padding %d bytes after run @%d",
+ int(padding), int(writer.length()));
+ for (uint32_t i = 0; i < padding; i++)
+ writer.writeByte(0);
+
+ // Now at start of table.
+ uint32_t tableOffset = writer.length();
+
+ // The table being written at this point will be accessed directly via uint32_t
+ // pointers, so all writes below use native endianness.
+
+ // Write out numRegions
+ JitSpew(JitSpew_Profiling, " Writing numRuns=%d", int(runOffsets.length()));
+ writer.writeNativeEndianUint32_t(runOffsets.length());
+
+ // Write out region offset table. The offsets in |runOffsets| are currently forward
+ // offsets from the beginning of the buffer. We convert them to backwards offsets
+ // from the start of the table before writing them into their table entries.
+ for (uint32_t i = 0; i < runOffsets.length(); i++) {
+ JitSpew(JitSpew_Profiling, " Run %d offset=%d backOffset=%d @%d",
+ int(i), int(runOffsets[i]), int(tableOffset - runOffsets[i]), int(writer.length()));
+ writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
+ }
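+
+ // The buffer now holds: [run 0] ... [run N-1] [padding] [numRegions]
+ // [backOffset 0] ... [backOffset N-1], with |tableOffset| marking where the
+ // uint32_t table section begins.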
+
+ if (writer.oom())
+ return false;
+
+ *tableOffsetOut = tableOffset;
+ *numRegionsOut = runOffsets.length();
+ return true;
+}
+
+
+} // namespace jit
+} // namespace js
+
+JS::ForEachProfiledFrameOp::FrameHandle::FrameHandle(JSRuntime* rt, js::jit::JitcodeGlobalEntry& entry,
+ void* addr, const char* label, uint32_t depth)
+ : rt_(rt),
+ entry_(entry),
+ addr_(addr),
+ canonicalAddr_(nullptr),
+ label_(label),
+ depth_(depth),
+ optsIndex_()
+{
+ updateHasTrackedOptimizations();
+
+ if (!canonicalAddr_) {
+ // If the entry has tracked optimizations, updateHasTrackedOptimizations
+ // would have updated the canonical address.
+ MOZ_ASSERT_IF(entry_.isIon(), !hasTrackedOptimizations());
+ canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
+ }
+}
+
+JS_PUBLIC_API(JS::ProfilingFrameIterator::FrameKind)
+JS::ForEachProfiledFrameOp::FrameHandle::frameKind() const
+{
+ if (entry_.isBaseline())
+ return JS::ProfilingFrameIterator::Frame_Baseline;
+ return JS::ProfilingFrameIterator::Frame_Ion;
+}
+
+JS_PUBLIC_API(void)
+JS::ForEachProfiledFrame(JSContext* cx, void* addr, ForEachProfiledFrameOp& op)
+{
+ js::jit::JitcodeGlobalTable* table = cx->jitRuntime()->getJitcodeGlobalTable();
+ js::jit::JitcodeGlobalEntry& entry = table->lookupInfallible(addr);
+
+ // Extract the stack for the entry. Assume maximum inlining depth is <64
+ const char* labels[64];
+ uint32_t depth = entry.callStackAtAddr(cx, addr, labels, 64);
+ MOZ_ASSERT(depth < 64);
+ for (uint32_t i = depth; i != 0; i--) {
+ JS::ForEachProfiledFrameOp::FrameHandle handle(cx, entry, addr, labels[i - 1], i - 1);
+ op(handle);
+ }
+}
diff --git a/js/src/jit/JitcodeMap.h b/js/src/jit/JitcodeMap.h
new file mode 100644
index 000000000..384a75410
--- /dev/null
+++ b/js/src/jit/JitcodeMap.h
@@ -0,0 +1,1493 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitcodeMap_h
+#define jit_JitcodeMap_h
+
+#include "jit/CompactBuffer.h"
+#include "jit/CompileInfo.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/OptimizationTracking.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+/*
+ * The Ion jitcode map implements tables to allow mapping from addresses in ion jitcode
+ * to the list of (JSScript*, jsbytecode*) pairs that are implicitly active in the frame at
+ * that point in the native code.
+ *
+ * To represent this information efficiently, a multi-level table is used.
+ *
+ * At the top level, a global skip-list of JitcodeGlobalEntry records describes the mapping
+ * for each individual chunk of IonCode generated by compiles. The entries are ordered by
+ * their nativeStartAddr.
+ *
+ * Every entry in the table is of fixed size, but there are different entry types,
+ * distinguished by the kind field.
+ */
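+
+// Illustrative usage sketch (mirroring JS::ForEachProfiledFrame in
+// JitcodeMap.cpp; not a prescribed API): a profiler that has sampled a native
+// return address can expand it into the inlined script stack roughly like so:
+//
+//   js::jit::JitcodeGlobalTable* table = cx->jitRuntime()->getJitcodeGlobalTable();
+//   js::jit::JitcodeGlobalEntry& entry = table->lookupInfallible(addr);
+//   const char* labels[64];
+//   uint32_t depth = entry.callStackAtAddr(cx, addr, labels, 64);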
+
+class JitcodeGlobalTable;
+class JitcodeIonTable;
+class JitcodeRegionEntry;
+
+class JitcodeGlobalEntry;
+
+class JitcodeSkiplistTower
+{
+ public:
+ static const unsigned MAX_HEIGHT = 32;
+
+ private:
+ uint8_t height_;
+ bool isFree_;
+ JitcodeGlobalEntry* ptrs_[1];
+
+ public:
+ explicit JitcodeSkiplistTower(unsigned height)
+ : height_(height),
+ isFree_(false)
+ {
+ MOZ_ASSERT(height >= 1 && height <= MAX_HEIGHT);
+ clearPtrs();
+ }
+
+ unsigned height() const {
+ return height_;
+ }
+
+ JitcodeGlobalEntry** ptrs(unsigned level) {
+ return ptrs_;
+ }
+
+ JitcodeGlobalEntry* next(unsigned level) const {
+ MOZ_ASSERT(!isFree_);
+ MOZ_ASSERT(level < height());
+ return ptrs_[level];
+ }
+ void setNext(unsigned level, JitcodeGlobalEntry* entry) {
+ MOZ_ASSERT(!isFree_);
+ MOZ_ASSERT(level < height());
+ ptrs_[level] = entry;
+ }
+
+ //
+ // When stored in a free-list, towers use 'ptrs_[0]' to store a
+ // pointer to the next tower. In this context only, 'ptrs_[0]'
+ // may refer to a |JitcodeSkiplistTower*| instead of a
+ // |JitcodeGlobalEntry*|.
+ //
+
+ void addToFreeList(JitcodeSkiplistTower** freeList) {
+ JitcodeSkiplistTower* nextFreeTower = *freeList;
+ MOZ_ASSERT_IF(nextFreeTower, nextFreeTower->isFree_ &&
+ nextFreeTower->height() == height_);
+ ptrs_[0] = (JitcodeGlobalEntry*) nextFreeTower;
+ isFree_ = true;
+ *freeList = this;
+ }
+
+ static JitcodeSkiplistTower* PopFromFreeList(JitcodeSkiplistTower** freeList) {
+ if (!*freeList)
+ return nullptr;
+
+ JitcodeSkiplistTower* tower = *freeList;
+ MOZ_ASSERT(tower->isFree_);
+ JitcodeSkiplistTower* nextFreeTower = (JitcodeSkiplistTower*) tower->ptrs_[0];
+ tower->clearPtrs();
+ tower->isFree_ = false;
+ *freeList = nextFreeTower;
+ return tower;
+ }
+
+ static size_t CalculateSize(unsigned height) {
+ MOZ_ASSERT(height >= 1);
+ return sizeof(JitcodeSkiplistTower) +
+ (sizeof(JitcodeGlobalEntry*) * (height - 1));
+ }
+
+ private:
+ void clearPtrs() {
+ for (unsigned i = 0; i < height_; i++)
+ ptrs_[i] = nullptr;
+ }
+};
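+
+// Note: towers are variable-length allocations. CalculateSize(h) accounts for
+// the single ptrs_[1] slot already embedded in the struct, so a height-3 tower,
+// for example, occupies sizeof(JitcodeSkiplistTower) plus two extra pointer
+// slots.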
+
+class JitcodeGlobalEntry
+{
+ friend class JitcodeGlobalTable;
+
+ public:
+ enum Kind {
+ INVALID = 0,
+ Ion,
+ Baseline,
+ IonCache,
+ Dummy,
+ Query,
+ LIMIT
+ };
+ JS_STATIC_ASSERT(LIMIT <= 8);
+
+ struct BytecodeLocation {
+ JSScript* script;
+ jsbytecode* pc;
+ BytecodeLocation(JSScript* script, jsbytecode* pc) : script(script), pc(pc) {}
+ };
+ typedef Vector<BytecodeLocation, 0, SystemAllocPolicy> BytecodeLocationVector;
+ typedef Vector<const char*, 0, SystemAllocPolicy> ProfileStringVector;
+
+ struct BaseEntry
+ {
+ JitCode* jitcode_;
+ void* nativeStartAddr_;
+ void* nativeEndAddr_;
+ uint32_t gen_;
+ Kind kind_ : 7;
+
+ void init() {
+ jitcode_ = nullptr;
+ nativeStartAddr_ = nullptr;
+ nativeEndAddr_ = nullptr;
+ gen_ = UINT32_MAX;
+ kind_ = INVALID;
+ }
+
+ void init(Kind kind, JitCode* code,
+ void* nativeStartAddr, void* nativeEndAddr)
+ {
+ MOZ_ASSERT_IF(kind != Query, code);
+ MOZ_ASSERT(nativeStartAddr);
+ MOZ_ASSERT(nativeEndAddr);
+ MOZ_ASSERT(kind > INVALID && kind < LIMIT);
+ jitcode_ = code;
+ nativeStartAddr_ = nativeStartAddr;
+ nativeEndAddr_ = nativeEndAddr;
+ gen_ = UINT32_MAX;
+ kind_ = kind;
+ }
+
+ uint32_t generation() const {
+ return gen_;
+ }
+ void setGeneration(uint32_t gen) {
+ gen_ = gen;
+ }
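+ // An entry counts as sampled if its generation is within lapCount
+ // generations of the current sample-buffer generation; a gen_ of
+ // UINT32_MAX (the initial/expired state) always reads as not sampled.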
+ bool isSampled(uint32_t currentGen, uint32_t lapCount) {
+ if (gen_ == UINT32_MAX || currentGen == UINT32_MAX)
+ return false;
+ MOZ_ASSERT(currentGen >= gen_);
+ return (currentGen - gen_) <= lapCount;
+ }
+
+ Kind kind() const {
+ return kind_;
+ }
+ JitCode* jitcode() const {
+ return jitcode_;
+ }
+ void* nativeStartAddr() const {
+ return nativeStartAddr_;
+ }
+ void* nativeEndAddr() const {
+ return nativeEndAddr_;
+ }
+
+ bool startsBelowPointer(void* ptr) const {
+ return ((uint8_t*)nativeStartAddr()) <= ((uint8_t*) ptr);
+ }
+ bool endsAbovePointer(void* ptr) const {
+ return ((uint8_t*)nativeEndAddr()) > ((uint8_t*) ptr);
+ }
+ bool containsPointer(void* ptr) const {
+ return startsBelowPointer(ptr) && endsAbovePointer(ptr);
+ }
+
+ template <class ShouldMarkProvider> bool markJitcode(JSTracer* trc);
+ bool isJitcodeMarkedFromAnyThread(JSRuntime* rt);
+ bool isJitcodeAboutToBeFinalized();
+ };
+
+ struct IonEntry : public BaseEntry
+ {
+ // regionTable_ points to the start of the region table within the
+ // packed map for the compile represented by this entry. Since the
+ // region table occurs at the tail of the memory region, this pointer
+ // points somewhere inside the region memory space, and not to the start
+ // of the memory space.
+ JitcodeIonTable* regionTable_;
+
+ // optsRegionTable_ points to the table within the compact
+ // optimizations map indexing all regions that have tracked
+ // optimization attempts. optsTypesTable_ is the tracked type info
+ // associated with the attempts vectors; it is the same length as the
+ // attempts table. optsAttemptsTable_ is the table indexing those
+ // attempts vectors.
+ //
+ // All pointers point into the same block of memory; the beginning of
+ // the block is optsRegionTable_->payloadStart().
+ const IonTrackedOptimizationsRegionTable* optsRegionTable_;
+ const IonTrackedOptimizationsTypesTable* optsTypesTable_;
+ const IonTrackedOptimizationsAttemptsTable* optsAttemptsTable_;
+
+ // The types table above records type sets, which have been gathered
+ // into one vector here.
+ IonTrackedTypeVector* optsAllTypes_;
+
+ struct ScriptNamePair {
+ JSScript* script;
+ char* str;
+ };
+
+ struct SizedScriptList {
+ uint32_t size;
+ ScriptNamePair pairs[1];
+ SizedScriptList(uint32_t sz, JSScript** scrs, char** strs) : size(sz) {
+ for (uint32_t i = 0; i < size; i++) {
+ pairs[i].script = scrs[i];
+ pairs[i].str = strs[i];
+ }
+ }
+
+ static uint32_t AllocSizeFor(uint32_t nscripts) {
+ return sizeof(SizedScriptList) + ((nscripts - 1) * sizeof(ScriptNamePair));
+ }
+ };
+
+ SizedScriptList* scriptList_;
+
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ SizedScriptList* scriptList, JitcodeIonTable* regionTable)
+ {
+ MOZ_ASSERT(scriptList);
+ MOZ_ASSERT(regionTable);
+ BaseEntry::init(Ion, code, nativeStartAddr, nativeEndAddr);
+ regionTable_ = regionTable;
+ scriptList_ = scriptList;
+ optsRegionTable_ = nullptr;
+ optsTypesTable_ = nullptr;
+ optsAllTypes_ = nullptr;
+ optsAttemptsTable_ = nullptr;
+ }
+
+ void initTrackedOptimizations(const IonTrackedOptimizationsRegionTable* regionTable,
+ const IonTrackedOptimizationsTypesTable* typesTable,
+ const IonTrackedOptimizationsAttemptsTable* attemptsTable,
+ IonTrackedTypeVector* allTypes)
+ {
+ optsRegionTable_ = regionTable;
+ optsTypesTable_ = typesTable;
+ optsAttemptsTable_ = attemptsTable;
+ optsAllTypes_ = allTypes;
+ }
+
+ SizedScriptList* sizedScriptList() const {
+ return scriptList_;
+ }
+
+ unsigned numScripts() const {
+ return scriptList_->size;
+ }
+
+ JSScript* getScript(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return sizedScriptList()->pairs[idx].script;
+ }
+
+ const char* getStr(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return sizedScriptList()->pairs[idx].str;
+ }
+
+ void destroy();
+
+ JitcodeIonTable* regionTable() const {
+ return regionTable_;
+ }
+
+ int scriptIndex(JSScript* script) const {
+ unsigned count = numScripts();
+ for (unsigned i = 0; i < count; i++) {
+ if (getScript(i) == script)
+ return i;
+ }
+ return -1;
+ }
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const;
+
+ MOZ_MUST_USE bool callStackAtAddr(JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ void youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const;
+
+ bool hasTrackedOptimizations() const {
+ return !!optsRegionTable_;
+ }
+
+ const IonTrackedOptimizationsRegionTable* trackedOptimizationsRegionTable() const {
+ MOZ_ASSERT(hasTrackedOptimizations());
+ return optsRegionTable_;
+ }
+
+ uint8_t numOptimizationAttempts() const {
+ MOZ_ASSERT(hasTrackedOptimizations());
+ return optsAttemptsTable_->numEntries();
+ }
+
+ IonTrackedOptimizationsAttempts trackedOptimizationAttempts(uint8_t index) {
+ MOZ_ASSERT(hasTrackedOptimizations());
+ return optsAttemptsTable_->entry(index);
+ }
+
+ IonTrackedOptimizationsTypeInfo trackedOptimizationTypeInfo(uint8_t index) {
+ MOZ_ASSERT(hasTrackedOptimizations());
+ return optsTypesTable_->entry(index);
+ }
+
+ const IonTrackedTypeVector* allTrackedTypes() {
+ MOZ_ASSERT(hasTrackedOptimizations());
+ return optsAllTypes_;
+ }
+
+ mozilla::Maybe<uint8_t> trackedOptimizationIndexAtAddr(
+ JSRuntime *rt,
+ void* ptr,
+ uint32_t* entryOffsetOut);
+
+ void forEachOptimizationAttempt(JSRuntime* rt, uint8_t index,
+ JS::ForEachTrackedOptimizationAttemptOp& op);
+ void forEachOptimizationTypeInfo(JSRuntime* rt, uint8_t index,
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op);
+
+ template <class ShouldMarkProvider> bool mark(JSTracer* trc);
+ void sweepChildren();
+ bool isMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ struct BaselineEntry : public BaseEntry
+ {
+ JSScript* script_;
+ const char* str_;
+
+ // Last location that caused Ion to abort compilation, and the reason for
+ // the abort, if any. Only actionable aborts are tracked; internal
+ // errors like OOMs are not.
+ jsbytecode* ionAbortPc_;
+ const char* ionAbortMessage_;
+
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ JSScript* script, const char* str)
+ {
+ MOZ_ASSERT(script != nullptr);
+ BaseEntry::init(Baseline, code, nativeStartAddr, nativeEndAddr);
+ script_ = script;
+ str_ = str;
+ }
+
+ JSScript* script() const {
+ return script_;
+ }
+
+ const char* str() const {
+ return str_;
+ }
+
+ void trackIonAbort(jsbytecode* pc, const char* message) {
+ MOZ_ASSERT(script_->containsPC(pc));
+ MOZ_ASSERT(message);
+ ionAbortPc_ = pc;
+ ionAbortMessage_ = message;
+ }
+
+ bool hadIonAbort() const {
+ MOZ_ASSERT(!ionAbortPc_ || ionAbortMessage_);
+ return ionAbortPc_ != nullptr;
+ }
+
+ void destroy();
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const;
+
+ MOZ_MUST_USE bool callStackAtAddr(JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ void youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const;
+
+ template <class ShouldMarkProvider> bool mark(JSTracer* trc);
+ void sweepChildren();
+ bool isMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ struct IonCacheEntry : public BaseEntry
+ {
+ void* rejoinAddr_;
+ JS::TrackedOutcome trackedOutcome_;
+
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ void* rejoinAddr, JS::TrackedOutcome trackedOutcome)
+ {
+ MOZ_ASSERT(rejoinAddr != nullptr);
+ BaseEntry::init(IonCache, code, nativeStartAddr, nativeEndAddr);
+ rejoinAddr_ = rejoinAddr;
+ trackedOutcome_ = trackedOutcome;
+ }
+
+ void* rejoinAddr() const {
+ return rejoinAddr_;
+ }
+ JS::TrackedOutcome trackedOutcome() const {
+ return trackedOutcome_;
+ }
+
+ void destroy() {}
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const;
+
+ MOZ_MUST_USE bool callStackAtAddr(JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ void youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const;
+
+ bool hasTrackedOptimizations() const { return true; }
+ mozilla::Maybe<uint8_t> trackedOptimizationIndexAtAddr(
+ JSRuntime *rt,
+ void* ptr,
+ uint32_t* entryOffsetOut);
+ void forEachOptimizationAttempt(JSRuntime* rt, uint8_t index,
+ JS::ForEachTrackedOptimizationAttemptOp& op);
+ void forEachOptimizationTypeInfo(JSRuntime* rt, uint8_t index,
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op);
+
+ template <class ShouldMarkProvider> bool mark(JSTracer* trc);
+ void sweepChildren(JSRuntime* rt);
+ bool isMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ // Dummy entries are created for jitcode generated when profiling is not turned on,
+ // so that the code still has a representation in the global table if it is on the
+ // stack when profiling is later enabled.
+ struct DummyEntry : public BaseEntry
+ {
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr) {
+ BaseEntry::init(Dummy, code, nativeStartAddr, nativeEndAddr);
+ }
+
+ void destroy() {}
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const {
+ return nullptr;
+ }
+
+ MOZ_MUST_USE bool callStackAtAddr(JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const
+ {
+ return true;
+ }
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const
+ {
+ return 0;
+ }
+
+ void youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const
+ {
+ *script = nullptr;
+ *pc = nullptr;
+ }
+ };
+
+ // QueryEntry is never stored in the table, just used for queries
+ // where an instance of JitcodeGlobalEntry is required to do tree
+ // lookups.
+ struct QueryEntry : public BaseEntry
+ {
+ void init(void* addr) {
+ BaseEntry::init(Query, nullptr, addr, addr);
+ }
+ uint8_t* addr() const {
+ return reinterpret_cast<uint8_t*>(nativeStartAddr());
+ }
+ void destroy() {}
+ };
+
+ private:
+ JitcodeSkiplistTower* tower_;
+
+ union {
+ // Shadowing BaseEntry instance to allow access to base fields
+ // and type extraction.
+ BaseEntry base_;
+
+ // The most common entry type: describing jitcode generated by
+ // Ion main-line code.
+ IonEntry ion_;
+
+ // Baseline jitcode.
+ BaselineEntry baseline_;
+
+ // IonCache stubs.
+ IonCacheEntry ionCache_;
+
+ // Dummy entries.
+ DummyEntry dummy_;
+
+ // When doing queries on the skip-list for particular addresses,
+ // the query addresses are represented using a QueryEntry.
+ QueryEntry query_;
+ };
+
+ public:
+ JitcodeGlobalEntry()
+ : tower_(nullptr)
+ {
+ base_.init();
+ }
+
+ explicit JitcodeGlobalEntry(const IonEntry& ion)
+ : tower_(nullptr)
+ {
+ ion_ = ion;
+ }
+
+ explicit JitcodeGlobalEntry(const BaselineEntry& baseline)
+ : tower_(nullptr)
+ {
+ baseline_ = baseline;
+ }
+
+ explicit JitcodeGlobalEntry(const IonCacheEntry& ionCache)
+ : tower_(nullptr)
+ {
+ ionCache_ = ionCache;
+ }
+
+ explicit JitcodeGlobalEntry(const DummyEntry& dummy)
+ : tower_(nullptr)
+ {
+ dummy_ = dummy;
+ }
+
+ explicit JitcodeGlobalEntry(const QueryEntry& query)
+ : tower_(nullptr)
+ {
+ query_ = query;
+ }
+
+ static JitcodeGlobalEntry MakeQuery(void* ptr) {
+ QueryEntry query;
+ query.init(ptr);
+ return JitcodeGlobalEntry(query);
+ }
+
+ void destroy() {
+ switch (kind()) {
+ case Ion:
+ ionEntry().destroy();
+ break;
+ case Baseline:
+ baselineEntry().destroy();
+ break;
+ case IonCache:
+ ionCacheEntry().destroy();
+ break;
+ case Dummy:
+ dummyEntry().destroy();
+ break;
+ case Query:
+ queryEntry().destroy();
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ JitCode* jitcode() const {
+ return baseEntry().jitcode();
+ }
+ void* nativeStartAddr() const {
+ return base_.nativeStartAddr();
+ }
+ void* nativeEndAddr() const {
+ return base_.nativeEndAddr();
+ }
+
+ uint32_t generation() const {
+ return baseEntry().generation();
+ }
+ void setGeneration(uint32_t gen) {
+ baseEntry().setGeneration(gen);
+ }
+ void setAsExpired() {
+ baseEntry().setGeneration(UINT32_MAX);
+ }
+ bool isSampled(uint32_t currentGen, uint32_t lapCount) {
+ return baseEntry().isSampled(currentGen, lapCount);
+ }
+
+ bool startsBelowPointer(void* ptr) const {
+ return base_.startsBelowPointer(ptr);
+ }
+ bool endsAbovePointer(void* ptr) const {
+ return base_.endsAbovePointer(ptr);
+ }
+ bool containsPointer(void* ptr) const {
+ return base_.containsPointer(ptr);
+ }
+
+ bool overlapsWith(const JitcodeGlobalEntry& entry) const {
+ // Catch full containment of |entry| within |this|, and partial overlaps.
+ if (containsPointer(entry.nativeStartAddr()) || containsPointer(entry.nativeEndAddr()))
+ return true;
+
+ // Catch full containment of |this| within |entry|.
+ if (startsBelowPointer(entry.nativeEndAddr()) && endsAbovePointer(entry.nativeStartAddr()))
+ return true;
+
+ return false;
+ }
+
+ Kind kind() const {
+ return base_.kind();
+ }
+
+ bool isValid() const {
+ return (kind() > INVALID) && (kind() < LIMIT);
+ }
+ bool isIon() const {
+ return kind() == Ion;
+ }
+ bool isBaseline() const {
+ return kind() == Baseline;
+ }
+ bool isIonCache() const {
+ return kind() == IonCache;
+ }
+ bool isDummy() const {
+ return kind() == Dummy;
+ }
+ bool isQuery() const {
+ return kind() == Query;
+ }
+
+ BaseEntry& baseEntry() {
+ MOZ_ASSERT(isValid());
+ return base_;
+ }
+ IonEntry& ionEntry() {
+ MOZ_ASSERT(isIon());
+ return ion_;
+ }
+ BaselineEntry& baselineEntry() {
+ MOZ_ASSERT(isBaseline());
+ return baseline_;
+ }
+ IonCacheEntry& ionCacheEntry() {
+ MOZ_ASSERT(isIonCache());
+ return ionCache_;
+ }
+ DummyEntry& dummyEntry() {
+ MOZ_ASSERT(isDummy());
+ return dummy_;
+ }
+ QueryEntry& queryEntry() {
+ MOZ_ASSERT(isQuery());
+ return query_;
+ }
+
+ const BaseEntry& baseEntry() const {
+ MOZ_ASSERT(isValid());
+ return base_;
+ }
+ const IonEntry& ionEntry() const {
+ MOZ_ASSERT(isIon());
+ return ion_;
+ }
+ const BaselineEntry& baselineEntry() const {
+ MOZ_ASSERT(isBaseline());
+ return baseline_;
+ }
+ const IonCacheEntry& ionCacheEntry() const {
+ MOZ_ASSERT(isIonCache());
+ return ionCache_;
+ }
+ const DummyEntry& dummyEntry() const {
+ MOZ_ASSERT(isDummy());
+ return dummy_;
+ }
+ const QueryEntry& queryEntry() const {
+ MOZ_ASSERT(isQuery());
+ return query_;
+ }
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().canonicalNativeAddrFor(rt, ptr);
+ case Baseline:
+ return baselineEntry().canonicalNativeAddrFor(rt, ptr);
+ case IonCache:
+ return ionCacheEntry().canonicalNativeAddrFor(rt, ptr);
+ case Dummy:
+ return dummyEntry().canonicalNativeAddrFor(rt, ptr);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return nullptr;
+ }
+
+ // Read the inline call stack at a given point in the native code and append into
+ // the given vector. Innermost (script,pc) pair will be appended first, and
+ // outermost appended last.
+ //
+ // Returns false on memory failure.
+ MOZ_MUST_USE bool callStackAtAddr(JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const
+ {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().callStackAtAddr(rt, ptr, results, depth);
+ case Baseline:
+ return baselineEntry().callStackAtAddr(rt, ptr, results, depth);
+ case IonCache:
+ return ionCacheEntry().callStackAtAddr(rt, ptr, results, depth);
+ case Dummy:
+ return dummyEntry().callStackAtAddr(rt, ptr, results, depth);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return false;
+ }
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const
+ {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().callStackAtAddr(rt, ptr, results, maxResults);
+ case Baseline:
+ return baselineEntry().callStackAtAddr(rt, ptr, results, maxResults);
+ case IonCache:
+ return ionCacheEntry().callStackAtAddr(rt, ptr, results, maxResults);
+ case Dummy:
+ return dummyEntry().callStackAtAddr(rt, ptr, results, maxResults);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return 0;
+ }
+
+ void youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
+ JSScript** script, jsbytecode** pc) const
+ {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().youngestFrameLocationAtAddr(rt, ptr, script, pc);
+ case Baseline:
+ return baselineEntry().youngestFrameLocationAtAddr(rt, ptr, script, pc);
+ case IonCache:
+ return ionCacheEntry().youngestFrameLocationAtAddr(rt, ptr, script, pc);
+ case Dummy:
+ return dummyEntry().youngestFrameLocationAtAddr(rt, ptr, script, pc);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ // Figure out the number of (JSScript*, jsbytecode*) pairs that are active
+ // at this location.
+ uint32_t lookupInlineCallDepth(void* ptr);
+
+ // Compare two global entries.
+ static int compare(const JitcodeGlobalEntry& ent1, const JitcodeGlobalEntry& ent2);
+ int compareTo(const JitcodeGlobalEntry& other) {
+ return compare(*this, other);
+ }
+
+ // Compute a profiling string for a given script.
+ static char* createScriptString(JSContext* cx, JSScript* script, size_t* length=nullptr);
+
+ bool hasTrackedOptimizations() const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().hasTrackedOptimizations();
+ case IonCache:
+ return ionCacheEntry().hasTrackedOptimizations();
+ case Baseline:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return false;
+ }
+
+ mozilla::Maybe<uint8_t> trackedOptimizationIndexAtAddr(
+ JSRuntime *rt,
+ void* addr,
+ uint32_t* entryOffsetOut)
+ {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().trackedOptimizationIndexAtAddr(rt, addr, entryOffsetOut);
+ case IonCache:
+ return ionCacheEntry().trackedOptimizationIndexAtAddr(rt, addr, entryOffsetOut);
+ case Baseline:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return mozilla::Nothing();
+ }
+
+ void forEachOptimizationAttempt(JSRuntime* rt, uint8_t index,
+ JS::ForEachTrackedOptimizationAttemptOp& op)
+ {
+ switch (kind()) {
+ case Ion:
+ ionEntry().forEachOptimizationAttempt(rt, index, op);
+ return;
+ case IonCache:
+ ionCacheEntry().forEachOptimizationAttempt(rt, index, op);
+ return;
+ case Baseline:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ void forEachOptimizationTypeInfo(JSRuntime* rt, uint8_t index,
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
+ {
+ switch (kind()) {
+ case Ion:
+ ionEntry().forEachOptimizationTypeInfo(rt, index, op);
+ return;
+ case IonCache:
+ ionCacheEntry().forEachOptimizationTypeInfo(rt, index, op);
+ return;
+ case Baseline:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ IonTrackedOptimizationsAttempts trackedOptimizationAttempts(uint8_t index) {
+ return ionEntry().trackedOptimizationAttempts(index);
+ }
+
+ IonTrackedOptimizationsTypeInfo trackedOptimizationTypeInfo(uint8_t index) {
+ return ionEntry().trackedOptimizationTypeInfo(index);
+ }
+
+ const IonTrackedTypeVector* allTrackedTypes() {
+ return ionEntry().allTrackedTypes();
+ }
+
+ Zone* zone() {
+ return baseEntry().jitcode()->zone();
+ }
+
+ template <class ShouldMarkProvider>
+ bool mark(JSTracer* trc) {
+ bool markedAny = baseEntry().markJitcode<ShouldMarkProvider>(trc);
+ switch (kind()) {
+ case Ion:
+ markedAny |= ionEntry().mark<ShouldMarkProvider>(trc);
+ break;
+ case Baseline:
+ markedAny |= baselineEntry().mark<ShouldMarkProvider>(trc);
+ break;
+ case IonCache:
+ markedAny |= ionCacheEntry().mark<ShouldMarkProvider>(trc);
+ break;
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return markedAny;
+ }
+
+ void sweepChildren(JSRuntime* rt) {
+ switch (kind()) {
+ case Ion:
+ ionEntry().sweepChildren();
+ break;
+ case Baseline:
+ baselineEntry().sweepChildren();
+ break;
+ case IonCache:
+ ionCacheEntry().sweepChildren(rt);
+ break;
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ bool isMarkedFromAnyThread(JSRuntime* rt) {
+ if (!baseEntry().isJitcodeMarkedFromAnyThread(rt))
+ return false;
+ switch (kind()) {
+ case Ion:
+ return ionEntry().isMarkedFromAnyThread(rt);
+ case Baseline:
+ return baselineEntry().isMarkedFromAnyThread(rt);
+ case IonCache:
+ return ionCacheEntry().isMarkedFromAnyThread(rt);
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return true;
+ }
+
+ //
+ // When stored in a free-list, entries use 'tower_' to store a
+ // pointer to the next entry. In this context only, 'tower_'
+ // may refer to a |JitcodeGlobalEntry*| instead of a
+ // |JitcodeSkiplistTower*|.
+ //
+
+ void addToFreeList(JitcodeGlobalEntry** freeList) {
+ MOZ_ASSERT(!isValid());
+
+ JitcodeGlobalEntry* nextFreeEntry = *freeList;
+ MOZ_ASSERT_IF(nextFreeEntry, !nextFreeEntry->isValid());
+
+ tower_ = (JitcodeSkiplistTower*) nextFreeEntry;
+ *freeList = this;
+ }
+
+ static JitcodeGlobalEntry* PopFromFreeList(JitcodeGlobalEntry** freeList) {
+ if (!*freeList)
+ return nullptr;
+
+ JitcodeGlobalEntry* entry = *freeList;
+ MOZ_ASSERT(!entry->isValid());
+ JitcodeGlobalEntry* nextFreeEntry = (JitcodeGlobalEntry*) entry->tower_;
+ entry->tower_ = nullptr;
+ *freeList = nextFreeEntry;
+ return entry;
+ }
+};
+
+/*
+ * Global table of JitcodeGlobalEntry values sorted by native address range.
+ */
+class JitcodeGlobalTable
+{
+ private:
+ static const size_t LIFO_CHUNK_SIZE = 16 * 1024;
+
+ LifoAlloc alloc_;
+ JitcodeGlobalEntry* freeEntries_;
+ uint32_t rand_;
+ uint32_t skiplistSize_;
+
+ JitcodeGlobalEntry* startTower_[JitcodeSkiplistTower::MAX_HEIGHT];
+ JitcodeSkiplistTower* freeTowers_[JitcodeSkiplistTower::MAX_HEIGHT];
+
+ public:
+ JitcodeGlobalTable()
+ : alloc_(LIFO_CHUNK_SIZE), freeEntries_(nullptr), rand_(0), skiplistSize_(0)
+ {
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
+ startTower_[i] = nullptr;
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
+ freeTowers_[i] = nullptr;
+ }
+ ~JitcodeGlobalTable() {}
+
+ bool empty() const {
+ return skiplistSize_ == 0;
+ }
+
+ const JitcodeGlobalEntry* lookup(void* ptr) {
+ return lookupInternal(ptr);
+ }
+
+ JitcodeGlobalEntry& lookupInfallible(void* ptr) {
+ JitcodeGlobalEntry* entry = lookupInternal(ptr);
+ MOZ_ASSERT(entry);
+ return *entry;
+ }
+
+ const JitcodeGlobalEntry& lookupForSamplerInfallible(void* ptr, JSRuntime* rt,
+ uint32_t sampleBufferGen);
+
+ MOZ_MUST_USE bool addEntry(const JitcodeGlobalEntry::IonEntry& entry, JSRuntime* rt) {
+ return addEntry(JitcodeGlobalEntry(entry), rt);
+ }
+ MOZ_MUST_USE bool addEntry(const JitcodeGlobalEntry::BaselineEntry& entry, JSRuntime* rt) {
+ return addEntry(JitcodeGlobalEntry(entry), rt);
+ }
+ MOZ_MUST_USE bool addEntry(const JitcodeGlobalEntry::IonCacheEntry& entry, JSRuntime* rt) {
+ return addEntry(JitcodeGlobalEntry(entry), rt);
+ }
+ MOZ_MUST_USE bool addEntry(const JitcodeGlobalEntry::DummyEntry& entry, JSRuntime* rt) {
+ return addEntry(JitcodeGlobalEntry(entry), rt);
+ }
+
+ void removeEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower, JSRuntime* rt);
+ void releaseEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower, JSRuntime* rt);
+
+ void setAllEntriesAsExpired(JSRuntime* rt);
+ void markUnconditionally(JSTracer* trc);
+ MOZ_MUST_USE bool markIteratively(JSTracer* trc);
+ void sweep(JSRuntime* rt);
+
+ private:
+ MOZ_MUST_USE bool addEntry(const JitcodeGlobalEntry& entry, JSRuntime* rt);
+
+ JitcodeGlobalEntry* lookupInternal(void* ptr);
+
+ // Initialize towerOut such that towerOut[i] (for i in [0, MAX_HEIGHT-1])
+ // is a JitcodeGlobalEntry that sorts to be < query, and whose successor at
+ // level i is either null or sorts to be >= query.
+ //
+ // If no entry with the given properties exists for level i, then
+ // towerOut[i] is initialized to nullptr.
+ void searchInternal(const JitcodeGlobalEntry& query, JitcodeGlobalEntry** towerOut);
+
+ JitcodeGlobalEntry* searchAtHeight(unsigned level, JitcodeGlobalEntry* start,
+ const JitcodeGlobalEntry& query);
+
+ // Calculate next random tower height.
+ unsigned generateTowerHeight();
+
+ JitcodeSkiplistTower* allocateTower(unsigned height);
+ JitcodeGlobalEntry* allocateEntry();
+
+#ifdef DEBUG
+ void verifySkiplist();
+#else
+ void verifySkiplist() {}
+#endif
+
+ public:
+ class Range
+ {
+ protected:
+ JitcodeGlobalTable& table_;
+ JitcodeGlobalEntry* cur_;
+
+ public:
+ explicit Range(JitcodeGlobalTable& table)
+ : table_(table),
+ cur_(table.startTower_[0])
+ { }
+
+ JitcodeGlobalEntry* front() const {
+ MOZ_ASSERT(!empty());
+ return cur_;
+ }
+
+ bool empty() const {
+ return !cur_;
+ }
+
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ cur_ = cur_->tower_->next(0);
+ }
+ };
+
+ // An enumerator class that can remove entries as it enumerates. If this
+ // functionality is not needed, use Range instead.
+ class Enum : public Range
+ {
+ JSRuntime* rt_;
+ JitcodeGlobalEntry* next_;
+ JitcodeGlobalEntry* prevTower_[JitcodeSkiplistTower::MAX_HEIGHT];
+
+ public:
+ Enum(JitcodeGlobalTable& table, JSRuntime* rt);
+
+ void popFront();
+ void removeFront();
+ };
+};
+
+
+/*
+ * Container class for main jitcode table.
+ * The Region table's memory is structured as follows:
+ *
+ * +------------------------------------------------+ |
+ * | Region 1 Run | |
+ * |------------------------------------------------| |
+ * | Region 2 Run | |
+ * | | |
+ * | | |
+ * |------------------------------------------------| |
+ * | Region 3 Run | |
+ * | | |
+ * |------------------------------------------------| |-- Payload
+ * | | |
+ * | ... | |
+ * | | |
+ * |------------------------------------------------| |
+ * | Region M Run | |
+ * | | |
+ * +================================================+ <- RegionTable pointer points here
+ * | uint32_t numRegions = M | |
+ * +------------------------------------------------+ |
+ * | Region 1 | |
+ * | uint32_t entryOffset = size(Payload) | |
+ * +------------------------------------------------+ |
+ * | | |-- Table
+ * | ... | |
+ * | | |
+ * +------------------------------------------------+ |
+ * | Region M | |
+ * | uint32_t entryOffset | |
+ * +------------------------------------------------+ |
+ *
+ * The region table is composed of two sections: a tail section that contains a table of
+ * fixed-size entries holding offsets into the head section, and a head section that
+ * holds a sequence of variable-sized runs. The table in the tail section serves to
+ * locate the variable-length encoded structures in the head section.
+ *
+ * The entryOffsets in the table indicate the byte offset to subtract from the regionTable
+ * pointer to arrive at the encoded region in the payload.
+ *
+ *
+ * Variable-length entries in payload
+ * ----------------------------------
+ * The entryOffsets in the region table's fixed-sized entries refer to a location within the
+ * variable-length payload section. This location contains a compactly encoded "run" of
+ * mappings.
+ *
+ * Each run starts by describing its starting offset within the native code, and the
+ * sequence of (JSScript*, jsbytecode*) pairs active at that site. Following that, there
+ * are a number of variable-length entries encoding (nativeOffsetDelta, bytecodeOffsetDelta)
+ * pairs for the run.
+ *
+ * VarUint32 nativeOffset;
+ * - The offset from nativeStartAddr in the global table entry at which
+ * the jitcode for this region starts.
+ *
+ * Uint8_t scriptDepth;
+ * - The depth of inlined scripts for this region.
+ *
+ * List<VarUint32> inlineScriptPcStack;
+ * - We encode (2 * scriptDepth) VarUint32s here. Each pair of uint32s is taken
+ * as an index into the scriptList in the global table entry and a pcOffset,
+ * respectively.
+ *
+ * List<NativeAndBytecodeDelta> deltaRun;
+ * - The rest of the entry is a deltaRun that stores a series of variable-length
+ * encoded NativeAndBytecodeDelta datums.
+ */
+class JitcodeRegionEntry
+{
+ private:
+ static const unsigned MAX_RUN_LENGTH = 100;
+
+ public:
+ static void WriteHead(CompactBufferWriter& writer,
+ uint32_t nativeOffset, uint8_t scriptDepth);
+ static void ReadHead(CompactBufferReader& reader,
+ uint32_t* nativeOffset, uint8_t* scriptDepth);
+
+ static void WriteScriptPc(CompactBufferWriter& writer, uint32_t scriptIdx, uint32_t pcOffset);
+ static void ReadScriptPc(CompactBufferReader& reader, uint32_t* scriptIdx, uint32_t* pcOffset);
+
+ static void WriteDelta(CompactBufferWriter& writer, uint32_t nativeDelta, int32_t pcDelta);
+ static void ReadDelta(CompactBufferReader& reader, uint32_t* nativeDelta, int32_t* pcDelta);
+
+ // Given a pointer into an array of NativeToBytecode (and a pointer to the end of the array),
+ // compute the number of entries that would be consumed by outputting a run starting
+ // at this one.
+ static uint32_t ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode* entry,
+ const CodeGeneratorShared::NativeToBytecode* end);
+
+ // Write a run, starting at the given NativeToBytecode entry, into the given buffer writer.
+ static MOZ_MUST_USE bool WriteRun(CompactBufferWriter& writer, JSScript** scriptList,
+ uint32_t scriptListSize, uint32_t runLength,
+ const CodeGeneratorShared::NativeToBytecode* entry);
+
+ // Delta Run entry formats are encoded little-endian:
+ //
+ // byte 0
+ // NNNN-BBB0
+ // Single-byte format. nativeDelta in [0, 15], pcDelta in [0, 7]
+ //
+ static const uint32_t ENC1_MASK = 0x1;
+ static const uint32_t ENC1_MASK_VAL = 0x0;
+
+ static const uint32_t ENC1_NATIVE_DELTA_MAX = 0xf;
+ static const unsigned ENC1_NATIVE_DELTA_SHIFT = 4;
+
+ static const uint32_t ENC1_PC_DELTA_MASK = 0x0e;
+ static const int32_t ENC1_PC_DELTA_MAX = 0x7;
+ static const unsigned ENC1_PC_DELTA_SHIFT = 1;
+
+ // byte 1 byte 0
+ // NNNN-NNNN BBBB-BB01
+ // Two-byte format. nativeDelta in [0, 255], pcDelta in [0, 63]
+ //
+ static const uint32_t ENC2_MASK = 0x3;
+ static const uint32_t ENC2_MASK_VAL = 0x1;
+
+ static const uint32_t ENC2_NATIVE_DELTA_MAX = 0xff;
+ static const unsigned ENC2_NATIVE_DELTA_SHIFT = 8;
+
+ static const uint32_t ENC2_PC_DELTA_MASK = 0x00fc;
+ static const int32_t ENC2_PC_DELTA_MAX = 0x3f;
+ static const unsigned ENC2_PC_DELTA_SHIFT = 2;
+
+ // byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ // Three-byte format. nativeDelta in [0, 2047], pcDelta in [-512, 511]
+ //
+ static const uint32_t ENC3_MASK = 0x7;
+ static const uint32_t ENC3_MASK_VAL = 0x3;
+
+ static const uint32_t ENC3_NATIVE_DELTA_MAX = 0x7ff;
+ static const unsigned ENC3_NATIVE_DELTA_SHIFT = 13;
+
+ static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
+ static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
+ static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
+ static const unsigned ENC3_PC_DELTA_SHIFT = 3;
+
+ // byte 3 byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ // Four-byte format. nativeDelta in [0, 65535], pcDelta in [-4096, 4095]
+ static const uint32_t ENC4_MASK = 0x7;
+ static const uint32_t ENC4_MASK_VAL = 0x7;
+
+ static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
+ static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;
+
+ static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
+ static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
+ static const int32_t ENC4_PC_DELTA_MIN = -ENC4_PC_DELTA_MAX - 1;
+ static const unsigned ENC4_PC_DELTA_SHIFT = 3;
+
+ static bool IsDeltaEncodeable(uint32_t nativeDelta, int32_t pcDelta) {
+ return (nativeDelta <= ENC4_NATIVE_DELTA_MAX) &&
+ (pcDelta >= ENC4_PC_DELTA_MIN) && (pcDelta <= ENC4_PC_DELTA_MAX);
+ }
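+
+ // Editor's sketch (not part of the original patch): how a pair that fits the
+ // two-byte form above would be packed, low bits first: the 01 tag, then the
+ // pc delta, then the native delta. 'ExampleEncodeTwoByteDelta' is a
+ // hypothetical helper shown only to make the bit layout concrete; the real
+ // encoder is WriteDelta().
+ static uint16_t ExampleEncodeTwoByteDelta(uint32_t nativeDelta, int32_t pcDelta) {
+     MOZ_ASSERT(nativeDelta <= ENC2_NATIVE_DELTA_MAX);
+     MOZ_ASSERT(pcDelta >= 0 && pcDelta <= ENC2_PC_DELTA_MAX);
+     // byte 0: BBBB-BB01, byte 1: NNNN-NNNN (stored little-endian).
+     return uint16_t((nativeDelta << ENC2_NATIVE_DELTA_SHIFT) |
+                     (uint32_t(pcDelta) << ENC2_PC_DELTA_SHIFT) |
+                     ENC2_MASK_VAL);
+ }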
+
+ private:
+ const uint8_t* data_;
+ const uint8_t* end_;
+
+ // Unpacked state from jitcode entry.
+ uint32_t nativeOffset_;
+ uint8_t scriptDepth_;
+ const uint8_t* scriptPcStack_;
+ const uint8_t* deltaRun_;
+
+ void unpack();
+
+ public:
+ JitcodeRegionEntry(const uint8_t* data, const uint8_t* end)
+ : data_(data), end_(end),
+ nativeOffset_(0), scriptDepth_(0),
+ scriptPcStack_(nullptr), deltaRun_(nullptr)
+ {
+ MOZ_ASSERT(data_ < end_);
+ unpack();
+ MOZ_ASSERT(scriptPcStack_ < end_);
+ MOZ_ASSERT(deltaRun_ <= end_);
+ }
+
+ uint32_t nativeOffset() const {
+ return nativeOffset_;
+ }
+ uint32_t scriptDepth() const {
+ return scriptDepth_;
+ }
+
+ class ScriptPcIterator
+ {
+ private:
+ uint32_t count_;
+ const uint8_t* start_;
+ const uint8_t* end_;
+
+ uint32_t idx_;
+ const uint8_t* cur_;
+
+ public:
+ ScriptPcIterator(uint32_t count, const uint8_t* start, const uint8_t* end)
+ : count_(count), start_(start), end_(end), idx_(0), cur_(start_)
+ {}
+
+ bool hasMore() const
+ {
+ MOZ_ASSERT((idx_ == count_) == (cur_ == end_));
+ MOZ_ASSERT((idx_ < count_) == (cur_ < end_));
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* scriptIdxOut, uint32_t* pcOffsetOut)
+ {
+ MOZ_ASSERT(scriptIdxOut);
+ MOZ_ASSERT(pcOffsetOut);
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadScriptPc(reader, scriptIdxOut, pcOffsetOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+
+ idx_++;
+ MOZ_ASSERT_IF(idx_ == count_, cur_ == end_);
+ }
+
+ void reset() {
+ idx_ = 0;
+ cur_ = start_;
+ }
+ };
+
+ ScriptPcIterator scriptPcIterator() const {
+ // End of script+pc sequence is the start of the delta run.
+ return ScriptPcIterator(scriptDepth_, scriptPcStack_, deltaRun_);
+ }
+
+ class DeltaIterator {
+ private:
+ const uint8_t* start_;
+ const uint8_t* end_;
+ const uint8_t* cur_;
+
+ public:
+ DeltaIterator(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end), cur_(start)
+ {}
+
+ bool hasMore() const
+ {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* nativeDeltaOut, int32_t* pcDeltaOut)
+ {
+ MOZ_ASSERT(nativeDeltaOut != nullptr);
+ MOZ_ASSERT(pcDeltaOut != nullptr);
+
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadDelta(reader, nativeDeltaOut, pcDeltaOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+ }
+
+ void reset() {
+ cur_ = start_;
+ }
+ };
+ DeltaIterator deltaIterator() const {
+ return DeltaIterator(deltaRun_, end_);
+ }
+
+ uint32_t findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const;
+};
+
+class JitcodeIonTable
+{
+ private:
+ /* Variable length payload section "below" here. */
+ uint32_t numRegions_;
+ uint32_t regionOffsets_[1];
+
+ const uint8_t* payloadEnd() const {
+ return reinterpret_cast<const uint8_t*>(this);
+ }
+
+ public:
+ explicit JitcodeIonTable(uint32_t numRegions)
+ : numRegions_(numRegions)
+ {
+ for (uint32_t i = 0; i < numRegions; i++)
+ regionOffsets_[i] = 0;
+ }
+
+ MOZ_MUST_USE bool makeIonEntry(JSContext* cx, JitCode* code, uint32_t numScripts,
+ JSScript** scripts, JitcodeGlobalEntry::IonEntry& out);
+
+ uint32_t numRegions() const {
+ return numRegions_;
+ }
+
+ uint32_t regionOffset(uint32_t regionIndex) const {
+ MOZ_ASSERT(regionIndex < numRegions());
+ return regionOffsets_[regionIndex];
+ }
+
+ JitcodeRegionEntry regionEntry(uint32_t regionIndex) const {
+ const uint8_t* regionStart = payloadEnd() - regionOffset(regionIndex);
+ const uint8_t* regionEnd = payloadEnd();
+ if (regionIndex < numRegions_ - 1)
+ regionEnd -= regionOffset(regionIndex + 1);
+ return JitcodeRegionEntry(regionStart, regionEnd);
+ }
+
+ bool regionContainsOffset(uint32_t regionIndex, uint32_t nativeOffset) {
+ MOZ_ASSERT(regionIndex < numRegions());
+
+ JitcodeRegionEntry ent = regionEntry(regionIndex);
+ if (nativeOffset < ent.nativeOffset())
+ return false;
+
+ if (regionIndex == numRegions_ - 1)
+ return true;
+
+ return nativeOffset < regionEntry(regionIndex + 1).nativeOffset();
+ }
+
+ uint32_t findRegionEntry(uint32_t offset) const;
+
+ const uint8_t* payloadStart() const {
+ // The beginning of the payload and the beginning of the first region are the same.
+ return payloadEnd() - regionOffset(0);
+ }
+
+ static MOZ_MUST_USE bool WriteIonTable(CompactBufferWriter& writer,
+ JSScript** scriptList, uint32_t scriptListSize,
+ const CodeGeneratorShared::NativeToBytecode* start,
+ const CodeGeneratorShared::NativeToBytecode* end,
+ uint32_t* tableOffsetOut, uint32_t* numRegionsOut);
+};
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitcodeMap_h */
diff --git a/js/src/jit/LICM.cpp b/js/src/jit/LICM.cpp
new file mode 100644
index 000000000..d661a1c7d
--- /dev/null
+++ b/js/src/jit/LICM.cpp
@@ -0,0 +1,272 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/LICM.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Test whether any instruction in the loop possiblyCalls().
+static bool
+LoopContainsPossibleCall(MIRGraph& graph, MBasicBlock* header, MBasicBlock* backedge)
+{
+ for (auto i(graph.rpoBegin(header)); ; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
+ MBasicBlock* block = *i;
+ if (!block->isMarked())
+ continue;
+
+ for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ++insIter) {
+ MInstruction* ins = *insIter;
+ if (ins->possiblyCalls()) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Possible call found at %s%u", ins->opName(), ins->id());
+#endif
+ return true;
+ }
+ }
+
+ if (block == backedge)
+ break;
+ }
+ return false;
+}
+
+// When a nested loop has no exits back into what would be its parent loop,
+// MarkLoopBlocks on the parent loop doesn't mark the blocks of the nested
+// loop, since they technically aren't part of the loop. However, AliasAnalysis
+// currently does consider such nested loops to be part of their parent
+// loops. Consequently, we can't use IsInLoop on dependency() values; we must
+// test whether a dependency() is *before* the loop, even if it is not
+// technically in the loop.
+static bool
+IsBeforeLoop(MDefinition* ins, MBasicBlock* header)
+{
+ return ins->block()->id() < header->id();
+}
+
+// Test whether the given instruction is inside the loop (and thus not
+// loop-invariant).
+static bool
+IsInLoop(MDefinition* ins)
+{
+ return ins->block()->isMarked();
+}
+
+// Test whether the given instruction is cheap and not worth hoisting unless
+// one of its users will be hoisted as well.
+static bool
+RequiresHoistedUse(const MDefinition* ins, bool hasCalls)
+{
+ if (ins->isConstantElements())
+ return true;
+
+ if (ins->isBox()) {
+ MOZ_ASSERT(!ins->toBox()->input()->isBox(),
+ "Box of a box could lead to unbounded recursion");
+ return true;
+ }
+
+ // Integer constants are usually cheap and aren't worth hoisting on their
+ // own, in general. Floating-point constants typically are worth hoisting,
+ // unless they'll end up being spilled (e.g. due to a call).
+ if (ins->isConstant() && (!IsFloatingPointType(ins->type()) || hasCalls))
+ return true;
+
+ return false;
+}
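+
+// Editor's note (not in the original patch): for example, a cheap integer
+// MConstant used as the index of a hoistable load is left in place by
+// VisitLoopBlock() and only pulled out by MoveDeferredOperands() once the
+// load itself is hoisted.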
+
+// Test whether the given instruction has any operands defined within the loop.
+static bool
+HasOperandInLoop(MInstruction* ins, bool hasCalls)
+{
+ // An instruction is only loop invariant if it and all of its operands can
+ // be safely hoisted into the loop preheader.
+ for (size_t i = 0, e = ins->numOperands(); i != e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+
+ if (!IsInLoop(op))
+ continue;
+
+ if (RequiresHoistedUse(op, hasCalls)) {
+ // Recursively test for loop invariance. Note that the recursion is
+ // bounded because we require RequiresHoistedUse to be set at each
+ // level.
+ if (!HasOperandInLoop(op->toInstruction(), hasCalls))
+ continue;
+ }
+
+ return true;
+ }
+ return false;
+}
+
+// Test whether the given instruction is hoistable, ignoring memory
+// dependencies.
+static bool
+IsHoistableIgnoringDependency(MInstruction* ins, bool hasCalls)
+{
+ return ins->isMovable() && !ins->isEffectful() && !ins->neverHoist() &&
+ !HasOperandInLoop(ins, hasCalls);
+}
+
+// Test whether the given instruction has a memory dependency inside the loop.
+static bool
+HasDependencyInLoop(MInstruction* ins, MBasicBlock* header)
+{
+ // Don't hoist if this instruction depends on a store inside the loop.
+ if (MDefinition* dep = ins->dependency())
+ return !IsBeforeLoop(dep, header);
+ return false;
+}
+
+// Test whether the given instruction is hoistable.
+static bool
+IsHoistable(MInstruction* ins, MBasicBlock* header, bool hasCalls)
+{
+ return IsHoistableIgnoringDependency(ins, hasCalls) && !HasDependencyInLoop(ins, header);
+}
+
+// In preparation for hoisting an instruction, hoist any of its operands which
+// were too cheap to hoist on their own.
+static void
+MoveDeferredOperands(MInstruction* ins, MInstruction* hoistPoint, bool hasCalls)
+{
+ // If any of our operands were waiting for a user to be hoisted, make a note
+ // to hoist them.
+ for (size_t i = 0, e = ins->numOperands(); i != e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+ if (!IsInLoop(op))
+ continue;
+ MOZ_ASSERT(RequiresHoistedUse(op, hasCalls),
+ "Deferred loop-invariant operand is not cheap");
+ MInstruction* opIns = op->toInstruction();
+
+ // Recursively move the operands. Note that the recursion is bounded
+ // because we require RequiresHoistedUse to be set at each level.
+ MoveDeferredOperands(opIns, hoistPoint, hasCalls);
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Hoisting %s%u (now that a user will be hoisted)",
+ opIns->opName(), opIns->id());
+#endif
+
+ opIns->block()->moveBefore(hoistPoint, opIns);
+ }
+}
+
+static void
+VisitLoopBlock(MBasicBlock* block, MBasicBlock* header, MInstruction* hoistPoint, bool hasCalls)
+{
+ for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ) {
+ MInstruction* ins = *insIter++;
+
+ if (!IsHoistable(ins, header, hasCalls)) {
+#ifdef JS_JITSPEW
+ if (IsHoistableIgnoringDependency(ins, hasCalls)) {
+ JitSpew(JitSpew_LICM, " %s%u isn't hoistable due to dependency on %s%u",
+ ins->opName(), ins->id(),
+ ins->dependency()->opName(), ins->dependency()->id());
+ }
+#endif
+ continue;
+ }
+
+ // Don't hoist a cheap constant if it doesn't enable us to hoist one of
+ // its uses. We want those instructions as close as possible to their
+ // use, to minimize register pressure.
+ if (RequiresHoistedUse(ins, hasCalls)) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " %s%u will be hoisted only if its users are",
+ ins->opName(), ins->id());
+#endif
+ continue;
+ }
+
+ // Hoist operands which were too cheap to hoist on their own.
+ MoveDeferredOperands(ins, hoistPoint, hasCalls);
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Hoisting %s%u", ins->opName(), ins->id());
+#endif
+
+ // Move the instruction to the hoistPoint.
+ block->moveBefore(hoistPoint, ins);
+ }
+}
+
+static void
+VisitLoop(MIRGraph& graph, MBasicBlock* header)
+{
+ MInstruction* hoistPoint = header->loopPredecessor()->lastIns();
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Visiting loop with header block%u, hoisting to %s%u",
+ header->id(), hoistPoint->opName(), hoistPoint->id());
+#endif
+
+ MBasicBlock* backedge = header->backedge();
+
+ // This indicates whether the loop contains calls or other things which
+ // clobber most or all floating-point registers. In such loops,
+ // floating-point constants should not be hoisted unless doing so enables
+ // further hoisting.
+ bool hasCalls = LoopContainsPossibleCall(graph, header, backedge);
+
+ for (auto i(graph.rpoBegin(header)); ; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
+ MBasicBlock* block = *i;
+ if (!block->isMarked())
+ continue;
+
+ VisitLoopBlock(block, header, hoistPoint, hasCalls);
+
+ if (block == backedge)
+ break;
+ }
+}
+
+bool
+jit::LICM(MIRGenerator* mir, MIRGraph& graph)
+{
+ JitSpew(JitSpew_LICM, "Beginning LICM pass");
+
+ // Iterate in RPO to visit outer loops before inner loops. We'd hoist the
+ // same things either way, but outer first means we do a little less work.
+ for (auto i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
+ MBasicBlock* header = *i;
+ if (!header->isLoopHeader())
+ continue;
+
+ bool canOsr;
+ size_t numBlocks = MarkLoopBlocks(graph, header, &canOsr);
+
+ if (numBlocks == 0) {
+ JitSpew(JitSpew_LICM, " Loop with header block%u isn't actually a loop", header->id());
+ continue;
+ }
+
+ // Hoisting out of a loop that has an entry from the OSR block in
+ // addition to its normal entry is tricky. In theory we could clone
+ // the instruction and insert phis.
+ if (!canOsr)
+ VisitLoop(graph, header);
+ else
+ JitSpew(JitSpew_LICM, " Skipping loop with header block%u due to OSR", header->id());
+
+ UnmarkLoopBlocks(graph, header);
+
+ if (mir->shouldCancel("LICM (main loop)"))
+ return false;
+ }
+
+ return true;
+}
diff --git a/js/src/jit/LICM.h b/js/src/jit/LICM.h
new file mode 100644
index 000000000..5a1d84a4e
--- /dev/null
+++ b/js/src/jit/LICM.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LICM_h
+#define jit_LICM_h
+
+// This file declares the Loop Invariant Code Motion (LICM) optimization pass.
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+MOZ_MUST_USE bool LICM(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_LICM_h */
diff --git a/js/src/jit/LIR.cpp b/js/src/jit/LIR.cpp
new file mode 100644
index 000000000..c83841809
--- /dev/null
+++ b/js/src/jit/LIR.cpp
@@ -0,0 +1,621 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/LIR.h"
+
+#include <ctype.h>
+
+#include "jsprf.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+using namespace js;
+using namespace js::jit;
+
+LIRGraph::LIRGraph(MIRGraph* mir)
+ : blocks_(),
+ constantPool_(mir->alloc()),
+ constantPoolMap_(mir->alloc()),
+ safepoints_(mir->alloc()),
+ nonCallSafepoints_(mir->alloc()),
+ numVirtualRegisters_(0),
+ numInstructions_(1), // First id is 1.
+ localSlotCount_(0),
+ argumentSlotCount_(0),
+ entrySnapshot_(nullptr),
+ mir_(*mir)
+{
+}
+
+bool
+LIRGraph::addConstantToPool(const Value& v, uint32_t* index)
+{
+ MOZ_ASSERT(constantPoolMap_.initialized());
+
+ ConstantPoolMap::AddPtr p = constantPoolMap_.lookupForAdd(v);
+ if (p) {
+ *index = p->value();
+ return true;
+ }
+ *index = constantPool_.length();
+ return constantPool_.append(v) && constantPoolMap_.add(p, v, *index);
+}
+
+bool
+LIRGraph::noteNeedsSafepoint(LInstruction* ins)
+{
+ // Instructions with safepoints must be in linear order.
+ MOZ_ASSERT_IF(!safepoints_.empty(), safepoints_.back()->id() < ins->id());
+ if (!ins->isCall() && !nonCallSafepoints_.append(ins))
+ return false;
+ return safepoints_.append(ins);
+}
+
+void
+LIRGraph::dump(GenericPrinter& out)
+{
+ for (size_t i = 0; i < numBlocks(); i++) {
+ getBlock(i)->dump(out);
+ out.printf("\n");
+ }
+}
+
+void
+LIRGraph::dump()
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+LBlock::LBlock(MBasicBlock* from)
+ : block_(from),
+ phis_(),
+ entryMoveGroup_(nullptr),
+ exitMoveGroup_(nullptr)
+{
+ from->assignLir(this);
+}
+
+bool
+LBlock::init(TempAllocator& alloc)
+{
+ // Count the number of LPhis we'll need.
+ size_t numLPhis = 0;
+ for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
+ MPhi* phi = *i;
+ switch (phi->type()) {
+ case MIRType::Value: numLPhis += BOX_PIECES; break;
+ case MIRType::Int64: numLPhis += INT64_PIECES; break;
+ default: numLPhis += 1; break;
+ }
+ }
+
+ // Allocate space for the LPhis.
+ if (!phis_.init(alloc, numLPhis))
+ return false;
+
+ // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
+ // operands on each incoming edge, and set their definitions at the start of
+ // their defining block.
+ size_t phiIndex = 0;
+ size_t numPreds = block_->numPredecessors();
+ for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
+ MPhi* phi = *i;
+ MOZ_ASSERT(phi->numOperands() == numPreds);
+
+ int numPhis;
+ switch (phi->type()) {
+ case MIRType::Value: numPhis = BOX_PIECES; break;
+ case MIRType::Int64: numPhis = INT64_PIECES; break;
+ default: numPhis = 1; break;
+ }
+ for (int i = 0; i < numPhis; i++) {
+ LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
+ if (!inputs)
+ return false;
+
+ void* addr = &phis_[phiIndex++];
+ LPhi* lphi = new (addr) LPhi(phi, inputs);
+ lphi->setBlock(this);
+ }
+ }
+ return true;
+}
+
+const LInstruction*
+LBlock::firstInstructionWithId() const
+{
+ for (LInstructionIterator i(instructions_.begin()); i != instructions_.end(); ++i) {
+ if (i->id())
+ return *i;
+ }
+ return nullptr;
+}
+
+LMoveGroup*
+LBlock::getEntryMoveGroup(TempAllocator& alloc)
+{
+ if (entryMoveGroup_)
+ return entryMoveGroup_;
+ entryMoveGroup_ = LMoveGroup::New(alloc);
+ insertBefore(*begin(), entryMoveGroup_);
+ return entryMoveGroup_;
+}
+
+LMoveGroup*
+LBlock::getExitMoveGroup(TempAllocator& alloc)
+{
+ if (exitMoveGroup_)
+ return exitMoveGroup_;
+ exitMoveGroup_ = LMoveGroup::New(alloc);
+ insertBefore(*rbegin(), exitMoveGroup_);
+ return exitMoveGroup_;
+}
+
+void
+LBlock::dump(GenericPrinter& out)
+{
+ out.printf("block%u:\n", mir()->id());
+ for (size_t i = 0; i < numPhis(); ++i) {
+ getPhi(i)->dump(out);
+ out.printf("\n");
+ }
+ for (LInstructionIterator iter = begin(); iter != end(); iter++) {
+ iter->dump(out);
+ out.printf("\n");
+ }
+}
+
+void
+LBlock::dump()
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+static size_t
+TotalOperandCount(LRecoverInfo* recoverInfo)
+{
+ size_t accum = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ if (!it->isRecoveredOnBailout())
+ accum++;
+ }
+ return accum;
+}
+
+LRecoverInfo::LRecoverInfo(TempAllocator& alloc)
+ : instructions_(alloc),
+ recoverOffset_(INVALID_RECOVER_OFFSET)
+{ }
+
+LRecoverInfo*
+LRecoverInfo::New(MIRGenerator* gen, MResumePoint* mir)
+{
+ LRecoverInfo* recoverInfo = new(gen->alloc()) LRecoverInfo(gen->alloc());
+ if (!recoverInfo || !recoverInfo->init(mir))
+ return nullptr;
+
+ JitSpew(JitSpew_IonSnapshots, "Generating LIR recover info %p from MIR (%p)",
+ (void*)recoverInfo, (void*)mir);
+
+ return recoverInfo;
+}
+
+bool
+LRecoverInfo::appendOperands(MNode* ins)
+{
+ for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
+ MDefinition* def = ins->getOperand(i);
+
+ // Because the data flow contains no cycles (MPhis aside), a definition
+ // that isInWorklist() has already been added to the instruction
+ // vector; it cannot still be in the middle of being processed by a
+ // caller of the current function.
+ if (def->isRecoveredOnBailout() && !def->isInWorklist()) {
+ if (!appendDefinition(def))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+LRecoverInfo::appendDefinition(MDefinition* def)
+{
+ MOZ_ASSERT(def->isRecoveredOnBailout());
+ def->setInWorklist();
+
+ if (!appendOperands(def))
+ return false;
+ return instructions_.append(def);
+}
+
+bool
+LRecoverInfo::appendResumePoint(MResumePoint* rp)
+{
+ // Stores should be recovered first.
+ for (auto iter(rp->storesBegin()), end(rp->storesEnd()); iter != end; ++iter) {
+ if (!appendDefinition(iter->operand))
+ return false;
+ }
+
+ if (rp->caller() && !appendResumePoint(rp->caller()))
+ return false;
+
+ if (!appendOperands(rp))
+ return false;
+
+ return instructions_.append(rp);
+}
+
+bool
+LRecoverInfo::init(MResumePoint* rp)
+{
+ // Sort operations in the order in which we need to restore the stack. This
+ // implies that outer frames, as well as operations needed to recover the
+ // current frame, are located before the current frame. The inner-most
+ // resume point should be the last element in the list.
+ if (!appendResumePoint(rp))
+ return false;
+
+ // Remove temporary flags from all definitions.
+ for (MNode** it = begin(); it != end(); it++) {
+ if (!(*it)->isDefinition())
+ continue;
+
+ (*it)->toDefinition()->setNotInWorklist();
+ }
+
+ MOZ_ASSERT(mir() == rp);
+ return true;
+}
+
+LSnapshot::LSnapshot(LRecoverInfo* recoverInfo, BailoutKind kind)
+ : numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
+ slots_(nullptr),
+ recoverInfo_(recoverInfo),
+ snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
+ bailoutId_(INVALID_BAILOUT_ID),
+ bailoutKind_(kind)
+{ }
+
+bool
+LSnapshot::init(MIRGenerator* gen)
+{
+ slots_ = gen->allocate<LAllocation>(numSlots_);
+ return !!slots_;
+}
+
+LSnapshot*
+LSnapshot::New(MIRGenerator* gen, LRecoverInfo* recover, BailoutKind kind)
+{
+ LSnapshot* snapshot = new(gen->alloc()) LSnapshot(recover, kind);
+ if (!snapshot || !snapshot->init(gen))
+ return nullptr;
+
+ JitSpew(JitSpew_IonSnapshots, "Generating LIR snapshot %p from recover (%p)",
+ (void*)snapshot, (void*)recover);
+
+ return snapshot;
+}
+
+void
+LSnapshot::rewriteRecoveredInput(LUse input)
+{
+ // Mark any operands to this snapshot with the same value as input as being
+ // equal to the instruction's result.
+ for (size_t i = 0; i < numEntries(); i++) {
+ if (getEntry(i)->isUse() && getEntry(i)->toUse()->virtualRegister() == input.virtualRegister())
+ setEntry(i, LUse(input.virtualRegister(), LUse::RECOVERED_INPUT));
+ }
+}
+
+void
+LNode::printName(GenericPrinter& out, Opcode op)
+{
+ static const char * const names[] =
+ {
+#define LIROP(x) #x,
+ LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+ };
+ const char* name = names[op];
+ size_t len = strlen(name);
+ for (size_t i = 0; i < len; i++)
+ out.printf("%c", tolower(name[i]));
+}
+
+void
+LNode::printName(GenericPrinter& out)
+{
+ printName(out, op());
+}
+
+bool
+LAllocation::aliases(const LAllocation& other) const
+{
+ if (isFloatReg() && other.isFloatReg())
+ return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
+ return *this == other;
+}
+
+static const char*
+typeName(LDefinition::Type type)
+{
+ switch (type) {
+ case LDefinition::GENERAL: return "g";
+ case LDefinition::INT32: return "i";
+ case LDefinition::OBJECT: return "o";
+ case LDefinition::SLOTS: return "s";
+ case LDefinition::FLOAT32: return "f";
+ case LDefinition::DOUBLE: return "d";
+ case LDefinition::SIMD128INT: return "simd128int";
+ case LDefinition::SIMD128FLOAT: return "simd128float";
+ case LDefinition::SINCOS: return "sincos";
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE: return "t";
+ case LDefinition::PAYLOAD: return "p";
+#else
+ case LDefinition::BOX: return "x";
+#endif
+ }
+ MOZ_CRASH("Invalid type");
+}
+
+UniqueChars
+LDefinition::toString() const
+{
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ char* buf;
+ if (isBogusTemp()) {
+ buf = JS_smprintf("bogus");
+ } else {
+ buf = JS_smprintf("v%u<%s>", virtualRegister(), typeName(type()));
+ if (buf) {
+ if (policy() == LDefinition::FIXED)
+ buf = JS_sprintf_append(buf, ":%s", output()->toString().get());
+ else if (policy() == LDefinition::MUST_REUSE_INPUT)
+ buf = JS_sprintf_append(buf, ":tied(%u)", getReusedInput());
+ }
+ }
+
+ if (!buf)
+ oomUnsafe.crash("LDefinition::toString()");
+
+ return UniqueChars(buf);
+}
+
+static char*
+PrintUse(const LUse* use)
+{
+ switch (use->policy()) {
+ case LUse::REGISTER:
+ return JS_smprintf("v%d:r", use->virtualRegister());
+ case LUse::FIXED:
+ return JS_smprintf("v%d:%s", use->virtualRegister(),
+ AnyRegister::FromCode(use->registerCode()).name());
+ case LUse::ANY:
+ return JS_smprintf("v%d:r?", use->virtualRegister());
+ case LUse::KEEPALIVE:
+ return JS_smprintf("v%d:*", use->virtualRegister());
+ case LUse::RECOVERED_INPUT:
+ return JS_smprintf("v%d:**", use->virtualRegister());
+ default:
+ MOZ_CRASH("invalid use policy");
+ }
+}
+
+UniqueChars
+LAllocation::toString() const
+{
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ char* buf;
+ if (isBogus()) {
+ buf = JS_smprintf("bogus");
+ } else {
+ switch (kind()) {
+ case LAllocation::CONSTANT_VALUE:
+ case LAllocation::CONSTANT_INDEX:
+ buf = JS_smprintf("c");
+ break;
+ case LAllocation::GPR:
+ buf = JS_smprintf("%s", toGeneralReg()->reg().name());
+ break;
+ case LAllocation::FPU:
+ buf = JS_smprintf("%s", toFloatReg()->reg().name());
+ break;
+ case LAllocation::STACK_SLOT:
+ buf = JS_smprintf("stack:%d", toStackSlot()->slot());
+ break;
+ case LAllocation::ARGUMENT_SLOT:
+ buf = JS_smprintf("arg:%d", toArgument()->index());
+ break;
+ case LAllocation::USE:
+ buf = PrintUse(toUse());
+ break;
+ default:
+ MOZ_CRASH("Invalid LAllocation kind");
+ }
+ }
+
+ if (!buf)
+ oomUnsafe.crash("LAllocation::toString()");
+
+ return UniqueChars(buf);
+}
+
+void
+LAllocation::dump() const
+{
+ fprintf(stderr, "%s\n", toString().get());
+}
+
+void
+LDefinition::dump() const
+{
+ fprintf(stderr, "%s\n", toString().get());
+}
+
+void
+LNode::printOperands(GenericPrinter& out)
+{
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ out.printf(" (%s)", getOperand(i)->toString().get());
+ if (i != numOperands() - 1)
+ out.printf(",");
+ }
+}
+
+void
+LInstruction::assignSnapshot(LSnapshot* snapshot)
+{
+ MOZ_ASSERT(!snapshot_);
+ snapshot_ = snapshot;
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonSnapshots)) {
+ JitSpewHeader(JitSpew_IonSnapshots);
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Assigning snapshot %p to instruction %p (",
+ (void*)snapshot, (void*)this);
+ printName(out);
+ out.printf(")\n");
+ }
+#endif
+}
+
+void
+LNode::dump(GenericPrinter& out)
+{
+ if (numDefs() != 0) {
+ out.printf("{");
+ for (size_t i = 0; i < numDefs(); i++) {
+ out.printf("%s", getDef(i)->toString().get());
+ if (i != numDefs() - 1)
+ out.printf(", ");
+ }
+ out.printf("} <- ");
+ }
+
+ printName(out);
+ printOperands(out);
+
+ if (numTemps()) {
+ out.printf(" t=(");
+ for (size_t i = 0; i < numTemps(); i++) {
+ out.printf("%s", getTemp(i)->toString().get());
+ if (i != numTemps() - 1)
+ out.printf(", ");
+ }
+ out.printf(")");
+ }
+
+ if (numSuccessors()) {
+ out.printf(" s=(");
+ for (size_t i = 0; i < numSuccessors(); i++) {
+ out.printf("block%u", getSuccessor(i)->id());
+ if (i != numSuccessors() - 1)
+ out.printf(", ");
+ }
+ out.printf(")");
+ }
+}
+
+void
+LNode::dump()
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+void
+LInstruction::initSafepoint(TempAllocator& alloc)
+{
+ MOZ_ASSERT(!safepoint_);
+ safepoint_ = new(alloc) LSafepoint(alloc);
+ MOZ_ASSERT(safepoint_);
+}
+
+bool
+LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(from != to);
+ for (size_t i = 0; i < moves_.length(); i++)
+ MOZ_ASSERT(to != moves_[i].to());
+
+ // Check that SIMD moves are aligned according to ABI requirements.
+ if (LDefinition(type).isSimdType()) {
+ MOZ_ASSERT(from.isMemory() || from.isFloatReg());
+ if (from.isMemory()) {
+ if (from.isArgument())
+ MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
+ else
+ MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
+ }
+ MOZ_ASSERT(to.isMemory() || to.isFloatReg());
+ if (to.isMemory()) {
+ if (to.isArgument())
+ MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
+ else
+ MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
+ }
+ }
+#endif
+ return moves_.append(LMove(from, to, type));
+}
+
+bool
+LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type)
+{
+ // Transform the operands of this move so that performing the resulting
+ // move simultaneously with the existing moves in the group has the same
+ // effect as if the original move took place after the existing moves.
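+ // For example, if the group already performs {A -> B} and we addAfter(B -> C),
+ // the loops below rewrite the new move's source so the group becomes
+ // {A -> B, A -> C}, which, performed in parallel, matches doing B -> C after
+ // the existing moves.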
+
+ for (size_t i = 0; i < moves_.length(); i++) {
+ if (moves_[i].to() == from) {
+ from = moves_[i].from();
+ break;
+ }
+ }
+
+ if (from == to)
+ return true;
+
+ for (size_t i = 0; i < moves_.length(); i++) {
+ if (to == moves_[i].to()) {
+ moves_[i] = LMove(from, to, type);
+ return true;
+ }
+ }
+
+ return add(from, to, type);
+}
+
+void
+LMoveGroup::printOperands(GenericPrinter& out)
+{
+ for (size_t i = 0; i < numMoves(); i++) {
+ const LMove& move = getMove(i);
+ out.printf(" [%s -> %s", move.from().toString().get(), move.to().toString().get());
+#ifdef DEBUG
+ out.printf(", %s", typeName(move.type()));
+#endif
+ out.printf("]");
+ if (i != numMoves() - 1)
+ out.printf(",");
+ }
+}
diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h
new file mode 100644
index 000000000..4300c5117
--- /dev/null
+++ b/js/src/jit/LIR.h
@@ -0,0 +1,2025 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LIR_h
+#define jit_LIR_h
+
+// This file declares the core data structures for LIR: storage allocations for
+// inputs and outputs, as well as the interface instructions must conform to.
+
+#include "mozilla/Array.h"
+
+#include "jit/Bailouts.h"
+#include "jit/FixedList.h"
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/LOpcodes.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/Registers.h"
+#include "jit/Safepoints.h"
+
+namespace js {
+namespace jit {
+
+class LUse;
+class LGeneralReg;
+class LFloatReg;
+class LStackSlot;
+class LArgument;
+class LConstantIndex;
+class MBasicBlock;
+class MIRGenerator;
+
+static const uint32_t VREG_INCREMENT = 1;
+
+static const uint32_t THIS_FRAME_ARGSLOT = 0;
+
+#if defined(JS_NUNBOX32)
+# define BOX_PIECES 2
+static const uint32_t VREG_TYPE_OFFSET = 0;
+static const uint32_t VREG_DATA_OFFSET = 1;
+static const uint32_t TYPE_INDEX = 0;
+static const uint32_t PAYLOAD_INDEX = 1;
+static const uint32_t INT64LOW_INDEX = 0;
+static const uint32_t INT64HIGH_INDEX = 1;
+#elif defined(JS_PUNBOX64)
+# define BOX_PIECES 1
+#else
+# error "Unknown!"
+#endif
+
+static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
+
+// Represents storage for an operand. For constants, the pointer is tagged
+// with a single bit, and the untagged pointer is a pointer to a Value.
+class LAllocation : public TempObject
+{
+ uintptr_t bits_;
+
+ // 3 bits gives us enough for an interesting set of Kinds and also fits
+ // within the alignment bits of pointers to Value, which are always
+ // 8-byte aligned.
+ static const uintptr_t KIND_BITS = 3;
+ static const uintptr_t KIND_SHIFT = 0;
+ static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;
+
+ protected:
+ static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
+ static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;
+
+ public:
+ enum Kind {
+ CONSTANT_VALUE, // MConstant*.
+ CONSTANT_INDEX, // Constant arbitrary index.
+ USE, // Use of a virtual register, with physical allocation policy.
+ GPR, // General purpose register.
+ FPU, // Floating-point register.
+ STACK_SLOT, // Stack slot.
+ ARGUMENT_SLOT // Argument slot.
+ };
+
+ static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1;
+
+ protected:
+ uint32_t data() const {
+ return uint32_t(bits_) >> DATA_SHIFT;
+ }
+ void setData(uint32_t data) {
+ MOZ_ASSERT(data <= DATA_MASK);
+ bits_ &= ~(DATA_MASK << DATA_SHIFT);
+ bits_ |= (data << DATA_SHIFT);
+ }
+ void setKindAndData(Kind kind, uint32_t data) {
+ MOZ_ASSERT(data <= DATA_MASK);
+ bits_ = (uint32_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
+ }
+
+ LAllocation(Kind kind, uint32_t data) {
+ setKindAndData(kind, data);
+ }
+ explicit LAllocation(Kind kind) {
+ setKindAndData(kind, 0);
+ }
+
+ public:
+ LAllocation() : bits_(0)
+ {
+ MOZ_ASSERT(isBogus());
+ }
+
+ // The MConstant pointer must have its low bits cleared.
+ explicit LAllocation(const MConstant* c) {
+ MOZ_ASSERT(c);
+ bits_ = uintptr_t(c);
+ MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
+ bits_ |= CONSTANT_VALUE << KIND_SHIFT;
+ }
+ inline explicit LAllocation(AnyRegister reg);
+
+ Kind kind() const {
+ return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK);
+ }
+
+ bool isBogus() const {
+ return bits_ == 0;
+ }
+ bool isUse() const {
+ return kind() == USE;
+ }
+ bool isConstant() const {
+ return isConstantValue() || isConstantIndex();
+ }
+ bool isConstantValue() const {
+ return kind() == CONSTANT_VALUE;
+ }
+ bool isConstantIndex() const {
+ return kind() == CONSTANT_INDEX;
+ }
+ bool isGeneralReg() const {
+ return kind() == GPR;
+ }
+ bool isFloatReg() const {
+ return kind() == FPU;
+ }
+ bool isStackSlot() const {
+ return kind() == STACK_SLOT;
+ }
+ bool isArgument() const {
+ return kind() == ARGUMENT_SLOT;
+ }
+ bool isRegister() const {
+ return isGeneralReg() || isFloatReg();
+ }
+ bool isRegister(bool needFloat) const {
+ return needFloat ? isFloatReg() : isGeneralReg();
+ }
+ bool isMemory() const {
+ return isStackSlot() || isArgument();
+ }
+ inline uint32_t memorySlot() const;
+ inline LUse* toUse();
+ inline const LUse* toUse() const;
+ inline const LGeneralReg* toGeneralReg() const;
+ inline const LFloatReg* toFloatReg() const;
+ inline const LStackSlot* toStackSlot() const;
+ inline const LArgument* toArgument() const;
+ inline const LConstantIndex* toConstantIndex() const;
+ inline AnyRegister toRegister() const;
+
+ const MConstant* toConstant() const {
+ MOZ_ASSERT(isConstantValue());
+ return reinterpret_cast<const MConstant*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
+ }
+
+ bool operator ==(const LAllocation& other) const {
+ return bits_ == other.bits_;
+ }
+
+ bool operator !=(const LAllocation& other) const {
+ return bits_ != other.bits_;
+ }
+
+ HashNumber hash() const {
+ return bits_;
+ }
+
+ UniqueChars toString() const;
+ bool aliases(const LAllocation& other) const;
+ void dump() const;
+};
+
+class LUse : public LAllocation
+{
+ static const uint32_t POLICY_BITS = 3;
+ static const uint32_t POLICY_SHIFT = 0;
+ static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
+ static const uint32_t REG_BITS = 6;
+ static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
+ static const uint32_t REG_MASK = (1 << REG_BITS) - 1;
+
+ // Whether the physical register for this operand may be reused for a def.
+ static const uint32_t USED_AT_START_BITS = 1;
+ static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
+ static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;
+
+ public:
+ // Virtual registers get the remaining 19 bits.
+ static const uint32_t VREG_BITS = DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
+ static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
+ static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;
+
+ enum Policy {
+ // Input should be in a read-only register or stack slot.
+ ANY,
+
+ // Input must be in a read-only register.
+ REGISTER,
+
+ // Input must be in a specific, read-only register.
+ FIXED,
+
+ // Keep the used virtual register alive, and use whatever allocation is
+ // available. This is similar to ANY but hints to the register allocator
+ // that it is never useful to optimize this site.
+ KEEPALIVE,
+
+ // For snapshot inputs, indicates that the associated instruction will
+ // write this input to its output register before bailing out.
+ // The register allocator may thus allocate that output register, and
+ // does not need to keep the virtual register alive (alternatively,
+ // this may be treated as KEEPALIVE).
+ RECOVERED_INPUT
+ };
+
+ void set(Policy policy, uint32_t reg, bool usedAtStart) {
+ setKindAndData(USE, (policy << POLICY_SHIFT) |
+ (reg << REG_SHIFT) |
+ ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
+ }
+
+ public:
+ LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
+ set(policy, 0, usedAtStart);
+ setVirtualRegister(vreg);
+ }
+ explicit LUse(Policy policy, bool usedAtStart = false) {
+ set(policy, 0, usedAtStart);
+ }
+ explicit LUse(Register reg, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ }
+ explicit LUse(FloatRegister reg, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ }
+ LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ setVirtualRegister(virtualRegister);
+ }
+ LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ setVirtualRegister(virtualRegister);
+ }
+
+ void setVirtualRegister(uint32_t index) {
+ MOZ_ASSERT(index < VREG_MASK);
+
+ uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
+ setData(old | (index << VREG_SHIFT));
+ }
+
+ Policy policy() const {
+ Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
+ return policy;
+ }
+ uint32_t virtualRegister() const {
+ uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
+ MOZ_ASSERT(index != 0);
+ return index;
+ }
+ uint32_t registerCode() const {
+ MOZ_ASSERT(policy() == FIXED);
+ return (data() >> REG_SHIFT) & REG_MASK;
+ }
+ bool isFixedRegister() const {
+ return policy() == FIXED;
+ }
+ bool usedAtStart() const {
+ return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
+ }
+};
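+
+// Editor's sketch (not part of the original patch): a typical register-policy
+// use of virtual register 5 would be written and queried as
+//   LUse use(5, LUse::REGISTER);
+//   MOZ_ASSERT(use.policy() == LUse::REGISTER && use.virtualRegister() == 5);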
+
+static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;
+
+class LBoxAllocation
+{
+#ifdef JS_NUNBOX32
+ LAllocation type_;
+ LAllocation payload_;
+#else
+ LAllocation value_;
+#endif
+
+ public:
+#ifdef JS_NUNBOX32
+ LBoxAllocation(LAllocation type, LAllocation payload) : type_(type), payload_(payload) {}
+
+ LAllocation type() const { return type_; }
+ LAllocation payload() const { return payload_; }
+#else
+ explicit LBoxAllocation(LAllocation value) : value_(value) {}
+
+ LAllocation value() const { return value_; }
+#endif
+};
+
+template<class ValT>
+class LInt64Value
+{
+#if JS_BITS_PER_WORD == 32
+ ValT high_;
+ ValT low_;
+#else
+ ValT value_;
+#endif
+
+ public:
+#if JS_BITS_PER_WORD == 32
+ LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}
+
+ ValT high() const { return high_; }
+ ValT low() const { return low_; }
+#else
+ explicit LInt64Value(ValT value) : value_(value) {}
+
+ ValT value() const { return value_; }
+#endif
+};
+
+using LInt64Allocation = LInt64Value<LAllocation>;
+
+class LGeneralReg : public LAllocation
+{
+ public:
+ explicit LGeneralReg(Register reg)
+ : LAllocation(GPR, reg.code())
+ { }
+
+ Register reg() const {
+ return Register::FromCode(data());
+ }
+};
+
+class LFloatReg : public LAllocation
+{
+ public:
+ explicit LFloatReg(FloatRegister reg)
+ : LAllocation(FPU, reg.code())
+ { }
+
+ FloatRegister reg() const {
+ return FloatRegister::FromCode(data());
+ }
+};
+
+// Arbitrary constant index.
+class LConstantIndex : public LAllocation
+{
+ explicit LConstantIndex(uint32_t index)
+ : LAllocation(CONSTANT_INDEX, index)
+ { }
+
+ public:
+ static LConstantIndex FromIndex(uint32_t index) {
+ return LConstantIndex(index);
+ }
+
+ uint32_t index() const {
+ return data();
+ }
+};
+
+// Stack slots are indices into the stack. The indices are byte indices.
+class LStackSlot : public LAllocation
+{
+ public:
+ explicit LStackSlot(uint32_t slot)
+ : LAllocation(STACK_SLOT, slot)
+ { }
+
+ uint32_t slot() const {
+ return data();
+ }
+};
+
+// Arguments are reverse indices into the stack. The indices are byte indices.
+class LArgument : public LAllocation
+{
+ public:
+ explicit LArgument(uint32_t index)
+ : LAllocation(ARGUMENT_SLOT, index)
+ { }
+
+ uint32_t index() const {
+ return data();
+ }
+};
+
+inline uint32_t
+LAllocation::memorySlot() const
+{
+ MOZ_ASSERT(isMemory());
+ return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
+}
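+
+// Editor's sketch (not part of the original patch): a round trip through the
+// tagged kind()/data() encoding above. 'ExampleStackSlotBytes' is a
+// hypothetical helper used only for illustration.
+static inline uint32_t
+ExampleStackSlotBytes(uint32_t slot)
+{
+    LAllocation a = LStackSlot(slot);  // kind() == STACK_SLOT, data() == slot
+    MOZ_ASSERT(a.isStackSlot() && a.isMemory());
+    return a.memorySlot();             // recovers the byte index
+}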
+
+// Represents storage for a definition.
+class LDefinition
+{
+ // Bits containing policy, type, and virtual register.
+ uint32_t bits_;
+
+ // Before register allocation, this optionally contains a fixed policy.
+ // Register allocation assigns this field to a physical policy if none is
+ // fixed.
+ //
+ // Right now, pre-allocated outputs are limited to the following:
+ // * Physical argument stack slots.
+ // * Physical registers.
+ LAllocation output_;
+
+ static const uint32_t TYPE_BITS = 4;
+ static const uint32_t TYPE_SHIFT = 0;
+ static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
+ static const uint32_t POLICY_BITS = 2;
+ static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
+ static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
+
+ static const uint32_t VREG_BITS = (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
+ static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
+ static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;
+
+ public:
+ // Note that definitions, by default, are always allocated a register,
+ // unless the policy specifies that an input can be re-used and that input
+ // is a stack slot.
+ enum Policy {
+ // The policy is predetermined by the LAllocation attached to this
+ // definition. The allocation may be:
+ // * A register, which may not appear as any fixed temporary.
+ // * A stack slot or argument.
+ //
+ // Register allocation will not modify a fixed allocation.
+ FIXED,
+
+ // A random register of an appropriate class will be assigned.
+ REGISTER,
+
+ // One definition per instruction must re-use the first input
+ // allocation, which (for now) must be a register.
+ MUST_REUSE_INPUT
+ };
+
+ // This should be kept in sync with LIR.cpp's typeName().
+ enum Type {
+ GENERAL, // Generic, integer or pointer-width data (GPR).
+ INT32, // int32 data (GPR).
+ OBJECT, // Pointer that may be collected as garbage (GPR).
+ SLOTS, // Slots/elements pointer that may be moved by minor GCs (GPR).
+ FLOAT32, // 32-bit floating-point value (FPU).
+ DOUBLE, // 64-bit floating-point value (FPU).
+ SIMD128INT, // 128-bit SIMD integer vector (FPU).
+ SIMD128FLOAT, // 128-bit SIMD floating point vector (FPU).
+ SINCOS,
+#ifdef JS_NUNBOX32
+ // A type virtual register must be followed by a payload virtual
+ // register, as both will be tracked as a single gcthing.
+ TYPE,
+ PAYLOAD
+#else
+ BOX // Joined box, for punbox systems. (GPR, gcthing)
+#endif
+ };
+
+ void set(uint32_t index, Type type, Policy policy) {
+ JS_STATIC_ASSERT(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
+ bits_ = (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
+ MOZ_ASSERT_IF(!SupportsSimd, !isSimdType());
+ }
+
+ public:
+ LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
+ set(index, type, policy);
+ }
+
+ explicit LDefinition(Type type, Policy policy = REGISTER) {
+ set(0, type, policy);
+ }
+
+ LDefinition(Type type, const LAllocation& a)
+ : output_(a)
+ {
+ set(0, type, FIXED);
+ }
+
+ LDefinition(uint32_t index, Type type, const LAllocation& a)
+ : output_(a)
+ {
+ set(index, type, FIXED);
+ }
+
+ LDefinition() : bits_(0)
+ {
+ MOZ_ASSERT(isBogusTemp());
+ }
+
+ static LDefinition BogusTemp() {
+ return LDefinition();
+ }
+
+ Policy policy() const {
+ return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
+ }
+ Type type() const {
+ return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK);
+ }
+ bool isSimdType() const {
+ return type() == SIMD128INT || type() == SIMD128FLOAT;
+ }
+ bool isCompatibleReg(const AnyRegister& r) const {
+ if (isFloatReg() && r.isFloat()) {
+ if (type() == FLOAT32)
+ return r.fpu().isSingle();
+ if (type() == DOUBLE)
+ return r.fpu().isDouble();
+ if (isSimdType())
+ return r.fpu().isSimd128();
+ MOZ_CRASH("Unexpected MDefinition type");
+ }
+ return !isFloatReg() && !r.isFloat();
+ }
+ bool isCompatibleDef(const LDefinition& other) const {
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ if (isFloatReg() && other.isFloatReg())
+ return type() == other.type();
+ return !isFloatReg() && !other.isFloatReg();
+#else
+ return isFloatReg() == other.isFloatReg();
+#endif
+ }
+
+ bool isFloatReg() const {
+ return type() == FLOAT32 || type() == DOUBLE || isSimdType();
+ }
+ uint32_t virtualRegister() const {
+ uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
+ //MOZ_ASSERT(index != 0);
+ return index;
+ }
+ LAllocation* output() {
+ return &output_;
+ }
+ const LAllocation* output() const {
+ return &output_;
+ }
+ bool isFixed() const {
+ return policy() == FIXED;
+ }
+ bool isBogusTemp() const {
+ return isFixed() && output()->isBogus();
+ }
+ void setVirtualRegister(uint32_t index) {
+ MOZ_ASSERT(index < VREG_MASK);
+ bits_ &= ~(VREG_MASK << VREG_SHIFT);
+ bits_ |= index << VREG_SHIFT;
+ }
+ void setOutput(const LAllocation& a) {
+ output_ = a;
+ if (!a.isUse()) {
+ bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
+ bits_ |= FIXED << POLICY_SHIFT;
+ }
+ }
+ void setReusedInput(uint32_t operand) {
+ output_ = LConstantIndex::FromIndex(operand);
+ }
+ uint32_t getReusedInput() const {
+ MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
+ return output_.toConstantIndex()->index();
+ }
+
+ static inline Type TypeFrom(MIRType type) {
+ switch (type) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ // The stack slot allocator doesn't currently support allocating
+ // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
+ static_assert(sizeof(bool) <= sizeof(int32_t), "bool doesn't fit in an int32 slot");
+ return LDefinition::INT32;
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ return LDefinition::OBJECT;
+ case MIRType::Double:
+ return LDefinition::DOUBLE;
+ case MIRType::Float32:
+ return LDefinition::FLOAT32;
+#if defined(JS_PUNBOX64)
+ case MIRType::Value:
+ return LDefinition::BOX;
+#endif
+ case MIRType::SinCosDouble:
+ return LDefinition::SINCOS;
+ case MIRType::Slots:
+ case MIRType::Elements:
+ return LDefinition::SLOTS;
+ case MIRType::Pointer:
+ return LDefinition::GENERAL;
+#if defined(JS_PUNBOX64)
+ case MIRType::Int64:
+ return LDefinition::GENERAL;
+#endif
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ return LDefinition::SIMD128INT;
+ case MIRType::Float32x4:
+ return LDefinition::SIMD128FLOAT;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+ }
+
+ UniqueChars toString() const;
+
+ void dump() const;
+};
+
+using LInt64Definition = LInt64Value<LDefinition>;
+
+// Forward declarations of LIR types.
+#define LIROP(op) class L##op;
+ LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+class LSnapshot;
+class LSafepoint;
+class LInstruction;
+class LElementVisitor;
+
+// The common base class for LPhi and LInstruction.
+class LNode
+{
+ uint32_t id_;
+ LBlock* block_;
+
+ protected:
+ MDefinition* mir_;
+
+ public:
+ LNode()
+ : id_(0),
+ block_(nullptr),
+ mir_(nullptr)
+ { }
+
+ enum Opcode {
+# define LIROP(name) LOp_##name,
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+ LOp_Invalid
+ };
+
+ const char* opName() {
+ switch (op()) {
+# define LIR_NAME_INS(name) \
+ case LOp_##name: return #name;
+ LIR_OPCODE_LIST(LIR_NAME_INS)
+# undef LIR_NAME_INS
+ default:
+ return "Invalid";
+ }
+ }
+
+ // Hook for opcodes to add extra high level detail about what code will be
+ // emitted for the op.
+ virtual const char* extraName() const {
+ return nullptr;
+ }
+
+ virtual Opcode op() const = 0;
+
+ bool isInstruction() const {
+ return op() != LOp_Phi;
+ }
+ inline LInstruction* toInstruction();
+ inline const LInstruction* toInstruction() const;
+
+ // Returns the number of outputs of this instruction. If an output is
+ // unallocated, it is an LDefinition, defining a virtual register.
+ virtual size_t numDefs() const = 0;
+ virtual LDefinition* getDef(size_t index) = 0;
+ virtual void setDef(size_t index, const LDefinition& def) = 0;
+
+ // Returns information about operands.
+ virtual size_t numOperands() const = 0;
+ virtual LAllocation* getOperand(size_t index) = 0;
+ virtual void setOperand(size_t index, const LAllocation& a) = 0;
+
+ // Returns information about temporary registers needed. Each temporary
+ // register is an LDefinition with a fixed or virtual register and
+ // either GENERAL, FLOAT32, or DOUBLE type.
+ virtual size_t numTemps() const = 0;
+ virtual LDefinition* getTemp(size_t index) = 0;
+ virtual void setTemp(size_t index, const LDefinition& a) = 0;
+
+ // Returns the number of successors of this instruction, if it is a control
+ // transfer instruction, or zero otherwise.
+ virtual size_t numSuccessors() const = 0;
+ virtual MBasicBlock* getSuccessor(size_t i) const = 0;
+ virtual void setSuccessor(size_t i, MBasicBlock* successor) = 0;
+
+ virtual bool isCall() const {
+ return false;
+ }
+
+ // Does this call preserve the given register?
+ // By default, it is assumed that all registers are clobbered by a call.
+ virtual bool isCallPreserved(AnyRegister reg) const {
+ return false;
+ }
+
+ uint32_t id() const {
+ return id_;
+ }
+ void setId(uint32_t id) {
+ MOZ_ASSERT(!id_);
+ MOZ_ASSERT(id);
+ id_ = id;
+ }
+ void setMir(MDefinition* mir) {
+ mir_ = mir;
+ }
+ MDefinition* mirRaw() const {
+ /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
+ return mir_;
+ }
+ LBlock* block() const {
+ return block_;
+ }
+ void setBlock(LBlock* block) {
+ block_ = block;
+ }
+
+ // For an instruction which has a MUST_REUSE_INPUT output, whether that
+ // output register will be restored to its original value when bailing out.
+ virtual bool recoversInput() const {
+ return false;
+ }
+
+ virtual void dump(GenericPrinter& out);
+ void dump();
+ static void printName(GenericPrinter& out, Opcode op);
+ virtual void printName(GenericPrinter& out);
+ virtual void printOperands(GenericPrinter& out);
+
+ public:
+ // Opcode testing and casts.
+# define LIROP(name) \
+ bool is##name() const { \
+ return op() == LOp_##name; \
+ } \
+ inline L##name* to##name(); \
+ inline const L##name* to##name() const;
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+
+ virtual void accept(LElementVisitor* visitor) = 0;
+
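+// LIR_HEADER is used by every LIR opcode class: it defines the class's op()
+// identity and the accept() hook that dispatches to the matching visit method
+// on LElementVisitor.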
+#define LIR_HEADER(opcode) \
+ Opcode op() const { \
+ return LInstruction::LOp_##opcode; \
+ } \
+ void accept(LElementVisitor* visitor) { \
+ visitor->setElement(this); \
+ visitor->visit##opcode(this); \
+ }
+};
+
+class LInstruction
+ : public LNode
+ , public TempObject
+ , public InlineListNode<LInstruction>
+{
+    // This snapshot could be set after a ResumePoint. It is used to restart
+    // execution from the resume point's pc.
+ LSnapshot* snapshot_;
+
+ // Structure capturing the set of stack slots and registers which are known
+ // to hold either gcthings or Values.
+ LSafepoint* safepoint_;
+
+ LMoveGroup* inputMoves_;
+ LMoveGroup* fixReuseMoves_;
+ LMoveGroup* movesAfter_;
+
+ protected:
+ LInstruction()
+ : snapshot_(nullptr),
+ safepoint_(nullptr),
+ inputMoves_(nullptr),
+ fixReuseMoves_(nullptr),
+ movesAfter_(nullptr)
+ { }
+
+ public:
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+ LSafepoint* safepoint() const {
+ return safepoint_;
+ }
+ LMoveGroup* inputMoves() const {
+ return inputMoves_;
+ }
+ void setInputMoves(LMoveGroup* moves) {
+ inputMoves_ = moves;
+ }
+ LMoveGroup* fixReuseMoves() const {
+ return fixReuseMoves_;
+ }
+ void setFixReuseMoves(LMoveGroup* moves) {
+ fixReuseMoves_ = moves;
+ }
+ LMoveGroup* movesAfter() const {
+ return movesAfter_;
+ }
+ void setMovesAfter(LMoveGroup* moves) {
+ movesAfter_ = moves;
+ }
+ void assignSnapshot(LSnapshot* snapshot);
+ void initSafepoint(TempAllocator& alloc);
+
+ class InputIterator;
+};
+
+LInstruction*
+LNode::toInstruction()
+{
+ MOZ_ASSERT(isInstruction());
+ return static_cast<LInstruction*>(this);
+}
+
+const LInstruction*
+LNode::toInstruction() const
+{
+ MOZ_ASSERT(isInstruction());
+ return static_cast<const LInstruction*>(this);
+}
+
+class LElementVisitor
+{
+ LNode* ins_;
+
+ protected:
+ jsbytecode* lastPC_;
+ jsbytecode* lastNotInlinedPC_;
+
+ LNode* instruction() {
+ return ins_;
+ }
+
+ public:
+ void setElement(LNode* ins) {
+ ins_ = ins;
+ if (ins->mirRaw()) {
+ lastPC_ = ins->mirRaw()->trackedPc();
+ if (ins->mirRaw()->trackedTree())
+ lastNotInlinedPC_ = ins->mirRaw()->profilerLeavePc();
+ }
+ }
+
+ LElementVisitor()
+ : ins_(nullptr),
+ lastPC_(nullptr),
+ lastNotInlinedPC_(nullptr)
+ {}
+
+ public:
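+    // Each visit method crashes by default; subclasses (such as the code
+    // generator) override the opcodes they handle.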
+#define VISIT_INS(op) virtual void visit##op(L##op*) { MOZ_CRASH("NYI: " #op); }
+ LIR_OPCODE_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+typedef InlineList<LInstruction>::iterator LInstructionIterator;
+typedef InlineList<LInstruction>::reverse_iterator LInstructionReverseIterator;
+
+class MPhi;
+
+// Phi is a pseudo-instruction that emits no code, and is an annotation for the
+// register allocator. Like its equivalent in MIR, phis are collected at the
+// top of blocks and are meant to be executed in parallel, choosing the input
+// corresponding to the predecessor taken in the control flow graph.
+class LPhi final : public LNode
+{
+ LAllocation* const inputs_;
+ LDefinition def_;
+
+ public:
+ LIR_HEADER(Phi)
+
+ LPhi(MPhi* ins, LAllocation* inputs)
+ : inputs_(inputs)
+ {
+ setMir(ins);
+ }
+
+ size_t numDefs() const {
+ return 1;
+ }
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(index == 0);
+ return &def_;
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index == 0);
+ def_ = def;
+ }
+ size_t numOperands() const {
+ return mir_->toPhi()->numOperands();
+ }
+ LAllocation* getOperand(size_t index) {
+ MOZ_ASSERT(index < numOperands());
+ return &inputs_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) {
+ MOZ_ASSERT(index < numOperands());
+ inputs_[index] = a;
+ }
+ size_t numTemps() const {
+ return 0;
+ }
+ LDefinition* getTemp(size_t index) {
+ MOZ_CRASH("no temps");
+ }
+ void setTemp(size_t index, const LDefinition& temp) {
+ MOZ_CRASH("no temps");
+ }
+ size_t numSuccessors() const {
+ return 0;
+ }
+ MBasicBlock* getSuccessor(size_t i) const {
+ MOZ_CRASH("no successors");
+ }
+ void setSuccessor(size_t i, MBasicBlock*) {
+ MOZ_CRASH("no successors");
+ }
+};
+
+class LMoveGroup;
+class LBlock
+{
+ MBasicBlock* block_;
+ FixedList<LPhi> phis_;
+ InlineList<LInstruction> instructions_;
+ LMoveGroup* entryMoveGroup_;
+ LMoveGroup* exitMoveGroup_;
+ Label label_;
+
+ public:
+ explicit LBlock(MBasicBlock* block);
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+
+ void add(LInstruction* ins) {
+ ins->setBlock(this);
+ instructions_.pushBack(ins);
+ }
+ size_t numPhis() const {
+ return phis_.length();
+ }
+ LPhi* getPhi(size_t index) {
+ return &phis_[index];
+ }
+ const LPhi* getPhi(size_t index) const {
+ return &phis_[index];
+ }
+ MBasicBlock* mir() const {
+ return block_;
+ }
+ LInstructionIterator begin() {
+ return instructions_.begin();
+ }
+ LInstructionIterator begin(LInstruction* at) {
+ return instructions_.begin(at);
+ }
+ LInstructionIterator end() {
+ return instructions_.end();
+ }
+ LInstructionReverseIterator rbegin() {
+ return instructions_.rbegin();
+ }
+ LInstructionReverseIterator rbegin(LInstruction* at) {
+ return instructions_.rbegin(at);
+ }
+ LInstructionReverseIterator rend() {
+ return instructions_.rend();
+ }
+ InlineList<LInstruction>& instructions() {
+ return instructions_;
+ }
+ void insertAfter(LInstruction* at, LInstruction* ins) {
+ instructions_.insertAfter(at, ins);
+ }
+ void insertBefore(LInstruction* at, LInstruction* ins) {
+ instructions_.insertBefore(at, ins);
+ }
+ const LNode* firstElementWithId() const {
+ return !phis_.empty()
+ ? static_cast<const LNode*>(getPhi(0))
+ : firstInstructionWithId();
+ }
+ uint32_t firstId() const {
+ return firstElementWithId()->id();
+ }
+ uint32_t lastId() const {
+ return lastInstructionWithId()->id();
+ }
+ const LInstruction* firstInstructionWithId() const;
+ const LInstruction* lastInstructionWithId() const {
+ const LInstruction* last = *instructions_.rbegin();
+ MOZ_ASSERT(last->id());
+ // The last instruction is a control flow instruction which does not have
+ // any output.
+ MOZ_ASSERT(last->numDefs() == 0);
+ return last;
+ }
+
+ // Return the label to branch to when branching to this block.
+ Label* label() {
+ MOZ_ASSERT(!isTrivial());
+ return &label_;
+ }
+
+ LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
+ LMoveGroup* getExitMoveGroup(TempAllocator& alloc);
+
+    // Test whether this basic block is empty except for a simple goto and is
+    // not a loop header. No code will be emitted for such blocks.
+ bool isTrivial() {
+ return begin()->isGoto() && !mir()->isLoopHeader();
+ }
+
+ void dump(GenericPrinter& out);
+ void dump();
+};
+
+namespace details {
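+    // Shared base class providing fixed-size storage for an instruction's
+    // definitions and temps. Operand storage is supplied by the derived
+    // LInstructionHelper and LVariadicInstruction templates.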
+ template <size_t Defs, size_t Temps>
+ class LInstructionFixedDefsTempsHelper : public LInstruction
+ {
+ mozilla::Array<LDefinition, Defs> defs_;
+ mozilla::Array<LDefinition, Temps> temps_;
+
+ public:
+ size_t numDefs() const final override {
+ return Defs;
+ }
+ LDefinition* getDef(size_t index) final override {
+ return &defs_[index];
+ }
+ size_t numTemps() const final override {
+ return Temps;
+ }
+ LDefinition* getTemp(size_t index) final override {
+ return &temps_[index];
+ }
+
+ void setDef(size_t index, const LDefinition& def) final override {
+ defs_[index] = def;
+ }
+ void setTemp(size_t index, const LDefinition& a) final override {
+ temps_[index] = a;
+ }
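+        // An Int64 temp occupies two temp slots (low and high words) on
+        // 32-bit targets and a single slot on 64-bit targets.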
+ void setInt64Temp(size_t index, const LInt64Definition& a) {
+#if JS_BITS_PER_WORD == 32
+ temps_[index] = a.low();
+ temps_[index + 1] = a.high();
+#else
+ temps_[index] = a.value();
+#endif
+ }
+
+ size_t numSuccessors() const override {
+ return 0;
+ }
+ MBasicBlock* getSuccessor(size_t i) const override {
+ MOZ_ASSERT(false);
+ return nullptr;
+ }
+ void setSuccessor(size_t i, MBasicBlock* successor) override {
+ MOZ_ASSERT(false);
+ }
+
+ // Default accessors, assuming a single input and output, respectively.
+ const LAllocation* input() {
+ MOZ_ASSERT(numOperands() == 1);
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ MOZ_ASSERT(numDefs() == 1);
+ return getDef(0);
+ }
+ };
+} // namespace details
+
+template <size_t Defs, size_t Operands, size_t Temps>
+class LInstructionHelper : public details::LInstructionFixedDefsTempsHelper<Defs, Temps>
+{
+ mozilla::Array<LAllocation, Operands> operands_;
+
+ public:
+ size_t numOperands() const final override {
+ return Operands;
+ }
+ LAllocation* getOperand(size_t index) final override {
+ return &operands_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) final override {
+ operands_[index] = a;
+ }
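+    // A boxed Value occupies two adjacent operands (type and payload) on
+    // NUNBOX32 targets and a single operand on PUNBOX64 targets; similarly,
+    // an Int64 value uses two operands on 32-bit targets.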
+ void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
+#ifdef JS_NUNBOX32
+ operands_[index + TYPE_INDEX] = alloc.type();
+ operands_[index + PAYLOAD_INDEX] = alloc.payload();
+#else
+ operands_[index] = alloc.value();
+#endif
+ }
+ void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
+#if JS_BITS_PER_WORD == 32
+ operands_[index + INT64LOW_INDEX] = alloc.low();
+ operands_[index + INT64HIGH_INDEX] = alloc.high();
+#else
+ operands_[index] = alloc.value();
+#endif
+ }
+ const LInt64Allocation getInt64Operand(size_t offset) {
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
+ operands_[offset + INT64LOW_INDEX]);
+#else
+ return LInt64Allocation(operands_[offset]);
+#endif
+ }
+};
+
+template<size_t Defs, size_t Temps>
+class LVariadicInstruction : public details::LInstructionFixedDefsTempsHelper<Defs, Temps>
+{
+ FixedList<LAllocation> operands_;
+
+ public:
+ MOZ_MUST_USE bool init(TempAllocator& alloc, size_t length) {
+ return operands_.init(alloc, length);
+ }
+ size_t numOperands() const final override {
+ return operands_.length();
+ }
+ LAllocation* getOperand(size_t index) final override {
+ return &operands_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) final override {
+ operands_[index] = a;
+ }
+};
+
+template <size_t Defs, size_t Operands, size_t Temps>
+class LCallInstructionHelper : public LInstructionHelper<Defs, Operands, Temps>
+{
+ public:
+ virtual bool isCall() const {
+ return true;
+ }
+};
+
+class LRecoverInfo : public TempObject
+{
+ public:
+ typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;
+
+ private:
+ // List of instructions needed to recover the stack frames.
+ // Outer frames are stored before inner frames.
+ Instructions instructions_;
+
+ // Cached offset where this resume point is encoded.
+ RecoverOffset recoverOffset_;
+
+ explicit LRecoverInfo(TempAllocator& alloc);
+ MOZ_MUST_USE bool init(MResumePoint* mir);
+
+    // Fill the instruction vector such that all instructions needed for the
+    // recovery are pushed before the current instruction.
+ MOZ_MUST_USE bool appendOperands(MNode* ins);
+ MOZ_MUST_USE bool appendDefinition(MDefinition* def);
+ MOZ_MUST_USE bool appendResumePoint(MResumePoint* rp);
+ public:
+ static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);
+
+    // Resume point of the innermost function.
+ MResumePoint* mir() const {
+ return instructions_.back()->toResumePoint();
+ }
+ RecoverOffset recoverOffset() const {
+ return recoverOffset_;
+ }
+ void setRecoverOffset(RecoverOffset offset) {
+ MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
+ recoverOffset_ = offset;
+ }
+
+ MNode** begin() {
+ return instructions_.begin();
+ }
+ MNode** end() {
+ return instructions_.end();
+ }
+ size_t numInstructions() const {
+ return instructions_.length();
+ }
+
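+    // Iterates over the operands of every recorded instruction. Note that
+    // operator bool() returns true once iteration is finished, so a loop over
+    // all operands looks roughly like:
+    //
+    //   for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it)
+    //       use(*it);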
+ class OperandIter
+ {
+ private:
+ MNode** it_;
+ MNode** end_;
+ size_t op_;
+
+ public:
+ explicit OperandIter(LRecoverInfo* recoverInfo)
+ : it_(recoverInfo->begin()), end_(recoverInfo->end()), op_(0)
+ {
+ settle();
+ }
+
+ void settle() {
+ while ((*it_)->numOperands() == 0) {
+ ++it_;
+ op_ = 0;
+ }
+ }
+
+ MDefinition* operator*() {
+ return (*it_)->getOperand(op_);
+ }
+ MDefinition* operator ->() {
+ return (*it_)->getOperand(op_);
+ }
+
+ OperandIter& operator ++() {
+ ++op_;
+ if (op_ == (*it_)->numOperands()) {
+ op_ = 0;
+ ++it_;
+ }
+ if (!*this)
+ settle();
+
+ return *this;
+ }
+
+ explicit operator bool() const {
+ return it_ == end_;
+ }
+
+#ifdef DEBUG
+ bool canOptimizeOutIfUnused();
+#endif
+ };
+};
+
+// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
+// MResumePoints, LSnapshots cannot be shared, as they are filled in by the
+// register allocator in order to capture the precise low-level stack state in
+// between an instruction's input and output. During code generation,
+// LSnapshots are compressed and saved in the compiled script.
+class LSnapshot : public TempObject
+{
+ private:
+ uint32_t numSlots_;
+ LAllocation* slots_;
+ LRecoverInfo* recoverInfo_;
+ SnapshotOffset snapshotOffset_;
+ BailoutId bailoutId_;
+ BailoutKind bailoutKind_;
+
+ LSnapshot(LRecoverInfo* recover, BailoutKind kind);
+ MOZ_MUST_USE bool init(MIRGenerator* gen);
+
+ public:
+ static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover, BailoutKind kind);
+
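+    // Each recorded value occupies BOX_PIECES entries: two (type and payload)
+    // on NUNBOX32 targets, one on PUNBOX64 targets.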
+ size_t numEntries() const {
+ return numSlots_;
+ }
+ size_t numSlots() const {
+ return numSlots_ / BOX_PIECES;
+ }
+ LAllocation* payloadOfSlot(size_t i) {
+ MOZ_ASSERT(i < numSlots());
+ size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
+ return getEntry(entryIndex);
+ }
+#ifdef JS_NUNBOX32
+ LAllocation* typeOfSlot(size_t i) {
+ MOZ_ASSERT(i < numSlots());
+ size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
+ return getEntry(entryIndex);
+ }
+#endif
+ LAllocation* getEntry(size_t i) {
+ MOZ_ASSERT(i < numSlots_);
+ return &slots_[i];
+ }
+ void setEntry(size_t i, const LAllocation& alloc) {
+ MOZ_ASSERT(i < numSlots_);
+ slots_[i] = alloc;
+ }
+ LRecoverInfo* recoverInfo() const {
+ return recoverInfo_;
+ }
+ MResumePoint* mir() const {
+ return recoverInfo()->mir();
+ }
+ SnapshotOffset snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ BailoutId bailoutId() const {
+ return bailoutId_;
+ }
+ void setSnapshotOffset(SnapshotOffset offset) {
+ MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
+ snapshotOffset_ = offset;
+ }
+ void setBailoutId(BailoutId id) {
+ MOZ_ASSERT(bailoutId_ == INVALID_BAILOUT_ID);
+ bailoutId_ = id;
+ }
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ void rewriteRecoveredInput(LUse input);
+};
+
+struct SafepointSlotEntry {
+ // Flag indicating whether this is a slot in the stack or argument space.
+ uint32_t stack:1;
+
+ // Byte offset of the slot, as in LStackSlot or LArgument.
+ uint32_t slot:31;
+
+ SafepointSlotEntry() { }
+ SafepointSlotEntry(bool stack, uint32_t slot)
+ : stack(stack), slot(slot)
+ { }
+ explicit SafepointSlotEntry(const LAllocation* a)
+ : stack(a->isStackSlot()), slot(a->memorySlot())
+ { }
+};
+
+struct SafepointNunboxEntry {
+ uint32_t typeVreg;
+ LAllocation type;
+ LAllocation payload;
+
+ SafepointNunboxEntry() { }
+ SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
+ : typeVreg(typeVreg), type(type), payload(payload)
+ { }
+};
+
+class LSafepoint : public TempObject
+{
+ typedef SafepointSlotEntry SlotEntry;
+ typedef SafepointNunboxEntry NunboxEntry;
+
+ public:
+ typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
+ typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;
+
+ private:
+    // The information in a safepoint describes the registers and GC-related
+    // values that are live at the start of the associated instruction.
+
+ // The set of registers which are live at an OOL call made within the
+ // instruction. This includes any registers for inputs which are not
+ // use-at-start, any registers for temps, and any registers live after the
+ // call except outputs of the instruction.
+ //
+ // For call instructions, the live regs are empty. Call instructions may
+ // have register inputs or temporaries, which will *not* be in the live
+ // registers: if passed to the call, the values passed will be marked via
+ // MarkJitExitFrame, and no registers can be live after the instruction
+ // except its outputs.
+ LiveRegisterSet liveRegs_;
+
+ // The subset of liveRegs which contains gcthing pointers.
+ LiveGeneralRegisterSet gcRegs_;
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // Clobbered regs of the current instruction. This set is never written to
+ // the safepoint; it's only used by assertions during compilation.
+ LiveRegisterSet clobberedRegs_;
+#endif
+
+ // Offset to a position in the safepoint stream, or
+ // INVALID_SAFEPOINT_OFFSET.
+ uint32_t safepointOffset_;
+
+ // Assembler buffer displacement to OSI point's call location.
+ uint32_t osiCallPointOffset_;
+
+ // List of slots which have gcthing pointers.
+ SlotList gcSlots_;
+
+ // List of slots which have Values.
+ SlotList valueSlots_;
+
+#ifdef JS_NUNBOX32
+ // List of registers (in liveRegs) and slots which contain pieces of Values.
+ NunboxList nunboxParts_;
+#elif JS_PUNBOX64
+ // The subset of liveRegs which have Values.
+ LiveGeneralRegisterSet valueRegs_;
+#endif
+
+ // The subset of liveRegs which contains pointers to slots/elements.
+ LiveGeneralRegisterSet slotsOrElementsRegs_;
+
+ // List of slots which have slots/elements pointers.
+ SlotList slotsOrElementsSlots_;
+
+ public:
+ void assertInvariants() {
+ // Every register in valueRegs and gcRegs should also be in liveRegs.
+#ifndef JS_NUNBOX32
+ MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
+#endif
+ MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
+ }
+
+ explicit LSafepoint(TempAllocator& alloc)
+ : safepointOffset_(INVALID_SAFEPOINT_OFFSET)
+ , osiCallPointOffset_(0)
+ , gcSlots_(alloc)
+ , valueSlots_(alloc)
+#ifdef JS_NUNBOX32
+ , nunboxParts_(alloc)
+#endif
+ , slotsOrElementsSlots_(alloc)
+ {
+ assertInvariants();
+ }
+ void addLiveRegister(AnyRegister reg) {
+ liveRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ const LiveRegisterSet& liveRegs() const {
+ return liveRegs_;
+ }
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void addClobberedRegister(AnyRegister reg) {
+ clobberedRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ const LiveRegisterSet& clobberedRegs() const {
+ return clobberedRegs_;
+ }
+#endif
+ void addGcRegister(Register reg) {
+ gcRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ LiveGeneralRegisterSet gcRegs() const {
+ return gcRegs_;
+ }
+ MOZ_MUST_USE bool addGcSlot(bool stack, uint32_t slot) {
+ bool result = gcSlots_.append(SlotEntry(stack, slot));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+ SlotList& gcSlots() {
+ return gcSlots_;
+ }
+
+ SlotList& slotsOrElementsSlots() {
+ return slotsOrElementsSlots_;
+ }
+ LiveGeneralRegisterSet slotsOrElementsRegs() const {
+ return slotsOrElementsRegs_;
+ }
+ void addSlotsOrElementsRegister(Register reg) {
+ slotsOrElementsRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ MOZ_MUST_USE bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
+ bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+ MOZ_MUST_USE bool addSlotsOrElementsPointer(LAllocation alloc) {
+ if (alloc.isMemory())
+ return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
+ MOZ_ASSERT(alloc.isRegister());
+ addSlotsOrElementsRegister(alloc.toRegister().gpr());
+ assertInvariants();
+ return true;
+ }
+ bool hasSlotsOrElementsPointer(LAllocation alloc) const {
+ if (alloc.isRegister())
+ return slotsOrElementsRegs().has(alloc.toRegister().gpr());
+ for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
+ const SlotEntry& entry = slotsOrElementsSlots_[i];
+ if (entry.stack == alloc.isStackSlot() && entry.slot == alloc.memorySlot())
+ return true;
+ }
+ return false;
+ }
+
+ MOZ_MUST_USE bool addGcPointer(LAllocation alloc) {
+ if (alloc.isMemory())
+ return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
+ if (alloc.isRegister())
+ addGcRegister(alloc.toRegister().gpr());
+ assertInvariants();
+ return true;
+ }
+
+ bool hasGcPointer(LAllocation alloc) const {
+ if (alloc.isRegister())
+ return gcRegs().has(alloc.toRegister().gpr());
+ MOZ_ASSERT(alloc.isMemory());
+ for (size_t i = 0; i < gcSlots_.length(); i++) {
+ if (gcSlots_[i].stack == alloc.isStackSlot() && gcSlots_[i].slot == alloc.memorySlot())
+ return true;
+ }
+ return false;
+ }
+
+ MOZ_MUST_USE bool addValueSlot(bool stack, uint32_t slot) {
+ bool result = valueSlots_.append(SlotEntry(stack, slot));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+ SlotList& valueSlots() {
+ return valueSlots_;
+ }
+
+ bool hasValueSlot(bool stack, uint32_t slot) const {
+ for (size_t i = 0; i < valueSlots_.length(); i++) {
+ if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot)
+ return true;
+ }
+ return false;
+ }
+
+#ifdef JS_NUNBOX32
+
+ MOZ_MUST_USE bool addNunboxParts(uint32_t typeVreg, LAllocation type, LAllocation payload) {
+ bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+
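+    // Record the type half of a nunboxed Value. If an entry for this vreg
+    // already exists with a placeholder type, fill it in; otherwise append a
+    // new entry whose payload is a placeholder to be completed later by
+    // addNunboxPayload.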
+ MOZ_MUST_USE bool addNunboxType(uint32_t typeVreg, LAllocation type) {
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].type == type)
+ return true;
+ if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
+ nunboxParts_[i].type = type;
+ return true;
+ }
+ }
+
+ // vregs for nunbox pairs are adjacent, with the type coming first.
+ uint32_t payloadVreg = typeVreg + 1;
+ bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+
+ MOZ_MUST_USE bool addNunboxPayload(uint32_t payloadVreg, LAllocation payload) {
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].payload == payload)
+ return true;
+ if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
+ nunboxParts_[i].payload = payload;
+ return true;
+ }
+ }
+
+ // vregs for nunbox pairs are adjacent, with the type coming first.
+ uint32_t typeVreg = payloadVreg - 1;
+ bool result = nunboxParts_.append(NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
+ if (result)
+ assertInvariants();
+ return result;
+ }
+
+ LAllocation findTypeAllocation(uint32_t typeVreg) {
+ // Look for some allocation for the specified type vreg, to go with a
+ // partial nunbox entry for the payload. Note that we don't need to
+ // look at the value slots in the safepoint, as these aren't used by
+ // register allocators which add partial nunbox entries.
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].typeVreg == typeVreg && !nunboxParts_[i].type.isUse())
+ return nunboxParts_[i].type;
+ }
+ return LUse(typeVreg, LUse::ANY);
+ }
+
+#ifdef DEBUG
+ bool hasNunboxPayload(LAllocation payload) const {
+ if (payload.isMemory() && hasValueSlot(payload.isStackSlot(), payload.memorySlot()))
+ return true;
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].payload == payload)
+ return true;
+ }
+ return false;
+ }
+#endif
+
+ NunboxList& nunboxParts() {
+ return nunboxParts_;
+ }
+
+#elif JS_PUNBOX64
+
+ void addValueRegister(Register reg) {
+ valueRegs_.add(reg);
+ assertInvariants();
+ }
+ LiveGeneralRegisterSet valueRegs() const {
+ return valueRegs_;
+ }
+
+ MOZ_MUST_USE bool addBoxedValue(LAllocation alloc) {
+ if (alloc.isRegister()) {
+ Register reg = alloc.toRegister().gpr();
+ if (!valueRegs().has(reg))
+ addValueRegister(reg);
+ return true;
+ }
+ if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot()))
+ return true;
+ return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+
+ bool hasBoxedValue(LAllocation alloc) const {
+ if (alloc.isRegister())
+ return valueRegs().has(alloc.toRegister().gpr());
+ return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+
+#endif // JS_PUNBOX64
+
+ bool encoded() const {
+ return safepointOffset_ != INVALID_SAFEPOINT_OFFSET;
+ }
+ uint32_t offset() const {
+ MOZ_ASSERT(encoded());
+ return safepointOffset_;
+ }
+ void setOffset(uint32_t offset) {
+ safepointOffset_ = offset;
+ }
+ uint32_t osiReturnPointOffset() const {
+        // In general, pointer arithmetic on code is bad, but in this case,
+        // when computing the return address of the OSI call, stepping over
+        // pools would be wrong.
+ return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
+ }
+ uint32_t osiCallPointOffset() const {
+ return osiCallPointOffset_;
+ }
+ void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
+ MOZ_ASSERT(!osiCallPointOffset_);
+ osiCallPointOffset_ = osiCallPointOffset;
+ }
+};
+
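+// Iterates over every input allocation of an instruction: first its operands,
+// then, if the instruction has a snapshot, the snapshot's entries.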
+class LInstruction::InputIterator
+{
+ private:
+ LInstruction& ins_;
+ size_t idx_;
+ bool snapshot_;
+
+ void handleOperandsEnd() {
+ // Iterate on the snapshot when iteration over all operands is done.
+ if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
+ idx_ = 0;
+ snapshot_ = true;
+ }
+ }
+
+public:
+ explicit InputIterator(LInstruction& ins) :
+ ins_(ins),
+ idx_(0),
+ snapshot_(false)
+ {
+ handleOperandsEnd();
+ }
+
+ bool more() const {
+ if (snapshot_)
+ return idx_ < ins_.snapshot()->numEntries();
+ if (idx_ < ins_.numOperands())
+ return true;
+ if (ins_.snapshot() && ins_.snapshot()->numEntries())
+ return true;
+ return false;
+ }
+
+ bool isSnapshotInput() const {
+ return snapshot_;
+ }
+
+ void next() {
+ MOZ_ASSERT(more());
+ idx_++;
+ handleOperandsEnd();
+ }
+
+ void replace(const LAllocation& alloc) {
+ if (snapshot_)
+ ins_.snapshot()->setEntry(idx_, alloc);
+ else
+ ins_.setOperand(idx_, alloc);
+ }
+
+ LAllocation* operator*() const {
+ if (snapshot_)
+ return ins_.snapshot()->getEntry(idx_);
+ return ins_.getOperand(idx_);
+ }
+
+ LAllocation* operator ->() const {
+ return **this;
+ }
+};
+
+class LIRGraph
+{
+ struct ValueHasher
+ {
+ typedef Value Lookup;
+ static HashNumber hash(const Value& v) {
+ return HashNumber(v.asRawBits());
+ }
+ static bool match(const Value& lhs, const Value& rhs) {
+ return lhs == rhs;
+ }
+ };
+
+ FixedList<LBlock> blocks_;
+ Vector<Value, 0, JitAllocPolicy> constantPool_;
+ typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
+ ConstantPoolMap constantPoolMap_;
+ Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
+ Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
+ uint32_t numVirtualRegisters_;
+ uint32_t numInstructions_;
+
+ // Number of stack slots needed for local spills.
+ uint32_t localSlotCount_;
+ // Number of stack slots needed for argument construction for calls.
+ uint32_t argumentSlotCount_;
+
+ // Snapshot taken before any LIR has been lowered.
+ LSnapshot* entrySnapshot_;
+
+ MIRGraph& mir_;
+
+ public:
+ explicit LIRGraph(MIRGraph* mir);
+
+ MOZ_MUST_USE bool init() {
+ return constantPoolMap_.init() && blocks_.init(mir_.alloc(), mir_.numBlocks());
+ }
+ MIRGraph& mir() const {
+ return mir_;
+ }
+ size_t numBlocks() const {
+ return blocks_.length();
+ }
+ LBlock* getBlock(size_t i) {
+ return &blocks_[i];
+ }
+ uint32_t numBlockIds() const {
+ return mir_.numBlockIds();
+ }
+ MOZ_MUST_USE bool initBlock(MBasicBlock* mir) {
+ auto* block = &blocks_[mir->id()];
+ auto* lir = new (block) LBlock(mir);
+ return lir->init(mir_.alloc());
+ }
+ uint32_t getVirtualRegister() {
+ numVirtualRegisters_ += VREG_INCREMENT;
+ return numVirtualRegisters_;
+ }
+ uint32_t numVirtualRegisters() const {
+ // Virtual registers are 1-based, not 0-based, so add one as a
+ // convenience for 0-based arrays.
+ return numVirtualRegisters_ + 1;
+ }
+ uint32_t getInstructionId() {
+ return numInstructions_++;
+ }
+ uint32_t numInstructions() const {
+ return numInstructions_;
+ }
+ void setLocalSlotCount(uint32_t localSlotCount) {
+ localSlotCount_ = localSlotCount;
+ }
+ uint32_t localSlotCount() const {
+ return localSlotCount_;
+ }
+ // Return the localSlotCount() value rounded up so that it satisfies the
+ // platform stack alignment requirement, and so that it's a multiple of
+ // the number of slots per Value.
+ uint32_t paddedLocalSlotCount() const {
+ // Round to JitStackAlignment, and implicitly to sizeof(Value) as
+ // JitStackAlignment is a multiple of sizeof(Value). These alignments
+ // are needed for spilling SIMD registers properly, and for
+ // StackOffsetOfPassedArg which rounds argument slots to 8-byte
+ // boundaries.
+ return AlignBytes(localSlotCount(), JitStackAlignment);
+ }
+ size_t paddedLocalSlotsSize() const {
+ return paddedLocalSlotCount();
+ }
+ void setArgumentSlotCount(uint32_t argumentSlotCount) {
+ argumentSlotCount_ = argumentSlotCount;
+ }
+ uint32_t argumentSlotCount() const {
+ return argumentSlotCount_;
+ }
+ size_t argumentsSize() const {
+ return argumentSlotCount() * sizeof(Value);
+ }
+ uint32_t totalSlotCount() const {
+ return paddedLocalSlotCount() + argumentsSize();
+ }
+ MOZ_MUST_USE bool addConstantToPool(const Value& v, uint32_t* index);
+ size_t numConstants() const {
+ return constantPool_.length();
+ }
+ Value* constantPool() {
+ return &constantPool_[0];
+ }
+ void setEntrySnapshot(LSnapshot* snapshot) {
+ MOZ_ASSERT(!entrySnapshot_);
+ entrySnapshot_ = snapshot;
+ }
+ LSnapshot* entrySnapshot() const {
+ MOZ_ASSERT(entrySnapshot_);
+ return entrySnapshot_;
+ }
+ bool noteNeedsSafepoint(LInstruction* ins);
+ size_t numNonCallSafepoints() const {
+ return nonCallSafepoints_.length();
+ }
+ LInstruction* getNonCallSafepoint(size_t i) const {
+ return nonCallSafepoints_[i];
+ }
+ size_t numSafepoints() const {
+ return safepoints_.length();
+ }
+ LInstruction* getSafepoint(size_t i) const {
+ return safepoints_[i];
+ }
+
+ void dump(GenericPrinter& out);
+ void dump();
+};
+
+LAllocation::LAllocation(AnyRegister reg)
+{
+ if (reg.isFloat())
+ *this = LFloatReg(reg.fpu());
+ else
+ *this = LGeneralReg(reg.gpr());
+}
+
+AnyRegister
+LAllocation::toRegister() const
+{
+ MOZ_ASSERT(isRegister());
+ if (isFloatReg())
+ return AnyRegister(toFloatReg()->reg());
+ return AnyRegister(toGeneralReg()->reg());
+}
+
+} // namespace jit
+} // namespace js
+
+#include "jit/shared/LIR-shared.h"
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# if defined(JS_CODEGEN_X86)
+# include "jit/x86/LIR-x86.h"
+# elif defined(JS_CODEGEN_X64)
+# include "jit/x64/LIR-x64.h"
+# endif
+# include "jit/x86-shared/LIR-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/LIR-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/LIR-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/LIR-mips32.h"
+# elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/LIR-mips64.h"
+# endif
+# include "jit/mips-shared/LIR-mips-shared.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/LIR-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+#undef LIR_HEADER
+
+namespace js {
+namespace jit {
+
+#define LIROP(name) \
+ L##name* LNode::to##name() \
+ { \
+ MOZ_ASSERT(is##name()); \
+ return static_cast<L##name*>(this); \
+ } \
+ const L##name* LNode::to##name() const \
+ { \
+ MOZ_ASSERT(is##name()); \
+ return static_cast<const L##name*>(this); \
+ }
+ LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+#define LALLOC_CAST(type) \
+ L##type* LAllocation::to##type() { \
+ MOZ_ASSERT(is##type()); \
+ return static_cast<L##type*>(this); \
+ }
+#define LALLOC_CONST_CAST(type) \
+ const L##type* LAllocation::to##type() const { \
+ MOZ_ASSERT(is##type()); \
+ return static_cast<const L##type*>(this); \
+ }
+
+LALLOC_CAST(Use)
+LALLOC_CONST_CAST(Use)
+LALLOC_CONST_CAST(GeneralReg)
+LALLOC_CONST_CAST(FloatReg)
+LALLOC_CONST_CAST(StackSlot)
+LALLOC_CONST_CAST(Argument)
+LALLOC_CONST_CAST(ConstantIndex)
+
+#undef LALLOC_CAST
+
+#ifdef JS_NUNBOX32
+static inline signed
+OffsetToOtherHalfOfNunbox(LDefinition::Type type)
+{
+ MOZ_ASSERT(type == LDefinition::TYPE || type == LDefinition::PAYLOAD);
+ signed offset = (type == LDefinition::TYPE)
+ ? PAYLOAD_INDEX - TYPE_INDEX
+ : TYPE_INDEX - PAYLOAD_INDEX;
+ return offset;
+}
+
+static inline void
+AssertTypesFormANunbox(LDefinition::Type type1, LDefinition::Type type2)
+{
+ MOZ_ASSERT((type1 == LDefinition::TYPE && type2 == LDefinition::PAYLOAD) ||
+ (type2 == LDefinition::TYPE && type1 == LDefinition::PAYLOAD));
+}
+
+static inline unsigned
+OffsetOfNunboxSlot(LDefinition::Type type)
+{
+ if (type == LDefinition::PAYLOAD)
+ return NUNBOX32_PAYLOAD_OFFSET;
+ return NUNBOX32_TYPE_OFFSET;
+}
+
+// Note that stack indexes for LStackSlot are modelled backwards, so a
+// double-sized slot starting at 2 has its next word at 1, *not* 3.
+static inline unsigned
+BaseOfNunboxSlot(LDefinition::Type type, unsigned slot)
+{
+ if (type == LDefinition::PAYLOAD)
+ return slot + NUNBOX32_PAYLOAD_OFFSET;
+ return slot + NUNBOX32_TYPE_OFFSET;
+}
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_LIR_h */
diff --git a/js/src/jit/LOpcodes.h b/js/src/jit/LOpcodes.h
new file mode 100644
index 000000000..271a92c1f
--- /dev/null
+++ b/js/src/jit/LOpcodes.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LOpcodes_h
+#define jit_LOpcodes_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/LOpcodes-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/LOpcodes-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/LOpcodes-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/LOpcodes-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/LOpcodes-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/LOpcodes-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/LOpcodes-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
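+// LIR_OPCODE_LIST applies the given macro to every LIR opcode: the opcodes
+// shared by all targets plus those specific to the architecture included
+// above.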
+#define LIR_OPCODE_LIST(_) \
+ LIR_COMMON_OPCODE_LIST(_) \
+ LIR_CPU_OPCODE_LIST(_)
+
+#endif /* jit_LOpcodes_h */
diff --git a/js/src/jit/Label.h b/js/src/jit/Label.h
new file mode 100644
index 000000000..b23477730
--- /dev/null
+++ b/js/src/jit/Label.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Label_h
+#define jit_Label_h
+
+#include "jit/Ion.h"
+
+namespace js {
+namespace jit {
+
+struct LabelBase
+{
+ protected:
+ // offset_ >= 0 means that the label is either bound or has incoming
+ // uses and needs to be bound.
+ int32_t offset_ : 31;
+ bool bound_ : 1;
+
+ // Disallow assignment.
+ void operator =(const LabelBase& label);
+ public:
+ static const int32_t INVALID_OFFSET = -1;
+
+ LabelBase() : offset_(INVALID_OFFSET), bound_(false)
+ { }
+
+ // If the label is bound, all incoming edges have been patched and any
+ // future incoming edges will be immediately patched.
+ bool bound() const {
+ return bound_;
+ }
+ int32_t offset() const {
+ MOZ_ASSERT(bound() || used());
+ return offset_;
+ }
+ void offsetBy(int32_t delta) {
+ MOZ_ASSERT(bound() || used());
+ MOZ_ASSERT(offset() + delta >= offset(), "no overflow");
+ mozilla::DebugOnly<int32_t> oldOffset(offset());
+ offset_ += delta;
+ MOZ_ASSERT(offset_ == delta + oldOffset, "new offset fits in 31 bits");
+ }
+ // Returns whether the label is not bound, but has incoming uses.
+ bool used() const {
+ return !bound() && offset_ > INVALID_OFFSET;
+ }
+ // Binds the label, fixing its final position in the code stream.
+ void bind(int32_t offset) {
+ MOZ_ASSERT(!bound());
+ offset_ = offset;
+ bound_ = true;
+ MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
+ }
+ // Marks the label as neither bound nor used.
+ void reset() {
+ offset_ = INVALID_OFFSET;
+ bound_ = false;
+ }
+ // Sets the label's latest used position, returning the old use position in
+ // the process.
+ int32_t use(int32_t offset) {
+ MOZ_ASSERT(!bound());
+
+ int32_t old = offset_;
+ offset_ = offset;
+ MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
+
+ return old;
+ }
+};
+
+// A label represents a position in an assembly buffer that may or may not have
+// already been generated. Labels can either be "bound" or "unbound", the
+// former meaning that its position is known and the latter that its position
+// is not yet known.
+//
+// A jump to an unbound label adds that jump to the label's incoming queue. A
+// jump to a bound label automatically computes the jump distance. The process
+// of binding a label automatically corrects all incoming jumps.
+class Label : public LabelBase
+{
+ public:
+ ~Label()
+ {
+#ifdef DEBUG
+ // The assertion below doesn't hold if an error occurred.
+ JitContext* context = MaybeGetJitContext();
+ bool hadError = js::oom::HadSimulatedOOM() ||
+ (context && context->runtime && context->runtime->hadOutOfMemory());
+ MOZ_ASSERT_IF(!hadError, !used());
+#endif
+ }
+};
+
+// Label's destructor asserts that if it has been used it has also been bound.
+// In the case of long-lived labels, however, failed compilation (e.g. OOM) will
+// trigger this failure innocuously. This Label silences the assertion.
+class NonAssertingLabel : public Label
+{
+ public:
+ ~NonAssertingLabel()
+ {
+#ifdef DEBUG
+ if (used())
+ bind(0);
+#endif
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_Label_h
diff --git a/js/src/jit/Linker.cpp b/js/src/jit/Linker.cpp
new file mode 100644
index 000000000..33427464c
--- /dev/null
+++ b/js/src/jit/Linker.cpp
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Linker.h"
+
+#include "gc/StoreBuffer-inl.h"
+
+namespace js {
+namespace jit {
+
+template <AllowGC allowGC>
+JitCode*
+Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = false */)
+{
+ MOZ_ASSERT(masm.numSymbolicAccesses() == 0);
+ MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
+
+ gc::AutoSuppressGC suppressGC(cx);
+ if (masm.oom())
+ return fail(cx);
+
+ ExecutablePool* pool;
+ size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode*) + CodeAlignment;
+ if (bytesNeeded >= MAX_BUFFER_SIZE)
+ return fail(cx);
+
+ // ExecutableAllocator requires bytesNeeded to be word-size aligned.
+ bytesNeeded = AlignBytes(bytesNeeded, sizeof(void*));
+
+ ExecutableAllocator& execAlloc = hasPatchableBackedges
+ ? cx->runtime()->jitRuntime()->backedgeExecAlloc()
+ : cx->runtime()->jitRuntime()->execAlloc();
+ uint8_t* result = (uint8_t*)execAlloc.alloc(bytesNeeded, &pool, kind);
+ if (!result)
+ return fail(cx);
+
+ // The JitCode pointer will be stored right before the code buffer.
+ uint8_t* codeStart = result + sizeof(JitCode*);
+
+ // Bump the code up to a nice alignment.
+ codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
+ uint32_t headerSize = codeStart - result;
+ JitCode* code = JitCode::New<allowGC>(cx, codeStart, bytesNeeded - headerSize,
+ headerSize, pool, kind);
+ if (!code)
+ return nullptr;
+ if (masm.oom())
+ return fail(cx);
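+    // Make the freshly allocated executable pages writable while the code is
+    // copied and linked below.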
+ awjc.emplace(result, bytesNeeded);
+ code->copyFrom(masm);
+ masm.link(code);
+ if (masm.embedsNurseryPointers())
+ cx->runtime()->gc.storeBuffer.putWholeCell(code);
+ return code;
+}
+
+template JitCode* Linker::newCode<CanGC>(JSContext* cx, CodeKind kind, bool hasPatchableBackedges);
+template JitCode* Linker::newCode<NoGC>(JSContext* cx, CodeKind kind, bool hasPatchableBackedges);
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/Linker.h b/js/src/jit/Linker.h
new file mode 100644
index 000000000..d8574cdcb
--- /dev/null
+++ b/js/src/jit/Linker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Linker_h
+#define jit_Linker_h
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsgc.h"
+
+#include "jit/ExecutableAllocator.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+class Linker
+{
+ MacroAssembler& masm;
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+
+ JitCode* fail(JSContext* cx) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ public:
+ explicit Linker(MacroAssembler& masm)
+ : masm(masm)
+ {
+ masm.finish();
+ }
+
+ template <AllowGC allowGC>
+ JitCode* newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges = false);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Linker_h */
diff --git a/js/src/jit/LoopUnroller.cpp b/js/src/jit/LoopUnroller.cpp
new file mode 100644
index 000000000..5481d6978
--- /dev/null
+++ b/js/src/jit/LoopUnroller.cpp
@@ -0,0 +1,408 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/LoopUnroller.h"
+
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::ArrayLength;
+
+namespace {
+
+struct LoopUnroller
+{
+ typedef HashMap<MDefinition*, MDefinition*,
+ PointerHasher<MDefinition*, 2>, SystemAllocPolicy> DefinitionMap;
+
+ explicit LoopUnroller(MIRGraph& graph)
+ : graph(graph), alloc(graph.alloc()),
+ header(nullptr), backedge(nullptr),
+ unrolledHeader(nullptr), unrolledBackedge(nullptr),
+ oldPreheader(nullptr), newPreheader(nullptr)
+ {}
+
+ MIRGraph& graph;
+ TempAllocator& alloc;
+
+ // Header and body of the original loop.
+ MBasicBlock* header;
+ MBasicBlock* backedge;
+
+ // Header and body of the unrolled loop.
+ MBasicBlock* unrolledHeader;
+ MBasicBlock* unrolledBackedge;
+
+ // Old and new preheaders. The old preheader starts out associated with the
+ // original loop, but becomes the preheader of the new loop. The new
+ // preheader will be given to the original loop.
+ MBasicBlock* oldPreheader;
+ MBasicBlock* newPreheader;
+
+ // Map terms in the original loop to terms in the current unrolled iteration.
+ DefinitionMap unrolledDefinitions;
+
+ MDefinition* getReplacementDefinition(MDefinition* def);
+ MResumePoint* makeReplacementResumePoint(MBasicBlock* block, MResumePoint* rp);
+ bool makeReplacementInstruction(MInstruction* ins);
+
+ void go(LoopIterationBound* bound);
+};
+
+} // namespace
+
+MDefinition*
+LoopUnroller::getReplacementDefinition(MDefinition* def)
+{
+ if (def->block()->id() < header->id()) {
+ // The definition is loop invariant.
+ return def;
+ }
+
+ DefinitionMap::Ptr p = unrolledDefinitions.lookup(def);
+ if (!p) {
+ // After phi analysis (TypeAnalyzer::replaceRedundantPhi) the resume
+ // point at the start of a block can contain definitions from within
+ // the block itself.
+ MOZ_ASSERT(def->isConstant());
+
+ MConstant* constant = MConstant::Copy(alloc, def->toConstant());
+ oldPreheader->insertBefore(*oldPreheader->begin(), constant);
+ return constant;
+ }
+
+ return p->value();
+}
+
+bool
+LoopUnroller::makeReplacementInstruction(MInstruction* ins)
+{
+ MDefinitionVector inputs(alloc);
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ MDefinition* old = ins->getOperand(i);
+ MDefinition* replacement = getReplacementDefinition(old);
+ if (!inputs.append(replacement))
+ return false;
+ }
+
+ MInstruction* clone = ins->clone(alloc, inputs);
+
+ unrolledBackedge->add(clone);
+
+ if (!unrolledDefinitions.putNew(ins, clone))
+ return false;
+
+ if (MResumePoint* old = ins->resumePoint()) {
+ MResumePoint* rp = makeReplacementResumePoint(unrolledBackedge, old);
+ clone->setResumePoint(rp);
+ }
+
+ return true;
+}
+
+MResumePoint*
+LoopUnroller::makeReplacementResumePoint(MBasicBlock* block, MResumePoint* rp)
+{
+ MDefinitionVector inputs(alloc);
+ for (size_t i = 0; i < rp->numOperands(); i++) {
+ MDefinition* old = rp->getOperand(i);
+ MDefinition* replacement = old->isUnused() ? old : getReplacementDefinition(old);
+ if (!inputs.append(replacement))
+ return nullptr;
+ }
+
+ MResumePoint* clone = MResumePoint::New(alloc, block, rp, inputs);
+ if (!clone)
+ return nullptr;
+
+ return clone;
+}
+
+void
+LoopUnroller::go(LoopIterationBound* bound)
+{
+ // For now we always unroll loops the same number of times.
+ static const size_t UnrollCount = 10;
+
+ JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");
+
+ header = bound->header;
+
+ // UCE might have determined this isn't actually a loop.
+ if (!header->isLoopHeader())
+ return;
+
+ backedge = header->backedge();
+ oldPreheader = header->loopPredecessor();
+
+ MOZ_ASSERT(oldPreheader->numSuccessors() == 1);
+
+ // Only unroll loops with two blocks: an initial one ending with the
+ // bound's test, and the body ending with the backedge.
+ MTest* test = bound->test;
+ if (header->lastIns() != test)
+ return;
+ if (test->ifTrue() == backedge) {
+ if (test->ifFalse()->id() <= backedge->id())
+ return;
+ } else if (test->ifFalse() == backedge) {
+ if (test->ifTrue()->id() <= backedge->id())
+ return;
+ } else {
+ return;
+ }
+ if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
+ return;
+ MOZ_ASSERT(backedge->phisEmpty());
+
+ MBasicBlock* bodyBlocks[] = { header, backedge };
+
+ // All instructions in the header and body must be clonable.
+ for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
+ MBasicBlock* block = bodyBlocks[i];
+ for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
+ MInstruction* ins = *iter;
+ if (ins->canClone())
+ continue;
+ if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
+ continue;
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
+#endif
+ return;
+ }
+ }
+
+ // Compute the linear inequality we will use for exiting the unrolled loop:
+ //
+ // iterationBound - iterationCount - UnrollCount >= 0
+ //
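+    // boundSum gives the loop's iteration bound and currentSum the current
+    // iteration count, so the inequality is formed by subtracting currentSum
+    // and the constant UnrollCount from boundSum.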
+ LinearSum remainingIterationsInequality(bound->boundSum);
+ if (!remainingIterationsInequality.add(bound->currentSum, -1))
+ return;
+ if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
+ return;
+
+ // Terms in the inequality need to be either loop invariant or phis from
+ // the original header.
+ for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
+ MDefinition* def = remainingIterationsInequality.term(i).term;
+ if (def->isDiscarded())
+ return;
+ if (def->block()->id() < header->id())
+ continue;
+ if (def->block() == header && def->isPhi())
+ continue;
+ return;
+ }
+
+ // OK, we've checked everything, now unroll the loop.
+
+ JitSpew(JitSpew_Unrolling, "Unrolling loop");
+
+ // The old preheader will go before the unrolled loop, and the old loop
+ // will need a new empty preheader.
+ const CompileInfo& info = oldPreheader->info();
+ if (header->trackedPc()) {
+ unrolledHeader =
+ MBasicBlock::New(graph, nullptr, info,
+ oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
+ unrolledBackedge =
+ MBasicBlock::New(graph, nullptr, info,
+ unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
+ newPreheader =
+ MBasicBlock::New(graph, nullptr, info,
+ unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
+ } else {
+ unrolledHeader = MBasicBlock::New(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
+ unrolledBackedge = MBasicBlock::New(graph, info, unrolledHeader, MBasicBlock::NORMAL);
+ newPreheader = MBasicBlock::New(graph, info, unrolledHeader, MBasicBlock::NORMAL);
+ }
+
+ unrolledHeader->discardAllResumePoints();
+ unrolledBackedge->discardAllResumePoints();
+ newPreheader->discardAllResumePoints();
+
+ // Insert new blocks at their RPO position, and update block ids.
+ graph.insertBlockAfter(oldPreheader, unrolledHeader);
+ graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
+ graph.insertBlockAfter(unrolledBackedge, newPreheader);
+ graph.renumberBlocksAfter(oldPreheader);
+
+ // We don't tolerate allocation failure after this point.
+    // TODO: This is a bit drastic; is it possible to improve this?
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ if (!unrolledDefinitions.init())
+ oomUnsafe.crash("LoopUnroller::go");
+
+ // Add phis to the unrolled loop header which correspond to the phis in the
+ // original loop header.
+ MOZ_ASSERT(header->getPredecessor(0) == oldPreheader);
+ for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
+ MPhi* old = *iter;
+ MOZ_ASSERT(old->numOperands() == 2);
+ MPhi* phi = MPhi::New(alloc);
+ phi->setResultType(old->type());
+ phi->setResultTypeSet(old->resultTypeSet());
+ phi->setRange(old->range());
+
+ unrolledHeader->addPhi(phi);
+
+ if (!phi->reserveLength(2))
+ oomUnsafe.crash("LoopUnroller::go");
+
+ // Set the first input for the phi for now. We'll set the second after
+ // finishing the unroll.
+ phi->addInput(old->getOperand(0));
+
+ // The old phi will now take the value produced by the unrolled loop.
+ old->replaceOperand(0, phi);
+
+ if (!unrolledDefinitions.putNew(old, phi))
+ oomUnsafe.crash("LoopUnroller::go");
+ }
+
+ // The loop condition can bail out on e.g. integer overflow, so make a
+ // resume point based on the initial resume point of the original header.
+ MResumePoint* headerResumePoint = header->entryResumePoint();
+ if (headerResumePoint) {
+ MResumePoint* rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
+ if (!rp)
+ oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
+ unrolledHeader->setEntryResumePoint(rp);
+
+ // Perform an interrupt check at the start of the unrolled loop.
+ unrolledHeader->add(MInterruptCheck::New(alloc));
+ }
+
+ // Generate code for the test in the unrolled loop.
+ for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
+ MDefinition* def = remainingIterationsInequality.term(i).term;
+ MDefinition* replacement = getReplacementDefinition(def);
+ remainingIterationsInequality.replaceTerm(i, replacement);
+ }
+ MCompare* compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
+ MTest* unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
+ unrolledHeader->end(unrolledTest);
+
+ // Make an entry resume point for the unrolled body. The unrolled header
+ // does not have side effects on stack values, even if the original loop
+ // header does, so use the same resume point as for the unrolled header.
+ if (headerResumePoint) {
+ MResumePoint* rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
+ if (!rp)
+ oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
+ unrolledBackedge->setEntryResumePoint(rp);
+ }
+
+    // Make an entry resume point for the new preheader. No instructions use
+    // it, but other parts of the compiler expect every block to have one.
+ if (headerResumePoint) {
+ MResumePoint* rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
+ if (!rp)
+ oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
+ newPreheader->setEntryResumePoint(rp);
+ }
+
+ // Generate the unrolled code.
+ MOZ_ASSERT(UnrollCount > 1);
+ size_t unrollIndex = 0;
+ while (true) {
+ // Clone the contents of the original loop into the unrolled loop body.
+ for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
+ MBasicBlock* block = bodyBlocks[i];
+ for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
+ MInstruction* ins = *iter;
+ if (ins->canClone()) {
+ if (!makeReplacementInstruction(*iter))
+ oomUnsafe.crash("LoopUnroller::makeReplacementDefinition");
+ } else {
+ // Control instructions are handled separately.
+ MOZ_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
+ }
+ }
+ }
+
+ // Compute the value of each loop header phi after the execution of
+ // this unrolled iteration.
+ MDefinitionVector phiValues(alloc);
+ MOZ_ASSERT(header->getPredecessor(1) == backedge);
+ for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
+ MPhi* old = *iter;
+ MDefinition* oldInput = old->getOperand(1);
+ if (!phiValues.append(getReplacementDefinition(oldInput)))
+ oomUnsafe.crash("LoopUnroller::go");
+ }
+
+ unrolledDefinitions.clear();
+
+ if (unrollIndex == UnrollCount - 1) {
+ // We're at the end of the last unrolled iteration, set the
+ // backedge input for the unrolled loop phis.
+ size_t phiIndex = 0;
+ for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
+ MPhi* phi = *iter;
+ phi->addInput(phiValues[phiIndex++]);
+ }
+ MOZ_ASSERT(phiIndex == phiValues.length());
+ break;
+ }
+
+ // Update the map for the phis in the next iteration.
+ size_t phiIndex = 0;
+ for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
+ MPhi* old = *iter;
+ if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
+ oomUnsafe.crash("LoopUnroller::go");
+ }
+ MOZ_ASSERT(phiIndex == phiValues.length());
+
+ unrollIndex++;
+ }
+
+ MGoto* backedgeJump = MGoto::New(alloc, unrolledHeader);
+ unrolledBackedge->end(backedgeJump);
+
+ // Place the old preheader before the unrolled loop.
+ MOZ_ASSERT(oldPreheader->lastIns()->isGoto());
+ oldPreheader->discardLastIns();
+ oldPreheader->end(MGoto::New(alloc, unrolledHeader));
+
+ // Place the new preheader before the original loop.
+ newPreheader->end(MGoto::New(alloc, header));
+
+ // Cleanup the MIR graph.
+ if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
+ oomUnsafe.crash("LoopUnroller::go");
+ header->replacePredecessor(oldPreheader, newPreheader);
+ oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
+ newPreheader->setSuccessorWithPhis(header, 0);
+ unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
+}
+
+bool
+jit::UnrollLoops(MIRGraph& graph, const LoopIterationBoundVector& bounds)
+{
+ if (bounds.empty())
+ return true;
+
+ for (size_t i = 0; i < bounds.length(); i++) {
+ LoopUnroller unroller(graph);
+ unroller.go(bounds[i]);
+ }
+
+ // The MIR graph is valid, but now has several new blocks. Update or
+ // recompute previous analysis information for the remaining optimization
+ // passes.
+ ClearDominatorTree(graph);
+ if (!BuildDominatorTree(graph))
+ return false;
+
+ return true;
+}
diff --git a/js/src/jit/LoopUnroller.h b/js/src/jit/LoopUnroller.h
new file mode 100644
index 000000000..10377d8fc
--- /dev/null
+++ b/js/src/jit/LoopUnroller.h
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LoopUnroller_h
+#define jit_LoopUnroller_h
+
+#include "jit/RangeAnalysis.h"
+
+namespace js {
+namespace jit {
+
+MOZ_MUST_USE bool
+UnrollLoops(MIRGraph& graph, const LoopIterationBoundVector& bounds);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_LoopUnroller_h
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
new file mode 100644
index 000000000..13e50820e
--- /dev/null
+++ b/js/src/jit/Lowering.cpp
@@ -0,0 +1,4930 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Lowering.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#include "jsobjinlines.h"
+#include "jsopcodeinlines.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::DebugOnly;
+using JS::GenericNaN;
+
+LBoxAllocation
+LIRGenerator::useBoxFixedAtStart(MDefinition* mir, ValueOperand op)
+{
+#if defined(JS_NUNBOX32)
+ return useBoxFixed(mir, op.typeReg(), op.payloadReg(), true);
+#elif defined(JS_PUNBOX64)
+ return useBoxFixed(mir, op.valueReg(), op.scratchReg(), true);
+#endif
+}
+
+LBoxAllocation
+LIRGenerator::useBoxAtStart(MDefinition* mir, LUse::Policy policy)
+{
+ return useBox(mir, policy, /* useAtStart = */ true);
+}
+
+void
+LIRGenerator::visitCloneLiteral(MCloneLiteral* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->input()->type() == MIRType::Object);
+
+ LCloneLiteral* lir = new(alloc()) LCloneLiteral(useRegisterAtStart(ins->input()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitParameter(MParameter* param)
+{
+ ptrdiff_t offset;
+ if (param->index() == MParameter::THIS_SLOT)
+ offset = THIS_FRAME_ARGSLOT;
+ else
+ offset = 1 + param->index();
+
+ LParameter* ins = new(alloc()) LParameter;
+ defineBox(ins, param, LDefinition::FIXED);
+
+ offset *= sizeof(Value);
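+    // Parameters live in fixed stack slots (LArgument). On NUNBOX32 the two
+    // words of the boxed Value are assigned in endian-dependent order; on
+    // PUNBOX64 a single word suffices.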
+#if defined(JS_NUNBOX32)
+# if MOZ_BIG_ENDIAN
+ ins->getDef(0)->setOutput(LArgument(offset));
+ ins->getDef(1)->setOutput(LArgument(offset + 4));
+# else
+ ins->getDef(0)->setOutput(LArgument(offset + 4));
+ ins->getDef(1)->setOutput(LArgument(offset));
+# endif
+#elif defined(JS_PUNBOX64)
+ ins->getDef(0)->setOutput(LArgument(offset));
+#endif
+}
+
+void
+LIRGenerator::visitCallee(MCallee* ins)
+{
+ define(new(alloc()) LCallee(), ins);
+}
+
+void
+LIRGenerator::visitIsConstructing(MIsConstructing* ins)
+{
+ define(new(alloc()) LIsConstructing(), ins);
+}
+
+static void
+TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
+{
+ // Implicit interrupt checks require wasm signal handlers to be installed.
+ if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
+ return;
+
+ // To avoid triggering expensive interrupts (backedge patching) in
+ // requestMajorGC and requestMinorGC, use an implicit interrupt check only
+    // if the loop body cannot trigger GC or affect GC state such as the store
+    // buffer. We do this by checking that there are no safepoints attached to LIR
+ // instructions inside the loop.
+
+ MBasicBlockIterator block = graph.begin(backedge->loopHeaderOfBackedge());
+ LInterruptCheck* check = nullptr;
+ while (true) {
+ LBlock* lir = block->lir();
+ for (LInstructionIterator iter = lir->begin(); iter != lir->end(); iter++) {
+ if (iter->isInterruptCheck()) {
+ if (!check) {
+ MOZ_ASSERT(*block == backedge->loopHeaderOfBackedge());
+ check = iter->toInterruptCheck();
+ }
+ continue;
+ }
+
+ MOZ_ASSERT_IF(iter->isPostWriteBarrierO() || iter->isPostWriteBarrierV(),
+ iter->safepoint());
+
+ if (iter->safepoint())
+ return;
+ }
+ if (*block == backedge)
+ break;
+ block++;
+ }
+
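+    // No safepoints were found in the loop. |check| should be the interrupt
+    // check of the loop header (Ion adds one to every loop header), so it can
+    // be marked as implicit.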
+ check->setImplicit();
+}
+
+void
+LIRGenerator::visitGoto(MGoto* ins)
+{
+ if (!gen->compilingWasm() && ins->block()->isLoopBackedge())
+ TryToUseImplicitInterruptCheck(graph, ins->block());
+
+ add(new(alloc()) LGoto(ins->target()));
+}
+
+void
+LIRGenerator::visitTableSwitch(MTableSwitch* tableswitch)
+{
+ MDefinition* opd = tableswitch->getOperand(0);
+
+ // There should be at least 1 successor. The default case!
+ MOZ_ASSERT(tableswitch->numSuccessors() > 0);
+
+ // If there are no cases, the default case is always taken.
+ if (tableswitch->numSuccessors() == 1) {
+ add(new(alloc()) LGoto(tableswitch->getDefault()));
+ return;
+ }
+
+    // If we don't know the type, use the generic boxed-value table switch.
+ if (opd->type() == MIRType::Value) {
+ LTableSwitchV* lir = newLTableSwitchV(tableswitch);
+ add(lir);
+ return;
+ }
+
+ // Case indices are numeric, so other types will always go to the default case.
+ if (opd->type() != MIRType::Int32 && opd->type() != MIRType::Double) {
+ add(new(alloc()) LGoto(tableswitch->getDefault()));
+ return;
+ }
+
+    // Emit an LTableSwitch, capable of handling either an integer or
+    // floating-point index.
+ LAllocation index;
+ LDefinition tempInt;
+ if (opd->type() == MIRType::Int32) {
+ index = useRegisterAtStart(opd);
+ tempInt = tempCopy(opd, 0);
+ } else {
+ index = useRegister(opd);
+ tempInt = temp(LDefinition::GENERAL);
+ }
+ add(newLTableSwitch(index, tempInt, tableswitch));
+}
+
+void
+LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed* ins)
+{
+ LCheckOverRecursed* lir = new(alloc()) LCheckOverRecursed();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDefVar(MDefVar* ins)
+{
+ LDefVar* lir = new(alloc()) LDefVar(useRegisterAtStart(ins->environmentChain()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDefLexical(MDefLexical* ins)
+{
+ LDefLexical* lir = new(alloc()) LDefLexical();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDefFun(MDefFun* ins)
+{
+ MDefinition* fun = ins->fun();
+ MOZ_ASSERT(fun->type() == MIRType::Object);
+
+ LDefFun* lir = new(alloc()) LDefFun(useRegisterAtStart(fun),
+ useRegisterAtStart(ins->environmentChain()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewArray(MNewArray* ins)
+{
+ LNewArray* lir = new(alloc()) LNewArray(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewArrayCopyOnWrite(MNewArrayCopyOnWrite* ins)
+{
+ LNewArrayCopyOnWrite* lir = new(alloc()) LNewArrayCopyOnWrite(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewArrayDynamicLength(MNewArrayDynamicLength* ins)
+{
+ MDefinition* length = ins->length();
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+
+ LNewArrayDynamicLength* lir = new(alloc()) LNewArrayDynamicLength(useRegister(length), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewTypedArray(MNewTypedArray* ins)
+{
+ LNewTypedArray* lir = new(alloc()) LNewTypedArray(temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewTypedArrayDynamicLength(MNewTypedArrayDynamicLength* ins)
+{
+ MDefinition* length = ins->length();
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+
+ LNewTypedArrayDynamicLength* lir = new(alloc()) LNewTypedArrayDynamicLength(useRegister(length), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewObject(MNewObject* ins)
+{
+ LNewObject* lir = new(alloc()) LNewObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewTypedObject(MNewTypedObject* ins)
+{
+ LNewTypedObject* lir = new(alloc()) LNewTypedObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewNamedLambdaObject(MNewNamedLambdaObject* ins)
+{
+ LNewNamedLambdaObject* lir = new(alloc()) LNewNamedLambdaObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewCallObject(MNewCallObject* ins)
+{
+ LNewCallObject* lir = new(alloc()) LNewCallObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewSingletonCallObject(MNewSingletonCallObject* ins)
+{
+ LNewSingletonCallObject* lir = new(alloc()) LNewSingletonCallObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewDerivedTypedObject(MNewDerivedTypedObject* ins)
+{
+ LNewDerivedTypedObject* lir =
+ new(alloc()) LNewDerivedTypedObject(useRegisterAtStart(ins->type()),
+ useRegisterAtStart(ins->owner()),
+ useRegisterAtStart(ins->offset()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNewStringObject(MNewStringObject* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::String);
+
+ LNewStringObject* lir = new(alloc()) LNewStringObject(useRegister(ins->input()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitInitElem(MInitElem* ins)
+{
+ LInitElem* lir = new(alloc()) LInitElem(useRegisterAtStart(ins->getObject()),
+ useBoxAtStart(ins->getId()),
+ useBoxAtStart(ins->getValue()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitInitElemGetterSetter(MInitElemGetterSetter* ins)
+{
+ LInitElemGetterSetter* lir =
+ new(alloc()) LInitElemGetterSetter(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->idValue()),
+ useRegisterAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitMutateProto(MMutateProto* ins)
+{
+ LMutateProto* lir = new(alloc()) LMutateProto(useRegisterAtStart(ins->getObject()),
+ useBoxAtStart(ins->getValue()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitInitProp(MInitProp* ins)
+{
+ LInitProp* lir = new(alloc()) LInitProp(useRegisterAtStart(ins->getObject()),
+ useBoxAtStart(ins->getValue()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitInitPropGetterSetter(MInitPropGetterSetter* ins)
+{
+ LInitPropGetterSetter* lir =
+ new(alloc()) LInitPropGetterSetter(useRegisterAtStart(ins->object()),
+ useRegisterAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCreateThisWithTemplate(MCreateThisWithTemplate* ins)
+{
+ LCreateThisWithTemplate* lir = new(alloc()) LCreateThisWithTemplate(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCreateThisWithProto(MCreateThisWithProto* ins)
+{
+ LCreateThisWithProto* lir =
+ new(alloc()) LCreateThisWithProto(useRegisterOrConstantAtStart(ins->getCallee()),
+ useRegisterOrConstantAtStart(ins->getNewTarget()),
+ useRegisterOrConstantAtStart(ins->getPrototype()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCreateThis(MCreateThis* ins)
+{
+ LCreateThis* lir = new(alloc()) LCreateThis(useRegisterOrConstantAtStart(ins->getCallee()),
+ useRegisterOrConstantAtStart(ins->getNewTarget()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCreateArgumentsObject(MCreateArgumentsObject* ins)
+{
+ LAllocation callObj = useFixedAtStart(ins->getCallObject(), CallTempReg0);
+ LCreateArgumentsObject* lir = new(alloc()) LCreateArgumentsObject(callObj, tempFixed(CallTempReg1),
+ tempFixed(CallTempReg2),
+ tempFixed(CallTempReg3));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins)
+{
+ LAllocation argsObj = useRegister(ins->getArgsObject());
+ LGetArgumentsObjectArg* lir = new(alloc()) LGetArgumentsObjectArg(argsObj, temp());
+ defineBox(lir, ins);
+}
+
+void
+LIRGenerator::visitSetArgumentsObjectArg(MSetArgumentsObjectArg* ins)
+{
+ LAllocation argsObj = useRegister(ins->getArgsObject());
+ LSetArgumentsObjectArg* lir =
+ new(alloc()) LSetArgumentsObjectArg(argsObj, useBox(ins->getValue()), temp());
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitReturnFromCtor(MReturnFromCtor* ins)
+{
+ LReturnFromCtor* lir = new(alloc()) LReturnFromCtor(useBox(ins->getValue()),
+ useRegister(ins->getObject()));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitComputeThis(MComputeThis* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+ MOZ_ASSERT(ins->input()->type() == MIRType::Value);
+
+ // Don't use useBoxAtStart because ComputeThis has a safepoint and needs to
+ // have its inputs in different registers than its return value so that
+ // they aren't clobbered.
+ LComputeThis* lir = new(alloc()) LComputeThis(useBox(ins->input()));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitArrowNewTarget(MArrowNewTarget* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+ MOZ_ASSERT(ins->callee()->type() == MIRType::Object);
+
+ LArrowNewTarget* lir = new(alloc()) LArrowNewTarget(useRegister(ins->callee()));
+ defineBox(lir, ins);
+}
+
+bool
+LIRGenerator::lowerCallArguments(MCall* call)
+{
+ uint32_t argc = call->numStackArgs();
+
+ // Align the arguments of a call such that the callee would keep the same
+ // alignment as the caller.
+ uint32_t baseSlot = 0;
+ if (JitStackValueAlignment > 1)
+ baseSlot = AlignBytes(argc, JitStackValueAlignment);
+ else
+ baseSlot = argc;
+
+    // Record the maximum number of argument slots, so that a single frame
+    // size can cover every call.
+ if (baseSlot > maxargslots_)
+ maxargslots_ = baseSlot;
+
+ for (size_t i = 0; i < argc; i++) {
+ MDefinition* arg = call->getArg(i);
+ uint32_t argslot = baseSlot - i;
+
+ // Values take a slow path.
+ if (arg->type() == MIRType::Value) {
+ LStackArgV* stack = new(alloc()) LStackArgV(argslot, useBox(arg));
+ add(stack);
+ } else {
+ // Known types can move constant types and/or payloads.
+ LStackArgT* stack = new(alloc()) LStackArgT(argslot, arg->type(), useRegisterOrConstant(arg));
+ add(stack);
+ }
+
+ if (!alloc().ensureBallast())
+ return false;
+ }
+ return true;
+}
+
+void
+LIRGenerator::visitCall(MCall* call)
+{
+ MOZ_ASSERT(CallTempReg0 != CallTempReg1);
+ MOZ_ASSERT(CallTempReg0 != ArgumentsRectifierReg);
+ MOZ_ASSERT(CallTempReg1 != ArgumentsRectifierReg);
+ MOZ_ASSERT(call->getFunction()->type() == MIRType::Object);
+
+ // In case of oom, skip the rest of the allocations.
+ if (!lowerCallArguments(call)) {
+ gen->abort("OOM: LIRGenerator::visitCall");
+ return;
+ }
+
+ WrappedFunction* target = call->getSingleTarget();
+
+ LInstruction* lir;
+
+ if (call->isCallDOMNative()) {
+ // Call DOM functions.
+ MOZ_ASSERT(target && target->isNative());
+ Register cxReg, objReg, privReg, argsReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &argsReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+ lir = new(alloc()) LCallDOMNative(tempFixed(cxReg), tempFixed(objReg),
+ tempFixed(privReg), tempFixed(argsReg));
+ } else if (target) {
+ // Call known functions.
+ if (target->isNative()) {
+ Register cxReg, numReg, vpReg, tmpReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &numReg);
+ GetTempRegForIntArg(2, 0, &vpReg);
+
+ // Even though this is just a temp reg, use the same API to avoid
+ // register collisions.
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &tmpReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+
+ lir = new(alloc()) LCallNative(tempFixed(cxReg), tempFixed(numReg),
+ tempFixed(vpReg), tempFixed(tmpReg));
+ } else {
+ lir = new(alloc()) LCallKnown(useFixedAtStart(call->getFunction(), CallTempReg0),
+ tempFixed(CallTempReg2));
+ }
+ } else {
+ // Call anything, using the most generic code.
+ lir = new(alloc()) LCallGeneric(useFixedAtStart(call->getFunction(), CallTempReg0),
+ tempFixed(ArgumentsRectifierReg),
+ tempFixed(CallTempReg2));
+ }
+ defineReturn(lir, call);
+ assignSafepoint(lir, call);
+}
+
+void
+LIRGenerator::visitApplyArgs(MApplyArgs* apply)
+{
+ MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
+
+    // Assert that we can build a rectifier frame.
+ MOZ_ASSERT(CallTempReg0 != ArgumentsRectifierReg);
+ MOZ_ASSERT(CallTempReg1 != ArgumentsRectifierReg);
+
+    // Assert that the return value registers are not clobbered.
+ MOZ_ASSERT(CallTempReg2 != JSReturnReg_Type);
+ MOZ_ASSERT(CallTempReg2 != JSReturnReg_Data);
+
+ LApplyArgsGeneric* lir = new(alloc()) LApplyArgsGeneric(
+ useFixedAtStart(apply->getFunction(), CallTempReg3),
+ useFixedAtStart(apply->getArgc(), CallTempReg0),
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg1), // object register
+ tempFixed(CallTempReg2)); // stack counter register
+
+ // Bailout is needed in the case of possible non-JSFunction callee or too
+ // many values in the arguments array. I'm going to use NonJSFunctionCallee
+ // for the code even if that is not an adequate description.
+ assignSnapshot(lir, Bailout_NonJSFunctionCallee);
+
+ defineReturn(lir, apply);
+ assignSafepoint(lir, apply);
+}
+
+void
+LIRGenerator::visitApplyArray(MApplyArray* apply)
+{
+ MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
+
+    // Assert that we can build a rectifier frame.
+ MOZ_ASSERT(CallTempReg0 != ArgumentsRectifierReg);
+ MOZ_ASSERT(CallTempReg1 != ArgumentsRectifierReg);
+
+    // Assert that the return value registers are not clobbered.
+ MOZ_ASSERT(CallTempReg2 != JSReturnReg_Type);
+ MOZ_ASSERT(CallTempReg2 != JSReturnReg_Data);
+
+ LApplyArrayGeneric* lir = new(alloc()) LApplyArrayGeneric(
+ useFixedAtStart(apply->getFunction(), CallTempReg3),
+ useFixedAtStart(apply->getElements(), CallTempReg0),
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg1), // object register
+ tempFixed(CallTempReg2)); // stack counter register
+
+ // Bailout is needed in the case of possible non-JSFunction callee,
+ // too many values in the array, or empty space at the end of the
+ // array. I'm going to use NonJSFunctionCallee for the code even
+ // if that is not an adequate description.
+ assignSnapshot(lir, Bailout_NonJSFunctionCallee);
+
+ defineReturn(lir, apply);
+ assignSafepoint(lir, apply);
+}
+
+void
+LIRGenerator::visitBail(MBail* bail)
+{
+ LBail* lir = new(alloc()) LBail();
+ assignSnapshot(lir, bail->bailoutKind());
+ add(lir, bail);
+}
+
+void
+LIRGenerator::visitUnreachable(MUnreachable* unreachable)
+{
+ LUnreachable* lir = new(alloc()) LUnreachable();
+ add(lir, unreachable);
+}
+
+void
+LIRGenerator::visitEncodeSnapshot(MEncodeSnapshot* mir)
+{
+ LEncodeSnapshot* lir = new(alloc()) LEncodeSnapshot();
+ assignSnapshot(lir, Bailout_Inevitable);
+ add(lir, mir);
+}
+
+void
+LIRGenerator::visitAssertFloat32(MAssertFloat32* assertion)
+{
+ MIRType type = assertion->input()->type();
+ DebugOnly<bool> checkIsFloat32 = assertion->mustBeFloat32();
+
+ if (type != MIRType::Value && !JitOptions.eagerCompilation) {
+ MOZ_ASSERT_IF(checkIsFloat32, type == MIRType::Float32);
+ MOZ_ASSERT_IF(!checkIsFloat32, type != MIRType::Float32);
+ }
+}
+
+void
+LIRGenerator::visitAssertRecoveredOnBailout(MAssertRecoveredOnBailout* assertion)
+{
+ MOZ_CRASH("AssertRecoveredOnBailout nodes are always recovered on bailouts.");
+}
+
+void
+LIRGenerator::visitArraySplice(MArraySplice* ins)
+{
+ LArraySplice* lir = new(alloc()) LArraySplice(useRegisterAtStart(ins->object()),
+ useRegisterAtStart(ins->start()),
+ useRegisterAtStart(ins->deleteCount()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGetDynamicName(MGetDynamicName* ins)
+{
+ MDefinition* envChain = ins->getEnvironmentChain();
+ MOZ_ASSERT(envChain->type() == MIRType::Object);
+
+ MDefinition* name = ins->getName();
+ MOZ_ASSERT(name->type() == MIRType::String);
+
+ LGetDynamicName* lir = new(alloc()) LGetDynamicName(useFixedAtStart(envChain, CallTempReg0),
+ useFixedAtStart(name, CallTempReg1),
+ tempFixed(CallTempReg2),
+ tempFixed(CallTempReg3),
+ tempFixed(CallTempReg4));
+
+ assignSnapshot(lir, Bailout_DynamicNameNotFound);
+ defineReturn(lir, ins);
+}
+
+void
+LIRGenerator::visitCallDirectEval(MCallDirectEval* ins)
+{
+ MDefinition* envChain = ins->getEnvironmentChain();
+ MOZ_ASSERT(envChain->type() == MIRType::Object);
+
+ MDefinition* string = ins->getString();
+ MOZ_ASSERT(string->type() == MIRType::String);
+
+ MDefinition* newTargetValue = ins->getNewTargetValue();
+
+ LInstruction* lir = new(alloc()) LCallDirectEval(useRegisterAtStart(envChain),
+ useRegisterAtStart(string),
+ useBoxAtStart(newTargetValue));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
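+// Put a constant operand, if any, on the right-hand side and reverse the
+// comparison op to preserve its meaning; lowering can then use the constant
+// operand directly (e.g. via useAnyOrConstant).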
+static JSOp
+ReorderComparison(JSOp op, MDefinition** lhsp, MDefinition** rhsp)
+{
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (lhs->maybeConstantValue()) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ return ReverseCompareOp(op);
+ }
+ return op;
+}
+
+void
+LIRGenerator::visitTest(MTest* test)
+{
+ MDefinition* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+    // A string operand is converted to its length during the type analysis
+    // phase (see TestPolicy).
+ MOZ_ASSERT(opd->type() != MIRType::String);
+
+ // Testing a constant.
+ if (MConstant* constant = opd->maybeConstantValue()) {
+ bool b;
+ if (constant->valueToBoolean(&b)) {
+ add(new(alloc()) LGoto(b ? ifTrue : ifFalse));
+ return;
+ }
+ }
+
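+    // Boxed values need a dynamic test. The extra temps are only required
+    // when the value might be an object that emulates undefined.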
+ if (opd->type() == MIRType::Value) {
+ LDefinition temp0, temp1;
+ if (test->operandMightEmulateUndefined()) {
+ temp0 = temp();
+ temp1 = temp();
+ } else {
+ temp0 = LDefinition::BogusTemp();
+ temp1 = LDefinition::BogusTemp();
+ }
+ LTestVAndBranch* lir =
+ new(alloc()) LTestVAndBranch(ifTrue, ifFalse, useBox(opd), tempDouble(), temp0, temp1);
+ add(lir, test);
+ return;
+ }
+
+ if (opd->type() == MIRType::ObjectOrNull) {
+ LDefinition temp0 = test->operandMightEmulateUndefined() ? temp() : LDefinition::BogusTemp();
+ add(new(alloc()) LTestOAndBranch(useRegister(opd), ifTrue, ifFalse, temp0), test);
+ return;
+ }
+
+    // Objects are truthy, except if they might emulate undefined.
+ if (opd->type() == MIRType::Object) {
+ if (test->operandMightEmulateUndefined())
+ add(new(alloc()) LTestOAndBranch(useRegister(opd), ifTrue, ifFalse, temp()), test);
+ else
+ add(new(alloc()) LGoto(ifTrue));
+ return;
+ }
+
+ // These must be explicitly sniffed out since they are constants and have
+ // no payload.
+ if (opd->type() == MIRType::Undefined || opd->type() == MIRType::Null) {
+ add(new(alloc()) LGoto(ifFalse));
+ return;
+ }
+
+ // All symbols are truthy.
+ if (opd->type() == MIRType::Symbol) {
+ add(new(alloc()) LGoto(ifTrue));
+ return;
+ }
+
+ // Check if the operand for this test is a compare operation. If it is, we want
+ // to emit an LCompare*AndBranch rather than an LTest*AndBranch, to fuse the
+ // compare and jump instructions.
+ if (opd->isCompare() && opd->isEmittedAtUses()) {
+ MCompare* comp = opd->toCompare();
+ MDefinition* left = comp->lhs();
+ MDefinition* right = comp->rhs();
+
+ // Try to fold the comparison so that we don't have to handle all cases.
+ bool result;
+ if (comp->tryFold(&result)) {
+ add(new(alloc()) LGoto(result ? ifTrue : ifFalse));
+ return;
+ }
+
+ // Emit LCompare*AndBranch.
+
+ // Compare and branch null/undefined.
+ // The second operand has known null/undefined type,
+ // so just test the first operand.
+ if (comp->compareType() == MCompare::Compare_Null ||
+ comp->compareType() == MCompare::Compare_Undefined)
+ {
+ if (left->type() == MIRType::Object || left->type() == MIRType::ObjectOrNull) {
+ MOZ_ASSERT(left->type() == MIRType::ObjectOrNull ||
+ comp->operandMightEmulateUndefined(),
+ "MCompare::tryFold should handle the never-emulates-undefined case");
+
+ LDefinition tmp =
+ comp->operandMightEmulateUndefined() ? temp() : LDefinition::BogusTemp();
+ LIsNullOrLikeUndefinedAndBranchT* lir =
+ new(alloc()) LIsNullOrLikeUndefinedAndBranchT(comp, useRegister(left),
+ ifTrue, ifFalse, tmp);
+ add(lir, test);
+ return;
+ }
+
+ LDefinition tmp, tmpToUnbox;
+ if (comp->operandMightEmulateUndefined()) {
+ tmp = temp();
+ tmpToUnbox = tempToUnbox();
+ } else {
+ tmp = LDefinition::BogusTemp();
+ tmpToUnbox = LDefinition::BogusTemp();
+ }
+
+ LIsNullOrLikeUndefinedAndBranchV* lir =
+ new(alloc()) LIsNullOrLikeUndefinedAndBranchV(comp, ifTrue, ifFalse, useBox(left),
+ tmp, tmpToUnbox);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch booleans.
+ if (comp->compareType() == MCompare::Compare_Boolean) {
+ MOZ_ASSERT(left->type() == MIRType::Value);
+ MOZ_ASSERT(right->type() == MIRType::Boolean);
+
+ LCompareBAndBranch* lir = new(alloc()) LCompareBAndBranch(comp, useBox(left),
+ useRegisterOrConstant(right),
+ ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch Int32 or Object pointers.
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_Object)
+ {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs;
+ if (comp->isInt32Comparison() || comp->compareType() == MCompare::Compare_UInt32)
+ rhs = useAnyOrConstant(right);
+ else
+ rhs = useRegister(right);
+ LCompareAndBranch* lir = new(alloc()) LCompareAndBranch(comp, op, lhs, rhs,
+ ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch Int64.
+ if (comp->compareType() == MCompare::Compare_Int64 ||
+ comp->compareType() == MCompare::Compare_UInt64)
+ {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ LCompareI64AndBranch* lir = new(alloc()) LCompareI64AndBranch(comp, op,
+ useInt64Register(left),
+ useInt64OrConstant(right),
+ ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch doubles.
+ if (comp->isDoubleComparison()) {
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs = useRegister(right);
+ LCompareDAndBranch* lir = new(alloc()) LCompareDAndBranch(comp, lhs, rhs,
+ ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch floats.
+ if (comp->isFloat32Comparison()) {
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs = useRegister(right);
+ LCompareFAndBranch* lir = new(alloc()) LCompareFAndBranch(comp, lhs, rhs,
+ ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare values.
+ if (comp->compareType() == MCompare::Compare_Bitwise) {
+ LCompareBitwiseAndBranch* lir =
+ new(alloc()) LCompareBitwiseAndBranch(comp, ifTrue, ifFalse,
+ useBoxAtStart(left),
+ useBoxAtStart(right));
+ add(lir, test);
+ return;
+ }
+ }
+
+ // Check if the operand for this test is a bitand operation. If it is, we want
+ // to emit an LBitAndAndBranch rather than an LTest*AndBranch.
+ if (opd->isBitAnd() && opd->isEmittedAtUses()) {
+ MDefinition* lhs = opd->getOperand(0);
+ MDefinition* rhs = opd->getOperand(1);
+ if (lhs->type() == MIRType::Int32 && rhs->type() == MIRType::Int32) {
+ ReorderCommutative(&lhs, &rhs, test);
+ lowerForBitAndAndBranch(new(alloc()) LBitAndAndBranch(ifTrue, ifFalse), test, lhs, rhs);
+ return;
+ }
+ }
+
+ if (opd->isIsObject() && opd->isEmittedAtUses()) {
+ MDefinition* input = opd->toIsObject()->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ LIsObjectAndBranch* lir = new(alloc()) LIsObjectAndBranch(ifTrue, ifFalse,
+ useBoxAtStart(input));
+ add(lir, test);
+ return;
+ }
+
+ if (opd->isIsNoIter()) {
+ MOZ_ASSERT(opd->isEmittedAtUses());
+
+ MDefinition* input = opd->toIsNoIter()->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ LIsNoIterAndBranch* lir = new(alloc()) LIsNoIterAndBranch(ifTrue, ifFalse,
+ useBox(input));
+ add(lir, test);
+ return;
+ }
+
+ switch (opd->type()) {
+ case MIRType::Double:
+ add(new(alloc()) LTestDAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Float32:
+ add(new(alloc()) LTestFAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Int32:
+ case MIRType::Boolean:
+ add(new(alloc()) LTestIAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Int64:
+ add(new(alloc()) LTestI64AndBranch(useInt64Register(opd), ifTrue, ifFalse));
+ break;
+ default:
+ MOZ_CRASH("Bad type");
+ }
+}
+
+void
+LIRGenerator::visitGotoWithFake(MGotoWithFake* gotoWithFake)
+{
+ add(new(alloc()) LGoto(gotoWithFake->target()));
+}
+
+void
+LIRGenerator::visitFunctionDispatch(MFunctionDispatch* ins)
+{
+ LFunctionDispatch* lir = new(alloc()) LFunctionDispatch(useRegister(ins->input()));
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitObjectGroupDispatch(MObjectGroupDispatch* ins)
+{
+ LObjectGroupDispatch* lir = new(alloc()) LObjectGroupDispatch(useRegister(ins->input()), temp());
+ add(lir, ins);
+}
+
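+// A compare can be emitted at its uses only when every consumer is an MTest
+// and there is at most one such use; visitTest then fuses the compare and the
+// branch into a single LCompare*AndBranch.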
+static inline bool
+CanEmitCompareAtUses(MInstruction* ins)
+{
+ if (!ins->canEmitAtUses())
+ return false;
+
+ bool foundTest = false;
+ for (MUseIterator iter(ins->usesBegin()); iter != ins->usesEnd(); iter++) {
+ MNode* node = iter->consumer();
+ if (!node->isDefinition())
+ return false;
+ if (!node->toDefinition()->isTest())
+ return false;
+ if (foundTest)
+ return false;
+ foundTest = true;
+ }
+ return true;
+}
+
+void
+LIRGenerator::visitCompare(MCompare* comp)
+{
+ MDefinition* left = comp->lhs();
+ MDefinition* right = comp->rhs();
+
+ // Try to fold the comparison so that we don't have to handle all cases.
+ bool result;
+ if (comp->tryFold(&result)) {
+ define(new(alloc()) LInteger(result), comp);
+ return;
+ }
+
+    // Move this below the emitAtUses call if we ever implement
+    // LCompareSAndBranch. Doing so now wouldn't be wrong, but it would serve
+    // no purpose, and keeping the string case here avoids confusion.
+ if (comp->compareType() == MCompare::Compare_String) {
+ LCompareS* lir = new(alloc()) LCompareS(useRegister(left), useRegister(right));
+ define(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+
+ // Strict compare between value and string
+ if (comp->compareType() == MCompare::Compare_StrictString) {
+ MOZ_ASSERT(left->type() == MIRType::Value);
+ MOZ_ASSERT(right->type() == MIRType::String);
+
+ LCompareStrictS* lir = new(alloc()) LCompareStrictS(useBox(left), useRegister(right),
+ tempToUnbox());
+ define(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+
+    // Unknown/unspecialized compares use a VM call.
+ if (comp->compareType() == MCompare::Compare_Unknown) {
+ LCompareVM* lir = new(alloc()) LCompareVM(useBoxAtStart(left), useBoxAtStart(right));
+ defineReturn(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+
+    // Sniff out if the output of this compare is used only for branching.
+ // If it is, then we will emit an LCompare*AndBranch instruction in place
+ // of this compare and any test that uses this compare. Thus, we can
+ // ignore this Compare.
+ if (CanEmitCompareAtUses(comp)) {
+ emitAtUses(comp);
+ return;
+ }
+
+ // Compare Null and Undefined.
+ if (comp->compareType() == MCompare::Compare_Null ||
+ comp->compareType() == MCompare::Compare_Undefined)
+ {
+ if (left->type() == MIRType::Object || left->type() == MIRType::ObjectOrNull) {
+ MOZ_ASSERT(left->type() == MIRType::ObjectOrNull ||
+ comp->operandMightEmulateUndefined(),
+ "MCompare::tryFold should have folded this away");
+
+ define(new(alloc()) LIsNullOrLikeUndefinedT(useRegister(left)), comp);
+ return;
+ }
+
+ LDefinition tmp, tmpToUnbox;
+ if (comp->operandMightEmulateUndefined()) {
+ tmp = temp();
+ tmpToUnbox = tempToUnbox();
+ } else {
+ tmp = LDefinition::BogusTemp();
+ tmpToUnbox = LDefinition::BogusTemp();
+ }
+
+ LIsNullOrLikeUndefinedV* lir = new(alloc()) LIsNullOrLikeUndefinedV(useBox(left),
+ tmp, tmpToUnbox);
+ define(lir, comp);
+ return;
+ }
+
+ // Compare booleans.
+ if (comp->compareType() == MCompare::Compare_Boolean) {
+ MOZ_ASSERT(left->type() == MIRType::Value);
+ MOZ_ASSERT(right->type() == MIRType::Boolean);
+
+ LCompareB* lir = new(alloc()) LCompareB(useBox(left), useRegisterOrConstant(right));
+ define(lir, comp);
+ return;
+ }
+
+ // Compare Int32 or Object pointers.
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_Object)
+ {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs;
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32)
+ {
+ rhs = useAnyOrConstant(right);
+ } else {
+ rhs = useRegister(right);
+ }
+ define(new(alloc()) LCompare(op, lhs, rhs), comp);
+ return;
+ }
+
+ // Compare Int64.
+ if (comp->compareType() == MCompare::Compare_Int64 ||
+ comp->compareType() == MCompare::Compare_UInt64)
+ {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ define(new(alloc()) LCompareI64(op, useInt64Register(left), useInt64OrConstant(right)),
+ comp);
+ return;
+ }
+
+ // Compare doubles.
+ if (comp->isDoubleComparison()) {
+ define(new(alloc()) LCompareD(useRegister(left), useRegister(right)), comp);
+ return;
+ }
+
+ // Compare float32.
+ if (comp->isFloat32Comparison()) {
+ define(new(alloc()) LCompareF(useRegister(left), useRegister(right)), comp);
+ return;
+ }
+
+ // Compare values.
+ if (comp->compareType() == MCompare::Compare_Bitwise) {
+ LCompareBitwise* lir = new(alloc()) LCompareBitwise(useBoxAtStart(left),
+ useBoxAtStart(right));
+ define(lir, comp);
+ return;
+ }
+
+ MOZ_CRASH("Unrecognized compare type.");
+}
+
+void
+LIRGenerator::lowerBitOp(JSOp op, MInstruction* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ if (lhs->type() == MIRType::Int32) {
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForALU(new(alloc()) LBitOpI(op), ins, lhs, rhs);
+ return;
+ }
+
+ if (lhs->type() == MIRType::Int64) {
+ MOZ_ASSERT(rhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForALUInt64(new(alloc()) LBitOpI64(op), ins, lhs, rhs);
+ return;
+ }
+
+ LBitOpV* lir = new(alloc()) LBitOpV(op, useBoxAtStart(lhs), useBoxAtStart(rhs));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitTypeOf(MTypeOf* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LTypeOfV* lir = new(alloc()) LTypeOfV(useBox(opd), tempToUnbox());
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitToAsync(MToAsync* ins)
+{
+ LToAsync* lir = new(alloc()) LToAsync(useRegisterAtStart(ins->input()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitToId(MToId* ins)
+{
+ LToIdV* lir = new(alloc()) LToIdV(useBox(ins->input()), tempDouble());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitBitNot(MBitNot* ins)
+{
+ MDefinition* input = ins->getOperand(0);
+
+ if (input->type() == MIRType::Int32) {
+ lowerForALU(new(alloc()) LBitNotI(), ins, input);
+ return;
+ }
+
+ LBitNotV* lir = new(alloc()) LBitNotV(useBoxAtStart(input));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
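+// A bitand can be emitted at its use only when both operands are Int32 and
+// its single consumer is an MTest; visitTest then emits an LBitAndAndBranch
+// instead of a separate bitand and test.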
+static bool
+CanEmitBitAndAtUses(MInstruction* ins)
+{
+ if (!ins->canEmitAtUses())
+ return false;
+
+ if (ins->getOperand(0)->type() != MIRType::Int32 || ins->getOperand(1)->type() != MIRType::Int32)
+ return false;
+
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd())
+ return false;
+
+ MNode* node = iter->consumer();
+ if (!node->isDefinition())
+ return false;
+
+ if (!node->toDefinition()->isTest())
+ return false;
+
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+void
+LIRGenerator::visitBitAnd(MBitAnd* ins)
+{
+    // Sniff out if the output of this bitand is used only for branching.
+ // If it is, then we will emit an LBitAndAndBranch instruction in place
+ // of this bitand and any test that uses this bitand. Thus, we can
+ // ignore this BitAnd.
+ if (CanEmitBitAndAtUses(ins))
+ emitAtUses(ins);
+ else
+ lowerBitOp(JSOP_BITAND, ins);
+}
+
+void
+LIRGenerator::visitBitOr(MBitOr* ins)
+{
+ lowerBitOp(JSOP_BITOR, ins);
+}
+
+void
+LIRGenerator::visitBitXor(MBitXor* ins)
+{
+ lowerBitOp(JSOP_BITXOR, ins);
+}
+
+void
+LIRGenerator::lowerShiftOp(JSOp op, MShiftInstruction* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ if (lhs->type() == MIRType::Int32) {
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(op == JSOP_URSH);
+ lowerUrshD(ins->toUrsh());
+ return;
+ }
+
+ LShiftI* lir = new(alloc()) LShiftI(op);
+ if (op == JSOP_URSH) {
+ if (ins->toUrsh()->fallible())
+ assignSnapshot(lir, Bailout_OverflowInvalidate);
+ }
+ lowerForShift(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (lhs->type() == MIRType::Int64) {
+ MOZ_ASSERT(rhs->type() == MIRType::Int64);
+ lowerForShiftInt64(new(alloc()) LShiftI64(op), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_ASSERT(ins->specialization() == MIRType::None);
+
+ if (op == JSOP_URSH) {
+ // Result is either int32 or double so we have to use BinaryV.
+ lowerBinaryV(JSOP_URSH, ins);
+ return;
+ }
+
+ LBitOpV* lir = new(alloc()) LBitOpV(op, useBoxAtStart(lhs), useBoxAtStart(rhs));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitLsh(MLsh* ins)
+{
+ lowerShiftOp(JSOP_LSH, ins);
+}
+
+void
+LIRGenerator::visitRsh(MRsh* ins)
+{
+ lowerShiftOp(JSOP_RSH, ins);
+}
+
+void
+LIRGenerator::visitUrsh(MUrsh* ins)
+{
+ lowerShiftOp(JSOP_URSH, ins);
+}
+
+void
+LIRGenerator::visitSignExtend(MSignExtend* ins)
+{
+ LInstructionHelper<1, 1, 0>* lir;
+
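+    // Byte-sized sign extension needs an input register that supports byte
+    // operations (on x86, only some general-purpose registers do).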
+ if (ins->mode() == MSignExtend::Byte)
+ lir = new(alloc()) LSignExtend(useByteOpRegisterAtStart(ins->input()), ins->mode());
+ else
+ lir = new(alloc()) LSignExtend(useRegisterAtStart(ins->input()), ins->mode());
+
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitRotate(MRotate* ins)
+{
+ MDefinition* input = ins->input();
+ MDefinition* count = ins->count();
+
+ if (ins->type() == MIRType::Int32) {
+ auto* lir = new(alloc()) LRotate();
+ lowerForShift(lir, ins, input, count);
+ } else if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LRotateI64();
+ lowerForShiftInt64(lir, ins, input, count);
+ } else {
+ MOZ_CRASH("unexpected type in visitRotate");
+ }
+}
+
+void
+LIRGenerator::visitFloor(MFloor* ins)
+{
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (type == MIRType::Double)
+ lir = new(alloc()) LFloor(useRegister(ins->input()));
+ else
+ lir = new(alloc()) LFloorF(useRegister(ins->input()));
+
+ assignSnapshot(lir, Bailout_Round);
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitCeil(MCeil* ins)
+{
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (type == MIRType::Double)
+ lir = new(alloc()) LCeil(useRegister(ins->input()));
+ else
+ lir = new(alloc()) LCeilF(useRegister(ins->input()));
+
+ assignSnapshot(lir, Bailout_Round);
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitRound(MRound* ins)
+{
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 1>* lir;
+ if (type == MIRType::Double)
+        lir = new(alloc()) LRound(useRegister(ins->input()), tempDouble());
+    else
+        lir = new(alloc()) LRoundF(useRegister(ins->input()), tempFloat32());
+
+ assignSnapshot(lir, Bailout_Round);
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitMinMax(MMinMax* ins)
+{
+ MDefinition* first = ins->getOperand(0);
+ MDefinition* second = ins->getOperand(1);
+
+ ReorderCommutative(&first, &second, ins);
+
+ LMinMaxBase* lir;
+ switch (ins->specialization()) {
+ case MIRType::Int32:
+ lir = new(alloc()) LMinMaxI(useRegisterAtStart(first), useRegisterOrConstant(second));
+ break;
+ case MIRType::Float32:
+ lir = new(alloc()) LMinMaxF(useRegisterAtStart(first), useRegister(second));
+ break;
+ case MIRType::Double:
+ lir = new(alloc()) LMinMaxD(useRegisterAtStart(first), useRegister(second));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ defineReuseInput(lir, ins, 0);
+}
+
+void
+LIRGenerator::visitAbs(MAbs* ins)
+{
+ MDefinition* num = ins->input();
+ MOZ_ASSERT(IsNumberType(num->type()));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ switch (num->type()) {
+ case MIRType::Int32:
+ lir = new(alloc()) LAbsI(useRegisterAtStart(num));
+ // needed to handle abs(INT32_MIN)
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Overflow);
+ break;
+ case MIRType::Float32:
+ lir = new(alloc()) LAbsF(useRegisterAtStart(num));
+ break;
+ case MIRType::Double:
+ lir = new(alloc()) LAbsD(useRegisterAtStart(num));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ defineReuseInput(lir, ins, 0);
+}
+
+void
+LIRGenerator::visitClz(MClz* ins)
+{
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LClzI* lir = new(alloc()) LClzI(useRegisterAtStart(num));
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LClzI64(useInt64RegisterAtStart(num));
+ defineInt64(lir, ins);
+}
+
+void
+LIRGenerator::visitCtz(MCtz* ins)
+{
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LCtzI* lir = new(alloc()) LCtzI(useRegisterAtStart(num));
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LCtzI64(useInt64RegisterAtStart(num));
+ defineInt64(lir, ins);
+}
+
+void
+LIRGenerator::visitPopcnt(MPopcnt* ins)
+{
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LPopcntI* lir = new(alloc()) LPopcntI(useRegisterAtStart(num), temp());
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LPopcntI64(useInt64RegisterAtStart(num), temp());
+ defineInt64(lir, ins);
+}
+
+void
+LIRGenerator::visitSqrt(MSqrt* ins)
+{
+ MDefinition* num = ins->input();
+ MOZ_ASSERT(IsFloatingPointType(num->type()));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (num->type() == MIRType::Double)
+ lir = new(alloc()) LSqrtD(useRegisterAtStart(num));
+ else
+ lir = new(alloc()) LSqrtF(useRegisterAtStart(num));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitAtan2(MAtan2* ins)
+{
+ MDefinition* y = ins->y();
+ MOZ_ASSERT(y->type() == MIRType::Double);
+
+ MDefinition* x = ins->x();
+ MOZ_ASSERT(x->type() == MIRType::Double);
+
+ LAtan2D* lir = new(alloc()) LAtan2D(useRegisterAtStart(y), useRegisterAtStart(x),
+ tempFixed(CallTempReg0));
+ defineReturn(lir, ins);
+}
+
+void
+LIRGenerator::visitHypot(MHypot* ins)
+{
+ LHypot* lir = nullptr;
+ uint32_t length = ins->numOperands();
+ for (uint32_t i = 0; i < length; ++i)
+ MOZ_ASSERT(ins->getOperand(i)->type() == MIRType::Double);
+
+    switch (length) {
+ case 2:
+ lir = new(alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)),
+ tempFixed(CallTempReg0));
+ break;
+ case 3:
+ lir = new(alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)),
+ useRegisterAtStart(ins->getOperand(2)),
+ tempFixed(CallTempReg0));
+ break;
+ case 4:
+ lir = new(alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)),
+ useRegisterAtStart(ins->getOperand(2)),
+ useRegisterAtStart(ins->getOperand(3)),
+ tempFixed(CallTempReg0));
+ break;
+ default:
+ MOZ_CRASH("Unexpected number of arguments to LHypot.");
+ }
+
+ defineReturn(lir, ins);
+}
+
+void
+LIRGenerator::visitPow(MPow* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+
+ MDefinition* power = ins->power();
+ MOZ_ASSERT(power->type() == MIRType::Int32 || power->type() == MIRType::Double);
+
+ LInstruction* lir;
+ if (power->type() == MIRType::Int32) {
+ // Note: useRegisterAtStart here is safe, the temp is a GP register so
+ // it will never get the same register.
+ lir = new(alloc()) LPowI(useRegisterAtStart(input), useFixedAtStart(power, CallTempReg1),
+ tempFixed(CallTempReg0));
+ } else {
+ lir = new(alloc()) LPowD(useRegisterAtStart(input), useRegisterAtStart(power),
+ tempFixed(CallTempReg0));
+ }
+ defineReturn(lir, ins);
+}
+
+void
+LIRGenerator::visitMathFunction(MMathFunction* ins)
+{
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+ MOZ_ASSERT_IF(ins->input()->type() != MIRType::SinCosDouble,
+ ins->type() == ins->input()->type());
+
+ if (ins->input()->type() == MIRType::SinCosDouble) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ redefine(ins, ins->input(), ins->function());
+ return;
+ }
+
+ LInstruction* lir;
+ if (ins->type() == MIRType::Double) {
+ // Note: useRegisterAtStart is safe here, the temp is not a FP register.
+ lir = new(alloc()) LMathFunctionD(useRegisterAtStart(ins->input()),
+ tempFixed(CallTempReg0));
+ } else {
+ lir = new(alloc()) LMathFunctionF(useRegisterAtStart(ins->input()),
+ tempFixed(CallTempReg0));
+ }
+ defineReturn(lir, ins);
+}
+
+// Try to mark an add or sub instruction as able to recover its input when
+// bailing out.
+template <typename S, typename T>
+static void
+MaybeSetRecoversInput(S* mir, T* lir)
+{
+ MOZ_ASSERT(lir->mirRaw() == mir);
+ if (!mir->fallible() || !lir->snapshot())
+ return;
+
+ if (lir->output()->policy() != LDefinition::MUST_REUSE_INPUT)
+ return;
+
+ // The original operands to an add or sub can't be recovered if they both
+ // use the same register.
+ if (lir->lhs()->isUse() && lir->rhs()->isUse() &&
+ lir->lhs()->toUse()->virtualRegister() == lir->rhs()->toUse()->virtualRegister())
+ {
+ return;
+ }
+
+    // An add or sub instruction operating on two different values can recover
+    // the input it clobbered via MUST_REUSE_INPUT. Thus, a copy of that input
+    // does not need to be kept alive in the snapshot for the instruction.
+
+ lir->setRecoversInput();
+
+ const LUse* input = lir->getOperand(lir->output()->getReusedInput())->toUse();
+ lir->snapshot()->rewriteRecoveredInput(*input);
+}
+
+void
+LIRGenerator::visitAdd(MAdd* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(lhs->type() == rhs->type());
+
+ if (ins->specialization() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ LAddI* lir = new(alloc()) LAddI;
+
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_OverflowInvalidate);
+
+ lowerForALU(lir, ins, lhs, rhs);
+ MaybeSetRecoversInput(ins, lir);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+ LAddI64* lir = new(alloc()) LAddI64;
+ lowerForALUInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForFPU(new(alloc()) LMathD(JSOP_ADD), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForFPU(new(alloc()) LMathF(JSOP_ADD), ins, lhs, rhs);
+ return;
+ }
+
+ lowerBinaryV(JSOP_ADD, ins);
+}
+
+void
+LIRGenerator::visitSub(MSub* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == rhs->type());
+
+ if (ins->specialization() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+
+ LSubI* lir = new(alloc()) LSubI;
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Overflow);
+
+ lowerForALU(lir, ins, lhs, rhs);
+ MaybeSetRecoversInput(ins, lir);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ LSubI64* lir = new(alloc()) LSubI64;
+ lowerForALUInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ lowerForFPU(new(alloc()) LMathD(JSOP_SUB), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ lowerForFPU(new(alloc()) LMathF(JSOP_SUB), ins, lhs, rhs);
+ return;
+ }
+
+ lowerBinaryV(JSOP_SUB, ins);
+}
+
+void
+LIRGenerator::visitMul(MMul* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+
+ if (ins->specialization() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // If our RHS is a constant -1 and we don't have to worry about
+ // overflow, we can optimize to an LNegI.
+ if (!ins->fallible() && rhs->isConstant() && rhs->toConstant()->toInt32() == -1)
+ defineReuseInput(new(alloc()) LNegI(useRegisterAtStart(lhs)), ins, 0);
+ else
+ lowerMulI(ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+ LMulI64* lir = new(alloc()) LMulI64;
+ lowerForMulInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // If our RHS is a constant -1.0, we can optimize to an LNegD.
+ if (!ins->mustPreserveNaN() && rhs->isConstant() && rhs->toConstant()->toDouble() == -1.0)
+ defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(lhs)), ins, 0);
+ else
+ lowerForFPU(new(alloc()) LMathD(JSOP_MUL), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+        // We apply the same optimizations as for doubles.
+ if (!ins->mustPreserveNaN() && rhs->isConstant() && rhs->toConstant()->toFloat32() == -1.0f)
+ defineReuseInput(new(alloc()) LNegF(useRegisterAtStart(lhs)), ins, 0);
+ else
+ lowerForFPU(new(alloc()) LMathF(JSOP_MUL), ins, lhs, rhs);
+ return;
+ }
+
+ lowerBinaryV(JSOP_MUL, ins);
+}
+
+void
+LIRGenerator::visitDiv(MDiv* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+
+ if (ins->specialization() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ lowerDivI(ins);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ lowerDivI64(ins);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ lowerForFPU(new(alloc()) LMathD(JSOP_DIV), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ lowerForFPU(new(alloc()) LMathF(JSOP_DIV), ins, lhs, rhs);
+ return;
+ }
+
+ lowerBinaryV(JSOP_DIV, ins);
+}
+
+void
+LIRGenerator::visitMod(MMod* ins)
+{
+ MOZ_ASSERT(ins->lhs()->type() == ins->rhs()->type());
+
+ if (ins->specialization() == MIRType::Int32) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Int32);
+ lowerModI(ins);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Int64) {
+ MOZ_ASSERT(ins->type() == MIRType::Int64);
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Int64);
+ lowerModI64(ins);
+ return;
+ }
+
+ if (ins->specialization() == MIRType::Double) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Double);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Double);
+
+ // Note: useRegisterAtStart is safe here, the temp is not a FP register.
+ LModD* lir = new(alloc()) LModD(useRegisterAtStart(ins->lhs()), useRegisterAtStart(ins->rhs()),
+ tempFixed(CallTempReg0));
+ defineReturn(lir, ins);
+ return;
+ }
+
+ lowerBinaryV(JSOP_MOD, ins);
+}
+
+void
+LIRGenerator::lowerBinaryV(JSOp op, MBinaryInstruction* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Value);
+
+ LBinaryV* lir = new(alloc()) LBinaryV(op, useBoxAtStart(lhs), useBoxAtStart(rhs));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitConcat(MConcat* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(lhs->type() == MIRType::String);
+ MOZ_ASSERT(rhs->type() == MIRType::String);
+ MOZ_ASSERT(ins->type() == MIRType::String);
+
+ LConcat* lir = new(alloc()) LConcat(useFixedAtStart(lhs, CallTempReg0),
+ useFixedAtStart(rhs, CallTempReg1),
+ tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1),
+ tempFixed(CallTempReg2),
+ tempFixed(CallTempReg3),
+ tempFixed(CallTempReg4));
+ defineFixed(lir, ins, LAllocation(AnyRegister(CallTempReg5)));
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCharCodeAt(MCharCodeAt* ins)
+{
+ MDefinition* str = ins->getOperand(0);
+ MDefinition* idx = ins->getOperand(1);
+
+ MOZ_ASSERT(str->type() == MIRType::String);
+ MOZ_ASSERT(idx->type() == MIRType::Int32);
+
+ LCharCodeAt* lir = new(alloc()) LCharCodeAt(useRegister(str), useRegister(idx));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitFromCharCode(MFromCharCode* ins)
+{
+ MDefinition* code = ins->getOperand(0);
+
+ MOZ_ASSERT(code->type() == MIRType::Int32);
+
+ LFromCharCode* lir = new(alloc()) LFromCharCode(useRegister(code));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitFromCodePoint(MFromCodePoint* ins)
+{
+ MDefinition* codePoint = ins->getOperand(0);
+
+ MOZ_ASSERT(codePoint->type() == MIRType::Int32);
+
+ LFromCodePoint* lir = new(alloc()) LFromCodePoint(useRegister(codePoint));
+ assignSnapshot(lir, Bailout_BoundsCheck);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitStart(MStart* start)
+{
+ LStart* lir = new(alloc()) LStart;
+
+ // Create a snapshot that captures the initial state of the function.
+ assignSnapshot(lir, Bailout_ArgumentCheck);
+ if (start->block()->graph().entryBlock() == start->block())
+ lirGraph_.setEntrySnapshot(lir->snapshot());
+
+ add(lir);
+}
+
+void
+LIRGenerator::visitNop(MNop* nop)
+{
+}
+
+void
+LIRGenerator::visitLimitedTruncate(MLimitedTruncate* nop)
+{
+ redefine(nop, nop->input());
+}
+
+void
+LIRGenerator::visitOsrEntry(MOsrEntry* entry)
+{
+ LOsrEntry* lir = new(alloc()) LOsrEntry(temp());
+ defineFixed(lir, entry, LAllocation(AnyRegister(OsrFrameReg)));
+}
+
+void
+LIRGenerator::visitOsrValue(MOsrValue* value)
+{
+ LOsrValue* lir = new(alloc()) LOsrValue(useRegister(value->entry()));
+ defineBox(lir, value);
+}
+
+void
+LIRGenerator::visitOsrReturnValue(MOsrReturnValue* value)
+{
+ LOsrReturnValue* lir = new(alloc()) LOsrReturnValue(useRegister(value->entry()));
+ defineBox(lir, value);
+}
+
+void
+LIRGenerator::visitOsrEnvironmentChain(MOsrEnvironmentChain* object)
+{
+ LOsrEnvironmentChain* lir = new(alloc()) LOsrEnvironmentChain(useRegister(object->entry()));
+ define(lir, object);
+}
+
+void
+LIRGenerator::visitOsrArgumentsObject(MOsrArgumentsObject* object)
+{
+ LOsrArgumentsObject* lir = new(alloc()) LOsrArgumentsObject(useRegister(object->entry()));
+ define(lir, object);
+}
+
+void
+LIRGenerator::visitToDouble(MToDouble* convert)
+{
+ MDefinition* opd = convert->input();
+ mozilla::DebugOnly<MToFPInstruction::ConversionKind> conversion = convert->conversion();
+
+ switch (opd->type()) {
+ case MIRType::Value:
+ {
+ LValueToDouble* lir = new(alloc()) LValueToDouble(useBox(opd));
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly &&
+ conversion != MToFPInstruction::NonNullNonStringPrimitives);
+ lowerConstantDouble(0, convert);
+ break;
+
+ case MIRType::Undefined:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly);
+ lowerConstantDouble(GenericNaN(), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly);
+ MOZ_FALLTHROUGH;
+
+ case MIRType::Int32:
+ {
+ LInt32ToDouble* lir = new(alloc()) LInt32ToDouble(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Float32:
+ {
+        LFloat32ToDouble* lir = new(alloc()) LFloat32ToDouble(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double:
+ redefine(convert, opd);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols will throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitToFloat32(MToFloat32* convert)
+{
+ MDefinition* opd = convert->input();
+ mozilla::DebugOnly<MToFloat32::ConversionKind> conversion = convert->conversion();
+
+ switch (opd->type()) {
+ case MIRType::Value:
+ {
+ LValueToFloat32* lir = new(alloc()) LValueToFloat32(useBox(opd));
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly &&
+ conversion != MToFPInstruction::NonNullNonStringPrimitives);
+ lowerConstantFloat32(0, convert);
+ break;
+
+ case MIRType::Undefined:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly);
+ lowerConstantFloat32(GenericNaN(), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(conversion != MToFPInstruction::NumbersOnly);
+ MOZ_FALLTHROUGH;
+
+ case MIRType::Int32:
+ {
+ LInt32ToFloat32* lir = new(alloc()) LInt32ToFloat32(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double:
+ {
+ LDoubleToFloat32* lir = new(alloc()) LDoubleToFloat32(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Float32:
+ redefine(convert, opd);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols will throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitToInt32(MToInt32* convert)
+{
+ MDefinition* opd = convert->input();
+
+ switch (opd->type()) {
+ case MIRType::Value:
+ {
+ LValueToInt32* lir =
+ new(alloc()) LValueToInt32(useBox(opd), tempDouble(), temp(), LValueToInt32::NORMAL);
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, convert);
+ assignSafepoint(lir, convert);
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(convert->conversion() == MacroAssembler::IntConversion_Any);
+ define(new(alloc()) LInteger(0), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(convert->conversion() == MacroAssembler::IntConversion_Any ||
+ convert->conversion() == MacroAssembler::IntConversion_NumbersOrBoolsOnly);
+ redefine(convert, opd);
+ break;
+
+ case MIRType::Int32:
+ redefine(convert, opd);
+ break;
+
+ case MIRType::Float32:
+ {
+ LFloat32ToInt32* lir = new(alloc()) LFloat32ToInt32(useRegister(opd));
+ assignSnapshot(lir, Bailout_PrecisionLoss);
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double:
+ {
+ LDoubleToInt32* lir = new(alloc()) LDoubleToInt32(useRegister(opd));
+ assignSnapshot(lir, Bailout_PrecisionLoss);
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ case MIRType::Undefined:
+ // Objects might be effectful. Symbols throw. Strings are complicated -
+ // we don't handle them yet. Undefined coerces to NaN, not int32.
+ MOZ_CRASH("ToInt32 invalid input type");
+
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitTruncateToInt32(MTruncateToInt32* truncate)
+{
+ MDefinition* opd = truncate->input();
+
+ switch (opd->type()) {
+ case MIRType::Value:
+ {
+ LValueToInt32* lir = new(alloc()) LValueToInt32(useBox(opd), tempDouble(), temp(),
+ LValueToInt32::TRUNCATE);
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, truncate);
+ assignSafepoint(lir, truncate);
+ break;
+ }
+
+ case MIRType::Null:
+ case MIRType::Undefined:
+ define(new(alloc()) LInteger(0), truncate);
+ break;
+
+ case MIRType::Int32:
+ case MIRType::Boolean:
+ redefine(truncate, opd);
+ break;
+
+ case MIRType::Double:
+ lowerTruncateDToInt32(truncate);
+ break;
+
+ case MIRType::Float32:
+ lowerTruncateFToInt32(truncate);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitWasmTruncateToInt32(MWasmTruncateToInt32* ins)
+{
+ MDefinition* input = ins->input();
+ switch (input->type()) {
+ case MIRType::Double:
+ case MIRType::Float32: {
+ auto* lir = new(alloc()) LWasmTruncateToInt32(useRegisterAtStart(input));
+ define(lir, ins);
+ break;
+ }
+ default:
+ MOZ_CRASH("unexpected type in WasmTruncateToInt32");
+ }
+}
+
+void
+LIRGenerator::visitWrapInt64ToInt32(MWrapInt64ToInt32* ins)
+{
+ define(new(alloc()) LWrapInt64ToInt32(useInt64AtStart(ins->input())), ins);
+}
+
+void
+LIRGenerator::visitToString(MToString* ins)
+{
+ MDefinition* opd = ins->input();
+
+ switch (opd->type()) {
+ case MIRType::Null: {
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ LPointer* lir = new(alloc()) LPointer(names.null);
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Undefined: {
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ LPointer* lir = new(alloc()) LPointer(names.undefined);
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Boolean: {
+ LBooleanToString* lir = new(alloc()) LBooleanToString(useRegister(opd));
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Double: {
+ LDoubleToString* lir = new(alloc()) LDoubleToString(useRegister(opd), temp());
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::Int32: {
+ LIntToString* lir = new(alloc()) LIntToString(useRegister(opd));
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::String:
+ redefine(ins, ins->input());
+ break;
+
+ case MIRType::Value: {
+ LValueToString* lir = new(alloc()) LValueToString(useBox(opd), tempToUnbox());
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ default:
+ // Float32, symbols, and objects are not supported.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitToObjectOrNull(MToObjectOrNull* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Value);
+
+ LValueToObjectOrNull* lir = new(alloc()) LValueToObjectOrNull(useBox(ins->input()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitRegExp(MRegExp* ins)
+{
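+ // A regexp that must be cloned per execution goes through a stub call;
+ // otherwise we emit a pointer to its source RegExpObject directly.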
+ if (ins->mustClone()) {
+ LRegExp* lir = new(alloc()) LRegExp();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ RegExpObject* source = ins->source();
+ define(new(alloc()) LPointer(source), ins);
+ }
+}
+
+void
+LIRGenerator::visitRegExpMatcher(MRegExpMatcher* ins)
+{
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->lastIndex()->type() == MIRType::Int32);
+
+ LRegExpMatcher* lir = new(alloc()) LRegExpMatcher(useFixedAtStart(ins->regexp(), RegExpMatcherRegExpReg),
+ useFixedAtStart(ins->string(), RegExpMatcherStringReg),
+ useFixedAtStart(ins->lastIndex(), RegExpMatcherLastIndexReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitRegExpSearcher(MRegExpSearcher* ins)
+{
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->lastIndex()->type() == MIRType::Int32);
+
+ LRegExpSearcher* lir = new(alloc()) LRegExpSearcher(useFixedAtStart(ins->regexp(), RegExpTesterRegExpReg),
+ useFixedAtStart(ins->string(), RegExpTesterStringReg),
+ useFixedAtStart(ins->lastIndex(), RegExpTesterLastIndexReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitRegExpTester(MRegExpTester* ins)
+{
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->lastIndex()->type() == MIRType::Int32);
+
+ LRegExpTester* lir = new(alloc()) LRegExpTester(useFixedAtStart(ins->regexp(), RegExpTesterRegExpReg),
+ useFixedAtStart(ins->string(), RegExpTesterStringReg),
+ useFixedAtStart(ins->lastIndex(), RegExpTesterLastIndexReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitRegExpPrototypeOptimizable(MRegExpPrototypeOptimizable* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ LRegExpPrototypeOptimizable* lir = new(alloc()) LRegExpPrototypeOptimizable(useRegister(ins->object()),
+ temp());
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitRegExpInstanceOptimizable(MRegExpInstanceOptimizable* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->proto()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ LRegExpInstanceOptimizable* lir = new(alloc()) LRegExpInstanceOptimizable(useRegister(ins->object()),
+ useRegister(ins->proto()),
+ temp());
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitGetFirstDollarIndex(MGetFirstDollarIndex* ins)
+{
+ MOZ_ASSERT(ins->str()->type() == MIRType::String);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ LGetFirstDollarIndex* lir = new(alloc()) LGetFirstDollarIndex(useRegister(ins->str()),
+ temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitStringReplace(MStringReplace* ins)
+{
+ MOZ_ASSERT(ins->pattern()->type() == MIRType::String);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->replacement()->type() == MIRType::String);
+
+ LStringReplace* lir = new(alloc()) LStringReplace(useRegisterOrConstantAtStart(ins->string()),
+ useRegisterAtStart(ins->pattern()),
+ useRegisterOrConstantAtStart(ins->replacement()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitBinarySharedStub(MBinarySharedStub* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LBinarySharedStub* lir = new(alloc()) LBinarySharedStub(useBoxFixedAtStart(lhs, R0),
+ useBoxFixedAtStart(rhs, R1));
+ defineSharedStubReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitUnarySharedStub(MUnarySharedStub* ins)
+{
+ MDefinition* input = ins->getOperand(0);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LUnarySharedStub* lir = new(alloc()) LUnarySharedStub(useBoxFixedAtStart(input, R0));
+ defineSharedStubReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitNullarySharedStub(MNullarySharedStub* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LNullarySharedStub* lir = new(alloc()) LNullarySharedStub();
+
+ defineSharedStubReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitLambda(MLambda* ins)
+{
+ if (ins->info().singletonType || ins->info().useSingletonForClone) {
+ // If the function has a singleton type, this instruction will only be
+ // executed once so we don't bother inlining it.
+ //
+ // If UseSingletonForClone is true, we will assign a singleton type to
+ // the clone and must also clone the script; that can't be done inline.
+ LLambdaForSingleton* lir = new(alloc())
+ LLambdaForSingleton(useRegisterAtStart(ins->environmentChain()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ LLambda* lir = new(alloc()) LLambda(useRegister(ins->environmentChain()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitLambdaArrow(MLambdaArrow* ins)
+{
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->newTargetDef()->type() == MIRType::Value);
+
+ LLambdaArrow* lir = new(alloc()) LLambdaArrow(useRegister(ins->environmentChain()),
+ useBox(ins->newTargetDef()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitKeepAliveObject(MKeepAliveObject* ins)
+{
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ add(new(alloc()) LKeepAliveObject(useKeepalive(obj)), ins);
+}
+
+void
+LIRGenerator::visitSlots(MSlots* ins)
+{
+ define(new(alloc()) LSlots(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitElements(MElements* ins)
+{
+ define(new(alloc()) LElements(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitConstantElements(MConstantElements* ins)
+{
+ define(new(alloc()) LPointer(ins->value().unwrap(/*safe - pointer does not flow back to C++*/),
+ LPointer::NON_GC_THING),
+ ins);
+}
+
+void
+LIRGenerator::visitConvertElementsToDoubles(MConvertElementsToDoubles* ins)
+{
+ LInstruction* check = new(alloc()) LConvertElementsToDoubles(useRegister(ins->elements()));
+ add(check, ins);
+ assignSafepoint(check, ins);
+}
+
+void
+LIRGenerator::visitMaybeToDoubleElement(MMaybeToDoubleElement* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+
+ LMaybeToDoubleElement* lir = new(alloc()) LMaybeToDoubleElement(useRegisterAtStart(ins->elements()),
+ useRegisterAtStart(ins->value()),
+ tempDouble());
+ defineBox(lir, ins);
+}
+
+void
+LIRGenerator::visitMaybeCopyElementsForWrite(MMaybeCopyElementsForWrite* ins)
+{
+ LInstruction* check = new(alloc()) LMaybeCopyElementsForWrite(useRegister(ins->object()), temp());
+ add(check, ins);
+ assignSafepoint(check, ins);
+}
+
+void
+LIRGenerator::visitLoadSlot(MLoadSlot* ins)
+{
+ switch (ins->type()) {
+ case MIRType::Value:
+ defineBox(new(alloc()) LLoadSlotV(useRegisterAtStart(ins->slots())), ins);
+ break;
+
+ case MIRType::Undefined:
+ case MIRType::Null:
+ MOZ_CRASH("typed load must have a payload");
+
+ default:
+ define(new(alloc()) LLoadSlotT(useRegisterForTypedLoad(ins->slots(), ins->type())), ins);
+ break;
+ }
+}
+
+void
+LIRGenerator::visitFunctionEnvironment(MFunctionEnvironment* ins)
+{
+ define(new(alloc()) LFunctionEnvironment(useRegisterAtStart(ins->function())), ins);
+}
+
+void
+LIRGenerator::visitInterruptCheck(MInterruptCheck* ins)
+{
+ LInstruction* lir = new(alloc()) LInterruptCheck();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitWasmTrap(MWasmTrap* ins)
+{
+ add(new(alloc()) LWasmTrap, ins);
+}
+
+void
+LIRGenerator::visitWasmReinterpret(MWasmReinterpret* ins)
+{
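+ // Three cases: reinterpreting to an int64 result, reinterpreting from an
+ // int64 input, or moving bits between same-width int and float registers.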
+ if (ins->type() == MIRType::Int64)
+ defineInt64(new(alloc()) LWasmReinterpretToI64(useRegisterAtStart(ins->input())), ins);
+ else if (ins->input()->type() == MIRType::Int64)
+ define(new(alloc()) LWasmReinterpretFromI64(useInt64RegisterAtStart(ins->input())), ins);
+ else
+ define(new(alloc()) LWasmReinterpret(useRegisterAtStart(ins->input())), ins);
+}
+
+void
+LIRGenerator::visitStoreSlot(MStoreSlot* ins)
+{
+ LInstruction* lir;
+
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ lir = new(alloc()) LStoreSlotV(useRegister(ins->slots()), useBox(ins->value()));
+ add(lir, ins);
+ break;
+
+ case MIRType::Double:
+ add(new(alloc()) LStoreSlotT(useRegister(ins->slots()), useRegister(ins->value())), ins);
+ break;
+
+ case MIRType::Float32:
+ MOZ_CRASH("Float32 shouldn't be stored in a slot.");
+
+ default:
+ add(new(alloc()) LStoreSlotT(useRegister(ins->slots()),
+ useRegisterOrConstant(ins->value())), ins);
+ break;
+ }
+}
+
+void
+LIRGenerator::visitFilterTypeSet(MFilterTypeSet* ins)
+{
+ redefine(ins, ins->input());
+}
+
+void
+LIRGenerator::visitTypeBarrier(MTypeBarrier* ins)
+{
+ // Requesting a non-GC pointer is safe here since we never re-enter C++
+ // from inside a type barrier test.
+
+ const TemporaryTypeSet* types = ins->resultTypeSet();
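+ // A temp register is only needed when the barrier has to inspect
+ // specific objects listed in the type set.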
+ bool needTemp = !types->unknownObject() && types->getObjectCount() > 0;
+
+ MIRType inputType = ins->getOperand(0)->type();
+ MOZ_ASSERT(inputType == ins->type());
+
+ // Handle type barriers that will always bail.
+ // (Emit an LBail for visibility.)
+ if (ins->alwaysBails()) {
+ LBail* bail = new(alloc()) LBail();
+ assignSnapshot(bail, Bailout_Inevitable);
+ add(bail, ins);
+ redefine(ins, ins->input());
+ return;
+ }
+
+ // Handle type barriers with a Value input.
+ if (inputType == MIRType::Value) {
+ LDefinition tmp = needTemp ? temp() : tempToUnbox();
+ LTypeBarrierV* barrier = new(alloc()) LTypeBarrierV(useBox(ins->input()), tmp);
+ assignSnapshot(barrier, Bailout_TypeBarrierV);
+ add(barrier, ins);
+ redefine(ins, ins->input());
+ return;
+ }
+
+ // The payload needs to be tested if it either might be null or might have
+ // an object that should be excluded from the barrier.
+ bool needsObjectBarrier = false;
+ if (inputType == MIRType::ObjectOrNull)
+ needsObjectBarrier = true;
+ if (inputType == MIRType::Object && !types->hasType(TypeSet::AnyObjectType()) &&
+ ins->barrierKind() != BarrierKind::TypeTagOnly)
+ {
+ needsObjectBarrier = true;
+ }
+
+ if (needsObjectBarrier) {
+ LDefinition tmp = needTemp ? temp() : LDefinition::BogusTemp();
+ LTypeBarrierO* barrier = new(alloc()) LTypeBarrierO(useRegister(ins->getOperand(0)), tmp);
+ assignSnapshot(barrier, Bailout_TypeBarrierO);
+ add(barrier, ins);
+ redefine(ins, ins->getOperand(0));
+ return;
+ }
+
+ // Handle remaining cases: No-op, unbox did everything.
+ redefine(ins, ins->getOperand(0));
+}
+
+void
+LIRGenerator::visitMonitorTypes(MMonitorTypes* ins)
+{
+ // Requesting a non-GC pointer is safe here since we never re-enter C++
+ // from inside a type check.
+
+ const TemporaryTypeSet* types = ins->typeSet();
+ bool needTemp = !types->unknownObject() && types->getObjectCount() > 0;
+ LDefinition tmp = needTemp ? temp() : tempToUnbox();
+
+ LMonitorTypes* lir = new(alloc()) LMonitorTypes(useBox(ins->input()), tmp);
+ assignSnapshot(lir, Bailout_MonitorTypes);
+ add(lir, ins);
+}
+
+// Returns true iff |def| is a constant that's either not a GC thing or is not
+// allocated in the nursery.
+static bool
+IsNonNurseryConstant(MDefinition* def)
+{
+ if (!def->isConstant())
+ return false;
+ Value v = def->toConstant()->toJSValue();
+ return !v.isMarkable() || !IsInsideNursery(v.toMarkablePointer());
+}
+
+void
+LIRGenerator::visitPostWriteBarrier(MPostWriteBarrier* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ // LPostWriteBarrier assumes that if it has a constant object then that
+ // object is tenured, and does not need to be tested for being in the
+ // nursery. Ensure that assumption holds by lowering constant nursery
+ // objects to a register.
+ bool useConstantObject = IsNonNurseryConstant(ins->object());
+
+ switch (ins->value()->type()) {
+ case MIRType::Object:
+ case MIRType::ObjectOrNull: {
+ LDefinition tmp = needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteBarrierO* lir =
+ new(alloc()) LPostWriteBarrierO(useConstantObject
+ ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::Value: {
+ LDefinition tmp = needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteBarrierV* lir =
+ new(alloc()) LPostWriteBarrierV(useConstantObject
+ ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useBox(ins->value()),
+ tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ default:
+ // Currently, only objects can be in the nursery. Other instruction
+ // types cannot hold nursery pointers.
+ break;
+ }
+}
+
+void
+LIRGenerator::visitPostWriteElementBarrier(MPostWriteElementBarrier* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ // LPostWriteElementBarrier assumes that if it has a constant object then that
+ // object is tenured, and does not need to be tested for being in the
+ // nursery. Ensure that assumption holds by lowering constant nursery
+ // objects to a register.
+ bool useConstantObject =
+ ins->object()->isConstant() &&
+ !IsInsideNursery(&ins->object()->toConstant()->toObject());
+
+ switch (ins->value()->type()) {
+ case MIRType::Object:
+ case MIRType::ObjectOrNull: {
+ LDefinition tmp = needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteElementBarrierO* lir =
+ new(alloc()) LPostWriteElementBarrierO(useConstantObject
+ ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()),
+ useRegister(ins->index()),
+ tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::Value: {
+ LDefinition tmp = needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteElementBarrierV* lir =
+ new(alloc()) LPostWriteElementBarrierV(useConstantObject
+ ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->index()),
+ useBox(ins->value()),
+ tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ default:
+ // Currently, only objects can be in the nursery. Other instruction
+ // types cannot hold nursery pointers.
+ break;
+ }
+}
+
+void
+LIRGenerator::visitArrayLength(MArrayLength* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ define(new(alloc()) LArrayLength(useRegisterAtStart(ins->elements())), ins);
+}
+
+void
+LIRGenerator::visitSetArrayLength(MSetArrayLength* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ MOZ_ASSERT(ins->index()->isConstant());
+ add(new(alloc()) LSetArrayLength(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index())), ins);
+}
+
+void
+LIRGenerator::visitGetNextEntryForIterator(MGetNextEntryForIterator* ins)
+{
+ MOZ_ASSERT(ins->iter()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->result()->type() == MIRType::Object);
+ auto lir = new(alloc()) LGetNextEntryForIterator(useRegister(ins->iter()),
+ useRegister(ins->result()),
+ temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitTypedArrayLength(MTypedArrayLength* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ define(new(alloc()) LTypedArrayLength(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitTypedArrayElements(MTypedArrayElements* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Elements);
+ define(new(alloc()) LTypedArrayElements(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitSetDisjointTypedElements(MSetDisjointTypedElements* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::None);
+
+ MDefinition* target = ins->target();
+ MOZ_ASSERT(target->type() == MIRType::Object);
+
+ MDefinition* targetOffset = ins->targetOffset();
+ MOZ_ASSERT(targetOffset->type() == MIRType::Int32);
+
+ MDefinition* source = ins->source();
+ MOZ_ASSERT(source->type() == MIRType::Object);
+
+ auto lir = new(alloc()) LSetDisjointTypedElements(useRegister(target),
+ useRegister(targetOffset),
+ useRegister(source),
+ temp());
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitTypedObjectDescr(MTypedObjectDescr* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ define(new(alloc()) LTypedObjectDescr(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitTypedObjectElements(MTypedObjectElements* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Elements);
+ define(new(alloc()) LTypedObjectElements(useRegister(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitSetTypedObjectOffset(MSetTypedObjectOffset* ins)
+{
+ add(new(alloc()) LSetTypedObjectOffset(useRegister(ins->object()),
+ useRegister(ins->offset()),
+ temp(), temp()),
+ ins);
+}
+
+void
+LIRGenerator::visitInitializedLength(MInitializedLength* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ define(new(alloc()) LInitializedLength(useRegisterAtStart(ins->elements())), ins);
+}
+
+void
+LIRGenerator::visitSetInitializedLength(MSetInitializedLength* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ MOZ_ASSERT(ins->index()->isConstant());
+ add(new(alloc()) LSetInitializedLength(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index())), ins);
+}
+
+void
+LIRGenerator::visitUnboxedArrayLength(MUnboxedArrayLength* ins)
+{
+ define(new(alloc()) LUnboxedArrayLength(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitUnboxedArrayInitializedLength(MUnboxedArrayInitializedLength* ins)
+{
+ define(new(alloc()) LUnboxedArrayInitializedLength(useRegisterAtStart(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitIncrementUnboxedArrayInitializedLength(MIncrementUnboxedArrayInitializedLength* ins)
+{
+ add(new(alloc()) LIncrementUnboxedArrayInitializedLength(useRegister(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitSetUnboxedArrayInitializedLength(MSetUnboxedArrayInitializedLength* ins)
+{
+ add(new(alloc()) LSetUnboxedArrayInitializedLength(useRegister(ins->object()),
+ useRegisterOrConstant(ins->length()),
+ temp()), ins);
+}
+
+void
+LIRGenerator::visitNot(MNot* ins)
+{
+ MDefinition* op = ins->input();
+
+ // Strings are converted to their length in the type analysis phase (see
+ // TestPolicy).
+ MOZ_ASSERT(op->type() != MIRType::String);
+
+ // - boolean: x xor 1
+ // - int32: LCompare(x, 0)
+ // - double: LCompare(x, 0)
+ // - null or undefined: true
+ // - symbol: false
+ // - object: false if it never emulates undefined, else LNotO(x)
+ // - value: LNotV(x)
+ switch (op->type()) {
+ case MIRType::Boolean: {
+ MConstant* cons = MConstant::New(alloc(), Int32Value(1));
+ ins->block()->insertBefore(ins, cons);
+ lowerForALU(new(alloc()) LBitOpI(JSOP_BITXOR), ins, op, cons);
+ break;
+ }
+ case MIRType::Int32:
+ define(new(alloc()) LNotI(useRegisterAtStart(op)), ins);
+ break;
+ case MIRType::Int64:
+ define(new(alloc()) LNotI64(useInt64RegisterAtStart(op)), ins);
+ break;
+ case MIRType::Double:
+ define(new(alloc()) LNotD(useRegister(op)), ins);
+ break;
+ case MIRType::Float32:
+ define(new(alloc()) LNotF(useRegister(op)), ins);
+ break;
+ case MIRType::Undefined:
+ case MIRType::Null:
+ define(new(alloc()) LInteger(1), ins);
+ break;
+ case MIRType::Symbol:
+ define(new(alloc()) LInteger(0), ins);
+ break;
+ case MIRType::Object:
+ if (!ins->operandMightEmulateUndefined()) {
+ // Objects that don't emulate undefined can be constant-folded.
+ define(new(alloc()) LInteger(0), ins);
+ } else {
+ // All others require further work.
+ define(new(alloc()) LNotO(useRegister(op)), ins);
+ }
+ break;
+ case MIRType::Value: {
+ LDefinition temp0, temp1;
+ if (ins->operandMightEmulateUndefined()) {
+ temp0 = temp();
+ temp1 = temp();
+ } else {
+ temp0 = LDefinition::BogusTemp();
+ temp1 = LDefinition::BogusTemp();
+ }
+
+ LNotV* lir = new(alloc()) LNotV(useBox(op), tempDouble(), temp0, temp1);
+ define(lir, ins);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Unexpected MIRType.");
+ }
+}
+
+void
+LIRGenerator::visitBoundsCheck(MBoundsCheck* ins)
+{
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->length()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+
+ if (!ins->fallible())
+ return;
+
+ LInstruction* check;
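+ // A range check (with a temp) is needed when the bounds check also covers
+ // a constant minimum/maximum offset; otherwise a plain compare against the
+ // length suffices.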
+ if (ins->minimum() || ins->maximum()) {
+ check = new(alloc()) LBoundsCheckRange(useRegisterOrConstant(ins->index()),
+ useAny(ins->length()),
+ temp());
+ } else {
+ check = new(alloc()) LBoundsCheck(useRegisterOrConstant(ins->index()),
+ useAnyOrConstant(ins->length()));
+ }
+ assignSnapshot(check, Bailout_BoundsCheck);
+ add(check, ins);
+}
+
+void
+LIRGenerator::visitBoundsCheckLower(MBoundsCheckLower* ins)
+{
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ if (!ins->fallible())
+ return;
+
+ LInstruction* check = new(alloc()) LBoundsCheckLower(useRegister(ins->index()));
+ assignSnapshot(check, Bailout_BoundsCheck);
+ add(check, ins);
+}
+
+void
+LIRGenerator::visitInArray(MInArray* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->initLength()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ LAllocation object;
+ if (ins->needsNegativeIntCheck())
+ object = useRegister(ins->object());
+
+ LInArray* lir = new(alloc()) LInArray(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()),
+ useRegister(ins->initLength()),
+ object);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitLoadElement(MLoadElement* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ switch (ins->type()) {
+ case MIRType::Value:
+ {
+ LLoadElementV* lir = new(alloc()) LLoadElementV(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Hole);
+ defineBox(lir, ins);
+ break;
+ }
+ case MIRType::Undefined:
+ case MIRType::Null:
+ MOZ_CRASH("typed load must have a payload");
+
+ default:
+ {
+ LLoadElementT* lir = new(alloc()) LLoadElementT(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Hole);
+ define(lir, ins);
+ break;
+ }
+ }
+}
+
+void
+LIRGenerator::visitLoadElementHole(MLoadElementHole* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->initLength()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LLoadElementHole* lir = new(alloc()) LLoadElementHole(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()),
+ useRegister(ins->initLength()));
+ if (ins->needsNegativeIntCheck())
+ assignSnapshot(lir, Bailout_NegativeIndex);
+ defineBox(lir, ins);
+}
+
+void
+LIRGenerator::visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ if (ins->type() == MIRType::Object || ins->type() == MIRType::ObjectOrNull) {
+ LLoadUnboxedPointerT* lir = new(alloc()) LLoadUnboxedPointerT(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ if (ins->nullBehavior() == MLoadUnboxedObjectOrNull::BailOnNull)
+ assignSnapshot(lir, Bailout_TypeBarrierO);
+ define(lir, ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+ MOZ_ASSERT(ins->nullBehavior() != MLoadUnboxedObjectOrNull::BailOnNull);
+
+ LLoadUnboxedPointerV* lir = new(alloc()) LLoadUnboxedPointerV(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ defineBox(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitLoadUnboxedString(MLoadUnboxedString* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::String);
+
+ LLoadUnboxedPointerT* lir = new(alloc()) LLoadUnboxedPointerT(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitStoreElement(MStoreElement* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ {
+ LInstruction* lir = new(alloc()) LStoreElementV(elements, index, useBox(ins->value()));
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Hole);
+ add(lir, ins);
+ break;
+ }
+
+ default:
+ {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ LInstruction* lir = new(alloc()) LStoreElementT(elements, index, value);
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Hole);
+ add(lir, ins);
+ break;
+ }
+ }
+}
+
+void
+LIRGenerator::visitStoreElementHole(MStoreElementHole* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse object = useRegister(ins->object());
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // Use a temp register when adding new elements to unboxed arrays.
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->unboxedType() != JSVAL_TYPE_MAGIC)
+ tempDef = temp();
+
+ LInstruction* lir;
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ lir = new(alloc()) LStoreElementHoleV(object, elements, index, useBox(ins->value()),
+ tempDef);
+ break;
+
+ default:
+ {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ lir = new(alloc()) LStoreElementHoleT(object, elements, index, value, tempDef);
+ break;
+ }
+ }
+
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitFallibleStoreElement(MFallibleStoreElement* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse object = useRegister(ins->object());
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // Use a temp register when adding new elements to unboxed arrays.
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->unboxedType() != JSVAL_TYPE_MAGIC)
+ tempDef = temp();
+
+ LInstruction* lir;
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ lir = new(alloc()) LFallibleStoreElementV(object, elements, index, useBox(ins->value()),
+ tempDef);
+ break;
+ default:
+ {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ lir = new(alloc()) LFallibleStoreElementT(object, elements, index, value, tempDef);
+ break;
+ }
+ }
+
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Object ||
+ ins->value()->type() == MIRType::Null ||
+ ins->value()->type() == MIRType::ObjectOrNull);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrNonDoubleConstant(ins->index());
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+
+ LInstruction* lir = new(alloc()) LStoreUnboxedPointer(elements, index, value);
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitStoreUnboxedString(MStoreUnboxedString* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->value()->type() == MIRType::String);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+
+ LInstruction* lir = new(alloc()) LStoreUnboxedPointer(elements, index, value);
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitConvertUnboxedObjectToNative(MConvertUnboxedObjectToNative* ins)
+{
+ LInstruction* check = new(alloc()) LConvertUnboxedObjectToNative(useRegister(ins->object()));
+ add(check, ins);
+ assignSafepoint(check, ins);
+}
+
+void
+LIRGenerator::visitEffectiveAddress(MEffectiveAddress* ins)
+{
+ define(new(alloc()) LEffectiveAddress(useRegister(ins->base()), useRegister(ins->index())), ins);
+}
+
+void
+LIRGenerator::visitArrayPopShift(MArrayPopShift* ins)
+{
+ LUse object = useRegister(ins->object());
+
+ switch (ins->type()) {
+ case MIRType::Value:
+ {
+ LArrayPopShiftV* lir = new(alloc()) LArrayPopShiftV(object, temp(), temp());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::Undefined:
+ case MIRType::Null:
+ MOZ_CRASH("typed load must have a payload");
+
+ default:
+ {
+ LArrayPopShiftT* lir = new(alloc()) LArrayPopShiftT(object, temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ }
+}
+
+void
+LIRGenerator::visitArrayPush(MArrayPush* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+
+ LUse object = useRegister(ins->object());
+
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ {
+ LArrayPushV* lir = new(alloc()) LArrayPushV(object, useBox(ins->value()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ default:
+ {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ LArrayPushT* lir = new(alloc()) LArrayPushT(object, value, temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ }
+}
+
+void
+LIRGenerator::visitArraySlice(MArraySlice* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->begin()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->end()->type() == MIRType::Int32);
+
+ LArraySlice* lir = new(alloc()) LArraySlice(useFixedAtStart(ins->object(), CallTempReg0),
+ useFixedAtStart(ins->begin(), CallTempReg1),
+ useFixedAtStart(ins->end(), CallTempReg2),
+ tempFixed(CallTempReg3),
+ tempFixed(CallTempReg4));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitArrayJoin(MArrayJoin* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::String);
+ MOZ_ASSERT(ins->array()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->sep()->type() == MIRType::String);
+
+ LArrayJoin* lir = new(alloc()) LArrayJoin(useRegisterAtStart(ins->array()),
+ useRegisterAtStart(ins->sep()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitSinCos(MSinCos *ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::SinCosDouble);
+ MOZ_ASSERT(ins->input()->type() == MIRType::Double ||
+ ins->input()->type() == MIRType::Float32 ||
+ ins->input()->type() == MIRType::Int32);
+
+ LSinCos *lir = new (alloc()) LSinCos(useRegisterAtStart(ins->input()),
+ tempFixed(CallTempReg0),
+ temp());
+ defineSinCos(lir, ins);
+}
+
+void
+LIRGenerator::visitStringSplit(MStringSplit* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->separator()->type() == MIRType::String);
+
+ LStringSplit* lir = new(alloc()) LStringSplit(useRegisterAtStart(ins->string()),
+ useRegisterAtStart(ins->separator()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()) ||
+ ins->type() == MIRType::Boolean);
+
+ // We need a temp register for Uint32Array with known double result.
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->readType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ tempDef = temp();
+
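+ // Loads that require a memory barrier are bracketed by fences so the load
+ // cannot be reordered with surrounding memory operations.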
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarBeforeLoad);
+ add(fence, ins);
+ }
+ LLoadUnboxedScalar* lir = new(alloc()) LLoadUnboxedScalar(elements, index, tempDef);
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Overflow);
+ define(lir, ins);
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarAfterLoad);
+ add(fence, ins);
+ }
+}
+
+void
+LIRGenerator::visitClampToUint8(MClampToUint8* ins)
+{
+ MDefinition* in = ins->input();
+
+ switch (in->type()) {
+ case MIRType::Boolean:
+ redefine(ins, in);
+ break;
+
+ case MIRType::Int32:
+ defineReuseInput(new(alloc()) LClampIToUint8(useRegisterAtStart(in)), ins, 0);
+ break;
+
+ case MIRType::Double:
+ // LClampDToUint8 clobbers its input register. Making it available as
+ // a temp copy describes this behavior to the register allocator.
+ define(new(alloc()) LClampDToUint8(useRegisterAtStart(in), tempCopy(in, 0)), ins);
+ break;
+
+ case MIRType::Value:
+ {
+ LClampVToUint8* lir = new(alloc()) LClampVToUint8(useBox(in), tempDouble());
+ assignSnapshot(lir, Bailout_NonPrimitiveInput);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+LIRGenerator::visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ const LUse object = useRegister(ins->object());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ LLoadTypedArrayElementHole* lir = new(alloc()) LLoadTypedArrayElementHole(object, index);
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_Overflow);
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitLoadTypedArrayElementStatic(MLoadTypedArrayElementStatic* ins)
+{
+ LLoadTypedArrayElementStatic* lir =
+ new(alloc()) LLoadTypedArrayElementStatic(useRegisterAtStart(ins->ptr()));
+
+ // If the access is out of bounds, this may bail out or jump to out-of-line (OOL) code.
+ if (ins->fallible())
+ assignSnapshot(lir, Bailout_BoundsCheck);
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
+{
+ MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ if (ins->isSimdWrite()) {
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32x4, ins->value()->type() == MIRType::Float32x4);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Int8x16, ins->value()->type() == MIRType::Int8x16);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Int16x8, ins->value()->type() == MIRType::Int16x8);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Int32x4, ins->value()->type() == MIRType::Int32x4);
+ } else if (ins->isFloatWrite()) {
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32, ins->value()->type() == MIRType::Float32);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64, ins->value()->type() == MIRType::Double);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ }
+
+ LUse elements = useRegister(ins->elements());
+ LAllocation index = useRegisterOrConstant(ins->index());
+ LAllocation value;
+
+ // For byte arrays, the value has to be in a byte register on x86.
+ if (ins->isByteWrite())
+ value = useByteOpRegisterOrNonDoubleConstant(ins->value());
+ else
+ value = useRegisterOrNonDoubleConstant(ins->value());
+
+ // Optimization opportunity for atomics: on some platforms there
+ // is a store instruction that incorporates the necessary
+ // barriers, and we could use that instead of separate barrier and
+ // store instructions. See bug #1077027.
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarBeforeStore);
+ add(fence, ins);
+ }
+ add(new(alloc()) LStoreUnboxedScalar(elements, index, value), ins);
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarAfterStore);
+ add(fence, ins);
+ }
+}
+
+void
+LIRGenerator::visitStoreTypedArrayElementHole(MStoreTypedArrayElementHole* ins)
+{
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->length()->type() == MIRType::Int32);
+
+ if (ins->isFloatWrite()) {
+ MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float32, ins->value()->type() == MIRType::Float32);
+ MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float64, ins->value()->type() == MIRType::Double);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ }
+
+ LUse elements = useRegister(ins->elements());
+ LAllocation length = useAnyOrConstant(ins->length());
+ LAllocation index = useRegisterOrConstant(ins->index());
+ LAllocation value;
+
+ // For byte arrays, the value has to be in a byte register on x86.
+ if (ins->isByteWrite())
+ value = useByteOpRegisterOrNonDoubleConstant(ins->value());
+ else
+ value = useRegisterOrNonDoubleConstant(ins->value());
+ add(new(alloc()) LStoreTypedArrayElementHole(elements, length, index, value), ins);
+}
+
+void
+LIRGenerator::visitLoadFixedSlot(MLoadFixedSlot* ins)
+{
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ MIRType type = ins->type();
+
+ if (type == MIRType::Value) {
+ LLoadFixedSlotV* lir = new(alloc()) LLoadFixedSlotV(useRegisterAtStart(obj));
+ defineBox(lir, ins);
+ } else {
+ LLoadFixedSlotT* lir = new(alloc()) LLoadFixedSlotT(useRegisterForTypedLoad(obj, type));
+ define(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitLoadFixedSlotAndUnbox(MLoadFixedSlotAndUnbox* ins)
+{
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ LLoadFixedSlotAndUnbox* lir = new(alloc()) LLoadFixedSlotAndUnbox(useRegisterAtStart(obj));
+ if (ins->fallible())
+ assignSnapshot(lir, ins->bailoutKind());
+
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitStoreFixedSlot(MStoreFixedSlot* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ if (ins->value()->type() == MIRType::Value) {
+ LStoreFixedSlotV* lir = new(alloc()) LStoreFixedSlotV(useRegister(ins->object()),
+ useBox(ins->value()));
+ add(lir, ins);
+ } else {
+ LStoreFixedSlotT* lir = new(alloc()) LStoreFixedSlotT(useRegister(ins->object()),
+ useRegisterOrConstant(ins->value()));
+ add(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitGetNameCache(MGetNameCache* ins)
+{
+ MOZ_ASSERT(ins->envObj()->type() == MIRType::Object);
+
+ // Set the performs-call flag so that we don't omit the overrecursed check.
+ // This is necessary because the cache can attach a scripted getter stub
+ // that calls this script recursively.
+ gen->setPerformsCall();
+
+ LGetNameCache* lir = new(alloc()) LGetNameCache(useRegister(ins->envObj()));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallGetIntrinsicValue(MCallGetIntrinsicValue* ins)
+{
+ LCallGetIntrinsicValue* lir = new(alloc()) LCallGetIntrinsicValue();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGetPropertyCache(MGetPropertyCache* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String ||
+ id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 ||
+ id->type() == MIRType::Value);
+
+ if (ins->monitoredResult()) {
+ // Set the performs-call flag so that we don't omit the overrecursed
+ // check. This is necessary because the cache can attach a scripted
+ // getter stub that calls this script recursively.
+ gen->setPerformsCall();
+ }
+
+ // If this is a GETPROP, the id is a constant string. Allow passing it as a
+ // constant to reduce register allocation pressure.
+ bool useConstId = id->type() == MIRType::String || id->type() == MIRType::Symbol;
+
+ if (ins->type() == MIRType::Value) {
+ LGetPropertyCacheV* lir =
+ new(alloc()) LGetPropertyCacheV(useRegister(ins->object()),
+ useBoxOrTypedOrConstant(id, useConstId));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ LGetPropertyCacheT* lir =
+ new(alloc()) LGetPropertyCacheT(useRegister(ins->object()),
+ useBoxOrTypedOrConstant(id, useConstId));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitGetPropertyPolymorphic(MGetPropertyPolymorphic* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ if (ins->type() == MIRType::Value) {
+ LGetPropertyPolymorphicV* lir =
+ new(alloc()) LGetPropertyPolymorphicV(useRegister(ins->object()));
+ assignSnapshot(lir, Bailout_ShapeGuard);
+ defineBox(lir, ins);
+ } else {
+ LDefinition maybeTemp = (ins->type() == MIRType::Double) ? temp() : LDefinition::BogusTemp();
+ LGetPropertyPolymorphicT* lir =
+ new(alloc()) LGetPropertyPolymorphicT(useRegister(ins->object()), maybeTemp);
+ assignSnapshot(lir, Bailout_ShapeGuard);
+ define(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitSetPropertyPolymorphic(MSetPropertyPolymorphic* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ if (ins->value()->type() == MIRType::Value) {
+ LSetPropertyPolymorphicV* lir =
+ new(alloc()) LSetPropertyPolymorphicV(useRegister(ins->object()),
+ useBox(ins->value()),
+ temp());
+ assignSnapshot(lir, Bailout_ShapeGuard);
+ add(lir, ins);
+ } else {
+ LAllocation value = useRegisterOrConstant(ins->value());
+ LSetPropertyPolymorphicT* lir =
+ new(alloc()) LSetPropertyPolymorphicT(useRegister(ins->object()), value,
+ ins->value()->type(), temp());
+ assignSnapshot(lir, Bailout_ShapeGuard);
+ add(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitBindNameCache(MBindNameCache* ins)
+{
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ LBindNameCache* lir = new(alloc()) LBindNameCache(useRegister(ins->environmentChain()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallBindVar(MCallBindVar* ins)
+{
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ LCallBindVar* lir = new(alloc()) LCallBindVar(useRegister(ins->environmentChain()));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitGuardObjectIdentity(MGuardObjectIdentity* ins)
+{
+ LGuardObjectIdentity* guard = new(alloc()) LGuardObjectIdentity(useRegister(ins->object()),
+ useRegister(ins->expected()));
+ assignSnapshot(guard, Bailout_ObjectIdentityOrTypeGuard);
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGenerator::visitGuardClass(MGuardClass* ins)
+{
+ LDefinition t = temp();
+ LGuardClass* guard = new(alloc()) LGuardClass(useRegister(ins->object()), t);
+ assignSnapshot(guard, Bailout_ObjectIdentityOrTypeGuard);
+ add(guard, ins);
+}
+
+void
+LIRGenerator::visitGuardObject(MGuardObject* ins)
+{
+ // The type policy does all the work, so at this point the input
+ // is guaranteed to be an object.
+ MOZ_ASSERT(ins->input()->type() == MIRType::Object);
+ redefine(ins, ins->input());
+}
+
+void
+LIRGenerator::visitGuardString(MGuardString* ins)
+{
+ // The type policy does all the work, so at this point the input
+ // is guaranteed to be a string.
+ MOZ_ASSERT(ins->input()->type() == MIRType::String);
+ redefine(ins, ins->input());
+}
+
+void
+LIRGenerator::visitGuardSharedTypedArray(MGuardSharedTypedArray* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ LGuardSharedTypedArray* guard =
+ new(alloc()) LGuardSharedTypedArray(useRegister(ins->object()), temp());
+ assignSnapshot(guard, Bailout_NonSharedTypedArrayInput);
+ add(guard, ins);
+}
+
+void
+LIRGenerator::visitPolyInlineGuard(MPolyInlineGuard* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Object);
+ redefine(ins, ins->input());
+}
+
+void
+LIRGenerator::visitGuardReceiverPolymorphic(MGuardReceiverPolymorphic* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ LGuardReceiverPolymorphic* guard =
+ new(alloc()) LGuardReceiverPolymorphic(useRegister(ins->object()), temp());
+ assignSnapshot(guard, Bailout_ShapeGuard);
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGenerator::visitGuardUnboxedExpando(MGuardUnboxedExpando* ins)
+{
+ LGuardUnboxedExpando* guard =
+ new(alloc()) LGuardUnboxedExpando(useRegister(ins->object()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGenerator::visitLoadUnboxedExpando(MLoadUnboxedExpando* ins)
+{
+ LLoadUnboxedExpando* lir =
+ new(alloc()) LLoadUnboxedExpando(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitAssertRange(MAssertRange* ins)
+{
+ MDefinition* input = ins->input();
+ LInstruction* lir = nullptr;
+
+ switch (input->type()) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ lir = new(alloc()) LAssertRangeI(useRegisterAtStart(input));
+ break;
+
+ case MIRType::Double:
+ lir = new(alloc()) LAssertRangeD(useRegister(input), tempDouble());
+ break;
+
+ case MIRType::Float32:
+ lir = new(alloc()) LAssertRangeF(useRegister(input), tempDouble(), tempDouble());
+ break;
+
+ case MIRType::Value:
+ lir = new(alloc()) LAssertRangeV(useBox(input), tempToUnbox(), tempDouble(), tempDouble());
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected Range for MIRType");
+ break;
+ }
+
+ lir->setMir(ins);
+ add(lir);
+}
+
+void
+LIRGenerator::visitCallGetProperty(MCallGetProperty* ins)
+{
+ LCallGetProperty* lir = new(alloc()) LCallGetProperty(useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallGetElement(MCallGetElement* ins)
+{
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Value);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Value);
+
+ LCallGetElement* lir = new(alloc()) LCallGetElement(useBoxAtStart(ins->lhs()),
+ useBoxAtStart(ins->rhs()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallSetProperty(MCallSetProperty* ins)
+{
+ LInstruction* lir = new(alloc()) LCallSetProperty(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDeleteProperty(MDeleteProperty* ins)
+{
+ LCallDeleteProperty* lir = new(alloc()) LCallDeleteProperty(useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDeleteElement(MDeleteElement* ins)
+{
+ LCallDeleteElement* lir = new(alloc()) LCallDeleteElement(useBoxAtStart(ins->value()),
+ useBoxAtStart(ins->index()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitSetPropertyCache(MSetPropertyCache* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String ||
+ id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 ||
+ id->type() == MIRType::Value);
+
+ // If this is a SETPROP, the id is a constant string. Allow passing it as a
+ // constant to reduce register allocation pressure.
+ bool useConstId = id->type() == MIRType::String || id->type() == MIRType::Symbol;
+ bool useConstValue = IsNonNurseryConstant(ins->value());
+
+ // Set the performs-call flag so that we don't omit the overrecursed check.
+ // This is necessary because the cache can attach a scripted setter stub
+ // that calls this script recursively.
+ gen->setPerformsCall();
+
+ // If the index might be an integer, we need some extra temp registers for
+ // the dense and typed array element stubs.
+ LDefinition tempToUnboxIndex = LDefinition::BogusTemp();
+ LDefinition tempD = LDefinition::BogusTemp();
+ LDefinition tempF32 = LDefinition::BogusTemp();
+
+ if (id->mightBeType(MIRType::Int32)) {
+ if (id->type() != MIRType::Int32)
+ tempToUnboxIndex = tempToUnbox();
+ tempD = tempDouble();
+ tempF32 = hasUnaliasedDouble() ? tempFloat32() : LDefinition::BogusTemp();
+ }
+
+ LInstruction* lir =
+ new(alloc()) LSetPropertyCache(useRegister(ins->object()),
+ useBoxOrTypedOrConstant(id, useConstId),
+ useBoxOrTypedOrConstant(ins->value(), useConstValue),
+ temp(),
+ tempToUnboxIndex, tempD, tempF32);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallSetElement(MCallSetElement* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Value);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+
+ LCallSetElement* lir = new(alloc()) LCallSetElement(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->index()),
+ useBoxAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCallInitElementArray(MCallInitElementArray* ins)
+{
+ LCallInitElementArray* lir = new(alloc()) LCallInitElementArray(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitIteratorStart(MIteratorStart* ins)
+{
+ if (ins->object()->type() == MIRType::Value) {
+ LCallIteratorStartV* lir = new(alloc()) LCallIteratorStartV(useBoxAtStart(ins->object()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ // Call a stub if this is not a simple for-in loop.
+ if (ins->flags() != JSITER_ENUMERATE) {
+ LCallIteratorStartO* lir = new(alloc()) LCallIteratorStartO(useRegisterAtStart(ins->object()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ LIteratorStartO* lir = new(alloc()) LIteratorStartO(useRegister(ins->object()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitIteratorMore(MIteratorMore* ins)
+{
+ LIteratorMore* lir = new(alloc()) LIteratorMore(useRegister(ins->iterator()), temp());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitIsNoIter(MIsNoIter* ins)
+{
+ MOZ_ASSERT(ins->hasOneUse());
+ emitAtUses(ins);
+}
+
+void
+LIRGenerator::visitIteratorEnd(MIteratorEnd* ins)
+{
+ LIteratorEnd* lir = new(alloc()) LIteratorEnd(useRegister(ins->iterator()), temp(), temp(), temp());
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitStringLength(MStringLength* ins)
+{
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ define(new(alloc()) LStringLength(useRegisterAtStart(ins->string())), ins);
+}
+
+void
+LIRGenerator::visitArgumentsLength(MArgumentsLength* ins)
+{
+ define(new(alloc()) LArgumentsLength(), ins);
+}
+
+void
+LIRGenerator::visitGetFrameArgument(MGetFrameArgument* ins)
+{
+ LGetFrameArgument* lir = new(alloc()) LGetFrameArgument(useRegisterOrConstant(ins->index()));
+ defineBox(lir, ins);
+}
+
+void
+LIRGenerator::visitNewTarget(MNewTarget* ins)
+{
+ LNewTarget* lir = new(alloc()) LNewTarget();
+ defineBox(lir, ins);
+}
+
+void
+LIRGenerator::visitSetFrameArgument(MSetFrameArgument* ins)
+{
+ MDefinition* input = ins->input();
+
+ if (input->type() == MIRType::Value) {
+ LSetFrameArgumentV* lir = new(alloc()) LSetFrameArgumentV(useBox(input));
+ add(lir, ins);
+ } else if (input->type() == MIRType::Undefined || input->type() == MIRType::Null) {
+ Value val = input->type() == MIRType::Undefined ? UndefinedValue() : NullValue();
+ LSetFrameArgumentC* lir = new(alloc()) LSetFrameArgumentC(val);
+ add(lir, ins);
+ } else {
+ LSetFrameArgumentT* lir = new(alloc()) LSetFrameArgumentT(useRegister(input));
+ add(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitRunOncePrologue(MRunOncePrologue* ins)
+{
+ LRunOncePrologue* lir = new(alloc()) LRunOncePrologue;
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitRest(MRest* ins)
+{
+ MOZ_ASSERT(ins->numActuals()->type() == MIRType::Int32);
+
+ LRest* lir = new(alloc()) LRest(useFixedAtStart(ins->numActuals(), CallTempReg0),
+ tempFixed(CallTempReg1),
+ tempFixed(CallTempReg2),
+ tempFixed(CallTempReg3));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitThrow(MThrow* ins)
+{
+ MDefinition* value = ins->getOperand(0);
+ MOZ_ASSERT(value->type() == MIRType::Value);
+
+ LThrow* lir = new(alloc()) LThrow(useBoxAtStart(value));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitIn(MIn* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Object);
+
+ LIn* lir = new(alloc()) LIn(useBoxAtStart(lhs), useRegisterAtStart(rhs));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitInstanceOf(MInstanceOf* ins)
+{
+ MDefinition* lhs = ins->getOperand(0);
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value || lhs->type() == MIRType::Object);
+
+ if (lhs->type() == MIRType::Object) {
+ LInstanceOfO* lir = new(alloc()) LInstanceOfO(useRegister(lhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ LInstanceOfV* lir = new(alloc()) LInstanceOfV(useBox(lhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitCallInstanceOf(MCallInstanceOf* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Object);
+
+ LCallInstanceOf* lir = new(alloc()) LCallInstanceOf(useBoxAtStart(lhs),
+ useRegisterAtStart(rhs));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitIsCallable(MIsCallable* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new(alloc()) LIsCallable(useRegister(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitIsConstructor(MIsConstructor* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new(alloc()) LIsConstructor(useRegister(ins->object())), ins);
+}
+
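+ // An MIsObject can be emitted at its uses when it has exactly one use and
+ // that use is an MTest, i.e. the result only feeds a branch.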
+static bool
+CanEmitIsObjectAtUses(MInstruction* ins)
+{
+ if (!ins->canEmitAtUses())
+ return false;
+
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd())
+ return false;
+
+ MNode* node = iter->consumer();
+ if (!node->isDefinition())
+ return false;
+
+ if (!node->toDefinition()->isTest())
+ return false;
+
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+void
+LIRGenerator::visitIsObject(MIsObject* ins)
+{
+ if (CanEmitIsObjectAtUses(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+ LIsObject* lir = new(alloc()) LIsObject(useBoxAtStart(opd));
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitHasClass(MHasClass* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new(alloc()) LHasClass(useRegister(ins->object())), ins);
+}
+
+void
+LIRGenerator::visitWasmAddOffset(MWasmAddOffset* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ define(new(alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins);
+}
+
+void
+LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
+{
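+ // A bounds check known to be redundant can be elided, unless the
+ // wasmAlwaysCheckBounds option forces every check to be emitted.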
+ if (ins->isRedundant()) {
+ if (MOZ_LIKELY(!JitOptions.wasmAlwaysCheckBounds))
+ return;
+ }
+
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(input));
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins)
+{
+ if (ins->type() == MIRType::Int64)
+ defineInt64(new(alloc()) LWasmLoadGlobalVarI64, ins);
+ else
+ define(new(alloc()) LWasmLoadGlobalVar, ins);
+}
+
+void
+LIRGenerator::visitWasmStoreGlobalVar(MWasmStoreGlobalVar* ins)
+{
+ MDefinition* value = ins->value();
+ if (value->type() == MIRType::Int64)
+ add(new(alloc()) LWasmStoreGlobalVarI64(useInt64RegisterAtStart(value)), ins);
+ else
+ add(new(alloc()) LWasmStoreGlobalVar(useRegisterAtStart(value)), ins);
+}
+
+void
+LIRGenerator::visitWasmParameter(MWasmParameter* ins)
+{
+ ABIArg abi = ins->abi();
+ if (abi.argInRegister()) {
+#if defined(JS_NUNBOX32)
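+ // On 32-bit targets an in-register Int64 argument occupies a general-purpose register pair.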
+ if (abi.isGeneralRegPair()) {
+ defineInt64Fixed(new(alloc()) LWasmParameterI64, ins,
+ LInt64Allocation(LAllocation(AnyRegister(abi.gpr64().high)),
+ LAllocation(AnyRegister(abi.gpr64().low))));
+ return;
+ }
+#endif
+ defineFixed(new(alloc()) LWasmParameter, ins, LAllocation(abi.reg()));
+ return;
+ }
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(!abi.argInRegister());
+ defineInt64Fixed(new(alloc()) LWasmParameterI64, ins,
+#if defined(JS_NUNBOX32)
+ LInt64Allocation(LArgument(abi.offsetFromArgBase() + INT64HIGH_OFFSET),
+ LArgument(abi.offsetFromArgBase() + INT64LOW_OFFSET))
+#else
+ LInt64Allocation(LArgument(abi.offsetFromArgBase()))
+#endif
+ );
+ } else {
+ MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()));
+ defineFixed(new(alloc()) LWasmParameter, ins, LArgument(abi.offsetFromArgBase()));
+ }
+}
+
+void
+LIRGenerator::visitWasmReturn(MWasmReturn* ins)
+{
+ MDefinition* rval = ins->getOperand(0);
+
+ if (rval->type() == MIRType::Int64) {
+ LWasmReturnI64* lir = new(alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64));
+
+ // Preserve the TLS pointer we were passed in `WasmTlsReg`.
+ MDefinition* tlsPtr = ins->getOperand(1);
+ lir->setOperand(INT64_PIECES, useFixed(tlsPtr, WasmTlsReg));
+
+ add(lir);
+ return;
+ }
+
+ LWasmReturn* lir = new(alloc()) LWasmReturn;
+ if (rval->type() == MIRType::Float32)
+ lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
+ else if (rval->type() == MIRType::Double)
+ lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
+ else if (IsSimdType(rval->type()))
+ lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
+ else if (rval->type() == MIRType::Int32)
+ lir->setOperand(0, useFixed(rval, ReturnReg));
+ else
+ MOZ_CRASH("Unexpected wasm return type");
+
+ // Preserve the TLS pointer we were passed in `WasmTlsReg`.
+ MDefinition* tlsPtr = ins->getOperand(1);
+ lir->setOperand(1, useFixed(tlsPtr, WasmTlsReg));
+
+ add(lir);
+}
+
+void
+LIRGenerator::visitWasmReturnVoid(MWasmReturnVoid* ins)
+{
+ auto* lir = new(alloc()) LWasmReturnVoid;
+
+ // Preserve the TLS pointer we were passed in `WasmTlsReg`.
+ MDefinition* tlsPtr = ins->getOperand(0);
+ lir->setOperand(0, useFixed(tlsPtr, WasmTlsReg));
+
+ add(lir);
+}
+
+void
+LIRGenerator::visitWasmStackArg(MWasmStackArg* ins)
+{
+ if (ins->arg()->type() == MIRType::Int64) {
+ add(new(alloc()) LWasmStackArgI64(useInt64RegisterOrConstantAtStart(ins->arg())), ins);
+ } else if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) {
+ MOZ_ASSERT(!ins->arg()->isEmittedAtUses());
+ add(new(alloc()) LWasmStackArg(useRegisterAtStart(ins->arg())), ins);
+ } else {
+ add(new(alloc()) LWasmStackArg(useRegisterOrConstantAtStart(ins->arg())), ins);
+ }
+}
+
+void
+LIRGenerator::visitWasmCall(MWasmCall* ins)
+{
+ gen->setPerformsCall();
+
+ LAllocation* args = gen->allocate<LAllocation>(ins->numOperands());
+ if (!args) {
+ gen->abort("Couldn't allocate for MWasmCall");
+ return;
+ }
+
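+ // Each register argument is pinned to its ABI register at the start of the
+ // call; stack arguments are set up separately by MWasmStackArg. A call
+ // through a table additionally carries its index in a fixed register.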
+ for (unsigned i = 0; i < ins->numArgs(); i++)
+ args[i] = useFixedAtStart(ins->getOperand(i), ins->registerForArg(i));
+
+ if (ins->callee().isTable())
+ args[ins->numArgs()] = useFixedAtStart(ins->getOperand(ins->numArgs()), WasmTableCallIndexReg);
+
+ LInstruction* lir;
+ if (ins->type() == MIRType::Int64)
+ lir = new(alloc()) LWasmCallI64(args, ins->numOperands());
+ else
+ lir = new(alloc()) LWasmCall(args, ins->numOperands());
+
+ if (ins->type() == MIRType::None)
+ add(lir, ins);
+ else
+ defineReturn(lir, ins);
+}
+
+void
+LIRGenerator::visitSetDOMProperty(MSetDOMProperty* ins)
+{
+ MDefinition* val = ins->value();
+
+ Register cxReg, objReg, privReg, valueReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ GetTempRegForIntArg(3, 0, &valueReg);
+
+ // Keep using GetTempRegForIntArg, since we want to make sure we
+ // don't clobber registers we're already using.
+ Register tempReg1, tempReg2;
+ GetTempRegForIntArg(4, 0, &tempReg1);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(5, 0, &tempReg2);
+ MOZ_ASSERT(ok, "How can we not have six temp registers?");
+
+ LSetDOMProperty* lir = new(alloc()) LSetDOMProperty(tempFixed(cxReg),
+ useFixedAtStart(ins->object(), objReg),
+ useBoxFixedAtStart(val, tempReg1, tempReg2),
+ tempFixed(privReg),
+ tempFixed(valueReg));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGetDOMProperty(MGetDOMProperty* ins)
+{
+ Register cxReg, objReg, privReg, valueReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &valueReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+ LGetDOMProperty* lir = new(alloc()) LGetDOMProperty(tempFixed(cxReg),
+ useFixedAtStart(ins->object(), objReg),
+ tempFixed(privReg),
+ tempFixed(valueReg));
+
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGetDOMMember(MGetDOMMember* ins)
+{
+ MOZ_ASSERT(ins->isDomMovable(), "Members had better be movable");
+ // We wish we could assert that ins->domAliasSet() == JSJitInfo::AliasNone,
+ // but some MGetDOMMembers are for [Pure], not [Constant] properties, whose
+ // value can in fact change as a result of DOM setters and method calls.
+ MOZ_ASSERT(ins->domAliasSet() != JSJitInfo::AliasEverything,
+ "Member gets had better not alias the world");
+
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ MIRType type = ins->type();
+
+ if (type == MIRType::Value) {
+ LGetDOMMemberV* lir = new(alloc()) LGetDOMMemberV(useRegisterAtStart(obj));
+ defineBox(lir, ins);
+ } else {
+ LGetDOMMemberT* lir = new(alloc()) LGetDOMMemberT(useRegisterForTypedLoad(obj, type));
+ define(lir, ins);
+ }
+}
+
+void
+LIRGenerator::visitRecompileCheck(MRecompileCheck* ins)
+{
+ LRecompileCheck* lir = new(alloc()) LRecompileCheck(temp());
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitSimdBox(MSimdBox* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->input()->type()));
+ LUse in = useRegister(ins->input());
+ LSimdBox* lir = new(alloc()) LSimdBox(in, temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitSimdUnbox(MSimdUnbox* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Object);
+ MOZ_ASSERT(IsSimdType(ins->type()));
+ LUse in = useRegister(ins->input());
+ LSimdUnbox* lir = new(alloc()) LSimdUnbox(in, temp());
+ assignSnapshot(lir, Bailout_UnexpectedSimdInput);
+ define(lir, ins);
+}
+
+void
+LIRGenerator::visitSimdConstant(MSimdConstant* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ switch (ins->type()) {
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ define(new(alloc()) LSimd128Int(), ins);
+ break;
+ case MIRType::Float32x4:
+ define(new(alloc()) LSimd128Float(), ins);
+ break;
+ default:
+ MOZ_CRASH("Unknown SIMD kind when generating constant");
+ }
+}
+
+void
+LIRGenerator::visitSimdConvert(MSimdConvert* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+ MDefinition* input = ins->input();
+ LUse use = useRegister(input);
+ if (ins->type() == MIRType::Int32x4) {
+ MOZ_ASSERT(input->type() == MIRType::Float32x4);
+ switch (ins->signedness()) {
+ case SimdSign::Signed: {
+ LFloat32x4ToInt32x4* lir = new(alloc()) LFloat32x4ToInt32x4(use, temp());
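+ // Only JS (Ion) compilations use bailout snapshots; wasm compilations do not bail out.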
+ if (!gen->compilingWasm())
+ assignSnapshot(lir, Bailout_BoundsCheck);
+ define(lir, ins);
+ break;
+ }
+ case SimdSign::Unsigned: {
+ LFloat32x4ToUint32x4* lir =
+ new (alloc()) LFloat32x4ToUint32x4(use, temp(), temp(LDefinition::SIMD128INT));
+ if (!gen->compilingWasm())
+ assignSnapshot(lir, Bailout_BoundsCheck);
+ define(lir, ins);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected SimdConvert sign");
+ }
+ } else if (ins->type() == MIRType::Float32x4) {
+ MOZ_ASSERT(input->type() == MIRType::Int32x4);
+ MOZ_ASSERT(ins->signedness() == SimdSign::Signed, "Unexpected SimdConvert sign");
+ define(new(alloc()) LInt32x4ToFloat32x4(use), ins);
+ } else {
+ MOZ_CRASH("Unknown SIMD kind when generating constant");
+ }
+}
+
+void
+LIRGenerator::visitSimdReinterpretCast(MSimdReinterpretCast* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()) && IsSimdType(ins->input()->type()));
+ MDefinition* input = ins->input();
+ LUse use = useRegisterAtStart(input);
+ // :TODO: (Bug 1132894) We have to allocate a different register as redefine
+ // and/or defineReuseInput are not yet capable of reusing the same register
+ // with a different register type.
+ define(new(alloc()) LSimdReinterpretCast(use), ins);
+}
+
+void
+LIRGenerator::visitSimdAllTrue(MSimdAllTrue* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(IsBooleanSimdType(input->type()));
+
+ LUse use = useRegisterAtStart(input);
+ define(new(alloc()) LSimdAllTrue(use), ins);
+}
+
+void
+LIRGenerator::visitSimdAnyTrue(MSimdAnyTrue* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(IsBooleanSimdType(input->type()));
+
+ LUse use = useRegisterAtStart(input);
+ define(new(alloc()) LSimdAnyTrue(use), ins);
+}
+
+void
+LIRGenerator::visitSimdUnaryArith(MSimdUnaryArith* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->input()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ // Cannot be at start, as the output is used as a temporary to store values.
+ LUse in = use(ins->input());
+
+ switch (ins->type()) {
+ case MIRType::Int8x16:
+ case MIRType::Bool8x16:
+ define(new (alloc()) LSimdUnaryArithIx16(in), ins);
+ break;
+ case MIRType::Int16x8:
+ case MIRType::Bool16x8:
+ define(new (alloc()) LSimdUnaryArithIx8(in), ins);
+ break;
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ define(new (alloc()) LSimdUnaryArithIx4(in), ins);
+ break;
+ case MIRType::Float32x4:
+ define(new (alloc()) LSimdUnaryArithFx4(in), ins);
+ break;
+ default:
+ MOZ_CRASH("Unknown SIMD kind for unary operation");
+ }
+}
+
+void
+LIRGenerator::visitSimdBinaryComp(MSimdBinaryComp* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+ MOZ_ASSERT(IsBooleanSimdType(ins->type()));
+
+ if (ShouldReorderCommutative(ins->lhs(), ins->rhs(), ins))
+ ins->reverse();
+
+ switch (ins->specialization()) {
+ case MIRType::Int8x16: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+ LSimdBinaryCompIx16* add = new (alloc()) LSimdBinaryCompIx16();
+ lowerForFPU(add, ins, ins->lhs(), ins->rhs());
+ return;
+ }
+ case MIRType::Int16x8: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+ LSimdBinaryCompIx8* add = new (alloc()) LSimdBinaryCompIx8();
+ lowerForFPU(add, ins, ins->lhs(), ins->rhs());
+ return;
+ }
+ case MIRType::Int32x4: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+ LSimdBinaryCompIx4* add = new (alloc()) LSimdBinaryCompIx4();
+ lowerForCompIx4(add, ins, ins->lhs(), ins->rhs());
+ return;
+ }
+ case MIRType::Float32x4: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+ LSimdBinaryCompFx4* add = new (alloc()) LSimdBinaryCompFx4();
+ lowerForCompFx4(add, ins, ins->lhs(), ins->rhs());
+ return;
+ }
+ default:
+ MOZ_CRASH("Unknown compare type when comparing values");
+ }
+}
+
+void
+LIRGenerator::visitSimdBinaryBitwise(MSimdBinaryBitwise* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ ReorderCommutative(&lhs, &rhs, ins);
+ LSimdBinaryBitwise* lir = new(alloc()) LSimdBinaryBitwise;
+ lowerForFPU(lir, ins, lhs, rhs);
+}
+
+void
+LIRGenerator::visitSimdShift(MSimdShift* ins)
+{
+ MOZ_ASSERT(IsIntegerSimdType(ins->type()));
+ MOZ_ASSERT(ins->lhs()->type() == ins->type());
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Int32);
+
+ LUse vector = useRegisterAtStart(ins->lhs());
+ LAllocation value = useRegisterOrConstant(ins->rhs());
+ // We need a temp register to mask the shift amount, but not if the shift
+ // amount is a constant.
+ LDefinition tempReg = value.isConstant() ? LDefinition::BogusTemp() : temp();
+ LSimdShift* lir = new(alloc()) LSimdShift(vector, value, tempReg);
+ defineReuseInput(lir, ins, 0);
+}
+
+void
+LIRGenerator::visitLexicalCheck(MLexicalCheck* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+ LLexicalCheck* lir = new(alloc()) LLexicalCheck(useBox(input));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, input);
+}
+
+void
+LIRGenerator::visitThrowRuntimeLexicalError(MThrowRuntimeLexicalError* ins)
+{
+ LThrowRuntimeLexicalError* lir = new(alloc()) LThrowRuntimeLexicalError();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitGlobalNameConflictsCheck(MGlobalNameConflictsCheck* ins)
+{
+ LGlobalNameConflictsCheck* lir = new(alloc()) LGlobalNameConflictsCheck();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDebugger(MDebugger* ins)
+{
+ LDebugger* lir = new(alloc()) LDebugger(tempFixed(CallTempReg0), tempFixed(CallTempReg1));
+ assignSnapshot(lir, Bailout_Debugger);
+ add(lir, ins);
+}
+
+void
+LIRGenerator::visitAtomicIsLockFree(MAtomicIsLockFree* ins)
+{
+ define(new(alloc()) LAtomicIsLockFree(useRegister(ins->input())), ins);
+}
+
+void
+LIRGenerator::visitCheckReturn(MCheckReturn* ins)
+{
+ MDefinition* retVal = ins->returnValue();
+ MDefinition* thisVal = ins->thisValue();
+ MOZ_ASSERT(retVal->type() == MIRType::Value);
+ MOZ_ASSERT(thisVal->type() == MIRType::Value);
+
+ LCheckReturn* lir = new(alloc()) LCheckReturn(useBoxAtStart(retVal), useBoxAtStart(thisVal));
+ assignSnapshot(lir, Bailout_BadDerivedConstructorReturn);
+ add(lir, ins);
+ redefine(ins, retVal);
+}
+
+void
+LIRGenerator::visitCheckIsObj(MCheckIsObj* ins)
+{
+ MDefinition* checkVal = ins->checkValue();
+ MOZ_ASSERT(checkVal->type() == MIRType::Value);
+
+ LCheckIsObj* lir = new(alloc()) LCheckIsObj(useBoxAtStart(checkVal));
+ redefine(ins, checkVal);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitCheckObjCoercible(MCheckObjCoercible* ins)
+{
+ MDefinition* checkVal = ins->checkValue();
+ MOZ_ASSERT(checkVal->type() == MIRType::Value);
+
+ LCheckObjCoercible* lir = new(alloc()) LCheckObjCoercible(useBoxAtStart(checkVal));
+ redefine(ins, checkVal);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGenerator::visitDebugCheckSelfHosted(MDebugCheckSelfHosted* ins)
+{
+ MDefinition* checkVal = ins->checkValue();
+ MOZ_ASSERT(checkVal->type() == MIRType::Value);
+
+ LDebugCheckSelfHosted* lir = new (alloc()) LDebugCheckSelfHosted(useBoxAtStart(checkVal));
+ redefine(ins, checkVal);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+static void
+SpewResumePoint(MBasicBlock* block, MInstruction* ins, MResumePoint* resumePoint)
+{
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Current resume point %p details:\n", (void*)resumePoint);
+ out.printf(" frame count: %u\n", resumePoint->frameCount());
+
+ if (ins) {
+ out.printf(" taken after: ");
+ ins->printName(out);
+ } else {
+ out.printf(" taken at block %d entry", block->id());
+ }
+ out.printf("\n");
+
+ out.printf(" pc: %p (script: %p, offset: %d)\n",
+ (void*)resumePoint->pc(),
+ (void*)resumePoint->block()->info().script(),
+ int(resumePoint->block()->info().script()->pcToOffset(resumePoint->pc())));
+
+ for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++) {
+ MDefinition* in = resumePoint->getOperand(i);
+ out.printf(" slot%u: ", (unsigned)i);
+ in->printName(out);
+ out.printf("\n");
+ }
+}
+
+bool
+LIRGenerator::visitInstruction(MInstruction* ins)
+{
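+ // Instructions that are only recovered on bailout are materialized from the
+ // snapshot, so no LIR is generated for them.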
+ if (ins->isRecoveredOnBailout()) {
+ MOZ_ASSERT(!JitOptions.disableRecoverIns);
+ return true;
+ }
+
+ if (!gen->ensureBallast())
+ return false;
+ ins->accept(this);
+
+ if (ins->possiblyCalls())
+ gen->setPerformsCall();
+
+ if (ins->resumePoint())
+ updateResumeState(ins);
+
+#ifdef DEBUG
+ ins->setInWorklistUnchecked();
+#endif
+
+ // If no safepoint was created, there's no need for an OSI point.
+ if (LOsiPoint* osiPoint = popOsiPoint())
+ add(osiPoint);
+
+ return !gen->errored();
+}
+
+void
+LIRGenerator::definePhis()
+{
+ size_t lirIndex = 0;
+ MBasicBlock* block = current->mir();
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ if (phi->type() == MIRType::Value) {
+ defineUntypedPhi(*phi, lirIndex);
+ lirIndex += BOX_PIECES;
+ } else if (phi->type() == MIRType::Int64) {
+ defineInt64Phi(*phi, lirIndex);
+ lirIndex += INT64_PIECES;
+ } else {
+ defineTypedPhi(*phi, lirIndex);
+ lirIndex += 1;
+ }
+ }
+}
+
+void
+LIRGenerator::updateResumeState(MInstruction* ins)
+{
+ lastResumePoint_ = ins->resumePoint();
+ if (JitSpewEnabled(JitSpew_IonSnapshots) && lastResumePoint_)
+ SpewResumePoint(nullptr, ins, lastResumePoint_);
+}
+
+void
+LIRGenerator::updateResumeState(MBasicBlock* block)
+{
+ // As the Value Numbering phase can remove edges from the entry basic block
+ // to code paths reachable from the OSR entry point, we have to add fixup
+ // blocks to keep the dominator tree organized the same way. These fixup
+ // blocks are flagged as unreachable, and should only exist if the graph has
+ // an OSR block.
+ //
+ // Note: RangeAnalysis can flag blocks as unreachable, but they are only
+ // removed if GVN (including UCE) is enabled.
+ MOZ_ASSERT_IF(!mir()->compilingWasm() && !block->unreachable(), block->entryResumePoint());
+ MOZ_ASSERT_IF(block->unreachable(), block->graph().osrBlock() ||
+ !mir()->optimizationInfo().gvnEnabled());
+ lastResumePoint_ = block->entryResumePoint();
+ if (JitSpewEnabled(JitSpew_IonSnapshots) && lastResumePoint_)
+ SpewResumePoint(block, nullptr, lastResumePoint_);
+}
+
+bool
+LIRGenerator::visitBlock(MBasicBlock* block)
+{
+ current = block->lir();
+ updateResumeState(block);
+
+ definePhis();
+
+ // See the fixup blocks added by Value Numbering; they keep the dominator
+ // relation intact in the presence of the OSR block.
+ MOZ_ASSERT_IF(block->unreachable(), *block->begin() == block->lastIns() ||
+ !mir()->optimizationInfo().gvnEnabled());
+ MOZ_ASSERT_IF(block->unreachable(), block->graph().osrBlock() ||
+ !mir()->optimizationInfo().gvnEnabled());
+ for (MInstructionIterator iter = block->begin(); *iter != block->lastIns(); iter++) {
+ if (!visitInstruction(*iter))
+ return false;
+ }
+
+ if (block->successorWithPhis()) {
+ // If we have a successor with phis, lower the phi input now that we
+ // are approaching the join point.
+ MBasicBlock* successor = block->successorWithPhis();
+ uint32_t position = block->positionInPhiSuccessor();
+ size_t lirIndex = 0;
+ for (MPhiIterator phi(successor->phisBegin()); phi != successor->phisEnd(); phi++) {
+ if (!gen->ensureBallast())
+ return false;
+
+ MDefinition* opd = phi->getOperand(position);
+ ensureDefined(opd);
+
+ MOZ_ASSERT(opd->type() == phi->type());
+
+ if (phi->type() == MIRType::Value) {
+ lowerUntypedPhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += BOX_PIECES;
+ } else if (phi->type() == MIRType::Int64) {
+ lowerInt64PhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += INT64_PIECES;
+ } else {
+ lowerTypedPhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += 1;
+ }
+ }
+ }
+
+ // Now emit the last instruction, which is some form of branch.
+ if (!visitInstruction(block->lastIns()))
+ return false;
+
+ return true;
+}
+
+void
+LIRGenerator::visitNaNToZero(MNaNToZero* ins)
+{
+ MDefinition* input = ins->input();
+
+ if (ins->operandIsNeverNaN() && ins->operandIsNeverNegativeZero()) {
+ redefine(ins, input);
+ return;
+ }
+ LNaNToZero* lir = new(alloc()) LNaNToZero(useRegisterAtStart(input), tempDouble());
+ defineReuseInput(lir, ins, 0);
+}
+
+bool
+LIRGenerator::generate()
+{
+ // Create all blocks and prep all phis beforehand.
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ if (gen->shouldCancel("Lowering (preparation loop)"))
+ return false;
+
+ if (!lirGraph_.initBlock(*block))
+ return false;
+ }
+
+ for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
+ if (gen->shouldCancel("Lowering (main loop)"))
+ return false;
+
+ if (!visitBlock(*block))
+ return false;
+ }
+
+ lirGraph_.setArgumentSlotCount(maxargslots_);
+ return true;
+}
+
+void
+LIRGenerator::visitPhi(MPhi* phi)
+{
+ // Phi nodes are not lowered because they are only meaningful for the register allocator.
+ MOZ_CRASH("Unexpected Phi node during Lowering.");
+}
+
+void
+LIRGenerator::visitBeta(MBeta* beta)
+{
+ // Beta nodes are supposed to be removed before lowering, because they are
+ // only used to carry range information for Range Analysis.
+ MOZ_CRASH("Unexpected Beta node during Lowering.");
+}
+
+void
+LIRGenerator::visitObjectState(MObjectState* objState)
+{
+ // ObjectState nodes are always recovered on bailouts
+ MOZ_CRASH("Unexpected ObjectState node during Lowering.");
+}
+
+void
+LIRGenerator::visitArrayState(MArrayState* objState)
+{
+ // ArrayState nodes are always recovered on bailouts
+ MOZ_CRASH("Unexpected ArrayState node during Lowering.");
+}
+
+void
+LIRGenerator::visitUnknownValue(MUnknownValue* ins)
+{
+ MOZ_CRASH("Can not lower unknown value.");
+}
diff --git a/js/src/jit/Lowering.h b/js/src/jit/Lowering.h
new file mode 100644
index 000000000..0f66a3c24
--- /dev/null
+++ b/js/src/jit/Lowering.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Lowering_h
+#define jit_Lowering_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/Lowering-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/Lowering-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/Lowering-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Lowering-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Lowering-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Lowering-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/Lowering-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+class LIRGenerator : public LIRGeneratorSpecific
+{
+ void updateResumeState(MInstruction* ins);
+ void updateResumeState(MBasicBlock* block);
+
+ // The maximum argument-slot depth, used for frame size class determination.
+ uint32_t maxargslots_;
+
+ public:
+ LIRGenerator(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorSpecific(gen, graph, lirGraph),
+ maxargslots_(0)
+ { }
+
+ MOZ_MUST_USE bool generate();
+
+ private:
+ LBoxAllocation useBoxFixedAtStart(MDefinition* mir, Register reg1, Register reg2) {
+ return useBoxFixed(mir, reg1, reg2, /* useAtStart = */ true);
+ }
+
+ LBoxAllocation useBoxFixedAtStart(MDefinition* mir, ValueOperand op);
+ LBoxAllocation useBoxAtStart(MDefinition* mir, LUse::Policy policy = LUse::REGISTER);
+
+ void lowerBitOp(JSOp op, MInstruction* ins);
+ void lowerShiftOp(JSOp op, MShiftInstruction* ins);
+ void lowerBinaryV(JSOp op, MBinaryInstruction* ins);
+ void definePhis();
+
+ MOZ_MUST_USE bool lowerCallArguments(MCall* call);
+
+ public:
+ MOZ_MUST_USE bool visitInstruction(MInstruction* ins);
+ MOZ_MUST_USE bool visitBlock(MBasicBlock* block);
+
+ // Visitor hooks are explicit, to give CPU-specific versions a chance to
+ // intercept without a bunch of explicit gunk in the .cpp.
+ void visitCloneLiteral(MCloneLiteral* ins);
+ void visitParameter(MParameter* param);
+ void visitCallee(MCallee* callee);
+ void visitIsConstructing(MIsConstructing* ins);
+ void visitGoto(MGoto* ins);
+ void visitTableSwitch(MTableSwitch* tableswitch);
+ void visitNewArray(MNewArray* ins);
+ void visitNewArrayCopyOnWrite(MNewArrayCopyOnWrite* ins);
+ void visitNewArrayDynamicLength(MNewArrayDynamicLength* ins);
+ void visitNewTypedArray(MNewTypedArray* ins);
+ void visitNewTypedArrayDynamicLength(MNewTypedArrayDynamicLength* ins);
+ void visitNewObject(MNewObject* ins);
+ void visitNewTypedObject(MNewTypedObject* ins);
+ void visitNewNamedLambdaObject(MNewNamedLambdaObject* ins);
+ void visitNewCallObject(MNewCallObject* ins);
+ void visitNewSingletonCallObject(MNewSingletonCallObject* ins);
+ void visitNewStringObject(MNewStringObject* ins);
+ void visitNewDerivedTypedObject(MNewDerivedTypedObject* ins);
+ void visitInitElem(MInitElem* ins);
+ void visitInitElemGetterSetter(MInitElemGetterSetter* ins);
+ void visitMutateProto(MMutateProto* ins);
+ void visitInitProp(MInitProp* ins);
+ void visitInitPropGetterSetter(MInitPropGetterSetter* ins);
+ void visitCheckOverRecursed(MCheckOverRecursed* ins);
+ void visitDefVar(MDefVar* ins);
+ void visitDefLexical(MDefLexical* ins);
+ void visitDefFun(MDefFun* ins);
+ void visitCreateThisWithTemplate(MCreateThisWithTemplate* ins);
+ void visitCreateThisWithProto(MCreateThisWithProto* ins);
+ void visitCreateThis(MCreateThis* ins);
+ void visitCreateArgumentsObject(MCreateArgumentsObject* ins);
+ void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
+ void visitSetArgumentsObjectArg(MSetArgumentsObjectArg* ins);
+ void visitReturnFromCtor(MReturnFromCtor* ins);
+ void visitComputeThis(MComputeThis* ins);
+ void visitCall(MCall* call);
+ void visitApplyArgs(MApplyArgs* apply);
+ void visitApplyArray(MApplyArray* apply);
+ void visitArraySplice(MArraySplice* splice);
+ void visitBail(MBail* bail);
+ void visitUnreachable(MUnreachable* unreachable);
+ void visitEncodeSnapshot(MEncodeSnapshot* ins);
+ void visitAssertFloat32(MAssertFloat32* ins);
+ void visitAssertRecoveredOnBailout(MAssertRecoveredOnBailout* ins);
+ void visitGetDynamicName(MGetDynamicName* ins);
+ void visitCallDirectEval(MCallDirectEval* ins);
+ void visitTest(MTest* test);
+ void visitGotoWithFake(MGotoWithFake* ins);
+ void visitFunctionDispatch(MFunctionDispatch* ins);
+ void visitObjectGroupDispatch(MObjectGroupDispatch* ins);
+ void visitCompare(MCompare* comp);
+ void visitTypeOf(MTypeOf* ins);
+ void visitToAsync(MToAsync* ins);
+ void visitToId(MToId* ins);
+ void visitBitNot(MBitNot* ins);
+ void visitBitAnd(MBitAnd* ins);
+ void visitBitOr(MBitOr* ins);
+ void visitBitXor(MBitXor* ins);
+ void visitLsh(MLsh* ins);
+ void visitRsh(MRsh* ins);
+ void visitUrsh(MUrsh* ins);
+ void visitSignExtend(MSignExtend* ins);
+ void visitRotate(MRotate* ins);
+ void visitFloor(MFloor* ins);
+ void visitCeil(MCeil* ins);
+ void visitRound(MRound* ins);
+ void visitMinMax(MMinMax* ins);
+ void visitAbs(MAbs* ins);
+ void visitClz(MClz* ins);
+ void visitCtz(MCtz* ins);
+ void visitSqrt(MSqrt* ins);
+ void visitPopcnt(MPopcnt* ins);
+ void visitAtan2(MAtan2* ins);
+ void visitHypot(MHypot* ins);
+ void visitPow(MPow* ins);
+ void visitMathFunction(MMathFunction* ins);
+ void visitAdd(MAdd* ins);
+ void visitSub(MSub* ins);
+ void visitMul(MMul* ins);
+ void visitDiv(MDiv* ins);
+ void visitMod(MMod* ins);
+ void visitConcat(MConcat* ins);
+ void visitCharCodeAt(MCharCodeAt* ins);
+ void visitFromCharCode(MFromCharCode* ins);
+ void visitFromCodePoint(MFromCodePoint* ins);
+ void visitSinCos(MSinCos* ins);
+ void visitStringSplit(MStringSplit* ins);
+ void visitStart(MStart* start);
+ void visitOsrEntry(MOsrEntry* entry);
+ void visitNop(MNop* nop);
+ void visitLimitedTruncate(MLimitedTruncate* nop);
+ void visitOsrValue(MOsrValue* value);
+ void visitOsrEnvironmentChain(MOsrEnvironmentChain* object);
+ void visitOsrReturnValue(MOsrReturnValue* value);
+ void visitOsrArgumentsObject(MOsrArgumentsObject* object);
+ void visitToDouble(MToDouble* convert);
+ void visitToFloat32(MToFloat32* convert);
+ void visitToInt32(MToInt32* convert);
+ void visitTruncateToInt32(MTruncateToInt32* truncate);
+ void visitWasmTruncateToInt32(MWasmTruncateToInt32* truncate);
+ void visitWrapInt64ToInt32(MWrapInt64ToInt32* ins);
+ void visitToString(MToString* convert);
+ void visitToObjectOrNull(MToObjectOrNull* convert);
+ void visitRegExp(MRegExp* ins);
+ void visitRegExpMatcher(MRegExpMatcher* ins);
+ void visitRegExpSearcher(MRegExpSearcher* ins);
+ void visitRegExpTester(MRegExpTester* ins);
+ void visitRegExpPrototypeOptimizable(MRegExpPrototypeOptimizable* ins);
+ void visitRegExpInstanceOptimizable(MRegExpInstanceOptimizable* ins);
+ void visitGetFirstDollarIndex(MGetFirstDollarIndex* ins);
+ void visitStringReplace(MStringReplace* ins);
+ void visitBinarySharedStub(MBinarySharedStub* ins);
+ void visitUnarySharedStub(MUnarySharedStub* ins);
+ void visitNullarySharedStub(MNullarySharedStub* ins);
+ void visitLambda(MLambda* ins);
+ void visitLambdaArrow(MLambdaArrow* ins);
+ void visitKeepAliveObject(MKeepAliveObject* ins);
+ void visitSlots(MSlots* ins);
+ void visitElements(MElements* ins);
+ void visitConstantElements(MConstantElements* ins);
+ void visitConvertElementsToDoubles(MConvertElementsToDoubles* ins);
+ void visitMaybeToDoubleElement(MMaybeToDoubleElement* ins);
+ void visitMaybeCopyElementsForWrite(MMaybeCopyElementsForWrite* ins);
+ void visitLoadSlot(MLoadSlot* ins);
+ void visitLoadFixedSlotAndUnbox(MLoadFixedSlotAndUnbox* ins);
+ void visitFunctionEnvironment(MFunctionEnvironment* ins);
+ void visitInterruptCheck(MInterruptCheck* ins);
+ void visitWasmTrap(MWasmTrap* ins);
+ void visitWasmReinterpret(MWasmReinterpret* ins);
+ void visitStoreSlot(MStoreSlot* ins);
+ void visitFilterTypeSet(MFilterTypeSet* ins);
+ void visitTypeBarrier(MTypeBarrier* ins);
+ void visitMonitorTypes(MMonitorTypes* ins);
+ void visitPostWriteBarrier(MPostWriteBarrier* ins);
+ void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
+ void visitArrayLength(MArrayLength* ins);
+ void visitSetArrayLength(MSetArrayLength* ins);
+ void visitGetNextEntryForIterator(MGetNextEntryForIterator* ins);
+ void visitTypedArrayLength(MTypedArrayLength* ins);
+ void visitTypedArrayElements(MTypedArrayElements* ins);
+ void visitSetDisjointTypedElements(MSetDisjointTypedElements* ins);
+ void visitTypedObjectElements(MTypedObjectElements* ins);
+ void visitSetTypedObjectOffset(MSetTypedObjectOffset* ins);
+ void visitTypedObjectDescr(MTypedObjectDescr* ins);
+ void visitInitializedLength(MInitializedLength* ins);
+ void visitSetInitializedLength(MSetInitializedLength* ins);
+ void visitUnboxedArrayLength(MUnboxedArrayLength* ins);
+ void visitUnboxedArrayInitializedLength(MUnboxedArrayInitializedLength* ins);
+ void visitIncrementUnboxedArrayInitializedLength(MIncrementUnboxedArrayInitializedLength* ins);
+ void visitSetUnboxedArrayInitializedLength(MSetUnboxedArrayInitializedLength* ins);
+ void visitNot(MNot* ins);
+ void visitBoundsCheck(MBoundsCheck* ins);
+ void visitBoundsCheckLower(MBoundsCheckLower* ins);
+ void visitLoadElement(MLoadElement* ins);
+ void visitLoadElementHole(MLoadElementHole* ins);
+ void visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins);
+ void visitLoadUnboxedString(MLoadUnboxedString* ins);
+ void visitStoreElement(MStoreElement* ins);
+ void visitStoreElementHole(MStoreElementHole* ins);
+ void visitFallibleStoreElement(MFallibleStoreElement* ins);
+ void visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins);
+ void visitStoreUnboxedString(MStoreUnboxedString* ins);
+ void visitConvertUnboxedObjectToNative(MConvertUnboxedObjectToNative* ins);
+ void visitEffectiveAddress(MEffectiveAddress* ins);
+ void visitArrayPopShift(MArrayPopShift* ins);
+ void visitArrayPush(MArrayPush* ins);
+ void visitArraySlice(MArraySlice* ins);
+ void visitArrayJoin(MArrayJoin* ins);
+ void visitLoadUnboxedScalar(MLoadUnboxedScalar* ins);
+ void visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole* ins);
+ void visitLoadTypedArrayElementStatic(MLoadTypedArrayElementStatic* ins);
+ void visitStoreUnboxedScalar(MStoreUnboxedScalar* ins);
+ void visitStoreTypedArrayElementHole(MStoreTypedArrayElementHole* ins);
+ void visitClampToUint8(MClampToUint8* ins);
+ void visitLoadFixedSlot(MLoadFixedSlot* ins);
+ void visitStoreFixedSlot(MStoreFixedSlot* ins);
+ void visitGetPropertyCache(MGetPropertyCache* ins);
+ void visitGetPropertyPolymorphic(MGetPropertyPolymorphic* ins);
+ void visitSetPropertyPolymorphic(MSetPropertyPolymorphic* ins);
+ void visitBindNameCache(MBindNameCache* ins);
+ void visitCallBindVar(MCallBindVar* ins);
+ void visitGuardObjectIdentity(MGuardObjectIdentity* ins);
+ void visitGuardClass(MGuardClass* ins);
+ void visitGuardObject(MGuardObject* ins);
+ void visitGuardString(MGuardString* ins);
+ void visitGuardReceiverPolymorphic(MGuardReceiverPolymorphic* ins);
+ void visitGuardUnboxedExpando(MGuardUnboxedExpando* ins);
+ void visitLoadUnboxedExpando(MLoadUnboxedExpando* ins);
+ void visitPolyInlineGuard(MPolyInlineGuard* ins);
+ void visitAssertRange(MAssertRange* ins);
+ void visitCallGetProperty(MCallGetProperty* ins);
+ void visitDeleteProperty(MDeleteProperty* ins);
+ void visitDeleteElement(MDeleteElement* ins);
+ void visitGetNameCache(MGetNameCache* ins);
+ void visitCallGetIntrinsicValue(MCallGetIntrinsicValue* ins);
+ void visitCallGetElement(MCallGetElement* ins);
+ void visitCallSetElement(MCallSetElement* ins);
+ void visitCallInitElementArray(MCallInitElementArray* ins);
+ void visitSetPropertyCache(MSetPropertyCache* ins);
+ void visitCallSetProperty(MCallSetProperty* ins);
+ void visitIteratorStart(MIteratorStart* ins);
+ void visitIteratorMore(MIteratorMore* ins);
+ void visitIsNoIter(MIsNoIter* ins);
+ void visitIteratorEnd(MIteratorEnd* ins);
+ void visitStringLength(MStringLength* ins);
+ void visitArgumentsLength(MArgumentsLength* ins);
+ void visitGetFrameArgument(MGetFrameArgument* ins);
+ void visitSetFrameArgument(MSetFrameArgument* ins);
+ void visitRunOncePrologue(MRunOncePrologue* ins);
+ void visitRest(MRest* ins);
+ void visitThrow(MThrow* ins);
+ void visitIn(MIn* ins);
+ void visitInArray(MInArray* ins);
+ void visitInstanceOf(MInstanceOf* ins);
+ void visitCallInstanceOf(MCallInstanceOf* ins);
+ void visitIsCallable(MIsCallable* ins);
+ void visitIsConstructor(MIsConstructor* ins);
+ void visitIsObject(MIsObject* ins);
+ void visitHasClass(MHasClass* ins);
+ void visitWasmAddOffset(MWasmAddOffset* ins);
+ void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
+ void visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(MWasmStoreGlobalVar* ins);
+ void visitWasmParameter(MWasmParameter* ins);
+ void visitWasmReturn(MWasmReturn* ins);
+ void visitWasmReturnVoid(MWasmReturnVoid* ins);
+ void visitWasmStackArg(MWasmStackArg* ins);
+ void visitWasmCall(MWasmCall* ins);
+ void visitSetDOMProperty(MSetDOMProperty* ins);
+ void visitGetDOMProperty(MGetDOMProperty* ins);
+ void visitGetDOMMember(MGetDOMMember* ins);
+ void visitRecompileCheck(MRecompileCheck* ins);
+ void visitSimdBox(MSimdBox* ins);
+ void visitSimdUnbox(MSimdUnbox* ins);
+ void visitSimdUnaryArith(MSimdUnaryArith* ins);
+ void visitSimdBinaryComp(MSimdBinaryComp* ins);
+ void visitSimdBinaryBitwise(MSimdBinaryBitwise* ins);
+ void visitSimdShift(MSimdShift* ins);
+ void visitSimdConstant(MSimdConstant* ins);
+ void visitSimdConvert(MSimdConvert* ins);
+ void visitSimdReinterpretCast(MSimdReinterpretCast* ins);
+ void visitSimdAllTrue(MSimdAllTrue* ins);
+ void visitSimdAnyTrue(MSimdAnyTrue* ins);
+ void visitPhi(MPhi* ins);
+ void visitBeta(MBeta* ins);
+ void visitObjectState(MObjectState* ins);
+ void visitArrayState(MArrayState* ins);
+ void visitUnknownValue(MUnknownValue* ins);
+ void visitLexicalCheck(MLexicalCheck* ins);
+ void visitThrowRuntimeLexicalError(MThrowRuntimeLexicalError* ins);
+ void visitGlobalNameConflictsCheck(MGlobalNameConflictsCheck* ins);
+ void visitDebugger(MDebugger* ins);
+ void visitNewTarget(MNewTarget* ins);
+ void visitArrowNewTarget(MArrowNewTarget* ins);
+ void visitNaNToZero(MNaNToZero* ins);
+ void visitAtomicIsLockFree(MAtomicIsLockFree* ins);
+ void visitGuardSharedTypedArray(MGuardSharedTypedArray* ins);
+ void visitCheckReturn(MCheckReturn* ins);
+ void visitCheckIsObj(MCheckIsObj* ins);
+ void visitCheckObjCoercible(MCheckObjCoercible* ins);
+ void visitDebugCheckSelfHosted(MDebugCheckSelfHosted* ins);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Lowering_h */
diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
new file mode 100644
index 000000000..202aef497
--- /dev/null
+++ b/js/src/jit/MCallOptimize.cpp
@@ -0,0 +1,4099 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Casting.h"
+
+#include "jsmath.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+#include "builtin/AtomicsObject.h"
+#include "builtin/SIMD.h"
+#include "builtin/TestingFunctions.h"
+#include "builtin/TypedObject.h"
+#include "jit/BaselineInspector.h"
+#include "jit/InlinableNatives.h"
+#include "jit/IonBuilder.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/ProxyObject.h"
+#include "vm/SelfHosting.h"
+#include "vm/TypedArrayObject.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/StringObject-inl.h"
+#include "vm/UnboxedObject-inl.h"
+
+using mozilla::ArrayLength;
+using mozilla::AssertedCast;
+
+using JS::DoubleNaNValue;
+using JS::TrackedOutcome;
+using JS::TrackedStrategy;
+using JS::TrackedTypeSite;
+
+namespace js {
+namespace jit {
+
+IonBuilder::InliningStatus
+IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
+{
+ MOZ_ASSERT(target->isNative());
+
+ if (!optimizationInfo().inlineNative()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineDisabledIon);
+ return InliningStatus_NotInlined;
+ }
+
+ if (!target->jitInfo() || target->jitInfo()->type() != JSJitInfo::InlinableNative) {
+ // Reaching here means we tried to inline a native for which there is no
+ // Ion specialization.
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeNoSpecialization);
+ return InliningStatus_NotInlined;
+ }
+
+ // Default failure reason is observing an unsupported type.
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadType);
+
+ if (shouldAbortOnPreliminaryGroups(callInfo.thisArg()))
+ return InliningStatus_NotInlined;
+ for (size_t i = 0; i < callInfo.argc(); i++) {
+ if (shouldAbortOnPreliminaryGroups(callInfo.getArg(i)))
+ return InliningStatus_NotInlined;
+ }
+
+ switch (InlinableNative inlNative = target->jitInfo()->inlinableNative) {
+ // Array natives.
+ case InlinableNative::Array:
+ return inlineArray(callInfo);
+ case InlinableNative::ArrayIsArray:
+ return inlineArrayIsArray(callInfo);
+ case InlinableNative::ArrayJoin:
+ return inlineArrayJoin(callInfo);
+ case InlinableNative::ArrayPop:
+ return inlineArrayPopShift(callInfo, MArrayPopShift::Pop);
+ case InlinableNative::ArrayShift:
+ return inlineArrayPopShift(callInfo, MArrayPopShift::Shift);
+ case InlinableNative::ArrayPush:
+ return inlineArrayPush(callInfo);
+ case InlinableNative::ArraySlice:
+ return inlineArraySlice(callInfo);
+ case InlinableNative::ArraySplice:
+ return inlineArraySplice(callInfo);
+
+ // Atomic natives.
+ case InlinableNative::AtomicsCompareExchange:
+ return inlineAtomicsCompareExchange(callInfo);
+ case InlinableNative::AtomicsExchange:
+ return inlineAtomicsExchange(callInfo);
+ case InlinableNative::AtomicsLoad:
+ return inlineAtomicsLoad(callInfo);
+ case InlinableNative::AtomicsStore:
+ return inlineAtomicsStore(callInfo);
+ case InlinableNative::AtomicsAdd:
+ case InlinableNative::AtomicsSub:
+ case InlinableNative::AtomicsAnd:
+ case InlinableNative::AtomicsOr:
+ case InlinableNative::AtomicsXor:
+ return inlineAtomicsBinop(callInfo, inlNative);
+ case InlinableNative::AtomicsIsLockFree:
+ return inlineAtomicsIsLockFree(callInfo);
+
+ // Math natives.
+ case InlinableNative::MathAbs:
+ return inlineMathAbs(callInfo);
+ case InlinableNative::MathFloor:
+ return inlineMathFloor(callInfo);
+ case InlinableNative::MathCeil:
+ return inlineMathCeil(callInfo);
+ case InlinableNative::MathRound:
+ return inlineMathRound(callInfo);
+ case InlinableNative::MathClz32:
+ return inlineMathClz32(callInfo);
+ case InlinableNative::MathSqrt:
+ return inlineMathSqrt(callInfo);
+ case InlinableNative::MathATan2:
+ return inlineMathAtan2(callInfo);
+ case InlinableNative::MathHypot:
+ return inlineMathHypot(callInfo);
+ case InlinableNative::MathMax:
+ return inlineMathMinMax(callInfo, true /* max */);
+ case InlinableNative::MathMin:
+ return inlineMathMinMax(callInfo, false /* max */);
+ case InlinableNative::MathPow:
+ return inlineMathPow(callInfo);
+ case InlinableNative::MathRandom:
+ return inlineMathRandom(callInfo);
+ case InlinableNative::MathImul:
+ return inlineMathImul(callInfo);
+ case InlinableNative::MathFRound:
+ return inlineMathFRound(callInfo);
+ case InlinableNative::MathSin:
+ return inlineMathFunction(callInfo, MMathFunction::Sin);
+ case InlinableNative::MathTan:
+ return inlineMathFunction(callInfo, MMathFunction::Tan);
+ case InlinableNative::MathCos:
+ return inlineMathFunction(callInfo, MMathFunction::Cos);
+ case InlinableNative::MathExp:
+ return inlineMathFunction(callInfo, MMathFunction::Exp);
+ case InlinableNative::MathLog:
+ return inlineMathFunction(callInfo, MMathFunction::Log);
+ case InlinableNative::MathASin:
+ return inlineMathFunction(callInfo, MMathFunction::ASin);
+ case InlinableNative::MathATan:
+ return inlineMathFunction(callInfo, MMathFunction::ATan);
+ case InlinableNative::MathACos:
+ return inlineMathFunction(callInfo, MMathFunction::ACos);
+ case InlinableNative::MathLog10:
+ return inlineMathFunction(callInfo, MMathFunction::Log10);
+ case InlinableNative::MathLog2:
+ return inlineMathFunction(callInfo, MMathFunction::Log2);
+ case InlinableNative::MathLog1P:
+ return inlineMathFunction(callInfo, MMathFunction::Log1P);
+ case InlinableNative::MathExpM1:
+ return inlineMathFunction(callInfo, MMathFunction::ExpM1);
+ case InlinableNative::MathCosH:
+ return inlineMathFunction(callInfo, MMathFunction::CosH);
+ case InlinableNative::MathSinH:
+ return inlineMathFunction(callInfo, MMathFunction::SinH);
+ case InlinableNative::MathTanH:
+ return inlineMathFunction(callInfo, MMathFunction::TanH);
+ case InlinableNative::MathACosH:
+ return inlineMathFunction(callInfo, MMathFunction::ACosH);
+ case InlinableNative::MathASinH:
+ return inlineMathFunction(callInfo, MMathFunction::ASinH);
+ case InlinableNative::MathATanH:
+ return inlineMathFunction(callInfo, MMathFunction::ATanH);
+ case InlinableNative::MathSign:
+ return inlineMathFunction(callInfo, MMathFunction::Sign);
+ case InlinableNative::MathTrunc:
+ return inlineMathFunction(callInfo, MMathFunction::Trunc);
+ case InlinableNative::MathCbrt:
+ return inlineMathFunction(callInfo, MMathFunction::Cbrt);
+
+ // RegExp natives.
+ case InlinableNative::RegExpMatcher:
+ return inlineRegExpMatcher(callInfo);
+ case InlinableNative::RegExpSearcher:
+ return inlineRegExpSearcher(callInfo);
+ case InlinableNative::RegExpTester:
+ return inlineRegExpTester(callInfo);
+ case InlinableNative::IsRegExpObject:
+ return inlineIsRegExpObject(callInfo);
+ case InlinableNative::RegExpPrototypeOptimizable:
+ return inlineRegExpPrototypeOptimizable(callInfo);
+ case InlinableNative::RegExpInstanceOptimizable:
+ return inlineRegExpInstanceOptimizable(callInfo);
+ case InlinableNative::GetFirstDollarIndex:
+ return inlineGetFirstDollarIndex(callInfo);
+
+ // String natives.
+ case InlinableNative::String:
+ return inlineStringObject(callInfo);
+ case InlinableNative::StringCharCodeAt:
+ return inlineStrCharCodeAt(callInfo);
+ case InlinableNative::StringFromCharCode:
+ return inlineStrFromCharCode(callInfo);
+ case InlinableNative::StringFromCodePoint:
+ return inlineStrFromCodePoint(callInfo);
+ case InlinableNative::StringCharAt:
+ return inlineStrCharAt(callInfo);
+
+ // String intrinsics.
+ case InlinableNative::IntrinsicStringReplaceString:
+ return inlineStringReplaceString(callInfo);
+ case InlinableNative::IntrinsicStringSplitString:
+ return inlineStringSplitString(callInfo);
+
+ // Object natives.
+ case InlinableNative::ObjectCreate:
+ return inlineObjectCreate(callInfo);
+
+ // SIMD natives.
+ case InlinableNative::SimdInt32x4:
+ return inlineSimd(callInfo, target, SimdType::Int32x4);
+ case InlinableNative::SimdUint32x4:
+ return inlineSimd(callInfo, target, SimdType::Uint32x4);
+ case InlinableNative::SimdInt16x8:
+ return inlineSimd(callInfo, target, SimdType::Int16x8);
+ case InlinableNative::SimdUint16x8:
+ return inlineSimd(callInfo, target, SimdType::Uint16x8);
+ case InlinableNative::SimdInt8x16:
+ return inlineSimd(callInfo, target, SimdType::Int8x16);
+ case InlinableNative::SimdUint8x16:
+ return inlineSimd(callInfo, target, SimdType::Uint8x16);
+ case InlinableNative::SimdFloat32x4:
+ return inlineSimd(callInfo, target, SimdType::Float32x4);
+ case InlinableNative::SimdBool32x4:
+ return inlineSimd(callInfo, target, SimdType::Bool32x4);
+ case InlinableNative::SimdBool16x8:
+ return inlineSimd(callInfo, target, SimdType::Bool16x8);
+ case InlinableNative::SimdBool8x16:
+ return inlineSimd(callInfo, target, SimdType::Bool8x16);
+
+ // Testing functions.
+ case InlinableNative::TestBailout:
+ return inlineBailout(callInfo);
+ case InlinableNative::TestAssertFloat32:
+ return inlineAssertFloat32(callInfo);
+ case InlinableNative::TestAssertRecoveredOnBailout:
+ return inlineAssertRecoveredOnBailout(callInfo);
+
+ // Slot intrinsics.
+ case InlinableNative::IntrinsicUnsafeSetReservedSlot:
+ return inlineUnsafeSetReservedSlot(callInfo);
+ case InlinableNative::IntrinsicUnsafeGetReservedSlot:
+ return inlineUnsafeGetReservedSlot(callInfo, MIRType::Value);
+ case InlinableNative::IntrinsicUnsafeGetObjectFromReservedSlot:
+ return inlineUnsafeGetReservedSlot(callInfo, MIRType::Object);
+ case InlinableNative::IntrinsicUnsafeGetInt32FromReservedSlot:
+ return inlineUnsafeGetReservedSlot(callInfo, MIRType::Int32);
+ case InlinableNative::IntrinsicUnsafeGetStringFromReservedSlot:
+ return inlineUnsafeGetReservedSlot(callInfo, MIRType::String);
+ case InlinableNative::IntrinsicUnsafeGetBooleanFromReservedSlot:
+ return inlineUnsafeGetReservedSlot(callInfo, MIRType::Boolean);
+
+ // Utility intrinsics.
+ case InlinableNative::IntrinsicIsCallable:
+ return inlineIsCallable(callInfo);
+ case InlinableNative::IntrinsicIsConstructor:
+ return inlineIsConstructor(callInfo);
+ case InlinableNative::IntrinsicToObject:
+ return inlineToObject(callInfo);
+ case InlinableNative::IntrinsicIsObject:
+ return inlineIsObject(callInfo);
+ case InlinableNative::IntrinsicIsWrappedArrayConstructor:
+ return inlineIsWrappedArrayConstructor(callInfo);
+ case InlinableNative::IntrinsicToInteger:
+ return inlineToInteger(callInfo);
+ case InlinableNative::IntrinsicToString:
+ return inlineToString(callInfo);
+ case InlinableNative::IntrinsicIsConstructing:
+ return inlineIsConstructing(callInfo);
+ case InlinableNative::IntrinsicSubstringKernel:
+ return inlineSubstringKernel(callInfo);
+ case InlinableNative::IntrinsicIsArrayIterator:
+ return inlineHasClass(callInfo, &ArrayIteratorObject::class_);
+ case InlinableNative::IntrinsicIsMapIterator:
+ return inlineHasClass(callInfo, &MapIteratorObject::class_);
+ case InlinableNative::IntrinsicIsSetIterator:
+ return inlineHasClass(callInfo, &SetIteratorObject::class_);
+ case InlinableNative::IntrinsicIsStringIterator:
+ return inlineHasClass(callInfo, &StringIteratorObject::class_);
+ case InlinableNative::IntrinsicIsListIterator:
+ return inlineHasClass(callInfo, &ListIteratorObject::class_);
+ case InlinableNative::IntrinsicDefineDataProperty:
+ return inlineDefineDataProperty(callInfo);
+ case InlinableNative::IntrinsicObjectHasPrototype:
+ return inlineObjectHasPrototype(callInfo);
+
+ // Map intrinsics.
+ case InlinableNative::IntrinsicGetNextMapEntryForIterator:
+ return inlineGetNextEntryForIterator(callInfo, MGetNextEntryForIterator::Map);
+
+ // Set intrinsics.
+ case InlinableNative::IntrinsicGetNextSetEntryForIterator:
+ return inlineGetNextEntryForIterator(callInfo, MGetNextEntryForIterator::Set);
+
+ // ArrayBuffer intrinsics.
+ case InlinableNative::IntrinsicArrayBufferByteLength:
+ return inlineArrayBufferByteLength(callInfo);
+ case InlinableNative::IntrinsicPossiblyWrappedArrayBufferByteLength:
+ return inlinePossiblyWrappedArrayBufferByteLength(callInfo);
+
+ // TypedArray intrinsics.
+ case InlinableNative::TypedArrayConstructor:
+ return inlineTypedArray(callInfo, target->native());
+ case InlinableNative::IntrinsicIsTypedArray:
+ return inlineIsTypedArray(callInfo);
+ case InlinableNative::IntrinsicIsPossiblyWrappedTypedArray:
+ return inlineIsPossiblyWrappedTypedArray(callInfo);
+ case InlinableNative::IntrinsicPossiblyWrappedTypedArrayLength:
+ return inlinePossiblyWrappedTypedArrayLength(callInfo);
+ case InlinableNative::IntrinsicTypedArrayLength:
+ return inlineTypedArrayLength(callInfo);
+ case InlinableNative::IntrinsicSetDisjointTypedElements:
+ return inlineSetDisjointTypedElements(callInfo);
+
+ // TypedObject intrinsics.
+ case InlinableNative::IntrinsicObjectIsTypedObject:
+ return inlineHasClass(callInfo,
+ &OutlineTransparentTypedObject::class_,
+ &OutlineOpaqueTypedObject::class_,
+ &InlineTransparentTypedObject::class_,
+ &InlineOpaqueTypedObject::class_);
+ case InlinableNative::IntrinsicObjectIsTransparentTypedObject:
+ return inlineHasClass(callInfo,
+ &OutlineTransparentTypedObject::class_,
+ &InlineTransparentTypedObject::class_);
+ case InlinableNative::IntrinsicObjectIsOpaqueTypedObject:
+ return inlineHasClass(callInfo,
+ &OutlineOpaqueTypedObject::class_,
+ &InlineOpaqueTypedObject::class_);
+ case InlinableNative::IntrinsicObjectIsTypeDescr:
+ return inlineObjectIsTypeDescr(callInfo);
+ case InlinableNative::IntrinsicTypeDescrIsSimpleType:
+ return inlineHasClass(callInfo,
+ &ScalarTypeDescr::class_, &ReferenceTypeDescr::class_);
+ case InlinableNative::IntrinsicTypeDescrIsArrayType:
+ return inlineHasClass(callInfo, &ArrayTypeDescr::class_);
+ case InlinableNative::IntrinsicSetTypedObjectOffset:
+ return inlineSetTypedObjectOffset(callInfo);
+ }
+
+ MOZ_CRASH("Shouldn't get here");
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineNativeGetter(CallInfo& callInfo, JSFunction* target)
+{
+ MOZ_ASSERT(target->isNative());
+ JSNative native = target->native();
+
+ if (!optimizationInfo().inlineNative())
+ return InliningStatus_NotInlined;
+
+ MDefinition* thisArg = callInfo.thisArg();
+ TemporaryTypeSet* thisTypes = thisArg->resultTypeSet();
+ MOZ_ASSERT(callInfo.argc() == 0);
+
+ if (!thisTypes)
+ return InliningStatus_NotInlined;
+
+ // Try to optimize typed array lengths.
+ if (TypedArrayObject::isOriginalLengthGetter(native)) {
+ Scalar::Type type = thisTypes->getTypedArrayType(constraints());
+ if (type == Scalar::MaxTypedArrayViewType)
+ return InliningStatus_NotInlined;
+
+ MInstruction* length = addTypedArrayLength(thisArg);
+ current->push(length);
+ return InliningStatus_Inlined;
+ }
+
+ // Try to optimize RegExp getters.
+ RegExpFlag mask = NoFlags;
+ if (RegExpObject::isOriginalFlagGetter(native, &mask)) {
+ const Class* clasp = thisTypes->getKnownClass(constraints());
+ if (clasp != &RegExpObject::class_)
+ return InliningStatus_NotInlined;
+
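+ // Load the flags slot, mask off the requested flag, and convert the result to a boolean.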
+ MLoadFixedSlot* flags = MLoadFixedSlot::New(alloc(), thisArg, RegExpObject::flagsSlot());
+ current->add(flags);
+ flags->setResultType(MIRType::Int32);
+ MConstant* maskConst = MConstant::New(alloc(), Int32Value(mask));
+ current->add(maskConst);
+ MBitAnd* maskedFlag = MBitAnd::New(alloc(), flags, maskConst);
+ maskedFlag->setInt32Specialization();
+ current->add(maskedFlag);
+
+ MDefinition* result = convertToBoolean(maskedFlag);
+ current->push(result);
+ return InliningStatus_Inlined;
+ }
+
+ return InliningStatus_NotInlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineNonFunctionCall(CallInfo& callInfo, JSObject* target)
+{
+ // Inline a call to a non-function object, invoking the object's call or
+ // construct hook.
+
+ if (callInfo.constructing() && target->constructHook() == TypedObject::construct)
+ return inlineConstructTypedObject(callInfo, &target->as<TypeDescr>());
+
+ if (!callInfo.constructing() && target->callHook() == SimdTypeDescr::call)
+ return inlineConstructSimdObject(callInfo, &target->as<SimdTypeDescr>());
+
+ return InliningStatus_NotInlined;
+}
+
+TemporaryTypeSet*
+IonBuilder::getInlineReturnTypeSet()
+{
+ return bytecodeTypes(pc);
+}
+
+MIRType
+IonBuilder::getInlineReturnType()
+{
+ TemporaryTypeSet* returnTypes = getInlineReturnTypeSet();
+ return returnTypes->getKnownMIRType();
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathFunction(CallInfo& callInfo, MMathFunction::Function function)
+{
+ if (callInfo.constructing())
+ return InliningStatus_NotInlined;
+
+ if (callInfo.argc() != 1)
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::Double)
+ return InliningStatus_NotInlined;
+ if (!IsNumberType(callInfo.getArg(0)->type()))
+ return InliningStatus_NotInlined;
+
+ const MathCache* cache = GetJSContextFromMainThread()->caches.maybeGetMathCache();
+
+ callInfo.fun()->setImplicitlyUsedUnchecked();
+ callInfo.thisArg()->setImplicitlyUsedUnchecked();
+
+ MMathFunction* ins = MMathFunction::New(alloc(), callInfo.getArg(0), function, cache);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArray(CallInfo& callInfo)
+{
+ uint32_t initLength = 0;
+
+ JSObject* templateObject = inspector->getTemplateObjectForNative(pc, ArrayConstructor);
+ // This is shared by ArrayConstructor and array_construct (std_Array).
+ if (!templateObject)
+ templateObject = inspector->getTemplateObjectForNative(pc, array_construct);
+
+ if (!templateObject) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeNoTemplateObj);
+ return InliningStatus_NotInlined;
+ }
+
+ if (templateObject->is<UnboxedArrayObject>()) {
+ if (templateObject->group()->unboxedLayout().nativeGroup())
+ return InliningStatus_NotInlined;
+ }
+
+ // Multiple arguments imply array initialization, not just construction.
+ if (callInfo.argc() >= 2) {
+ initLength = callInfo.argc();
+
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(templateObject);
+ if (!key->unknownProperties()) {
+ HeapTypeSetKey elemTypes = key->property(JSID_VOID);
+
+ for (uint32_t i = 0; i < initLength; i++) {
+ MDefinition* value = callInfo.getArg(i);
+ if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) {
+ elemTypes.freeze(constraints());
+ return InliningStatus_NotInlined;
+ }
+ }
+ }
+ }
+
+ // A single integer argument denotes initial length.
+ if (callInfo.argc() == 1) {
+ MDefinition* arg = callInfo.getArg(0);
+ if (arg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ if (!arg->isConstant()) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MNewArrayDynamicLength* ins =
+ MNewArrayDynamicLength::New(alloc(), constraints(), templateObject,
+ templateObject->group()->initialHeap(constraints()),
+ arg);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ // The next several checks all may fail due to range conditions.
+ trackOptimizationOutcome(TrackedOutcome::ArrayRange);
+
+ // Negative lengths generate a RangeError, unhandled by the inline path.
+ initLength = arg->toConstant()->toInt32();
+ if (initLength > NativeObject::MAX_DENSE_ELEMENTS_COUNT)
+ return InliningStatus_NotInlined;
+ MOZ_ASSERT(initLength <= INT32_MAX);
+
+ // Make sure initLength matches the template object's length. This is
+ // not guaranteed to be the case, for instance when we're inlining, the
+ // MConstant may come from an outer script.
+ if (initLength != GetAnyBoxedOrUnboxedArrayLength(templateObject))
+ return InliningStatus_NotInlined;
+
+ // Don't inline large allocations.
+ if (initLength > ArrayObject::EagerAllocationMaxLength)
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (!jsop_newarray(templateObject, initLength))
+ return InliningStatus_Error;
+
+ MDefinition* array = current->peek(-1);
+ if (callInfo.argc() >= 2) {
+ JSValueType unboxedType = GetBoxedOrUnboxedType(templateObject);
+ for (uint32_t i = 0; i < initLength; i++) {
+ if (!alloc().ensureBallast())
+ return InliningStatus_Error;
+ MDefinition* value = callInfo.getArg(i);
+ if (!initializeArrayElement(array, i, value, unboxedType, /* addResumePoint = */ false))
+ return InliningStatus_Error;
+ }
+
+ MInstruction* setLength = setInitializedLength(array, unboxedType, initLength);
+ if (!resumeAfter(setLength))
+ return InliningStatus_Error;
+ }
+
+ return InliningStatus_Inlined;
+}
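+// Examples of the Array() paths above: Array(n) with a non-constant int32 n
+// becomes an MNewArrayDynamicLength; Array(3) with a small constant length
+// reuses the template object allocation; and Array(1, 2, 3) additionally
+// stores each argument and then sets the initialized length.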
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArrayIsArray(CallInfo& callInfo)
+{
+ if (callInfo.constructing() || callInfo.argc() != 1) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = callInfo.getArg(0);
+
+ bool isArray;
+ if (!arg->mightBeType(MIRType::Object)) {
+ isArray = false;
+ } else {
+ if (arg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* types = arg->resultTypeSet();
+ const Class* clasp = types ? types->getKnownClass(constraints()) : nullptr;
+ if (!clasp || clasp->isProxy())
+ return InliningStatus_NotInlined;
+
+ isArray = (clasp == &ArrayObject::class_ || clasp == &UnboxedArrayObject::class_);
+ }
+
+ pushConstant(BooleanValue(isArray));
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArrayPopShift(CallInfo& callInfo, MArrayPopShift::Mode mode)
+{
+ if (callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ if (returnType == MIRType::Undefined || returnType == MIRType::Null)
+ return InliningStatus_NotInlined;
+ if (callInfo.thisArg()->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ // Pop and shift are only handled for dense arrays that have never been
+ // used in an iterator: popping elements does not account for suppressing
+ // deleted properties in active iterators.
+ ObjectGroupFlags unhandledFlags =
+ OBJECT_FLAG_SPARSE_INDEXES |
+ OBJECT_FLAG_LENGTH_OVERFLOW |
+ OBJECT_FLAG_ITERATED;
+
+ MDefinition* obj = convertUnboxedObjects(callInfo.thisArg());
+ TemporaryTypeSet* thisTypes = obj->resultTypeSet();
+ if (!thisTypes)
+ return InliningStatus_NotInlined;
+ const Class* clasp = thisTypes->getKnownClass(constraints());
+ if (clasp != &ArrayObject::class_ && clasp != &UnboxedArrayObject::class_)
+ return InliningStatus_NotInlined;
+ if (thisTypes->hasObjectFlags(constraints(), unhandledFlags)) {
+ trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
+ return InliningStatus_NotInlined;
+ }
+
+ if (ArrayPrototypeHasIndexedProperty(this, script())) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return InliningStatus_NotInlined;
+ }
+
+ JSValueType unboxedType = JSVAL_TYPE_MAGIC;
+ if (clasp == &UnboxedArrayObject::class_) {
+ unboxedType = UnboxedArrayElementType(constraints(), obj, nullptr);
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (clasp == &ArrayObject::class_)
+ obj = addMaybeCopyElementsForWrite(obj, /* checkNative = */ false);
+
+ TemporaryTypeSet* returnTypes = getInlineReturnTypeSet();
+ bool needsHoleCheck = thisTypes->hasObjectFlags(constraints(), OBJECT_FLAG_NON_PACKED);
+ bool maybeUndefined = returnTypes->hasType(TypeSet::UndefinedType());
+
+ BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
+ obj, nullptr, returnTypes);
+ if (barrier != BarrierKind::NoBarrier)
+ returnType = MIRType::Value;
+
+ MArrayPopShift* ins = MArrayPopShift::New(alloc(), obj, mode,
+ unboxedType, needsHoleCheck, maybeUndefined);
+ current->add(ins);
+ current->push(ins);
+ ins->setResultType(returnType);
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+
+ if (!pushTypeBarrier(ins, returnTypes, barrier))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArraySplice(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // Ensure |this|, argument and result are objects.
+ if (getInlineReturnType() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.thisArg()->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ // Only specialize arr.splice(start, deleteCount) when the return value is
+ // unused, so that we can avoid creating the result array altogether.
+ if (!BytecodeIsPopped(pc)) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
+ return InliningStatus_NotInlined;
+ }
+
+ MArraySplice* ins = MArraySplice::New(alloc(),
+ callInfo.thisArg(),
+ callInfo.getArg(0),
+ callInfo.getArg(1));
+
+ current->add(ins);
+ pushConstant(UndefinedValue());
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArrayJoin(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+ if (callInfo.thisArg()->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MArrayJoin* ins = MArrayJoin::New(alloc(), callInfo.thisArg(), callInfo.getArg(0));
+
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArrayPush(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* obj = convertUnboxedObjects(callInfo.thisArg());
+ MDefinition* value = callInfo.getArg(0);
+ if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current,
+ &obj, nullptr, &value, /* canModify = */ false))
+ {
+ trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ if (obj->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* thisTypes = obj->resultTypeSet();
+ if (!thisTypes)
+ return InliningStatus_NotInlined;
+ const Class* clasp = thisTypes->getKnownClass(constraints());
+ if (clasp != &ArrayObject::class_ && clasp != &UnboxedArrayObject::class_)
+ return InliningStatus_NotInlined;
+ if (thisTypes->hasObjectFlags(constraints(), OBJECT_FLAG_SPARSE_INDEXES |
+ OBJECT_FLAG_LENGTH_OVERFLOW))
+ {
+ trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
+ return InliningStatus_NotInlined;
+ }
+
+ if (ArrayPrototypeHasIndexedProperty(this, script())) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return InliningStatus_NotInlined;
+ }
+
+ TemporaryTypeSet::DoubleConversion conversion =
+ thisTypes->convertDoubleElements(constraints());
+ if (conversion == TemporaryTypeSet::AmbiguousDoubleConversion) {
+ trackOptimizationOutcome(TrackedOutcome::ArrayDoubleConversion);
+ return InliningStatus_NotInlined;
+ }
+
+ JSValueType unboxedType = JSVAL_TYPE_MAGIC;
+ if (clasp == &UnboxedArrayObject::class_) {
+ unboxedType = UnboxedArrayElementType(constraints(), obj, nullptr);
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (conversion == TemporaryTypeSet::AlwaysConvertToDoubles ||
+ conversion == TemporaryTypeSet::MaybeConvertToDoubles)
+ {
+ MInstruction* valueDouble = MToDouble::New(alloc(), value);
+ current->add(valueDouble);
+ value = valueDouble;
+ }
+
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ obj = addMaybeCopyElementsForWrite(obj, /* checkNative = */ false);
+
+ if (NeedsPostBarrier(value))
+ current->add(MPostWriteBarrier::New(alloc(), obj, value));
+
+ MArrayPush* ins = MArrayPush::New(alloc(), obj, value, unboxedType);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArraySlice(CallInfo& callInfo)
+{
+ if (callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* obj = convertUnboxedObjects(callInfo.thisArg());
+
+ // Ensure |this| and result are objects.
+ if (getInlineReturnType() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (obj->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ // Arguments for the sliced region must be integers.
+ if (callInfo.argc() > 0) {
+ if (callInfo.getArg(0)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ if (callInfo.argc() > 1) {
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ }
+ }
+
+ // |this| must be a dense array.
+ TemporaryTypeSet* thisTypes = obj->resultTypeSet();
+ if (!thisTypes)
+ return InliningStatus_NotInlined;
+
+ const Class* clasp = thisTypes->getKnownClass(constraints());
+ if (clasp != &ArrayObject::class_ && clasp != &UnboxedArrayObject::class_)
+ return InliningStatus_NotInlined;
+ if (thisTypes->hasObjectFlags(constraints(), OBJECT_FLAG_SPARSE_INDEXES |
+ OBJECT_FLAG_LENGTH_OVERFLOW))
+ {
+ trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
+ return InliningStatus_NotInlined;
+ }
+
+ JSValueType unboxedType = JSVAL_TYPE_MAGIC;
+ if (clasp == &UnboxedArrayObject::class_) {
+ unboxedType = UnboxedArrayElementType(constraints(), obj, nullptr);
+ if (unboxedType == JSVAL_TYPE_MAGIC)
+ return InliningStatus_NotInlined;
+ }
+
+ // Watch out for indexed properties on the prototype.
+ if (ArrayPrototypeHasIndexedProperty(this, script())) {
+ trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+ return InliningStatus_NotInlined;
+ }
+
+ // The group of the result will be dynamically fixed up to match the input
+ // object, allowing us to handle 'this' objects that might have more than
+ // one group. Make sure that no singletons can be sliced here.
+ for (unsigned i = 0; i < thisTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = thisTypes->getObject(i);
+ if (key && key->isSingleton())
+ return InliningStatus_NotInlined;
+ }
+
+ // Inline the call.
+ JSObject* templateObj = inspector->getTemplateObjectForNative(pc, js::array_slice);
+ if (!templateObj)
+ return InliningStatus_NotInlined;
+
+ if (unboxedType == JSVAL_TYPE_MAGIC) {
+ if (!templateObj->is<ArrayObject>())
+ return InliningStatus_NotInlined;
+ } else {
+ if (!templateObj->is<UnboxedArrayObject>())
+ return InliningStatus_NotInlined;
+ if (templateObj->as<UnboxedArrayObject>().elementType() != unboxedType)
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MDefinition* begin;
+ if (callInfo.argc() > 0)
+ begin = callInfo.getArg(0);
+ else
+ begin = constant(Int32Value(0));
+
+ MDefinition* end;
+ if (callInfo.argc() > 1) {
+ end = callInfo.getArg(1);
+ } else if (clasp == &ArrayObject::class_) {
+ MElements* elements = MElements::New(alloc(), obj);
+ current->add(elements);
+
+ end = MArrayLength::New(alloc(), elements);
+ current->add(end->toInstruction());
+ } else {
+ end = MUnboxedArrayLength::New(alloc(), obj);
+ current->add(end->toInstruction());
+ }
+
+ MArraySlice* ins = MArraySlice::New(alloc(), constraints(),
+ obj, begin, end,
+ templateObj,
+ templateObj->group()->initialHeap(constraints()),
+ unboxedType);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+
+ if (!pushTypeBarrier(ins, getInlineReturnTypeSet(), BarrierKind::TypeSet))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathAbs(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ MIRType argType = callInfo.getArg(0)->type();
+ if (!IsNumberType(argType))
+ return InliningStatus_NotInlined;
+
+ // Either argType == returnType, or
+ // argType == Double or Float32, returnType == Int, or
+ // argType == Float32, returnType == Double
+ if (argType != returnType && !(IsFloatingPointType(argType) && returnType == MIRType::Int32)
+ && !(argType == MIRType::Float32 && returnType == MIRType::Double))
+ {
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ // If the arg is a Float32, we specialize the op as double; it will be
+ // specialized as float32 later if necessary.
+ MIRType absType = (argType == MIRType::Float32) ? MIRType::Double : argType;
+ MInstruction* ins = MAbs::New(alloc(), callInfo.getArg(0), absType);
+ current->add(ins);
+
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathFloor(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType argType = callInfo.getArg(0)->type();
+ MIRType returnType = getInlineReturnType();
+
+ // Math.floor(int(x)) == int(x)
+ if (argType == MIRType::Int32 && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ // The int operand may be something which bails out if the actual value
+ // is not in the range of the result type of the MIR. We need to tell
+ // the optimizer to preserve this bailout even if the final result is
+ // fully truncated.
+ MLimitedTruncate* ins = MLimitedTruncate::New(alloc(), callInfo.getArg(0),
+ MDefinition::IndirectTruncate);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MFloor* ins = MFloor::New(alloc(), callInfo.getArg(0));
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Double) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MMathFunction* ins = MMathFunction::New(alloc(), callInfo.getArg(0), MMathFunction::Floor, nullptr);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ return InliningStatus_NotInlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathCeil(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType argType = callInfo.getArg(0)->type();
+ MIRType returnType = getInlineReturnType();
+
+ // Math.ceil(int(x)) == int(x)
+ if (argType == MIRType::Int32 && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ // The int operand may be something which bails out if the actual value
+ // is not in the range of the result type of the MIR. We need to tell
+ // the optimizer to preserve this bailout even if the final result is
+ // fully truncated.
+ MLimitedTruncate* ins = MLimitedTruncate::New(alloc(), callInfo.getArg(0),
+ MDefinition::IndirectTruncate);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MCeil* ins = MCeil::New(alloc(), callInfo.getArg(0));
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Double) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MMathFunction* ins = MMathFunction::New(alloc(), callInfo.getArg(0), MMathFunction::Ceil, nullptr);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ return InliningStatus_NotInlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathClz32(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ if (returnType != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ if (!IsNumberType(callInfo.getArg(0)->type()))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MClz* ins = MClz::New(alloc(), callInfo.getArg(0), MIRType::Int32);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathRound(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ MIRType argType = callInfo.getArg(0)->type();
+
+ // Math.round(int(x)) == int(x)
+ if (argType == MIRType::Int32 && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ // The int operand may be something which bails out if the actual value
+ // is not in the range of the result type of the MIR. We need to tell
+ // the optimizer to preserve this bailout even if the final result is
+ // fully truncated.
+ MLimitedTruncate* ins = MLimitedTruncate::New(alloc(), callInfo.getArg(0),
+ MDefinition::IndirectTruncate);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Int32) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MRound* ins = MRound::New(alloc(), callInfo.getArg(0));
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ if (IsFloatingPointType(argType) && returnType == MIRType::Double) {
+ callInfo.setImplicitlyUsedUnchecked();
+ MMathFunction* ins = MMathFunction::New(alloc(), callInfo.getArg(0), MMathFunction::Round, nullptr);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ return InliningStatus_NotInlined;
+}
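+// The Math.floor/ceil/round lowerings above share one shape: an int32 input is
+// passed through (wrapped in MLimitedTruncate to preserve the operand's
+// bailout), a floating-point input with an int32 result uses the dedicated
+// MFloor, MCeil or MRound node, and a double result falls back to
+// MMathFunction.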
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathSqrt(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType argType = callInfo.getArg(0)->type();
+ if (getInlineReturnType() != MIRType::Double)
+ return InliningStatus_NotInlined;
+ if (!IsNumberType(argType))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MSqrt* sqrt = MSqrt::New(alloc(), callInfo.getArg(0), MIRType::Double);
+ current->add(sqrt);
+ current->push(sqrt);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathAtan2(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Double)
+ return InliningStatus_NotInlined;
+
+ MIRType argType0 = callInfo.getArg(0)->type();
+ MIRType argType1 = callInfo.getArg(1)->type();
+
+ if (!IsNumberType(argType0) || !IsNumberType(argType1))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MAtan2* atan2 = MAtan2::New(alloc(), callInfo.getArg(0), callInfo.getArg(1));
+ current->add(atan2);
+ current->push(atan2);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathHypot(CallInfo& callInfo)
+{
+ if (callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ uint32_t argc = callInfo.argc();
+ if (argc < 2 || argc > 4) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Double)
+ return InliningStatus_NotInlined;
+
+ MDefinitionVector vector(alloc());
+ if (!vector.reserve(argc))
+ return InliningStatus_NotInlined;
+
+ for (uint32_t i = 0; i < argc; ++i) {
+ MDefinition* arg = callInfo.getArg(i);
+ if (!IsNumberType(arg->type()))
+ return InliningStatus_NotInlined;
+ vector.infallibleAppend(arg);
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MHypot* hypot = MHypot::New(alloc(), vector);
+
+ if (!hypot)
+ return InliningStatus_NotInlined;
+
+ current->add(hypot);
+ current->push(hypot);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathPow(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ bool emitted = false;
+ if (!powTrySpecialized(&emitted, callInfo.getArg(0), callInfo.getArg(1),
+ getInlineReturnType()))
+ {
+ return InliningStatus_Error;
+ }
+
+ if (!emitted)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathRandom(CallInfo& callInfo)
+{
+ if (callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Double)
+ return InliningStatus_NotInlined;
+
+ // MRandom JIT code directly accesses the RNG. It's (barely) possible to
+ // inline Math.random without it having been called yet, so initialize the
+ // RNG state here, as it isn't guaranteed to be initialized already.
+ script()->compartment()->ensureRandomNumberGenerator();
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MRandom* rand = MRandom::New(alloc());
+ current->add(rand);
+ current->push(rand);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathImul(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ if (returnType != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ if (!IsNumberType(callInfo.getArg(0)->type()))
+ return InliningStatus_NotInlined;
+ if (!IsNumberType(callInfo.getArg(1)->type()))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* first = MTruncateToInt32::New(alloc(), callInfo.getArg(0));
+ current->add(first);
+
+ MInstruction* second = MTruncateToInt32::New(alloc(), callInfo.getArg(1));
+ current->add(second);
+
+ MMul* ins = MMul::New(alloc(), first, second, MIRType::Int32, MMul::Integer);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
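+// Sketch of the Math.imul lowering above: both arguments are truncated to
+// int32 and multiplied with integer semantics, so for example
+// Math.imul(0xffffffff, 5) truncates the first operand to -1 and yields -5.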
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathFRound(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // The return MIRType can't be Float32 at this point, as getInlineReturnType
+ // uses JSVal types to infer the returned MIR type.
+ TemporaryTypeSet* returned = getInlineReturnTypeSet();
+ if (returned->empty()) {
+ // As there's only one possible returned type, just add it to the observed
+ // returned typeset
+ returned->addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
+ } else {
+ MIRType returnType = getInlineReturnType();
+ if (!IsNumberType(returnType))
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType arg = callInfo.getArg(0)->type();
+ if (!IsNumberType(arg))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MToFloat32* ins = MToFloat32::New(alloc(), callInfo.getArg(0));
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineMathMinMax(CallInfo& callInfo, bool max)
+{
+ if (callInfo.argc() < 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MIRType returnType = getInlineReturnType();
+ if (!IsNumberType(returnType))
+ return InliningStatus_NotInlined;
+
+ MDefinitionVector int32_cases(alloc());
+ for (unsigned i = 0; i < callInfo.argc(); i++) {
+ MDefinition* arg = callInfo.getArg(i);
+
+ switch (arg->type()) {
+ case MIRType::Int32:
+ if (!int32_cases.append(arg))
+ return InliningStatus_Error;
+ break;
+ case MIRType::Double:
+ case MIRType::Float32:
+ // Don't force a double MMinMax for arguments that would be a NOP
+ // when doing an integer MMinMax.
+ if (arg->isConstant()) {
+ double cte = arg->toConstant()->numberToDouble();
+ // min(int32, cte >= INT32_MAX) = int32
+ if (cte >= INT32_MAX && !max)
+ break;
+ // max(int32, cte <= INT32_MIN) = int32
+ if (cte <= INT32_MIN && max)
+ break;
+ }
+
+ // Force a double MMinMax if the argument is an "effectful" double.
+ returnType = MIRType::Double;
+ break;
+ default:
+ return InliningStatus_NotInlined;
+ }
+ }
+
+ if (int32_cases.length() == 0)
+ returnType = MIRType::Double;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MDefinitionVector& cases = (returnType == MIRType::Int32) ? int32_cases : callInfo.argv();
+
+ if (cases.length() == 1) {
+ MLimitedTruncate* limit = MLimitedTruncate::New(alloc(), cases[0], MDefinition::NoTruncate);
+ current->add(limit);
+ current->push(limit);
+ return InliningStatus_Inlined;
+ }
+
+ // Chain N-1 MMinMax instructions to compute the MinMax.
+ MMinMax* last = MMinMax::New(alloc(), cases[0], cases[1], returnType, max);
+ current->add(last);
+
+ for (unsigned i = 2; i < cases.length(); i++) {
+ MMinMax* ins = MMinMax::New(alloc(), last, cases[i], returnType, max);
+ current->add(ins);
+ last = ins;
+ }
+
+ current->push(last);
+ return InliningStatus_Inlined;
+}
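+// Example of the chaining above: Math.max(a, b, c) lowers to
+// MMinMax(MMinMax(a, b), c). A double constant that can never be the result,
+// e.g. the 2147483648 in Math.min(i, 2147483648) with an int32 i, is dropped
+// from the chain so the int32 specialization is kept.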
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStringObject(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || !callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // ConvertToString doesn't support objects.
+ if (callInfo.getArg(0)->mightBeType(MIRType::Object))
+ return InliningStatus_NotInlined;
+
+ JSObject* templateObj = inspector->getTemplateObjectForNative(pc, StringConstructor);
+ if (!templateObj)
+ return InliningStatus_NotInlined;
+ MOZ_ASSERT(templateObj->is<StringObject>());
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MNewStringObject* ins = MNewStringObject::New(alloc(), callInfo.getArg(0), templateObj);
+ current->add(ins);
+ current->push(ins);
+
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineConstantStringSplitString(CallInfo& callInfo)
+{
+ if (!callInfo.getArg(0)->isConstant())
+ return InliningStatus_NotInlined;
+
+ if (!callInfo.getArg(1)->isConstant())
+ return InliningStatus_NotInlined;
+
+ MConstant* strval = callInfo.getArg(0)->toConstant();
+ if (strval->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ MConstant* sepval = callInfo.getArg(1)->toConstant();
+ if (sepval->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ // Check whether a template object exists in the stub.
+ JSString* stringStr = nullptr;
+ JSString* stringSep = nullptr;
+ JSObject* templateObject = nullptr;
+ if (!inspector->isOptimizableCallStringSplit(pc, &stringStr, &stringSep, &templateObject))
+ return InliningStatus_NotInlined;
+
+ MOZ_ASSERT(stringStr);
+ MOZ_ASSERT(stringSep);
+ MOZ_ASSERT(templateObject);
+
+ if (strval->toString() != stringStr)
+ return InliningStatus_NotInlined;
+
+ if (sepval->toString() != stringSep)
+ return InliningStatus_NotInlined;
+
+ // Check if |templateObject| is valid.
+ TypeSet::ObjectKey* retType = TypeSet::ObjectKey::get(templateObject);
+ if (retType->unknownProperties())
+ return InliningStatus_NotInlined;
+
+ HeapTypeSetKey key = retType->property(JSID_VOID);
+ if (!key.maybeTypes())
+ return InliningStatus_NotInlined;
+
+ if (!key.maybeTypes()->hasType(TypeSet::StringType()))
+ return InliningStatus_NotInlined;
+
+ uint32_t initLength = GetAnyBoxedOrUnboxedArrayLength(templateObject);
+ if (GetAnyBoxedOrUnboxedInitializedLength(templateObject) != initLength)
+ return InliningStatus_NotInlined;
+
+ Vector<MConstant*, 0, SystemAllocPolicy> arrayValues;
+ for (uint32_t i = 0; i < initLength; i++) {
+ Value str = GetAnyBoxedOrUnboxedDenseElement(templateObject, i);
+ MOZ_ASSERT(str.toString()->isAtom());
+ MConstant* value = MConstant::New(alloc().fallible(), str, constraints());
+ if (!value)
+ return InliningStatus_Error;
+ if (!TypeSetIncludes(key.maybeTypes(), value->type(), value->resultTypeSet()))
+ return InliningStatus_NotInlined;
+
+ if (!arrayValues.append(value))
+ return InliningStatus_Error;
+ }
+ callInfo.setImplicitlyUsedUnchecked();
+
+ TemporaryTypeSet::DoubleConversion conversion =
+ getInlineReturnTypeSet()->convertDoubleElements(constraints());
+ if (conversion == TemporaryTypeSet::AlwaysConvertToDoubles)
+ return InliningStatus_NotInlined;
+
+ if (!jsop_newarray(templateObject, initLength))
+ return InliningStatus_Error;
+
+ MDefinition* array = current->peek(-1);
+
+ if (!initLength) {
+ if (!array->isResumePoint()) {
+ if (!resumeAfter(array->toNewArray()))
+ return InliningStatus_Error;
+ }
+ return InliningStatus_Inlined;
+ }
+
+ JSValueType unboxedType = GetBoxedOrUnboxedType(templateObject);
+
+ // Store all values. There is no need to update the initialized length
+ // after each store, as jsop_initelem_array does, because we do not expect
+ // to bail out: the memory is supposed to be allocated by now.
+ for (uint32_t i = 0; i < initLength; i++) {
+ if (!alloc().ensureBallast())
+ return InliningStatus_Error;
+
+ MConstant* value = arrayValues[i];
+ current->add(value);
+
+ if (!initializeArrayElement(array, i, value, unboxedType, /* addResumePoint = */ false))
+ return InliningStatus_Error;
+ }
+
+ MInstruction* setLength = setInitializedLength(array, unboxedType, initLength);
+ if (!resumeAfter(setLength))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStringSplitString(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* strArg = callInfo.getArg(0);
+ MDefinition* sepArg = callInfo.getArg(1);
+
+ if (strArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ if (sepArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ IonBuilder::InliningStatus resultConstStringSplit = inlineConstantStringSplitString(callInfo);
+ if (resultConstStringSplit != InliningStatus_NotInlined)
+ return resultConstStringSplit;
+
+ JSObject* templateObject = inspector->getTemplateObjectForNative(pc, js::intrinsic_StringSplitString);
+ if (!templateObject)
+ return InliningStatus_NotInlined;
+
+ TypeSet::ObjectKey* retKey = TypeSet::ObjectKey::get(templateObject);
+ if (retKey->unknownProperties())
+ return InliningStatus_NotInlined;
+
+ HeapTypeSetKey key = retKey->property(JSID_VOID);
+ if (!key.maybeTypes())
+ return InliningStatus_NotInlined;
+
+ if (!key.maybeTypes()->hasType(TypeSet::StringType())) {
+ key.freeze(constraints());
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MConstant* templateObjectDef = MConstant::New(alloc(), ObjectValue(*templateObject),
+ constraints());
+ current->add(templateObjectDef);
+
+ MStringSplit* ins = MStringSplit::New(alloc(), constraints(), strArg, sepArg,
+ templateObjectDef);
+ current->add(ins);
+ current->push(ins);
+
+ return InliningStatus_Inlined;
+}
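+// Sketch of the split lowerings above: str.split(sep) with string operands and
+// a template object from the baseline stub becomes an MStringSplit; when both
+// operands are constants matching the strings observed by the stub, the
+// constant path above materializes the result array directly from the
+// template object.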
+
+IonBuilder::InliningStatus
+IonBuilder::inlineObjectHasPrototype(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* objArg = callInfo.getArg(0);
+ MDefinition* protoArg = callInfo.getArg(1);
+
+ if (objArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (protoArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ // Inline only when both obj and proto are singleton objects, obj does not
+ // have an uncacheable proto, and obj.__proto__ is proto.
+ TemporaryTypeSet* objTypes = objArg->resultTypeSet();
+ if (!objTypes || objTypes->unknownObject() || objTypes->getObjectCount() != 1)
+ return InliningStatus_NotInlined;
+
+ TypeSet::ObjectKey* objKey = objTypes->getObject(0);
+ if (!objKey || !objKey->hasStableClassAndProto(constraints()))
+ return InliningStatus_NotInlined;
+ if (!objKey->isSingleton() || !objKey->singleton()->is<NativeObject>())
+ return InliningStatus_NotInlined;
+
+ JSObject* obj = &objKey->singleton()->as<NativeObject>();
+ if (obj->hasUncacheableProto())
+ return InliningStatus_NotInlined;
+
+ JSObject* actualProto = checkNurseryObject(objKey->proto().toObjectOrNull());
+ if (actualProto == nullptr)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* protoTypes = protoArg->resultTypeSet();
+ if (!protoTypes || protoTypes->unknownObject() || protoTypes->getObjectCount() != 1)
+ return InliningStatus_NotInlined;
+
+ TypeSet::ObjectKey* protoKey = protoTypes->getObject(0);
+ if (!protoKey || !protoKey->hasStableClassAndProto(constraints()))
+ return InliningStatus_NotInlined;
+ if (!protoKey->isSingleton() || !protoKey->singleton()->is<NativeObject>())
+ return InliningStatus_NotInlined;
+
+ JSObject* proto = &protoKey->singleton()->as<NativeObject>();
+ pushConstant(BooleanValue(proto == actualProto));
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStrCharCodeAt(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ if (callInfo.thisArg()->type() != MIRType::String && callInfo.thisArg()->type() != MIRType::Value)
+ return InliningStatus_NotInlined;
+ MIRType argType = callInfo.getArg(0)->type();
+ if (argType != MIRType::Int32 && argType != MIRType::Double)
+ return InliningStatus_NotInlined;
+
+ // Check for STR.charCodeAt(IDX) where STR is a constant string and IDX is a
+ // constant integer.
+ InliningStatus constInlineStatus = inlineConstantCharCodeAt(callInfo);
+ if (constInlineStatus != InliningStatus_NotInlined)
+ return constInlineStatus;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* index = MToInt32::New(alloc(), callInfo.getArg(0));
+ current->add(index);
+
+ MStringLength* length = MStringLength::New(alloc(), callInfo.thisArg());
+ current->add(length);
+
+ index = addBoundsCheck(index, length);
+
+ MCharCodeAt* charCode = MCharCodeAt::New(alloc(), callInfo.thisArg(), index);
+ current->add(charCode);
+ current->push(charCode);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineConstantCharCodeAt(CallInfo& callInfo)
+{
+ if (!callInfo.thisArg()->maybeConstantValue() || !callInfo.getArg(0)->maybeConstantValue()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
+ return InliningStatus_NotInlined;
+ }
+
+ MConstant* strval = callInfo.thisArg()->maybeConstantValue();
+ MConstant* idxval = callInfo.getArg(0)->maybeConstantValue();
+
+ if (strval->type() != MIRType::String || idxval->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ JSString* str = strval->toString();
+ if (!str->isLinear()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
+ return InliningStatus_NotInlined;
+ }
+
+ int32_t idx = idxval->toInt32();
+ if (idx < 0 || (uint32_t(idx) >= str->length())) {
+ trackOptimizationOutcome(TrackedOutcome::OutOfBounds);
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ JSLinearString& linstr = str->asLinear();
+ char16_t ch = linstr.latin1OrTwoByteChar(idx);
+ MConstant* result = MConstant::New(alloc(), Int32Value(ch));
+ current->add(result);
+ current->push(result);
+ return InliningStatus_Inlined;
+}
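+// Example of the constant folding above: if both the receiver and the index
+// are constants, e.g. "abc".charCodeAt(1), the call folds to the character
+// code itself (here the constant 98) at compile time.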
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStrFromCharCode(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MFromCharCode* string = MFromCharCode::New(alloc(), callInfo.getArg(0));
+ current->add(string);
+ current->push(string);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStrFromCodePoint(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MFromCodePoint* string = MFromCodePoint::New(alloc(), callInfo.getArg(0));
+ current->add(string);
+ current->push(string);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStrCharAt(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+ if (callInfo.thisArg()->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+ MIRType argType = callInfo.getArg(0)->type();
+ if (argType != MIRType::Int32 && argType != MIRType::Double)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* index = MToInt32::New(alloc(), callInfo.getArg(0));
+ current->add(index);
+
+ MStringLength* length = MStringLength::New(alloc(), callInfo.thisArg());
+ current->add(length);
+
+ index = addBoundsCheck(index, length);
+
+ // String.charAt(x) = String.fromCharCode(String.charCodeAt(x))
+ MCharCodeAt* charCode = MCharCodeAt::New(alloc(), callInfo.thisArg(), index);
+ current->add(charCode);
+
+ MFromCharCode* string = MFromCharCode::New(alloc(), charCode);
+ current->add(string);
+ current->push(string);
+ return InliningStatus_Inlined;
+}
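+// Sketch of the charAt lowering above: for an in-range index the result equals
+// String.fromCharCode(str.charCodeAt(i)), so the inline path emits a bounds
+// check, an MCharCodeAt and an MFromCharCode; an out-of-range index fails the
+// bounds check and bails out of the compiled code instead of returning "".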
+
+IonBuilder::InliningStatus
+IonBuilder::inlineRegExpMatcher(CallInfo& callInfo)
+{
+ // This is called from self-hosted JS after each argument has been tested,
+ // so most of the following tests should pass.
+
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* rxArg = callInfo.getArg(0);
+ MDefinition* strArg = callInfo.getArg(1);
+ MDefinition* lastIndexArg = callInfo.getArg(2);
+
+ if (rxArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* rxTypes = rxArg->resultTypeSet();
+ const Class* clasp = rxTypes ? rxTypes->getKnownClass(constraints()) : nullptr;
+ if (clasp != &RegExpObject::class_)
+ return InliningStatus_NotInlined;
+
+ if (strArg->mightBeType(MIRType::Object))
+ return InliningStatus_NotInlined;
+
+ if (lastIndexArg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ JSContext* cx = GetJitContext()->cx;
+ if (!cx->compartment()->jitCompartment()->ensureRegExpMatcherStubExists(cx)) {
+ cx->clearPendingException(); // OOM or overrecursion.
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* matcher = MRegExpMatcher::New(alloc(), rxArg, strArg, lastIndexArg);
+ current->add(matcher);
+ current->push(matcher);
+
+ if (!resumeAfter(matcher))
+ return InliningStatus_Error;
+
+ if (!pushTypeBarrier(matcher, getInlineReturnTypeSet(), BarrierKind::TypeSet))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineRegExpSearcher(CallInfo& callInfo)
+{
+ // This is called from self-hosted JS after each argument has been tested,
+ // so most of the following tests should pass.
+
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* rxArg = callInfo.getArg(0);
+ MDefinition* strArg = callInfo.getArg(1);
+ MDefinition* lastIndexArg = callInfo.getArg(2);
+
+ if (rxArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* regexpTypes = rxArg->resultTypeSet();
+ const Class* clasp = regexpTypes ? regexpTypes->getKnownClass(constraints()) : nullptr;
+ if (clasp != &RegExpObject::class_)
+ return InliningStatus_NotInlined;
+
+ if (strArg->mightBeType(MIRType::Object))
+ return InliningStatus_NotInlined;
+
+ if (lastIndexArg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ JSContext* cx = GetJitContext()->cx;
+ if (!cx->compartment()->jitCompartment()->ensureRegExpSearcherStubExists(cx)) {
+ cx->clearPendingException(); // OOM or overrecursion.
+ return InliningStatus_Error;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* searcher = MRegExpSearcher::New(alloc(), rxArg, strArg, lastIndexArg);
+ current->add(searcher);
+ current->push(searcher);
+
+ if (!resumeAfter(searcher))
+ return InliningStatus_Error;
+
+ if (!pushTypeBarrier(searcher, getInlineReturnTypeSet(), BarrierKind::TypeSet))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineRegExpTester(CallInfo& callInfo)
+{
+ // This is called from self-hosted JS after each argument has been tested,
+ // so most of the following tests should pass.
+
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* rxArg = callInfo.getArg(0);
+ MDefinition* strArg = callInfo.getArg(1);
+ MDefinition* lastIndexArg = callInfo.getArg(2);
+
+ if (rxArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* rxTypes = rxArg->resultTypeSet();
+ const Class* clasp = rxTypes ? rxTypes->getKnownClass(constraints()) : nullptr;
+ if (clasp != &RegExpObject::class_)
+ return InliningStatus_NotInlined;
+
+ if (strArg->mightBeType(MIRType::Object))
+ return InliningStatus_NotInlined;
+
+ if (lastIndexArg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ JSContext* cx = GetJitContext()->cx;
+ if (!cx->compartment()->jitCompartment()->ensureRegExpTesterStubExists(cx)) {
+ cx->clearPendingException(); // OOM or overrecursion.
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* tester = MRegExpTester::New(alloc(), rxArg, strArg, lastIndexArg);
+ current->add(tester);
+ current->push(tester);
+
+ if (!resumeAfter(tester))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsRegExpObject(CallInfo& callInfo)
+{
+ if (callInfo.constructing() || callInfo.argc() != 1) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = callInfo.getArg(0);
+
+ bool isRegExpObject;
+ if (!arg->mightBeType(MIRType::Object)) {
+ isRegExpObject = false;
+ } else {
+ if (arg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* types = arg->resultTypeSet();
+ const Class* clasp = types ? types->getKnownClass(constraints()) : nullptr;
+ if (!clasp || clasp->isProxy())
+ return InliningStatus_NotInlined;
+
+ isRegExpObject = (clasp == &RegExpObject::class_);
+ }
+
+ pushConstant(BooleanValue(isRegExpObject));
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineRegExpPrototypeOptimizable(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* protoArg = callInfo.getArg(0);
+
+ if (protoArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* opt = MRegExpPrototypeOptimizable::New(alloc(), protoArg);
+ current->add(opt);
+ current->push(opt);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineRegExpInstanceOptimizable(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* rxArg = callInfo.getArg(0);
+ MDefinition* protoArg = callInfo.getArg(1);
+
+ if (rxArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ if (protoArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* opt = MRegExpInstanceOptimizable::New(alloc(), rxArg, protoArg);
+ current->add(opt);
+ current->push(opt);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineGetFirstDollarIndex(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* strArg = callInfo.getArg(0);
+
+ if (strArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* ins = MGetFirstDollarIndex::New(alloc(), strArg);
+ current->add(ins);
+ current->push(ins);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineStringReplaceString(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ MDefinition* strArg = callInfo.getArg(0);
+ MDefinition* patArg = callInfo.getArg(1);
+ MDefinition* replArg = callInfo.getArg(2);
+
+ if (strArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ if (patArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ if (replArg->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* cte = MStringReplace::New(alloc(), strArg, patArg, replArg);
+ current->add(cte);
+ current->push(cte);
+ if (cte->isEffectful() && !resumeAfter(cte))
+ return InliningStatus_Error;
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSubstringKernel(CallInfo& callInfo)
+{
+ MOZ_ASSERT(callInfo.argc() == 3);
+ MOZ_ASSERT(!callInfo.constructing());
+
+ // Return: String.
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ // Arg 0: String.
+ if (callInfo.getArg(0)->type() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ // Arg 1: Int.
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ // Arg 2: Int.
+ if (callInfo.getArg(2)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MSubstr* substr = MSubstr::New(alloc(), callInfo.getArg(0), callInfo.getArg(1),
+ callInfo.getArg(2));
+ current->add(substr);
+ current->push(substr);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineObjectCreate(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing())
+ return InliningStatus_NotInlined;
+
+ JSObject* templateObject = inspector->getTemplateObjectForNative(pc, obj_create);
+ if (!templateObject)
+ return InliningStatus_NotInlined;
+
+ MOZ_ASSERT(templateObject->is<PlainObject>());
+ MOZ_ASSERT(!templateObject->isSingleton());
+
+ // Ensure the argument matches the template object's prototype.
+ MDefinition* arg = callInfo.getArg(0);
+ if (JSObject* proto = templateObject->staticPrototype()) {
+ if (IsInsideNursery(proto))
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* types = arg->resultTypeSet();
+ if (!types || types->maybeSingleton() != proto)
+ return InliningStatus_NotInlined;
+
+ MOZ_ASSERT(types->getKnownMIRType() == MIRType::Object);
+ } else {
+ if (arg->type() != MIRType::Null)
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ bool emitted = false;
+ if (!newObjectTryTemplateObject(&emitted, templateObject))
+ return InliningStatus_Error;
+
+ MOZ_ASSERT(emitted);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineDefineDataProperty(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+
+ // Only handle definitions of plain data properties.
+ if (callInfo.argc() != 3)
+ return InliningStatus_NotInlined;
+
+ MDefinition* obj = convertUnboxedObjects(callInfo.getArg(0));
+ MDefinition* id = callInfo.getArg(1);
+ MDefinition* value = callInfo.getArg(2);
+
+ if (ElementAccessHasExtraIndexedProperty(this, obj))
+ return InliningStatus_NotInlined;
+
+ // setElemTryDense will push the value as the result of the define instead
+ // of |undefined|, but this is fine if the rval is ignored (as it should be
+ // in self-hosted code).
+ MOZ_ASSERT(*GetNextPc(pc) == JSOP_POP);
+
+ bool emitted = false;
+ if (!setElemTryDense(&emitted, obj, id, value, /* writeHole = */ true))
+ return InliningStatus_Error;
+ if (!emitted)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
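+// Sketch of the lowering above: a DefineDataProperty(obj, index, value) call
+// from self-hosted code on a suitable dense array is emitted as a hole-writing
+// dense element store; the JSOP_POP assertion reflects that self-hosted
+// callers ignore the pushed value.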
+
+IonBuilder::InliningStatus
+IonBuilder::inlineHasClass(CallInfo& callInfo,
+ const Class* clasp1, const Class* clasp2,
+ const Class* clasp3, const Class* clasp4)
+{
+ if (callInfo.constructing() || callInfo.argc() != 1) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* types = callInfo.getArg(0)->resultTypeSet();
+ const Class* knownClass = types ? types->getKnownClass(constraints()) : nullptr;
+ if (knownClass) {
+ pushConstant(BooleanValue(knownClass == clasp1 ||
+ knownClass == clasp2 ||
+ knownClass == clasp3 ||
+ knownClass == clasp4));
+ } else {
+ MHasClass* hasClass1 = MHasClass::New(alloc(), callInfo.getArg(0), clasp1);
+ current->add(hasClass1);
+
+ if (!clasp2 && !clasp3 && !clasp4) {
+ current->push(hasClass1);
+ } else {
+ const Class* remaining[] = { clasp2, clasp3, clasp4 };
+ MDefinition* last = hasClass1;
+ for (size_t i = 0; i < ArrayLength(remaining); i++) {
+ MHasClass* hasClass = MHasClass::New(alloc(), callInfo.getArg(0), remaining[i]);
+ current->add(hasClass);
+ MBitOr* either = MBitOr::New(alloc(), last, hasClass);
+ either->infer(inspector, pc);
+ current->add(either);
+ last = either;
+ }
+
+ MDefinition* result = convertToBoolean(last);
+ current->push(result);
+ }
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
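+// Shape of the fallback above when the class isn't statically known: one
+// MHasClass per candidate class, OR'ed together and converted to a boolean,
+// e.g. for ObjectIsTypedObject roughly
+//   toBoolean(hasClass(x, c1) | hasClass(x, c2) | hasClass(x, c3) | hasClass(x, c4))
+// while a statically known class folds the whole call to a boolean constant.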
+
+IonBuilder::InliningStatus
+IonBuilder::inlineGetNextEntryForIterator(CallInfo& callInfo, MGetNextEntryForIterator::Mode mode)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* iterArg = callInfo.getArg(0);
+ MDefinition* resultArg = callInfo.getArg(1);
+
+ if (iterArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* iterTypes = iterArg->resultTypeSet();
+ const Class* iterClasp = iterTypes ? iterTypes->getKnownClass(constraints()) : nullptr;
+ if (mode == MGetNextEntryForIterator::Map) {
+ if (iterClasp != &MapIteratorObject::class_)
+ return InliningStatus_NotInlined;
+ } else {
+ MOZ_ASSERT(mode == MGetNextEntryForIterator::Set);
+
+ if (iterClasp != &SetIteratorObject::class_)
+ return InliningStatus_NotInlined;
+ }
+
+ if (resultArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ TemporaryTypeSet* resultTypes = resultArg->resultTypeSet();
+ const Class* resultClasp = resultTypes ? resultTypes->getKnownClass(constraints()) : nullptr;
+ if (resultClasp != &ArrayObject::class_)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* next = MGetNextEntryForIterator::New(alloc(), iterArg, resultArg, mode);
+ current->add(next);
+ current->push(next);
+
+ if (!resumeAfter(next))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+static bool
+IsArrayBufferObject(CompilerConstraintList* constraints, MDefinition* def)
+{
+ MOZ_ASSERT(def->type() == MIRType::Object);
+
+ TemporaryTypeSet* types = def->resultTypeSet();
+ if (!types)
+ return false;
+
+ return types->getKnownClass(constraints) == &ArrayBufferObject::class_;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineArrayBufferByteLength(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 1);
+
+ MDefinition* objArg = callInfo.getArg(0);
+ if (objArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ MInstruction* ins = addArrayBufferByteLength(objArg);
+ current->push(ins);
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlinePossiblyWrappedArrayBufferByteLength(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 1);
+
+ MDefinition* objArg = callInfo.getArg(0);
+ if (objArg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
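+    // The argument might be a wrapper, so only inline when TI proves it is
+    // actually an ArrayBufferObject.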
+ if (!IsArrayBufferObject(constraints(), objArg))
+ return InliningStatus_NotInlined;
+
+ MInstruction* ins = addArrayBufferByteLength(objArg);
+ current->push(ins);
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineTypedArray(CallInfo& callInfo, Native native)
+{
+ if (!callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.argc() != 1)
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = callInfo.getArg(0);
+
+ if (arg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ JSObject* templateObject = inspector->getTemplateObjectForNative(pc, native);
+
+ if (!templateObject) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeNoTemplateObj);
+ return InliningStatus_NotInlined;
+ }
+
+ MOZ_ASSERT(templateObject->is<TypedArrayObject>());
+ TypedArrayObject* obj = &templateObject->as<TypedArrayObject>();
+
+ // Do not optimize when we see a template object with a singleton type,
+ // since it hits at most once.
+ if (templateObject->isSingleton())
+ return InliningStatus_NotInlined;
+
+ MInstruction* ins = nullptr;
+
+ if (!arg->isConstant()) {
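+        // The length is only known at run time, so allocate the typed array
+        // with a dynamic length.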
+ callInfo.setImplicitlyUsedUnchecked();
+ ins = MNewTypedArrayDynamicLength::New(alloc(), constraints(), templateObject,
+ templateObject->group()->initialHeap(constraints()),
+ arg);
+ } else {
+        // Negative lengths must throw a RangeError. (When deciding whether to
+        // inline we don't track that this might have thrown before, so we
+        // have to deal with this error case when inlining.)
+ int32_t providedLen = arg->maybeConstantValue()->toInt32();
+ if (providedLen <= 0)
+ return InliningStatus_NotInlined;
+
+ uint32_t len = AssertedCast<uint32_t>(providedLen);
+
+ if (obj->length() != len)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), obj);
+ current->add(templateConst);
+ ins = MNewTypedArray::New(alloc(), constraints(), templateConst,
+ obj->group()->initialHeap(constraints()));
+ }
+
+ current->add(ins);
+ current->push(ins);
+ if (!resumeAfter(ins))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsTypedArrayHelper(CallInfo& callInfo, WrappingBehavior wrappingBehavior)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 1);
+
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ // The test is elaborate: in-line only if there is exact
+ // information.
+
+ TemporaryTypeSet* types = callInfo.getArg(0)->resultTypeSet();
+ if (!types)
+ return InliningStatus_NotInlined;
+
+ bool result = false;
+ switch (types->forAllClasses(constraints(), IsTypedArrayClass)) {
+ case TemporaryTypeSet::ForAllResult::ALL_FALSE:
+ // Wrapped typed arrays won't appear to be typed arrays per a
+ // |forAllClasses| query. If wrapped typed arrays are to be considered
+ // typed arrays, a negative answer is not conclusive. Don't inline in
+ // that case.
+ if (wrappingBehavior == AllowWrappedTypedArrays) {
+ switch (types->forAllClasses(constraints(), IsProxyClass)) {
+ case TemporaryTypeSet::ForAllResult::ALL_FALSE:
+ case TemporaryTypeSet::ForAllResult::EMPTY:
+ break;
+ case TemporaryTypeSet::ForAllResult::ALL_TRUE:
+ case TemporaryTypeSet::ForAllResult::MIXED:
+ return InliningStatus_NotInlined;
+ }
+ }
+
+ MOZ_FALLTHROUGH;
+
+ case TemporaryTypeSet::ForAllResult::EMPTY:
+ result = false;
+ break;
+
+ case TemporaryTypeSet::ForAllResult::ALL_TRUE:
+ result = true;
+ break;
+
+ case TemporaryTypeSet::ForAllResult::MIXED:
+ return InliningStatus_NotInlined;
+ }
+
+ pushConstant(BooleanValue(result));
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsTypedArray(CallInfo& callInfo)
+{
+ return inlineIsTypedArrayHelper(callInfo, RejectWrappedTypedArrays);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsPossiblyWrappedTypedArray(CallInfo& callInfo)
+{
+ return inlineIsTypedArrayHelper(callInfo, AllowWrappedTypedArrays);
+}
+
+static bool
+IsTypedArrayObject(CompilerConstraintList* constraints, MDefinition* def)
+{
+ MOZ_ASSERT(def->type() == MIRType::Object);
+
+ TemporaryTypeSet* types = def->resultTypeSet();
+ if (!types)
+ return false;
+
+ return types->forAllClasses(constraints, IsTypedArrayClass) ==
+ TemporaryTypeSet::ForAllResult::ALL_TRUE;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlinePossiblyWrappedTypedArrayLength(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 1);
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ if (!IsTypedArrayObject(constraints(), callInfo.getArg(0)))
+ return InliningStatus_NotInlined;
+
+ MInstruction* length = addTypedArrayLength(callInfo.getArg(0));
+ current->push(length);
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineTypedArrayLength(CallInfo& callInfo)
+{
+ return inlinePossiblyWrappedTypedArrayLength(callInfo);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSetDisjointTypedElements(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 3);
+
+ // Initial argument requirements.
+
+ MDefinition* target = callInfo.getArg(0);
+ if (target->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::Undefined)
+ return InliningStatus_NotInlined;
+
+ MDefinition* targetOffset = callInfo.getArg(1);
+ MOZ_ASSERT(targetOffset->type() == MIRType::Int32);
+
+ MDefinition* sourceTypedArray = callInfo.getArg(2);
+ if (sourceTypedArray->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+    // Only attempt to optimize if |target| and |sourceTypedArray| are both
+    // definitely typed arrays. (The former always is; the latter might not
+    // be, because of wrappers.)
+ if (!IsTypedArrayObject(constraints(), target) ||
+ !IsTypedArrayObject(constraints(), sourceTypedArray))
+ {
+ return InliningStatus_NotInlined;
+ }
+
+ auto sets = MSetDisjointTypedElements::New(alloc(), target, targetOffset, sourceTypedArray);
+ current->add(sets);
+
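+    // The intrinsic returns |undefined|, so push that as the call's result.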
+ pushConstant(UndefinedValue());
+
+ if (!resumeAfter(sets))
+ return InliningStatus_Error;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineObjectIsTypeDescr(CallInfo& callInfo)
+{
+ if (callInfo.constructing() || callInfo.argc() != 1) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ // The test is elaborate: in-line only if there is exact
+ // information.
+
+ TemporaryTypeSet* types = callInfo.getArg(0)->resultTypeSet();
+ if (!types)
+ return InliningStatus_NotInlined;
+
+ bool result = false;
+ switch (types->forAllClasses(constraints(), IsTypeDescrClass)) {
+ case TemporaryTypeSet::ForAllResult::ALL_FALSE:
+ case TemporaryTypeSet::ForAllResult::EMPTY:
+ result = false;
+ break;
+ case TemporaryTypeSet::ForAllResult::ALL_TRUE:
+ result = true;
+ break;
+ case TemporaryTypeSet::ForAllResult::MIXED:
+ return InliningStatus_NotInlined;
+ }
+
+ pushConstant(BooleanValue(result));
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSetTypedObjectOffset(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* typedObj = callInfo.getArg(0);
+ MDefinition* offset = callInfo.getArg(1);
+
+ // Return type should be undefined or something wacky is going on.
+ if (getInlineReturnType() != MIRType::Undefined)
+ return InliningStatus_NotInlined;
+
+ // Check typedObj is a, well, typed object. Go ahead and use TI
+ // data. If this check should fail, that is almost certainly a bug
+ // in self-hosted code -- either because it's not being careful
+ // with TI or because of something else -- but we'll just let it
+ // fall through to the SetTypedObjectOffset intrinsic in such
+ // cases.
+ TemporaryTypeSet* types = typedObj->resultTypeSet();
+ if (typedObj->type() != MIRType::Object || !types)
+ return InliningStatus_NotInlined;
+ switch (types->forAllClasses(constraints(), IsTypedObjectClass)) {
+ case TemporaryTypeSet::ForAllResult::ALL_FALSE:
+ case TemporaryTypeSet::ForAllResult::EMPTY:
+ case TemporaryTypeSet::ForAllResult::MIXED:
+ return InliningStatus_NotInlined;
+ case TemporaryTypeSet::ForAllResult::ALL_TRUE:
+ break;
+ }
+
+    // Check that the offset argument is an integer.
+ if (offset->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MInstruction* ins = MSetTypedObjectOffset::New(alloc(), typedObj, offset);
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineUnsafeSetReservedSlot(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+ if (getInlineReturnType() != MIRType::Undefined)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ // Don't inline if we don't have a constant slot.
+ MDefinition* arg = callInfo.getArg(1);
+ if (!arg->isConstant())
+ return InliningStatus_NotInlined;
+ uint32_t slot = uint32_t(arg->toConstant()->toInt32());
+
+ callInfo.setImplicitlyUsedUnchecked();
+
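+    // Emit a barriered store to the object's fixed slot; a post-write barrier
+    // is added below when the stored value might be a nursery object.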
+ MStoreFixedSlot* store =
+ MStoreFixedSlot::NewBarriered(alloc(), callInfo.getArg(0), slot, callInfo.getArg(2));
+ current->add(store);
+ current->push(store);
+
+ if (NeedsPostBarrier(callInfo.getArg(2)))
+ current->add(MPostWriteBarrier::New(alloc(), callInfo.getArg(0), callInfo.getArg(2)));
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineUnsafeGetReservedSlot(CallInfo& callInfo, MIRType knownValueType)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ // Don't inline if we don't have a constant slot.
+ MDefinition* arg = callInfo.getArg(1);
+ if (!arg->isConstant())
+ return InliningStatus_NotInlined;
+ uint32_t slot = uint32_t(arg->toConstant()->toInt32());
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MLoadFixedSlot* load = MLoadFixedSlot::New(alloc(), callInfo.getArg(0), slot);
+ current->add(load);
+ current->push(load);
+ if (knownValueType != MIRType::Value) {
+ // We know what type we have in this slot. Assert that this is in fact
+ // what we've seen coming from this slot in the past, then tell the
+ // MLoadFixedSlot about its result type. That will make us do an
+ // infallible unbox as part of the slot load and then we'll barrier on
+ // the unbox result. That way the type barrier code won't end up doing
+ // MIRType checks and conditional unboxing.
+ MOZ_ASSERT_IF(!getInlineReturnTypeSet()->empty(),
+ getInlineReturnType() == knownValueType);
+ load->setResultType(knownValueType);
+ }
+
+ // We don't track reserved slot types, so always emit a barrier.
+ if (!pushTypeBarrier(load, getInlineReturnTypeSet(), BarrierKind::TypeSet))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsCallable(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = callInfo.getArg(0);
+ // Do not inline if the type of arg is neither primitive nor object.
+ if (arg->type() > MIRType::Object)
+ return InliningStatus_NotInlined;
+
+    // Try inlining with a constant true/false: only objects may be callable
+    // at all, and if we know the class we can check whether it is callable.
+ bool isCallableKnown = false;
+ bool isCallableConstant;
+ if (arg->type() != MIRType::Object) {
+ // Primitive (including undefined and null).
+ isCallableKnown = true;
+ isCallableConstant = false;
+ } else {
+ TemporaryTypeSet* types = arg->resultTypeSet();
+ const Class* clasp = types ? types->getKnownClass(constraints()) : nullptr;
+ if (clasp && !clasp->isProxy()) {
+ isCallableKnown = true;
+ isCallableConstant = clasp->nonProxyCallable();
+ }
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (isCallableKnown) {
+ MConstant* constant = MConstant::New(alloc(), BooleanValue(isCallableConstant));
+ current->add(constant);
+ current->push(constant);
+ return InliningStatus_Inlined;
+ }
+
+ MIsCallable* isCallable = MIsCallable::New(alloc(), arg);
+ current->add(isCallable);
+ current->push(isCallable);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsConstructor(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 1);
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MIsConstructor* ins = MIsConstructor::New(alloc(), callInfo.getArg(0));
+ current->add(ins);
+ current->push(ins);
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsObject(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ if (callInfo.getArg(0)->type() == MIRType::Object) {
+ pushConstant(BooleanValue(true));
+ } else {
+ MIsObject* isObject = MIsObject::New(alloc(), callInfo.getArg(0));
+ current->add(isObject);
+ current->push(isObject);
+ }
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineToObject(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // If we know the input type is an object, nop ToObject.
+ if (getInlineReturnType() != MIRType::Object)
+ return InliningStatus_NotInlined;
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MDefinition* object = callInfo.getArg(0);
+
+ current->push(object);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsWrappedArrayConstructor(CallInfo& callInfo)
+{
+ if (callInfo.constructing() || callInfo.argc() != 1) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+ MDefinition* arg = callInfo.getArg(0);
+ if (arg->type() != MIRType::Object)
+ return InliningStatus_NotInlined;
+
+    TemporaryTypeSet* types = arg->resultTypeSet();
+    if (!types)
+        return InliningStatus_NotInlined;
+    switch (types->forAllClasses(constraints(), IsProxyClass)) {
+ case TemporaryTypeSet::ForAllResult::ALL_FALSE:
+ break;
+ case TemporaryTypeSet::ForAllResult::EMPTY:
+ case TemporaryTypeSet::ForAllResult::ALL_TRUE:
+ case TemporaryTypeSet::ForAllResult::MIXED:
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ // Inline only if argument is absolutely *not* a Proxy.
+ pushConstant(BooleanValue(false));
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineToInteger(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* input = callInfo.getArg(0);
+
+    // Only optimize cases where the input contains only number, null, or boolean values.
+ if (input->mightBeType(MIRType::Object) ||
+ input->mightBeType(MIRType::String) ||
+ input->mightBeType(MIRType::Symbol) ||
+ input->mightBeType(MIRType::Undefined) ||
+ input->mightBeMagicType())
+ {
+ return InliningStatus_NotInlined;
+ }
+
+ MOZ_ASSERT(input->type() == MIRType::Value || input->type() == MIRType::Null ||
+ input->type() == MIRType::Boolean || IsNumberType(input->type()));
+
+    // Only optimize cases where the output is Int32.
+ if (getInlineReturnType() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
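+    // The observed return type is Int32 (checked above), so ToInteger can be
+    // emitted as an MToInt32 conversion.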
+ MToInt32* toInt32 = MToInt32::New(alloc(), callInfo.getArg(0));
+ current->add(toInt32);
+ current->push(toInt32);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineToString(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing())
+ return InliningStatus_NotInlined;
+
+ if (getInlineReturnType() != MIRType::String)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+ MToString* toString = MToString::New(alloc(), callInfo.getArg(0));
+ current->add(toString);
+ current->push(toString);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineBailout(CallInfo& callInfo)
+{
+ callInfo.setImplicitlyUsedUnchecked();
+
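+    // Unconditionally bail out of the compiled code, then push |undefined| as
+    // the call's result.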
+ current->add(MBail::New(alloc()));
+
+ MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
+ current->add(undefined);
+ current->push(undefined);
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAssertFloat32(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2)
+ return InliningStatus_NotInlined;
+
+ MDefinition* secondArg = callInfo.getArg(1);
+
+ MOZ_ASSERT(secondArg->type() == MIRType::Boolean);
+ MOZ_ASSERT(secondArg->isConstant());
+
+ bool mustBeFloat32 = secondArg->toConstant()->toBoolean();
+ current->add(MAssertFloat32::New(alloc(), callInfo.getArg(0), mustBeFloat32));
+
+ MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
+ current->add(undefined);
+ current->push(undefined);
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAssertRecoveredOnBailout(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2)
+ return InliningStatus_NotInlined;
+
+ // Don't assert for recovered instructions when recovering is disabled.
+ if (JitOptions.disableRecoverIns)
+ return InliningStatus_NotInlined;
+
+ if (JitOptions.checkRangeAnalysis) {
+        // If we are checking the range of all instructions, then the guards
+        // inserted by Range Analysis prevent the use of recover instructions.
+        // Thus, we just disable these checks.
+ current->push(constant(UndefinedValue()));
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+ }
+
+ MDefinition* secondArg = callInfo.getArg(1);
+
+ MOZ_ASSERT(secondArg->type() == MIRType::Boolean);
+ MOZ_ASSERT(secondArg->isConstant());
+
+ bool mustBeRecovered = secondArg->toConstant()->toBoolean();
+ MAssertRecoveredOnBailout* assert =
+ MAssertRecoveredOnBailout::New(alloc(), callInfo.getArg(0), mustBeRecovered);
+ current->add(assert);
+ current->push(assert);
+
+    // Create an instruction sequence which implies that the argument of the
+    // assertRecoveredOnBailout function would be encoded in at least one
+    // snapshot.
+ MNop* nop = MNop::New(alloc());
+ current->add(nop);
+ if (!resumeAfter(nop))
+ return InliningStatus_Error;
+ current->add(MEncodeSnapshot::New(alloc()));
+
+ current->pop();
+ current->push(constant(UndefinedValue()));
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsCompareExchange(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 4 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+    // These guards are desirable here and in subsequent atomics to
+    // avoid bad bailouts with MTruncateToInt32; see
+    // https://bugzilla.mozilla.org/show_bug.cgi?id=1141986#c20.
+ MDefinition* oldval = callInfo.getArg(2);
+ if (oldval->mightBeType(MIRType::Object) || oldval->mightBeType(MIRType::Symbol))
+ return InliningStatus_NotInlined;
+
+ MDefinition* newval = callInfo.getArg(3);
+ if (newval->mightBeType(MIRType::Object) || newval->mightBeType(MIRType::Symbol))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type arrayType;
+ bool requiresCheck = false;
+ if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* elements;
+ MDefinition* index;
+ atomicsCheckBounds(callInfo, &elements, &index);
+
+ if (requiresCheck)
+ addSharedTypedArrayGuard(callInfo.getArg(0));
+
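+    // Emit the compare-exchange on the extracted elements;
+    // atomicsMeetsPreconditions() already validated the element and return types.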
+ MCompareExchangeTypedArrayElement* cas =
+ MCompareExchangeTypedArrayElement::New(alloc(), elements, index, arrayType, oldval, newval);
+ cas->setResultType(getInlineReturnType());
+ current->add(cas);
+ current->push(cas);
+
+ if (!resumeAfter(cas))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsExchange(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* value = callInfo.getArg(2);
+ if (value->mightBeType(MIRType::Object) || value->mightBeType(MIRType::Symbol))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type arrayType;
+ bool requiresCheck = false;
+ if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* elements;
+ MDefinition* index;
+ atomicsCheckBounds(callInfo, &elements, &index);
+
+ if (requiresCheck)
+ addSharedTypedArrayGuard(callInfo.getArg(0));
+
+ MInstruction* exchange =
+ MAtomicExchangeTypedArrayElement::New(alloc(), elements, index, value, arrayType);
+ exchange->setResultType(getInlineReturnType());
+ current->add(exchange);
+ current->push(exchange);
+
+ if (!resumeAfter(exchange))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsLoad(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ Scalar::Type arrayType;
+ bool requiresCheck = false;
+ if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* elements;
+ MDefinition* index;
+ atomicsCheckBounds(callInfo, &elements, &index);
+
+ if (requiresCheck)
+ addSharedTypedArrayGuard(callInfo.getArg(0));
+
+ MLoadUnboxedScalar* load =
+ MLoadUnboxedScalar::New(alloc(), elements, index, arrayType,
+ DoesRequireMemoryBarrier);
+ load->setResultType(getInlineReturnType());
+ current->add(load);
+ current->push(load);
+
+ // Loads are considered effectful (they execute a memory barrier).
+ if (!resumeAfter(load))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsStore(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // Atomics.store() is annoying because it returns the result of converting
+ // the value by ToInteger(), not the input value, nor the result of
+ // converting the value by ToInt32(). It is especially annoying because
+ // almost nobody uses the result value.
+ //
+ // As an expedient compromise, therefore, we inline only if the result is
+ // obviously unused or if the argument is already Int32 and thus requires no
+ // conversion.
+
+ MDefinition* value = callInfo.getArg(2);
+ if (!BytecodeIsPopped(pc) && value->type() != MIRType::Int32) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadType);
+ return InliningStatus_NotInlined;
+ }
+
+ if (value->mightBeType(MIRType::Object) || value->mightBeType(MIRType::Symbol))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type arrayType;
+ bool requiresCheck = false;
+ if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck, DontCheckAtomicResult))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MInstruction* elements;
+ MDefinition* index;
+ atomicsCheckBounds(callInfo, &elements, &index);
+
+ if (requiresCheck)
+ addSharedTypedArrayGuard(callInfo.getArg(0));
+
+ MDefinition* toWrite = value;
+ if (toWrite->type() != MIRType::Int32) {
+ toWrite = MTruncateToInt32::New(alloc(), toWrite);
+ current->add(toWrite->toInstruction());
+ }
+ MStoreUnboxedScalar* store =
+ MStoreUnboxedScalar::New(alloc(), elements, index, toWrite, arrayType,
+ MStoreUnboxedScalar::TruncateInput, DoesRequireMemoryBarrier);
+ current->add(store);
+ current->push(value); // Either Int32 or not used; in either case correct
+
+ if (!resumeAfter(store))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsBinop(CallInfo& callInfo, InlinableNative target)
+{
+ if (callInfo.argc() != 3 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* value = callInfo.getArg(2);
+ if (value->mightBeType(MIRType::Object) || value->mightBeType(MIRType::Symbol))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type arrayType;
+ bool requiresCheck = false;
+ if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (requiresCheck)
+ addSharedTypedArrayGuard(callInfo.getArg(0));
+
+ MInstruction* elements;
+ MDefinition* index;
+ atomicsCheckBounds(callInfo, &elements, &index);
+
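+    // Map the Atomics operation being inlined to the corresponding
+    // fetch-and-op code.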
+ AtomicOp k = AtomicFetchAddOp;
+ switch (target) {
+ case InlinableNative::AtomicsAdd:
+ k = AtomicFetchAddOp;
+ break;
+ case InlinableNative::AtomicsSub:
+ k = AtomicFetchSubOp;
+ break;
+ case InlinableNative::AtomicsAnd:
+ k = AtomicFetchAndOp;
+ break;
+ case InlinableNative::AtomicsOr:
+ k = AtomicFetchOrOp;
+ break;
+ case InlinableNative::AtomicsXor:
+ k = AtomicFetchXorOp;
+ break;
+ default:
+ MOZ_CRASH("Bad atomic operation");
+ }
+
+ MAtomicTypedArrayElementBinop* binop =
+ MAtomicTypedArrayElementBinop::New(alloc(), k, elements, index, arrayType, value);
+ binop->setResultType(getInlineReturnType());
+ current->add(binop);
+ current->push(binop);
+
+ if (!resumeAfter(binop))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsIsLockFree(CallInfo& callInfo)
+{
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MAtomicIsLockFree* ilf =
+ MAtomicIsLockFree::New(alloc(), callInfo.getArg(0));
+ current->add(ilf);
+ current->push(ilf);
+
+ return InliningStatus_Inlined;
+}
+
+bool
+IonBuilder::atomicsMeetsPreconditions(CallInfo& callInfo, Scalar::Type* arrayType,
+ bool* requiresTagCheck, AtomicCheckResult checkResult)
+{
+ if (!JitSupportsAtomics())
+ return false;
+
+ if (callInfo.getArg(0)->type() != MIRType::Object)
+ return false;
+
+ if (callInfo.getArg(1)->type() != MIRType::Int32)
+ return false;
+
+ // Ensure that the first argument is a TypedArray that maps shared
+ // memory.
+ //
+ // Then check both that the element type is something we can
+ // optimize and that the return type is suitable for that element
+ // type.
+
+ TemporaryTypeSet* arg0Types = callInfo.getArg(0)->resultTypeSet();
+ if (!arg0Types)
+ return false;
+
+ TemporaryTypeSet::TypedArraySharedness sharedness;
+ *arrayType = arg0Types->getTypedArrayType(constraints(), &sharedness);
+ *requiresTagCheck = sharedness != TemporaryTypeSet::KnownShared;
+ switch (*arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ return checkResult == DontCheckAtomicResult || getInlineReturnType() == MIRType::Int32;
+ case Scalar::Uint32:
+ // Bug 1077305: it would be attractive to allow inlining even
+ // if the inline return type is Int32, which it will frequently
+ // be.
+ return checkResult == DontCheckAtomicResult || getInlineReturnType() == MIRType::Double;
+ default:
+ // Excludes floating types and Uint8Clamped.
+ return false;
+ }
+}
+
+void
+IonBuilder::atomicsCheckBounds(CallInfo& callInfo, MInstruction** elements, MDefinition** index)
+{
+ // Perform bounds checking and extract the elements vector.
+ MDefinition* obj = callInfo.getArg(0);
+ MInstruction* length = nullptr;
+ *index = callInfo.getArg(1);
+ *elements = nullptr;
+ addTypedArrayLengthAndData(obj, DoBoundsCheck, index, &length, elements);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineIsConstructing(CallInfo& callInfo)
+{
+ MOZ_ASSERT(!callInfo.constructing());
+ MOZ_ASSERT(callInfo.argc() == 0);
+ MOZ_ASSERT(script()->functionNonDelazifying(),
+ "isConstructing() should only be called in function scripts");
+
+ if (getInlineReturnType() != MIRType::Boolean)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
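+    // In the outermost frame the answer is only known at run time; when this
+    // call was itself inlined we know statically whether the caller was
+    // constructing.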
+ if (inliningDepth_ == 0) {
+ MInstruction* ins = MIsConstructing::New(alloc());
+ current->add(ins);
+ current->push(ins);
+ return InliningStatus_Inlined;
+ }
+
+ bool constructing = inlineCallInfo_->constructing();
+ pushConstant(BooleanValue(constructing));
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* descr)
+{
+ // Only inline default constructors for now.
+ if (callInfo.argc() != 0) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ if (size_t(descr->size()) > InlineTypedObject::MaximumSize)
+ return InliningStatus_NotInlined;
+
+ JSObject* obj = inspector->getTemplateObjectForClassHook(pc, descr->getClass());
+ if (!obj || !obj->is<InlineTypedObject>())
+ return InliningStatus_NotInlined;
+
+ InlineTypedObject* templateObject = &obj->as<InlineTypedObject>();
+ if (&templateObject->typeDescr() != descr)
+ return InliningStatus_NotInlined;
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ MNewTypedObject* ins = MNewTypedObject::New(alloc(), constraints(), templateObject,
+ templateObject->group()->initialHeap(constraints()));
+ current->add(ins);
+ current->push(ins);
+
+ return InliningStatus_Inlined;
+}
+
+// Main entry point for SIMD inlining.
+// When the controlling simdType is an integer type, sign indicates whether the lanes should
+// be treated as signed or unsigned integers.
+IonBuilder::InliningStatus
+IonBuilder::inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type)
+{
+ if (!JitSupportsSimd()) {
+ trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
+ return InliningStatus_NotInlined;
+ }
+
+ JSNative native = target->native();
+ const JSJitInfo* jitInfo = target->jitInfo();
+ MOZ_ASSERT(jitInfo && jitInfo->type() == JSJitInfo::InlinableNative);
+ SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
+
+    switch (simdOp) {
+ case SimdOperation::Constructor:
+ // SIMD constructor calls are handled via inlineNonFunctionCall(), so
+ // they won't show up here where target is required to be a JSFunction.
+ // See also inlineConstructSimdObject().
+ MOZ_CRASH("SIMD constructor call not expected.");
+ case SimdOperation::Fn_check:
+ return inlineSimdCheck(callInfo, native, type);
+ case SimdOperation::Fn_splat:
+ return inlineSimdSplat(callInfo, native, type);
+ case SimdOperation::Fn_extractLane:
+ return inlineSimdExtractLane(callInfo, native, type);
+ case SimdOperation::Fn_replaceLane:
+ return inlineSimdReplaceLane(callInfo, native, type);
+ case SimdOperation::Fn_select:
+ return inlineSimdSelect(callInfo, native, type);
+ case SimdOperation::Fn_swizzle:
+ return inlineSimdShuffle(callInfo, native, type, 1);
+ case SimdOperation::Fn_shuffle:
+ return inlineSimdShuffle(callInfo, native, type, 2);
+
+ // Unary arithmetic.
+ case SimdOperation::Fn_abs:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::abs, type);
+ case SimdOperation::Fn_neg:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::neg, type);
+ case SimdOperation::Fn_not:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::not_, type);
+ case SimdOperation::Fn_reciprocalApproximation:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalApproximation,
+ type);
+ case SimdOperation::Fn_reciprocalSqrtApproximation:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalSqrtApproximation,
+ type);
+ case SimdOperation::Fn_sqrt:
+ return inlineSimdUnary(callInfo, native, MSimdUnaryArith::sqrt, type);
+
+ // Binary arithmetic.
+ case SimdOperation::Fn_add:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_add, type);
+ case SimdOperation::Fn_sub:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_sub, type);
+ case SimdOperation::Fn_mul:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_mul, type);
+ case SimdOperation::Fn_div:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_div, type);
+ case SimdOperation::Fn_max:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_max, type);
+ case SimdOperation::Fn_min:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_min, type);
+ case SimdOperation::Fn_maxNum:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_maxNum, type);
+ case SimdOperation::Fn_minNum:
+ return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_minNum, type);
+
+ // Binary saturating.
+ case SimdOperation::Fn_addSaturate:
+ return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::add, type);
+ case SimdOperation::Fn_subSaturate:
+ return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::sub, type);
+
+ // Binary bitwise.
+ case SimdOperation::Fn_and:
+ return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::and_, type);
+ case SimdOperation::Fn_or:
+ return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::or_, type);
+ case SimdOperation::Fn_xor:
+ return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::xor_, type);
+
+ // Shifts.
+ case SimdOperation::Fn_shiftLeftByScalar:
+ return inlineSimdShift(callInfo, native, MSimdShift::lsh, type);
+ case SimdOperation::Fn_shiftRightByScalar:
+ return inlineSimdShift(callInfo, native, MSimdShift::rshForSign(GetSimdSign(type)), type);
+
+ // Boolean unary.
+ case SimdOperation::Fn_allTrue:
+ return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */true, native, type);
+ case SimdOperation::Fn_anyTrue:
+ return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */false, native, type);
+
+ // Comparisons.
+ case SimdOperation::Fn_lessThan:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThan, type);
+ case SimdOperation::Fn_lessThanOrEqual:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThanOrEqual, type);
+ case SimdOperation::Fn_equal:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::equal, type);
+ case SimdOperation::Fn_notEqual:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::notEqual, type);
+ case SimdOperation::Fn_greaterThan:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThan, type);
+ case SimdOperation::Fn_greaterThanOrEqual:
+ return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThanOrEqual, type);
+
+ // Int <-> Float conversions.
+ case SimdOperation::Fn_fromInt32x4:
+ return inlineSimdConvert(callInfo, native, false, SimdType::Int32x4, type);
+ case SimdOperation::Fn_fromUint32x4:
+ return inlineSimdConvert(callInfo, native, false, SimdType::Uint32x4, type);
+ case SimdOperation::Fn_fromFloat32x4:
+ return inlineSimdConvert(callInfo, native, false, SimdType::Float32x4, type);
+
+ // Load/store.
+ case SimdOperation::Fn_load:
+ return inlineSimdLoad(callInfo, native, type, GetSimdLanes(type));
+ case SimdOperation::Fn_load1:
+ return inlineSimdLoad(callInfo, native, type, 1);
+ case SimdOperation::Fn_load2:
+ return inlineSimdLoad(callInfo, native, type, 2);
+ case SimdOperation::Fn_load3:
+ return inlineSimdLoad(callInfo, native, type, 3);
+ case SimdOperation::Fn_store:
+ return inlineSimdStore(callInfo, native, type, GetSimdLanes(type));
+ case SimdOperation::Fn_store1:
+ return inlineSimdStore(callInfo, native, type, 1);
+ case SimdOperation::Fn_store2:
+ return inlineSimdStore(callInfo, native, type, 2);
+ case SimdOperation::Fn_store3:
+ return inlineSimdStore(callInfo, native, type, 3);
+
+ // Bitcasts. One for each type with a memory representation.
+ case SimdOperation::Fn_fromInt32x4Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Int32x4, type);
+ case SimdOperation::Fn_fromUint32x4Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Uint32x4, type);
+ case SimdOperation::Fn_fromInt16x8Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Int16x8, type);
+ case SimdOperation::Fn_fromUint16x8Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Uint16x8, type);
+ case SimdOperation::Fn_fromInt8x16Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Int8x16, type);
+ case SimdOperation::Fn_fromUint8x16Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Uint8x16, type);
+ case SimdOperation::Fn_fromFloat32x4Bits:
+ return inlineSimdConvert(callInfo, native, true, SimdType::Float32x4, type);
+ case SimdOperation::Fn_fromFloat64x2Bits:
+ return InliningStatus_NotInlined;
+ }
+
+ MOZ_CRASH("Unexpected SIMD opcode");
+}
+
+// The representation of boolean SIMD vectors is the same as the corresponding
+// integer SIMD vectors with -1 lanes meaning true and 0 lanes meaning false.
+//
+// Functions that set the value of a boolean vector lane work by applying
+// ToBoolean on the input argument, so they accept any argument type, just like
+// the MNot and MTest instructions.
+//
+// Convert any scalar value into an appropriate SIMD lane value: An Int32 value
+// that is either 0 for false or -1 for true.
+MDefinition*
+IonBuilder::convertToBooleanSimdLane(MDefinition* scalar)
+{
+ MSub* result;
+
+ if (scalar->type() == MIRType::Boolean) {
+ // The input scalar is already a boolean with the int32 values 0 / 1.
+ // Compute result = 0 - scalar.
+ result = MSub::New(alloc(), constant(Int32Value(0)), scalar);
+ } else {
+ // For any other type, let MNot handle the conversion to boolean.
+ // Compute result = !scalar - 1.
+ MNot* inv = MNot::New(alloc(), scalar);
+ current->add(inv);
+ result = MSub::New(alloc(), inv, constant(Int32Value(1)));
+ }
+
+ result->setInt32Specialization();
+ current->add(result);
+ return result;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* descr)
+{
+ if (!JitSupportsSimd()) {
+ trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
+ return InliningStatus_NotInlined;
+ }
+
+    // Generic constructor of SIMD values.
+ MIRType simdType;
+ if (!MaybeSimdTypeToMIRType(descr->type(), &simdType)) {
+ trackOptimizationOutcome(TrackedOutcome::SimdTypeNotOptimized);
+ return InliningStatus_NotInlined;
+ }
+
+ // Take the templateObject out of Baseline ICs, such that we can box
+ // SIMD value type in the same kind of objects.
+ MOZ_ASSERT(size_t(descr->size(descr->type())) < InlineTypedObject::MaximumSize);
+ MOZ_ASSERT(descr->getClass() == &SimdTypeDescr::class_,
+ "getTemplateObjectForSimdCtor needs an update");
+
+ JSObject* templateObject = inspector->getTemplateObjectForSimdCtor(pc, descr->type());
+ if (!templateObject)
+ return InliningStatus_NotInlined;
+
+ // The previous assertion ensures this will never fail if we were able to
+ // allocate a templateObject in Baseline.
+ InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
+ MOZ_ASSERT(&inlineTypedObject->typeDescr() == descr);
+
+ // When there are missing arguments, provide a default value
+ // containing the coercion of 'undefined' to the right type.
+ MConstant* defVal = nullptr;
+ MIRType laneType = SimdTypeToLaneType(simdType);
+ unsigned lanes = SimdTypeToLength(simdType);
+ if (lanes != 4 || callInfo.argc() < lanes) {
+ if (laneType == MIRType::Int32 || laneType == MIRType::Boolean) {
+            // The default lane for a boolean vector is |false|, but
+            // |MSimdSplat|, |MSimdValueX4|, and |MSimdInsertElement| all
+            // require an Int32 argument with the value 0 or -1 to initialize a
+            // boolean lane. See also convertToBooleanSimdLane() which is
+            // idempotent with a 0 argument after constant folding.
+ defVal = constant(Int32Value(0));
+ } else if (laneType == MIRType::Double) {
+ defVal = constant(DoubleNaNValue());
+ } else {
+ MOZ_ASSERT(laneType == MIRType::Float32);
+ defVal = MConstant::NewFloat32(alloc(), JS::GenericNaN());
+ current->add(defVal);
+ }
+ }
+
+    MInstruction* values = nullptr;
+
+ // Use the MSimdValueX4 constructor for X4 vectors.
+ if (lanes == 4) {
+ MDefinition* lane[4];
+ for (unsigned i = 0; i < 4; i++)
+ lane[i] = callInfo.getArgWithDefault(i, defVal);
+
+ // Convert boolean lanes into Int32 0 / -1.
+ if (laneType == MIRType::Boolean) {
+ for (unsigned i = 0; i < 4; i++)
+ lane[i] = convertToBooleanSimdLane(lane[i]);
+ }
+
+ values = MSimdValueX4::New(alloc(), simdType, lane[0], lane[1], lane[2], lane[3]);
+ current->add(values);
+ } else {
+ // For general constructor calls, start from splat(defVal), insert one
+ // lane at a time.
+ values = MSimdSplat::New(alloc(), defVal, simdType);
+ current->add(values);
+
+ // Stop early if constructor doesn't have enough arguments. These lanes
+ // then get the default value.
+ if (callInfo.argc() < lanes)
+ lanes = callInfo.argc();
+
+ for (unsigned i = 0; i < lanes; i++) {
+ MDefinition* lane = callInfo.getArg(i);
+ if (laneType == MIRType::Boolean)
+ lane = convertToBooleanSimdLane(lane);
+ values = MSimdInsertElement::New(alloc(), values, lane, i);
+ current->add(values);
+ }
+ }
+
+ MSimdBox* obj = MSimdBox::New(alloc(), constraints(), values, inlineTypedObject, descr->type(),
+ inlineTypedObject->group()->initialHeap(constraints()));
+ current->add(obj);
+ current->push(obj);
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+bool
+IonBuilder::canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
+ InlineTypedObject** templateObj)
+{
+ if (callInfo.argc() != numArgs)
+ return false;
+
+ JSObject* templateObject = inspector->getTemplateObjectForNative(pc, native);
+ if (!templateObject)
+ return false;
+
+ *templateObj = &templateObject->as<InlineTypedObject>();
+ return true;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 1, &templateObj))
+ return InliningStatus_NotInlined;
+
+ // Unboxing checks the SIMD object type and throws a TypeError if it doesn't
+ // match type.
+    MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
+
+    // Create an unbox/box pair, expecting the box to be optimized away if
+    // anyone uses the return value from this check() call. This is what you
+    // want for code like this:
+ //
+ // function f(x) {
+ // x = Int32x4.check(x)
+ // for(...) {
+ // y = Int32x4.add(x, ...)
+ // }
+ //
+ // The unboxing of x happens as early as possible, and only once.
+ return boxSimd(callInfo, arg, templateObj);
+}
+
+// Given a value or object, insert a dynamic check that this is a SIMD object of
+// the required SimdType, and unbox it into the corresponding SIMD MIRType.
+//
+// This represents the standard type checking that all the SIMD operations
+// perform on their arguments.
+MDefinition*
+IonBuilder::unboxSimd(MDefinition* ins, SimdType type)
+{
+ // Trivial optimization: If ins is a MSimdBox of the same SIMD type, there
+ // is no way the unboxing could fail, and we can skip it altogether.
+ // This is the same thing MSimdUnbox::foldsTo() does, but we can save the
+ // memory allocation here.
+ if (ins->isSimdBox()) {
+ MSimdBox* box = ins->toSimdBox();
+ if (box->simdType() == type) {
+ MDefinition* value = box->input();
+ MOZ_ASSERT(value->type() == SimdTypeToMIRType(type));
+ return value;
+ }
+ }
+
+ MSimdUnbox* unbox = MSimdUnbox::New(alloc(), ins, type);
+ current->add(unbox);
+ return unbox;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj)
+{
+ SimdType simdType = templateObj->typeDescr().as<SimdTypeDescr>().type();
+ MSimdBox* obj = MSimdBox::New(alloc(), constraints(), ins, templateObj, simdType,
+ templateObj->group()->initialHeap(constraints()));
+
+ // In some cases, ins has already been added to current.
+ if (!ins->block() && ins->isInstruction())
+ current->add(ins->toInstruction());
+ current->add(obj);
+ current->push(obj);
+
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
+ MSimdBinaryArith::Operation op, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+ MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+
+ auto* ins = MSimdBinaryArith::AddLegalized(alloc(), current, lhs, rhs, op);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
+ MSimdBinaryBitwise::Operation op, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+ MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+
+ auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+// Inline a binary SIMD operation where both arguments are SIMD types.
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
+ MSimdBinarySaturating::Operation op, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+ MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+
+ MSimdBinarySaturating* ins =
+ MSimdBinarySaturating::New(alloc(), lhs, rhs, op, GetSimdSign(type));
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+// Inline a SIMD shiftByScalar operation.
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
+ SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* vec = unboxSimd(callInfo.getArg(0), type);
+
+ MInstruction* ins = MSimdShift::AddLegalized(alloc(), current, vec, callInfo.getArg(1), op);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdComp(CallInfo& callInfo, JSNative native, MSimdBinaryComp::Operation op,
+ SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+ MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+ MInstruction* ins =
+ MSimdBinaryComp::AddLegalized(alloc(), current, lhs, rhs, op, GetSimdSign(type));
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdUnary(CallInfo& callInfo, JSNative native, MSimdUnaryArith::Operation op,
+ SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 1, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
+
+ MSimdUnaryArith* ins = MSimdUnaryArith::New(alloc(), arg, op);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 1, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MIRType mirType = SimdTypeToMIRType(type);
+ MDefinition* arg = callInfo.getArg(0);
+
+ // Convert to 0 / -1 before splatting a boolean lane.
+ if (SimdTypeToLaneType(mirType) == MIRType::Boolean)
+ arg = convertToBooleanSimdLane(arg);
+
+ MSimdSplat* ins = MSimdSplat::New(alloc(), arg, mirType);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type)
+{
+ // extractLane() returns a scalar, so don't use canInlineSimd() which looks
+ // for a template object.
+ if (callInfo.argc() != 2 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ // Lane index.
+ MDefinition* arg = callInfo.getArg(1);
+ if (!arg->isConstant() || arg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+ unsigned lane = arg->toConstant()->toInt32();
+ if (lane >= GetSimdLanes(type))
+ return InliningStatus_NotInlined;
+
+ // Original vector.
+ MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
+ MIRType vecType = orig->type();
+ MIRType laneType = SimdTypeToLaneType(vecType);
+ SimdSign sign = GetSimdSign(type);
+
+ // An Uint32 lane can't be represented in MIRType::Int32. Get it as a double.
+ if (type == SimdType::Uint32x4)
+ laneType = MIRType::Double;
+
+ MSimdExtractElement* ins =
+ MSimdExtractElement::New(alloc(), orig, laneType, lane, sign);
+ current->add(ins);
+ current->push(ins);
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 3, &templateObj))
+ return InliningStatus_NotInlined;
+
+ // Lane index.
+ MDefinition* arg = callInfo.getArg(1);
+ if (!arg->isConstant() || arg->type() != MIRType::Int32)
+ return InliningStatus_NotInlined;
+
+ unsigned lane = arg->toConstant()->toInt32();
+ if (lane >= GetSimdLanes(type))
+ return InliningStatus_NotInlined;
+
+ // Original vector.
+ MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
+ MIRType vecType = orig->type();
+
+ // Convert to 0 / -1 before inserting a boolean lane.
+ MDefinition* value = callInfo.getArg(2);
+ if (SimdTypeToLaneType(vecType) == MIRType::Boolean)
+ value = convertToBooleanSimdLane(value);
+
+ MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), orig, value, lane);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+// Inline a SIMD conversion or bitcast. When isCast==false, one of the types
+// must be floating point and the other integer. In this case, sign indicates if
+// the integer lanes should be treated as signed or unsigned integers.
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast, SimdType fromType,
+ SimdType toType)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 1, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* arg = unboxSimd(callInfo.getArg(0), fromType);
+ MIRType mirType = SimdTypeToMIRType(toType);
+
+ MInstruction* ins;
+ if (isCast) {
+ // Signed/Unsigned doesn't matter for bitcasts.
+ ins = MSimdReinterpretCast::New(alloc(), arg, mirType);
+ } else {
+ // Exactly one of fromType, toType must be an integer type.
+ SimdSign sign = GetSimdSign(fromType);
+ if (sign == SimdSign::NotApplicable)
+ sign = GetSimdSign(toType);
+
+ // Possibly expand into multiple instructions.
+ ins = MSimdConvert::AddLegalized(alloc(), current, arg, mirType, sign);
+ }
+
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 3, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MDefinition* mask = unboxSimd(callInfo.getArg(0), GetBooleanSimdType(type));
+ MDefinition* tval = unboxSimd(callInfo.getArg(1), type);
+ MDefinition* fval = unboxSimd(callInfo.getArg(2), type);
+
+ MSimdSelect* ins = MSimdSelect::New(alloc(), mask, tval, fval);
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
+ unsigned numVectors)
+{
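+    // swizzle() passes a single input vector and shuffle() passes two; the
+    // remaining arguments select the lanes.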
+ unsigned numLanes = GetSimdLanes(type);
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, numVectors + numLanes, &templateObj))
+ return InliningStatus_NotInlined;
+
+ MIRType mirType = SimdTypeToMIRType(type);
+
+ MSimdGeneralShuffle* ins = MSimdGeneralShuffle::New(alloc(), numVectors, numLanes, mirType);
+
+ if (!ins->init(alloc()))
+ return InliningStatus_Error;
+
+ for (unsigned i = 0; i < numVectors; i++)
+ ins->setVector(i, unboxSimd(callInfo.getArg(i), type));
+ for (size_t i = 0; i < numLanes; i++)
+ ins->setLane(i, callInfo.getArg(numVectors + i));
+
+ return boxSimd(callInfo, ins, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
+ SimdType type)
+{
+ // anyTrue() / allTrue() return a scalar, so don't use canInlineSimd() which looks
+ // for a template object.
+ if (callInfo.argc() != 1 || callInfo.constructing()) {
+ trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+ return InliningStatus_NotInlined;
+ }
+
+ MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
+
+ MUnaryInstruction* ins;
+ if (IsAllTrue)
+ ins = MSimdAllTrue::New(alloc(), arg, MIRType::Boolean);
+ else
+ ins = MSimdAnyTrue::New(alloc(), arg, MIRType::Boolean);
+
+ current->add(ins);
+ current->push(ins);
+ callInfo.setImplicitlyUsedUnchecked();
+ return InliningStatus_Inlined;
+}
+
+// Get the typed array element type corresponding to the lanes in a SIMD vector type.
+// This only applies to SIMD types that can be loaded from and stored to a typed array.
+static Scalar::Type
+SimdTypeToArrayElementType(SimdType type)
+{
+ switch (type) {
+ case SimdType::Float32x4: return Scalar::Float32x4;
+ case SimdType::Int8x16:
+ case SimdType::Uint8x16: return Scalar::Int8x16;
+ case SimdType::Int16x8:
+ case SimdType::Uint16x8: return Scalar::Int16x8;
+ case SimdType::Int32x4:
+ case SimdType::Uint32x4: return Scalar::Int32x4;
+ default: MOZ_CRASH("unexpected simd type");
+ }
+}
+
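+// Prepare a SIMD load or store from a typed array: coerce the index to Int32,
+// fetch the elements pointer and length, and emit the two bounds checks needed
+// to cover all of the lanes being accessed. Returns false when the receiver is
+// not known to be a typed array element access that we can optimize.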
+bool
+IonBuilder::prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType, MInstruction** elements,
+ MDefinition** index, Scalar::Type* arrayType)
+{
+ MDefinition* array = callInfo.getArg(0);
+ *index = callInfo.getArg(1);
+
+ if (!ElementAccessIsTypedArray(constraints(), array, *index, arrayType))
+ return false;
+
+ MInstruction* indexAsInt32 = MToInt32::New(alloc(), *index);
+ current->add(indexAsInt32);
+ *index = indexAsInt32;
+
+ MDefinition* indexForBoundsCheck = *index;
+
+    // Artificially make sure the index is in bounds by adding the number of
+    // extra slots needed (e.g. when reading a 4-lane vector from a Float32Array
+    // we need to be in bounds for 4 slots, so add 3, etc.).
+ MOZ_ASSERT(Scalar::byteSize(simdType) % Scalar::byteSize(*arrayType) == 0);
+ int32_t suppSlotsNeeded = Scalar::byteSize(simdType) / Scalar::byteSize(*arrayType) - 1;
+ if (suppSlotsNeeded) {
+ MConstant* suppSlots = constant(Int32Value(suppSlotsNeeded));
+ MAdd* addedIndex = MAdd::New(alloc(), *index, suppSlots);
+        // We're fine even if the add overflows, as long as the generated code
+        // for the bounds check uses an unsigned comparison.
+ addedIndex->setInt32Specialization();
+ current->add(addedIndex);
+ indexForBoundsCheck = addedIndex;
+ }
+
+ MInstruction* length;
+ addTypedArrayLengthAndData(array, SkipBoundsCheck, index, &length, elements);
+
+ // It can be that the index is out of bounds, while the added index for the
+ // bounds check is in bounds, so we actually need two bounds checks here.
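+    // (For example, a negative index such as -2 fails the first check, while
+    // -2 + 3 == 1 could pass the second check on its own.)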
+ MInstruction* positiveCheck = MBoundsCheck::New(alloc(), *index, length);
+ current->add(positiveCheck);
+
+ MInstruction* fullCheck = MBoundsCheck::New(alloc(), indexForBoundsCheck, length);
+ current->add(fullCheck);
+ return true;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 2, &templateObj))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type elemType = SimdTypeToArrayElementType(type);
+
+ MDefinition* index = nullptr;
+ MInstruction* elements = nullptr;
+ Scalar::Type arrayType;
+ if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
+ return InliningStatus_NotInlined;
+
+ MLoadUnboxedScalar* load = MLoadUnboxedScalar::New(alloc(), elements, index, arrayType);
+ load->setResultType(SimdTypeToMIRType(type));
+ load->setSimdRead(elemType, numElems);
+
+ return boxSimd(callInfo, load, templateObj);
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
+{
+ InlineTypedObject* templateObj = nullptr;
+ if (!canInlineSimd(callInfo, native, 3, &templateObj))
+ return InliningStatus_NotInlined;
+
+ Scalar::Type elemType = SimdTypeToArrayElementType(type);
+
+ MDefinition* index = nullptr;
+ MInstruction* elements = nullptr;
+ Scalar::Type arrayType;
+ if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
+ return InliningStatus_NotInlined;
+
+ MDefinition* valueToWrite = unboxSimd(callInfo.getArg(2), type);
+ MStoreUnboxedScalar* store = MStoreUnboxedScalar::New(alloc(), elements, index,
+ valueToWrite, arrayType,
+ MStoreUnboxedScalar::TruncateInput);
+ store->setSimdWrite(elemType, numElems);
+
+ current->add(store);
+ // Produce the original boxed value as our return value.
+ // This is unlikely to be used, so don't bother reboxing valueToWrite.
+ current->push(callInfo.getArg(2));
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ if (!resumeAfter(store))
+ return InliningStatus_Error;
+
+ return InliningStatus_Inlined;
+}
+
+// Note that SIMD.cpp provides its own JSJitInfo objects for SIMD.foo.* functions.
+// The Simd* objects defined here represent SIMD.foo() constructor calls.
+// They are encoded with .nativeOp = 0, which is the sub-opcode within the SIMD type.
+static_assert(uint16_t(SimdOperation::Constructor) == 0, "Constructor opcode must be 0");
+
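+// As an illustration, for a hypothetical native Foo the macro below expands to:
+//   const JSJitInfo JitInfo_Foo { { nullptr }, { uint16_t(InlinableNative::Foo) },
+//                                 { 0 }, JSJitInfo::InlinableNative };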
+#define ADD_NATIVE(native) const JSJitInfo JitInfo_##native { \
+ { nullptr }, { uint16_t(InlinableNative::native) }, { 0 }, JSJitInfo::InlinableNative };
+ INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
new file mode 100644
index 000000000..287b87582
--- /dev/null
+++ b/js/src/jit/MIR.cpp
@@ -0,0 +1,6642 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MIR.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include <ctype.h>
+
+#include "jslibmath.h"
+#include "jsstr.h"
+
+#include "jit/AtomicOperations.h"
+#include "jit/BaselineInspector.h"
+#include "jit/IonBuilder.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGraph.h"
+#include "jit/RangeAnalysis.h"
+#include "js/Conversions.h"
+
+#include "jsatominlines.h"
+#include "jsboolinlines.h"
+#include "jsobjinlines.h"
+#include "jsscriptinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::ToInt32;
+
+using mozilla::CheckedInt;
+using mozilla::NumbersAreIdentical;
+using mozilla::IsFloat32Representable;
+using mozilla::IsNaN;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::DebugOnly;
+
+#ifdef DEBUG
+size_t MUse::index() const
+{
+ return consumer()->indexOf(this);
+}
+#endif
+
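+// Replace operand |Op| of |consumer| with an MToDouble conversion of |def|,
+// inserted into |consumer|'s block just before |consumer| itself.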
+template<size_t Op> static void
+ConvertDefinitionToDouble(TempAllocator& alloc, MDefinition* def, MInstruction* consumer)
+{
+ MInstruction* replace = MToDouble::New(alloc, def);
+ consumer->replaceOperand(Op, replace);
+ consumer->block()->insertBefore(consumer, replace);
+}
+
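+// Return true if every use of |ins| is a consumer that can accept a Float32
+// value directly.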
+static bool
+CheckUsesAreFloat32Consumers(const MInstruction* ins)
+{
+ bool allConsumerUses = true;
+ for (MUseDefIterator use(ins); allConsumerUses && use; use++)
+ allConsumerUses &= use.def()->canConsumeFloat32(use.use());
+ return allConsumerUses;
+}
+
+void
+MDefinition::PrintOpcodeName(GenericPrinter& out, MDefinition::Opcode op)
+{
+ static const char * const names[] =
+ {
+#define NAME(x) #x,
+ MIR_OPCODE_LIST(NAME)
+#undef NAME
+ };
+ const char* name = names[op];
+ size_t len = strlen(name);
+ for (size_t i = 0; i < len; i++)
+ out.printf("%c", tolower(name[i]));
+}
+
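+// If both operands of |ins| are constants, evaluate the operation and return
+// the folded result as a new (not yet inserted) MConstant; otherwise return
+// nullptr. For Int32 operations whose result is not an Int32, folding is
+// declined and *ptypeChange is set when provided.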
+static MConstant*
+EvaluateConstantOperands(TempAllocator& alloc, MBinaryInstruction* ins, bool* ptypeChange = nullptr)
+{
+ MDefinition* left = ins->getOperand(0);
+ MDefinition* right = ins->getOperand(1);
+
+ MOZ_ASSERT(IsTypeRepresentableAsDouble(left->type()));
+ MOZ_ASSERT(IsTypeRepresentableAsDouble(right->type()));
+
+ if (!left->isConstant() || !right->isConstant())
+ return nullptr;
+
+ MConstant* lhs = left->toConstant();
+ MConstant* rhs = right->toConstant();
+ double ret = JS::GenericNaN();
+
+ switch (ins->op()) {
+ case MDefinition::Op_BitAnd:
+ ret = double(lhs->toInt32() & rhs->toInt32());
+ break;
+ case MDefinition::Op_BitOr:
+ ret = double(lhs->toInt32() | rhs->toInt32());
+ break;
+ case MDefinition::Op_BitXor:
+ ret = double(lhs->toInt32() ^ rhs->toInt32());
+ break;
+ case MDefinition::Op_Lsh:
+ ret = double(uint32_t(lhs->toInt32()) << (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Op_Rsh:
+ ret = double(lhs->toInt32() >> (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Op_Ursh:
+ ret = double(uint32_t(lhs->toInt32()) >> (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Op_Add:
+ ret = lhs->numberToDouble() + rhs->numberToDouble();
+ break;
+ case MDefinition::Op_Sub:
+ ret = lhs->numberToDouble() - rhs->numberToDouble();
+ break;
+ case MDefinition::Op_Mul:
+ ret = lhs->numberToDouble() * rhs->numberToDouble();
+ break;
+ case MDefinition::Op_Div:
+ if (ins->toDiv()->isUnsigned()) {
+ if (rhs->isInt32(0)) {
+ if (ins->toDiv()->trapOnError())
+ return nullptr;
+ ret = 0.0;
+ } else {
+ ret = double(uint32_t(lhs->toInt32()) / uint32_t(rhs->toInt32()));
+ }
+ } else {
+ ret = NumberDiv(lhs->numberToDouble(), rhs->numberToDouble());
+ }
+ break;
+ case MDefinition::Op_Mod:
+ if (ins->toMod()->isUnsigned()) {
+ if (rhs->isInt32(0)) {
+ if (ins->toMod()->trapOnError())
+ return nullptr;
+ ret = 0.0;
+ } else {
+ ret = double(uint32_t(lhs->toInt32()) % uint32_t(rhs->toInt32()));
+ }
+ } else {
+ ret = NumberMod(lhs->numberToDouble(), rhs->numberToDouble());
+ }
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+
+ // For a float32 or double value, use the Raw* New so that we preserve NaN
+ // bits. This isn't strictly required for either ES or wasm, but it does
+ // avoid making constant-folding observable.
+ if (ins->type() == MIRType::Double)
+ return MConstant::New(alloc, wasm::RawF64(ret));
+ if (ins->type() == MIRType::Float32)
+ return MConstant::New(alloc, wasm::RawF32(float(ret)));
+
+ Value retVal;
+ retVal.setNumber(JS::CanonicalizeNaN(ret));
+
+ // If this was an int32 operation but the result isn't an int32 (for
+ // example, a division where the numerator isn't evenly divisible by the
+ // denominator), decline folding.
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ if (!retVal.isInt32()) {
+ if (ptypeChange)
+ *ptypeChange = true;
+ return nullptr;
+ }
+
+ return MConstant::New(alloc, retVal);
+}
+
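+// If |ins| is a floating-point division by a constant power of two, return an
+// equivalent multiplication by the exact reciprocal; otherwise return nullptr.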
+static MMul*
+EvaluateExactReciprocal(TempAllocator& alloc, MDiv* ins)
+{
+    // We should fold only when it is a floating-point operation.
+ if (!IsFloatingPointType(ins->type()))
+ return nullptr;
+
+ MDefinition* left = ins->getOperand(0);
+ MDefinition* right = ins->getOperand(1);
+
+ if (!right->isConstant())
+ return nullptr;
+
+ int32_t num;
+ if (!mozilla::NumberIsInt32(right->toConstant()->numberToDouble(), &num))
+ return nullptr;
+
+    // Check if rhs is a power of two.
+ if (mozilla::Abs(num) & (mozilla::Abs(num) - 1))
+ return nullptr;
+
+ Value ret;
+ ret.setDouble(1.0 / double(num));
+
+ MConstant* foldedRhs;
+ if (ins->type() == MIRType::Float32)
+ foldedRhs = MConstant::NewFloat32(alloc, ret.toDouble());
+ else
+ foldedRhs = MConstant::New(alloc, ret);
+
+ MOZ_ASSERT(foldedRhs->type() == ins->type());
+ ins->block()->insertBefore(ins, foldedRhs);
+
+ MMul* mul = MMul::New(alloc, left, foldedRhs, ins->type());
+ mul->setCommutative();
+ mul->setMustPreserveNaN(ins->mustPreserveNaN());
+ return mul;
+}
+
+void
+MDefinition::printName(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf("%u", id());
+}
+
+HashNumber
+MDefinition::addU32ToHash(HashNumber hash, uint32_t data)
+{
+ return data + (hash << 6) + (hash << 16) - hash;
+}
+
+HashNumber
+MDefinition::valueHash() const
+{
+ HashNumber out = op();
+ for (size_t i = 0, e = numOperands(); i < e; i++)
+ out = addU32ToHash(out, getOperand(i)->id());
+ if (MDefinition* dep = dependency())
+ out = addU32ToHash(out, dep->id());
+ return out;
+}
+
+bool
+MDefinition::congruentIfOperandsEqual(const MDefinition* ins) const
+{
+ if (op() != ins->op())
+ return false;
+
+ if (type() != ins->type())
+ return false;
+
+ if (isEffectful() || ins->isEffectful())
+ return false;
+
+ if (numOperands() != ins->numOperands())
+ return false;
+
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (getOperand(i) != ins->getOperand(i))
+ return false;
+ }
+
+ return true;
+}
+
+MDefinition*
+MDefinition::foldsTo(TempAllocator& alloc)
+{
+ // In the default case, there are no constants to fold.
+ return this;
+}
+
+bool
+MDefinition::mightBeMagicType() const
+{
+ if (IsMagicType(type()))
+ return true;
+
+ if (MIRType::Value != type())
+ return false;
+
+ return !resultTypeSet() || resultTypeSet()->hasType(TypeSet::MagicArgType());
+}
+
+MDefinition*
+MInstruction::foldsToStore(TempAllocator& alloc)
+{
+ if (!dependency())
+ return nullptr;
+
+ MDefinition* store = dependency();
+ if (mightAlias(store) != AliasType::MustAlias)
+ return nullptr;
+
+ if (!store->block()->dominates(block()))
+ return nullptr;
+
+ MDefinition* value;
+ switch (store->op()) {
+ case Op_StoreFixedSlot:
+ value = store->toStoreFixedSlot()->value();
+ break;
+ case Op_StoreSlot:
+ value = store->toStoreSlot()->value();
+ break;
+ case Op_StoreElement:
+ value = store->toStoreElement()->value();
+ break;
+ case Op_StoreUnboxedObjectOrNull:
+ value = store->toStoreUnboxedObjectOrNull()->value();
+ break;
+ default:
+ MOZ_CRASH("unknown store");
+ }
+
+    // If the types match, then we return the value which is used as the
+    // argument of the store.
+ if (value->type() != type()) {
+ // If we expect to read a type which is more generic than the type seen
+ // by the store, then we box the value used by the store.
+ if (type() != MIRType::Value)
+ return nullptr;
+ // We cannot unbox ObjectOrNull yet.
+ if (value->type() == MIRType::ObjectOrNull)
+ return nullptr;
+
+ MOZ_ASSERT(value->type() < MIRType::Value);
+ MBox* box = MBox::New(alloc, value);
+ value = box;
+ }
+
+ return value;
+}
+
+void
+MDefinition::analyzeEdgeCasesForward()
+{
+}
+
+void
+MDefinition::analyzeEdgeCasesBackward()
+{
+}
+
+void
+MInstruction::setResumePoint(MResumePoint* resumePoint)
+{
+ MOZ_ASSERT(!resumePoint_);
+ resumePoint_ = resumePoint;
+ resumePoint_->setInstruction(this);
+}
+
+void
+MInstruction::stealResumePoint(MInstruction* ins)
+{
+ MOZ_ASSERT(ins->resumePoint_->instruction() == ins);
+ resumePoint_ = ins->resumePoint_;
+ ins->resumePoint_ = nullptr;
+ resumePoint_->replaceInstruction(this);
+}
+
+void
+MInstruction::moveResumePointAsEntry()
+{
+ MOZ_ASSERT(isNop());
+ block()->clearEntryResumePoint();
+ block()->setEntryResumePoint(resumePoint_);
+ resumePoint_->resetInstruction();
+ resumePoint_ = nullptr;
+}
+
+void
+MInstruction::clearResumePoint()
+{
+ resumePoint_->resetInstruction();
+ block()->discardPreAllocatedResumePoint(resumePoint_);
+ resumePoint_ = nullptr;
+}
+
+bool
+MDefinition::maybeEmulatesUndefined(CompilerConstraintList* constraints)
+{
+ if (!mightBeType(MIRType::Object))
+ return false;
+
+ TemporaryTypeSet* types = resultTypeSet();
+ if (!types)
+ return true;
+
+ return types->maybeEmulatesUndefined(constraints);
+}
+
+static bool
+MaybeCallable(CompilerConstraintList* constraints, MDefinition* op)
+{
+ if (!op->mightBeType(MIRType::Object))
+ return false;
+
+ TemporaryTypeSet* types = op->resultTypeSet();
+ if (!types)
+ return true;
+
+ return types->maybeCallable(constraints);
+}
+
+/* static */ const char*
+AliasSet::Name(size_t flag)
+{
+ switch(flag) {
+ case 0: return "ObjectFields";
+ case 1: return "Element";
+ case 2: return "UnboxedElement";
+ case 3: return "DynamicSlot";
+ case 4: return "FixedSlot";
+ case 5: return "DOMProperty";
+ case 6: return "FrameArgument";
+ case 7: return "WasmGlobalVar";
+ case 8: return "WasmHeap";
+ case 9: return "TypedArrayLength";
+ default:
+ MOZ_CRASH("Unknown flag");
+ }
+}
+
+void
+MTest::cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints)
+{
+ MOZ_ASSERT(operandMightEmulateUndefined());
+
+ if (!getOperand(0)->maybeEmulatesUndefined(constraints))
+ markNoOperandEmulatesUndefined();
+}
+
+MDefinition*
+MTest::foldsDoubleNegation(TempAllocator& alloc)
+{
+ MDefinition* op = getOperand(0);
+
+ if (op->isNot()) {
+ // If the operand of the Not is itself a Not, they cancel out.
+ MDefinition* opop = op->getOperand(0);
+ if (opop->isNot())
+ return MTest::New(alloc, opop->toNot()->input(), ifTrue(), ifFalse());
+ return MTest::New(alloc, op->toNot()->input(), ifFalse(), ifTrue());
+ }
+ return nullptr;
+}
+
+MDefinition*
+MTest::foldsConstant(TempAllocator& alloc)
+{
+ MDefinition* op = getOperand(0);
+ if (MConstant* opConst = op->maybeConstantValue()) {
+ bool b;
+ if (opConst->valueToBoolean(&b))
+ return MGoto::New(alloc, b ? ifTrue() : ifFalse());
+ }
+ return nullptr;
+}
+
+MDefinition*
+MTest::foldsTypes(TempAllocator& alloc)
+{
+ MDefinition* op = getOperand(0);
+
+ switch (op->type()) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ return MGoto::New(alloc, ifFalse());
+ case MIRType::Symbol:
+ return MGoto::New(alloc, ifTrue());
+ case MIRType::Object:
+ if (!operandMightEmulateUndefined())
+ return MGoto::New(alloc, ifTrue());
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+MDefinition*
+MTest::foldsNeedlessControlFlow(TempAllocator& alloc)
+{
+ for (MInstructionIterator iter(ifTrue()->begin()), end(ifTrue()->end()); iter != end; ) {
+ MInstruction* ins = *iter++;
+ if (ins->isNop() || ins->isGoto())
+ continue;
+ if (ins->hasUses())
+ return nullptr;
+ if (!DeadIfUnused(ins))
+ return nullptr;
+ }
+
+ for (MInstructionIterator iter(ifFalse()->begin()), end(ifFalse()->end()); iter != end; ) {
+ MInstruction* ins = *iter++;
+ if (ins->isNop() || ins->isGoto())
+ continue;
+ if (ins->hasUses())
+ return nullptr;
+ if (!DeadIfUnused(ins))
+ return nullptr;
+ }
+
+ if (ifTrue()->numSuccessors() != 1 || ifFalse()->numSuccessors() != 1)
+ return nullptr;
+ if (ifTrue()->getSuccessor(0) != ifFalse()->getSuccessor(0))
+ return nullptr;
+
+ if (ifTrue()->successorWithPhis())
+ return nullptr;
+
+ return MGoto::New(alloc, ifTrue());
+}
+
+MDefinition*
+MTest::foldsTo(TempAllocator& alloc)
+{
+
+ if (MDefinition* def = foldsDoubleNegation(alloc))
+ return def;
+
+ if (MDefinition* def = foldsConstant(alloc))
+ return def;
+
+ if (MDefinition* def = foldsTypes(alloc))
+ return def;
+
+ if (MDefinition* def = foldsNeedlessControlFlow(alloc))
+ return def;
+
+ return this;
+}
+
+void
+MTest::filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
+ bool* filtersNull)
+{
+ MDefinition* ins = getOperand(0);
+ if (ins->isCompare()) {
+ ins->toCompare()->filtersUndefinedOrNull(trueBranch, subject, filtersUndefined, filtersNull);
+ return;
+ }
+
+ if (!trueBranch && ins->isNot()) {
+ *subject = ins->getOperand(0);
+ *filtersUndefined = *filtersNull = true;
+ return;
+ }
+
+ if (trueBranch) {
+ *subject = ins;
+ *filtersUndefined = *filtersNull = true;
+ return;
+ }
+
+ *filtersUndefined = *filtersNull = false;
+ *subject = nullptr;
+}
+
+void
+MDefinition::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ for (size_t j = 0, e = numOperands(); j < e; j++) {
+ out.printf(" ");
+ if (getUseFor(j)->hasProducer())
+ getOperand(j)->printName(out);
+ else
+ out.printf("(null)");
+ }
+}
+
+void
+MDefinition::dump(GenericPrinter& out) const
+{
+ printName(out);
+ out.printf(" = ");
+ printOpcode(out);
+ out.printf("\n");
+
+ if (isInstruction()) {
+ if (MResumePoint* resume = toInstruction()->resumePoint())
+ resume->dump(out);
+ }
+}
+
+void
+MDefinition::dump() const
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+void
+MDefinition::dumpLocation(GenericPrinter& out) const
+{
+ MResumePoint* rp = nullptr;
+ const char* linkWord = nullptr;
+ if (isInstruction() && toInstruction()->resumePoint()) {
+ rp = toInstruction()->resumePoint();
+ linkWord = "at";
+ } else {
+ rp = block()->entryResumePoint();
+ linkWord = "after";
+ }
+
+ while (rp) {
+ JSScript* script = rp->block()->info().script();
+ uint32_t lineno = PCToLineNumber(rp->block()->info().script(), rp->pc());
+ out.printf(" %s %s:%d\n", linkWord, script->filename(), lineno);
+ rp = rp->caller();
+ linkWord = "in";
+ }
+}
+
+void
+MDefinition::dumpLocation() const
+{
+ Fprinter out(stderr);
+ dumpLocation(out);
+ out.finish();
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+size_t
+MDefinition::useCount() const
+{
+ size_t count = 0;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++)
+ count++;
+ return count;
+}
+
+size_t
+MDefinition::defUseCount() const
+{
+ size_t count = 0;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++)
+ if ((*i)->consumer()->isDefinition())
+ count++;
+ return count;
+}
+#endif
+
+bool
+MDefinition::hasOneUse() const
+{
+ MUseIterator i(uses_.begin());
+ if (i == uses_.end())
+ return false;
+ i++;
+ return i == uses_.end();
+}
+
+bool
+MDefinition::hasOneDefUse() const
+{
+ bool hasOneDefUse = false;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ if (!(*i)->consumer()->isDefinition())
+ continue;
+
+        // We already have one definition use, so this is at least the second.
+ if (hasOneDefUse)
+ return false;
+
+ // We saw one definition. Loop to test if there is another.
+ hasOneDefUse = true;
+ }
+
+ return hasOneDefUse;
+}
+
+bool
+MDefinition::hasDefUses() const
+{
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ if ((*i)->consumer()->isDefinition())
+ return true;
+ }
+
+ return false;
+}
+
+bool
+MDefinition::hasLiveDefUses() const
+{
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ MNode* ins = (*i)->consumer();
+ if (ins->isDefinition()) {
+ if (!ins->toDefinition()->isRecoveredOnBailout())
+ return true;
+ } else {
+ MOZ_ASSERT(ins->isResumePoint());
+ if (!ins->toResumePoint()->isRecoverableOperand(*i))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void
+MDefinition::replaceAllUsesWith(MDefinition* dom)
+{
+ for (size_t i = 0, e = numOperands(); i < e; ++i)
+ getOperand(i)->setUseRemovedUnchecked();
+
+ justReplaceAllUsesWith(dom);
+}
+
+void
+MDefinition::justReplaceAllUsesWith(MDefinition* dom)
+{
+ MOZ_ASSERT(dom != nullptr);
+ MOZ_ASSERT(dom != this);
+
+    // Carry over the fact that this value has uses which are no longer
+    // inspectable within the graph.
+ if (isUseRemoved())
+ dom->setUseRemovedUnchecked();
+
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e; ++i)
+ i->setProducerUnchecked(dom);
+ dom->uses_.takeElements(uses_);
+}
+
+void
+MDefinition::justReplaceAllUsesWithExcept(MDefinition* dom)
+{
+ MOZ_ASSERT(dom != nullptr);
+ MOZ_ASSERT(dom != this);
+
+    // Carry over the fact that this value has uses which are no longer
+    // inspectable within the graph.
+ if (isUseRemoved())
+ dom->setUseRemovedUnchecked();
+
+ // Move all uses to new dom. Save the use of the dominating instruction.
+ MUse *exceptUse = nullptr;
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e; ++i) {
+ if (i->consumer() != dom) {
+ i->setProducerUnchecked(dom);
+ } else {
+ MOZ_ASSERT(!exceptUse);
+ exceptUse = *i;
+ }
+ }
+ dom->uses_.takeElements(uses_);
+
+ // Restore the use to the original definition.
+ dom->uses_.remove(exceptUse);
+ exceptUse->setProducerUnchecked(this);
+ uses_.pushFront(exceptUse);
+}
+
+bool
+MDefinition::optimizeOutAllUses(TempAllocator& alloc)
+{
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e;) {
+ MUse* use = *i++;
+ MConstant* constant = use->consumer()->block()->optimizedOutConstant(alloc);
+ if (!alloc.ensureBallast())
+ return false;
+
+ // Update the resume point operand to use the optimized-out constant.
+ use->setProducerUnchecked(constant);
+ constant->addUseUnchecked(use);
+ }
+
+ // Remove dangling pointers.
+ this->uses_.clear();
+ return true;
+}
+
+void
+MDefinition::replaceAllLiveUsesWith(MDefinition* dom)
+{
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e; ) {
+ MUse* use = *i++;
+ MNode* consumer = use->consumer();
+ if (consumer->isResumePoint())
+ continue;
+ if (consumer->isDefinition() && consumer->toDefinition()->isRecoveredOnBailout())
+ continue;
+
+ // Update the operand to use the dominating definition.
+ use->replaceProducer(dom);
+ }
+}
+
+bool
+MDefinition::emptyResultTypeSet() const
+{
+ return resultTypeSet() && resultTypeSet()->empty();
+}
+
+MConstant*
+MConstant::New(TempAllocator& alloc, const Value& v, CompilerConstraintList* constraints)
+{
+ return new(alloc) MConstant(v, constraints);
+}
+
+MConstant*
+MConstant::New(TempAllocator::Fallible alloc, const Value& v, CompilerConstraintList* constraints)
+{
+ return new(alloc) MConstant(v, constraints);
+}
+
+MConstant*
+MConstant::NewFloat32(TempAllocator& alloc, double d)
+{
+ MOZ_ASSERT(IsNaN(d) || d == double(float(d)));
+ return new(alloc) MConstant(float(d));
+}
+
+MConstant*
+MConstant::New(TempAllocator& alloc, wasm::RawF32 f)
+{
+ auto* c = new(alloc) MConstant(Int32Value(f.bits()), nullptr);
+ c->setResultType(MIRType::Float32);
+ return c;
+}
+
+MConstant*
+MConstant::New(TempAllocator& alloc, wasm::RawF64 d)
+{
+ auto* c = new(alloc) MConstant(int64_t(d.bits()));
+ c->setResultType(MIRType::Double);
+ return c;
+}
+
+MConstant*
+MConstant::NewInt64(TempAllocator& alloc, int64_t i)
+{
+ return new(alloc) MConstant(i);
+}
+
+MConstant*
+MConstant::New(TempAllocator& alloc, const Value& v, MIRType type)
+{
+ if (type == MIRType::Float32)
+ return NewFloat32(alloc, v.toNumber());
+ MConstant* res = New(alloc, v);
+ MOZ_ASSERT(res->type() == type);
+ return res;
+}
+
+MConstant*
+MConstant::NewConstraintlessObject(TempAllocator& alloc, JSObject* v)
+{
+ return new(alloc) MConstant(v);
+}
+
+static TemporaryTypeSet*
+MakeSingletonTypeSetFromKey(CompilerConstraintList* constraints, TypeSet::ObjectKey* key)
+{
+    // Invalidate when this object's ObjectGroup gets unknown properties. This
+    // happens for instance when we mutate an object's __proto__; in that case
+    // we want to invalidate and mark this TypeSet as containing AnyObject
+    // (because mutating __proto__ will change an object's ObjectGroup).
+ MOZ_ASSERT(constraints);
+ (void)key->hasStableClassAndProto(constraints);
+
+ LifoAlloc* alloc = GetJitContext()->temp->lifoAlloc();
+ return alloc->new_<TemporaryTypeSet>(alloc, TypeSet::ObjectType(key));
+}
+
+TemporaryTypeSet*
+jit::MakeSingletonTypeSet(CompilerConstraintList* constraints, JSObject* obj)
+{
+ return MakeSingletonTypeSetFromKey(constraints, TypeSet::ObjectKey::get(obj));
+}
+
+TemporaryTypeSet*
+jit::MakeSingletonTypeSet(CompilerConstraintList* constraints, ObjectGroup* obj)
+{
+ return MakeSingletonTypeSetFromKey(constraints, TypeSet::ObjectKey::get(obj));
+}
+
+static TemporaryTypeSet*
+MakeUnknownTypeSet()
+{
+ LifoAlloc* alloc = GetJitContext()->temp->lifoAlloc();
+ return alloc->new_<TemporaryTypeSet>(alloc, TypeSet::UnknownType());
+}
+
+#ifdef DEBUG
+
+bool
+jit::IonCompilationCanUseNurseryPointers()
+{
+ // If we are doing backend compilation, which could occur on a helper
+ // thread but might actually be on the main thread, check the flag set on
+ // the PerThreadData by AutoEnterIonCompilation.
+ if (CurrentThreadIsIonCompiling())
+ return !CurrentThreadIsIonCompilingSafeForMinorGC();
+
+ // Otherwise, we must be on the main thread during MIR construction. The
+ // store buffer must have been notified that minor GCs must cancel pending
+ // or in progress Ion compilations.
+ JSRuntime* rt = TlsPerThreadData.get()->runtimeFromMainThread();
+ return rt->gc.storeBuffer.cancelIonCompilations();
+}
+
+#endif // DEBUG
+
+MConstant::MConstant(const js::Value& vp, CompilerConstraintList* constraints)
+{
+ setResultType(MIRTypeFromValue(vp));
+
+ MOZ_ASSERT(payload_.asBits == 0);
+
+ switch (type()) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ break;
+ case MIRType::Boolean:
+ payload_.b = vp.toBoolean();
+ break;
+ case MIRType::Int32:
+ payload_.i32 = vp.toInt32();
+ break;
+ case MIRType::Double:
+ payload_.d = vp.toDouble();
+ break;
+ case MIRType::String:
+ MOZ_ASSERT(vp.toString()->isAtom());
+ payload_.str = vp.toString();
+ break;
+ case MIRType::Symbol:
+ payload_.sym = vp.toSymbol();
+ break;
+ case MIRType::Object:
+ payload_.obj = &vp.toObject();
+ // Create a singleton type set for the object. This isn't necessary for
+ // other types as the result type encodes all needed information.
+ MOZ_ASSERT_IF(IsInsideNursery(&vp.toObject()), IonCompilationCanUseNurseryPointers());
+ setResultTypeSet(MakeSingletonTypeSet(constraints, &vp.toObject()));
+ break;
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicHole:
+ case MIRType::MagicIsConstructing:
+ break;
+ case MIRType::MagicUninitializedLexical:
+ // JS_UNINITIALIZED_LEXICAL does not escape to script and is not
+ // observed in type sets. However, it may flow around freely during
+ // Ion compilation. Give it an unknown typeset to poison any type sets
+ // it merges with.
+ //
+ // TODO We could track uninitialized lexicals more precisely by tracking
+ // them in type sets.
+ setResultTypeSet(MakeUnknownTypeSet());
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ setMovable();
+}
+
+MConstant::MConstant(JSObject* obj)
+{
+ MOZ_ASSERT_IF(IsInsideNursery(obj), IonCompilationCanUseNurseryPointers());
+ setResultType(MIRType::Object);
+ payload_.obj = obj;
+ setMovable();
+}
+
+MConstant::MConstant(float f)
+{
+ setResultType(MIRType::Float32);
+ payload_.f = f;
+ setMovable();
+}
+
+MConstant::MConstant(double d)
+{
+ setResultType(MIRType::Double);
+ payload_.d = d;
+ setMovable();
+}
+
+MConstant::MConstant(int64_t i)
+{
+ setResultType(MIRType::Int64);
+ payload_.i64 = i;
+ setMovable();
+}
+
+#ifdef DEBUG
+void
+MConstant::assertInitializedPayload() const
+{
+ // valueHash() and equals() expect the unused payload bits to be
+ // initialized to zero. Assert this in debug builds.
+
+ switch (type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ MOZ_ASSERT((payload_.asBits >> 32) == 0);
+ break;
+ case MIRType::Boolean:
+ MOZ_ASSERT((payload_.asBits >> 1) == 0);
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ break;
+ case MIRType::String:
+ case MIRType::Object:
+ case MIRType::Symbol:
+ MOZ_ASSERT_IF(JS_BITS_PER_WORD == 32, (payload_.asBits >> 32) == 0);
+ break;
+ default:
+ MOZ_ASSERT(IsNullOrUndefined(type()) || IsMagicType(type()));
+ MOZ_ASSERT(payload_.asBits == 0);
+ break;
+ }
+}
+#endif
+
+HashNumber
+MConstant::valueHash() const
+{
+ static_assert(sizeof(Payload) == sizeof(uint64_t),
+ "Code below assumes payload fits in 64 bits");
+
+ assertInitializedPayload();
+
+ // Build a 64-bit value holding both the payload and the type.
+ static const size_t TypeBits = 8;
+ static const size_t TypeShift = 64 - TypeBits;
+ MOZ_ASSERT(uintptr_t(type()) <= (1 << TypeBits) - 1);
+ uint64_t bits = (uint64_t(type()) << TypeShift) ^ payload_.asBits;
+
+    // Fold all 64 bits into the 32-bit result. It's tempting to just discard
+    // half of the bits, as this is just a hash; however, there are many common
+    // patterns of values where only the low or the high bits vary, so
+    // discarding either half would lead to excessive hash collisions.
+ return (HashNumber)bits ^ (HashNumber)(bits >> 32);
+}
+
+bool
+MConstant::congruentTo(const MDefinition* ins) const
+{
+ return ins->isConstant() && equals(ins->toConstant());
+}
+
+void
+MConstant::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ switch (type()) {
+ case MIRType::Undefined:
+ out.printf("undefined");
+ break;
+ case MIRType::Null:
+ out.printf("null");
+ break;
+ case MIRType::Boolean:
+ out.printf(toBoolean() ? "true" : "false");
+ break;
+ case MIRType::Int32:
+ out.printf("0x%x", toInt32());
+ break;
+ case MIRType::Int64:
+ out.printf("0x%" PRIx64, toInt64());
+ break;
+ case MIRType::Double:
+ out.printf("%.16g", toDouble());
+ break;
+ case MIRType::Float32:
+ {
+ float val = toFloat32();
+ out.printf("%.16g", val);
+ break;
+ }
+ case MIRType::Object:
+ if (toObject().is<JSFunction>()) {
+ JSFunction* fun = &toObject().as<JSFunction>();
+ if (fun->displayAtom()) {
+ out.put("function ");
+ EscapedStringPrinter(out, fun->displayAtom(), 0);
+ } else {
+ out.put("unnamed function");
+ }
+ if (fun->hasScript()) {
+ JSScript* script = fun->nonLazyScript();
+ out.printf(" (%s:%" PRIuSIZE ")",
+ script->filename() ? script->filename() : "", script->lineno());
+ }
+ out.printf(" at %p", (void*) fun);
+ break;
+ }
+ out.printf("object %p (%s)", (void*)&toObject(), toObject().getClass()->name);
+ break;
+ case MIRType::Symbol:
+ out.printf("symbol at %p", (void*)toSymbol());
+ break;
+ case MIRType::String:
+ out.printf("string %p", (void*)toString());
+ break;
+ case MIRType::MagicOptimizedArguments:
+ out.printf("magic lazyargs");
+ break;
+ case MIRType::MagicHole:
+ out.printf("magic hole");
+ break;
+ case MIRType::MagicIsConstructing:
+ out.printf("magic is-constructing");
+ break;
+ case MIRType::MagicOptimizedOut:
+ out.printf("magic optimized-out");
+ break;
+ case MIRType::MagicUninitializedLexical:
+ out.printf("magic uninitialized-lexical");
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+bool
+MConstant::canProduceFloat32() const
+{
+ if (!isTypeRepresentableAsDouble())
+ return false;
+
+ if (type() == MIRType::Int32)
+ return IsFloat32Representable(static_cast<double>(toInt32()));
+ if (type() == MIRType::Double)
+ return IsFloat32Representable(toDouble());
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return true;
+}
+
+Value
+MConstant::toJSValue() const
+{
+ // Wasm has types like int64 that cannot be stored as js::Value. It also
+ // doesn't want the NaN canonicalization enforced by js::Value.
+ MOZ_ASSERT(!IsCompilingWasm());
+
+ switch (type()) {
+ case MIRType::Undefined:
+ return UndefinedValue();
+ case MIRType::Null:
+ return NullValue();
+ case MIRType::Boolean:
+ return BooleanValue(toBoolean());
+ case MIRType::Int32:
+ return Int32Value(toInt32());
+ case MIRType::Double:
+ return DoubleValue(toDouble());
+ case MIRType::Float32:
+ return Float32Value(toFloat32());
+ case MIRType::String:
+ return StringValue(toString());
+ case MIRType::Symbol:
+ return SymbolValue(toSymbol());
+ case MIRType::Object:
+ return ObjectValue(toObject());
+ case MIRType::MagicOptimizedArguments:
+ return MagicValue(JS_OPTIMIZED_ARGUMENTS);
+ case MIRType::MagicOptimizedOut:
+ return MagicValue(JS_OPTIMIZED_OUT);
+ case MIRType::MagicHole:
+ return MagicValue(JS_ELEMENTS_HOLE);
+ case MIRType::MagicIsConstructing:
+ return MagicValue(JS_IS_CONSTRUCTING);
+ case MIRType::MagicUninitializedLexical:
+ return MagicValue(JS_UNINITIALIZED_LEXICAL);
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+}
+
+bool
+MConstant::valueToBoolean(bool* res) const
+{
+ switch (type()) {
+ case MIRType::Boolean:
+ *res = toBoolean();
+ return true;
+ case MIRType::Int32:
+ *res = toInt32() != 0;
+ return true;
+ case MIRType::Int64:
+ *res = toInt64() != 0;
+ return true;
+ case MIRType::Double:
+ *res = !mozilla::IsNaN(toDouble()) && toDouble() != 0.0;
+ return true;
+ case MIRType::Float32:
+ *res = !mozilla::IsNaN(toFloat32()) && toFloat32() != 0.0f;
+ return true;
+ case MIRType::Null:
+ case MIRType::Undefined:
+ *res = false;
+ return true;
+ case MIRType::Symbol:
+ *res = true;
+ return true;
+ case MIRType::String:
+ *res = toString()->length() != 0;
+ return true;
+ case MIRType::Object:
+ *res = !EmulatesUndefined(&toObject());
+ return true;
+ default:
+ MOZ_ASSERT(IsMagicType(type()));
+ return false;
+ }
+}
+
+MDefinition*
+MSimdValueX4::foldsTo(TempAllocator& alloc)
+{
+#ifdef DEBUG
+ MIRType laneType = SimdTypeToLaneArgumentType(type());
+#endif
+ bool allConstants = true;
+ bool allSame = true;
+
+ for (size_t i = 0; i < 4; ++i) {
+ MDefinition* op = getOperand(i);
+ MOZ_ASSERT(op->type() == laneType);
+ if (!op->isConstant())
+ allConstants = false;
+ if (i > 0 && op != getOperand(i - 1))
+ allSame = false;
+ }
+
+ if (!allConstants && !allSame)
+ return this;
+
+ if (allConstants) {
+ SimdConstant cst;
+ switch (type()) {
+ case MIRType::Bool32x4: {
+ int32_t a[4];
+ for (size_t i = 0; i < 4; ++i)
+ a[i] = getOperand(i)->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+ cst = SimdConstant::CreateX4(a);
+ break;
+ }
+ case MIRType::Int32x4: {
+ int32_t a[4];
+ for (size_t i = 0; i < 4; ++i)
+ a[i] = getOperand(i)->toConstant()->toInt32();
+ cst = SimdConstant::CreateX4(a);
+ break;
+ }
+ case MIRType::Float32x4: {
+ float a[4];
+ for (size_t i = 0; i < 4; ++i)
+ a[i] = getOperand(i)->toConstant()->numberToDouble();
+ cst = SimdConstant::CreateX4(a);
+ break;
+ }
+ default: MOZ_CRASH("unexpected type in MSimdValueX4::foldsTo");
+ }
+
+ return MSimdConstant::New(alloc, cst, type());
+ }
+
+ MOZ_ASSERT(allSame);
+ return MSimdSplat::New(alloc, getOperand(0), type());
+}
+
+MDefinition*
+MSimdSplat::foldsTo(TempAllocator& alloc)
+{
+#ifdef DEBUG
+ MIRType laneType = SimdTypeToLaneArgumentType(type());
+#endif
+ MDefinition* op = getOperand(0);
+ if (!op->isConstant())
+ return this;
+ MOZ_ASSERT(op->type() == laneType);
+
+ SimdConstant cst;
+ switch (type()) {
+ case MIRType::Bool8x16: {
+ int8_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+ cst = SimdConstant::SplatX16(v);
+ break;
+ }
+ case MIRType::Bool16x8: {
+ int16_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+ cst = SimdConstant::SplatX8(v);
+ break;
+ }
+ case MIRType::Bool32x4: {
+ int32_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+ cst = SimdConstant::SplatX4(v);
+ break;
+ }
+ case MIRType::Int8x16: {
+ int32_t v = op->toConstant()->toInt32();
+ cst = SimdConstant::SplatX16(v);
+ break;
+ }
+ case MIRType::Int16x8: {
+ int32_t v = op->toConstant()->toInt32();
+ cst = SimdConstant::SplatX8(v);
+ break;
+ }
+ case MIRType::Int32x4: {
+ int32_t v = op->toConstant()->toInt32();
+ cst = SimdConstant::SplatX4(v);
+ break;
+ }
+ case MIRType::Float32x4: {
+ float v = op->toConstant()->numberToDouble();
+ cst = SimdConstant::SplatX4(v);
+ break;
+ }
+ default: MOZ_CRASH("unexpected type in MSimdSplat::foldsTo");
+ }
+
+ return MSimdConstant::New(alloc, cst, type());
+}
+
+MDefinition*
+MSimdUnbox::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* in = input();
+
+ if (in->isSimdBox()) {
+ MSimdBox* box = in->toSimdBox();
+        // If the operand is an MSimdBox, then we just reuse the operand of the
+        // MSimdBox as long as the type corresponds to what we are supposed to
+        // unbox.
+ in = box->input();
+ if (box->simdType() != simdType())
+ return this;
+ MOZ_ASSERT(in->type() == type());
+ return in;
+ }
+
+ return this;
+}
+
+MDefinition*
+MSimdSwizzle::foldsTo(TempAllocator& alloc)
+{
+ if (lanesMatch(0, 1, 2, 3))
+ return input();
+ return this;
+}
+
+MDefinition*
+MSimdGeneralShuffle::foldsTo(TempAllocator& alloc)
+{
+ FixedList<uint8_t> lanes;
+ if (!lanes.init(alloc, numLanes()))
+ return this;
+
+ for (size_t i = 0; i < numLanes(); i++) {
+ if (!lane(i)->isConstant() || lane(i)->type() != MIRType::Int32)
+ return this;
+ int32_t temp = lane(i)->toConstant()->toInt32();
+ if (temp < 0 || unsigned(temp) >= numLanes() * numVectors())
+ return this;
+ lanes[i] = uint8_t(temp);
+ }
+
+ if (numVectors() == 1)
+ return MSimdSwizzle::New(alloc, vector(0), lanes.data());
+
+ MOZ_ASSERT(numVectors() == 2);
+ return MSimdShuffle::New(alloc, vector(0), vector(1), lanes.data());
+}
+
+MInstruction*
+MSimdConvert::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
+ MIRType toType, SimdSign sign, wasm::TrapOffset trapOffset)
+{
+ MIRType fromType = obj->type();
+
+ if (SupportsUint32x4FloatConversions || sign != SimdSign::Unsigned) {
+ MInstruction* ins = New(alloc, obj, toType, sign, trapOffset);
+ addTo->add(ins);
+ return ins;
+ }
+
+ // This architecture can't do Uint32x4 <-> Float32x4 conversions (Hi SSE!)
+ MOZ_ASSERT(sign == SimdSign::Unsigned);
+ if (fromType == MIRType::Int32x4 && toType == MIRType::Float32x4) {
+ // Converting Uint32x4 -> Float32x4. This algorithm is from LLVM.
+ //
+ // Split the input number into high and low parts:
+ //
+ // uint32_t hi = x >> 16;
+ // uint32_t lo = x & 0xffff;
+ //
+ // Insert these parts as the low mantissa bits in a float32 number with
+ // the corresponding exponent:
+ //
+ // float fhi = (bits-as-float)(hi | 0x53000000); // 0x1.0p39f + hi*2^16
+ // float flo = (bits-as-float)(lo | 0x4b000000); // 0x1.0p23f + lo
+ //
+ // Subtract the bias from the hi part:
+ //
+ // fhi -= (0x1.0p39 + 0x1.0p23) // hi*2^16 - 0x1.0p23
+ //
+ // And finally combine:
+ //
+ // result = flo + fhi // lo + hi*2^16.
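+        //
+        // A worked example (illustrative only): for the lane value x = 0x80000100,
+        //   hi = 0x8000, lo = 0x0100,
+        //   fhi = 0x1.0p39f + 0x8000 * 2^16, flo = 0x1.0p23f + 0x100,
+        //   fhi -= (0x1.0p39 + 0x1.0p23)  =>  fhi = 0x8000 * 2^16 - 0x1.0p23f,
+        //   flo + fhi = 0x80000100 = 2147483904.0f, which is exactly representable.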
+
+ // Compute hi = obj >> 16 (lane-wise unsigned shift).
+ MInstruction* c16 = MConstant::New(alloc, Int32Value(16));
+ addTo->add(c16);
+ MInstruction* hi = MSimdShift::AddLegalized(alloc, addTo, obj, c16, MSimdShift::ursh);
+
+ // Compute lo = obj & 0xffff (lane-wise).
+ MInstruction* m16 =
+ MSimdConstant::New(alloc, SimdConstant::SplatX4(0xffff), MIRType::Int32x4);
+ addTo->add(m16);
+ MInstruction* lo = MSimdBinaryBitwise::New(alloc, obj, m16, MSimdBinaryBitwise::and_);
+ addTo->add(lo);
+
+ // Mix in the exponents.
+ MInstruction* exphi =
+ MSimdConstant::New(alloc, SimdConstant::SplatX4(0x53000000), MIRType::Int32x4);
+ addTo->add(exphi);
+ MInstruction* mhi = MSimdBinaryBitwise::New(alloc, hi, exphi, MSimdBinaryBitwise::or_);
+ addTo->add(mhi);
+ MInstruction* explo =
+ MSimdConstant::New(alloc, SimdConstant::SplatX4(0x4b000000), MIRType::Int32x4);
+ addTo->add(explo);
+ MInstruction* mlo = MSimdBinaryBitwise::New(alloc, lo, explo, MSimdBinaryBitwise::or_);
+ addTo->add(mlo);
+
+ // Bit-cast both to Float32x4.
+ MInstruction* fhi = MSimdReinterpretCast::New(alloc, mhi, MIRType::Float32x4);
+ addTo->add(fhi);
+ MInstruction* flo = MSimdReinterpretCast::New(alloc, mlo, MIRType::Float32x4);
+ addTo->add(flo);
+
+ // Subtract out the bias: 0x1.0p39f + 0x1.0p23f.
+ // MSVC doesn't support the hexadecimal float syntax.
+ const float BiasValue = 549755813888.f + 8388608.f;
+ MInstruction* bias =
+ MSimdConstant::New(alloc, SimdConstant::SplatX4(BiasValue), MIRType::Float32x4);
+ addTo->add(bias);
+ MInstruction* fhi_debiased =
+ MSimdBinaryArith::AddLegalized(alloc, addTo, fhi, bias, MSimdBinaryArith::Op_sub);
+
+ // Compute the final result.
+ return MSimdBinaryArith::AddLegalized(alloc, addTo, fhi_debiased, flo,
+ MSimdBinaryArith::Op_add);
+ }
+
+ if (fromType == MIRType::Float32x4 && toType == MIRType::Int32x4) {
+ // The Float32x4 -> Uint32x4 conversion can throw if the input is out of
+ // range. This is handled by the LFloat32x4ToUint32x4 expansion.
+ MInstruction* ins = New(alloc, obj, toType, sign, trapOffset);
+ addTo->add(ins);
+ return ins;
+ }
+
+ MOZ_CRASH("Unhandled SIMD type conversion");
+}
+
+MInstruction*
+MSimdBinaryComp::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op, SimdSign sign)
+{
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType opType = left->type();
+ MOZ_ASSERT(IsSimdType(opType));
+ bool IsEquality = op == equal || op == notEqual;
+
+ // Check if this is an unsupported unsigned compare that needs to be biased.
+ // If so, put the bias vector in `bias`.
+ if (sign == SimdSign::Unsigned && !IsEquality) {
+ MInstruction* bias = nullptr;
+
+        // This is an order comparison of unsigned integer vectors, which is not
+        // supported on this target. Simply bias |left| and |right| by the
+        // minimum signed value of the lane type, then do a signed comparison.
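+        // (For Uint32x4, for example, adding the bias 0x80000000 maps lane value 0
+        // to INT32_MIN and 0xffffffff to INT32_MAX, so the unsigned order of the
+        // original lanes matches the signed order of the biased lanes.)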
+ if (!SupportsUint32x4Compares && opType == MIRType::Int32x4)
+ bias = MSimdConstant::New(alloc, SimdConstant::SplatX4(int32_t(0x80000000)), opType);
+ else if (!SupportsUint16x8Compares && opType == MIRType::Int16x8)
+ bias = MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x8000)), opType);
+        else if (!SupportsUint8x16Compares && opType == MIRType::Int8x16)
+ bias = MSimdConstant::New(alloc, SimdConstant::SplatX16(int8_t(0x80)), opType);
+
+ if (bias) {
+ addTo->add(bias);
+
+ // Add the bias.
+ MInstruction* bleft =
+ MSimdBinaryArith::AddLegalized(alloc, addTo, left, bias, MSimdBinaryArith::Op_add);
+ MInstruction* bright =
+ MSimdBinaryArith::AddLegalized(alloc, addTo, right, bias, MSimdBinaryArith::Op_add);
+
+ // Do the equivalent signed comparison.
+ MInstruction* result =
+ MSimdBinaryComp::New(alloc, bleft, bright, op, SimdSign::Signed);
+ addTo->add(result);
+
+ return result;
+ }
+ }
+
+ if (sign == SimdSign::Unsigned &&
+ ((!SupportsUint32x4Compares && opType == MIRType::Int32x4) ||
+ (!SupportsUint16x8Compares && opType == MIRType::Int16x8) ||
+ (!SupportsUint8x16Compares && opType == MIRType::Int8x16))) {
+ // The sign doesn't matter for equality tests. Flip it to make the
+ // backend assertions happy.
+ MOZ_ASSERT(IsEquality);
+ sign = SimdSign::Signed;
+ }
+
+ // This is a legal operation already. Just create the instruction requested.
+ MInstruction* result = MSimdBinaryComp::New(alloc, left, right, op, sign);
+ addTo->add(result);
+ return result;
+}
+
+MInstruction*
+MSimdBinaryArith::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op)
+{
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType opType = left->type();
+ MOZ_ASSERT(IsSimdType(opType));
+
+ // SSE does not have 8x16 multiply instructions.
+ if (opType == MIRType::Int8x16 && op == Op_mul) {
+ // Express the multiply in terms of Int16x8 multiplies by handling the
+ // even and odd lanes separately.
+
+ MInstruction* wideL = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
+ addTo->add(wideL);
+ MInstruction* wideR = MSimdReinterpretCast::New(alloc, right, MIRType::Int16x8);
+ addTo->add(wideR);
+
+ // wideL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+ // wideR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
+
+ // Shift the odd lanes down to the low bits of the 16x8 vectors.
+ MInstruction* eight = MConstant::New(alloc, Int32Value(8));
+ addTo->add(eight);
+ MInstruction* evenL = wideL;
+ MInstruction* evenR = wideR;
+ MInstruction* oddL =
+ MSimdShift::AddLegalized(alloc, addTo, wideL, eight, MSimdShift::ursh);
+ MInstruction* oddR =
+ MSimdShift::AddLegalized(alloc, addTo, wideR, eight, MSimdShift::ursh);
+
+ // evenL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+ // evenR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
+ // oddL = 00yy 00yy 00yy 00yy 00yy 00yy 00yy 00yy
+ // oddR = 00bb 00bb 00bb 00bb 00bb 00bb 00bb 00bb
+
+ // Now do two 16x8 multiplications. We can use the low bits of each.
+ MInstruction* even = MSimdBinaryArith::AddLegalized(alloc, addTo, evenL, evenR, Op_mul);
+ MInstruction* odd = MSimdBinaryArith::AddLegalized(alloc, addTo, oddL, oddR, Op_mul);
+
+ // even = ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP
+ // odd = ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ
+
+ MInstruction* mask =
+ MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x00ff)), MIRType::Int16x8);
+ addTo->add(mask);
+ even = MSimdBinaryBitwise::New(alloc, even, mask, MSimdBinaryBitwise::and_);
+ addTo->add(even);
+ odd = MSimdShift::AddLegalized(alloc, addTo, odd, eight, MSimdShift::lsh);
+
+ // even = 00PP 00PP 00PP 00PP 00PP 00PP 00PP 00PP
+ // odd = QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00
+
+ // Combine:
+ MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
+ addTo->add(result);
+ result = MSimdReinterpretCast::New(alloc, result, opType);
+ addTo->add(result);
+ return result;
+ }
+
+ // This is a legal operation already. Just create the instruction requested.
+ MInstruction* result = MSimdBinaryArith::New(alloc, left, right, op);
+ addTo->add(result);
+ return result;
+}
+
+MInstruction*
+MSimdShift::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op)
+{
+ MIRType opType = left->type();
+ MOZ_ASSERT(IsIntegerSimdType(opType));
+
+ // SSE does not provide 8x16 shift instructions.
+ if (opType == MIRType::Int8x16) {
+        // Express the shift in terms of Int16x8 shifts by splitting the input
+        // into even and odd lanes, placing the 8-bit lanes into the high bits
+        // of the Int16x8 vectors `even` and `odd`. Shift, mask, combine.
+ //
+ // wide = Int16x8.fromInt8x16Bits(left);
+ // shiftBy = right & 7
+ // mask = Int16x8.splat(0xff00);
+ //
+ MInstruction* wide = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
+ addTo->add(wide);
+
+ // wide = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+
+ MInstruction* shiftMask = MConstant::New(alloc, Int32Value(7));
+ addTo->add(shiftMask);
+ MBinaryBitwiseInstruction* shiftBy = MBitAnd::New(alloc, right, shiftMask);
+ shiftBy->setInt32Specialization();
+ addTo->add(shiftBy);
+
+ // Move the even 8x16 lanes into the high bits of the 16x8 lanes.
+ MInstruction* eight = MConstant::New(alloc, Int32Value(8));
+ addTo->add(eight);
+ MInstruction* even = MSimdShift::AddLegalized(alloc, addTo, wide, eight, lsh);
+
+ // Leave the odd lanes in place.
+ MInstruction* odd = wide;
+
+ // even = xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
+ // odd = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+
+ MInstruction* mask =
+ MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0xff00)), MIRType::Int16x8);
+ addTo->add(mask);
+
+ // Left-shift: Clear the low bits in `odd` before shifting.
+ if (op == lsh) {
+ odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
+ addTo->add(odd);
+ // odd = yy00 yy00 yy00 yy00 yy00 yy00 yy00 yy00
+ }
+
+ // Do the real shift twice: once for the even lanes, once for the odd
+ // lanes. This is a recursive call, but with a different type.
+ even = MSimdShift::AddLegalized(alloc, addTo, even, shiftBy, op);
+ odd = MSimdShift::AddLegalized(alloc, addTo, odd, shiftBy, op);
+
+ // even = XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~
+ // odd = YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~
+
+ // Right-shift: Clear the low bits in `odd` after shifting.
+ if (op != lsh) {
+ odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
+ addTo->add(odd);
+ // odd = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
+ }
+
+ // Move the even lanes back to their original place.
+ even = MSimdShift::AddLegalized(alloc, addTo, even, eight, ursh);
+
+ // Now, `odd` contains the odd lanes properly shifted, and `even`
+ // contains the even lanes properly shifted:
+ //
+ // even = 00XX 00XX 00XX 00XX 00XX 00XX 00XX 00XX
+ // odd = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
+ //
+ // Combine:
+ MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
+ addTo->add(result);
+ result = MSimdReinterpretCast::New(alloc, result, opType);
+ addTo->add(result);
+ return result;
+ }
+
+ // This is a legal operation already. Just create the instruction requested.
+ MInstruction* result = MSimdShift::New(alloc, left, right, op);
+ addTo->add(result);
+ return result;
+}
+
+template <typename T>
+static void
+PrintOpcodeOperation(T* mir, GenericPrinter& out)
+{
+ mir->MDefinition::printOpcode(out);
+ out.printf(" (%s)", T::OperationName(mir->operation()));
+}
+
+void
+MSimdBinaryArith::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+void
+MSimdBinarySaturating::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+void
+MSimdBinaryBitwise::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+void
+MSimdUnaryArith::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+void
+MSimdBinaryComp::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+void
+MSimdShift::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeOperation(this, out);
+}
+
+void
+MSimdInsertElement::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" (lane %u)", lane());
+}
+
+void
+MSimdBox::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" (%s%s)", SimdTypeToString(simdType()),
+ initialHeap() == gc::TenuredHeap ? ", tenured" : "");
+}
+
+void
+MSimdUnbox::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" (%s)", SimdTypeToString(simdType()));
+}
+
+void
+MControlInstruction::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ for (size_t j = 0; j < numSuccessors(); j++) {
+ if (getSuccessor(j))
+ out.printf(" block%u", getSuccessor(j)->id());
+ else
+ out.printf(" (null-to-be-patched)");
+ }
+}
+
+void
+MCompare::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" %s", CodeName[jsop()]);
+}
+
+void
+MConstantElements::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf(" 0x%" PRIxPTR, value().asValue());
+}
+
+void
+MLoadUnboxedScalar::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" %s", ScalarTypeDescr::typeName(storageType()));
+}
+
+void
+MAssertRange::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.put(" ");
+ assertedRange()->dump(out);
+}
+
+const char*
+MMathFunction::FunctionName(Function function)
+{
+ switch (function) {
+ case Log: return "Log";
+ case Sin: return "Sin";
+ case Cos: return "Cos";
+ case Exp: return "Exp";
+ case Tan: return "Tan";
+ case ACos: return "ACos";
+ case ASin: return "ASin";
+ case ATan: return "ATan";
+ case Log10: return "Log10";
+ case Log2: return "Log2";
+ case Log1P: return "Log1P";
+ case ExpM1: return "ExpM1";
+ case CosH: return "CosH";
+ case SinH: return "SinH";
+ case TanH: return "TanH";
+ case ACosH: return "ACosH";
+ case ASinH: return "ASinH";
+ case ATanH: return "ATanH";
+ case Sign: return "Sign";
+ case Trunc: return "Trunc";
+ case Cbrt: return "Cbrt";
+ case Floor: return "Floor";
+ case Ceil: return "Ceil";
+ case Round: return "Round";
+ default:
+ MOZ_CRASH("Unknown math function");
+ }
+}
+
+void
+MMathFunction::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" %s", FunctionName(function()));
+}
+
+MDefinition*
+MMathFunction::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (!input->isConstant() || !input->toConstant()->isTypeRepresentableAsDouble())
+ return this;
+
+ double in = input->toConstant()->numberToDouble();
+ double out;
+ switch (function_) {
+ case Log:
+ out = js::math_log_uncached(in);
+ break;
+ case Sin:
+ out = js::math_sin_uncached(in);
+ break;
+ case Cos:
+ out = js::math_cos_uncached(in);
+ break;
+ case Exp:
+ out = js::math_exp_uncached(in);
+ break;
+ case Tan:
+ out = js::math_tan_uncached(in);
+ break;
+ case ACos:
+ out = js::math_acos_uncached(in);
+ break;
+ case ASin:
+ out = js::math_asin_uncached(in);
+ break;
+ case ATan:
+ out = js::math_atan_uncached(in);
+ break;
+ case Log10:
+ out = js::math_log10_uncached(in);
+ break;
+ case Log2:
+ out = js::math_log2_uncached(in);
+ break;
+ case Log1P:
+ out = js::math_log1p_uncached(in);
+ break;
+ case ExpM1:
+ out = js::math_expm1_uncached(in);
+ break;
+ case CosH:
+ out = js::math_cosh_uncached(in);
+ break;
+ case SinH:
+ out = js::math_sinh_uncached(in);
+ break;
+ case TanH:
+ out = js::math_tanh_uncached(in);
+ break;
+ case ACosH:
+ out = js::math_acosh_uncached(in);
+ break;
+ case ASinH:
+ out = js::math_asinh_uncached(in);
+ break;
+ case ATanH:
+ out = js::math_atanh_uncached(in);
+ break;
+ case Sign:
+ out = js::math_sign_uncached(in);
+ break;
+ case Trunc:
+ out = js::math_trunc_uncached(in);
+ break;
+ case Cbrt:
+ out = js::math_cbrt_uncached(in);
+ break;
+ case Floor:
+ out = js::math_floor_impl(in);
+ break;
+ case Ceil:
+ out = js::math_ceil_impl(in);
+ break;
+ case Round:
+ out = js::math_round_impl(in);
+ break;
+ default:
+ return this;
+ }
+
+ if (input->type() == MIRType::Float32)
+ return MConstant::NewFloat32(alloc, out);
+ return MConstant::New(alloc, DoubleValue(out));
+}
+
+MDefinition*
+MAtomicIsLockFree::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (!input->isConstant() || input->type() != MIRType::Int32)
+ return this;
+
+ int32_t i = input->toConstant()->toInt32();
+ return MConstant::New(alloc, BooleanValue(AtomicOperations::isLockfree(i)));
+}
+
+// Define |THIS_SLOT| as part of this translation unit, as it is used to
+// specialize the parameterized |New| function calls introduced by
+// TRIVIAL_NEW_WRAPPERS.
+const int32_t MParameter::THIS_SLOT;
+
+void
+MParameter::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ if (index() == THIS_SLOT)
+ out.printf(" THIS_SLOT");
+ else
+ out.printf(" %d", index());
+}
+
+HashNumber
+MParameter::valueHash() const
+{
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, index_);
+ return hash;
+}
+
+bool
+MParameter::congruentTo(const MDefinition* ins) const
+{
+ if (!ins->isParameter())
+ return false;
+
+ return ins->toParameter()->index() == index_;
+}
+
+WrappedFunction::WrappedFunction(JSFunction* fun)
+ : fun_(fun),
+ nargs_(fun->nargs()),
+ isNative_(fun->isNative()),
+ isConstructor_(fun->isConstructor()),
+ isClassConstructor_(fun->isClassConstructor()),
+ isSelfHostedBuiltin_(fun->isSelfHostedBuiltin())
+{}
+
+MCall*
+MCall::New(TempAllocator& alloc, JSFunction* target, size_t maxArgc, size_t numActualArgs,
+ bool construct, bool isDOMCall)
+{
+ WrappedFunction* wrappedTarget = target ? new(alloc) WrappedFunction(target) : nullptr;
+ MOZ_ASSERT(maxArgc >= numActualArgs);
+ MCall* ins;
+ if (isDOMCall) {
+ MOZ_ASSERT(!construct);
+ ins = new(alloc) MCallDOMNative(wrappedTarget, numActualArgs);
+ } else {
+ ins = new(alloc) MCall(wrappedTarget, numActualArgs, construct);
+ }
+ if (!ins->init(alloc, maxArgc + NumNonArgumentOperands))
+ return nullptr;
+ return ins;
+}
+
+AliasSet
+MCallDOMNative::getAliasSet() const
+{
+ const JSJitInfo* jitInfo = getJitInfo();
+
+ // If we don't know anything about the types of our arguments, we have to
+ // assume that type-coercions can have side-effects, so we need to alias
+ // everything.
+ if (jitInfo->aliasSet() == JSJitInfo::AliasEverything || !jitInfo->isTypedMethodJitInfo())
+ return AliasSet::Store(AliasSet::Any);
+
+ uint32_t argIndex = 0;
+ const JSTypedMethodJitInfo* methodInfo =
+ reinterpret_cast<const JSTypedMethodJitInfo*>(jitInfo);
+ for (const JSJitInfo::ArgType* argType = methodInfo->argTypes;
+ *argType != JSJitInfo::ArgTypeListEnd;
+ ++argType, ++argIndex)
+ {
+ if (argIndex >= numActualArgs()) {
+ // Passing through undefined can't have side-effects
+ continue;
+ }
+ // getArg(0) is "this", so skip it
+ MDefinition* arg = getArg(argIndex+1);
+ MIRType actualType = arg->type();
+ // The only way to reliably avoid side-effects given the information we
+ // have here is if we're passing in a known primitive value to an
+ // argument that expects a primitive value.
+ //
+ // XXXbz maybe we need to communicate better information. For example,
+ // a sequence argument will sort of unavoidably have side effects, while
+ // a typed array argument won't have any, but both are claimed to be
+ // JSJitInfo::Object. But if we do that, we need to watch out for our
+ // movability/DCE-ability bits: if we have an arg type that can reliably
+ // throw an exception on conversion, that might not affect our alias set
+ // per se, but it should prevent us being moved or DCE-ed, unless we
+ // know the incoming things match that arg type and won't throw.
+ //
+ if ((actualType == MIRType::Value || actualType == MIRType::Object) ||
+ (*argType & JSJitInfo::Object))
+ {
+ return AliasSet::Store(AliasSet::Any);
+ }
+ }
+
+ // We checked all the args, and they check out. So we only alias DOM
+ // mutations or alias nothing, depending on the alias set in the jitinfo.
+ if (jitInfo->aliasSet() == JSJitInfo::AliasNone)
+ return AliasSet::None();
+
+ MOZ_ASSERT(jitInfo->aliasSet() == JSJitInfo::AliasDOMSets);
+ return AliasSet::Load(AliasSet::DOMProperty);
+}
+
+void
+MCallDOMNative::computeMovable()
+{
+ // We are movable if the jitinfo says we can be and if we're also not
+ // effectful. The jitinfo can't check for the latter, since it depends on
+ // the types of our arguments.
+ const JSJitInfo* jitInfo = getJitInfo();
+
+ MOZ_ASSERT_IF(jitInfo->isMovable,
+ jitInfo->aliasSet() != JSJitInfo::AliasEverything);
+
+ if (jitInfo->isMovable && !isEffectful())
+ setMovable();
+}
+
+bool
+MCallDOMNative::congruentTo(const MDefinition* ins) const
+{
+ if (!isMovable())
+ return false;
+
+ if (!ins->isCall())
+ return false;
+
+ const MCall* call = ins->toCall();
+
+ if (!call->isCallDOMNative())
+ return false;
+
+ if (getSingleTarget() != call->getSingleTarget())
+ return false;
+
+ if (isConstructing() != call->isConstructing())
+ return false;
+
+ if (numActualArgs() != call->numActualArgs())
+ return false;
+
+ if (needsArgCheck() != call->needsArgCheck())
+ return false;
+
+ if (!congruentIfOperandsEqual(call))
+ return false;
+
+ // The other call had better be movable at this point!
+ MOZ_ASSERT(call->isMovable());
+
+ return true;
+}
+
+const JSJitInfo*
+MCallDOMNative::getJitInfo() const
+{
+ MOZ_ASSERT(getSingleTarget() && getSingleTarget()->isNative());
+
+ const JSJitInfo* jitInfo = getSingleTarget()->jitInfo();
+ MOZ_ASSERT(jitInfo);
+
+ return jitInfo;
+}
+
+MDefinition*
+MStringLength::foldsTo(TempAllocator& alloc)
+{
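+ // A constant string is an atom with a known length, so fold to an Int32
+ // constant.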
+ if (type() == MIRType::Int32 && string()->isConstant()) {
+ JSAtom* atom = &string()->toConstant()->toString()->asAtom();
+ return MConstant::New(alloc, Int32Value(atom->length()));
+ }
+
+ return this;
+}
+
+MDefinition*
+MConcat::foldsTo(TempAllocator& alloc)
+{
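+ // Concatenation with the empty string is the identity: fold "" + s and
+ // s + "" to s.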
+ if (lhs()->isConstant() && lhs()->toConstant()->toString()->empty())
+ return rhs();
+
+ if (rhs()->isConstant() && rhs()->toConstant()->toString()->empty())
+ return lhs();
+
+ return this;
+}
+
+static bool
+EnsureFloatInputOrConvert(MUnaryInstruction* owner, TempAllocator& alloc)
+{
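+ // Returns true when the input can produce a Float32 value. Otherwise, if
+ // the input is already typed Float32, convert it back to Double so the
+ // owner keeps operating on doubles.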
+ MDefinition* input = owner->input();
+ if (!input->canProduceFloat32()) {
+ if (input->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, input, owner);
+ return false;
+ }
+ return true;
+}
+
+void
+MFloor::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc))
+ specialization_ = MIRType::Float32;
+}
+
+void
+MCeil::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc))
+ specialization_ = MIRType::Float32;
+}
+
+void
+MRound::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc))
+ specialization_ = MIRType::Float32;
+}
+
+MTableSwitch*
+MTableSwitch::New(TempAllocator& alloc, MDefinition* ins, int32_t low, int32_t high)
+{
+ return new(alloc) MTableSwitch(alloc, ins, low, high);
+}
+
+MGoto*
+MGoto::New(TempAllocator& alloc, MBasicBlock* target)
+{
+ MOZ_ASSERT(target);
+ return new(alloc) MGoto(target);
+}
+
+MGoto*
+MGoto::New(TempAllocator::Fallible alloc, MBasicBlock* target)
+{
+ MOZ_ASSERT(target);
+ return new(alloc) MGoto(target);
+}
+
+MGoto*
+MGoto::New(TempAllocator& alloc)
+{
+ return new(alloc) MGoto(nullptr);
+}
+
+void
+MUnbox::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ getOperand(0)->printName(out);
+ out.printf(" ");
+
+ switch (type()) {
+ case MIRType::Int32: out.printf("to Int32"); break;
+ case MIRType::Double: out.printf("to Double"); break;
+ case MIRType::Boolean: out.printf("to Boolean"); break;
+ case MIRType::String: out.printf("to String"); break;
+ case MIRType::Symbol: out.printf("to Symbol"); break;
+ case MIRType::Object: out.printf("to Object"); break;
+ default: break;
+ }
+
+ switch (mode()) {
+ case Fallible: out.printf(" (fallible)"); break;
+ case Infallible: out.printf(" (infallible)"); break;
+ case TypeBarrier: out.printf(" (typebarrier)"); break;
+ default: break;
+ }
+}
+
+MDefinition*
+MUnbox::foldsTo(TempAllocator &alloc)
+{
+ if (!input()->isLoadFixedSlot())
+ return this;
+ MLoadFixedSlot* load = input()->toLoadFixedSlot();
+ if (load->type() != MIRType::Value)
+ return this;
+ if (type() != MIRType::Boolean && !IsNumberType(type()))
+ return this;
+ // Only optimize if the load comes immediately before the unbox, so it's
+ // safe to copy the load's dependency field.
+ MInstructionIterator iter(load->block()->begin(load));
+ ++iter;
+ if (*iter != this)
+ return this;
+
+ MLoadFixedSlotAndUnbox* ins = MLoadFixedSlotAndUnbox::New(alloc, load->object(), load->slot(),
+ mode(), type(), bailoutKind());
+ // As GVN runs after the Alias Analysis, we have to set the dependency by hand
+ ins->setDependency(load->dependency());
+ return ins;
+}
+
+void
+MTypeBarrier::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ getOperand(0)->printName(out);
+}
+
+bool
+MTypeBarrier::congruentTo(const MDefinition* def) const
+{
+ if (!def->isTypeBarrier())
+ return false;
+ const MTypeBarrier* other = def->toTypeBarrier();
+ if (barrierKind() != other->barrierKind() || isGuard() != other->isGuard())
+ return false;
+ if (!resultTypeSet()->equals(other->resultTypeSet()))
+ return false;
+ return congruentIfOperandsEqual(other);
+}
+
+#ifdef DEBUG
+void
+MPhi::assertLoopPhi() const
+{
+ // getLoopPredecessorOperand and getLoopBackedgeOperand rely on these
+ // predecessors being at indices 0 and 1.
+ MBasicBlock* pred = block()->getPredecessor(0);
+ MBasicBlock* back = block()->getPredecessor(1);
+ MOZ_ASSERT(pred == block()->loopPredecessor());
+ MOZ_ASSERT(pred->successorWithPhis() == block());
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == 0);
+ MOZ_ASSERT(back == block()->backedge());
+ MOZ_ASSERT(back->successorWithPhis() == block());
+ MOZ_ASSERT(back->positionInPhiSuccessor() == 1);
+}
+#endif
+
+void
+MPhi::removeOperand(size_t index)
+{
+ MOZ_ASSERT(index < numOperands());
+ MOZ_ASSERT(getUseFor(index)->index() == index);
+ MOZ_ASSERT(getUseFor(index)->consumer() == this);
+
+ // If we have phi(..., a, b, c, d, ..., z) and we plan
+ // on removing a, then first shift downward so that we have
+ // phi(..., b, c, d, ..., z, z):
+ MUse* p = inputs_.begin() + index;
+ MUse* e = inputs_.end();
+ p->producer()->removeUse(p);
+ for (; p < e - 1; ++p) {
+ MDefinition* producer = (p + 1)->producer();
+ p->setProducerUnchecked(producer);
+ producer->replaceUse(p + 1, p);
+ }
+
+ // truncate the inputs_ list:
+ inputs_.popBack();
+}
+
+void
+MPhi::removeAllOperands()
+{
+ for (MUse& p : inputs_)
+ p.producer()->removeUse(&p);
+ inputs_.clear();
+}
+
+MDefinition*
+MPhi::foldsTernary(TempAllocator& alloc)
+{
+ /* Check if this MPhi is a ternary construct.
+ * This is a very loose term as it actually only checks for
+ *
+ * MTest X
+ * / \
+ * ... ...
+ * \ /
+ * MPhi X Y
+ *
+ * Which we will simply call:
+ * x ? x : y or x ? y : x
+ */
+
+ if (numOperands() != 2)
+ return nullptr;
+
+ MOZ_ASSERT(block()->numPredecessors() == 2);
+
+ MBasicBlock* pred = block()->immediateDominator();
+ if (!pred || !pred->lastIns()->isTest())
+ return nullptr;
+
+ MTest* test = pred->lastIns()->toTest();
+
+ // True branch may only dominate one edge of MPhi.
+ if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
+ test->ifTrue()->dominates(block()->getPredecessor(1)))
+ {
+ return nullptr;
+ }
+
+ // False branch may only dominate one edge of MPhi.
+ if (test->ifFalse()->dominates(block()->getPredecessor(0)) ==
+ test->ifFalse()->dominates(block()->getPredecessor(1)))
+ {
+ return nullptr;
+ }
+
+ // True and false branch must dominate different edges of MPhi.
+ if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
+ test->ifFalse()->dominates(block()->getPredecessor(0)))
+ {
+ return nullptr;
+ }
+
+ // We found a ternary construct.
+ bool firstIsTrueBranch = test->ifTrue()->dominates(block()->getPredecessor(0));
+ MDefinition* trueDef = firstIsTrueBranch ? getOperand(0) : getOperand(1);
+ MDefinition* falseDef = firstIsTrueBranch ? getOperand(1) : getOperand(0);
+
+ // Accept either
+ // testArg ? testArg : constant or
+ // testArg ? constant : testArg
+ if (!trueDef->isConstant() && !falseDef->isConstant())
+ return nullptr;
+
+ MConstant* c = trueDef->isConstant() ? trueDef->toConstant() : falseDef->toConstant();
+ MDefinition* testArg = (trueDef == c) ? falseDef : trueDef;
+ if (testArg != test->input())
+ return nullptr;
+
+ // This check should be a tautology, except that the constant might be the
+ // result of the removal of a branch. In such a case the domination scope of
+ // the block which is holding the constant might be incomplete. This
+ // condition is used to prevent doing this optimization based on incomplete
+ // information.
+ //
+ // As GVN removed a branch, it will update the domination rules before
+ // trying to fold this MPhi again. Thus, this condition does not inhibit
+ // this optimization.
+ MBasicBlock* truePred = block()->getPredecessor(firstIsTrueBranch ? 0 : 1);
+ MBasicBlock* falsePred = block()->getPredecessor(firstIsTrueBranch ? 1 : 0);
+ if (!trueDef->block()->dominates(truePred) ||
+ !falseDef->block()->dominates(falsePred))
+ {
+ return nullptr;
+ }
+
+ // If testArg is an int32 type we can:
+ // - fold testArg ? testArg : 0 to testArg
+ // - fold testArg ? 0 : testArg to 0
+ if (testArg->type() == MIRType::Int32 && c->numberToDouble() == 0) {
+ testArg->setGuardRangeBailoutsUnchecked();
+
+ // When folding to the constant we need to hoist it.
+ if (trueDef == c && !c->block()->dominates(block()))
+ c->block()->moveBefore(pred->lastIns(), c);
+ return trueDef;
+ }
+
+ // If testArg is a double type we can:
+ // - fold testArg ? testArg : 0.0 to MNaNToZero(testArg)
+ if (testArg->type() == MIRType::Double && mozilla::IsPositiveZero(c->numberToDouble()) &&
+ c != trueDef)
+ {
+ MNaNToZero* replace = MNaNToZero::New(alloc, testArg);
+ test->block()->insertBefore(test, replace);
+ return replace;
+ }
+
+ // If testArg is a string type we can:
+ // - fold testArg ? testArg : "" to testArg
+ // - fold testArg ? "" : testArg to ""
+ if (testArg->type() == MIRType::String &&
+ c->toString() == GetJitContext()->runtime->emptyString())
+ {
+ // When folding to the constant we need to hoist it.
+ if (trueDef == c && !c->block()->dominates(block()))
+ c->block()->moveBefore(pred->lastIns(), c);
+ return trueDef;
+ }
+
+ return nullptr;
+}
+
+MDefinition*
+MPhi::operandIfRedundant()
+{
+ if (inputs_.length() == 0)
+ return nullptr;
+
+ // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
+ // return the operand that it will always be equal to (a, in
+ // those two cases).
+ MDefinition* first = getOperand(0);
+ for (size_t i = 1, e = numOperands(); i < e; i++) {
+ MDefinition* op = getOperand(i);
+ if (op != first && op != this)
+ return nullptr;
+ }
+ return first;
+}
+
+MDefinition*
+MPhi::foldsFilterTypeSet()
+{
+ // Fold a phi whose operands are a combination of 'subject' and
+ // MFilterTypeSet(subject) to 'subject'.
+
+ if (inputs_.length() == 0)
+ return nullptr;
+
+ MDefinition* subject = getOperand(0);
+ if (subject->isFilterTypeSet())
+ subject = subject->toFilterTypeSet()->input();
+
+ // Not same type, don't fold.
+ if (subject->type() != type())
+ return nullptr;
+
+ // Phi is better typed (has typeset). Don't fold.
+ if (resultTypeSet() && !subject->resultTypeSet())
+ return nullptr;
+
+ // Phi is better typed (according to typeset). Don't fold.
+ if (subject->resultTypeSet() && resultTypeSet()) {
+ if (!subject->resultTypeSet()->isSubset(resultTypeSet()))
+ return nullptr;
+ }
+
+ for (size_t i = 1, e = numOperands(); i < e; i++) {
+ MDefinition* op = getOperand(i);
+ if (op == subject)
+ continue;
+ if (op->isFilterTypeSet() && op->toFilterTypeSet()->input() == subject)
+ continue;
+
+ return nullptr;
+ }
+
+ return subject;
+}
+
+MDefinition*
+MPhi::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = operandIfRedundant())
+ return def;
+
+ if (MDefinition* def = foldsTernary(alloc))
+ return def;
+
+ if (MDefinition* def = foldsFilterTypeSet())
+ return def;
+
+ return this;
+}
+
+bool
+MPhi::congruentTo(const MDefinition* ins) const
+{
+ if (!ins->isPhi())
+ return false;
+
+ // Phis in different blocks may have different control conditions.
+ // For example, these phis:
+ //
+ // if (p)
+ // goto a
+ // a:
+ // t = phi(x, y)
+ //
+ // if (q)
+ // goto b
+ // b:
+ // s = phi(x, y)
+ //
+ // have identical operands, but they are not equivalent because t is
+ // effectively p?x:y and s is effectively q?x:y.
+ //
+ // For now, consider phis in different blocks incongruent.
+ if (ins->block() != block())
+ return false;
+
+ return congruentIfOperandsEqual(ins);
+}
+
+static inline TemporaryTypeSet*
+MakeMIRTypeSet(TempAllocator& alloc, MIRType type)
+{
+ MOZ_ASSERT(type != MIRType::Value);
+ TypeSet::Type ntype = type == MIRType::Object
+ ? TypeSet::AnyObjectType()
+ : TypeSet::PrimitiveType(ValueTypeFromMIRType(type));
+ return alloc.lifoAlloc()->new_<TemporaryTypeSet>(alloc.lifoAlloc(), ntype);
+}
+
+bool
+jit::MergeTypes(TempAllocator& alloc, MIRType* ptype, TemporaryTypeSet** ptypeSet,
+ MIRType newType, TemporaryTypeSet* newTypeSet)
+{
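+ // Merge newType/newTypeSet into *ptype/*ptypeSet: mixed numeric types
+ // widen to Double, otherwise the merged type widens to Value and the
+ // typesets are unioned.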
+ if (newTypeSet && newTypeSet->empty())
+ return true;
+ LifoAlloc::AutoFallibleScope fallibleAllocator(alloc.lifoAlloc());
+ if (newType != *ptype) {
+ if (IsTypeRepresentableAsDouble(newType) && IsTypeRepresentableAsDouble(*ptype)) {
+ *ptype = MIRType::Double;
+ } else if (*ptype != MIRType::Value) {
+ if (!*ptypeSet) {
+ *ptypeSet = MakeMIRTypeSet(alloc, *ptype);
+ if (!*ptypeSet)
+ return false;
+ }
+ *ptype = MIRType::Value;
+ } else if (*ptypeSet && (*ptypeSet)->empty()) {
+ *ptype = newType;
+ }
+ }
+ if (*ptypeSet) {
+ if (!newTypeSet && newType != MIRType::Value) {
+ newTypeSet = MakeMIRTypeSet(alloc, newType);
+ if (!newTypeSet)
+ return false;
+ }
+ if (newTypeSet) {
+ if (!newTypeSet->isSubset(*ptypeSet)) {
+ *ptypeSet = TypeSet::unionSets(*ptypeSet, newTypeSet, alloc.lifoAlloc());
+ if (!*ptypeSet)
+ return false;
+ }
+ } else {
+ *ptypeSet = nullptr;
+ }
+ }
+ return true;
+}
+
+// Tests whether 'types' includes all possible values represented by
+// input/inputTypes.
+bool
+jit::TypeSetIncludes(TypeSet* types, MIRType input, TypeSet* inputTypes)
+{
+ if (!types)
+ return inputTypes && inputTypes->empty();
+
+ switch (input) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::MagicOptimizedArguments:
+ return types->hasType(TypeSet::PrimitiveType(ValueTypeFromMIRType(input)));
+
+ case MIRType::Object:
+ return types->unknownObject() || (inputTypes && inputTypes->isSubset(types));
+
+ case MIRType::Value:
+ return types->unknown() || (inputTypes && inputTypes->isSubset(types));
+
+ default:
+ MOZ_CRASH("Bad input type");
+ }
+}
+
+// Tests if two type combos (type/typeset) are equal.
+bool
+jit::EqualTypes(MIRType type1, TemporaryTypeSet* typeset1,
+ MIRType type2, TemporaryTypeSet* typeset2)
+{
+ // Types should be equal.
+ if (type1 != type2)
+ return false;
+
+ // Both have equal type and no typeset.
+ if (!typeset1 && !typeset2)
+ return true;
+
+ // If only one instruction has a typeset, test whether the typeset
+ // contains the same information as the MIRType.
+ if (typeset1 && !typeset2)
+ return TypeSetIncludes(typeset1, type2, nullptr);
+ if (!typeset1 && typeset2)
+ return TypeSetIncludes(typeset2, type1, nullptr);
+
+ // Typesets should be equal.
+ return typeset1->equals(typeset2);
+}
+
+// Tests whether input/inputTypes can always be stored to an unboxed
+// object/array property with the given unboxed type.
+bool
+jit::CanStoreUnboxedType(TempAllocator& alloc,
+ JSValueType unboxedType, MIRType input, TypeSet* inputTypes)
+{
+ TemporaryTypeSet types;
+
+ switch (unboxedType) {
+ case JSVAL_TYPE_BOOLEAN:
+ case JSVAL_TYPE_INT32:
+ case JSVAL_TYPE_DOUBLE:
+ case JSVAL_TYPE_STRING:
+ types.addType(TypeSet::PrimitiveType(unboxedType), alloc.lifoAlloc());
+ break;
+
+ case JSVAL_TYPE_OBJECT:
+ types.addType(TypeSet::AnyObjectType(), alloc.lifoAlloc());
+ types.addType(TypeSet::NullType(), alloc.lifoAlloc());
+ break;
+
+ default:
+ MOZ_CRASH("Bad unboxed type");
+ }
+
+ return TypeSetIncludes(&types, input, inputTypes);
+}
+
+static bool
+CanStoreUnboxedType(TempAllocator& alloc, JSValueType unboxedType, MDefinition* value)
+{
+ return CanStoreUnboxedType(alloc, unboxedType, value->type(), value->resultTypeSet());
+}
+
+bool
+MPhi::specializeType(TempAllocator& alloc)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(!specialized_);
+ specialized_ = true;
+#endif
+
+ MOZ_ASSERT(!inputs_.empty());
+
+ size_t start;
+ if (hasBackedgeType_) {
+ // The type of this phi has already been populated with potential types
+ // that could come in via loop backedges.
+ start = 0;
+ } else {
+ setResultType(getOperand(0)->type());
+ setResultTypeSet(getOperand(0)->resultTypeSet());
+ start = 1;
+ }
+
+ MIRType resultType = this->type();
+ TemporaryTypeSet* resultTypeSet = this->resultTypeSet();
+
+ for (size_t i = start; i < inputs_.length(); i++) {
+ MDefinition* def = getOperand(i);
+ if (!MergeTypes(alloc, &resultType, &resultTypeSet, def->type(), def->resultTypeSet()))
+ return false;
+ }
+
+ setResultType(resultType);
+ setResultTypeSet(resultTypeSet);
+ return true;
+}
+
+bool
+MPhi::addBackedgeType(TempAllocator& alloc, MIRType type, TemporaryTypeSet* typeSet)
+{
+ MOZ_ASSERT(!specialized_);
+
+ if (hasBackedgeType_) {
+ MIRType resultType = this->type();
+ TemporaryTypeSet* resultTypeSet = this->resultTypeSet();
+
+ if (!MergeTypes(alloc, &resultType, &resultTypeSet, type, typeSet))
+ return false;
+
+ setResultType(resultType);
+ setResultTypeSet(resultTypeSet);
+ } else {
+ setResultType(type);
+ setResultTypeSet(typeSet);
+ hasBackedgeType_ = true;
+ }
+ return true;
+}
+
+bool
+MPhi::typeIncludes(MDefinition* def)
+{
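+ // Tests whether every value described by |def|'s type and typeset is
+ // already covered by this phi's type and typeset.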
+ if (def->type() == MIRType::Int32 && this->type() == MIRType::Double)
+ return true;
+
+ if (TemporaryTypeSet* types = def->resultTypeSet()) {
+ if (this->resultTypeSet())
+ return types->isSubset(this->resultTypeSet());
+ if (this->type() == MIRType::Value || types->empty())
+ return true;
+ return this->type() == types->getKnownMIRType();
+ }
+
+ if (def->type() == MIRType::Value) {
+ // This phi must be able to be any value.
+ return this->type() == MIRType::Value
+ && (!this->resultTypeSet() || this->resultTypeSet()->unknown());
+ }
+
+ return this->mightBeType(def->type());
+}
+
+bool
+MPhi::checkForTypeChange(TempAllocator& alloc, MDefinition* ins, bool* ptypeChange)
+{
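+ // Merge |ins|'s type and typeset into this phi's, setting *ptypeChange
+ // when the phi's type information changes.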
+ MIRType resultType = this->type();
+ TemporaryTypeSet* resultTypeSet = this->resultTypeSet();
+
+ if (!MergeTypes(alloc, &resultType, &resultTypeSet, ins->type(), ins->resultTypeSet()))
+ return false;
+
+ if (resultType != this->type() || resultTypeSet != this->resultTypeSet()) {
+ *ptypeChange = true;
+ setResultType(resultType);
+ setResultTypeSet(resultTypeSet);
+ }
+ return true;
+}
+
+void
+MCall::addArg(size_t argnum, MDefinition* arg)
+{
+ // The operand vector is initialized in reverse order by the IonBuilder.
+ // It cannot be checked for consistency until all arguments are added.
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ initOperand(argnum + NumNonArgumentOperands, arg);
+}
+
+static inline bool
+IsConstant(MDefinition* def, double v)
+{
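+ // Tests whether |def| is a constant whose numeric value is identical to
+ // |v|; NumbersAreIdentical distinguishes -0 from +0.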
+ if (!def->isConstant())
+ return false;
+
+ return NumbersAreIdentical(def->toConstant()->numberToDouble(), v);
+}
+
+MDefinition*
+MBinaryBitwiseInstruction::foldsTo(TempAllocator& alloc)
+{
+ if (specialization_ != MIRType::Int32)
+ return this;
+
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this))
+ return folded;
+
+ return this;
+}
+
+MDefinition*
+MBinaryBitwiseInstruction::foldUnnecessaryBitop()
+{
+ if (specialization_ != MIRType::Int32)
+ return this;
+
+ // Fold unsigned shift right operator when the second operand is zero and
+ // the only use is an unsigned modulo. Thus, the expression
+ // |(x >>> 0) % y| becomes |x % y|.
+ if (isUrsh() && hasOneDefUse() && IsUint32Type(this)) {
+ MUseDefIterator use(this);
+ if (use.def()->isMod() && use.def()->toMod()->isUnsigned())
+ return getOperand(0);
+ MOZ_ASSERT(!(++use));
+ }
+
+ // Eliminate bitwise operations that are no-ops when used on integer
+ // inputs, such as (x | 0).
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ if (IsConstant(lhs, 0))
+ return foldIfZero(0);
+
+ if (IsConstant(rhs, 0))
+ return foldIfZero(1);
+
+ if (IsConstant(lhs, -1))
+ return foldIfNegOne(0);
+
+ if (IsConstant(rhs, -1))
+ return foldIfNegOne(1);
+
+ if (lhs == rhs)
+ return foldIfEqual();
+
+ if (maskMatchesRightRange) {
+ MOZ_ASSERT(lhs->isConstant());
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ return foldIfAllBitsSet(0);
+ }
+
+ if (maskMatchesLeftRange) {
+ MOZ_ASSERT(rhs->isConstant());
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ return foldIfAllBitsSet(1);
+ }
+
+ return this;
+}
+
+void
+MBinaryBitwiseInstruction::infer(BaselineInspector*, jsbytecode*)
+{
+ if (getOperand(0)->mightBeType(MIRType::Object) || getOperand(0)->mightBeType(MIRType::Symbol) ||
+ getOperand(1)->mightBeType(MIRType::Object) || getOperand(1)->mightBeType(MIRType::Symbol))
+ {
+ specialization_ = MIRType::None;
+ } else {
+ specializeAs(MIRType::Int32);
+ }
+}
+
+void
+MBinaryBitwiseInstruction::specializeAs(MIRType type)
+{
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
+ MOZ_ASSERT(this->type() == type);
+
+ specialization_ = type;
+
+ if (isBitOr() || isBitAnd() || isBitXor())
+ setCommutative();
+}
+
+void
+MShiftInstruction::infer(BaselineInspector*, jsbytecode*)
+{
+ if (getOperand(0)->mightBeType(MIRType::Object) || getOperand(1)->mightBeType(MIRType::Object) ||
+ getOperand(0)->mightBeType(MIRType::Symbol) || getOperand(1)->mightBeType(MIRType::Symbol))
+ specialization_ = MIRType::None;
+ else
+ specialization_ = MIRType::Int32;
+}
+
+void
+MUrsh::infer(BaselineInspector* inspector, jsbytecode* pc)
+{
+ if (getOperand(0)->mightBeType(MIRType::Object) || getOperand(1)->mightBeType(MIRType::Object) ||
+ getOperand(0)->mightBeType(MIRType::Symbol) || getOperand(1)->mightBeType(MIRType::Symbol))
+ {
+ specialization_ = MIRType::None;
+ setResultType(MIRType::Value);
+ return;
+ }
+
+ if (inspector->hasSeenDoubleResult(pc)) {
+ specialization_ = MIRType::Double;
+ setResultType(MIRType::Double);
+ return;
+ }
+
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+}
+
+static inline bool
+CanProduceNegativeZero(MDefinition* def)
+{
+ // Test if this instruction can produce negative zero even when bailing out
+ // and changing types.
+ switch (def->op()) {
+ case MDefinition::Op_Constant:
+ if (def->type() == MIRType::Double && def->toConstant()->toDouble() == -0.0)
+ return true;
+ MOZ_FALLTHROUGH;
+ case MDefinition::Op_BitAnd:
+ case MDefinition::Op_BitOr:
+ case MDefinition::Op_BitXor:
+ case MDefinition::Op_BitNot:
+ case MDefinition::Op_Lsh:
+ case MDefinition::Op_Rsh:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static inline bool
+NeedNegativeZeroCheck(MDefinition* def)
+{
+ if (def->isGuardRangeBailouts())
+ return true;
+
+ // Test if all uses have the same semantics for -0 and 0
+ for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
+ if (use->consumer()->isResumePoint())
+ continue;
+
+ MDefinition* use_def = use->consumer()->toDefinition();
+ switch (use_def->op()) {
+ case MDefinition::Op_Add: {
+ // If add is truncating, -0 and 0 are observed as the same.
+ if (use_def->toAdd()->isTruncated())
+ break;
+
+ // x + y gives -0, when both x and y are -0
+
+ // Figure out the order in which the addition's operands will
+ // execute. EdgeCaseAnalysis::analyzeLate has renumbered the MIR
+ // definitions for us so that this just requires comparing ids.
+ MDefinition* first = use_def->toAdd()->lhs();
+ MDefinition* second = use_def->toAdd()->rhs();
+ if (first->id() > second->id()) {
+ MDefinition* temp = first;
+ first = second;
+ second = temp;
+ }
+ // Negative zero checks can be removed on the first executed
+ // operand only if it is guaranteed the second executed operand
+ // will produce a value other than -0. While the second is
+ // typed as an int32, a bailout taken between execution of the
+ // operands may change that type and cause a -0 to flow to the
+ // second.
+ //
+ // There is no way to test whether there are any bailouts
+ // between execution of the operands, so remove negative
+ // zero checks from the first only if the second's type is
+ // independent from type changes that may occur after bailing.
+ if (def == first && CanProduceNegativeZero(second))
+ return true;
+
+ // The negative zero check can always be removed on the second
+ // executed operand; by the time this executes the first will have
+ // been evaluated as int32 and the addition's result cannot be -0.
+ break;
+ }
+ case MDefinition::Op_Sub: {
+ // If sub is truncating, -0 and 0 are observed as the same.
+ if (use_def->toSub()->isTruncated())
+ break;
+
+ // x - y gives -0, when x is -0 and y is 0.
+
+ // We can remove the negative zero check on the rhs, only if we
+ // are sure the lhs isn't negative zero.
+
+ // The lhs is typed as integer (i.e. not -0.0), but it can bail out
+ // and change type. This should be fine if the lhs is executed
+ // first. However, if the rhs is executed first, the lhs can bail,
+ // change type and become -0.0 while the rhs has already been
+ // optimized to not make a difference between zero and negative zero.
+ MDefinition* lhs = use_def->toSub()->lhs();
+ MDefinition* rhs = use_def->toSub()->rhs();
+ if (rhs->id() < lhs->id() && CanProduceNegativeZero(lhs))
+ return true;
+
+ MOZ_FALLTHROUGH;
+ }
+ case MDefinition::Op_StoreElement:
+ case MDefinition::Op_StoreElementHole:
+ case MDefinition::Op_FallibleStoreElement:
+ case MDefinition::Op_LoadElement:
+ case MDefinition::Op_LoadElementHole:
+ case MDefinition::Op_LoadUnboxedScalar:
+ case MDefinition::Op_LoadTypedArrayElementHole:
+ case MDefinition::Op_CharCodeAt:
+ case MDefinition::Op_Mod:
+ // Only allowed to remove check when definition is the second operand
+ if (use_def->getOperand(0) == def)
+ return true;
+ for (size_t i = 2, e = use_def->numOperands(); i < e; i++) {
+ if (use_def->getOperand(i) == def)
+ return true;
+ }
+ break;
+ case MDefinition::Op_BoundsCheck:
+ // Only allowed to remove check when definition is the first operand
+ if (use_def->toBoundsCheck()->getOperand(1) == def)
+ return true;
+ break;
+ case MDefinition::Op_ToString:
+ case MDefinition::Op_FromCharCode:
+ case MDefinition::Op_TableSwitch:
+ case MDefinition::Op_Compare:
+ case MDefinition::Op_BitAnd:
+ case MDefinition::Op_BitOr:
+ case MDefinition::Op_BitXor:
+ case MDefinition::Op_Abs:
+ case MDefinition::Op_TruncateToInt32:
+ // Always allowed to remove check. No matter which operand.
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+void
+MBinaryArithInstruction::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+
+ switch (type()) {
+ case MIRType::Int32:
+ if (isDiv())
+ out.printf(" [%s]", toDiv()->isUnsigned() ? "uint32" : "int32");
+ else if (isMod())
+ out.printf(" [%s]", toMod()->isUnsigned() ? "uint32" : "int32");
+ else
+ out.printf(" [int32]");
+ break;
+ case MIRType::Int64:
+ if (isDiv())
+ out.printf(" [%s]", toDiv()->isUnsigned() ? "uint64" : "int64");
+ else if (isMod())
+ out.printf(" [%s]", toMod()->isUnsigned() ? "uint64" : "int64");
+ else
+ out.printf(" [int64]");
+ break;
+ case MIRType::Float32:
+ out.printf(" [float]");
+ break;
+ case MIRType::Double:
+ out.printf(" [double]");
+ break;
+ default:
+ break;
+ }
+}
+
+MBinaryArithInstruction*
+MBinaryArithInstruction::New(TempAllocator& alloc, Opcode op,
+ MDefinition* left, MDefinition* right)
+{
+ switch (op) {
+ case Op_Add:
+ return MAdd::New(alloc, left, right);
+ case Op_Sub:
+ return MSub::New(alloc, left, right);
+ case Op_Mul:
+ return MMul::New(alloc, left, right);
+ case Op_Div:
+ return MDiv::New(alloc, left, right);
+ case Op_Mod:
+ return MMod::New(alloc, left, right);
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+MBinaryArithInstruction::setNumberSpecialization(TempAllocator& alloc, BaselineInspector* inspector,
+ jsbytecode* pc)
+{
+ setSpecialization(MIRType::Double);
+
+ // Try to specialize as int32.
+ if (getOperand(0)->type() == MIRType::Int32 && getOperand(1)->type() == MIRType::Int32) {
+ bool seenDouble = inspector->hasSeenDoubleResult(pc);
+
+ // Use int32 specialization if the operation doesn't overflow on its
+ // constant operands and if the operation has never overflowed.
+ if (!seenDouble && !constantDoubleResult(alloc))
+ setInt32Specialization();
+ }
+}
+
+bool
+MBinaryArithInstruction::constantDoubleResult(TempAllocator& alloc)
+{
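+ // Returns true when constant-folding the operands yields a result that
+ // is not representable as an Int32 (i.e. the fold would change the type).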
+ bool typeChange = false;
+ EvaluateConstantOperands(alloc, this, &typeChange);
+ return typeChange;
+}
+
+MDefinition*
+MRsh::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* f = MBinaryBitwiseInstruction::foldsTo(alloc);
+
+ if (f != this)
+ return f;
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ if (!lhs->isLsh() || !rhs->isConstant() || rhs->type() != MIRType::Int32)
+ return this;
+
+ if (!lhs->getOperand(1)->isConstant() || lhs->getOperand(1)->type() != MIRType::Int32)
+ return this;
+
+ uint32_t shift = rhs->toConstant()->toInt32();
+ uint32_t shift_lhs = lhs->getOperand(1)->toConstant()->toInt32();
+ if (shift != shift_lhs)
+ return this;
+
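+ // (x << 16) >> 16 sign-extends the low 16 bits and (x << 24) >> 24
+ // sign-extends the low 8 bits, so fold to MSignExtend.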
+ switch (shift) {
+ case 16:
+ return MSignExtend::New(alloc, lhs->getOperand(0), MSignExtend::Half);
+ case 24:
+ return MSignExtend::New(alloc, lhs->getOperand(0), MSignExtend::Byte);
+ }
+
+ return this;
+}
+
+MDefinition*
+MBinaryArithInstruction::foldsTo(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::None)
+ return this;
+
+ if (specialization_ == MIRType::Int64)
+ return this;
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+ if (MConstant* folded = EvaluateConstantOperands(alloc, this)) {
+ if (isTruncated()) {
+ if (!folded->block())
+ block()->insertBefore(this, folded);
+ return MTruncateToInt32::New(alloc, folded);
+ }
+ return folded;
+ }
+
+ if (mustPreserveNaN_)
+ return this;
+
+ // -0 + 0 = +0, so folding x + 0 to x would be wrong when x might be -0.
+ if (isAdd() && specialization_ != MIRType::Int32)
+ return this;
+
+ if (IsConstant(rhs, getIdentity())) {
+ if (isTruncated())
+ return MTruncateToInt32::New(alloc, lhs);
+ return lhs;
+ }
+
+ // Subtraction isn't commutative, so we can't remove the subtraction when the lhs equals 0.
+ if (isSub())
+ return this;
+
+ if (IsConstant(lhs, getIdentity())) {
+ if (isTruncated())
+ return MTruncateToInt32::New(alloc, rhs);
+ return rhs; // x op id => x
+ }
+
+ return this;
+}
+
+void
+MFilterTypeSet::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MDefinition* in = input();
+ if (in->type() != MIRType::Float32)
+ return;
+
+ setResultType(MIRType::Float32);
+}
+
+bool
+MFilterTypeSet::canProduceFloat32() const
+{
+ // A FilterTypeSet should be a producer if the input is a producer too.
+ // Also, be overly conservative by marking as not float32 producer when the
+ // input is a phi, as phis can be cyclic (phiA -> FilterTypeSet -> phiB ->
+ // phiA) and FilterTypeSet doesn't belong in the Float32 phi analysis.
+ return !input()->isPhi() && input()->canProduceFloat32();
+}
+
+bool
+MFilterTypeSet::canConsumeFloat32(MUse* operand) const
+{
+ MOZ_ASSERT(getUseFor(0) == operand);
+ // A FilterTypeSet should be a consumer if all uses are consumers. See also
+ // comment below MFilterTypeSet::canProduceFloat32.
+ bool allConsumerUses = true;
+ for (MUseDefIterator use(this); allConsumerUses && use; use++)
+ allConsumerUses &= !use.def()->isPhi() && use.def()->canConsumeFloat32(use.use());
+ return allConsumerUses;
+}
+
+void
+MBinaryArithInstruction::trySpecializeFloat32(TempAllocator& alloc)
+{
+ // Do not use Float32 if we can use int32.
+ if (specialization_ == MIRType::Int32)
+ return;
+ if (specialization_ == MIRType::None)
+ return;
+
+ MDefinition* left = lhs();
+ MDefinition* right = rhs();
+
+ if (!left->canProduceFloat32() || !right->canProduceFloat32() ||
+ !CheckUsesAreFloat32Consumers(this))
+ {
+ if (left->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, left, this);
+ if (right->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<1>(alloc, right, this);
+ return;
+ }
+
+ specialization_ = MIRType::Float32;
+ setResultType(MIRType::Float32);
+}
+
+void
+MMinMax::trySpecializeFloat32(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int32)
+ return;
+
+ MDefinition* left = lhs();
+ MDefinition* right = rhs();
+
+ if (!(left->canProduceFloat32() || (left->isMinMax() && left->type() == MIRType::Float32)) ||
+ !(right->canProduceFloat32() || (right->isMinMax() && right->type() == MIRType::Float32)))
+ {
+ if (left->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, left, this);
+ if (right->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<1>(alloc, right, this);
+ return;
+ }
+
+ specialization_ = MIRType::Float32;
+ setResultType(MIRType::Float32);
+}
+
+MDefinition*
+MMinMax::foldsTo(TempAllocator& alloc)
+{
+ if (!lhs()->isConstant() && !rhs()->isConstant())
+ return this;
+
+ // Directly apply math utility to compare the rhs() and lhs() when
+ // they are both constants.
+ if (lhs()->isConstant() && rhs()->isConstant()) {
+ if (!lhs()->toConstant()->isTypeRepresentableAsDouble() ||
+ !rhs()->toConstant()->isTypeRepresentableAsDouble())
+ {
+ return this;
+ }
+
+ double lnum = lhs()->toConstant()->numberToDouble();
+ double rnum = rhs()->toConstant()->numberToDouble();
+
+ double result;
+ if (isMax())
+ result = js::math_max_impl(lnum, rnum);
+ else
+ result = js::math_min_impl(lnum, rnum);
+
+ // The folded MConstant should maintain the same MIRType as
+ // the original MMinMax.
+ if (type() == MIRType::Int32) {
+ int32_t cast;
+ if (mozilla::NumberEqualsInt32(result, &cast))
+ return MConstant::New(alloc, Int32Value(cast));
+ } else if (type() == MIRType::Float32) {
+ return MConstant::New(alloc, wasm::RawF32(float(result)));
+ } else {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return MConstant::New(alloc, wasm::RawF64(result));
+ }
+ }
+
+ MDefinition* operand = lhs()->isConstant() ? rhs() : lhs();
+ MConstant* constant = lhs()->isConstant() ? lhs()->toConstant() : rhs()->toConstant();
+
+ if (operand->isToDouble() && operand->getOperand(0)->type() == MIRType::Int32) {
+ // min(int32, cte >= INT32_MAX) = int32
+ if (!isMax() &&
+ constant->isTypeRepresentableAsDouble() &&
+ constant->numberToDouble() >= INT32_MAX)
+ {
+ MLimitedTruncate* limit =
+ MLimitedTruncate::New(alloc, operand->getOperand(0), MDefinition::NoTruncate);
+ block()->insertBefore(this, limit);
+ MToDouble* toDouble = MToDouble::New(alloc, limit);
+ return toDouble;
+ }
+
+ // max(int32, cte <= INT32_MIN) = int32
+ if (isMax() &&
+ constant->isTypeRepresentableAsDouble() &&
+ constant->numberToDouble() <= INT32_MIN)
+ {
+ MLimitedTruncate* limit =
+ MLimitedTruncate::New(alloc, operand->getOperand(0), MDefinition::NoTruncate);
+ block()->insertBefore(this, limit);
+ MToDouble* toDouble = MToDouble::New(alloc, limit);
+ return toDouble;
+ }
+ }
+ return this;
+}
+
+MDefinition*
+MPow::foldsTo(TempAllocator& alloc)
+{
+ if (!power()->isConstant() || !power()->toConstant()->isTypeRepresentableAsDouble())
+ return this;
+
+ double pow = power()->toConstant()->numberToDouble();
+ MIRType outputType = type();
+
+ // Math.pow(x, 0.5) is a sqrt with edge-case detection.
+ if (pow == 0.5)
+ return MPowHalf::New(alloc, input());
+
+ // Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5), even for edge cases.
+ if (pow == -0.5) {
+ MPowHalf* half = MPowHalf::New(alloc, input());
+ block()->insertBefore(this, half);
+ MConstant* one = MConstant::New(alloc, DoubleValue(1.0));
+ block()->insertBefore(this, one);
+ return MDiv::New(alloc, one, half, MIRType::Double);
+ }
+
+ // Math.pow(x, 1) == x.
+ if (pow == 1.0)
+ return input();
+
+ // Math.pow(x, 2) == x*x.
+ if (pow == 2.0)
+ return MMul::New(alloc, input(), input(), outputType);
+
+ // Math.pow(x, 3) == x*x*x.
+ if (pow == 3.0) {
+ MMul* mul1 = MMul::New(alloc, input(), input(), outputType);
+ block()->insertBefore(this, mul1);
+ return MMul::New(alloc, input(), mul1, outputType);
+ }
+
+ // Math.pow(x, 4) == y*y, where y = x*x.
+ if (pow == 4.0) {
+ MMul* y = MMul::New(alloc, input(), input(), outputType);
+ block()->insertBefore(this, y);
+ return MMul::New(alloc, y, y, outputType);
+ }
+
+ return this;
+}
+
+bool
+MAbs::fallible() const
+{
+ return !implicitTruncate_ && (!range() || !range()->hasInt32Bounds());
+}
+
+void
+MAbs::trySpecializeFloat32(TempAllocator& alloc)
+{
+ // Do not use Float32 if we can use int32.
+ if (input()->type() == MIRType::Int32)
+ return;
+
+ if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {
+ if (input()->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, input(), this);
+ return;
+ }
+
+ setResultType(MIRType::Float32);
+ specialization_ = MIRType::Float32;
+}
+
+MDefinition*
+MDiv::foldsTo(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::None)
+ return this;
+
+ if (specialization_ == MIRType::Int64)
+ return this;
+
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this))
+ return folded;
+
+ if (MDefinition* folded = EvaluateExactReciprocal(alloc, this))
+ return folded;
+
+ return this;
+}
+
+void
+MDiv::analyzeEdgeCasesForward()
+{
+ // This is only meaningful when doing integer division.
+ if (specialization_ != MIRType::Int32)
+ return;
+
+ MOZ_ASSERT(lhs()->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs()->type() == MIRType::Int32);
+
+ // Try removing divide by zero check
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0))
+ canBeDivideByZero_ = false;
+
+ // If lhs is a constant int != INT32_MIN, then
+ // negative overflow check can be skipped.
+ if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(INT32_MIN))
+ canBeNegativeOverflow_ = false;
+
+ // If rhs is a constant int != -1, likewise.
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(-1))
+ canBeNegativeOverflow_ = false;
+
+ // If lhs is != 0, then negative zero check can be skipped.
+ if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(0))
+ setCanBeNegativeZero(false);
+
+ // If rhs is >= 0, likewise.
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
+ if (rhs()->toConstant()->toInt32() >= 0)
+ setCanBeNegativeZero(false);
+ }
+}
+
+void
+MDiv::analyzeEdgeCasesBackward()
+{
+ if (canBeNegativeZero() && !NeedNegativeZeroCheck(this))
+ setCanBeNegativeZero(false);
+}
+
+bool
+MDiv::fallible() const
+{
+ return !isTruncated();
+}
+
+MDefinition*
+MMod::foldsTo(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::None)
+ return this;
+
+ if (specialization_ == MIRType::Int64)
+ return this;
+
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this))
+ return folded;
+
+ return this;
+}
+
+void
+MMod::analyzeEdgeCasesForward()
+{
+ // These optimizations make sense only for integer division
+ if (specialization_ != MIRType::Int32)
+ return;
+
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0))
+ canBeDivideByZero_ = false;
+
+ if (rhs()->isConstant()) {
+ int32_t n = rhs()->toConstant()->toInt32();
+ if (n > 0 && !IsPowerOfTwo(uint32_t(n)))
+ canBePowerOfTwoDivisor_ = false;
+ }
+}
+
+bool
+MMod::fallible() const
+{
+ return !isTruncated() &&
+ (isUnsigned() || canBeDivideByZero() || canBeNegativeDividend());
+}
+
+void
+MMathFunction::trySpecializeFloat32(TempAllocator& alloc)
+{
+ if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {
+ if (input()->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, input(), this);
+ return;
+ }
+
+ setResultType(MIRType::Float32);
+ specialization_ = MIRType::Float32;
+}
+
+MHypot* MHypot::New(TempAllocator& alloc, const MDefinitionVector & vector)
+{
+ uint32_t length = vector.length();
+ MHypot * hypot = new(alloc) MHypot;
+ if (!hypot->init(alloc, length))
+ return nullptr;
+
+ for (uint32_t i = 0; i < length; ++i)
+ hypot->initOperand(i, vector[i]);
+ return hypot;
+}
+
+bool
+MAdd::fallible() const
+{
+ // The add is fallible if range analysis does not say that it is finite
+ // and the truncation analysis shows that there are non-truncated uses.
+ if (truncateKind() >= IndirectTruncate)
+ return false;
+ if (range() && range()->hasInt32Bounds())
+ return false;
+ return true;
+}
+
+bool
+MSub::fallible() const
+{
+ // See the comment in MAdd::fallible().
+ if (truncateKind() >= IndirectTruncate)
+ return false;
+ if (range() && range()->hasInt32Bounds())
+ return false;
+ return true;
+}
+
+MDefinition*
+MMul::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* out = MBinaryArithInstruction::foldsTo(alloc);
+ if (out != this)
+ return out;
+
+ if (specialization() != MIRType::Int32)
+ return this;
+
+ if (lhs() == rhs())
+ setCanBeNegativeZero(false);
+
+ return this;
+}
+
+void
+MMul::analyzeEdgeCasesForward()
+{
+ // Try to remove the check for negative zero
+ // This only makes sense when using the integer multiplication
+ if (specialization() != MIRType::Int32)
+ return;
+
+ // If lhs is > 0, no need for negative zero check.
+ if (lhs()->isConstant() && lhs()->type() == MIRType::Int32) {
+ if (lhs()->toConstant()->toInt32() > 0)
+ setCanBeNegativeZero(false);
+ }
+
+ // If rhs is > 0, likewise.
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
+ if (rhs()->toConstant()->toInt32() > 0)
+ setCanBeNegativeZero(false);
+ }
+}
+
+void
+MMul::analyzeEdgeCasesBackward()
+{
+ if (canBeNegativeZero() && !NeedNegativeZeroCheck(this))
+ setCanBeNegativeZero(false);
+}
+
+bool
+MMul::updateForReplacement(MDefinition* ins_)
+{
+ MMul* ins = ins_->toMul();
+ bool negativeZero = canBeNegativeZero() || ins->canBeNegativeZero();
+ setCanBeNegativeZero(negativeZero);
+ // Remove the imul annotation when merging imul and normal multiplication.
+ if (mode_ == Integer && ins->mode() != Integer)
+ mode_ = Normal;
+ return true;
+}
+
+bool
+MMul::canOverflow() const
+{
+ if (isTruncated())
+ return false;
+ return !range() || !range()->hasInt32Bounds();
+}
+
+bool
+MUrsh::fallible() const
+{
+ if (bailoutsDisabled())
+ return false;
+ return !range() || !range()->hasInt32Bounds();
+}
+
+static inline bool
+SimpleArithOperand(MDefinition* op)
+{
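+ // True when the operand cannot be an object, string, symbol, or magic
+ // value, so arithmetic on it needs no side-effectful coercion.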
+ return !op->mightBeType(MIRType::Object)
+ && !op->mightBeType(MIRType::String)
+ && !op->mightBeType(MIRType::Symbol)
+ && !op->mightBeType(MIRType::MagicOptimizedArguments)
+ && !op->mightBeType(MIRType::MagicHole)
+ && !op->mightBeType(MIRType::MagicIsConstructing);
+}
+
+static bool
+SafelyCoercesToDouble(MDefinition* op)
+{
+ // Strings and symbols are unhandled -- visitToDouble() doesn't support them yet.
+ // Null is unhandled -- ToDouble(null) == 0, but (0 == null) is false.
+ return SimpleArithOperand(op) && !op->mightBeType(MIRType::Null);
+}
+
+MIRType
+MCompare::inputType()
+{
+ switch(compareType_) {
+ case Compare_Undefined:
+ return MIRType::Undefined;
+ case Compare_Null:
+ return MIRType::Null;
+ case Compare_Boolean:
+ return MIRType::Boolean;
+ case Compare_UInt32:
+ case Compare_Int32:
+ case Compare_Int32MaybeCoerceBoth:
+ case Compare_Int32MaybeCoerceLHS:
+ case Compare_Int32MaybeCoerceRHS:
+ return MIRType::Int32;
+ case Compare_Double:
+ case Compare_DoubleMaybeCoerceLHS:
+ case Compare_DoubleMaybeCoerceRHS:
+ return MIRType::Double;
+ case Compare_Float32:
+ return MIRType::Float32;
+ case Compare_String:
+ case Compare_StrictString:
+ return MIRType::String;
+ case Compare_Object:
+ return MIRType::Object;
+ case Compare_Unknown:
+ case Compare_Bitwise:
+ return MIRType::Value;
+ default:
+ MOZ_CRASH("No known conversion");
+ }
+}
+
+static inline bool
+MustBeUInt32(MDefinition* def, MDefinition** pwrapped)
+{
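+ // A definition is known to be a UInt32 value if it is a bailout-disabled
+ // |x >>> 0| (returning x through |pwrapped|) or a non-negative Int32
+ // constant.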
+ if (def->isUrsh()) {
+ *pwrapped = def->toUrsh()->lhs();
+ MDefinition* rhs = def->toUrsh()->rhs();
+ return def->toUrsh()->bailoutsDisabled() &&
+ rhs->maybeConstantValue() &&
+ rhs->maybeConstantValue()->isInt32(0);
+ }
+
+ if (MConstant* defConst = def->maybeConstantValue()) {
+ *pwrapped = defConst;
+ return defConst->type() == MIRType::Int32 && defConst->toInt32() >= 0;
+ }
+
+ *pwrapped = nullptr; // silence GCC warning
+ return false;
+}
+
+/* static */ bool
+MBinaryInstruction::unsignedOperands(MDefinition* left, MDefinition* right)
+{
+ MDefinition* replace;
+ if (!MustBeUInt32(left, &replace))
+ return false;
+ if (replace->type() != MIRType::Int32)
+ return false;
+ if (!MustBeUInt32(right, &replace))
+ return false;
+ if (replace->type() != MIRType::Int32)
+ return false;
+ return true;
+}
+
+bool
+MBinaryInstruction::unsignedOperands()
+{
+ return unsignedOperands(getOperand(0), getOperand(1));
+}
+
+void
+MBinaryInstruction::replaceWithUnsignedOperands()
+{
+ MOZ_ASSERT(unsignedOperands());
+
+ for (size_t i = 0; i < numOperands(); i++) {
+ MDefinition* replace;
+ MustBeUInt32(getOperand(i), &replace);
+ if (replace == getOperand(i))
+ continue;
+
+ getOperand(i)->setImplicitlyUsedUnchecked();
+ replaceOperand(i, replace);
+ }
+}
+
+MCompare::CompareType
+MCompare::determineCompareType(JSOp op, MDefinition* left, MDefinition* right)
+{
+ MIRType lhs = left->type();
+ MIRType rhs = right->type();
+
+ bool looseEq = op == JSOP_EQ || op == JSOP_NE;
+ bool strictEq = op == JSOP_STRICTEQ || op == JSOP_STRICTNE;
+ bool relationalEq = !(looseEq || strictEq);
+
+ // Comparisons on unsigned integers may be treated as UInt32.
+ if (unsignedOperands(left, right))
+ return Compare_UInt32;
+
+ // Integer to integer or boolean to boolean comparisons may be treated as Int32.
+ if ((lhs == MIRType::Int32 && rhs == MIRType::Int32) ||
+ (lhs == MIRType::Boolean && rhs == MIRType::Boolean))
+ {
+ return Compare_Int32MaybeCoerceBoth;
+ }
+
+ // Loose/relational cross-integer/boolean comparisons may be treated as Int32.
+ if (!strictEq &&
+ (lhs == MIRType::Int32 || lhs == MIRType::Boolean) &&
+ (rhs == MIRType::Int32 || rhs == MIRType::Boolean))
+ {
+ return Compare_Int32MaybeCoerceBoth;
+ }
+
+ // Numeric comparisons against a double coerce to double.
+ if (IsTypeRepresentableAsDouble(lhs) && IsTypeRepresentableAsDouble(rhs))
+ return Compare_Double;
+
+ // Any comparison is allowed except strict eq.
+ if (!strictEq && IsFloatingPointType(rhs) && SafelyCoercesToDouble(left))
+ return Compare_DoubleMaybeCoerceLHS;
+ if (!strictEq && IsFloatingPointType(lhs) && SafelyCoercesToDouble(right))
+ return Compare_DoubleMaybeCoerceRHS;
+
+ // Handle object comparison.
+ if (!relationalEq && lhs == MIRType::Object && rhs == MIRType::Object)
+ return Compare_Object;
+
+ // Handle string comparisons. (Relational string compares are still unsupported).
+ if (!relationalEq && lhs == MIRType::String && rhs == MIRType::String)
+ return Compare_String;
+
+ // Handle strict string compare.
+ if (strictEq && lhs == MIRType::String)
+ return Compare_StrictString;
+ if (strictEq && rhs == MIRType::String)
+ return Compare_StrictString;
+
+ // Handle compare with lhs or rhs being Undefined or Null.
+ if (!relationalEq && IsNullOrUndefined(lhs))
+ return (lhs == MIRType::Null) ? Compare_Null : Compare_Undefined;
+ if (!relationalEq && IsNullOrUndefined(rhs))
+ return (rhs == MIRType::Null) ? Compare_Null : Compare_Undefined;
+
+ // Handle strict comparison with lhs/rhs being typed Boolean.
+ if (strictEq && (lhs == MIRType::Boolean || rhs == MIRType::Boolean)) {
+ // bool/bool case got an int32 specialization earlier.
+ MOZ_ASSERT(!(lhs == MIRType::Boolean && rhs == MIRType::Boolean));
+ return Compare_Boolean;
+ }
+
+ return Compare_Unknown;
+}
+
+void
+MCompare::cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints)
+{
+ MOZ_ASSERT(operandMightEmulateUndefined());
+
+ if (getOperand(0)->maybeEmulatesUndefined(constraints))
+ return;
+ if (getOperand(1)->maybeEmulatesUndefined(constraints))
+ return;
+
+ markNoOperandEmulatesUndefined();
+}
+
+MBitNot*
+MBitNot::NewInt32(TempAllocator& alloc, MDefinition* input)
+{
+ MBitNot* ins = new(alloc) MBitNot(input);
+ ins->specialization_ = MIRType::Int32;
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ return ins;
+}
+
+MDefinition*
+MBitNot::foldsTo(TempAllocator& alloc)
+{
+ if (specialization_ != MIRType::Int32)
+ return this;
+
+ MDefinition* input = getOperand(0);
+
+ if (input->isConstant()) {
+ js::Value v = Int32Value(~(input->toConstant()->toInt32()));
+ return MConstant::New(alloc, v);
+ }
+
+ if (input->isBitNot() && input->toBitNot()->specialization_ == MIRType::Int32) {
+ MOZ_ASSERT(input->toBitNot()->getOperand(0)->type() == MIRType::Int32);
+ return MTruncateToInt32::New(alloc, input->toBitNot()->input()); // ~~x => x | 0
+ }
+
+ return this;
+}
+
+MDefinition*
+MTypeOf::foldsTo(TempAllocator& alloc)
+{
+ // Note: we can't use input->type() here, type analysis has
+ // boxed the input.
+ MOZ_ASSERT(input()->type() == MIRType::Value);
+
+ JSType type;
+
+ switch (inputType()) {
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Int32:
+ type = JSTYPE_NUMBER;
+ break;
+ case MIRType::String:
+ type = JSTYPE_STRING;
+ break;
+ case MIRType::Symbol:
+ type = JSTYPE_SYMBOL;
+ break;
+ case MIRType::Null:
+ type = JSTYPE_OBJECT;
+ break;
+ case MIRType::Undefined:
+ type = JSTYPE_VOID;
+ break;
+ case MIRType::Boolean:
+ type = JSTYPE_BOOLEAN;
+ break;
+ case MIRType::Object:
+ if (!inputMaybeCallableOrEmulatesUndefined()) {
+ // Object is not callable and does not emulate undefined, so it's
+ // safe to fold to "object".
+ type = JSTYPE_OBJECT;
+ break;
+ }
+ MOZ_FALLTHROUGH;
+ default:
+ return this;
+ }
+
+ return MConstant::New(alloc, StringValue(TypeName(type, GetJitContext()->runtime->names())));
+}
+
+void
+MTypeOf::cacheInputMaybeCallableOrEmulatesUndefined(CompilerConstraintList* constraints)
+{
+ MOZ_ASSERT(inputMaybeCallableOrEmulatesUndefined());
+
+ if (!input()->maybeEmulatesUndefined(constraints) && !MaybeCallable(constraints, input()))
+ markInputNotCallableOrEmulatesUndefined();
+}
+
+MBitAnd*
+MBitAnd::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MBitAnd(left, right, MIRType::Int32);
+}
+
+MBitAnd*
+MBitAnd::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MBitAnd* ins = new(alloc) MBitAnd(left, right, type);
+ ins->specializeAs(type);
+ return ins;
+}
+
+MBitOr*
+MBitOr::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MBitOr(left, right, MIRType::Int32);
+}
+
+MBitOr*
+MBitOr::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MBitOr* ins = new(alloc) MBitOr(left, right, type);
+ ins->specializeAs(type);
+ return ins;
+}
+
+MBitXor*
+MBitXor::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MBitXor(left, right, MIRType::Int32);
+}
+
+MBitXor*
+MBitXor::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MBitXor* ins = new(alloc) MBitXor(left, right, type);
+ ins->specializeAs(type);
+ return ins;
+}
+
+MLsh*
+MLsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MLsh(left, right, MIRType::Int32);
+}
+
+MLsh*
+MLsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MLsh* ins = new(alloc) MLsh(left, right, type);
+ ins->specializeAs(type);
+ return ins;
+}
+
+MRsh*
+MRsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MRsh(left, right, MIRType::Int32);
+}
+
+MRsh*
+MRsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MRsh* ins = new(alloc) MRsh(left, right, type);
+ ins->specializeAs(type);
+ return ins;
+}
+
+MUrsh*
+MUrsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
+{
+ return new(alloc) MUrsh(left, right, MIRType::Int32);
+}
+
+MUrsh*
+MUrsh::New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type)
+{
+ MUrsh* ins = new(alloc) MUrsh(left, right, type);
+ ins->specializeAs(type);
+
+ // Since Ion has no UInt32 type, we use Int32 with a special exception
+ // to the type rules: we can return values in (INT32_MIN,UINT32_MAX]
+ // and still claim an Int32 type without bailing out. This is necessary
+ // because we can't have bailouts in wasm code.
+ ins->bailoutsDisabled_ = true;
+
+ return ins;
+}
+
+MResumePoint*
+MResumePoint::New(TempAllocator& alloc, MBasicBlock* block, jsbytecode* pc,
+ Mode mode)
+{
+ MResumePoint* resume = new(alloc) MResumePoint(block, pc, mode);
+ if (!resume->init(alloc)) {
+ block->discardPreAllocatedResumePoint(resume);
+ return nullptr;
+ }
+ resume->inherit(block);
+ return resume;
+}
+
+MResumePoint*
+MResumePoint::New(TempAllocator& alloc, MBasicBlock* block, MResumePoint* model,
+ const MDefinitionVector& operands)
+{
+ MResumePoint* resume = new(alloc) MResumePoint(block, model->pc(), model->mode());
+
+ // Allocate the same number of operands as the original resume point, and
+    // copy operands from the operands vector and not from the current
+    // block stack.
+ if (!resume->operands_.init(alloc, model->numAllocatedOperands())) {
+ block->discardPreAllocatedResumePoint(resume);
+ return nullptr;
+ }
+
+ // Copy the operands.
+ for (size_t i = 0; i < operands.length(); i++)
+ resume->initOperand(i, operands[i]);
+
+ return resume;
+}
+
+MResumePoint*
+MResumePoint::Copy(TempAllocator& alloc, MResumePoint* src)
+{
+ MResumePoint* resume = new(alloc) MResumePoint(src->block(), src->pc(),
+ src->mode());
+ // Copy the operands from the original resume point, and not from the
+ // current block stack.
+ if (!resume->operands_.init(alloc, src->numAllocatedOperands())) {
+ src->block()->discardPreAllocatedResumePoint(resume);
+ return nullptr;
+ }
+
+ // Copy the operands.
+ for (size_t i = 0; i < resume->numOperands(); i++)
+ resume->initOperand(i, src->getOperand(i));
+ return resume;
+}
+
+MResumePoint::MResumePoint(MBasicBlock* block, jsbytecode* pc, Mode mode)
+ : MNode(block),
+ pc_(pc),
+ instruction_(nullptr),
+ mode_(mode)
+{
+ block->addResumePoint(this);
+}
+
+bool
+MResumePoint::init(TempAllocator& alloc)
+{
+ return operands_.init(alloc, block()->stackDepth());
+}
+
+MResumePoint*
+MResumePoint::caller() const
+{
+ return block_->callerResumePoint();
+}
+
+void
+MResumePoint::inherit(MBasicBlock* block)
+{
+ // FixedList doesn't initialize its elements, so do unchecked inits.
+ for (size_t i = 0; i < stackDepth(); i++)
+ initOperand(i, block->getSlot(i));
+}
+
+void
+MResumePoint::addStore(TempAllocator& alloc, MDefinition* store, const MResumePoint* cache)
+{
+ MOZ_ASSERT(block()->outerResumePoint() != this);
+ MOZ_ASSERT_IF(cache, !cache->stores_.empty());
+
+ if (cache && cache->stores_.begin()->operand == store) {
+ // If the last resume point had the same side-effect stack, then we can
+ // reuse the current side effect without cloning it. This is a simple
+ // way to share common context by making a spaghetti stack.
+ if (++cache->stores_.begin() == stores_.begin()) {
+ stores_.copy(cache->stores_);
+ return;
+ }
+ }
+
+ // Ensure that the store would not be deleted by DCE.
+ MOZ_ASSERT(store->isEffectful());
+
+ MStoreToRecover* top = new(alloc) MStoreToRecover(store);
+ stores_.push(top);
+}
+
+void
+MResumePoint::dump(GenericPrinter& out) const
+{
+ out.printf("resumepoint mode=");
+
+ switch (mode()) {
+ case MResumePoint::ResumeAt:
+ out.printf("At");
+ break;
+ case MResumePoint::ResumeAfter:
+ out.printf("After");
+ break;
+ case MResumePoint::Outer:
+ out.printf("Outer");
+ break;
+ }
+
+ if (MResumePoint* c = caller())
+ out.printf(" (caller in block%u)", c->block()->id());
+
+ for (size_t i = 0; i < numOperands(); i++) {
+ out.printf(" ");
+ if (operands_[i].hasProducer())
+ getOperand(i)->printName(out);
+ else
+ out.printf("(null)");
+ }
+ out.printf("\n");
+}
+
+void
+MResumePoint::dump() const
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+bool
+MResumePoint::isObservableOperand(MUse* u) const
+{
+ return isObservableOperand(indexOf(u));
+}
+
+bool
+MResumePoint::isObservableOperand(size_t index) const
+{
+ return block()->info().isObservableSlot(index);
+}
+
+bool
+MResumePoint::isRecoverableOperand(MUse* u) const
+{
+ return block()->info().isRecoverableOperand(indexOf(u));
+}
+
+MDefinition*
+MToInt32::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+
+ // Fold this operation if the input operand is constant.
+ if (input->isConstant()) {
+ DebugOnly<MacroAssembler::IntConversionInputKind> convert = conversion();
+ switch (input->type()) {
+ case MIRType::Null:
+ MOZ_ASSERT(convert == MacroAssembler::IntConversion_Any);
+ return MConstant::New(alloc, Int32Value(0));
+ case MIRType::Boolean:
+ MOZ_ASSERT(convert == MacroAssembler::IntConversion_Any ||
+ convert == MacroAssembler::IntConversion_NumbersOrBoolsOnly);
+ return MConstant::New(alloc, Int32Value(input->toConstant()->toBoolean()));
+ case MIRType::Int32:
+ return MConstant::New(alloc, Int32Value(input->toConstant()->toInt32()));
+ case MIRType::Float32:
+ case MIRType::Double:
+ int32_t ival;
+            // Only values within the range of Int32 can be substituted as constants.
+ if (mozilla::NumberIsInt32(input->toConstant()->numberToDouble(), &ival))
+ return MConstant::New(alloc, Int32Value(ival));
+ break;
+ default:
+ break;
+ }
+ }
+
+    // Do not fold the TruncateToInt32 node when the input is uint32 (e.g. ursh
+    // with a zero constant). Consider the test jit-test/tests/ion/bug1247880.js,
+    // where the relevant code is: |(imul(1, x >>> 0) % 2)|. The imul operator
+    // is folded to a MTruncateToInt32 node, which will result in this MIR:
+    // MMod(MTruncateToInt32(MUrsh(x, MConstant(0))), MConstant(2)). Note that
+    // the MUrsh node's type is int32 (since uint32 is not implemented), and
+    // that would fold the MTruncateToInt32 node. This will make the modulo
+    // unsigned, while it should have been signed.
+ if (input->type() == MIRType::Int32 && !IsUint32Type(input))
+ return input;
+
+ return this;
+}
+
+void
+MToInt32::analyzeEdgeCasesBackward()
+{
+ if (!NeedNegativeZeroCheck(this))
+ setCanBeNegativeZero(false);
+}
+
+MDefinition*
+MTruncateToInt32::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (input->isBox())
+ input = input->getOperand(0);
+
+    // Do not fold the TruncateToInt32 node when the input is uint32 (e.g. ursh
+    // with a zero constant). Consider the test jit-test/tests/ion/bug1247880.js,
+    // where the relevant code is: |(imul(1, x >>> 0) % 2)|. The imul operator
+    // is folded to a MTruncateToInt32 node, which will result in this MIR:
+    // MMod(MTruncateToInt32(MUrsh(x, MConstant(0))), MConstant(2)). Note that
+    // the MUrsh node's type is int32 (since uint32 is not implemented), and
+    // that would fold the MTruncateToInt32 node. This will make the modulo
+    // unsigned, while it should have been signed.
+ if (input->type() == MIRType::Int32 && !IsUint32Type(input))
+ return input;
+
+ if (input->type() == MIRType::Double && input->isConstant()) {
+ int32_t ret = ToInt32(input->toConstant()->toDouble());
+ return MConstant::New(alloc, Int32Value(ret));
+ }
+
+ return this;
+}
+
+MDefinition*
+MWasmTruncateToInt32::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (input->type() == MIRType::Int32)
+ return input;
+
+ if (input->type() == MIRType::Double && input->isConstant()) {
+ double d = input->toConstant()->toDouble();
+ if (IsNaN(d))
+ return this;
+
+ if (!isUnsigned_ && d <= double(INT32_MAX) && d >= double(INT32_MIN))
+ return MConstant::New(alloc, Int32Value(ToInt32(d)));
+
+ if (isUnsigned_ && d <= double(UINT32_MAX) && d >= 0)
+ return MConstant::New(alloc, Int32Value(ToInt32(d)));
+ }
+
+ if (input->type() == MIRType::Float32 && input->isConstant()) {
+ double f = double(input->toConstant()->toFloat32());
+ if (IsNaN(f))
+ return this;
+
+ if (!isUnsigned_ && f <= double(INT32_MAX) && f >= double(INT32_MIN))
+ return MConstant::New(alloc, Int32Value(ToInt32(f)));
+
+ if (isUnsigned_ && f <= double(UINT32_MAX) && f >= 0)
+ return MConstant::New(alloc, Int32Value(ToInt32(f)));
+ }
+
+ return this;
+}
+
+MDefinition*
+MWrapInt64ToInt32::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ uint64_t c = input->toConstant()->toInt64();
+ int32_t output = bottomHalf() ? int32_t(c) : int32_t(c >> 32);
+ return MConstant::New(alloc, Int32Value(output));
+ }
+
+ return this;
+}
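+
+// Informal example of the fold above: wrapping the constant int64
+// 0x100000005 yields the int32 constant 5 when bottomHalf() is true and 1
+// (the high 32 bits) otherwise.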
+
+MDefinition*
+MExtendInt32ToInt64::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ int32_t c = input->toConstant()->toInt32();
+ int64_t res = isUnsigned() ? int64_t(uint32_t(c)) : int64_t(c);
+ return MConstant::NewInt64(alloc, res);
+ }
+
+ return this;
+}
+
+MDefinition*
+MToDouble::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (input->isBox())
+ input = input->getOperand(0);
+
+ if (input->type() == MIRType::Double)
+ return input;
+
+ if (input->isConstant() && input->toConstant()->isTypeRepresentableAsDouble()) {
+ double out = input->toConstant()->numberToDouble();
+ return MConstant::New(alloc, wasm::RawF64(out));
+ }
+
+ return this;
+}
+
+MDefinition*
+MToFloat32::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* input = getOperand(0);
+ if (input->isBox())
+ input = input->getOperand(0);
+
+ if (input->type() == MIRType::Float32)
+ return input;
+
+ // If x is a Float32, Float32(Double(x)) == x
+ if (!mustPreserveNaN_ &&
+ input->isToDouble() &&
+ input->toToDouble()->input()->type() == MIRType::Float32)
+ {
+ return input->toToDouble()->input();
+ }
+
+ if (input->isConstant() && input->toConstant()->isTypeRepresentableAsDouble()) {
+ float out = float(input->toConstant()->numberToDouble());
+ return MConstant::New(alloc, wasm::RawF32(out));
+ }
+
+ return this;
+}
+
+MDefinition*
+MToString::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* in = input();
+ if (in->isBox())
+ in = in->getOperand(0);
+
+ if (in->type() == MIRType::String)
+ return in;
+ return this;
+}
+
+MDefinition*
+MClampToUint8::foldsTo(TempAllocator& alloc)
+{
+ if (MConstant* inputConst = input()->maybeConstantValue()) {
+ if (inputConst->isTypeRepresentableAsDouble()) {
+ int32_t clamped = ClampDoubleToUint8(inputConst->numberToDouble());
+ return MConstant::New(alloc, Int32Value(clamped));
+ }
+ }
+ return this;
+}
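+
+// Informal example of the fold above: a constant input of 300 folds to the
+// constant 255 and -5 folds to 0, matching ClampDoubleToUint8's clamping to
+// the uint8 range.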
+
+bool
+MCompare::tryFoldEqualOperands(bool* result)
+{
+ if (lhs() != rhs())
+ return false;
+
+    // Intuitively one might think that if lhs == rhs,
+    // then we can just return true. (Or false for !==.)
+    // However NaN !== NaN is true! So we spend some time trying
+    // to eliminate this case.
+
+ if (jsop() != JSOP_STRICTEQ && jsop() != JSOP_STRICTNE)
+ return false;
+
+ if (compareType_ == Compare_Unknown)
+ return false;
+
+ MOZ_ASSERT(compareType_ == Compare_Undefined || compareType_ == Compare_Null ||
+ compareType_ == Compare_Boolean || compareType_ == Compare_Int32 ||
+ compareType_ == Compare_Int32MaybeCoerceBoth ||
+ compareType_ == Compare_Int32MaybeCoerceLHS ||
+ compareType_ == Compare_Int32MaybeCoerceRHS || compareType_ == Compare_UInt32 ||
+ compareType_ == Compare_Double || compareType_ == Compare_DoubleMaybeCoerceLHS ||
+ compareType_ == Compare_DoubleMaybeCoerceRHS || compareType_ == Compare_Float32 ||
+ compareType_ == Compare_String || compareType_ == Compare_StrictString ||
+ compareType_ == Compare_Object || compareType_ == Compare_Bitwise);
+
+ if (isDoubleComparison() || isFloat32Comparison()) {
+ if (!operandsAreNeverNaN())
+ return false;
+ }
+
+ lhs()->setGuardRangeBailoutsUnchecked();
+
+ *result = (jsop() == JSOP_STRICTEQ);
+ return true;
+}
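+
+// Informal example: |x === x| folds to true only when x is proven never to
+// be NaN (NaN === NaN is false), which is why double and float32 comparisons
+// require operandsAreNeverNaN() above.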
+
+bool
+MCompare::tryFoldTypeOf(bool* result)
+{
+ if (!lhs()->isTypeOf() && !rhs()->isTypeOf())
+ return false;
+ if (!lhs()->isConstant() && !rhs()->isConstant())
+ return false;
+
+ MTypeOf* typeOf = lhs()->isTypeOf() ? lhs()->toTypeOf() : rhs()->toTypeOf();
+ MConstant* constant = lhs()->isConstant() ? lhs()->toConstant() : rhs()->toConstant();
+
+ if (constant->type() != MIRType::String)
+ return false;
+
+ if (jsop() != JSOP_STRICTEQ && jsop() != JSOP_STRICTNE &&
+ jsop() != JSOP_EQ && jsop() != JSOP_NE)
+ {
+ return false;
+ }
+
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ if (constant->toString() == TypeName(JSTYPE_VOID, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::Undefined) &&
+ !typeOf->inputMaybeCallableOrEmulatesUndefined())
+ {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_BOOLEAN, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::Boolean)) {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_NUMBER, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::Int32) &&
+ !typeOf->input()->mightBeType(MIRType::Float32) &&
+ !typeOf->input()->mightBeType(MIRType::Double))
+ {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_STRING, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::String)) {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_SYMBOL, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::Symbol)) {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_OBJECT, names)) {
+ if (!typeOf->input()->mightBeType(MIRType::Object) &&
+ !typeOf->input()->mightBeType(MIRType::Null))
+ {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ } else if (constant->toString() == TypeName(JSTYPE_FUNCTION, names)) {
+ if (!typeOf->inputMaybeCallableOrEmulatesUndefined()) {
+ *result = (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE);
+ return true;
+ }
+ }
+
+ return false;
+}
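+
+// Informal example of the typeof folding above: for |typeof x === "number"|
+// where type inference proves x can never be an Int32, Float32 or Double,
+// the comparison folds to the constant false (and !== folds to true); if x
+// might still be a number, no folding happens.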
+
+bool
+MCompare::tryFold(bool* result)
+{
+ JSOp op = jsop();
+
+ if (tryFoldEqualOperands(result))
+ return true;
+
+ if (tryFoldTypeOf(result))
+ return true;
+
+ if (compareType_ == Compare_Null || compareType_ == Compare_Undefined) {
+ // The LHS is the value we want to test against null or undefined.
+ if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
+ if (lhs()->type() == inputType()) {
+ *result = (op == JSOP_STRICTEQ);
+ return true;
+ }
+ if (!lhs()->mightBeType(inputType())) {
+ *result = (op == JSOP_STRICTNE);
+ return true;
+ }
+ } else {
+ MOZ_ASSERT(op == JSOP_EQ || op == JSOP_NE);
+ if (IsNullOrUndefined(lhs()->type())) {
+ *result = (op == JSOP_EQ);
+ return true;
+ }
+ if (!lhs()->mightBeType(MIRType::Null) &&
+ !lhs()->mightBeType(MIRType::Undefined) &&
+ !(lhs()->mightBeType(MIRType::Object) && operandMightEmulateUndefined()))
+ {
+ *result = (op == JSOP_NE);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (compareType_ == Compare_Boolean) {
+ MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
+ MOZ_ASSERT(rhs()->type() == MIRType::Boolean);
+ MOZ_ASSERT(lhs()->type() != MIRType::Boolean, "Should use Int32 comparison");
+
+ if (!lhs()->mightBeType(MIRType::Boolean)) {
+ *result = (op == JSOP_STRICTNE);
+ return true;
+ }
+ return false;
+ }
+
+ if (compareType_ == Compare_StrictString) {
+ MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
+ MOZ_ASSERT(rhs()->type() == MIRType::String);
+ MOZ_ASSERT(lhs()->type() != MIRType::String, "Should use String comparison");
+
+ if (!lhs()->mightBeType(MIRType::String)) {
+ *result = (op == JSOP_STRICTNE);
+ return true;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+template <typename T>
+static bool
+FoldComparison(JSOp op, T left, T right)
+{
+ switch (op) {
+ case JSOP_LT: return left < right;
+ case JSOP_LE: return left <= right;
+ case JSOP_GT: return left > right;
+ case JSOP_GE: return left >= right;
+ case JSOP_STRICTEQ: case JSOP_EQ: return left == right;
+ case JSOP_STRICTNE: case JSOP_NE: return left != right;
+ default: MOZ_CRASH("Unexpected op.");
+ }
+}
+
+bool
+MCompare::evaluateConstantOperands(TempAllocator& alloc, bool* result)
+{
+ if (type() != MIRType::Boolean && type() != MIRType::Int32)
+ return false;
+
+ MDefinition* left = getOperand(0);
+ MDefinition* right = getOperand(1);
+
+ if (compareType() == Compare_Double) {
+        // Optimize |MCompare MConstant (MToDouble SomethingInInt32Range)|.
+        // In most cases the MToDouble was added because the constant is
+        // a double.
+        // e.g. |v < 9007199254740991|, where v is an int32, is always true.
+ if (!lhs()->isConstant() && !rhs()->isConstant())
+ return false;
+
+ MDefinition* operand = left->isConstant() ? right : left;
+ MConstant* constant = left->isConstant() ? left->toConstant() : right->toConstant();
+ MOZ_ASSERT(constant->type() == MIRType::Double);
+ double cte = constant->toDouble();
+
+ if (operand->isToDouble() && operand->getOperand(0)->type() == MIRType::Int32) {
+ bool replaced = false;
+ switch (jsop_) {
+ case JSOP_LT:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = !((constant == lhs()) ^ (cte < INT32_MIN));
+ replaced = true;
+ }
+ break;
+ case JSOP_LE:
+ if (constant == lhs()) {
+ if (cte > INT32_MAX || cte <= INT32_MIN) {
+ *result = (cte <= INT32_MIN);
+ replaced = true;
+ }
+ } else {
+ if (cte >= INT32_MAX || cte < INT32_MIN) {
+ *result = (cte >= INT32_MIN);
+ replaced = true;
+ }
+ }
+ break;
+ case JSOP_GT:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = !((constant == rhs()) ^ (cte < INT32_MIN));
+ replaced = true;
+ }
+ break;
+ case JSOP_GE:
+ if (constant == lhs()) {
+ if (cte >= INT32_MAX || cte < INT32_MIN) {
+ *result = (cte >= INT32_MAX);
+ replaced = true;
+ }
+ } else {
+ if (cte > INT32_MAX || cte <= INT32_MIN) {
+ *result = (cte <= INT32_MIN);
+ replaced = true;
+ }
+ }
+ break;
+ case JSOP_STRICTEQ: // Fall through.
+ case JSOP_EQ:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = false;
+ replaced = true;
+ }
+ break;
+ case JSOP_STRICTNE: // Fall through.
+ case JSOP_NE:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = true;
+ replaced = true;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected op.");
+ }
+ if (replaced) {
+ MLimitedTruncate* limit =
+ MLimitedTruncate::New(alloc, operand->getOperand(0), MDefinition::NoTruncate);
+ limit->setGuardUnchecked();
+ block()->insertBefore(this, limit);
+ return true;
+ }
+ }
+ }
+
+ if (!left->isConstant() || !right->isConstant())
+ return false;
+
+ MConstant* lhs = left->toConstant();
+ MConstant* rhs = right->toConstant();
+
+ // Fold away some String equality comparisons.
+ if (lhs->type() == MIRType::String && rhs->type() == MIRType::String) {
+ int32_t comp = 0; // Default to equal.
+ if (left != right)
+ comp = CompareAtoms(&lhs->toString()->asAtom(), &rhs->toString()->asAtom());
+ *result = FoldComparison(jsop_, comp, 0);
+ return true;
+ }
+
+ if (compareType_ == Compare_UInt32) {
+ *result = FoldComparison(jsop_, uint32_t(lhs->toInt32()), uint32_t(rhs->toInt32()));
+ return true;
+ }
+
+ if (compareType_ == Compare_Int64) {
+ *result = FoldComparison(jsop_, lhs->toInt64(), rhs->toInt64());
+ return true;
+ }
+
+ if (compareType_ == Compare_UInt64) {
+ *result = FoldComparison(jsop_, uint64_t(lhs->toInt64()), uint64_t(rhs->toInt64()));
+ return true;
+ }
+
+ if (lhs->isTypeRepresentableAsDouble() && rhs->isTypeRepresentableAsDouble()) {
+ *result = FoldComparison(jsop_, lhs->numberToDouble(), rhs->numberToDouble());
+ return true;
+ }
+
+ return false;
+}
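+
+// Informal examples of constant-operand evaluation: |"apple" < "banana"|
+// between two constant atoms is folded via CompareAtoms to true, and
+// |5 > 7| between two constant numbers is folded via FoldComparison to
+// false.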
+
+MDefinition*
+MCompare::foldsTo(TempAllocator& alloc)
+{
+ bool result;
+
+ if (tryFold(&result) || evaluateConstantOperands(alloc, &result)) {
+ if (type() == MIRType::Int32)
+ return MConstant::New(alloc, Int32Value(result));
+
+ MOZ_ASSERT(type() == MIRType::Boolean);
+ return MConstant::New(alloc, BooleanValue(result));
+ }
+
+ return this;
+}
+
+void
+MCompare::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ if (lhs->canProduceFloat32() && rhs->canProduceFloat32() && compareType_ == Compare_Double) {
+ compareType_ = Compare_Float32;
+ } else {
+ if (lhs->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, lhs, this);
+ if (rhs->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<1>(alloc, rhs, this);
+ }
+}
+
+void
+MCompare::filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
+ bool* filtersNull)
+{
+ *filtersNull = *filtersUndefined = false;
+ *subject = nullptr;
+
+ if (compareType() != Compare_Undefined && compareType() != Compare_Null)
+ return;
+
+ MOZ_ASSERT(jsop() == JSOP_STRICTNE || jsop() == JSOP_NE ||
+ jsop() == JSOP_STRICTEQ || jsop() == JSOP_EQ);
+
+ // JSOP_*NE only removes undefined/null from if/true branch
+ if (!trueBranch && (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE))
+ return;
+
+ // JSOP_*EQ only removes undefined/null from else/false branch
+ if (trueBranch && (jsop() == JSOP_STRICTEQ || jsop() == JSOP_EQ))
+ return;
+
+ if (jsop() == JSOP_STRICTEQ || jsop() == JSOP_STRICTNE) {
+ *filtersUndefined = compareType() == Compare_Undefined;
+ *filtersNull = compareType() == Compare_Null;
+ } else {
+ *filtersUndefined = *filtersNull = true;
+ }
+
+ *subject = lhs();
+}
+
+void
+MNot::cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints)
+{
+ MOZ_ASSERT(operandMightEmulateUndefined());
+
+ if (!getOperand(0)->maybeEmulatesUndefined(constraints))
+ markNoOperandEmulatesUndefined();
+}
+
+MDefinition*
+MNot::foldsTo(TempAllocator& alloc)
+{
+ // Fold if the input is constant
+ if (MConstant* inputConst = input()->maybeConstantValue()) {
+ bool b;
+ if (inputConst->valueToBoolean(&b)) {
+ if (type() == MIRType::Int32 || type() == MIRType::Int64)
+ return MConstant::New(alloc, Int32Value(!b));
+ return MConstant::New(alloc, BooleanValue(!b));
+ }
+ }
+
+    // If the operand of the Not is itself a Not, they cancel out. But we can't
+    // always convert Not(Not(x)) to x because that may lose the conversion to
+    // boolean. We can simplify Not(Not(Not(x))) to Not(x) though.
+ MDefinition* op = getOperand(0);
+ if (op->isNot()) {
+ MDefinition* opop = op->getOperand(0);
+ if (opop->isNot())
+ return opop;
+ }
+
+ // NOT of an undefined or null value is always true
+ if (input()->type() == MIRType::Undefined || input()->type() == MIRType::Null)
+ return MConstant::New(alloc, BooleanValue(true));
+
+ // NOT of a symbol is always false.
+ if (input()->type() == MIRType::Symbol)
+ return MConstant::New(alloc, BooleanValue(false));
+
+ // NOT of an object that can't emulate undefined is always false.
+ if (input()->type() == MIRType::Object && !operandMightEmulateUndefined())
+ return MConstant::New(alloc, BooleanValue(false));
+
+ return this;
+}
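+
+// Informal examples of the folds above: |!undefined| and |!null| become the
+// constant true, |!someSymbol| becomes false, and |!obj| becomes false when
+// obj cannot emulate undefined. |!!x| is not reduced to x (that would drop
+// the conversion to boolean), but |!!!x| is reduced to |!x|.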
+
+void
+MNot::trySpecializeFloat32(TempAllocator& alloc)
+{
+ MDefinition* in = input();
+ if (!in->canProduceFloat32() && in->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, in, this);
+}
+
+void
+MBeta::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+
+ out.printf(" ");
+ comparison_->dump(out);
+}
+
+bool
+MCreateThisWithTemplate::canRecoverOnBailout() const
+{
+ MOZ_ASSERT(templateObject()->is<PlainObject>() || templateObject()->is<UnboxedPlainObject>());
+ MOZ_ASSERT_IF(templateObject()->is<PlainObject>(),
+ !templateObject()->as<PlainObject>().denseElementsAreCopyOnWrite());
+ return true;
+}
+
+bool
+OperandIndexMap::init(TempAllocator& alloc, JSObject* templateObject)
+{
+ const UnboxedLayout& layout =
+ templateObject->as<UnboxedPlainObject>().layoutDontCheckGeneration();
+
+ const UnboxedLayout::PropertyVector& properties = layout.properties();
+ MOZ_ASSERT(properties.length() < 255);
+
+    // Allocate an array of indexes, where the entry at each field's offset
+    // corresponds to the index of the operand in the MObjectState instance.
+ if (!map.init(alloc, layout.size()))
+ return false;
+
+ // Reset all indexes to 0, which is an error code.
+ for (size_t i = 0; i < map.length(); i++)
+ map[i] = 0;
+
+ // Map the property offsets to the indexes of MObjectState operands.
+ uint8_t index = 1;
+ for (size_t i = 0; i < properties.length(); i++, index++)
+ map[properties[i].offset] = index;
+
+ return true;
+}
+
+MObjectState::MObjectState(MObjectState* state)
+ : numSlots_(state->numSlots_),
+ numFixedSlots_(state->numFixedSlots_),
+ operandIndex_(state->operandIndex_)
+{
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+}
+
+MObjectState::MObjectState(JSObject *templateObject, OperandIndexMap* operandIndex)
+{
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+
+ if (templateObject->is<NativeObject>()) {
+ NativeObject* nativeObject = &templateObject->as<NativeObject>();
+ numSlots_ = nativeObject->slotSpan();
+ numFixedSlots_ = nativeObject->numFixedSlots();
+ } else {
+ const UnboxedLayout& layout =
+ templateObject->as<UnboxedPlainObject>().layoutDontCheckGeneration();
+ // Same as UnboxedLayout::makeNativeGroup
+ numSlots_ = layout.properties().length();
+ numFixedSlots_ = gc::GetGCKindSlots(layout.getAllocKind());
+ }
+
+ operandIndex_ = operandIndex;
+}
+
+JSObject*
+MObjectState::templateObjectOf(MDefinition* obj)
+{
+ if (obj->isNewObject())
+ return obj->toNewObject()->templateObject();
+ else if (obj->isCreateThisWithTemplate())
+ return obj->toCreateThisWithTemplate()->templateObject();
+ else
+ return obj->toNewCallObject()->templateObject();
+
+ return nullptr;
+}
+
+bool
+MObjectState::init(TempAllocator& alloc, MDefinition* obj)
+{
+ if (!MVariadicInstruction::init(alloc, numSlots() + 1))
+ return false;
+ // +1, for the Object.
+ initOperand(0, obj);
+ return true;
+}
+
+bool
+MObjectState::initFromTemplateObject(TempAllocator& alloc, MDefinition* undefinedVal)
+{
+ JSObject* templateObject = templateObjectOf(object());
+
+    // Initialize all the slots of the object state with the value contained in
+    // the template object. This is needed to account for values which are baked
+    // into the template object and not visible to IonMonkey, such as the
+    // uninitialized-lexical magic value of call objects.
+ if (templateObject->is<UnboxedPlainObject>()) {
+ UnboxedPlainObject& unboxedObject = templateObject->as<UnboxedPlainObject>();
+ const UnboxedLayout& layout = unboxedObject.layoutDontCheckGeneration();
+ const UnboxedLayout::PropertyVector& properties = layout.properties();
+
+ for (size_t i = 0; i < properties.length(); i++) {
+ Value val = unboxedObject.getValue(properties[i], /* maybeUninitialized = */ true);
+ MDefinition *def = undefinedVal;
+ if (!val.isUndefined()) {
+ MConstant* ins = val.isObject() ?
+ MConstant::NewConstraintlessObject(alloc, &val.toObject()) :
+ MConstant::New(alloc, val);
+ block()->insertBefore(this, ins);
+ def = ins;
+ }
+ initSlot(i, def);
+ }
+ } else {
+ NativeObject& nativeObject = templateObject->as<NativeObject>();
+ MOZ_ASSERT(nativeObject.slotSpan() == numSlots());
+
+ for (size_t i = 0; i < numSlots(); i++) {
+ Value val = nativeObject.getSlot(i);
+ MDefinition *def = undefinedVal;
+ if (!val.isUndefined()) {
+ MConstant* ins = val.isObject() ?
+ MConstant::NewConstraintlessObject(alloc, &val.toObject()) :
+ MConstant::New(alloc, val);
+ block()->insertBefore(this, ins);
+ def = ins;
+ }
+ initSlot(i, def);
+ }
+ }
+ return true;
+}
+
+MObjectState*
+MObjectState::New(TempAllocator& alloc, MDefinition* obj)
+{
+ JSObject* templateObject = templateObjectOf(obj);
+ MOZ_ASSERT(templateObject, "Unexpected object creation.");
+
+ OperandIndexMap* operandIndex = nullptr;
+ if (templateObject->is<UnboxedPlainObject>()) {
+ operandIndex = new(alloc) OperandIndexMap;
+ if (!operandIndex || !operandIndex->init(alloc, templateObject))
+ return nullptr;
+ }
+
+ MObjectState* res = new(alloc) MObjectState(templateObject, operandIndex);
+ if (!res || !res->init(alloc, obj))
+ return nullptr;
+ return res;
+}
+
+MObjectState*
+MObjectState::Copy(TempAllocator& alloc, MObjectState* state)
+{
+ MObjectState* res = new(alloc) MObjectState(state);
+ if (!res || !res->init(alloc, state->object()))
+ return nullptr;
+ for (size_t i = 0; i < res->numSlots(); i++)
+ res->initSlot(i, state->getSlot(i));
+ return res;
+}
+
+MArrayState::MArrayState(MDefinition* arr)
+{
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+ numElements_ = arr->toNewArray()->length();
+}
+
+bool
+MArrayState::init(TempAllocator& alloc, MDefinition* obj, MDefinition* len)
+{
+ if (!MVariadicInstruction::init(alloc, numElements() + 2))
+ return false;
+ // +1, for the Array object.
+ initOperand(0, obj);
+ // +1, for the length value of the array.
+ initOperand(1, len);
+ return true;
+}
+
+MArrayState*
+MArrayState::New(TempAllocator& alloc, MDefinition* arr, MDefinition* undefinedVal,
+ MDefinition* initLength)
+{
+ MArrayState* res = new(alloc) MArrayState(arr);
+ if (!res || !res->init(alloc, arr, initLength))
+ return nullptr;
+ for (size_t i = 0; i < res->numElements(); i++)
+ res->initElement(i, undefinedVal);
+ return res;
+}
+
+MArrayState*
+MArrayState::Copy(TempAllocator& alloc, MArrayState* state)
+{
+ MDefinition* arr = state->array();
+ MDefinition* len = state->initializedLength();
+ MArrayState* res = new(alloc) MArrayState(arr);
+ if (!res || !res->init(alloc, arr, len))
+ return nullptr;
+ for (size_t i = 0; i < res->numElements(); i++)
+ res->initElement(i, state->getElement(i));
+ return res;
+}
+
+MNewArray::MNewArray(CompilerConstraintList* constraints, uint32_t length, MConstant* templateConst,
+ gc::InitialHeap initialHeap, jsbytecode* pc, bool vmCall)
+ : MUnaryInstruction(templateConst),
+ length_(length),
+ initialHeap_(initialHeap),
+ convertDoubleElements_(false),
+ pc_(pc),
+ vmCall_(vmCall)
+{
+ setResultType(MIRType::Object);
+ if (templateObject()) {
+ if (TemporaryTypeSet* types = MakeSingletonTypeSet(constraints, templateObject())) {
+ setResultTypeSet(types);
+ if (types->convertDoubleElements(constraints) == TemporaryTypeSet::AlwaysConvertToDoubles)
+ convertDoubleElements_ = true;
+ }
+ }
+}
+
+MDefinition::AliasType
+MLoadFixedSlot::mightAlias(const MDefinition* def) const
+{
+ if (def->isStoreFixedSlot()) {
+ const MStoreFixedSlot* store = def->toStoreFixedSlot();
+ if (store->slot() != slot())
+ return AliasType::NoAlias;
+ if (store->object() != object())
+ return AliasType::MayAlias;
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition*
+MLoadFixedSlot::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = foldsToStore(alloc))
+ return def;
+
+ return this;
+}
+
+MDefinition::AliasType
+MLoadFixedSlotAndUnbox::mightAlias(const MDefinition* def) const
+{
+ if (def->isStoreFixedSlot()) {
+ const MStoreFixedSlot* store = def->toStoreFixedSlot();
+ if (store->slot() != slot())
+ return AliasType::NoAlias;
+ if (store->object() != object())
+ return AliasType::MayAlias;
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition*
+MLoadFixedSlotAndUnbox::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = foldsToStore(alloc))
+ return def;
+
+ return this;
+}
+
+MDefinition*
+MWasmAddOffset::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* baseArg = base();
+ if (!baseArg->isConstant())
+ return this;
+
+ MOZ_ASSERT(baseArg->type() == MIRType::Int32);
+ CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32();
+
+ ptr += offset();
+
+ if (!ptr.isValid())
+ return this;
+
+ return MConstant::New(alloc, Int32Value(ptr.value()));
+}
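+
+// Informal example: with a constant base of 16 and offset() == 32 the add
+// folds to the constant 48; if base + offset() would overflow uint32 (the
+// CheckedInt becomes invalid), the instruction is kept so the normal
+// out-of-bounds handling still applies.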
+
+MDefinition::AliasType
+MAsmJSLoadHeap::mightAlias(const MDefinition* def) const
+{
+ if (def->isAsmJSStoreHeap()) {
+ const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap();
+ if (store->accessType() != accessType())
+ return AliasType::MayAlias;
+ if (!base()->isConstant() || !store->base()->isConstant())
+ return AliasType::MayAlias;
+ const MConstant* otherBase = store->base()->toConstant();
+ if (base()->toConstant()->equals(otherBase) && offset() == store->offset())
+ return AliasType::MayAlias;
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+bool
+MAsmJSLoadHeap::congruentTo(const MDefinition* ins) const
+{
+ if (!ins->isAsmJSLoadHeap())
+ return false;
+ const MAsmJSLoadHeap* load = ins->toAsmJSLoadHeap();
+ return load->accessType() == accessType() &&
+ load->offset() == offset() &&
+ congruentIfOperandsEqual(load);
+}
+
+MDefinition::AliasType
+MWasmLoadGlobalVar::mightAlias(const MDefinition* def) const
+{
+ if (def->isWasmStoreGlobalVar()) {
+ const MWasmStoreGlobalVar* store = def->toWasmStoreGlobalVar();
+ // Global variables can't alias each other or be type-reinterpreted.
+ return (store->globalDataOffset() == globalDataOffset_) ? AliasType::MayAlias :
+ AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+HashNumber
+MWasmLoadGlobalVar::valueHash() const
+{
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, globalDataOffset_);
+ return hash;
+}
+
+bool
+MWasmLoadGlobalVar::congruentTo(const MDefinition* ins) const
+{
+ if (ins->isWasmLoadGlobalVar())
+ return globalDataOffset_ == ins->toWasmLoadGlobalVar()->globalDataOffset_;
+ return false;
+}
+
+MDefinition*
+MWasmLoadGlobalVar::foldsTo(TempAllocator& alloc)
+{
+ if (!dependency() || !dependency()->isWasmStoreGlobalVar())
+ return this;
+
+ MWasmStoreGlobalVar* store = dependency()->toWasmStoreGlobalVar();
+ if (!store->block()->dominates(block()))
+ return this;
+
+ if (store->globalDataOffset() != globalDataOffset())
+ return this;
+
+ if (store->value()->type() != type())
+ return this;
+
+ return store->value();
+}
+
+MDefinition::AliasType
+MLoadSlot::mightAlias(const MDefinition* def) const
+{
+ if (def->isStoreSlot()) {
+ const MStoreSlot* store = def->toStoreSlot();
+ if (store->slot() != slot())
+ return AliasType::NoAlias;
+
+ if (store->slots() != slots())
+ return AliasType::MayAlias;
+
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+HashNumber
+MLoadSlot::valueHash() const
+{
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, slot_);
+ return hash;
+}
+
+MDefinition*
+MLoadSlot::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = foldsToStore(alloc))
+ return def;
+
+ return this;
+}
+
+void
+MLoadSlot::printOpcode(GenericPrinter& out) const
+{
+ MDefinition::printOpcode(out);
+ out.printf(" %d", slot());
+}
+
+void
+MStoreSlot::printOpcode(GenericPrinter& out) const
+{
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ getOperand(0)->printName(out);
+ out.printf(" %d ", slot());
+ getOperand(1)->printName(out);
+}
+
+MDefinition*
+MFunctionEnvironment::foldsTo(TempAllocator& alloc)
+{
+ if (!input()->isLambda())
+ return this;
+
+ return input()->toLambda()->environmentChain();
+}
+
+static bool
+AddIsANonZeroAdditionOf(MAdd* add, MDefinition* ins)
+{
+ if (add->lhs() != ins && add->rhs() != ins)
+ return false;
+ MDefinition* other = (add->lhs() == ins) ? add->rhs() : add->lhs();
+ if (!IsNumberType(other->type()))
+ return false;
+ if (!other->isConstant())
+ return false;
+ if (other->toConstant()->numberToDouble() == 0)
+ return false;
+ return true;
+}
+
+static bool
+DefinitelyDifferentValue(MDefinition* ins1, MDefinition* ins2)
+{
+ if (ins1 == ins2)
+ return false;
+
+ // Drop the MToInt32 added by the TypePolicy for double and float values.
+ if (ins1->isToInt32())
+ return DefinitelyDifferentValue(ins1->toToInt32()->input(), ins2);
+ if (ins2->isToInt32())
+ return DefinitelyDifferentValue(ins2->toToInt32()->input(), ins1);
+
+ // Ignore the bounds check, which in most cases will contain the same info.
+ if (ins1->isBoundsCheck())
+ return DefinitelyDifferentValue(ins1->toBoundsCheck()->index(), ins2);
+ if (ins2->isBoundsCheck())
+ return DefinitelyDifferentValue(ins2->toBoundsCheck()->index(), ins1);
+
+ // For constants check they are not equal.
+ if (ins1->isConstant() && ins2->isConstant())
+ return !ins1->toConstant()->equals(ins2->toConstant());
+
+ // Check if "ins1 = ins2 + cte", which would make both instructions
+ // have different values.
+ if (ins1->isAdd()) {
+ if (AddIsANonZeroAdditionOf(ins1->toAdd(), ins2))
+ return true;
+ }
+ if (ins2->isAdd()) {
+ if (AddIsANonZeroAdditionOf(ins2->toAdd(), ins1))
+ return true;
+ }
+
+ return false;
+}
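+
+// Informal example: for a store to elements[i + 1] and a load from
+// elements[i], DefinitelyDifferentValue sees a non-zero constant addition of
+// the same definition and proves the indexes differ, so the alias checks
+// below can report AliasType::NoAlias.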
+
+MDefinition::AliasType
+MLoadElement::mightAlias(const MDefinition* def) const
+{
+ if (def->isStoreElement()) {
+ const MStoreElement* store = def->toStoreElement();
+ if (store->index() != index()) {
+ if (DefinitelyDifferentValue(store->index(), index()))
+ return AliasType::NoAlias;
+ return AliasType::MayAlias;
+ }
+
+ if (store->elements() != elements())
+ return AliasType::MayAlias;
+
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition*
+MLoadElement::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = foldsToStore(alloc))
+ return def;
+
+ return this;
+}
+
+MDefinition::AliasType
+MLoadUnboxedObjectOrNull::mightAlias(const MDefinition* def) const
+{
+ if (def->isStoreUnboxedObjectOrNull()) {
+ const MStoreUnboxedObjectOrNull* store = def->toStoreUnboxedObjectOrNull();
+ if (store->index() != index()) {
+ if (DefinitelyDifferentValue(store->index(), index()))
+ return AliasType::NoAlias;
+ return AliasType::MayAlias;
+ }
+
+ if (store->elements() != elements())
+ return AliasType::MayAlias;
+
+ if (store->offsetAdjustment() != offsetAdjustment())
+ return AliasType::MayAlias;
+
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition*
+MLoadUnboxedObjectOrNull::foldsTo(TempAllocator& alloc)
+{
+ if (MDefinition* def = foldsToStore(alloc))
+ return def;
+
+ return this;
+}
+
+bool
+MGuardReceiverPolymorphic::congruentTo(const MDefinition* ins) const
+{
+ if (!ins->isGuardReceiverPolymorphic())
+ return false;
+
+ const MGuardReceiverPolymorphic* other = ins->toGuardReceiverPolymorphic();
+
+ if (numReceivers() != other->numReceivers())
+ return false;
+ for (size_t i = 0; i < numReceivers(); i++) {
+ if (receiver(i) != other->receiver(i))
+ return false;
+ }
+
+ return congruentIfOperandsEqual(ins);
+}
+
+void
+InlinePropertyTable::trimTo(const ObjectVector& targets, const BoolVector& choiceSet)
+{
+ for (size_t i = 0; i < targets.length(); i++) {
+ // If the target was inlined, don't erase the entry.
+ if (choiceSet[i])
+ continue;
+
+ JSFunction* target = &targets[i]->as<JSFunction>();
+
+ // Eliminate all entries containing the vetoed function from the map.
+ size_t j = 0;
+ while (j < numEntries()) {
+ if (entries_[j]->func == target)
+ entries_.erase(&entries_[j]);
+ else
+ j++;
+ }
+ }
+}
+
+void
+InlinePropertyTable::trimToTargets(const ObjectVector& targets)
+{
+ JitSpew(JitSpew_Inlining, "Got inlineable property cache with %d cases",
+ (int)numEntries());
+
+ size_t i = 0;
+ while (i < numEntries()) {
+ bool foundFunc = false;
+ for (size_t j = 0; j < targets.length(); j++) {
+ if (entries_[i]->func == targets[j]) {
+ foundFunc = true;
+ break;
+ }
+ }
+ if (!foundFunc)
+ entries_.erase(&(entries_[i]));
+ else
+ i++;
+ }
+
+ JitSpew(JitSpew_Inlining, "%d inlineable cases left after trimming to %d targets",
+ (int)numEntries(), (int)targets.length());
+}
+
+bool
+InlinePropertyTable::hasFunction(JSFunction* func) const
+{
+ for (size_t i = 0; i < numEntries(); i++) {
+ if (entries_[i]->func == func)
+ return true;
+ }
+ return false;
+}
+
+bool
+InlinePropertyTable::hasObjectGroup(ObjectGroup* group) const
+{
+ for (size_t i = 0; i < numEntries(); i++) {
+ if (entries_[i]->group == group)
+ return true;
+ }
+ return false;
+}
+
+TemporaryTypeSet*
+InlinePropertyTable::buildTypeSetForFunction(JSFunction* func) const
+{
+ LifoAlloc* alloc = GetJitContext()->temp->lifoAlloc();
+ TemporaryTypeSet* types = alloc->new_<TemporaryTypeSet>();
+ if (!types)
+ return nullptr;
+ for (size_t i = 0; i < numEntries(); i++) {
+ if (entries_[i]->func == func)
+ types->addType(TypeSet::ObjectType(entries_[i]->group), alloc);
+ }
+ return types;
+}
+
+bool
+InlinePropertyTable::appendRoots(MRootList& roots) const
+{
+ for (const Entry* entry : entries_) {
+ if (!entry->appendRoots(roots))
+ return false;
+ }
+ return true;
+}
+
+SharedMem<void*>
+MLoadTypedArrayElementStatic::base() const
+{
+ return someTypedArray_->as<TypedArrayObject>().viewDataEither();
+}
+
+size_t
+MLoadTypedArrayElementStatic::length() const
+{
+ return someTypedArray_->as<TypedArrayObject>().byteLength();
+}
+
+bool
+MLoadTypedArrayElementStatic::congruentTo(const MDefinition* ins) const
+{
+ if (!ins->isLoadTypedArrayElementStatic())
+ return false;
+ const MLoadTypedArrayElementStatic* other = ins->toLoadTypedArrayElementStatic();
+ if (offset() != other->offset())
+ return false;
+ if (needsBoundsCheck() != other->needsBoundsCheck())
+ return false;
+ if (accessType() != other->accessType())
+ return false;
+ if (base() != other->base())
+ return false;
+ return congruentIfOperandsEqual(other);
+}
+
+SharedMem<void*>
+MStoreTypedArrayElementStatic::base() const
+{
+ return someTypedArray_->as<TypedArrayObject>().viewDataEither();
+}
+
+bool
+MGetPropertyCache::allowDoubleResult() const
+{
+ if (!resultTypeSet())
+ return true;
+
+ return resultTypeSet()->hasType(TypeSet::DoubleType());
+}
+
+size_t
+MStoreTypedArrayElementStatic::length() const
+{
+ return someTypedArray_->as<TypedArrayObject>().byteLength();
+}
+
+MDefinition::AliasType
+MGetPropertyPolymorphic::mightAlias(const MDefinition* store) const
+{
+ // Allow hoisting this instruction if the store does not write to a
+ // slot read by this instruction.
+
+ if (!store->isStoreFixedSlot() && !store->isStoreSlot())
+ return AliasType::MayAlias;
+
+ for (size_t i = 0; i < numReceivers(); i++) {
+ const Shape* shape = this->shape(i);
+ if (!shape)
+ continue;
+ if (shape->slot() < shape->numFixedSlots()) {
+ // Fixed slot.
+ uint32_t slot = shape->slot();
+ if (store->isStoreFixedSlot() && store->toStoreFixedSlot()->slot() != slot)
+ continue;
+ if (store->isStoreSlot())
+ continue;
+ } else {
+ // Dynamic slot.
+ uint32_t slot = shape->slot() - shape->numFixedSlots();
+ if (store->isStoreSlot() && store->toStoreSlot()->slot() != slot)
+ continue;
+ if (store->isStoreFixedSlot())
+ continue;
+ }
+
+ return AliasType::MayAlias;
+ }
+
+ return AliasType::NoAlias;
+}
+
+bool
+MGetPropertyPolymorphic::appendRoots(MRootList& roots) const
+{
+ if (!roots.append(name_))
+ return false;
+
+ for (const PolymorphicEntry& entry : receivers_) {
+ if (!entry.appendRoots(roots))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+MSetPropertyPolymorphic::appendRoots(MRootList& roots) const
+{
+ if (!roots.append(name_))
+ return false;
+
+ for (const PolymorphicEntry& entry : receivers_) {
+ if (!entry.appendRoots(roots))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+MGuardReceiverPolymorphic::appendRoots(MRootList& roots) const
+{
+ for (const ReceiverGuard& guard : receivers_) {
+ if (!roots.append(guard))
+ return false;
+ }
+ return true;
+}
+
+bool
+MDispatchInstruction::appendRoots(MRootList& roots) const
+{
+ for (const Entry& entry : map_) {
+ if (!entry.appendRoots(roots))
+ return false;
+ }
+ return true;
+}
+
+bool
+MObjectGroupDispatch::appendRoots(MRootList& roots) const
+{
+ if (inlinePropertyTable_ && !inlinePropertyTable_->appendRoots(roots))
+ return false;
+ return MDispatchInstruction::appendRoots(roots);
+}
+
+bool
+MFunctionDispatch::appendRoots(MRootList& roots) const
+{
+ return MDispatchInstruction::appendRoots(roots);
+}
+
+bool
+MConstant::appendRoots(MRootList& roots) const
+{
+ switch (type()) {
+ case MIRType::String:
+ return roots.append(toString());
+ case MIRType::Symbol:
+ return roots.append(toSymbol());
+ case MIRType::Object:
+ return roots.append(&toObject());
+ case MIRType::Undefined:
+ case MIRType::Null:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicHole:
+ case MIRType::MagicIsConstructing:
+ case MIRType::MagicUninitializedLexical:
+ return true;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+}
+
+void
+MGetPropertyCache::setBlock(MBasicBlock* block)
+{
+ MDefinition::setBlock(block);
+ // Track where we started.
+ if (!location_.pc) {
+ location_.pc = block->trackedPc();
+ location_.script = block->info().script();
+ }
+}
+
+bool
+MGetPropertyCache::updateForReplacement(MDefinition* ins)
+{
+ MGetPropertyCache* other = ins->toGetPropertyCache();
+ location_.append(&other->location_);
+ return true;
+}
+
+MDefinition*
+MWasmUnsignedToDouble::foldsTo(TempAllocator& alloc)
+{
+ if (input()->isConstant() && input()->type() == MIRType::Int32)
+ return MConstant::New(alloc, DoubleValue(uint32_t(input()->toConstant()->toInt32())));
+
+ return this;
+}
+
+MDefinition*
+MWasmUnsignedToFloat32::foldsTo(TempAllocator& alloc)
+{
+ if (input()->isConstant() && input()->type() == MIRType::Int32) {
+ double dval = double(uint32_t(input()->toConstant()->toInt32()));
+ if (IsFloat32Representable(dval))
+ return MConstant::New(alloc, JS::Float32Value(float(dval)), MIRType::Float32);
+ }
+
+ return this;
+}
+
+MWasmCall*
+MWasmCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
+ const Args& args, MIRType resultType, uint32_t spIncrement, uint32_t tlsStackOffset,
+ MDefinition* tableIndex)
+{
+ MWasmCall* call = new(alloc) MWasmCall(desc, callee, spIncrement, tlsStackOffset);
+ call->setResultType(resultType);
+
+ if (!call->argRegs_.init(alloc, args.length()))
+ return nullptr;
+ for (size_t i = 0; i < call->argRegs_.length(); i++)
+ call->argRegs_[i] = args[i].reg;
+
+ if (!call->init(alloc, call->argRegs_.length() + (callee.isTable() ? 1 : 0)))
+ return nullptr;
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ for (size_t i = 0; i < call->argRegs_.length(); i++)
+ call->initOperand(i, args[i].def);
+ if (callee.isTable())
+ call->initOperand(call->argRegs_.length(), tableIndex);
+
+ return call;
+}
+
+MWasmCall*
+MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::SymbolicAddress builtin,
+ const ABIArg& instanceArg,
+ const Args& args,
+ MIRType resultType,
+ uint32_t spIncrement,
+ uint32_t tlsStackOffset)
+{
+ auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
+ MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
+ tlsStackOffset, nullptr);
+ if (!call)
+ return nullptr;
+
+ MOZ_ASSERT(instanceArg != ABIArg());
+ call->instanceArg_ = instanceArg;
+ return call;
+}
+
+void
+MSqrt::trySpecializeFloat32(TempAllocator& alloc) {
+ if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {
+ if (input()->type() == MIRType::Float32)
+ ConvertDefinitionToDouble<0>(alloc, input(), this);
+ return;
+ }
+
+ setResultType(MIRType::Float32);
+ specialization_ = MIRType::Float32;
+}
+
+MDefinition*
+MClz::foldsTo(TempAllocator& alloc)
+{
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+ int32_t n = c->toInt32();
+ if (n == 0)
+ return MConstant::New(alloc, Int32Value(32));
+ return MConstant::New(alloc, Int32Value(mozilla::CountLeadingZeroes32(n)));
+ }
+ int64_t n = c->toInt64();
+ if (n == 0)
+ return MConstant::NewInt64(alloc, int64_t(64));
+ return MConstant::NewInt64(alloc, int64_t(mozilla::CountLeadingZeroes64(n)));
+ }
+
+ return this;
+}
+
+MDefinition*
+MCtz::foldsTo(TempAllocator& alloc)
+{
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+            int32_t n = c->toInt32();
+ if (n == 0)
+ return MConstant::New(alloc, Int32Value(32));
+ return MConstant::New(alloc, Int32Value(mozilla::CountTrailingZeroes32(n)));
+ }
+ int64_t n = c->toInt64();
+ if (n == 0)
+ return MConstant::NewInt64(alloc, int64_t(64));
+ return MConstant::NewInt64(alloc, int64_t(mozilla::CountTrailingZeroes64(n)));
+ }
+
+ return this;
+}
+
+MDefinition*
+MPopcnt::foldsTo(TempAllocator& alloc)
+{
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+            int32_t n = c->toInt32();
+ return MConstant::New(alloc, Int32Value(mozilla::CountPopulation32(n)));
+ }
+ int64_t n = c->toInt64();
+ return MConstant::NewInt64(alloc, int64_t(mozilla::CountPopulation64(n)));
+ }
+
+ return this;
+}
+
+MDefinition*
+MBoundsCheck::foldsTo(TempAllocator& alloc)
+{
+ if (index()->isConstant() && length()->isConstant()) {
+ uint32_t len = length()->toConstant()->toInt32();
+ uint32_t idx = index()->toConstant()->toInt32();
+ if (idx + uint32_t(minimum()) < len && idx + uint32_t(maximum()) < len)
+ return index();
+ }
+
+ return this;
+}
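+
+// Informal example: for a constant index of 3 against a constant length of
+// 10, with minimum() == 0 and maximum() == 0, both adjusted indexes are in
+// range, so the bounds check folds away and the index is used directly.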
+
+MDefinition*
+MTableSwitch::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* op = getOperand(0);
+
+    // If we only have one successor, convert to a plain goto to the only
+    // successor. TableSwitch indices are numeric; other types will always
+    // go to the default case.
+ if (numSuccessors() == 1 || (op->type() != MIRType::Value && !IsNumberType(op->type())))
+ return MGoto::New(alloc, getDefault());
+
+ if (MConstant* opConst = op->maybeConstantValue()) {
+ if (op->type() == MIRType::Int32) {
+ int32_t i = opConst->toInt32() - low_;
+ MBasicBlock* target;
+ if (size_t(i) < numCases())
+ target = getCase(size_t(i));
+ else
+ target = getDefault();
+ MOZ_ASSERT(target);
+ return MGoto::New(alloc, target);
+ }
+ }
+
+ return this;
+}
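+
+// Informal example: a table switch on the constant 7 with low_ == 5 becomes
+// a plain goto to case index 2; a constant outside the case range becomes a
+// goto to the default block.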
+
+MDefinition*
+MArrayJoin::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* arr = array();
+
+ if (!arr->isStringSplit())
+ return this;
+
+ setRecoveredOnBailout();
+ if (arr->hasLiveDefUses()) {
+ setNotRecoveredOnBailout();
+ return this;
+ }
+
+ // The MStringSplit won't generate any code.
+ arr->setRecoveredOnBailout();
+
+ // We're replacing foo.split(bar).join(baz) by
+ // foo.replace(bar, baz). MStringSplit could be recovered by
+ // a bailout. As we are removing its last use, and its result
+ // could be captured by a resume point, this MStringSplit will
+ // be executed on the bailout path.
+ MDefinition* string = arr->toStringSplit()->string();
+ MDefinition* pattern = arr->toStringSplit()->separator();
+ MDefinition* replacement = sep();
+
+ MStringReplace *substr = MStringReplace::New(alloc, string, pattern, replacement);
+ substr->setFlatReplacement();
+ return substr;
+}
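+
+// Informal example of the rewrite above: |s.split("-").join("_")| becomes a
+// flat-replacement MStringReplace, i.e. every "-" in s is replaced with "_"
+// without materializing the intermediate array.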
+
+MDefinition*
+MGetFirstDollarIndex::foldsTo(TempAllocator& alloc)
+{
+ MDefinition* strArg = str();
+ if (!strArg->isConstant())
+ return this;
+
+ JSAtom* atom = &strArg->toConstant()->toString()->asAtom();
+ int32_t index = GetFirstDollarIndexRawFlat(atom);
+ return MConstant::New(alloc, Int32Value(index));
+}
+
+MConvertUnboxedObjectToNative*
+MConvertUnboxedObjectToNative::New(TempAllocator& alloc, MDefinition* obj, ObjectGroup* group)
+{
+ MConvertUnboxedObjectToNative* res = new(alloc) MConvertUnboxedObjectToNative(obj, group);
+
+ ObjectGroup* nativeGroup = group->unboxedLayout().nativeGroup();
+
+ // Make a new type set for the result of this instruction which replaces
+ // the input group with the native group we will convert it to.
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (types && !types->unknownObject()) {
+ TemporaryTypeSet* newTypes = types->cloneWithoutObjects(alloc.lifoAlloc());
+ if (newTypes) {
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+ if (key->unknownProperties() || !key->isGroup() || key->group() != group)
+ newTypes->addType(TypeSet::ObjectType(key), alloc.lifoAlloc());
+ else
+ newTypes->addType(TypeSet::ObjectType(nativeGroup), alloc.lifoAlloc());
+ }
+ res->setResultTypeSet(newTypes);
+ }
+ }
+
+ return res;
+}
+
+bool
+jit::ElementAccessIsDenseNative(CompilerConstraintList* constraints,
+ MDefinition* obj, MDefinition* id)
+{
+ if (obj->mightBeType(MIRType::String))
+ return false;
+
+ if (id->type() != MIRType::Int32 && id->type() != MIRType::Double)
+ return false;
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types)
+ return false;
+
+ // Typed arrays are native classes but do not have dense elements.
+ const Class* clasp = types->getKnownClass(constraints);
+ return clasp && clasp->isNative() && !IsTypedArrayClass(clasp);
+}
+
+JSValueType
+jit::UnboxedArrayElementType(CompilerConstraintList* constraints, MDefinition* obj,
+ MDefinition* id)
+{
+ if (obj->mightBeType(MIRType::String))
+ return JSVAL_TYPE_MAGIC;
+
+ if (id && id->type() != MIRType::Int32 && id->type() != MIRType::Double)
+ return JSVAL_TYPE_MAGIC;
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject())
+ return JSVAL_TYPE_MAGIC;
+
+ JSValueType elementType = JSVAL_TYPE_MAGIC;
+ for (unsigned i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties() || !key->isGroup())
+ return JSVAL_TYPE_MAGIC;
+
+ if (key->clasp() != &UnboxedArrayObject::class_)
+ return JSVAL_TYPE_MAGIC;
+
+ const UnboxedLayout &layout = key->group()->unboxedLayout();
+
+ if (layout.nativeGroup())
+ return JSVAL_TYPE_MAGIC;
+
+ if (elementType == layout.elementType() || elementType == JSVAL_TYPE_MAGIC)
+ elementType = layout.elementType();
+ else
+ return JSVAL_TYPE_MAGIC;
+
+ key->watchStateChangeForUnboxedConvertedToNative(constraints);
+ }
+
+ return elementType;
+}
+
+bool
+jit::ElementAccessIsTypedArray(CompilerConstraintList* constraints,
+ MDefinition* obj, MDefinition* id,
+ Scalar::Type* arrayType)
+{
+ if (obj->mightBeType(MIRType::String))
+ return false;
+
+ if (id->type() != MIRType::Int32 && id->type() != MIRType::Double)
+ return false;
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types)
+ return false;
+
+ *arrayType = types->getTypedArrayType(constraints);
+ return *arrayType != Scalar::MaxTypedArrayViewType;
+}
+
+bool
+jit::ElementAccessIsPacked(CompilerConstraintList* constraints, MDefinition* obj)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ return types && !types->hasObjectFlags(constraints, OBJECT_FLAG_NON_PACKED);
+}
+
+bool
+jit::ElementAccessMightBeCopyOnWrite(CompilerConstraintList* constraints, MDefinition* obj)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ return !types || types->hasObjectFlags(constraints, OBJECT_FLAG_COPY_ON_WRITE);
+}
+
+bool
+jit::ElementAccessMightBeFrozen(CompilerConstraintList* constraints, MDefinition* obj)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ return !types || types->hasObjectFlags(constraints, OBJECT_FLAG_FROZEN);
+}
+
+bool
+jit::ElementAccessHasExtraIndexedProperty(IonBuilder* builder, MDefinition* obj)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+
+ if (!types || types->hasObjectFlags(builder->constraints(), OBJECT_FLAG_LENGTH_OVERFLOW))
+ return true;
+
+ return TypeCanHaveExtraIndexedProperties(builder, types);
+}
+
+MIRType
+jit::DenseNativeElementType(CompilerConstraintList* constraints, MDefinition* obj)
+{
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ MIRType elementType = MIRType::None;
+ unsigned count = types->getObjectCount();
+
+ for (unsigned i = 0; i < count; i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties())
+ return MIRType::None;
+
+ HeapTypeSetKey elementTypes = key->property(JSID_VOID);
+
+ MIRType type = elementTypes.knownMIRType(constraints);
+ if (type == MIRType::None)
+ return MIRType::None;
+
+ if (elementType == MIRType::None)
+ elementType = type;
+ else if (elementType != type)
+ return MIRType::None;
+ }
+
+ return elementType;
+}
+
+static BarrierKind
+PropertyReadNeedsTypeBarrier(CompilerConstraintList* constraints,
+ TypeSet::ObjectKey* key, PropertyName* name,
+ TypeSet* observed)
+{
+ // If the object being read from has types for the property which haven't
+ // been observed at this access site, the read could produce a new type and
+ // a barrier is needed. Note that this only covers reads from properties
+ // which are accounted for by type information, i.e. native data properties
+ // and elements.
+ //
+ // We also need a barrier if the object is a proxy, because then all bets
+ // are off, just as if it has unknown properties.
+ if (key->unknownProperties() || observed->empty() ||
+ key->clasp()->isProxy())
+ {
+ return BarrierKind::TypeSet;
+ }
+
+ if (!name && IsTypedArrayClass(key->clasp())) {
+ Scalar::Type arrayType = Scalar::Type(key->clasp() - &TypedArrayObject::classes[0]);
+ MIRType type = MIRTypeForTypedArrayRead(arrayType, true);
+ if (observed->mightBeMIRType(type))
+ return BarrierKind::NoBarrier;
+ return BarrierKind::TypeSet;
+ }
+
+ jsid id = name ? NameToId(name) : JSID_VOID;
+ HeapTypeSetKey property = key->property(id);
+ if (property.maybeTypes()) {
+ if (!TypeSetIncludes(observed, MIRType::Value, property.maybeTypes())) {
+ // If all possible objects have been observed, we don't have to
+ // guard on the specific object types.
+ if (property.maybeTypes()->objectsAreSubset(observed)) {
+ property.freeze(constraints);
+ return BarrierKind::TypeTagOnly;
+ }
+ return BarrierKind::TypeSet;
+ }
+ }
+
+ // Type information for global objects is not required to reflect the
+ // initial 'undefined' value for properties, in particular global
+ // variables declared with 'var'. Until the property is assigned a value
+ // other than undefined, a barrier is required.
+ if (key->isSingleton()) {
+ JSObject* obj = key->singleton();
+ if (name && CanHaveEmptyPropertyTypesForOwnProperty(obj) &&
+ (!property.maybeTypes() || property.maybeTypes()->empty()))
+ {
+ return BarrierKind::TypeSet;
+ }
+ }
+
+ property.freeze(constraints);
+ return BarrierKind::NoBarrier;
+}
+
+static bool
+ObjectSubsumes(TypeSet::ObjectKey* first, TypeSet::ObjectKey* second)
+{
+ if (first->isSingleton() ||
+ second->isSingleton() ||
+ first->clasp() != second->clasp() ||
+ first->unknownProperties() ||
+ second->unknownProperties())
+ {
+ return false;
+ }
+
+ if (first->clasp() == &ArrayObject::class_) {
+ HeapTypeSetKey firstElements = first->property(JSID_VOID);
+ HeapTypeSetKey secondElements = second->property(JSID_VOID);
+
+ return firstElements.maybeTypes() && secondElements.maybeTypes() &&
+ firstElements.maybeTypes()->equals(secondElements.maybeTypes());
+ }
+
+ if (first->clasp() == &UnboxedArrayObject::class_) {
+ return first->group()->unboxedLayout().elementType() ==
+ second->group()->unboxedLayout().elementType();
+ }
+
+ return false;
+}
+
+BarrierKind
+jit::PropertyReadNeedsTypeBarrier(JSContext* propertycx,
+ CompilerConstraintList* constraints,
+ TypeSet::ObjectKey* key, PropertyName* name,
+ TemporaryTypeSet* observed, bool updateObserved)
+{
+ if (!updateObserved)
+ return PropertyReadNeedsTypeBarrier(constraints, key, name, observed);
+
+ // If this access has never executed, try to add types to the observed set
+ // according to any property which exists on the object or its prototype.
+ if (observed->empty() && name) {
+ JSObject* obj;
+ if (key->isSingleton())
+ obj = key->singleton();
+ else
+ obj = key->proto().isDynamic() ? nullptr : key->proto().toObjectOrNull();
+
+ while (obj) {
+ if (!obj->getClass()->isNative())
+ break;
+
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(obj);
+ if (propertycx)
+ key->ensureTrackedProperty(propertycx, NameToId(name));
+
+ if (!key->unknownProperties()) {
+ HeapTypeSetKey property = key->property(NameToId(name));
+ if (property.maybeTypes()) {
+ TypeSet::TypeList types;
+ if (!property.maybeTypes()->enumerateTypes(&types))
+ break;
+ if (types.length() == 1) {
+ // Note: the return value here is ignored.
+ observed->addType(types[0], GetJitContext()->temp->lifoAlloc());
+ break;
+ }
+ }
+ }
+
+ obj = obj->staticPrototype();
+ }
+ }
+
+ // If any objects which could be observed are similar to ones that have
+ // already been observed, add them to the observed type set.
+ if (!key->unknownProperties()) {
+ HeapTypeSetKey property = key->property(name ? NameToId(name) : JSID_VOID);
+
+ if (property.maybeTypes() && !property.maybeTypes()->unknownObject()) {
+ for (size_t i = 0; i < property.maybeTypes()->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = property.maybeTypes()->getObject(i);
+ if (!key || observed->unknownObject())
+ continue;
+
+ for (size_t j = 0; j < observed->getObjectCount(); j++) {
+ TypeSet::ObjectKey* observedKey = observed->getObject(j);
+ if (observedKey && ObjectSubsumes(observedKey, key)) {
+ // Note: the return value here is ignored.
+ observed->addType(TypeSet::ObjectType(key),
+ GetJitContext()->temp->lifoAlloc());
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return PropertyReadNeedsTypeBarrier(constraints, key, name, observed);
+}
+
+BarrierKind
+jit::PropertyReadNeedsTypeBarrier(JSContext* propertycx,
+ CompilerConstraintList* constraints,
+ MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed)
+{
+ if (observed->unknown())
+ return BarrierKind::NoBarrier;
+
+ TypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject())
+ return BarrierKind::TypeSet;
+
+ BarrierKind res = BarrierKind::NoBarrier;
+
+ bool updateObserved = types->getObjectCount() == 1;
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ if (TypeSet::ObjectKey* key = types->getObject(i)) {
+ BarrierKind kind = PropertyReadNeedsTypeBarrier(propertycx, constraints, key, name,
+ observed, updateObserved);
+ if (kind == BarrierKind::TypeSet)
+ return BarrierKind::TypeSet;
+
+ if (kind == BarrierKind::TypeTagOnly) {
+ MOZ_ASSERT(res == BarrierKind::NoBarrier || res == BarrierKind::TypeTagOnly);
+ res = BarrierKind::TypeTagOnly;
+ } else {
+ MOZ_ASSERT(kind == BarrierKind::NoBarrier);
+ }
+ }
+ }
+
+ return res;
+}
+
+ResultWithOOM<BarrierKind>
+jit::PropertyReadOnPrototypeNeedsTypeBarrier(IonBuilder* builder,
+ MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed)
+{
+ if (observed->unknown())
+ return ResultWithOOM<BarrierKind>::ok(BarrierKind::NoBarrier);
+
+ TypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject())
+ return ResultWithOOM<BarrierKind>::ok(BarrierKind::TypeSet);
+
+ BarrierKind res = BarrierKind::NoBarrier;
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+ while (true) {
+ if (!builder->alloc().ensureBallast())
+ return ResultWithOOM<BarrierKind>::fail();
+ if (!key->hasStableClassAndProto(builder->constraints()))
+ return ResultWithOOM<BarrierKind>::ok(BarrierKind::TypeSet);
+ if (!key->proto().isObject())
+ break;
+ JSObject* proto = builder->checkNurseryObject(key->proto().toObject());
+ key = TypeSet::ObjectKey::get(proto);
+ BarrierKind kind = PropertyReadNeedsTypeBarrier(builder->constraints(),
+ key, name, observed);
+ if (kind == BarrierKind::TypeSet)
+ return ResultWithOOM<BarrierKind>::ok(BarrierKind::TypeSet);
+
+ if (kind == BarrierKind::TypeTagOnly) {
+ MOZ_ASSERT(res == BarrierKind::NoBarrier || res == BarrierKind::TypeTagOnly);
+ res = BarrierKind::TypeTagOnly;
+ } else {
+ MOZ_ASSERT(kind == BarrierKind::NoBarrier);
+ }
+ }
+ }
+
+ return ResultWithOOM<BarrierKind>::ok(res);
+}
+
+bool
+jit::PropertyReadIsIdempotent(CompilerConstraintList* constraints,
+ MDefinition* obj, PropertyName* name)
+{
+ // Determine if reading a property from obj is likely to be idempotent.
+
+ TypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject())
+ return false;
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ if (TypeSet::ObjectKey* key = types->getObject(i)) {
+ if (key->unknownProperties())
+ return false;
+
+ // Check if the property has been reconfigured or is a getter.
+ HeapTypeSetKey property = key->property(NameToId(name));
+ if (property.nonData(constraints))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void
+jit::AddObjectsForPropertyRead(MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed)
+{
+ // Add objects to observed which *could* be observed by reading name from obj,
+ // to hopefully avoid unnecessary type barriers and code invalidations.
+
+ LifoAlloc* alloc = GetJitContext()->temp->lifoAlloc();
+
+ TemporaryTypeSet* types = obj->resultTypeSet();
+ if (!types || types->unknownObject()) {
+ observed->addType(TypeSet::AnyObjectType(), alloc);
+ return;
+ }
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties()) {
+ observed->addType(TypeSet::AnyObjectType(), alloc);
+ return;
+ }
+
+ jsid id = name ? NameToId(name) : JSID_VOID;
+ HeapTypeSetKey property = key->property(id);
+ HeapTypeSet* types = property.maybeTypes();
+ if (!types)
+ continue;
+
+ if (types->unknownObject()) {
+ observed->addType(TypeSet::AnyObjectType(), alloc);
+ return;
+ }
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ if (TypeSet::ObjectKey* key = types->getObject(i))
+ observed->addType(TypeSet::ObjectType(key), alloc);
+ }
+ }
+}
+
+static bool
+PrototypeHasIndexedProperty(IonBuilder* builder, JSObject* obj)
+{
+ do {
+ TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(builder->checkNurseryObject(obj));
+ if (ClassCanHaveExtraProperties(key->clasp()))
+ return true;
+ if (key->unknownProperties())
+ return true;
+ HeapTypeSetKey index = key->property(JSID_VOID);
+ if (index.nonData(builder->constraints()) || index.isOwnProperty(builder->constraints()))
+ return true;
+ obj = obj->staticPrototype();
+ } while (obj);
+
+ return false;
+}
+
+// Whether Array.prototype, or an object on its proto chain, has an indexed property.
+bool
+jit::ArrayPrototypeHasIndexedProperty(IonBuilder* builder, JSScript* script)
+{
+ if (JSObject* proto = script->global().maybeGetArrayPrototype())
+ return PrototypeHasIndexedProperty(builder, proto);
+ return true;
+}
+
+// Whether obj or any of its prototypes have an indexed property.
+bool
+jit::TypeCanHaveExtraIndexedProperties(IonBuilder* builder, TemporaryTypeSet* types)
+{
+ const Class* clasp = types->getKnownClass(builder->constraints());
+
+ // Note: typed arrays have indexed properties not accounted for by type
+ // information, though these are all in bounds and will be accounted for
+ // by JIT paths.
+ if (!clasp || (ClassCanHaveExtraProperties(clasp) && !IsTypedArrayClass(clasp)))
+ return true;
+
+ if (types->hasObjectFlags(builder->constraints(), OBJECT_FLAG_SPARSE_INDEXES))
+ return true;
+
+ JSObject* proto;
+ if (!types->getCommonPrototype(builder->constraints(), &proto))
+ return true;
+
+ if (!proto)
+ return false;
+
+ return PrototypeHasIndexedProperty(builder, proto);
+}
+
+static bool
+PropertyTypeIncludes(TempAllocator& alloc, HeapTypeSetKey property,
+ MDefinition* value, MIRType implicitType)
+{
+ // If implicitType is not MIRType::None, it is an additional type which the
+ // property implicitly includes. In this case, make a new type set which
+ // explicitly contains the type.
+ TypeSet* types = property.maybeTypes();
+ if (implicitType != MIRType::None) {
+ TypeSet::Type newType = TypeSet::PrimitiveType(ValueTypeFromMIRType(implicitType));
+ if (types)
+ types = types->clone(alloc.lifoAlloc());
+ else
+ types = alloc.lifoAlloc()->new_<TemporaryTypeSet>();
+ if (!types)
+ return false;
+ types->addType(newType, alloc.lifoAlloc());
+ }
+
+ return TypeSetIncludes(types, value->type(), value->resultTypeSet());
+}
+
+static bool
+TryAddTypeBarrierForWrite(TempAllocator& alloc, CompilerConstraintList* constraints,
+ MBasicBlock* current, TemporaryTypeSet* objTypes,
+ PropertyName* name, MDefinition** pvalue, MIRType implicitType)
+{
+ // Return whether pvalue was modified to include a type barrier ensuring
+ // that writing the value to objTypes/id will not require changing type
+ // information.
+
+ // All objects in the set must have the same types for name. Otherwise, we
+ // could bail out without subsequently triggering a type change that
+ // invalidates the compiled code.
+ Maybe<HeapTypeSetKey> aggregateProperty;
+
+ for (size_t i = 0; i < objTypes->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = objTypes->getObject(i);
+ if (!key)
+ continue;
+
+ if (key->unknownProperties())
+ return false;
+
+ jsid id = name ? NameToId(name) : JSID_VOID;
+ HeapTypeSetKey property = key->property(id);
+ if (!property.maybeTypes() || property.couldBeConstant(constraints))
+ return false;
+
+ if (PropertyTypeIncludes(alloc, property, *pvalue, implicitType))
+ return false;
+
+ // This freeze is not required for correctness, but ensures that we
+ // will recompile if the property types change and the barrier can
+ // potentially be removed.
+ property.freeze(constraints);
+
+ if (!aggregateProperty) {
+ aggregateProperty.emplace(property);
+ } else {
+ if (!aggregateProperty->maybeTypes()->equals(property.maybeTypes()))
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(aggregateProperty);
+
+ MIRType propertyType = aggregateProperty->knownMIRType(constraints);
+ switch (propertyType) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::String:
+ case MIRType::Symbol: {
+ // The property is a particular primitive type, guard by unboxing the
+ // value before the write.
+ if (!(*pvalue)->mightBeType(propertyType)) {
+ // The value's type does not match the property type. Just do a VM
+ // call as it will always trigger invalidation of the compiled code.
+ MOZ_ASSERT_IF((*pvalue)->type() != MIRType::Value, (*pvalue)->type() != propertyType);
+ return false;
+ }
+ MInstruction* ins = MUnbox::New(alloc, *pvalue, propertyType, MUnbox::Fallible);
+ current->add(ins);
+ *pvalue = ins;
+ return true;
+ }
+ default:;
+ }
+
+ if ((*pvalue)->type() != MIRType::Value)
+ return false;
+
+ TemporaryTypeSet* types = aggregateProperty->maybeTypes()->clone(alloc.lifoAlloc());
+ if (!types)
+ return false;
+
+ // If all possible objects can be stored without a barrier, we don't have to
+ // guard on the specific object types.
+ BarrierKind kind = BarrierKind::TypeSet;
+ if ((*pvalue)->resultTypeSet() && (*pvalue)->resultTypeSet()->objectsAreSubset(types))
+ kind = BarrierKind::TypeTagOnly;
+
+ MInstruction* ins = MMonitorTypes::New(alloc, *pvalue, types, kind);
+ current->add(ins);
+ return true;
+}
+
+static MInstruction*
+AddGroupGuard(TempAllocator& alloc, MBasicBlock* current, MDefinition* obj,
+ TypeSet::ObjectKey* key, bool bailOnEquality)
+{
+ MInstruction* guard;
+
+ if (key->isGroup()) {
+ guard = MGuardObjectGroup::New(alloc, obj, key->group(), bailOnEquality,
+ Bailout_ObjectIdentityOrTypeGuard);
+ } else {
+ MConstant* singletonConst = MConstant::NewConstraintlessObject(alloc, key->singleton());
+ current->add(singletonConst);
+ guard = MGuardObjectIdentity::New(alloc, obj, singletonConst, bailOnEquality);
+ }
+
+ current->add(guard);
+
+ // For now, never move object group / identity guards.
+ guard->setNotMovable();
+
+ return guard;
+}
+
+// Whether value can be written to property without changing type information.
+bool
+jit::CanWriteProperty(TempAllocator& alloc, CompilerConstraintList* constraints,
+ HeapTypeSetKey property, MDefinition* value,
+ MIRType implicitType /* = MIRType::None */)
+{
+ if (property.couldBeConstant(constraints))
+ return false;
+ return PropertyTypeIncludes(alloc, property, value, implicitType);
+}
+
+bool
+jit::PropertyWriteNeedsTypeBarrier(TempAllocator& alloc, CompilerConstraintList* constraints,
+ MBasicBlock* current, MDefinition** pobj,
+ PropertyName* name, MDefinition** pvalue,
+ bool canModify, MIRType implicitType)
+{
+ // If any value being written is not reflected in the type information for
+ // objects which obj could represent, a type barrier is needed when writing
+ // the value. As with PropertyReadNeedsTypeBarrier, this only applies to
+ // properties that are accounted for by type information, i.e. normal data
+ // properties and elements.
+
+ TemporaryTypeSet* types = (*pobj)->resultTypeSet();
+ if (!types || types->unknownObject())
+ return true;
+
+ // If all of the objects being written to have property types which already
+ // reflect the value, no barrier at all is needed. Additionally, if all
+ // objects being written to have the same types for the property, and those
+ // types do *not* reflect the value, add a type barrier for the value.
+
+ bool success = true;
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key || key->unknownProperties())
+ continue;
+
+ // TI doesn't track TypedArray indexes and should never insert a type
+ // barrier for them.
+ if (!name && IsTypedArrayClass(key->clasp()))
+ continue;
+
+ jsid id = name ? NameToId(name) : JSID_VOID;
+ HeapTypeSetKey property = key->property(id);
+ if (!CanWriteProperty(alloc, constraints, property, *pvalue, implicitType)) {
+ // Either pobj or pvalue needs to be modified to filter out the
+ // types which the value could have but are not in the property,
+ // or a VM call is required. A VM call is always required if pobj
+ // and pvalue cannot be modified.
+ if (!canModify)
+ return true;
+ success = TryAddTypeBarrierForWrite(alloc, constraints, current, types, name, pvalue,
+ implicitType);
+ break;
+ }
+ }
+
+ // Perform additional filtering to make sure that any unboxed property
+ // being written can accommodate the value.
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (key && key->isGroup() && key->group()->maybeUnboxedLayout()) {
+ const UnboxedLayout& layout = key->group()->unboxedLayout();
+ if (name) {
+ const UnboxedLayout::Property* property = layout.lookup(name);
+ if (property && !CanStoreUnboxedType(alloc, property->type, *pvalue))
+ return true;
+ } else {
+ if (layout.isArray() && !CanStoreUnboxedType(alloc, layout.elementType(), *pvalue))
+ return true;
+ }
+ }
+ }
+
+ if (success)
+ return false;
+
+ // If all of the objects except one have property types which reflect the
+ // value, and the remaining object has no types at all for the property,
+ // add a guard that the object does not have that remaining object's type.
+
+ if (types->getObjectCount() <= 1)
+ return true;
+
+ TypeSet::ObjectKey* excluded = nullptr;
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ TypeSet::ObjectKey* key = types->getObject(i);
+ if (!key || key->unknownProperties())
+ continue;
+ if (!name && IsTypedArrayClass(key->clasp()))
+ continue;
+
+ jsid id = name ? NameToId(name) : JSID_VOID;
+ HeapTypeSetKey property = key->property(id);
+ if (CanWriteProperty(alloc, constraints, property, *pvalue, implicitType))
+ continue;
+
+ if ((property.maybeTypes() && !property.maybeTypes()->empty()) || excluded)
+ return true;
+ excluded = key;
+ }
+
+ MOZ_ASSERT(excluded);
+
+ // If the excluded object is a group with an unboxed layout, make sure it
+ // does not have a corresponding native group. Objects with the native
+ // group might appear even though they are not in the type set.
+ if (excluded->isGroup()) {
+ if (UnboxedLayout* layout = excluded->group()->maybeUnboxedLayout()) {
+ if (layout->nativeGroup())
+ return true;
+ excluded->watchStateChangeForUnboxedConvertedToNative(constraints);
+ }
+ }
+
+ *pobj = AddGroupGuard(alloc, current, *pobj, excluded, /* bailOnEquality = */ true);
+ return false;
+}
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
new file mode 100644
index 000000000..dcb08c317
--- /dev/null
+++ b/js/src/jit/MIR.h
@@ -0,0 +1,14267 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Everything needed to build actual MIR instructions: the actual opcodes and
+ * instructions, the instruction interface, and use chains.
+ */
+
+#ifndef jit_MIR_h
+#define jit_MIR_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MacroForEach.h"
+
+#include "builtin/SIMD.h"
+#include "jit/AtomicOp.h"
+#include "jit/BaselineIC.h"
+#include "jit/FixedList.h"
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MOpcodes.h"
+#include "jit/TypedObjectPrediction.h"
+#include "jit/TypePolicy.h"
+#include "vm/ArrayObject.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/SharedMem.h"
+#include "vm/TypedArrayCommon.h"
+#include "vm/UnboxedObject.h"
+
+// Undo windows.h damage on Win64
+#undef MemoryBarrier
+
+namespace js {
+
+class StringObject;
+
+namespace jit {
+
+class BaselineInspector;
+class Range;
+
+template <typename T>
+struct ResultWithOOM {
+ T value;
+ bool oom;
+
+ static ResultWithOOM<T> ok(T val) {
+ return { val, false };
+ }
+ static ResultWithOOM<T> fail() {
+ return { T(), true };
+ }
+};
+
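+// For illustration, callers unpack a ResultWithOOM by checking the oom flag
+// before using the value, e.g. (a sketch using a helper defined in this patch):
+//
+//   ResultWithOOM<BarrierKind> res =
+//       PropertyReadOnPrototypeNeedsTypeBarrier(builder, obj, name, observed);
+//   if (res.oom)
+//       return false; // propagate the OOM
+//   BarrierKind kind = res.value;
+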
+static inline
+MIRType MIRTypeFromValue(const js::Value& vp)
+{
+ if (vp.isDouble())
+ return MIRType::Double;
+ if (vp.isMagic()) {
+ switch (vp.whyMagic()) {
+ case JS_OPTIMIZED_ARGUMENTS:
+ return MIRType::MagicOptimizedArguments;
+ case JS_OPTIMIZED_OUT:
+ return MIRType::MagicOptimizedOut;
+ case JS_ELEMENTS_HOLE:
+ return MIRType::MagicHole;
+ case JS_IS_CONSTRUCTING:
+ return MIRType::MagicIsConstructing;
+ case JS_UNINITIALIZED_LEXICAL:
+ return MIRType::MagicUninitializedLexical;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unexpected magic constant");
+ }
+ }
+ return MIRTypeFromValueType(vp.extractNonDoubleType());
+}
+
+// If simdType is one of the SIMD types supported by Ion, set mirType to the
+// corresponding MIRType, and return true.
+//
+// If simdType is not supported by Ion, return false.
+static inline MOZ_MUST_USE
+bool MaybeSimdTypeToMIRType(SimdType type, MIRType* mirType)
+{
+ switch (type) {
+ case SimdType::Uint32x4:
+ case SimdType::Int32x4: *mirType = MIRType::Int32x4; return true;
+ case SimdType::Uint16x8:
+ case SimdType::Int16x8: *mirType = MIRType::Int16x8; return true;
+ case SimdType::Uint8x16:
+ case SimdType::Int8x16: *mirType = MIRType::Int8x16; return true;
+ case SimdType::Float32x4: *mirType = MIRType::Float32x4; return true;
+ case SimdType::Bool32x4: *mirType = MIRType::Bool32x4; return true;
+ case SimdType::Bool16x8: *mirType = MIRType::Bool16x8; return true;
+ case SimdType::Bool8x16: *mirType = MIRType::Bool8x16; return true;
+ default: return false;
+ }
+}
+
+// Convert a SimdType to the corresponding MIRType, or crash.
+//
+// Note that this is not an injective mapping: SimdType has signed and unsigned
+// integer types that map to the same MIRType.
+static inline
+MIRType SimdTypeToMIRType(SimdType type)
+{
+ MIRType ret = MIRType::None;
+ JS_ALWAYS_TRUE(MaybeSimdTypeToMIRType(type, &ret));
+ return ret;
+}
+
+static inline
+SimdType MIRTypeToSimdType(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32x4: return SimdType::Int32x4;
+ case MIRType::Int16x8: return SimdType::Int16x8;
+ case MIRType::Int8x16: return SimdType::Int8x16;
+ case MIRType::Float32x4: return SimdType::Float32x4;
+ case MIRType::Bool32x4: return SimdType::Bool32x4;
+ case MIRType::Bool16x8: return SimdType::Bool16x8;
+ case MIRType::Bool8x16: return SimdType::Bool8x16;
+ default: break;
+ }
+ MOZ_CRASH("unhandled MIRType");
+}
+
+// Get the boolean MIRType with the same shape as type.
+static inline
+MIRType MIRTypeToBooleanSimdType(MIRType type)
+{
+ return SimdTypeToMIRType(GetBooleanSimdType(MIRTypeToSimdType(type)));
+}
+
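+// For instance (illustrative): SimdTypeToMIRType(SimdType::Uint32x4) and
+// SimdTypeToMIRType(SimdType::Int32x4) both yield MIRType::Int32x4, while
+// MIRTypeToBooleanSimdType(MIRType::Float32x4) yields MIRType::Bool32x4.
+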
+#define MIR_FLAG_LIST(_) \
+ _(InWorklist) \
+ _(EmittedAtUses) \
+ _(Commutative) \
+ _(Movable) /* Allow passes like LICM to move this instruction */ \
+ _(Lowered) /* (Debug only) has a virtual register */ \
+ _(Guard) /* Not removable if uses == 0 */ \
+ \
+ /* Flag an instruction to be considered as a Guard if the instruction
+ * bails out on some inputs.
+ *
+ * Some optimizations can replace an instruction, and leave its operands
+ * unused. When the type information of the operand got used as a
+ * predicate of the transformation, then we have to flag the operands as
+ * GuardRangeBailouts.
+ *
+ * This flag prevents further optimization of instructions, which
+ * might remove the run-time checks (bailout conditions) used as a
+ * predicate of the previous transformation.
+ */ \
+ _(GuardRangeBailouts) \
+ \
+ /* Keep the flagged instruction in resume points and do not substitute this
+ * instruction by an UndefinedValue. This might be used by call inlining
+ * when a function argument is not used by the inlined instructions.
+ */ \
+ _(ImplicitlyUsed) \
+ \
+ /* The instruction has been marked dead for lazy removal from resume
+ * points.
+ */ \
+ _(Unused) \
+ \
+ /* When a branch is removed, the uses of multiple instructions are removed.
+ * The removal of branches is based on hypotheses. These hypotheses might
+ * fail, in which case we need to bail out of the current code.
+ *
+ * When we implement a destructive optimization, we need to consider the
+ * failing cases and the fact that we might resume execution in a branch
+ * which was removed by the compiler. As such, a destructive optimization
+ * needs to take removed branches into account.
+ *
+ * In order to let destructive optimizations know about removed branches, we
+ * have to annotate instructions with the UseRemoved flag. This flag
+ * annotates instructions which were used in removed branches.
+ */ \
+ _(UseRemoved) \
+ \
+ /* Marks if the current instruction should go to the bailout paths instead
+ * of producing code as part of the control flow. This flag can only be set
+ * on instructions which are only used by ResumePoint or by other flagged
+ * instructions.
+ */ \
+ _(RecoveredOnBailout) \
+ \
+ /* Some instructions might represent an object, but the memory of these
+ * objects might be incomplete if we have not recovered all the stores which
+ * were supposed to happen before. This flag is used to annotate
+ * instructions which might return a pointer to a memory area which is not
+ * yet fully initialized. This flag is used to ensure that stores are
+ * executed before returning the value.
+ */ \
+ _(IncompleteObject) \
+ \
+ /* The current instruction has been discarded from the MIR graph. This is useful
+ * when we want to iterate over resume points and instructions, while
+ * handling instructions which are discarded without reporting to the
+ * iterator.
+ */ \
+ _(Discarded)
+
+class MDefinition;
+class MInstruction;
+class MBasicBlock;
+class MNode;
+class MUse;
+class MPhi;
+class MIRGraph;
+class MResumePoint;
+class MControlInstruction;
+
+// Represents a use of a node.
+class MUse : public TempObject, public InlineListNode<MUse>
+{
+ // Grant access to setProducerUnchecked.
+ friend class MDefinition;
+ friend class MPhi;
+
+ MDefinition* producer_; // MDefinition that is being used.
+ MNode* consumer_; // The node that is using this operand.
+
+ // Low-level unchecked edit method for replaceAllUsesWith and
+ // MPhi::removeOperand. This doesn't update use lists!
+ // replaceAllUsesWith and MPhi::removeOperand do that manually.
+ void setProducerUnchecked(MDefinition* producer) {
+ MOZ_ASSERT(consumer_);
+ MOZ_ASSERT(producer_);
+ MOZ_ASSERT(producer);
+ producer_ = producer;
+ }
+
+ public:
+ // Default constructor for use in vectors.
+ MUse()
+ : producer_(nullptr), consumer_(nullptr)
+ { }
+
+ // Move constructor for use in vectors. When an MUse is moved, it stays
+ // in its containing use list.
+ MUse(MUse&& other)
+ : InlineListNode<MUse>(mozilla::Move(other)),
+ producer_(other.producer_), consumer_(other.consumer_)
+ { }
+
+ // Construct an MUse initialized with |producer| and |consumer|.
+ MUse(MDefinition* producer, MNode* consumer)
+ {
+ initUnchecked(producer, consumer);
+ }
+
+ // Set this use, which was previously clear.
+ inline void init(MDefinition* producer, MNode* consumer);
+ // Like init, but works even when the use contains uninitialized data.
+ inline void initUnchecked(MDefinition* producer, MNode* consumer);
+ // Like initUnchecked, but set the producer to nullptr.
+ inline void initUncheckedWithoutProducer(MNode* consumer);
+ // Set this use, which was not previously clear.
+ inline void replaceProducer(MDefinition* producer);
+ // Clear this use.
+ inline void releaseProducer();
+
+ MDefinition* producer() const {
+ MOZ_ASSERT(producer_ != nullptr);
+ return producer_;
+ }
+ bool hasProducer() const {
+ return producer_ != nullptr;
+ }
+ MNode* consumer() const {
+ MOZ_ASSERT(consumer_ != nullptr);
+ return consumer_;
+ }
+
+#ifdef DEBUG
+ // Return the operand index of this MUse in its consumer. This is DEBUG-only
+ // as normal code should instead call indexOf on the casted consumer
+ // directly, to allow it to be devirtualized and inlined.
+ size_t index() const;
+#endif
+};
+
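+// As an illustration of the producer/consumer relationship: for a binary
+// instruction |add| with operands |lhs| and |rhs|, the |add| node owns two
+// MUse entries; one has producer() == lhs, the other producer() == rhs, both
+// have consumer() == add, and each entry is linked into the use chain of its
+// producer.
+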
+typedef InlineList<MUse>::iterator MUseIterator;
+
+// A node is an entry in the MIR graph. It has two kinds:
+// MInstruction: an instruction which appears in the IR stream.
+// MResumePoint: a list of instructions that correspond to the state of the
+// interpreter/Baseline stack.
+//
+// Nodes can hold references to MDefinitions. Each MDefinition has a list of
+// nodes holding such a reference (its use chain).
+class MNode : public TempObject
+{
+ protected:
+ MBasicBlock* block_; // Containing basic block.
+
+ public:
+ enum Kind {
+ Definition,
+ ResumePoint
+ };
+
+ MNode()
+ : block_(nullptr)
+ { }
+
+ explicit MNode(MBasicBlock* block)
+ : block_(block)
+ { }
+
+ virtual Kind kind() const = 0;
+
+ // Returns the definition at a given operand.
+ virtual MDefinition* getOperand(size_t index) const = 0;
+ virtual size_t numOperands() const = 0;
+ virtual size_t indexOf(const MUse* u) const = 0;
+
+ bool isDefinition() const {
+ return kind() == Definition;
+ }
+ bool isResumePoint() const {
+ return kind() == ResumePoint;
+ }
+ MBasicBlock* block() const {
+ return block_;
+ }
+ MBasicBlock* caller() const;
+
+ // Sets an already set operand, updating use information. If you're looking
+ // for setOperand, this is probably what you want.
+ virtual void replaceOperand(size_t index, MDefinition* operand) = 0;
+
+ // Resets the operand to an uninitialized state, breaking the link
+ // with the previous operand's producer.
+ void releaseOperand(size_t index) {
+ getUseFor(index)->releaseProducer();
+ }
+ bool hasOperand(size_t index) const {
+ return getUseFor(index)->hasProducer();
+ }
+
+ inline MDefinition* toDefinition();
+ inline MResumePoint* toResumePoint();
+
+ virtual MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const;
+
+ virtual void dump(GenericPrinter& out) const = 0;
+ virtual void dump() const = 0;
+
+ protected:
+ // Need visibility on getUseFor to avoid O(n^2) complexity.
+ friend void AssertBasicGraphCoherency(MIRGraph& graph);
+
+ // Gets the MUse corresponding to given operand.
+ virtual MUse* getUseFor(size_t index) = 0;
+ virtual const MUse* getUseFor(size_t index) const = 0;
+};
+
+class AliasSet {
+ private:
+ uint32_t flags_;
+
+ public:
+ enum Flag {
+ None_ = 0,
+ ObjectFields = 1 << 0, // shape, class, slots, length etc.
+ Element = 1 << 1, // A Value member of obj->elements or
+ // a typed object.
+ UnboxedElement = 1 << 2, // An unboxed scalar or reference member of
+ // a typed array, typed object, or unboxed
+ // object.
+ DynamicSlot = 1 << 3, // A Value member of obj->slots.
+ FixedSlot = 1 << 4, // A Value member of obj->fixedSlots().
+ DOMProperty = 1 << 5, // A DOM property
+ FrameArgument = 1 << 6, // An argument kept on the stack frame
+ WasmGlobalVar = 1 << 7, // An asm.js/wasm global var
+ WasmHeap = 1 << 8, // An asm.js/wasm heap load
+ TypedArrayLength = 1 << 9,// A typed array's length
+ Last = TypedArrayLength,
+ Any = Last | (Last - 1),
+
+ NumCategories = 10,
+
+ // Indicates load or store.
+ Store_ = 1 << 31
+ };
+
+ static_assert((1 << NumCategories) - 1 == Any,
+ "NumCategories must include all flags present in Any");
+
+ explicit AliasSet(uint32_t flags)
+ : flags_(flags)
+ {
+ }
+
+ public:
+ static const char* Name(size_t flag);
+
+ inline bool isNone() const {
+ return flags_ == None_;
+ }
+ uint32_t flags() const {
+ return flags_ & Any;
+ }
+ inline bool isStore() const {
+ return !!(flags_ & Store_);
+ }
+ inline bool isLoad() const {
+ return !isStore() && !isNone();
+ }
+ inline AliasSet operator |(const AliasSet& other) const {
+ return AliasSet(flags_ | other.flags_);
+ }
+ inline AliasSet operator&(const AliasSet& other) const {
+ return AliasSet(flags_ & other.flags_);
+ }
+ static AliasSet None() {
+ return AliasSet(None_);
+ }
+ static AliasSet Load(uint32_t flags) {
+ MOZ_ASSERT(flags && !(flags & Store_));
+ return AliasSet(flags);
+ }
+ static AliasSet Store(uint32_t flags) {
+ MOZ_ASSERT(flags && !(flags & Store_));
+ return AliasSet(flags | Store_);
+ }
+ static uint32_t BoxedOrUnboxedElements(JSValueType type) {
+ return (type == JSVAL_TYPE_MAGIC) ? Element : UnboxedElement;
+ }
+};
+
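+// Illustrative examples of how alias sets are typically used (a sketch, not an
+// exhaustive list): a load of a dense array element reports
+// AliasSet::Load(AliasSet::Element), a write to a fixed slot reports
+// AliasSet::Store(AliasSet::FixedSlot), and two sets can be intersected with
+// operator& to test whether they may refer to the same memory category.
+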
+typedef Vector<MDefinition*, 6, JitAllocPolicy> MDefinitionVector;
+typedef Vector<MInstruction*, 6, JitAllocPolicy> MInstructionVector;
+typedef Vector<MDefinition*, 1, JitAllocPolicy> MStoreVector;
+
+class StoreDependency : public TempObject
+{
+ MStoreVector all_;
+
+ public:
+ explicit StoreDependency(TempAllocator& alloc)
+ : all_(alloc)
+ { }
+
+ MOZ_MUST_USE bool init(MDefinitionVector& all) {
+ if (!all_.appendAll(all))
+ return false;
+ return true;
+ }
+
+ MStoreVector& get() {
+ return all_;
+ }
+};
+
+// An MDefinition is an SSA name.
+class MDefinition : public MNode
+{
+ friend class MBasicBlock;
+
+ public:
+ enum Opcode {
+# define DEFINE_OPCODES(op) Op_##op,
+ MIR_OPCODE_LIST(DEFINE_OPCODES)
+# undef DEFINE_OPCODES
+ Op_Invalid
+ };
+
+ private:
+ InlineList<MUse> uses_; // Use chain.
+ uint32_t id_; // Instruction ID, which after block re-ordering
+ // is sorted within a basic block.
+ uint32_t flags_; // Bit flags.
+ Range* range_; // Any computed range for this def.
+ MIRType resultType_; // Representation of result type.
+ TemporaryTypeSet* resultTypeSet_; // Optional refinement of the result type.
+ union {
+ MDefinition* loadDependency_; // Implicit dependency (store, call, etc.) of this
+ StoreDependency* storeDependency_; // instruction. Used by alias analysis, GVN and LICM.
+ uint32_t virtualRegister_; // Used by lowering to map definitions to virtual registers.
+ };
+
+ // Track bailouts by storing the current pc in the MIR instruction. Also
+ // used for profiling and keeping track of what the last known pc was.
+ const BytecodeSite* trackedSite_;
+
+ private:
+ enum Flag {
+ None = 0,
+# define DEFINE_FLAG(flag) flag,
+ MIR_FLAG_LIST(DEFINE_FLAG)
+# undef DEFINE_FLAG
+ Total
+ };
+
+ bool hasFlags(uint32_t flags) const {
+ return (flags_ & flags) == flags;
+ }
+ void removeFlags(uint32_t flags) {
+ flags_ &= ~flags;
+ }
+ void setFlags(uint32_t flags) {
+ flags_ |= flags;
+ }
+
+ protected:
+ virtual void setBlock(MBasicBlock* block) {
+ block_ = block;
+ }
+
+ static HashNumber addU32ToHash(HashNumber hash, uint32_t data);
+
+ public:
+ MDefinition()
+ : id_(0),
+ flags_(0),
+ range_(nullptr),
+ resultType_(MIRType::None),
+ resultTypeSet_(nullptr),
+ loadDependency_(nullptr),
+ trackedSite_(nullptr)
+ { }
+
+ // Copying a definition leaves the list of uses and the block empty.
+ explicit MDefinition(const MDefinition& other)
+ : id_(0),
+ flags_(other.flags_),
+ range_(other.range_),
+ resultType_(other.resultType_),
+ resultTypeSet_(other.resultTypeSet_),
+ loadDependency_(other.loadDependency_),
+ trackedSite_(other.trackedSite_)
+ { }
+
+ virtual Opcode op() const = 0;
+ virtual const char* opName() const = 0;
+ virtual void accept(MDefinitionVisitor* visitor) = 0;
+
+ void printName(GenericPrinter& out) const;
+ static void PrintOpcodeName(GenericPrinter& out, Opcode op);
+ virtual void printOpcode(GenericPrinter& out) const;
+ void dump(GenericPrinter& out) const override;
+ void dump() const override;
+ void dumpLocation(GenericPrinter& out) const;
+ void dumpLocation() const;
+
+ // For LICM.
+ virtual bool neverHoist() const { return false; }
+
+ // Also for LICM. Test whether this definition is likely to be a call, which
+ // would clobber all or many of the floating-point registers, such that
+ // hoisting floating-point constants out of containing loops isn't likely to
+ // be worthwhile.
+ virtual bool possiblyCalls() const { return false; }
+
+ void setTrackedSite(const BytecodeSite* site) {
+ MOZ_ASSERT(site);
+ trackedSite_ = site;
+ }
+ const BytecodeSite* trackedSite() const {
+ return trackedSite_;
+ }
+ jsbytecode* trackedPc() const {
+ return trackedSite_ ? trackedSite_->pc() : nullptr;
+ }
+ InlineScriptTree* trackedTree() const {
+ return trackedSite_ ? trackedSite_->tree() : nullptr;
+ }
+ TrackedOptimizations* trackedOptimizations() const {
+ return trackedSite_ && trackedSite_->hasOptimizations()
+ ? trackedSite_->optimizations()
+ : nullptr;
+ }
+
+ JSScript* profilerLeaveScript() const {
+ return trackedTree()->outermostCaller()->script();
+ }
+
+ jsbytecode* profilerLeavePc() const {
+ // If this is in a top-level function, use the pc directly.
+ if (trackedTree()->isOutermostCaller())
+ return trackedPc();
+
+ // Walk up the InlineScriptTree chain to find the top-most callPC
+ InlineScriptTree* curTree = trackedTree();
+ InlineScriptTree* callerTree = curTree->caller();
+ while (!callerTree->isOutermostCaller()) {
+ curTree = callerTree;
+ callerTree = curTree->caller();
+ }
+
+ // Return the callPc of the topmost inlined script.
+ return curTree->callerPc();
+ }
+
+ // Return the range of this value, *before* any bailout checks. Contrast
+ // this with the type() method, and the Range constructor which takes an
+ // MDefinition*, which describe the value *after* any bailout checks.
+ //
+ // Warning: Range analysis removes bit-operations such as '| 0' at the end
+ // of its transformations. Using this function to analyse operands after the
+ // truncate phase of range analysis will lead to errors. Instead, one should
+ // override collectRangeInfoPreTrunc() to set the right flags, which depend
+ // on the range of the inputs.
+ Range* range() const {
+ MOZ_ASSERT(type() != MIRType::None);
+ return range_;
+ }
+ void setRange(Range* range) {
+ MOZ_ASSERT(type() != MIRType::None);
+ range_ = range;
+ }
+
+ virtual HashNumber valueHash() const;
+ virtual bool congruentTo(const MDefinition* ins) const {
+ return false;
+ }
+ bool congruentIfOperandsEqual(const MDefinition* ins) const;
+ virtual MDefinition* foldsTo(TempAllocator& alloc);
+ virtual void analyzeEdgeCasesForward();
+ virtual void analyzeEdgeCasesBackward();
+
+ // When a floating-point value is used by nodes which would prefer to
+ // receive integer inputs, we may be able to help by computing our result
+ // into an integer directly.
+ //
+ // A value can be truncated in 4 different ways:
+ // 1. Ignore Infinities (x / 0 --> 0).
+ // 2. Ignore overflow (INT_MIN / -1 == (INT_MAX + 1) --> INT_MIN)
+ // 3. Ignore negative zeros. (-0 --> 0)
+ // 4. Ignore remainder. (3 / 4 --> 0)
+ //
+ // Indirect truncation is used to represent that we are interested in the
+ // truncated result, but only if it can safely flow into operations which
+ // are computed modulo 2^32, such as (2) and (3). Infinities are not safe,
+ // as they would have absorbed other math operations. Remainders are not
+ // safe, as fractions can be scaled up by multiplication.
+ //
+ // Division is a particularly interesting node here because it covers all 4
+ // cases even when its own operands are integers.
+ //
+ // Note that these enum values are ordered from least value-modifying to
+ // most value-modifying, and code relies on this ordering.
+ enum TruncateKind {
+ // No correction.
+ NoTruncate = 0,
+ // An integer is desired, but we can't skip bailout checks.
+ TruncateAfterBailouts = 1,
+ // The value will be truncated after some arithmetic (see above).
+ IndirectTruncate = 2,
+ // Direct and infallible truncation to int32.
+ Truncate = 3
+ };
+
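+ // E.g. (illustrative) the preferences of several consumers can be combined
+ // by taking the minimum of their TruncateKind values, since a smaller value
+ // is always the safer, less value-modifying choice.
+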
+ static const char * TruncateKindString(TruncateKind kind) {
+ switch(kind) {
+ case NoTruncate:
+ return "NoTruncate";
+ case TruncateAfterBailouts:
+ return "TruncateAfterBailouts";
+ case IndirectTruncate:
+ return "IndirectTruncate";
+ case Truncate:
+ return "Truncate";
+ default:
+ MOZ_CRASH("Unknown truncate kind.");
+ }
+ }
+
+ // |needTruncation| records the truncation kind of the results, such that it
+ // can be used to truncate the operands of this instruction. If the
+ // |needTruncation| function returns true, then the |truncate| function is
+ // called on the same instruction to mutate it, for example by updating the
+ // return type, the range and the specialization of the instruction.
+ virtual bool needTruncation(TruncateKind kind);
+ virtual void truncate();
+
+ // Determine what kind of truncate this node prefers for the operand at the
+ // given index.
+ virtual TruncateKind operandTruncateKind(size_t index) const;
+
+ // Compute an absolute or symbolic range for the value of this node.
+ virtual void computeRange(TempAllocator& alloc) {
+ }
+
+ // Collect information from the pre-truncated ranges.
+ virtual void collectRangeInfoPreTrunc() {
+ }
+
+ MNode::Kind kind() const override {
+ return MNode::Definition;
+ }
+
+ uint32_t id() const {
+ MOZ_ASSERT(block_);
+ return id_;
+ }
+ void setId(uint32_t id) {
+ id_ = id;
+ }
+
+#define FLAG_ACCESSOR(flag) \
+ bool is##flag() const {\
+ return hasFlags(1 << flag);\
+ }\
+ void set##flag() {\
+ MOZ_ASSERT(!hasFlags(1 << flag));\
+ setFlags(1 << flag);\
+ }\
+ void setNot##flag() {\
+ MOZ_ASSERT(hasFlags(1 << flag));\
+ removeFlags(1 << flag);\
+ }\
+ void set##flag##Unchecked() {\
+ setFlags(1 << flag);\
+ } \
+ void setNot##flag##Unchecked() {\
+ removeFlags(1 << flag);\
+ }
+
+ MIR_FLAG_LIST(FLAG_ACCESSOR)
+#undef FLAG_ACCESSOR
+
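+ // As an illustration, the expansion for the Movable flag defined in
+ // MIR_FLAG_LIST yields isMovable(), setMovable(), setNotMovable(),
+ // setMovableUnchecked() and setNotMovableUnchecked(), all operating on the
+ // corresponding bit of flags_.
+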
+ // Return the type of this value. This may be speculative, and enforced
+ // dynamically with the use of bailout checks. If all the bailout checks
+ // pass, the value will have this type.
+ //
+ // Unless this is an MUrsh that has bailouts disabled, which, as a special
+ // case, may return a value in (INT32_MAX,UINT32_MAX] even when its type()
+ // is MIRType::Int32.
+ MIRType type() const {
+ return resultType_;
+ }
+
+ TemporaryTypeSet* resultTypeSet() const {
+ return resultTypeSet_;
+ }
+ bool emptyResultTypeSet() const;
+
+ bool mightBeType(MIRType type) const {
+ MOZ_ASSERT(type != MIRType::Value);
+ MOZ_ASSERT(type != MIRType::ObjectOrNull);
+
+ if (type == this->type())
+ return true;
+
+ if (this->type() == MIRType::ObjectOrNull)
+ return type == MIRType::Object || type == MIRType::Null;
+
+ if (this->type() == MIRType::Value)
+ return !resultTypeSet() || resultTypeSet()->mightBeMIRType(type);
+
+ return false;
+ }
+
+ bool mightBeMagicType() const;
+
+ bool maybeEmulatesUndefined(CompilerConstraintList* constraints);
+
+ // Float32 specialization operations (see big comment in IonAnalysis before the Float32
+ // specialization algorithm).
+ virtual bool isFloat32Commutative() const { return false; }
+ virtual bool canProduceFloat32() const { return false; }
+ virtual bool canConsumeFloat32(MUse* use) const { return false; }
+ virtual void trySpecializeFloat32(TempAllocator& alloc) {}
+#ifdef DEBUG
+ // Used during the pass that checks that Float32 flow into valid MDefinitions
+ virtual bool isConsistentFloat32Use(MUse* use) const {
+ return type() == MIRType::Float32 || canConsumeFloat32(use);
+ }
+#endif
+
+ // Returns the beginning of this definition's use chain.
+ MUseIterator usesBegin() const {
+ return uses_.begin();
+ }
+
+ // Returns the end of this definition's use chain.
+ MUseIterator usesEnd() const {
+ return uses_.end();
+ }
+
+ bool canEmitAtUses() const {
+ return !isEmittedAtUses();
+ }
+
+ // Removes the given use from this definition's use chain.
+ void removeUse(MUse* use) {
+ uses_.remove(use);
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // Number of uses of this instruction. This function is only available
+ // in DEBUG mode since it requires traversing the list. Most users should
+ // use hasUses() or hasOneUse() instead.
+ size_t useCount() const;
+
+ // Number of uses of this instruction (only counting MDefinitions, ignoring
+ // MResumePoints). This function is only available in DEBUG mode since it
+ // requires traversing the list. Most users should use hasUses() or
+ // hasOneUse() instead.
+ size_t defUseCount() const;
+#endif
+
+ // Test whether this MDefinition has exactly one use.
+ bool hasOneUse() const;
+
+ // Test whether this MDefinition has exactly one use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasOneDefUse() const;
+
+ // Test whether this MDefinition has at least one use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasDefUses() const;
+
+ // Test whether this MDefinition has at least one non-recovered use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasLiveDefUses() const;
+
+ bool hasUses() const {
+ return !uses_.empty();
+ }
+
+ void addUse(MUse* use) {
+ MOZ_ASSERT(use->producer() == this);
+ uses_.pushFront(use);
+ }
+ void addUseUnchecked(MUse* use) {
+ MOZ_ASSERT(use->producer() == this);
+ uses_.pushFrontUnchecked(use);
+ }
+ void replaceUse(MUse* old, MUse* now) {
+ MOZ_ASSERT(now->producer() == this);
+ uses_.replace(old, now);
+ }
+
+ // Replace the current instruction by a dominating instruction |dom| in all
+ // uses of the current instruction.
+ void replaceAllUsesWith(MDefinition* dom);
+
+ // Like replaceAllUsesWith, but doesn't set UseRemoved on |this|'s operands.
+ void justReplaceAllUsesWith(MDefinition* dom);
+
+ // Like justReplaceAllUsesWith, but doesn't replace its own use to the
+ // dominating instruction (which would introduce a circular dependency).
+ void justReplaceAllUsesWithExcept(MDefinition* dom);
+
+ // Replace the current instruction by an optimized-out constant in all uses
+ // of the current instruction. Note that optimized-out constants should not
+ // be observed, and thus should not flow into any computation.
+ MOZ_MUST_USE bool optimizeOutAllUses(TempAllocator& alloc);
+
+ // Replace the current instruction by a dominating instruction |dom| in all
+ // instructions, but keep the current instruction for resume points and
+ // instructions which are recovered on bailouts.
+ void replaceAllLiveUsesWith(MDefinition* dom);
+
+ // Mark this instruction as having replaced all uses of ins, as during GVN,
+ // returning false if the replacement should not be performed. For use when
+ // GVN eliminates instructions which are not equivalent to one another.
+ virtual MOZ_MUST_USE bool updateForReplacement(MDefinition* ins) {
+ return true;
+ }
+
+ void setVirtualRegister(uint32_t vreg) {
+ virtualRegister_ = vreg;
+ setLoweredUnchecked();
+ }
+ uint32_t virtualRegister() const {
+ MOZ_ASSERT(isLowered());
+ return virtualRegister_;
+ }
+
+ public:
+ // Opcode testing and casts.
+ template<typename MIRType> bool is() const {
+ return op() == MIRType::classOpcode;
+ }
+ template<typename MIRType> MIRType* to() {
+ MOZ_ASSERT(this->is<MIRType>());
+ return static_cast<MIRType*>(this);
+ }
+ template<typename MIRType> const MIRType* to() const {
+ MOZ_ASSERT(this->is<MIRType>());
+ return static_cast<const MIRType*>(this);
+ }
+# define OPCODE_CASTS(opcode) \
+ bool is##opcode() const { \
+ return this->is<M##opcode>(); \
+ } \
+ M##opcode* to##opcode() { \
+ return this->to<M##opcode>(); \
+ } \
+ const M##opcode* to##opcode() const { \
+ return this->to<M##opcode>(); \
+ }
+ MIR_OPCODE_LIST(OPCODE_CASTS)
+# undef OPCODE_CASTS
+
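+ // For example (illustrative), the expansion for the Constant opcode lets
+ // callers write:
+ //
+ //   if (def->isConstant()) {
+ //       MConstant* cst = def->toConstant();
+ //       // ... use cst ...
+ //   }
+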
+ inline MConstant* maybeConstantValue();
+
+ inline MInstruction* toInstruction();
+ inline const MInstruction* toInstruction() const;
+ bool isInstruction() const {
+ return !isPhi();
+ }
+
+ virtual bool isControlInstruction() const {
+ return false;
+ }
+ inline MControlInstruction* toControlInstruction();
+
+ void setResultType(MIRType type) {
+ resultType_ = type;
+ }
+ void setResultTypeSet(TemporaryTypeSet* types) {
+ resultTypeSet_ = types;
+ }
+ virtual AliasSet getAliasSet() const {
+ // Instructions are effectful by default.
+ return AliasSet::Store(AliasSet::Any);
+ }
+
+ MDefinition* dependency() const {
+ if (getAliasSet().isStore())
+ return nullptr;
+ return loadDependency_;
+ }
+ void setDependency(MDefinition* dependency) {
+ MOZ_ASSERT(!getAliasSet().isStore());
+ loadDependency_ = dependency;
+ }
+ void setStoreDependency(StoreDependency* dependency) {
+ MOZ_ASSERT(getAliasSet().isStore());
+ storeDependency_ = dependency;
+ }
+ StoreDependency* storeDependency() {
+ MOZ_ASSERT_IF(!getAliasSet().isStore(), !storeDependency_);
+ return storeDependency_;
+ }
+ bool isEffectful() const {
+ return getAliasSet().isStore();
+ }
+
+#ifdef DEBUG
+ virtual bool needsResumePoint() const {
+ // Return whether this instruction should have its own resume point.
+ return isEffectful();
+ }
+#endif
+
+ enum class AliasType : uint32_t {
+ NoAlias = 0,
+ MayAlias = 1,
+ MustAlias = 2
+ };
+ virtual AliasType mightAlias(const MDefinition* store) const {
+ // Return whether this load may depend on the specified store, given
+ // that the alias sets intersect. This may be refined to exclude
+ // possible aliasing in cases where alias set flags are too imprecise.
+ if (!(getAliasSet().flags() & store->getAliasSet().flags()))
+ return AliasType::NoAlias;
+ MOZ_ASSERT(!isEffectful() && store->isEffectful());
+ return AliasType::MayAlias;
+ }
+
+ virtual bool canRecoverOnBailout() const {
+ return false;
+ }
+};
+
+// An MUseDefIterator walks over the uses of a definition, skipping any use
+// whose consumer is not a definition. Items from the use list must not be
+// deleted during iteration.
+class MUseDefIterator
+{
+ const MDefinition* def_;
+ MUseIterator current_;
+
+ MUseIterator search(MUseIterator start) {
+ MUseIterator i(start);
+ for (; i != def_->usesEnd(); i++) {
+ if (i->consumer()->isDefinition())
+ return i;
+ }
+ return def_->usesEnd();
+ }
+
+ public:
+ explicit MUseDefIterator(const MDefinition* def)
+ : def_(def),
+ current_(search(def->usesBegin()))
+ { }
+
+ explicit operator bool() const {
+ return current_ != def_->usesEnd();
+ }
+ MUseDefIterator operator ++() {
+ MOZ_ASSERT(current_ != def_->usesEnd());
+ ++current_;
+ current_ = search(current_);
+ return *this;
+ }
+ MUseDefIterator operator ++(int) {
+ MUseDefIterator old(*this);
+ operator++();
+ return old;
+ }
+ MUse* use() const {
+ return *current_;
+ }
+ MDefinition* def() const {
+ return current_->consumer()->toDefinition();
+ }
+};
+
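+// Typical iteration pattern (an illustrative sketch): visit every consumer of
+// |def| that is itself a definition, ignoring resume points.
+//
+//   for (MUseDefIterator iter(def); iter; iter++) {
+//       MDefinition* consumer = iter.def();
+//       // ... inspect consumer ...
+//   }
+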
+#ifdef DEBUG
+bool
+IonCompilationCanUseNurseryPointers();
+#endif
+
+// Helper class to check that GC pointers embedded in MIR instructions are in
+// in the nursery only when the store buffer has been marked as needing to
+// cancel all ion compilations. Otherwise, off-thread Ion compilation and
+// nursery GCs can happen in parallel, so it's invalid to store pointers to
+// nursery things. There's no need to root these pointers, as GC is suppressed
+// during compilation and off-thread compilations are canceled on major GCs.
+template <typename T>
+class CompilerGCPointer
+{
+ js::gc::Cell* ptr_;
+
+ public:
+ explicit CompilerGCPointer(T ptr)
+ : ptr_(ptr)
+ {
+ MOZ_ASSERT_IF(IsInsideNursery(ptr), IonCompilationCanUseNurseryPointers());
+#ifdef DEBUG
+ PerThreadData* pt = TlsPerThreadData.get();
+ MOZ_ASSERT_IF(pt->runtimeIfOnOwnerThread(), pt->suppressGC);
+#endif
+ }
+
+ operator T() const { return static_cast<T>(ptr_); }
+ T operator->() const { return static_cast<T>(ptr_); }
+
+ private:
+ CompilerGCPointer() = delete;
+ CompilerGCPointer(const CompilerGCPointer<T>&) = delete;
+ CompilerGCPointer<T>& operator=(const CompilerGCPointer<T>&) = delete;
+};
+
+typedef CompilerGCPointer<JSObject*> CompilerObject;
+typedef CompilerGCPointer<NativeObject*> CompilerNativeObject;
+typedef CompilerGCPointer<JSFunction*> CompilerFunction;
+typedef CompilerGCPointer<JSScript*> CompilerScript;
+typedef CompilerGCPointer<PropertyName*> CompilerPropertyName;
+typedef CompilerGCPointer<Shape*> CompilerShape;
+typedef CompilerGCPointer<ObjectGroup*> CompilerObjectGroup;
+
+class MRootList : public TempObject
+{
+ public:
+ using RootVector = Vector<void*, 0, JitAllocPolicy>;
+
+ private:
+ mozilla::EnumeratedArray<JS::RootKind, JS::RootKind::Limit, mozilla::Maybe<RootVector>> roots_;
+
+ MRootList(const MRootList&) = delete;
+ void operator=(const MRootList&) = delete;
+
+ public:
+ explicit MRootList(TempAllocator& alloc);
+
+ void trace(JSTracer* trc);
+
+ template <typename T>
+ MOZ_MUST_USE bool append(T ptr) {
+ if (ptr)
+ return roots_[JS::MapTypeToRootKind<T>::kind]->append(ptr);
+ return true;
+ }
+
+ template <typename T>
+ MOZ_MUST_USE bool append(const CompilerGCPointer<T>& ptr) {
+ return append(static_cast<T>(ptr));
+ }
+ MOZ_MUST_USE bool append(const ReceiverGuard& guard) {
+ return append(guard.group) && append(guard.shape);
+ }
+};
+
+// An instruction is an SSA name that is inserted into a basic block's IR
+// stream.
+class MInstruction
+ : public MDefinition,
+ public InlineListNode<MInstruction>
+{
+ MResumePoint* resumePoint_;
+
+ protected:
+ // All MInstructions use the "MFoo::New(alloc)" notation instead of the
+ // TempObject new operator. This code redefines the new operator as
+ // protected and delegates to the TempObject new operator. Thus, the
+ // following code prevents calls to "new(alloc) MFoo" outside of MFoo's
+ // members.
+ inline void* operator new(size_t nbytes, TempAllocator::Fallible view) throw() {
+ return TempObject::operator new(nbytes, view);
+ }
+ inline void* operator new(size_t nbytes, TempAllocator& alloc) {
+ return TempObject::operator new(nbytes, alloc);
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, T* pos) {
+ return TempObject::operator new(nbytes, pos);
+ }
+
+ public:
+ MInstruction()
+ : resumePoint_(nullptr)
+ { }
+
+ // Copying an instruction leaves the block and resume point as empty.
+ explicit MInstruction(const MInstruction& other)
+ : MDefinition(other),
+ resumePoint_(nullptr)
+ { }
+
+ // Convenience function used for replacing a load by the value of the store
+ // if the types match, and boxing the value if they do not.
+ MDefinition* foldsToStore(TempAllocator& alloc);
+
+ void setResumePoint(MResumePoint* resumePoint);
+
+ // Used to transfer the resume point to the rewritten instruction.
+ void stealResumePoint(MInstruction* ins);
+ void moveResumePointAsEntry();
+ void clearResumePoint();
+ MResumePoint* resumePoint() const {
+ return resumePoint_;
+ }
+
+ // For instructions which can be cloned with new inputs, with all other
+ // information being the same. clone() implementations do not need to worry
+ // about cloning generic MInstruction/MDefinition state like flags and
+ // resume points.
+ virtual bool canClone() const {
+ return false;
+ }
+ virtual MInstruction* clone(TempAllocator& alloc, const MDefinitionVector& inputs) const {
+ MOZ_CRASH();
+ }
+
+ // MIR instructions containing GC pointers should override this to append
+ // these pointers to the root list.
+ virtual bool appendRoots(MRootList& roots) const {
+ return true;
+ }
+
+ // Instructions needing to hook into type analysis should return a
+ // TypePolicy.
+ virtual TypePolicy* typePolicy() = 0;
+ virtual MIRType typePolicySpecialization() = 0;
+};
+
+#define INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode) \
+ static const Opcode classOpcode = MDefinition::Op_##opcode; \
+ using MThisOpcode = M##opcode; \
+ Opcode op() const override { \
+ return classOpcode; \
+ } \
+ const char* opName() const override { \
+ return #opcode; \
+ } \
+ void accept(MDefinitionVisitor* visitor) override { \
+ visitor->visit##opcode(this); \
+ }
+
+#define INSTRUCTION_HEADER(opcode) \
+ INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode) \
+ virtual TypePolicy* typePolicy() override; \
+ virtual MIRType typePolicySpecialization() override;
+
+#define ALLOW_CLONE(typename) \
+ bool canClone() const override { \
+ return true; \
+ } \
+ MInstruction* clone(TempAllocator& alloc, \
+ const MDefinitionVector& inputs) const override { \
+ MInstruction* res = new(alloc) typename(*this); \
+ for (size_t i = 0; i < numOperands(); i++) \
+ res->replaceOperand(i, inputs[i]); \
+ return res; \
+ }
+
+// Adds MFoo::New functions which mirror the arguments of the constructors.
+// For opcodes using this macro, New can be called with a TempAllocator or
+// with the fallible version of the TempAllocator.
+#define TRIVIAL_NEW_WRAPPERS \
+ template <typename... Args> \
+ static MThisOpcode* New(TempAllocator& alloc, Args&&... args) { \
+ return new(alloc) MThisOpcode(mozilla::Forward<Args>(args)...); \
+ } \
+ template <typename... Args> \
+ static MThisOpcode* New(TempAllocator::Fallible alloc, Args&&... args) \
+ { \
+ return new(alloc) MThisOpcode(mozilla::Forward<Args>(args)...); \
+ }
+
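+// For illustration, an opcode using TRIVIAL_NEW_WRAPPERS (here a hypothetical
+// MFoo whose constructor takes two definitions) is created as
+//
+//   MFoo* ins = MFoo::New(alloc, lhs, rhs);
+//
+// and the second overload accepts a TempAllocator::Fallible view of the
+// allocator, in which case New may return nullptr on OOM.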
+
+// These macros are used as syntactic sugar for writing getOperand accessors.
+// They are meant to be used in the body of MIR instructions as follows:
+//
+// public:
+// INSTRUCTION_HEADER(Foo)
+// NAMED_OPERANDS((0, lhs), (1, rhs))
+//
+// The above example defines 2 accessors, one named "lhs" accessing the first
+// operand, and one named "rhs" accessing the second operand.
+#define NAMED_OPERAND_ACCESSOR(Index, Name) \
+ MDefinition* Name() const { \
+ return getOperand(Index); \
+ }
+#define NAMED_OPERAND_ACCESSOR_APPLY(Args) \
+ NAMED_OPERAND_ACCESSOR Args
+#define NAMED_OPERANDS(...) \
+ MOZ_FOR_EACH(NAMED_OPERAND_ACCESSOR_APPLY, (), (__VA_ARGS__))
+
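+// As a sketch of the expansion, NAMED_OPERANDS((0, lhs), (1, rhs)) produces
+// two accessors of the form
+//
+//   MDefinition* lhs() const { return getOperand(0); }
+//   MDefinition* rhs() const { return getOperand(1); }
+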
+template <size_t Arity>
+class MAryInstruction : public MInstruction
+{
+ mozilla::Array<MUse, Arity> operands_;
+
+ protected:
+ MUse* getUseFor(size_t index) final override {
+ return &operands_[index];
+ }
+ const MUse* getUseFor(size_t index) const final override {
+ return &operands_[index];
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ operands_[index].init(operand, this);
+ }
+
+ public:
+ MDefinition* getOperand(size_t index) const final override {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final override {
+ return Arity;
+ }
+#ifdef DEBUG
+ static const size_t staticNumOperands = Arity;
+#endif
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ operands_[index].replaceProducer(operand);
+ }
+
+ MAryInstruction() { }
+
+ explicit MAryInstruction(const MAryInstruction<Arity>& other)
+ : MInstruction(other)
+ {
+ for (int i = 0; i < (int) Arity; i++) // N.B. use |int| to avoid warnings when Arity == 0
+ operands_[i].init(other.operands_[i].producer(), this);
+ }
+};
+
+class MNullaryInstruction
+ : public MAryInstruction<0>,
+ public NoTypePolicy::Data
+{ };
+
+class MUnaryInstruction : public MAryInstruction<1>
+{
+ protected:
+ explicit MUnaryInstruction(MDefinition* ins)
+ {
+ initOperand(0, ins);
+ }
+
+ public:
+ NAMED_OPERANDS((0, input))
+};
+
+class MBinaryInstruction : public MAryInstruction<2>
+{
+ protected:
+ MBinaryInstruction(MDefinition* left, MDefinition* right)
+ {
+ initOperand(0, left);
+ initOperand(1, right);
+ }
+
+ public:
+ NAMED_OPERANDS((0, lhs), (1, rhs))
+ void swapOperands() {
+ MDefinition* temp = getOperand(0);
+ replaceOperand(0, getOperand(1));
+ replaceOperand(1, temp);
+ }
+
+ protected:
+ HashNumber valueHash() const
+ {
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ return op() + lhs->id() + rhs->id();
+ }
+ bool binaryCongruentTo(const MDefinition* ins) const
+ {
+ if (op() != ins->op())
+ return false;
+
+ if (type() != ins->type())
+ return false;
+
+ if (isEffectful() || ins->isEffectful())
+ return false;
+
+ const MDefinition* left = getOperand(0);
+ const MDefinition* right = getOperand(1);
+ const MDefinition* tmp;
+
+ if (isCommutative() && left->id() > right->id()) {
+ tmp = right;
+ right = left;
+ left = tmp;
+ }
+
+ const MBinaryInstruction* bi = static_cast<const MBinaryInstruction*>(ins);
+ const MDefinition* insLeft = bi->getOperand(0);
+ const MDefinition* insRight = bi->getOperand(1);
+ if (isCommutative() && insLeft->id() > insRight->id()) {
+ tmp = insRight;
+ insRight = insLeft;
+ insLeft = tmp;
+ }
+
+ return left == insLeft &&
+ right == insRight;
+ }
+
+ public:
+    // Return whether the operands to this instruction are both unsigned.
+ static bool unsignedOperands(MDefinition* left, MDefinition* right);
+ bool unsignedOperands();
+
+ // Replace any wrapping operands with the underlying int32 operands
+ // in case of unsigned operands.
+ void replaceWithUnsignedOperands();
+};
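+
+// A small worked example of the canonicalization in binaryCongruentTo (a, b,
+// and MFoo are placeholders): for a commutative, non-effectful binary opcode,
+// both operand pairs are reordered by id() before comparison, so a
+// congruentTo() override built on binaryCongruentTo() treats the following
+// two instructions as congruent, allowing GVN to merge them:
+//
+//   first  = MFoo(a, b)   // compared as (a, b), assuming a->id() < b->id()
+//   second = MFoo(b, a)   // also compared as (a, b) after reordering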
+
+class MTernaryInstruction : public MAryInstruction<3>
+{
+ protected:
+ MTernaryInstruction(MDefinition* first, MDefinition* second, MDefinition* third)
+ {
+ initOperand(0, first);
+ initOperand(1, second);
+ initOperand(2, third);
+ }
+
+ protected:
+ HashNumber valueHash() const
+ {
+ MDefinition* first = getOperand(0);
+ MDefinition* second = getOperand(1);
+ MDefinition* third = getOperand(2);
+
+ return op() + first->id() + second->id() + third->id();
+ }
+};
+
+class MQuaternaryInstruction : public MAryInstruction<4>
+{
+ protected:
+ MQuaternaryInstruction(MDefinition* first, MDefinition* second,
+ MDefinition* third, MDefinition* fourth)
+ {
+ initOperand(0, first);
+ initOperand(1, second);
+ initOperand(2, third);
+ initOperand(3, fourth);
+ }
+
+ protected:
+ HashNumber valueHash() const
+ {
+ MDefinition* first = getOperand(0);
+ MDefinition* second = getOperand(1);
+ MDefinition* third = getOperand(2);
+ MDefinition* fourth = getOperand(3);
+
+ return op() + first->id() + second->id() +
+ third->id() + fourth->id();
+ }
+};
+
+template <class T>
+class MVariadicT : public T
+{
+ FixedList<MUse> operands_;
+
+ protected:
+ MOZ_MUST_USE bool init(TempAllocator& alloc, size_t length) {
+ return operands_.init(alloc, length);
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUnchecked(operand, this);
+ }
+ MUse* getUseFor(size_t index) final override {
+ return &operands_[index];
+ }
+ const MUse* getUseFor(size_t index) const final override {
+ return &operands_[index];
+ }
+
+ public:
+ // Will assert if called before initialization.
+ MDefinition* getOperand(size_t index) const final override {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final override {
+ return operands_.length();
+ }
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ operands_[index].replaceProducer(operand);
+ }
+};
+
+typedef MVariadicT<MInstruction> MVariadicInstruction;
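+
+// Sketch of the usual initialization pattern for a variadic opcode (MFoo and
+// |args| are placeholders): the operand storage must be sized with init()
+// before any operand is written, typically from an init helper on the
+// concrete opcode, since initOperand() is protected:
+//
+//   MOZ_MUST_USE bool init(TempAllocator& alloc, const MDefinitionVector& args) {
+//     if (!MVariadicInstruction::init(alloc, args.length()))
+//       return false;
+//     for (size_t i = 0; i < args.length(); i++)
+//       initOperand(i, args[i]);
+//     return true;
+//   }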
+
+// Generates an LSnapshot without further effect.
+class MStart : public MNullaryInstruction
+{
+ public:
+ INSTRUCTION_HEADER(Start)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// Instruction marking an entry point for on-stack replacement.
+// OSR may occur at loop headers (at JSOP_TRACE).
+// There is at most one MOsrEntry per MIRGraph.
+class MOsrEntry : public MNullaryInstruction
+{
+ protected:
+ MOsrEntry() {
+ setResultType(MIRType::Pointer);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrEntry)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// No-op instruction. This cannot be moved or eliminated, and is intended for
+// anchoring resume points at arbitrary points in a block.
+class MNop : public MNullaryInstruction
+{
+ protected:
+ MNop() {
+ }
+
+ public:
+ INSTRUCTION_HEADER(Nop)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MNop)
+};
+
+// Truncation barrier. This is intended for protecting its input against
+// follow-up truncation optimizations.
+class MLimitedTruncate
+ : public MUnaryInstruction,
+ public ConvertToInt32Policy<0>::Data
+{
+ public:
+ TruncateKind truncate_;
+ TruncateKind truncateLimit_;
+
+ protected:
+ MLimitedTruncate(MDefinition* input, TruncateKind limit)
+ : MUnaryInstruction(input),
+ truncate_(NoTruncate),
+ truncateLimit_(limit)
+ {
+ setResultType(MIRType::Int32);
+ setResultTypeSet(input->resultTypeSet());
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LimitedTruncate)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+ TruncateKind truncateKind() const {
+ return truncate_;
+ }
+ void setTruncateKind(TruncateKind kind) {
+ truncate_ = kind;
+ }
+};
+
+// A constant js::Value.
+class MConstant : public MNullaryInstruction
+{
+ struct Payload {
+ union {
+ bool b;
+ int32_t i32;
+ int64_t i64;
+ float f;
+ double d;
+ JSString* str;
+ JS::Symbol* sym;
+ JSObject* obj;
+ uint64_t asBits;
+ };
+ Payload() : asBits(0) {}
+ };
+
+ Payload payload_;
+
+ static_assert(sizeof(Payload) == sizeof(uint64_t),
+ "asBits must be big enough for all payload bits");
+
+#ifdef DEBUG
+ void assertInitializedPayload() const;
+#else
+ void assertInitializedPayload() const {}
+#endif
+
+ protected:
+ MConstant(const Value& v, CompilerConstraintList* constraints);
+ explicit MConstant(JSObject* obj);
+ explicit MConstant(float f);
+ explicit MConstant(double d);
+ explicit MConstant(int64_t i);
+
+ public:
+ INSTRUCTION_HEADER(Constant)
+ static MConstant* New(TempAllocator& alloc, const Value& v,
+ CompilerConstraintList* constraints = nullptr);
+ static MConstant* New(TempAllocator::Fallible alloc, const Value& v,
+ CompilerConstraintList* constraints = nullptr);
+ static MConstant* New(TempAllocator& alloc, const Value& v, MIRType type);
+ static MConstant* New(TempAllocator& alloc, wasm::RawF32 bits);
+ static MConstant* New(TempAllocator& alloc, wasm::RawF64 bits);
+ static MConstant* NewFloat32(TempAllocator& alloc, double d);
+ static MConstant* NewInt64(TempAllocator& alloc, int64_t i);
+ static MConstant* NewConstraintlessObject(TempAllocator& alloc, JSObject* v);
+ static MConstant* Copy(TempAllocator& alloc, MConstant* src) {
+ return new(alloc) MConstant(*src);
+ }
+
+ // Try to convert this constant to boolean, similar to js::ToBoolean.
+ // Returns false if the type is MIRType::Magic*.
+ bool MOZ_MUST_USE valueToBoolean(bool* res) const;
+
+ // Like valueToBoolean, but returns the result directly instead of using
+ // an outparam. Should not be used if this constant might be a magic value.
+ bool valueToBooleanInfallible() const {
+ bool res;
+ MOZ_ALWAYS_TRUE(valueToBoolean(&res));
+ return res;
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool updateForReplacement(MDefinition* def) override {
+ MConstant* c = def->toConstant();
+ // During constant folding, we don't want to replace a float32
+ // value by a double value.
+ if (type() == MIRType::Float32)
+ return c->type() == MIRType::Float32;
+ if (type() == MIRType::Double)
+ return c->type() != MIRType::Float32;
+ return true;
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+
+ bool canProduceFloat32() const override;
+
+ ALLOW_CLONE(MConstant)
+
+ bool equals(const MConstant* other) const {
+ assertInitializedPayload();
+ return type() == other->type() && payload_.asBits == other->payload_.asBits;
+ }
+
+ bool toBoolean() const {
+ MOZ_ASSERT(type() == MIRType::Boolean);
+ return payload_.b;
+ }
+ int32_t toInt32() const {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ return payload_.i32;
+ }
+ int64_t toInt64() const {
+ MOZ_ASSERT(type() == MIRType::Int64);
+ return payload_.i64;
+ }
+ bool isInt32(int32_t i) const {
+ return type() == MIRType::Int32 && payload_.i32 == i;
+ }
+ double toDouble() const {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return payload_.d;
+ }
+ wasm::RawF64 toRawF64() const {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return wasm::RawF64::fromBits(payload_.i64);
+ }
+ float toFloat32() const {
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return payload_.f;
+ }
+ wasm::RawF32 toRawF32() const {
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return wasm::RawF32::fromBits(payload_.i32);
+ }
+ JSString* toString() const {
+ MOZ_ASSERT(type() == MIRType::String);
+ return payload_.str;
+ }
+ JS::Symbol* toSymbol() const {
+ MOZ_ASSERT(type() == MIRType::Symbol);
+ return payload_.sym;
+ }
+ JSObject& toObject() const {
+ MOZ_ASSERT(type() == MIRType::Object);
+ return *payload_.obj;
+ }
+ JSObject* toObjectOrNull() const {
+ if (type() == MIRType::Object)
+ return payload_.obj;
+ MOZ_ASSERT(type() == MIRType::Null);
+ return nullptr;
+ }
+
+ bool isTypeRepresentableAsDouble() const {
+ return IsTypeRepresentableAsDouble(type());
+ }
+ double numberToDouble() const {
+ MOZ_ASSERT(isTypeRepresentableAsDouble());
+ if (type() == MIRType::Int32)
+ return toInt32();
+ if (type() == MIRType::Double)
+ return toDouble();
+ return toFloat32();
+ }
+
+ // Convert this constant to a js::Value. Float32 constants will be stored
+ // as DoubleValue and NaNs are canonicalized. Callers must be careful: not
+ // all constants can be represented by js::Value (wasm supports int64).
+ Value toJSValue() const;
+
+ bool appendRoots(MRootList& roots) const override;
+};
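+
+// Brief usage sketch (|alloc| and the constant value are illustrative):
+// constants are created through the New overloads above and inspected through
+// the typed accessors, which assert on the MIRType of the constant.
+//
+//   MConstant* c = MConstant::New(alloc, Int32Value(42));
+//   MOZ_ASSERT(c->type() == MIRType::Int32);
+//   MOZ_ASSERT(c->toInt32() == 42);
+//   bool b;
+//   if (c->valueToBoolean(&b))
+//     MOZ_ASSERT(b);   // 42 converts to true, as in js::ToBoolean.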
+
+// Generic constructor of 4-lane SIMD values.
+class MSimdValueX4
+ : public MQuaternaryInstruction,
+ public Mix4Policy<SimdScalarPolicy<0>, SimdScalarPolicy<1>,
+ SimdScalarPolicy<2>, SimdScalarPolicy<3> >::Data
+{
+ protected:
+ MSimdValueX4(MIRType type, MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w)
+ : MQuaternaryInstruction(x, y, z, w)
+ {
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT(SimdTypeToLength(type) == 4);
+
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdValueX4)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return SimdTypeToLaneType(type()) == MIRType::Float32;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MSimdValueX4)
+};
+
+// Generic constructor of SIMD values with identical lanes.
+class MSimdSplat
+ : public MUnaryInstruction,
+ public SimdScalarPolicy<0>::Data
+{
+ protected:
+ MSimdSplat(MDefinition* v, MIRType type)
+ : MUnaryInstruction(v)
+ {
+ MOZ_ASSERT(IsSimdType(type));
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdSplat)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return SimdTypeToLaneType(type()) == MIRType::Float32;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MSimdSplat)
+};
+
+// A constant SIMD value.
+class MSimdConstant
+ : public MNullaryInstruction
+{
+ SimdConstant value_;
+
+ protected:
+ MSimdConstant(const SimdConstant& v, MIRType type) : value_(v) {
+ MOZ_ASSERT(IsSimdType(type));
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdConstant)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isSimdConstant())
+ return false;
+ // Bool32x4 and Int32x4 share the same underlying SimdConstant representation.
+ if (type() != ins->type())
+ return false;
+ return value() == ins->toSimdConstant()->value();
+ }
+
+ const SimdConstant& value() const {
+ return value_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MSimdConstant)
+};
+
+// Converts all lanes of a given vector into the type of another vector
+class MSimdConvert
+ : public MUnaryInstruction,
+ public SimdPolicy<0>::Data
+{
+    // When either fromType or toType is an integer vector, this indicates
+    // whether it should be treated as signed or unsigned. Note that we don't
+    // support int-int conversions - use MSimdReinterpretCast for that.
+ SimdSign sign_;
+ wasm::TrapOffset trapOffset_;
+
+ MSimdConvert(MDefinition* obj, MIRType toType, SimdSign sign, wasm::TrapOffset trapOffset)
+ : MUnaryInstruction(obj), sign_(sign), trapOffset_(trapOffset)
+ {
+ MIRType fromType = obj->type();
+ MOZ_ASSERT(IsSimdType(fromType));
+ MOZ_ASSERT(IsSimdType(toType));
+ // All conversions are int <-> float, so signedness is required.
+ MOZ_ASSERT(sign != SimdSign::NotApplicable);
+
+ setResultType(toType);
+ specialization_ = fromType; // expects fromType as input
+
+ setMovable();
+ if (IsFloatingPointSimdType(fromType) && IsIntegerSimdType(toType)) {
+ // Does the extra range check => do not remove
+ setGuard();
+ }
+ }
+
+ static MSimdConvert* New(TempAllocator& alloc, MDefinition* obj, MIRType toType, SimdSign sign,
+ wasm::TrapOffset trapOffset)
+ {
+ return new (alloc) MSimdConvert(obj, toType, sign, trapOffset);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdConvert)
+
+ // Create a MSimdConvert instruction and add it to the basic block.
+ // Possibly create and add an equivalent sequence of instructions instead if
+ // the current target doesn't support the requested conversion directly.
+ // Return the inserted MInstruction that computes the converted value.
+ static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
+ MIRType toType, SimdSign sign,
+ wasm::TrapOffset trapOffset = wasm::TrapOffset());
+
+ SimdSign signedness() const {
+ return sign_;
+ }
+ wasm::TrapOffset trapOffset() const {
+ return trapOffset_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ const MSimdConvert* other = ins->toSimdConvert();
+ return sign_ == other->sign_;
+ }
+ ALLOW_CLONE(MSimdConvert)
+};
+
+// Casts bits of a vector input to another SIMD type (doesn't generate code).
+class MSimdReinterpretCast
+ : public MUnaryInstruction,
+ public SimdPolicy<0>::Data
+{
+ MSimdReinterpretCast(MDefinition* obj, MIRType toType)
+ : MUnaryInstruction(obj)
+ {
+ MIRType fromType = obj->type();
+ MOZ_ASSERT(IsSimdType(fromType));
+ MOZ_ASSERT(IsSimdType(toType));
+ setMovable();
+ setResultType(toType);
+ specialization_ = fromType; // expects fromType as input
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdReinterpretCast)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ ALLOW_CLONE(MSimdReinterpretCast)
+};
+
+// Extracts a lane element from a given vector, selected by its lane index.
+//
+// For integer SIMD types, a SimdSign must be provided so the lane value can be
+// converted to a scalar correctly.
+class MSimdExtractElement
+ : public MUnaryInstruction,
+ public SimdPolicy<0>::Data
+{
+ protected:
+ unsigned lane_;
+ SimdSign sign_;
+
+ MSimdExtractElement(MDefinition* obj, MIRType laneType, unsigned lane, SimdSign sign)
+ : MUnaryInstruction(obj), lane_(lane), sign_(sign)
+ {
+ MIRType vecType = obj->type();
+ MOZ_ASSERT(IsSimdType(vecType));
+ MOZ_ASSERT(lane < SimdTypeToLength(vecType));
+ MOZ_ASSERT(!IsSimdType(laneType));
+ MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
+ "Signedness must be specified for integer SIMD extractLanes");
+ // The resulting type should match the lane type.
+ // Allow extracting boolean lanes directly into an Int32 (for wasm).
+ // Allow extracting Uint32 lanes into a double.
+ //
+ // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
+ // equivalent to extracting the Uint32 lane to a double and then
+ // applying MTruncateToInt32, but it bypasses the conversion to/from
+ // double.
+ MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
+ (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
+ (vecType == MIRType::Int32x4 && laneType == MIRType::Double &&
+ sign == SimdSign::Unsigned));
+
+ setMovable();
+ specialization_ = vecType;
+ setResultType(laneType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdExtractElement)
+ TRIVIAL_NEW_WRAPPERS
+
+ unsigned lane() const {
+ return lane_;
+ }
+
+ SimdSign signedness() const {
+ return sign_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isSimdExtractElement())
+ return false;
+ const MSimdExtractElement* other = ins->toSimdExtractElement();
+ if (other->lane_ != lane_ || other->sign_ != sign_)
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ ALLOW_CLONE(MSimdExtractElement)
+};
+
+// Replaces the datum in the given lane by a scalar value of the same type.
+class MSimdInsertElement
+ : public MBinaryInstruction,
+ public MixPolicy< SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+{
+ private:
+ unsigned lane_;
+
+ MSimdInsertElement(MDefinition* vec, MDefinition* val, unsigned lane)
+ : MBinaryInstruction(vec, val), lane_(lane)
+ {
+ MIRType type = vec->type();
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT(lane < SimdTypeToLength(type));
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdInsertElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, vector), (1, value))
+
+ unsigned lane() const {
+ return lane_;
+ }
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(1) && SimdTypeToLaneType(type()) == MIRType::Float32;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return binaryCongruentTo(ins) && lane_ == ins->toSimdInsertElement()->lane();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdInsertElement)
+};
+
+// Returns true if all lanes are true.
+class MSimdAllTrue
+ : public MUnaryInstruction,
+ public SimdPolicy<0>::Data
+{
+ protected:
+ explicit MSimdAllTrue(MDefinition* obj, MIRType result)
+ : MUnaryInstruction(obj)
+ {
+ MIRType simdType = obj->type();
+ MOZ_ASSERT(IsBooleanSimdType(simdType));
+ MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
+ setResultType(result);
+ specialization_ = simdType;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdAllTrue)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ ALLOW_CLONE(MSimdAllTrue)
+};
+
+// Returns true if any lane is true.
+class MSimdAnyTrue
+ : public MUnaryInstruction,
+ public SimdPolicy<0>::Data
+{
+ protected:
+ explicit MSimdAnyTrue(MDefinition* obj, MIRType result)
+ : MUnaryInstruction(obj)
+ {
+ MIRType simdType = obj->type();
+ MOZ_ASSERT(IsBooleanSimdType(simdType));
+ MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
+ setResultType(result);
+ specialization_ = simdType;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdAnyTrue)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MSimdAnyTrue)
+};
+
+// Base for the MSimdSwizzle and MSimdShuffle classes.
+class MSimdShuffleBase
+{
+ protected:
+    // As of now, there are at most 16 lanes. For each output lane, we need to
+    // know which input vector it comes from and which lane of that input it
+    // selects.
+ mozilla::Array<uint8_t, 16> lane_;
+ uint32_t arity_;
+
+ MSimdShuffleBase(const uint8_t lanes[], MIRType type)
+ {
+ arity_ = SimdTypeToLength(type);
+ for (unsigned i = 0; i < arity_; i++)
+ lane_[i] = lanes[i];
+ }
+
+ bool sameLanes(const MSimdShuffleBase* other) const {
+ return arity_ == other->arity_ &&
+ memcmp(&lane_[0], &other->lane_[0], arity_) == 0;
+ }
+
+ public:
+ unsigned numLanes() const {
+ return arity_;
+ }
+
+ unsigned lane(unsigned i) const {
+ MOZ_ASSERT(i < arity_);
+ return lane_[i];
+ }
+
+ bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+ return arity_ == 4 && lane(0) == x && lane(1) == y && lane(2) == z &&
+ lane(3) == w;
+ }
+};
+
+// Applies a swizzle operation to the input, placing the input lanes into the
+// output register's lanes as indicated. This implements the SIMD.js "swizzle"
+// function, which takes one vector and an array of lane indexes.
+class MSimdSwizzle
+ : public MUnaryInstruction,
+ public MSimdShuffleBase,
+ public NoTypePolicy::Data
+{
+ protected:
+ MSimdSwizzle(MDefinition* obj, const uint8_t lanes[])
+ : MUnaryInstruction(obj), MSimdShuffleBase(lanes, obj->type())
+ {
+ for (unsigned i = 0; i < arity_; i++)
+ MOZ_ASSERT(lane(i) < arity_);
+ setResultType(obj->type());
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdSwizzle)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isSimdSwizzle())
+ return false;
+ const MSimdSwizzle* other = ins->toSimdSwizzle();
+ return sameLanes(other) && congruentIfOperandsEqual(other);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MSimdSwizzle)
+};
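+
+// For instance (a sketch, not the actual folding code), a 4-lane swizzle whose
+// lanes are (0, 1, 2, 3) satisfies lanesMatch(0, 1, 2, 3) and is the identity
+// shuffle, so a folding pass could replace it with its input:
+//
+//   if (swizzle->lanesMatch(0, 1, 2, 3))
+//     return swizzle->input();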
+
+// A "general shuffle" is a swizzle or a shuffle with non-constant lane
+// indices. This is the one that Ion inlines, and it can be folded into an
+// MSimdSwizzle/MSimdShuffle if the lane indices are constant. Performance of
+// general swizzle/shuffle does not really matter, as we expect to get
+// constant indices most of the time.
+class MSimdGeneralShuffle :
+ public MVariadicInstruction,
+ public SimdShufflePolicy::Data
+{
+ unsigned numVectors_;
+ unsigned numLanes_;
+
+ protected:
+ MSimdGeneralShuffle(unsigned numVectors, unsigned numLanes, MIRType type)
+ : numVectors_(numVectors), numLanes_(numLanes)
+ {
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT(SimdTypeToLength(type) == numLanes_);
+
+ setResultType(type);
+ specialization_ = type;
+ setGuard(); // throws if lane index is out of bounds
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdGeneralShuffle);
+ TRIVIAL_NEW_WRAPPERS
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc) {
+ return MVariadicInstruction::init(alloc, numVectors_ + numLanes_);
+ }
+ void setVector(unsigned i, MDefinition* vec) {
+ MOZ_ASSERT(i < numVectors_);
+ initOperand(i, vec);
+ }
+ void setLane(unsigned i, MDefinition* laneIndex) {
+ MOZ_ASSERT(i < numLanes_);
+ initOperand(numVectors_ + i, laneIndex);
+ }
+
+ unsigned numVectors() const {
+ return numVectors_;
+ }
+ unsigned numLanes() const {
+ return numLanes_;
+ }
+ MDefinition* vector(unsigned i) const {
+ MOZ_ASSERT(i < numVectors_);
+ return getOperand(i);
+ }
+ MDefinition* lane(unsigned i) const {
+ MOZ_ASSERT(i < numLanes_);
+ return getOperand(numVectors_ + i);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isSimdGeneralShuffle())
+ return false;
+ const MSimdGeneralShuffle* other = ins->toSimdGeneralShuffle();
+ return numVectors_ == other->numVectors() &&
+ numLanes_ == other->numLanes() &&
+ congruentIfOperandsEqual(other);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Applies a shuffle operation to the inputs. The lane indexes select a source
+// lane from the concatenation of the two input vectors.
+class MSimdShuffle
+ : public MBinaryInstruction,
+ public MSimdShuffleBase,
+ public NoTypePolicy::Data
+{
+ MSimdShuffle(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[])
+ : MBinaryInstruction(lhs, rhs), MSimdShuffleBase(lanes, lhs->type())
+ {
+ MOZ_ASSERT(IsSimdType(lhs->type()));
+ MOZ_ASSERT(IsSimdType(rhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ for (unsigned i = 0; i < arity_; i++)
+ MOZ_ASSERT(lane(i) < 2 * arity_);
+ setResultType(lhs->type());
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdShuffle)
+
+ static MInstruction* New(TempAllocator& alloc, MDefinition* lhs, MDefinition* rhs,
+ const uint8_t lanes[])
+ {
+ unsigned arity = SimdTypeToLength(lhs->type());
+
+        // Swap operands so that the majority of lanes come from the LHS.
+        // In the balanced case, swap operands if need be, in order to be able
+        // to do only one vshufps on x86.
+ unsigned lanesFromLHS = 0;
+ for (unsigned i = 0; i < arity; i++) {
+ if (lanes[i] < arity)
+ lanesFromLHS++;
+ }
+
+ if (lanesFromLHS < arity / 2 ||
+ (arity == 4 && lanesFromLHS == 2 && lanes[0] >= 4 && lanes[1] >= 4)) {
+ mozilla::Array<uint8_t, 16> newLanes;
+ for (unsigned i = 0; i < arity; i++)
+ newLanes[i] = (lanes[i] + arity) % (2 * arity);
+ return New(alloc, rhs, lhs, &newLanes[0]);
+ }
+
+ // If all lanes come from the same vector, just use swizzle instead.
+ if (lanesFromLHS == arity)
+ return MSimdSwizzle::New(alloc, lhs, lanes);
+
+ return new(alloc) MSimdShuffle(lhs, rhs, lanes);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isSimdShuffle())
+ return false;
+ const MSimdShuffle* other = ins->toSimdShuffle();
+ return sameLanes(other) && binaryCongruentTo(other);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MSimdShuffle)
+};
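+
+// Worked example for the operand swap in MSimdShuffle::New above (the lane
+// values are illustrative): with arity 4 and lanes [4, 5, 2, 3], only two
+// lanes come from the LHS and both lanes[0] and lanes[1] select from the RHS,
+// so the operands are swapped and each lane is remapped with
+// (lane + arity) % (2 * arity):
+//
+//   lanes on (lhs, rhs) = [4, 5, 2, 3]
+//   lanes on (rhs, lhs) = [0, 1, 6, 7]   // same elements, one vshufps on x86
+//
+// If all lanes had come from a single vector, New would have returned an
+// MSimdSwizzle instead.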
+
+class MSimdUnaryArith
+ : public MUnaryInstruction,
+ public SimdSameAsReturnedTypePolicy<0>::Data
+{
+ public:
+ enum Operation {
+#define OP_LIST_(OP) OP,
+ FOREACH_FLOAT_SIMD_UNOP(OP_LIST_)
+ neg,
+ not_
+#undef OP_LIST_
+ };
+
+ static const char* OperationName(Operation op) {
+ switch (op) {
+ case abs: return "abs";
+ case neg: return "neg";
+ case not_: return "not";
+ case reciprocalApproximation: return "reciprocalApproximation";
+ case reciprocalSqrtApproximation: return "reciprocalSqrtApproximation";
+ case sqrt: return "sqrt";
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ private:
+ Operation operation_;
+
+ MSimdUnaryArith(MDefinition* def, Operation op)
+ : MUnaryInstruction(def), operation_(op)
+ {
+ MIRType type = def->type();
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT_IF(IsIntegerSimdType(type), op == neg || op == not_);
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdUnaryArith)
+ TRIVIAL_NEW_WRAPPERS
+
+ Operation operation() const { return operation_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) && ins->toSimdUnaryArith()->operation() == operation();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdUnaryArith);
+};
+
+// Compares each value of a SIMD vector to each corresponding lane's value of
+// another SIMD vector, and returns a boolean vector containing the results of
+// the comparison: all bits are set to 1 if the comparison is true, 0 otherwise.
+// When comparing integer vectors, a SimdSign must be provided to request signed
+// or unsigned comparison.
+class MSimdBinaryComp
+ : public MBinaryInstruction,
+ public SimdAllPolicy::Data
+{
+ public:
+ enum Operation {
+#define NAME_(x) x,
+ FOREACH_COMP_SIMD_OP(NAME_)
+#undef NAME_
+ };
+
+ static const char* OperationName(Operation op) {
+ switch (op) {
+#define NAME_(x) case x: return #x;
+ FOREACH_COMP_SIMD_OP(NAME_)
+#undef NAME_
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ private:
+ Operation operation_;
+ SimdSign sign_;
+
+ MSimdBinaryComp(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+ : MBinaryInstruction(left, right), operation_(op), sign_(sign)
+ {
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType opType = left->type();
+ MOZ_ASSERT(IsSimdType(opType));
+ MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(opType),
+ "Signedness must be specified for integer SIMD compares");
+ setResultType(MIRTypeToBooleanSimdType(opType));
+ specialization_ = opType;
+ setMovable();
+ if (op == equal || op == notEqual)
+ setCommutative();
+ }
+
+ static MSimdBinaryComp* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ Operation op, SimdSign sign)
+ {
+ return new (alloc) MSimdBinaryComp(left, right, op, sign);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdBinaryComp)
+
+ // Create a MSimdBinaryComp or an equivalent sequence of instructions
+ // supported by the current target.
+ // Add all instructions to the basic block |addTo|.
+ static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op, SimdSign sign);
+
+ AliasSet getAliasSet() const override
+ {
+ return AliasSet::None();
+ }
+
+ Operation operation() const { return operation_; }
+ SimdSign signedness() const { return sign_; }
+ MIRType specialization() const { return specialization_; }
+
+ // Swap the operands and reverse the comparison predicate.
+ void reverse() {
+ switch (operation()) {
+ case greaterThan: operation_ = lessThan; break;
+ case greaterThanOrEqual: operation_ = lessThanOrEqual; break;
+ case lessThan: operation_ = greaterThan; break;
+ case lessThanOrEqual: operation_ = greaterThanOrEqual; break;
+ case equal:
+ case notEqual:
+ break;
+ default: MOZ_CRASH("Unexpected compare operation");
+ }
+ swapOperands();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ const MSimdBinaryComp* other = ins->toSimdBinaryComp();
+ return specialization_ == other->specialization() &&
+ operation_ == other->operation() &&
+ sign_ == other->signedness();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdBinaryComp)
+};
+
+class MSimdBinaryArith
+ : public MBinaryInstruction,
+ public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+{
+ public:
+ enum Operation {
+#define OP_LIST_(OP) Op_##OP,
+ FOREACH_NUMERIC_SIMD_BINOP(OP_LIST_)
+ FOREACH_FLOAT_SIMD_BINOP(OP_LIST_)
+#undef OP_LIST_
+ };
+
+ static const char* OperationName(Operation op) {
+ switch (op) {
+#define OP_CASE_LIST_(OP) case Op_##OP: return #OP;
+ FOREACH_NUMERIC_SIMD_BINOP(OP_CASE_LIST_)
+ FOREACH_FLOAT_SIMD_BINOP(OP_CASE_LIST_)
+#undef OP_CASE_LIST_
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ private:
+ Operation operation_;
+
+ MSimdBinaryArith(MDefinition* left, MDefinition* right, Operation op)
+ : MBinaryInstruction(left, right), operation_(op)
+ {
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType type = left->type();
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT_IF(IsIntegerSimdType(type), op == Op_add || op == Op_sub || op == Op_mul);
+ setResultType(type);
+ setMovable();
+ if (op == Op_add || op == Op_mul || op == Op_min || op == Op_max)
+ setCommutative();
+ }
+
+ static MSimdBinaryArith* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ Operation op)
+ {
+ return new (alloc) MSimdBinaryArith(left, right, op);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdBinaryArith)
+
+    // Create an MSimdBinaryArith instruction and add it to the basic block.
+    // Possibly create and add an equivalent sequence of instructions instead
+    // if the current target doesn't support the requested arithmetic operation
+    // directly.
+ static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op);
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ Operation operation() const { return operation_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ return operation_ == ins->toSimdBinaryArith()->operation();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdBinaryArith)
+};
+
+class MSimdBinarySaturating
+ : public MBinaryInstruction,
+ public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1>>::Data
+{
+ public:
+ enum Operation
+ {
+ add,
+ sub,
+ };
+
+ static const char* OperationName(Operation op)
+ {
+ switch (op) {
+ case add:
+ return "add";
+ case sub:
+ return "sub";
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ private:
+ Operation operation_;
+ SimdSign sign_;
+
+ MSimdBinarySaturating(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+ : MBinaryInstruction(left, right)
+ , operation_(op)
+ , sign_(sign)
+ {
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType type = left->type();
+ MOZ_ASSERT(type == MIRType::Int8x16 || type == MIRType::Int16x8);
+ setResultType(type);
+ setMovable();
+ if (op == add)
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdBinarySaturating)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ Operation operation() const { return operation_; }
+ SimdSign signedness() const { return sign_; }
+
+ bool congruentTo(const MDefinition* ins) const override
+ {
+ if (!binaryCongruentTo(ins))
+ return false;
+ return operation_ == ins->toSimdBinarySaturating()->operation() &&
+ sign_ == ins->toSimdBinarySaturating()->signedness();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdBinarySaturating)
+};
+
+class MSimdBinaryBitwise
+ : public MBinaryInstruction,
+ public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+{
+ public:
+ enum Operation {
+ and_,
+ or_,
+ xor_
+ };
+
+ static const char* OperationName(Operation op) {
+ switch (op) {
+ case and_: return "and";
+ case or_: return "or";
+ case xor_: return "xor";
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ private:
+ Operation operation_;
+
+ MSimdBinaryBitwise(MDefinition* left, MDefinition* right, Operation op)
+ : MBinaryInstruction(left, right), operation_(op)
+ {
+ MOZ_ASSERT(left->type() == right->type());
+ MIRType type = left->type();
+ MOZ_ASSERT(IsSimdType(type));
+ setResultType(type);
+ setMovable();
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdBinaryBitwise)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ Operation operation() const { return operation_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ return operation_ == ins->toSimdBinaryBitwise()->operation();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MSimdBinaryBitwise)
+};
+
+class MSimdShift
+ : public MBinaryInstruction,
+ public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+{
+ public:
+ enum Operation {
+ lsh,
+ rsh,
+ ursh
+ };
+
+ private:
+ Operation operation_;
+
+ MSimdShift(MDefinition* left, MDefinition* right, Operation op)
+ : MBinaryInstruction(left, right), operation_(op)
+ {
+ MIRType type = left->type();
+ MOZ_ASSERT(IsIntegerSimdType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ static MSimdShift* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ Operation op)
+ {
+ return new (alloc) MSimdShift(left, right, op);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdShift)
+
+ // Create an MSimdShift instruction and add it to the basic block. Possibly
+ // create and add an equivalent sequence of instructions instead if the
+ // current target doesn't support the requested shift operation directly.
+ // Return the inserted MInstruction that computes the shifted value.
+ static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+ MDefinition* right, Operation op);
+
+ // Get the relevant right shift operation given the signedness of a type.
+ static Operation rshForSign(SimdSign sign) {
+ return sign == SimdSign::Unsigned ? ursh : rsh;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ Operation operation() const { return operation_; }
+
+ static const char* OperationName(Operation op) {
+ switch (op) {
+ case lsh: return "lsh";
+ case rsh: return "rsh-arithmetic";
+ case ursh: return "rsh-logical";
+ }
+ MOZ_CRASH("unexpected operation");
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ return operation_ == ins->toSimdShift()->operation();
+ }
+
+ ALLOW_CLONE(MSimdShift)
+};
+
+class MSimdSelect
+ : public MTernaryInstruction,
+ public SimdSelectPolicy::Data
+{
+ MSimdSelect(MDefinition* mask, MDefinition* lhs, MDefinition* rhs)
+ : MTernaryInstruction(mask, lhs, rhs)
+ {
+ MOZ_ASSERT(IsBooleanSimdType(mask->type()));
+        MOZ_ASSERT(lhs->type() == rhs->type());
+ MIRType type = lhs->type();
+ MOZ_ASSERT(IsSimdType(type));
+ setResultType(type);
+ specialization_ = type;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdSelect)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, mask))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MSimdSelect)
+};
+
+// Deep clone a constant JSObject.
+class MCloneLiteral
+ : public MUnaryInstruction,
+ public ObjectPolicy<0>::Data
+{
+ protected:
+ explicit MCloneLiteral(MDefinition* obj)
+ : MUnaryInstruction(obj)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CloneLiteral)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MParameter : public MNullaryInstruction
+{
+ int32_t index_;
+
+ MParameter(int32_t index, TemporaryTypeSet* types)
+ : index_(index)
+ {
+ setResultType(MIRType::Value);
+ setResultTypeSet(types);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Parameter)
+ TRIVIAL_NEW_WRAPPERS
+
+ static const int32_t THIS_SLOT = -1;
+ int32_t index() const {
+ return index_;
+ }
+ void printOpcode(GenericPrinter& out) const override;
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+};
+
+class MCallee : public MNullaryInstruction
+{
+ public:
+ MCallee()
+ {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Callee)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MIsConstructing : public MNullaryInstruction
+{
+ public:
+ MIsConstructing() {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsConstructing)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MControlInstruction : public MInstruction
+{
+ public:
+ MControlInstruction()
+ { }
+
+ virtual size_t numSuccessors() const = 0;
+ virtual MBasicBlock* getSuccessor(size_t i) const = 0;
+ virtual void replaceSuccessor(size_t i, MBasicBlock* successor) = 0;
+
+ bool isControlInstruction() const override {
+ return true;
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+};
+
+class MTableSwitch final
+ : public MControlInstruction,
+ public NoFloatPolicy<0>::Data
+{
+    // The successors of the tableswitch:
+    // - First successor = the default case.
+    // - Second and subsequent successors = the cases, sorted by case index.
+ Vector<MBasicBlock*, 0, JitAllocPolicy> successors_;
+ Vector<size_t, 0, JitAllocPolicy> cases_;
+
+    // Contains the blocks/cases that still need to get built.
+ Vector<MBasicBlock*, 0, JitAllocPolicy> blocks_;
+
+ MUse operand_;
+ int32_t low_;
+ int32_t high_;
+
+ void initOperand(size_t index, MDefinition* operand) {
+ MOZ_ASSERT(index == 0);
+ operand_.init(operand, this);
+ }
+
+ MTableSwitch(TempAllocator& alloc, MDefinition* ins,
+ int32_t low, int32_t high)
+ : successors_(alloc),
+ cases_(alloc),
+ blocks_(alloc),
+ low_(low),
+ high_(high)
+ {
+ initOperand(0, ins);
+ }
+
+ protected:
+ MUse* getUseFor(size_t index) override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+
+ const MUse* getUseFor(size_t index) const override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+
+ public:
+ INSTRUCTION_HEADER(TableSwitch)
+ static MTableSwitch* New(TempAllocator& alloc, MDefinition* ins, int32_t low, int32_t high);
+
+ size_t numSuccessors() const override {
+ return successors_.length();
+ }
+
+ MOZ_MUST_USE bool addSuccessor(MBasicBlock* successor, size_t* index) {
+ MOZ_ASSERT(successors_.length() < (size_t)(high_ - low_ + 2));
+ MOZ_ASSERT(!successors_.empty());
+ *index = successors_.length();
+ return successors_.append(successor);
+ }
+
+ MBasicBlock* getSuccessor(size_t i) const override {
+ MOZ_ASSERT(i < numSuccessors());
+ return successors_[i];
+ }
+
+ void replaceSuccessor(size_t i, MBasicBlock* successor) override {
+ MOZ_ASSERT(i < numSuccessors());
+ successors_[i] = successor;
+ }
+
+ MBasicBlock** blocks() {
+ return &blocks_[0];
+ }
+
+ size_t numBlocks() const {
+ return blocks_.length();
+ }
+
+ int32_t low() const {
+ return low_;
+ }
+
+ int32_t high() const {
+ return high_;
+ }
+
+ MBasicBlock* getDefault() const {
+ return getSuccessor(0);
+ }
+
+ MBasicBlock* getCase(size_t i) const {
+ return getSuccessor(cases_[i]);
+ }
+
+ size_t numCases() const {
+ return high() - low() + 1;
+ }
+
+ MOZ_MUST_USE bool addDefault(MBasicBlock* block, size_t* index = nullptr) {
+ MOZ_ASSERT(successors_.empty());
+ if (index)
+ *index = 0;
+ return successors_.append(block);
+ }
+
+ MOZ_MUST_USE bool addCase(size_t successorIndex) {
+ return cases_.append(successorIndex);
+ }
+
+ MBasicBlock* getBlock(size_t i) const {
+ MOZ_ASSERT(i < numBlocks());
+ return blocks_[i];
+ }
+
+ MOZ_MUST_USE bool addBlock(MBasicBlock* block) {
+ return blocks_.append(block);
+ }
+
+ MDefinition* getOperand(size_t index) const override {
+ MOZ_ASSERT(index == 0);
+ return operand_.producer();
+ }
+
+ size_t numOperands() const override {
+ return 1;
+ }
+
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u == getUseFor(0));
+ return 0;
+ }
+
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ MOZ_ASSERT(index == 0);
+ operand_.replaceProducer(operand);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
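+
+// Construction sketch (|index|, |low|, |high|, and the block pointers are
+// placeholders): the default successor must be registered before any case
+// successor, and each case records the index of the successor it jumps to.
+//
+//   MTableSwitch* ts = MTableSwitch::New(alloc, index, low, high);
+//   if (!ts->addDefault(defaultBlock))
+//     return false;
+//   size_t successorIndex;
+//   if (!ts->addSuccessor(caseBlock, &successorIndex) ||
+//       !ts->addCase(successorIndex))
+//     return false;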
+
+template <size_t Arity, size_t Successors>
+class MAryControlInstruction : public MControlInstruction
+{
+ mozilla::Array<MUse, Arity> operands_;
+ mozilla::Array<MBasicBlock*, Successors> successors_;
+
+ protected:
+ void setSuccessor(size_t index, MBasicBlock* successor) {
+ successors_[index] = successor;
+ }
+
+ MUse* getUseFor(size_t index) final override {
+ return &operands_[index];
+ }
+ const MUse* getUseFor(size_t index) const final override {
+ return &operands_[index];
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ operands_[index].init(operand, this);
+ }
+
+ public:
+ MDefinition* getOperand(size_t index) const final override {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final override {
+ return Arity;
+ }
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ operands_[index].replaceProducer(operand);
+ }
+ size_t numSuccessors() const final override {
+ return Successors;
+ }
+ MBasicBlock* getSuccessor(size_t i) const final override {
+ return successors_[i];
+ }
+ void replaceSuccessor(size_t i, MBasicBlock* succ) final override {
+ successors_[i] = succ;
+ }
+};
+
+// Jump to the start of another basic block.
+class MGoto
+ : public MAryControlInstruction<0, 1>,
+ public NoTypePolicy::Data
+{
+ explicit MGoto(MBasicBlock* target) {
+ setSuccessor(0, target);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Goto)
+ static MGoto* New(TempAllocator& alloc, MBasicBlock* target);
+ static MGoto* New(TempAllocator::Fallible alloc, MBasicBlock* target);
+
+ // Variant that may patch the target later.
+ static MGoto* New(TempAllocator& alloc);
+
+ static const size_t TargetIndex = 0;
+
+ MBasicBlock* target() {
+ return getSuccessor(0);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+enum BranchDirection {
+ FALSE_BRANCH,
+ TRUE_BRANCH
+};
+
+static inline BranchDirection
+NegateBranchDirection(BranchDirection dir)
+{
+ return (dir == FALSE_BRANCH) ? TRUE_BRANCH : FALSE_BRANCH;
+}
+
+// Tests if the input instruction evaluates to true or false, and jumps to the
+// start of a corresponding basic block.
+class MTest
+ : public MAryControlInstruction<1, 2>,
+ public TestPolicy::Data
+{
+ bool operandMightEmulateUndefined_;
+
+ MTest(MDefinition* ins, MBasicBlock* trueBranch, MBasicBlock* falseBranch)
+ : operandMightEmulateUndefined_(true)
+ {
+ initOperand(0, ins);
+ setSuccessor(0, trueBranch);
+ setSuccessor(1, falseBranch);
+ }
+
+ // Variant which may patch the ifTrue branch later.
+ MTest(MDefinition* ins, MBasicBlock* falseBranch)
+ : MTest(ins, nullptr, falseBranch)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(Test)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input))
+
+ static const size_t TrueBranchIndex = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MBasicBlock* branchSuccessor(BranchDirection dir) const {
+ return (dir == TRUE_BRANCH) ? ifTrue() : ifFalse();
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ // We cache whether our operand might emulate undefined, but we don't want
+ // to do that from New() or the constructor, since those can be called on
+ // background threads. So make callers explicitly call it if they want us
+ // to check whether the operand might do this. If this method is never
+ // called, we'll assume our operand can emulate undefined.
+ void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
+ MDefinition* foldsDoubleNegation(TempAllocator& alloc);
+ MDefinition* foldsConstant(TempAllocator& alloc);
+ MDefinition* foldsTypes(TempAllocator& alloc);
+ MDefinition* foldsNeedlessControlFlow(TempAllocator& alloc);
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
+ bool* filtersNull);
+
+ void markNoOperandEmulatesUndefined() {
+ operandMightEmulateUndefined_ = false;
+ }
+ bool operandMightEmulateUndefined() const {
+ return operandMightEmulateUndefined_;
+ }
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+};
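+
+// Usage note (a sketch; |cond|, |ifTrue|, |ifFalse|, and |constraints| are
+// placeholders): callers that can inspect type information should explicitly
+// call cacheOperandMightEmulateUndefined() after construction, as described
+// above; otherwise the instruction conservatively assumes the operand might
+// emulate undefined:
+//
+//   MTest* test = MTest::New(alloc, cond, ifTrue, ifFalse);
+//   test->cacheOperandMightEmulateUndefined(constraints);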
+
+// Equivalent to MTest(true, successor, fake), except without the foldsTo
+// method. This allows IonBuilder to insert fake CFG edges to magically protect
+// control flow for try-catch blocks.
+class MGotoWithFake
+ : public MAryControlInstruction<0, 2>,
+ public NoTypePolicy::Data
+{
+ MGotoWithFake(MBasicBlock* successor, MBasicBlock* fake)
+ {
+ setSuccessor(0, successor);
+ setSuccessor(1, fake);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GotoWithFake)
+ TRIVIAL_NEW_WRAPPERS
+
+ MBasicBlock* target() const {
+ return getSuccessor(0);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Returns from this function to the previous caller.
+class MReturn
+ : public MAryControlInstruction<1, 0>,
+ public BoxInputsPolicy::Data
+{
+ explicit MReturn(MDefinition* ins) {
+ initOperand(0, ins);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Return)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MThrow
+ : public MAryControlInstruction<1, 0>,
+ public BoxInputsPolicy::Data
+{
+ explicit MThrow(MDefinition* ins) {
+ initOperand(0, ins);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Throw)
+ TRIVIAL_NEW_WRAPPERS
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// Fabricate a type set containing only the type of the specified object.
+TemporaryTypeSet*
+MakeSingletonTypeSet(CompilerConstraintList* constraints, JSObject* obj);
+
+TemporaryTypeSet*
+MakeSingletonTypeSet(CompilerConstraintList* constraints, ObjectGroup* obj);
+
+MOZ_MUST_USE bool
+MergeTypes(TempAllocator& alloc, MIRType* ptype, TemporaryTypeSet** ptypeSet,
+ MIRType newType, TemporaryTypeSet* newTypeSet);
+
+bool
+TypeSetIncludes(TypeSet* types, MIRType input, TypeSet* inputTypes);
+
+bool
+EqualTypes(MIRType type1, TemporaryTypeSet* typeset1,
+ MIRType type2, TemporaryTypeSet* typeset2);
+
+bool
+CanStoreUnboxedType(TempAllocator& alloc,
+ JSValueType unboxedType, MIRType input, TypeSet* inputTypes);
+
+class MNewArray
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ // Number of elements to allocate for the array.
+ uint32_t length_;
+
+ // Heap where the array should be allocated.
+ gc::InitialHeap initialHeap_;
+
+ // Whether values written to this array should be converted to double first.
+ bool convertDoubleElements_;
+
+ jsbytecode* pc_;
+
+ bool vmCall_;
+
+ MNewArray(CompilerConstraintList* constraints, uint32_t length, MConstant* templateConst,
+ gc::InitialHeap initialHeap, jsbytecode* pc, bool vmCall = false);
+
+ public:
+ INSTRUCTION_HEADER(NewArray)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MNewArray* NewVM(TempAllocator& alloc, CompilerConstraintList* constraints,
+ uint32_t length, MConstant* templateConst,
+ gc::InitialHeap initialHeap, jsbytecode* pc)
+ {
+ return new(alloc) MNewArray(constraints, length, templateConst, initialHeap, pc, true);
+ }
+
+ uint32_t length() const {
+ return length_;
+ }
+
+ JSObject* templateObject() const {
+ return getOperand(0)->toConstant()->toObjectOrNull();
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ jsbytecode* pc() const {
+ return pc_;
+ }
+
+ bool isVMCall() const {
+ return vmCall_;
+ }
+
+ bool convertDoubleElements() const {
+ return convertDoubleElements_;
+ }
+
+    // NewArray is marked as non-effectful because all our allocations are
+    // either lazy when we are using "new Array(length)", or bounded by the
+    // script or the stack size when we are using "new Array(...)" or "[...]"
+    // notations. So we might have to allocate the array twice if we bail
+    // during the computation of the first element of the square bracket
+    // notation.
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ // The template object can safely be used in the recover instruction
+ // because it can never be mutated by any other function execution.
+ return templateObject() != nullptr;
+ }
+};
+
+class MNewArrayCopyOnWrite : public MNullaryInstruction
+{
+ CompilerGCPointer<ArrayObject*> templateObject_;
+ gc::InitialHeap initialHeap_;
+
+ MNewArrayCopyOnWrite(CompilerConstraintList* constraints, ArrayObject* templateObject,
+ gc::InitialHeap initialHeap)
+ : templateObject_(templateObject),
+ initialHeap_(initialHeap)
+ {
+ MOZ_ASSERT(!templateObject->isSingleton());
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewArrayCopyOnWrite)
+ TRIVIAL_NEW_WRAPPERS
+
+ ArrayObject* templateObject() const {
+ return templateObject_;
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject_);
+ }
+};
+
+class MNewArrayDynamicLength
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ CompilerObject templateObject_;
+ gc::InitialHeap initialHeap_;
+
+ MNewArrayDynamicLength(CompilerConstraintList* constraints, JSObject* templateObject,
+ gc::InitialHeap initialHeap, MDefinition* length)
+ : MUnaryInstruction(length),
+ templateObject_(templateObject),
+ initialHeap_(initialHeap)
+ {
+ setGuard(); // Need to throw if length is negative.
+ setResultType(MIRType::Object);
+ if (!templateObject->isSingleton())
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewArrayDynamicLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, length))
+
+ JSObject* templateObject() const {
+ return templateObject_;
+ }
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject_);
+ }
+};
+
+class MNewTypedArray
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ gc::InitialHeap initialHeap_;
+
+ MNewTypedArray(CompilerConstraintList* constraints, MConstant* templateConst,
+ gc::InitialHeap initialHeap)
+ : MUnaryInstruction(templateConst),
+ initialHeap_(initialHeap)
+ {
+ MOZ_ASSERT(!templateObject()->isSingleton());
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewTypedArray)
+ TRIVIAL_NEW_WRAPPERS
+
+ TypedArrayObject* templateObject() const {
+ return &getOperand(0)->toConstant()->toObject().as<TypedArrayObject>();
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+class MNewTypedArrayDynamicLength
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ CompilerObject templateObject_;
+ gc::InitialHeap initialHeap_;
+
+ MNewTypedArrayDynamicLength(CompilerConstraintList* constraints, JSObject* templateObject,
+ gc::InitialHeap initialHeap, MDefinition* length)
+ : MUnaryInstruction(length),
+ templateObject_(templateObject),
+ initialHeap_(initialHeap)
+ {
+ setGuard(); // Need to throw if length is negative.
+ setResultType(MIRType::Object);
+ if (!templateObject->isSingleton())
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewTypedArrayDynamicLength)
+
+ static MNewTypedArrayDynamicLength* New(TempAllocator& alloc, CompilerConstraintList* constraints,
+ JSObject* templateObject, gc::InitialHeap initialHeap,
+ MDefinition* length)
+ {
+ return new(alloc) MNewTypedArrayDynamicLength(constraints, templateObject, initialHeap, length);
+ }
+
+ MDefinition* length() const {
+ return getOperand(0);
+ }
+ JSObject* templateObject() const {
+ return templateObject_;
+ }
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject_);
+ }
+};
+
+class MNewObject
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ public:
+ enum Mode { ObjectLiteral, ObjectCreate };
+
+ private:
+ gc::InitialHeap initialHeap_;
+ Mode mode_;
+ bool vmCall_;
+
+ MNewObject(CompilerConstraintList* constraints, MConstant* templateConst,
+ gc::InitialHeap initialHeap, Mode mode, bool vmCall = false)
+ : MUnaryInstruction(templateConst),
+ initialHeap_(initialHeap),
+ mode_(mode),
+ vmCall_(vmCall)
+ {
+ MOZ_ASSERT_IF(mode != ObjectLiteral, templateObject());
+ setResultType(MIRType::Object);
+
+ if (JSObject* obj = templateObject())
+ setResultTypeSet(MakeSingletonTypeSet(constraints, obj));
+
+ // The constant is kept separate in an MConstant so that we can safely
+ // mark it during GC if we recover the object allocation. Otherwise, by
+ // making it emittedAtUses, we do not produce register allocations for
+ // it and instead inline its content into the code produced by the
+ // CodeGenerator.
+ if (templateConst->toConstant()->type() == MIRType::Object)
+ templateConst->setEmittedAtUses();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MNewObject* NewVM(TempAllocator& alloc, CompilerConstraintList* constraints,
+ MConstant* templateConst, gc::InitialHeap initialHeap,
+ Mode mode)
+ {
+ return new(alloc) MNewObject(constraints, templateConst, initialHeap, mode, true);
+ }
+
+ Mode mode() const {
+ return mode_;
+ }
+
+ JSObject* templateObject() const {
+ return getOperand(0)->toConstant()->toObjectOrNull();
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ bool isVMCall() const {
+ return vmCall_;
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ // The template object can safely be used in the recover instruction
+ // because it can never be mutated by any other function execution.
+ return templateObject() != nullptr;
+ }
+};
+
+class MNewTypedObject : public MNullaryInstruction
+{
+ CompilerGCPointer<InlineTypedObject*> templateObject_;
+ gc::InitialHeap initialHeap_;
+
+ MNewTypedObject(CompilerConstraintList* constraints,
+ InlineTypedObject* templateObject,
+ gc::InitialHeap initialHeap)
+ : templateObject_(templateObject),
+ initialHeap_(initialHeap)
+ {
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewTypedObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ InlineTypedObject* templateObject() const {
+ return templateObject_;
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject_);
+ }
+};
+
+class MTypedObjectDescr
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ private:
+ explicit MTypedObjectDescr(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypedObjectDescr)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Generic way of constructing a SIMD object in IonMonkey: this instruction
+// takes a SIMD instruction as its argument and returns a new SIMD object
+// whose type corresponds to the MIRType of its operand.
+class MSimdBox
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ protected:
+ CompilerGCPointer<InlineTypedObject*> templateObject_;
+ SimdType simdType_;
+ gc::InitialHeap initialHeap_;
+
+ MSimdBox(CompilerConstraintList* constraints,
+ MDefinition* op,
+ InlineTypedObject* templateObject,
+ SimdType simdType,
+ gc::InitialHeap initialHeap)
+ : MUnaryInstruction(op),
+ templateObject_(templateObject),
+ simdType_(simdType),
+ initialHeap_(initialHeap)
+ {
+ MOZ_ASSERT(IsSimdType(op->type()));
+ setMovable();
+ setResultType(MIRType::Object);
+ if (constraints)
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdBox)
+ TRIVIAL_NEW_WRAPPERS
+
+ InlineTypedObject* templateObject() const {
+ return templateObject_;
+ }
+
+ SimdType simdType() const {
+ return simdType_;
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ const MSimdBox* box = ins->toSimdBox();
+ if (box->simdType() != simdType())
+ return false;
+ MOZ_ASSERT(box->templateObject() == templateObject());
+ if (box->initialHeap() != initialHeap())
+ return false;
+ return true;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject_);
+ }
+};
+
+class MSimdUnbox
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ protected:
+ SimdType simdType_;
+
+ MSimdUnbox(MDefinition* op, SimdType simdType)
+ : MUnaryInstruction(op),
+ simdType_(simdType)
+ {
+ MIRType type = SimdTypeToMIRType(simdType);
+ MOZ_ASSERT(IsSimdType(type));
+ setGuard();
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SimdUnbox)
+ TRIVIAL_NEW_WRAPPERS
+ ALLOW_CLONE(MSimdUnbox)
+
+ SimdType simdType() const { return simdType_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ return ins->toSimdUnbox()->simdType() == simdType();
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+};
+
+// Creates a new derived type object. At runtime, this is just a call
+// to `BinaryBlock::createDerived()`. That is, the MIR itself does not
+// compile to particularly optimized code. However, using a distinct
+// MIR for creating derived type objects allows the compiler to
+// optimize ephemeral typed objects as would be created for a
+// reference like `a.b.c` -- here, the `a.b` will create an ephemeral
+// derived type object that aliases the memory of `a` itself. The
+// specific nature of `a.b` is revealed by using
+// `MNewDerivedTypedObject` rather than `MGetProperty` or what have
+// you. Moreover, the compiler knows that there are no side-effects,
+// so `MNewDerivedTypedObject` instructions can be reordered or pruned
+// as dead code.
+class MNewDerivedTypedObject
+ : public MTernaryInstruction,
+ public Mix3Policy<ObjectPolicy<0>,
+ ObjectPolicy<1>,
+ IntPolicy<2> >::Data
+{
+ private:
+ TypedObjectPrediction prediction_;
+
+ MNewDerivedTypedObject(TypedObjectPrediction prediction,
+ MDefinition* type,
+ MDefinition* owner,
+ MDefinition* offset)
+ : MTernaryInstruction(type, owner, offset),
+ prediction_(prediction)
+ {
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewDerivedTypedObject)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, type), (1, owner), (2, offset))
+
+ TypedObjectPrediction prediction() const {
+ return prediction_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
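+
+// Illustrative sketch (hypothetical typed-object code; |PointType| and
+// |LineType| are made-up struct types):
+//
+//   var line = new LineType({from: {x: 0, y: 0}, to: {x: 1, y: 1}});
+//   var x = line.from.x;
+//
+// Here the intermediate |line.from| would be represented by an
+// MNewDerivedTypedObject whose owner is |line| and whose offset is the byte
+// offset of the |from| field, so the ephemeral derived object can be removed
+// as dead code or recovered on bailout instead of being allocated.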
+
+// This vector is used when the recovered object is kept unboxed. It maps the
+// offset of each property to the index of the corresponding operand in the
+// object state.
+struct OperandIndexMap : public TempObject
+{
+ // The number of properties is limited by scalar replacement. Thus we cannot
+ // have a large number of properties.
+ FixedList<uint8_t> map;
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc, JSObject* templateObject);
+};
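+
+// For illustration (hypothetical layout, not real data): for an unboxed
+// object with properties at byte offsets 0 and 8, |map| might contain
+// map[0] == 1 and map[8] == 2, meaning the recovered values of those
+// properties are operands 1 and 2 of the corresponding MObjectState, while
+// offsets that hold no property map to 0 (see MObjectState::hasOffset below).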
+
+// Represents the content of all slots of an object. This instruction is not
+// lowered and is not used to generate code.
+class MObjectState
+ : public MVariadicInstruction,
+ public NoFloatPolicyAfter<1>::Data
+{
+ private:
+ uint32_t numSlots_;
+ uint32_t numFixedSlots_; // valid if isUnboxed() == false.
+ OperandIndexMap* operandIndex_; // valid if isUnboxed() == true.
+
+ bool isUnboxed() const {
+ return operandIndex_ != nullptr;
+ }
+
+ MObjectState(JSObject *templateObject, OperandIndexMap* operandIndex);
+ explicit MObjectState(MObjectState* state);
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc, MDefinition* obj);
+
+ void initSlot(uint32_t slot, MDefinition* def) {
+ initOperand(slot + 1, def);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ObjectState)
+ NAMED_OPERANDS((0, object))
+
+ // Return the template object of any object creation which can be recovered
+ // on bailout.
+ static JSObject* templateObjectOf(MDefinition* obj);
+
+ static MObjectState* New(TempAllocator& alloc, MDefinition* obj);
+ static MObjectState* Copy(TempAllocator& alloc, MObjectState* state);
+
+ // As we might read uninitialized properties, we have to copy the
+ // initial values from the template object.
+ MOZ_MUST_USE bool initFromTemplateObject(TempAllocator& alloc, MDefinition* undefinedVal);
+
+ size_t numFixedSlots() const {
+ MOZ_ASSERT(!isUnboxed());
+ return numFixedSlots_;
+ }
+ size_t numSlots() const {
+ return numSlots_;
+ }
+
+ MDefinition* getSlot(uint32_t slot) const {
+ return getOperand(slot + 1);
+ }
+ void setSlot(uint32_t slot, MDefinition* def) {
+ replaceOperand(slot + 1, def);
+ }
+
+ bool hasFixedSlot(uint32_t slot) const {
+ return slot < numSlots() && slot < numFixedSlots();
+ }
+ MDefinition* getFixedSlot(uint32_t slot) const {
+ MOZ_ASSERT(slot < numFixedSlots());
+ return getSlot(slot);
+ }
+ void setFixedSlot(uint32_t slot, MDefinition* def) {
+ MOZ_ASSERT(slot < numFixedSlots());
+ setSlot(slot, def);
+ }
+
+ bool hasDynamicSlot(uint32_t slot) const {
+ return numFixedSlots() < numSlots() && slot < numSlots() - numFixedSlots();
+ }
+ MDefinition* getDynamicSlot(uint32_t slot) const {
+ return getSlot(slot + numFixedSlots());
+ }
+ void setDynamicSlot(uint32_t slot, MDefinition* def) {
+ setSlot(slot + numFixedSlots(), def);
+ }
+
+ // Interface reserved for unboxed objects.
+ bool hasOffset(uint32_t offset) const {
+ MOZ_ASSERT(isUnboxed());
+ return offset < operandIndex_->map.length() && operandIndex_->map[offset] != 0;
+ }
+ MDefinition* getOffset(uint32_t offset) const {
+ return getOperand(operandIndex_->map[offset]);
+ }
+ void setOffset(uint32_t offset, MDefinition* def) {
+ replaceOperand(operandIndex_->map[offset], def);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
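+
+// Worked example of the slot numbering above (hypothetical object shape, for
+// illustration only): for an object with 4 fixed slots and 6 slots in total,
+// operand 0 is the object itself, operands 1..4 hold the fixed slots, and
+// operands 5..6 hold the two dynamic slots; getDynamicSlot(0) therefore
+// reads getSlot(0 + numFixedSlots()), i.e. operand 5.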
+
+// Represents the contents of all elements of an array. This instruction is not
+// lowered and is not used to generate code.
+class MArrayState
+ : public MVariadicInstruction,
+ public NoFloatPolicyAfter<2>::Data
+{
+ private:
+ uint32_t numElements_;
+
+ explicit MArrayState(MDefinition* arr);
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc, MDefinition* obj, MDefinition* len);
+
+ void initElement(uint32_t index, MDefinition* def) {
+ initOperand(index + 2, def);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrayState)
+ NAMED_OPERANDS((0, array), (1, initializedLength))
+
+ static MArrayState* New(TempAllocator& alloc, MDefinition* arr, MDefinition* undefinedVal,
+ MDefinition* initLength);
+ static MArrayState* Copy(TempAllocator& alloc, MArrayState* state);
+
+ void setInitializedLength(MDefinition* def) {
+ replaceOperand(1, def);
+ }
+
+ size_t numElements() const {
+ return numElements_;
+ }
+
+ MDefinition* getElement(uint32_t index) const {
+ return getOperand(index + 2);
+ }
+ void setElement(uint32_t index, MDefinition* def) {
+ replaceOperand(index + 2, def);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+// Setting __proto__ in an object literal.
+class MMutateProto
+ : public MAryInstruction<2>,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+ protected:
+ MMutateProto(MDefinition* obj, MDefinition* value)
+ {
+ initOperand(0, obj);
+ initOperand(1, value);
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(MutateProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getObject), (1, getValue))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// Slow path for adding a property to an object without a known base.
+class MInitProp
+ : public MAryInstruction<2>,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+ CompilerPropertyName name_;
+
+ protected:
+ MInitProp(MDefinition* obj, PropertyName* name, MDefinition* value)
+ : name_(name)
+ {
+ initOperand(0, obj);
+ initOperand(1, value);
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InitProp)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getObject), (1, getValue))
+
+ PropertyName* propertyName() const {
+ return name_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
+class MInitPropGetterSetter
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+ CompilerPropertyName name_;
+
+ MInitPropGetterSetter(MDefinition* obj, PropertyName* name, MDefinition* value)
+ : MBinaryInstruction(obj, value),
+ name_(name)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(InitPropGetterSetter)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ PropertyName* name() const {
+ return name_;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
+class MInitElem
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>, BoxPolicy<1>, BoxPolicy<2> >::Data
+{
+ MInitElem(MDefinition* obj, MDefinition* id, MDefinition* value)
+ {
+ initOperand(0, obj);
+ initOperand(1, id);
+ initOperand(2, value);
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InitElem)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getObject), (1, getId), (2, getValue))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MInitElemGetterSetter
+ : public MTernaryInstruction,
+ public Mix3Policy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2> >::Data
+{
+ MInitElemGetterSetter(MDefinition* obj, MDefinition* id, MDefinition* value)
+ : MTernaryInstruction(obj, id, value)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(InitElemGetterSetter)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, idValue), (2, value))
+
+};
+
+// WrappedFunction wraps a JSFunction so it can safely be used off-thread.
+// In particular, a function's flags can be modified on the main thread as
+// functions are relazified and delazified, so we must be careful not to access
+// these flags off-thread.
+class WrappedFunction : public TempObject
+{
+ CompilerFunction fun_;
+ uint16_t nargs_;
+ bool isNative_ : 1;
+ bool isConstructor_ : 1;
+ bool isClassConstructor_ : 1;
+ bool isSelfHostedBuiltin_ : 1;
+
+ public:
+ explicit WrappedFunction(JSFunction* fun);
+ size_t nargs() const { return nargs_; }
+ bool isNative() const { return isNative_; }
+ bool isConstructor() const { return isConstructor_; }
+ bool isClassConstructor() const { return isClassConstructor_; }
+ bool isSelfHostedBuiltin() const { return isSelfHostedBuiltin_; }
+
+ // fun->native() and fun->jitInfo() can safely be called off-thread: these
+ // fields never change.
+ JSNative native() const { return fun_->native(); }
+ const JSJitInfo* jitInfo() const { return fun_->jitInfo(); }
+
+ JSFunction* rawJSFunction() const { return fun_; }
+
+ bool appendRoots(MRootList& roots) const {
+ return roots.append(fun_);
+ }
+};
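+
+// Hypothetical usage sketch (for illustration only): when type inference
+// identifies a single call target, the builder can allocate a WrappedFunction
+// in the compilation's temporary allocator and hand it to the MCall below, so
+// that later off-thread queries such as nargs() or isNative() never touch the
+// JSFunction's mutable flags.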
+
+class MCall
+ : public MVariadicInstruction,
+ public CallPolicy::Data
+{
+ private:
+ // An MCall uses the MPrepareCall instruction, the MDefinition for the
+ // function, and the MPassArg instructions; they are all stored in the
+ // same operand list.
+ static const size_t FunctionOperandIndex = 0;
+ static const size_t NumNonArgumentOperands = 1;
+
+ protected:
+ // Monomorphic cache of single target from TI, or nullptr.
+ WrappedFunction* target_;
+
+ // Original value of argc from the bytecode.
+ uint32_t numActualArgs_;
+
+ // True if the call is for JSOP_NEW.
+ bool construct_;
+
+ bool needsArgCheck_;
+
+ MCall(WrappedFunction* target, uint32_t numActualArgs, bool construct)
+ : target_(target),
+ numActualArgs_(numActualArgs),
+ construct_(construct),
+ needsArgCheck_(true)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Call)
+ static MCall* New(TempAllocator& alloc, JSFunction* target, size_t maxArgc, size_t numActualArgs,
+ bool construct, bool isDOMCall);
+
+ void initFunction(MDefinition* func) {
+ initOperand(FunctionOperandIndex, func);
+ }
+
+ bool needsArgCheck() const {
+ return needsArgCheck_;
+ }
+
+ void disableArgCheck() {
+ needsArgCheck_ = false;
+ }
+ MDefinition* getFunction() const {
+ return getOperand(FunctionOperandIndex);
+ }
+ void replaceFunction(MInstruction* newfunc) {
+ replaceOperand(FunctionOperandIndex, newfunc);
+ }
+
+ void addArg(size_t argnum, MDefinition* arg);
+
+ MDefinition* getArg(uint32_t index) const {
+ return getOperand(NumNonArgumentOperands + index);
+ }
+
+ static size_t IndexOfThis() {
+ return NumNonArgumentOperands;
+ }
+ static size_t IndexOfArgument(size_t index) {
+ return NumNonArgumentOperands + index + 1; // +1 to skip |this|.
+ }
+ static size_t IndexOfStackArg(size_t index) {
+ return NumNonArgumentOperands + index;
+ }
+
+ // For TI-informed monomorphic callsites.
+ WrappedFunction* getSingleTarget() const {
+ return target_;
+ }
+
+ bool isConstructing() const {
+ return construct_;
+ }
+
+ // The number of stack arguments is the maximum of the number of formal
+ // arguments and the number of actual arguments. The number of stack
+ // arguments includes the |undefined| padding added in case of underflow,
+ // and it includes |this|.
+ uint32_t numStackArgs() const {
+ return numOperands() - NumNonArgumentOperands;
+ }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const {
+ return numActualArgs_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ virtual bool isCallDOMNative() const {
+ return false;
+ }
+
+ // A method that can be called to tell the MCall to figure out whether it's
+ // movable or not. This can't be done in the constructor, because it
+ // depends on the arguments to the call, and those aren't passed to the
+ // constructor but are set up later via addArg.
+ virtual void computeMovable() {
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ if (target_)
+ return target_->appendRoots(roots);
+ return true;
+ }
+};
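+
+// Worked example of the argument bookkeeping above (hypothetical call site,
+// for illustration only): for |f(1, 2)| where |f| declares three formal
+// parameters, numActualArgs() is 2 (the bytecode's argc, excluding |this|),
+// while numStackArgs() is 4: |this|, the two actual arguments, and one
+// |undefined| padding value for the missing formal. Operand 0 is the callee,
+// so those stack arguments occupy operands 1 through 4.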
+
+class MCallDOMNative : public MCall
+{
+ // A helper class for MCalls for DOM natives. Note that this is NOT
+ // actually a separate MIR op from MCall, because all sorts of places use
+ // isCall() to check for calls and all we really want is to overload a few
+ // virtual things from MCall.
+ protected:
+ MCallDOMNative(WrappedFunction* target, uint32_t numActualArgs)
+ : MCall(target, numActualArgs, false)
+ {
+ MOZ_ASSERT(getJitInfo()->type() != JSJitInfo::InlinableNative);
+
+ // If our jitinfo is not marked eliminatable, that means that our C++
+ // implementation is fallible or that it never wants to be eliminated or
+ // that we have no hope of ever doing the sort of argument analysis that
+ // would allow us to determine that we're side-effect-free. In the
+ // latter case we wouldn't get DCEd no matter what, but for the former
+ // two cases we have to explicitly say that we can't be DCEd.
+ if (!getJitInfo()->isEliminatable)
+ setGuard();
+ }
+
+ friend MCall* MCall::New(TempAllocator& alloc, JSFunction* target, size_t maxArgc,
+ size_t numActualArgs, bool construct, bool isDOMCall);
+
+ const JSJitInfo* getJitInfo() const;
+ public:
+ virtual AliasSet getAliasSet() const override;
+
+ virtual bool congruentTo(const MDefinition* ins) const override;
+
+ virtual bool isCallDOMNative() const override {
+ return true;
+ }
+
+ virtual void computeMovable() override;
+};
+
+// arr.splice(start, deleteCount) with unused return value.
+class MArraySplice
+ : public MTernaryInstruction,
+ public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >::Data
+{
+ private:
+
+ MArraySplice(MDefinition* object, MDefinition* start, MDefinition* deleteCount)
+ : MTernaryInstruction(object, start, deleteCount)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(ArraySplice)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, start), (2, deleteCount))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// fun.apply(self, arguments)
+class MApplyArgs
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >::Data
+{
+ protected:
+ // Monomorphic cache of single target from TI, or nullptr.
+ WrappedFunction* target_;
+
+ MApplyArgs(WrappedFunction* target, MDefinition* fun, MDefinition* argc, MDefinition* self)
+ : target_(target)
+ {
+ initOperand(0, fun);
+ initOperand(1, argc);
+ initOperand(2, self);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ApplyArgs)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getArgc), (2, getThis))
+
+ // For TI-informed monomorphic callsites.
+ WrappedFunction* getSingleTarget() const {
+ return target_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ if (target_)
+ return target_->appendRoots(roots);
+ return true;
+ }
+};
+
+// fun.apply(fn, array)
+class MApplyArray
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2> >::Data
+{
+ protected:
+ // Monomorphic cache of single target from TI, or nullptr.
+ WrappedFunction* target_;
+
+ MApplyArray(WrappedFunction* target, MDefinition* fun, MDefinition* elements, MDefinition* self)
+ : target_(target)
+ {
+ initOperand(0, fun);
+ initOperand(1, elements);
+ initOperand(2, self);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ApplyArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getElements), (2, getThis))
+
+ // For TI-informed monomorphic callsites.
+ WrappedFunction* getSingleTarget() const {
+ return target_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ if (target_)
+ return target_->appendRoots(roots);
+ return true;
+ }
+};
+
+class MBail : public MNullaryInstruction
+{
+ protected:
+ explicit MBail(BailoutKind kind)
+ : MNullaryInstruction()
+ {
+ bailoutKind_ = kind;
+ setGuard();
+ }
+
+ private:
+ BailoutKind bailoutKind_;
+
+ public:
+ INSTRUCTION_HEADER(Bail)
+
+ static MBail*
+ New(TempAllocator& alloc, BailoutKind kind) {
+ return new(alloc) MBail(kind);
+ }
+ static MBail*
+ New(TempAllocator& alloc) {
+ return new(alloc) MBail(Bailout_Inevitable);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+};
+
+class MUnreachable
+ : public MAryControlInstruction<0, 0>,
+ public NoTypePolicy::Data
+{
+ public:
+ INSTRUCTION_HEADER(Unreachable)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// This class serves as a way to force the encoding of a snapshot, even if
+// no resume point uses it. This is useful for running
+// MAssertRecoveredOnBailout assertions.
+class MEncodeSnapshot : public MNullaryInstruction
+{
+ protected:
+ MEncodeSnapshot()
+ : MNullaryInstruction()
+ {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(EncodeSnapshot)
+
+ static MEncodeSnapshot*
+ New(TempAllocator& alloc) {
+ return new(alloc) MEncodeSnapshot();
+ }
+};
+
+class MAssertRecoveredOnBailout
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ protected:
+ bool mustBeRecovered_;
+
+ MAssertRecoveredOnBailout(MDefinition* ins, bool mustBeRecovered)
+ : MUnaryInstruction(ins), mustBeRecovered_(mustBeRecovered)
+ {
+ setResultType(MIRType::Value);
+ setRecoveredOnBailout();
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertRecoveredOnBailout)
+ TRIVIAL_NEW_WRAPPERS
+
+ // Needed to assert that float32 instructions are correctly recovered.
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+class MAssertFloat32
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ protected:
+ bool mustBeFloat32_;
+
+ MAssertFloat32(MDefinition* value, bool mustBeFloat32)
+ : MUnaryInstruction(value), mustBeFloat32_(mustBeFloat32)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+
+ bool mustBeFloat32() const { return mustBeFloat32_; }
+};
+
+class MGetDynamicName
+ : public MAryInstruction<2>,
+ public MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<1> >::Data
+{
+ protected:
+ MGetDynamicName(MDefinition* envChain, MDefinition* name)
+ {
+ initOperand(0, envChain);
+ initOperand(1, name);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetDynamicName)
+ NAMED_OPERANDS((0, getEnvironmentChain), (1, getName))
+
+ static MGetDynamicName*
+ New(TempAllocator& alloc, MDefinition* envChain, MDefinition* name) {
+ return new(alloc) MGetDynamicName(envChain, name);
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MCallDirectEval
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>,
+ StringPolicy<1>,
+ BoxPolicy<2> >::Data
+{
+ protected:
+ MCallDirectEval(MDefinition* envChain, MDefinition* string,
+ MDefinition* newTargetValue, jsbytecode* pc)
+ : pc_(pc)
+ {
+ initOperand(0, envChain);
+ initOperand(1, string);
+ initOperand(2, newTargetValue);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallDirectEval)
+ NAMED_OPERANDS((0, getEnvironmentChain), (1, getString), (2, getNewTargetValue))
+
+ static MCallDirectEval*
+ New(TempAllocator& alloc, MDefinition* envChain, MDefinition* string,
+ MDefinition* newTargetValue, jsbytecode* pc)
+ {
+ return new(alloc) MCallDirectEval(envChain, string, newTargetValue, pc);
+ }
+
+ jsbytecode* pc() const {
+ return pc_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ private:
+ jsbytecode* pc_;
+};
+
+class MCompare
+ : public MBinaryInstruction,
+ public ComparePolicy::Data
+{
+ public:
+ enum CompareType {
+
+ // Anything compared to Undefined
+ Compare_Undefined,
+
+ // Anything compared to Null
+ Compare_Null,
+
+ // Undefined compared to Boolean
+ // Null compared to Boolean
+ // Double compared to Boolean
+ // String compared to Boolean
+ // Symbol compared to Boolean
+ // Object compared to Boolean
+ // Value compared to Boolean
+ Compare_Boolean,
+
+ // Int32 compared to Int32
+ // Boolean compared to Boolean
+ Compare_Int32,
+ Compare_Int32MaybeCoerceBoth,
+ Compare_Int32MaybeCoerceLHS,
+ Compare_Int32MaybeCoerceRHS,
+
+ // Int32 compared as unsigneds
+ Compare_UInt32,
+
+ // Int64 compared to Int64.
+ Compare_Int64,
+
+ // Int64 compared as unsigneds.
+ Compare_UInt64,
+
+ // Double compared to Double
+ Compare_Double,
+
+ Compare_DoubleMaybeCoerceLHS,
+ Compare_DoubleMaybeCoerceRHS,
+
+ // Float compared to Float
+ Compare_Float32,
+
+ // String compared to String
+ Compare_String,
+
+ // Undefined compared to String
+ // Null compared to String
+ // Boolean compared to String
+ // Int32 compared to String
+ // Double compared to String
+ // Object compared to String
+ // Value compared to String
+ Compare_StrictString,
+
+ // Object compared to Object
+ Compare_Object,
+
+ // Compare 2 values bitwise
+ Compare_Bitwise,
+
+ // All other possible compares
+ Compare_Unknown
+ };
+
+ private:
+ CompareType compareType_;
+ JSOp jsop_;
+ bool operandMightEmulateUndefined_;
+ bool operandsAreNeverNaN_;
+
+ // When a floating-point comparison is converted to an integer comparison
+ // (when range analysis proves it safe), we need to convert the operands
+ // to integer as well.
+ bool truncateOperands_;
+
+ MCompare(MDefinition* left, MDefinition* right, JSOp jsop)
+ : MBinaryInstruction(left, right),
+ compareType_(Compare_Unknown),
+ jsop_(jsop),
+ operandMightEmulateUndefined_(true),
+ operandsAreNeverNaN_(false),
+ truncateOperands_(false)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ MCompare(MDefinition* left, MDefinition* right, JSOp jsop, CompareType compareType)
+ : MCompare(left, right, jsop)
+ {
+ MOZ_ASSERT(compareType == Compare_Int32 || compareType == Compare_UInt32 ||
+ compareType == Compare_Int64 || compareType == Compare_UInt64 ||
+ compareType == Compare_Double || compareType == Compare_Float32);
+ compareType_ = compareType;
+ operandMightEmulateUndefined_ = false;
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Compare)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOZ_MUST_USE bool tryFold(bool* result);
+ MOZ_MUST_USE bool evaluateConstantOperands(TempAllocator& alloc, bool* result);
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
+ bool* filtersNull);
+
+ CompareType compareType() const {
+ return compareType_;
+ }
+ bool isInt32Comparison() const {
+ return compareType() == Compare_Int32 ||
+ compareType() == Compare_Int32MaybeCoerceBoth ||
+ compareType() == Compare_Int32MaybeCoerceLHS ||
+ compareType() == Compare_Int32MaybeCoerceRHS;
+ }
+ bool isDoubleComparison() const {
+ return compareType() == Compare_Double ||
+ compareType() == Compare_DoubleMaybeCoerceLHS ||
+ compareType() == Compare_DoubleMaybeCoerceRHS;
+ }
+ bool isFloat32Comparison() const {
+ return compareType() == Compare_Float32;
+ }
+ bool isNumericComparison() const {
+ return isInt32Comparison() ||
+ isDoubleComparison() ||
+ isFloat32Comparison();
+ }
+ void setCompareType(CompareType type) {
+ compareType_ = type;
+ }
+ MIRType inputType();
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ void markNoOperandEmulatesUndefined() {
+ operandMightEmulateUndefined_ = false;
+ }
+ bool operandMightEmulateUndefined() const {
+ return operandMightEmulateUndefined_;
+ }
+ bool operandsAreNeverNaN() const {
+ return operandsAreNeverNaN_;
+ }
+ AliasSet getAliasSet() const override {
+ // Strict equality is never effectful.
+ if (jsop_ == JSOP_STRICTEQ || jsop_ == JSOP_STRICTNE)
+ return AliasSet::None();
+ if (compareType_ == Compare_Unknown)
+ return AliasSet::Store(AliasSet::Any);
+ MOZ_ASSERT(compareType_ <= Compare_Bitwise);
+ return AliasSet::None();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+ void collectRangeInfoPreTrunc() override;
+
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ static CompareType determineCompareType(JSOp op, MDefinition* left, MDefinition* right);
+ void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
+
+# ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ // Both sides of the compare can be Float32
+ return compareType_ == Compare_Float32;
+ }
+# endif
+
+ ALLOW_CLONE(MCompare)
+
+ protected:
+ MOZ_MUST_USE bool tryFoldEqualOperands(bool* result);
+ MOZ_MUST_USE bool tryFoldTypeOf(bool* result);
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ return compareType() == ins->toCompare()->compareType() &&
+ jsop() == ins->toCompare()->jsop();
+ }
+};
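+
+// Illustrative examples of compare-type selection (hypothetical, for
+// illustration only): |x === undefined| maps onto Compare_Undefined and
+// |a < b| with two known Int32 operands onto Compare_Int32, whereas a
+// comparison whose operand types cannot be determined stays Compare_Unknown
+// and, per getAliasSet() above, is treated as effectful unless the operator
+// is a strict (in)equality.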
+
+// Takes a typed value and returns an untyped value.
+class MBox
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ MBox(TempAllocator& alloc, MDefinition* ins)
+ : MUnaryInstruction(ins)
+ {
+ setResultType(MIRType::Value);
+ if (ins->resultTypeSet()) {
+ setResultTypeSet(ins->resultTypeSet());
+ } else if (ins->type() != MIRType::Value) {
+ TypeSet::Type ntype = ins->type() == MIRType::Object
+ ? TypeSet::AnyObjectType()
+ : TypeSet::PrimitiveType(ValueTypeFromMIRType(ins->type()));
+ setResultTypeSet(alloc.lifoAlloc()->new_<TemporaryTypeSet>(alloc.lifoAlloc(), ntype));
+ }
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Box)
+ static MBox* New(TempAllocator& alloc, MDefinition* ins)
+ {
+ // Cannot box a box.
+ MOZ_ASSERT(ins->type() != MIRType::Value);
+
+ return new(alloc) MBox(alloc, ins);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MBox)
+};
+
+// Note: the op may have been inverted during lowering (to put constants in a
+// position where they can be immediates), so it is important to use
+// lir->jsop() rather than mir->jsop() when the former is available.
+static inline Assembler::Condition
+JSOpToCondition(MCompare::CompareType compareType, JSOp op)
+{
+ bool isSigned = (compareType != MCompare::Compare_UInt32);
+ return JSOpToCondition(op, isSigned);
+}
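+
+// For illustration (hypothetical lowering, not taken from the sources): if
+// lowering rewrites |2 < x| as |x > 2| so that the constant can be encoded as
+// an immediate, the LIR records JSOP_GT while the MIR still carries JSOP_LT;
+// passing lir->jsop() here then yields the correct Assembler::Condition.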
+
+// Takes a typed value and checks if it is a certain type. If so, the payload
+// is unpacked and returned as that type. Otherwise, it is considered a
+// deoptimization.
+class MUnbox final : public MUnaryInstruction, public BoxInputsPolicy::Data
+{
+ public:
+ enum Mode {
+ Fallible, // Check the type, and deoptimize if unexpected.
+ Infallible, // Type guard is not necessary.
+ TypeBarrier // Guard on the type, and act like a TypeBarrier on failure.
+ };
+
+ private:
+ Mode mode_;
+ BailoutKind bailoutKind_;
+
+ MUnbox(MDefinition* ins, MIRType type, Mode mode, BailoutKind kind, TempAllocator& alloc)
+ : MUnaryInstruction(ins),
+ mode_(mode)
+ {
+ // Only allow unboxing a non-MIRType::Value input when the requested
+ // output type differs from the input type. This is often used to force a
+ // bailout. Boxing happens during type analysis.
+ MOZ_ASSERT_IF(ins->type() != MIRType::Value, type != ins->type());
+
+ MOZ_ASSERT(type == MIRType::Boolean ||
+ type == MIRType::Int32 ||
+ type == MIRType::Double ||
+ type == MIRType::String ||
+ type == MIRType::Symbol ||
+ type == MIRType::Object);
+
+ TemporaryTypeSet* resultSet = ins->resultTypeSet();
+ if (resultSet && type == MIRType::Object)
+ resultSet = resultSet->cloneObjectsOnly(alloc.lifoAlloc());
+
+ setResultType(type);
+ setResultTypeSet(resultSet);
+ setMovable();
+
+ if (mode_ == TypeBarrier || mode_ == Fallible)
+ setGuard();
+
+ bailoutKind_ = kind;
+ }
+ public:
+ INSTRUCTION_HEADER(Unbox)
+ static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode)
+ {
+ // Unless we were given a specific BailoutKind, pick a default based on
+ // the type we expect.
+ BailoutKind kind;
+ switch (type) {
+ case MIRType::Boolean:
+ kind = Bailout_NonBooleanInput;
+ break;
+ case MIRType::Int32:
+ kind = Bailout_NonInt32Input;
+ break;
+ case MIRType::Double:
+ kind = Bailout_NonNumericInput; // Int32s are fine too
+ break;
+ case MIRType::String:
+ kind = Bailout_NonStringInput;
+ break;
+ case MIRType::Symbol:
+ kind = Bailout_NonSymbolInput;
+ break;
+ case MIRType::Object:
+ kind = Bailout_NonObjectInput;
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+
+ return new(alloc) MUnbox(ins, type, mode, kind, alloc);
+ }
+
+ static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode,
+ BailoutKind kind)
+ {
+ return new(alloc) MUnbox(ins, type, mode, kind, alloc);
+ }
+
+ Mode mode() const {
+ return mode_;
+ }
+ BailoutKind bailoutKind() const {
+ // If infallible, no bailout should be generated.
+ MOZ_ASSERT(fallible());
+ return bailoutKind_;
+ }
+ bool fallible() const {
+ return mode() != Infallible;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isUnbox() || ins->toUnbox()->mode() != mode())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void printOpcode(GenericPrinter& out) const override;
+ void makeInfallible() {
+ // Should only be called if we're already Infallible or TypeBarrier
+ MOZ_ASSERT(mode() != Fallible);
+ mode_ = Infallible;
+ }
+
+ ALLOW_CLONE(MUnbox)
+};
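+
+// Hypothetical example (for illustration only): unboxing a Value that type
+// inference expects to be an Int32 would use the Fallible mode, so the
+// generated guard bails out with Bailout_NonInt32Input when any other kind of
+// value flows in; with the Infallible mode no check is emitted at all, which
+// is only sound when the input type is already proven.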
+
+class MGuardObject
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MGuardObject(MDefinition* ins)
+ : MUnaryInstruction(ins)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ setResultTypeSet(ins->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MGuardString
+ : public MUnaryInstruction,
+ public StringPolicy<0>::Data
+{
+ explicit MGuardString(MDefinition* ins)
+ : MUnaryInstruction(ins)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardString)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MPolyInlineGuard
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MPolyInlineGuard(MDefinition* ins)
+ : MUnaryInstruction(ins)
+ {
+ setGuard();
+ setResultType(MIRType::Object);
+ setResultTypeSet(ins->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(PolyInlineGuard)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MAssertRange
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ // This is the range checked by the assertion. Don't confuse this with the
+ // range_ member or the range() accessor. Since MAssertRange doesn't return
+ // a value, it doesn't use those.
+ const Range* assertedRange_;
+
+ MAssertRange(MDefinition* ins, const Range* assertedRange)
+ : MUnaryInstruction(ins), assertedRange_(assertedRange)
+ {
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertRange)
+ TRIVIAL_NEW_WRAPPERS
+
+ const Range* assertedRange() const {
+ return assertedRange_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+};
+
+// Caller-side allocation of |this| for |new|:
+// Given a template object, construct |this| for JSOP_NEW.
+class MCreateThisWithTemplate
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ gc::InitialHeap initialHeap_;
+
+ MCreateThisWithTemplate(CompilerConstraintList* constraints, MConstant* templateConst,
+ gc::InitialHeap initialHeap)
+ : MUnaryInstruction(templateConst),
+ initialHeap_(initialHeap)
+ {
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(CreateThisWithTemplate)
+ TRIVIAL_NEW_WRAPPERS
+
+ // Template for |this|, provided by TI.
+ JSObject* templateObject() const {
+ return &getOperand(0)->toConstant()->toObject();
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ // Although creation of |this| modifies global state, it is safely repeatable.
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override;
+};
+
+// Caller-side allocation of |this| for |new|:
+// Given a prototype operand, construct |this| for JSOP_NEW.
+class MCreateThisWithProto
+ : public MTernaryInstruction,
+ public Mix3Policy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2> >::Data
+{
+ MCreateThisWithProto(MDefinition* callee, MDefinition* newTarget, MDefinition* prototype)
+ : MTernaryInstruction(callee, newTarget, prototype)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CreateThisWithProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getCallee), (1, getNewTarget), (2, getPrototype))
+
+ // Although creation of |this| modifies global state, it is safely repeatable.
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// Caller-side allocation of |this| for |new|:
+// Constructs |this| when possible, else MagicValue(JS_IS_CONSTRUCTING).
+class MCreateThis
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+ explicit MCreateThis(MDefinition* callee, MDefinition* newTarget)
+ : MBinaryInstruction(callee, newTarget)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CreateThis)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getCallee), (1, getNewTarget))
+
+ // Although creation of |this| modifies global state, it is safely repeatable.
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// Eager initialization of arguments object.
+class MCreateArgumentsObject
+ : public MUnaryInstruction,
+ public ObjectPolicy<0>::Data
+{
+ CompilerGCPointer<ArgumentsObject*> templateObj_;
+
+ MCreateArgumentsObject(MDefinition* callObj, ArgumentsObject* templateObj)
+ : MUnaryInstruction(callObj),
+ templateObj_(templateObj)
+ {
+ setResultType(MIRType::Object);
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(CreateArgumentsObject)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getCallObject))
+
+ ArgumentsObject* templateObject() const {
+ return templateObj_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObj_);
+ }
+};
+
+class MGetArgumentsObjectArg
+ : public MUnaryInstruction,
+ public ObjectPolicy<0>::Data
+{
+ size_t argno_;
+
+ MGetArgumentsObjectArg(MDefinition* argsObject, size_t argno)
+ : MUnaryInstruction(argsObject),
+ argno_(argno)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetArgumentsObjectArg)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getArgsObject))
+
+ size_t argno() const {
+ return argno_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Any);
+ }
+};
+
+class MSetArgumentsObjectArg
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+ size_t argno_;
+
+ MSetArgumentsObjectArg(MDefinition* argsObj, size_t argno, MDefinition* value)
+ : MBinaryInstruction(argsObj, value),
+ argno_(argno)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetArgumentsObjectArg)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getArgsObject), (1, getValue))
+
+ size_t argno() const {
+ return argno_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::Any);
+ }
+};
+
+class MRunOncePrologue
+ : public MNullaryInstruction
+{
+ protected:
+ MRunOncePrologue()
+ {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(RunOncePrologue)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+// Given a MIRType::Value A and a MIRType::Object B:
+// If the Value may be safely unboxed to an Object, return Object(A).
+// Otherwise, return B.
+// Used to implement return behavior for inlined constructors.
+class MReturnFromCtor
+ : public MAryInstruction<2>,
+ public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
+{
+ MReturnFromCtor(MDefinition* value, MDefinition* object) {
+ initOperand(0, value);
+ initOperand(1, object);
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ReturnFromCtor)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getValue), (1, getObject))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MToFPInstruction
+ : public MUnaryInstruction,
+ public ToDoublePolicy::Data
+{
+ public:
+ // Types of values which can be converted.
+ enum ConversionKind {
+ NonStringPrimitives,
+ NonNullNonStringPrimitives,
+ NumbersOnly
+ };
+
+ private:
+ ConversionKind conversion_;
+
+ protected:
+ explicit MToFPInstruction(MDefinition* def, ConversionKind conversion = NonStringPrimitives)
+ : MUnaryInstruction(def), conversion_(conversion)
+ { }
+
+ public:
+ ConversionKind conversion() const {
+ return conversion_;
+ }
+};
+
+// Converts a primitive (either typed or untyped) to a double. If the input is
+// not primitive at runtime, a bailout occurs.
+class MToDouble
+ : public MToFPInstruction
+{
+ private:
+ TruncateKind implicitTruncate_;
+
+ explicit MToDouble(MDefinition* def, ConversionKind conversion = NonStringPrimitives)
+ : MToFPInstruction(def, conversion), implicitTruncate_(NoTruncate)
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+
+ // An object might have "valueOf", which means it is effectful.
+ // ToNumber(symbol) throws.
+ if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToDouble)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isToDouble() || ins->toToDouble()->conversion() != conversion())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ TruncateKind truncateKind() const {
+ return implicitTruncate_;
+ }
+ void setTruncateKind(TruncateKind kind) {
+ implicitTruncate_ = Max(implicitTruncate_, kind);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ if (input()->type() == MIRType::Value)
+ return false;
+ if (input()->type() == MIRType::Symbol)
+ return false;
+
+ return true;
+ }
+
+ ALLOW_CLONE(MToDouble)
+};
+
+// Converts a primitive (either typed or untyped) to a float32. If the input is
+// not primitive at runtime, a bailout occurs.
+class MToFloat32
+ : public MToFPInstruction
+{
+ protected:
+ bool mustPreserveNaN_;
+
+ explicit MToFloat32(MDefinition* def, ConversionKind conversion = NonStringPrimitives)
+ : MToFPInstruction(def, conversion),
+ mustPreserveNaN_(false)
+ {
+ setResultType(MIRType::Float32);
+ setMovable();
+
+ // An object might have "valueOf", which means it is effectful.
+ // ToNumber(symbol) throws.
+ if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+ setGuard();
+ }
+
+ explicit MToFloat32(MDefinition* def, bool mustPreserveNaN)
+ : MToFloat32(def)
+ {
+ mustPreserveNaN_ = mustPreserveNaN;
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ virtual MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ auto* other = ins->toToFloat32();
+ return other->conversion() == conversion() &&
+ other->mustPreserveNaN_ == mustPreserveNaN_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+ bool canProduceFloat32() const override { return true; }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MToFloat32)
+};
+
+// Converts a uint32 to a double (coming from wasm).
+class MWasmUnsignedToDouble
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MWasmUnsignedToDouble(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmUnsignedToDouble)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Converts a uint32 to a float32 (coming from wasm).
+class MWasmUnsignedToFloat32
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MWasmUnsignedToFloat32(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::Float32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmUnsignedToFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool canProduceFloat32() const override { return true; }
+};
+
+class MWrapInt64ToInt32
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool bottomHalf_;
+
+ explicit MWrapInt64ToInt32(MDefinition* def, bool bottomHalf = true)
+ : MUnaryInstruction(def),
+ bottomHalf_(bottomHalf)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WrapInt64ToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isWrapInt64ToInt32())
+ return false;
+ if (ins->toWrapInt64ToInt32()->bottomHalf() != bottomHalf())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool bottomHalf() const {
+ return bottomHalf_;
+ }
+};
+
+class MExtendInt32ToInt64
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool isUnsigned_;
+
+ MExtendInt32ToInt64(MDefinition* def, bool isUnsigned)
+ : MUnaryInstruction(def),
+ isUnsigned_(isUnsigned)
+ {
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ExtendInt32ToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isExtendInt32ToInt64())
+ return false;
+ if (ins->toExtendInt32ToInt64()->isUnsigned_ != isUnsigned_)
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MWasmTruncateToInt64
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool isUnsigned_;
+ wasm::TrapOffset trapOffset_;
+
+ MWasmTruncateToInt64(MDefinition* def, bool isUnsigned, wasm::TrapOffset trapOffset)
+ : MUnaryInstruction(def),
+ isUnsigned_(isUnsigned),
+ trapOffset_(trapOffset)
+ {
+ setResultType(MIRType::Int64);
+ setGuard(); // neither removable nor movable because of possible side-effects.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmTruncateToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+ wasm::TrapOffset trapOffset() const { return trapOffset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmTruncateToInt64()->isUnsigned() == isUnsigned_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Truncate a value to an int32, with wasm semantics: this will trap when the
+// value is out of range.
+class MWasmTruncateToInt32
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool isUnsigned_;
+ wasm::TrapOffset trapOffset_;
+
+ explicit MWasmTruncateToInt32(MDefinition* def, bool isUnsigned, wasm::TrapOffset trapOffset)
+ : MUnaryInstruction(def), isUnsigned_(isUnsigned), trapOffset_(trapOffset)
+ {
+ setResultType(MIRType::Int32);
+ setGuard(); // neither removable nor movable because of possible side-effects.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmTruncateToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const {
+ return isUnsigned_;
+ }
+ wasm::TrapOffset trapOffset() const {
+ return trapOffset_;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmTruncateToInt32()->isUnsigned() == isUnsigned_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MInt64ToFloatingPoint
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool isUnsigned_;
+
+ MInt64ToFloatingPoint(MDefinition* def, MIRType type, bool isUnsigned)
+ : MUnaryInstruction(def),
+ isUnsigned_(isUnsigned)
+ {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Int64ToFloatingPoint)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isInt64ToFloatingPoint())
+ return false;
+ if (ins->toInt64ToFloatingPoint()->isUnsigned_ != isUnsigned_)
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Converts a primitive (either typed or untyped) to an int32. If the input is
+// not primitive at runtime, a bailout occurs. If the input cannot be converted
+// to an int32 without loss (e.g. 5.5 or undefined), then a bailout occurs.
+class MToInt32
+ : public MUnaryInstruction,
+ public ToInt32Policy::Data
+{
+ bool canBeNegativeZero_;
+ MacroAssembler::IntConversionInputKind conversion_;
+
+ explicit MToInt32(MDefinition* def, MacroAssembler::IntConversionInputKind conversion =
+ MacroAssembler::IntConversion_Any)
+ : MUnaryInstruction(def),
+ canBeNegativeZero_(true),
+ conversion_(conversion)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+
+ // An object might have "valueOf", which means it is effectful.
+ // ToNumber(symbol) throws.
+ if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ // This only has backward information flow.
+ void analyzeEdgeCasesBackward() override;
+
+ bool canBeNegativeZero() const {
+ return canBeNegativeZero_;
+ }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ MacroAssembler::IntConversionInputKind conversion() const {
+ return conversion_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isToInt32() || ins->toToInt32()->conversion() != conversion())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ ALLOW_CLONE(MToInt32)
+};
+
+// Converts a value or typed input to a truncated int32, for use with bitwise
+// operations. This is an infallible ValueToECMAInt32.
+class MTruncateToInt32
+ : public MUnaryInstruction,
+ public ToInt32Policy::Data
+{
+ explicit MTruncateToInt32(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+
+ // An object might have "valueOf", which means it is effectful.
+ // ToInt32(symbol) throws.
+ if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TruncateToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return input()->type() < MIRType::Symbol;
+ }
+
+ ALLOW_CLONE(MTruncateToInt32)
+};
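+
+// Worked examples of the truncation above (plain ECMAScript ToInt32
+// arithmetic, for illustration): ToInt32(4.7) == 4, ToInt32(-0) == 0,
+// ToInt32(NaN) == 0, and ToInt32(2147483648) == -2147483648, since the result
+// is reduced modulo 2^32 into the signed int32 range.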
+
+// Converts any type to a string
+class MToString :
+ public MUnaryInstruction,
+ public ToStringPolicy::Data
+{
+ explicit MToString(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::String);
+ setMovable();
+
+ // Objects might override toString and Symbols throw.
+ if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToString)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool fallible() const {
+ return input()->mightBeType(MIRType::Object);
+ }
+
+ ALLOW_CLONE(MToString)
+};
+
+// Converts any type to an object or null value, throwing on undefined.
+class MToObjectOrNull :
+ public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MToObjectOrNull(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::ObjectOrNull);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToObjectOrNull)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MToObjectOrNull)
+};
+
+class MBitNot
+ : public MUnaryInstruction,
+ public BitwisePolicy::Data
+{
+ protected:
+ explicit MBitNot(MDefinition* input)
+ : MUnaryInstruction(input)
+ {
+ specialization_ = MIRType::None;
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BitNot)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MBitNot* NewInt32(TempAllocator& alloc, MDefinition* input);
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void setSpecialization(MIRType type) {
+ specialization_ = type;
+ setResultType(type);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ if (specialization_ == MIRType::None)
+ return AliasSet::Store(AliasSet::Any);
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ != MIRType::None;
+ }
+
+ ALLOW_CLONE(MBitNot)
+};
+
+class MTypeOf
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ MIRType inputType_;
+ bool inputMaybeCallableOrEmulatesUndefined_;
+
+ MTypeOf(MDefinition* def, MIRType inputType)
+ : MUnaryInstruction(def), inputType_(inputType),
+ inputMaybeCallableOrEmulatesUndefined_(true)
+ {
+ setResultType(MIRType::String);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypeOf)
+ TRIVIAL_NEW_WRAPPERS
+
+ MIRType inputType() const {
+ return inputType_;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void cacheInputMaybeCallableOrEmulatesUndefined(CompilerConstraintList* constraints);
+
+ bool inputMaybeCallableOrEmulatesUndefined() const {
+ return inputMaybeCallableOrEmulatesUndefined_;
+ }
+ void markInputNotCallableOrEmulatesUndefined() {
+ inputMaybeCallableOrEmulatesUndefined_ = false;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isTypeOf())
+ return false;
+ if (inputType() != ins->toTypeOf()->inputType())
+ return false;
+ if (inputMaybeCallableOrEmulatesUndefined() !=
+ ins->toTypeOf()->inputMaybeCallableOrEmulatesUndefined())
+ {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+class MToAsync
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MToAsync(MDefinition* unwrapped)
+ : MUnaryInstruction(unwrapped)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToAsync)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MToId
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MToId(MDefinition* index)
+ : MUnaryInstruction(index)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToId)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MBinaryBitwiseInstruction
+ : public MBinaryInstruction,
+ public BitwisePolicy::Data
+{
+ protected:
+ MBinaryBitwiseInstruction(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryInstruction(left, right), maskMatchesLeftRange(false),
+ maskMatchesRightRange(false)
+ {
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
+ setResultType(type);
+ setMovable();
+ }
+
+ void specializeAs(MIRType type);
+ bool maskMatchesLeftRange;
+ bool maskMatchesRightRange;
+
+ public:
+ MDefinition* foldsTo(TempAllocator& alloc) override;
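+ // Attempts to fold away coercion-style bitops such as (x | 0) when the
+ // operation cannot change the value (a sketch of the intent; the exact
+ // conditions live in the implementation).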
+ MDefinition* foldUnnecessaryBitop();
+ virtual MDefinition* foldIfZero(size_t operand) = 0;
+ virtual MDefinition* foldIfNegOne(size_t operand) = 0;
+ virtual MDefinition* foldIfEqual() = 0;
+ virtual MDefinition* foldIfAllBitsSet(size_t operand) = 0;
+ virtual void infer(BaselineInspector* inspector, jsbytecode* pc);
+ void collectRangeInfoPreTrunc() override;
+
+ void setInt32Specialization() {
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return binaryCongruentTo(ins);
+ }
+ AliasSet getAliasSet() const override {
+ if (specialization_ >= MIRType::Object)
+ return AliasSet::Store(AliasSet::Any);
+ return AliasSet::None();
+ }
+
+ TruncateKind operandTruncateKind(size_t index) const override;
+};
+
+class MBitAnd : public MBinaryBitwiseInstruction
+{
+ MBitAnd(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(left, right, type)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(BitAnd)
+ static MBitAnd* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MBitAnd* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ return getOperand(operand); // 0 & x => 0;
+ }
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return getOperand(1 - operand); // x & -1 => x
+ }
+ MDefinition* foldIfEqual() override {
+ return getOperand(0); // x & x => x;
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override {
+ // e.g. for uint16: x & 0xffff => x;
+ return getOperand(1 - operand);
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ != MIRType::None;
+ }
+
+ ALLOW_CLONE(MBitAnd)
+};
+
+class MBitOr : public MBinaryBitwiseInstruction
+{
+ MBitOr(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(left, right, type)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(BitOr)
+ static MBitOr* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MBitOr* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ return getOperand(1 - operand); // 0 | x => x, so if the i-th operand is 0, return the (1-i)-th
+ }
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return getOperand(operand); // x | -1 => -1
+ }
+ MDefinition* foldIfEqual() override {
+ return getOperand(0); // x | x => x
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override {
+ return this;
+ }
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ != MIRType::None;
+ }
+
+ ALLOW_CLONE(MBitOr)
+};
+
+class MBitXor : public MBinaryBitwiseInstruction
+{
+ MBitXor(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(left, right, type)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(BitXor)
+ static MBitXor* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MBitXor* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ return getOperand(1 - operand); // 0 ^ x => x
+ }
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return this;
+ }
+ MDefinition* foldIfEqual() override {
+ return this;
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override {
+ return this;
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ ALLOW_CLONE(MBitXor)
+};
+
+class MShiftInstruction
+ : public MBinaryBitwiseInstruction
+{
+ protected:
+ MShiftInstruction(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(left, right, type)
+ { }
+
+ public:
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return this;
+ }
+ MDefinition* foldIfEqual() override {
+ return this;
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override {
+ return this;
+ }
+ virtual void infer(BaselineInspector* inspector, jsbytecode* pc) override;
+};
+
+class MLsh : public MShiftInstruction
+{
+ MLsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(left, right, type)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(Lsh)
+ static MLsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MLsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 << x => 0
+ // x << 0 => x
+ return getOperand(0);
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ != MIRType::None;
+ }
+
+ ALLOW_CLONE(MLsh)
+};
+
+class MRsh : public MShiftInstruction
+{
+ MRsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(left, right, type)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(Rsh)
+ static MRsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MRsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 >> x => 0
+ // x >> 0 => x
+ return getOperand(0);
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MRsh)
+};
+
+class MUrsh : public MShiftInstruction
+{
+ bool bailoutsDisabled_;
+
+ MUrsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(left, right, type),
+ bailoutsDisabled_(false)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(Ursh)
+ static MUrsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right);
+ static MUrsh* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 >>> x => 0
+ if (operand == 0)
+ return getOperand(0);
+
+ return this;
+ }
+
+ void infer(BaselineInspector* inspector, jsbytecode* pc) override;
+
+ bool bailoutsDisabled() const {
+ return bailoutsDisabled_;
+ }
+
+ bool fallible() const;
+
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ ALLOW_CLONE(MUrsh)
+};
+
+class MSignExtend
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ public:
+ enum Mode {
+ Byte,
+ Half
+ };
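+ // Byte sign-extends the low 8 bits of the input (e.g. 0xff becomes -1);
+ // Half sign-extends the low 16 bits (e.g. 0xffff becomes -1).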
+
+ private:
+ Mode mode_;
+
+ MSignExtend(MDefinition* op, Mode mode)
+ : MUnaryInstruction(op), mode_(mode)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SignExtend)
+ TRIVIAL_NEW_WRAPPERS
+
+ Mode mode() { return mode_; }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MSignExtend)
+};
+
+class MBinaryArithInstruction
+ : public MBinaryInstruction,
+ public ArithPolicy::Data
+{
+ // The implicit truncate flag is set by the truncate backward range analysis
+ // optimization phase and by wasm pre-processing. It is used in
+ // NeedNegativeZeroCheck to check whether the result of a multiplication needs
+ // to produce a -0 double value, and for avoiding overflow checks.
+
+ // This situation arises when the multiplication cannot be truncated even
+ // though all of its uses truncate the result, such as when range analysis
+ // detects a precision loss in the multiplication.
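+ // As a concrete illustration of truncation: in (a * b) | 0 every use of
+ // the multiplication truncates its result, so the overflow check can
+ // normally be omitted.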
+ TruncateKind implicitTruncate_;
+
+ // Whether we must preserve NaN semantics, and in particular must not fold
+ // (x op id) or (id op x) to x, nor replace a division with a multiplication
+ // by the exact reciprocal.
+ bool mustPreserveNaN_;
+
+ public:
+ MBinaryArithInstruction(MDefinition* left, MDefinition* right)
+ : MBinaryInstruction(left, right),
+ implicitTruncate_(NoTruncate),
+ mustPreserveNaN_(false)
+ {
+ specialization_ = MIRType::None;
+ setMovable();
+ }
+
+ static MBinaryArithInstruction* New(TempAllocator& alloc, Opcode op,
+ MDefinition* left, MDefinition* right);
+
+ bool constantDoubleResult(TempAllocator& alloc);
+
+ void setMustPreserveNaN(bool b) { mustPreserveNaN_ = b; }
+ bool mustPreserveNaN() const { return mustPreserveNaN_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void printOpcode(GenericPrinter& out) const override;
+
+ virtual double getIdentity() = 0;
+
+ void setSpecialization(MIRType type) {
+ specialization_ = type;
+ setResultType(type);
+ }
+ void setInt32Specialization() {
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+ }
+ void setNumberSpecialization(TempAllocator& alloc, BaselineInspector* inspector, jsbytecode* pc);
+
+ virtual void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins))
+ return false;
+ const auto* other = static_cast<const MBinaryArithInstruction*>(ins);
+ return other->mustPreserveNaN_ == mustPreserveNaN_;
+ }
+ AliasSet getAliasSet() const override {
+ if (specialization_ >= MIRType::Object)
+ return AliasSet::Store(AliasSet::Any);
+ return AliasSet::None();
+ }
+
+ bool isTruncated() const {
+ return implicitTruncate_ == Truncate;
+ }
+ TruncateKind truncateKind() const {
+ return implicitTruncate_;
+ }
+ void setTruncateKind(TruncateKind kind) {
+ implicitTruncate_ = Max(implicitTruncate_, kind);
+ }
+};
+
+class MMinMax
+ : public MBinaryInstruction,
+ public ArithPolicy::Data
+{
+ bool isMax_;
+
+ MMinMax(MDefinition* left, MDefinition* right, MIRType type, bool isMax)
+ : MBinaryInstruction(left, right),
+ isMax_(isMax)
+ {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ setMovable();
+ specialization_ = type;
+ }
+
+ public:
+ INSTRUCTION_HEADER(MinMax)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MMinMax* NewWasm(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type, bool isMax)
+ {
+ return New(alloc, left, right, type, isMax);
+ }
+
+ bool isMax() const {
+ return isMax_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ const MMinMax* other = ins->toMinMax();
+ return other->isMax() == isMax();
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MMinMax)
+};
+
+class MAbs
+ : public MUnaryInstruction,
+ public ArithPolicy::Data
+{
+ bool implicitTruncate_;
+
+ MAbs(MDefinition* num, MIRType type)
+ : MUnaryInstruction(num),
+ implicitTruncate_(false)
+ {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ setMovable();
+ specialization_ = type;
+ }
+
+ public:
+ INSTRUCTION_HEADER(Abs)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MAbs* NewWasm(TempAllocator& alloc, MDefinition* num, MIRType type) {
+ auto* ins = new(alloc) MAbs(num, type);
+ if (type == MIRType::Int32)
+ ins->implicitTruncate_ = true;
+ return ins;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool fallible() const;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MAbs)
+};
+
+class MClz
+ : public MUnaryInstruction
+ , public BitwisePolicy::Data
+{
+ bool operandIsNeverZero_;
+
+ explicit MClz(MDefinition* num, MIRType type)
+ : MUnaryInstruction(num),
+ operandIsNeverZero_(false)
+ {
+ MOZ_ASSERT(IsIntType(type));
+ MOZ_ASSERT(IsNumberType(num->type()));
+ specialization_ = type;
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Clz)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool operandIsNeverZero() const {
+ return operandIsNeverZero_;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+};
+
+class MCtz
+ : public MUnaryInstruction
+ , public BitwisePolicy::Data
+{
+ bool operandIsNeverZero_;
+
+ explicit MCtz(MDefinition* num, MIRType type)
+ : MUnaryInstruction(num),
+ operandIsNeverZero_(false)
+ {
+ MOZ_ASSERT(IsIntType(type));
+ MOZ_ASSERT(IsNumberType(num->type()));
+ specialization_ = type;
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Ctz)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool operandIsNeverZero() const {
+ return operandIsNeverZero_;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+};
+
+class MPopcnt
+ : public MUnaryInstruction
+ , public BitwisePolicy::Data
+{
+ explicit MPopcnt(MDefinition* num, MIRType type)
+ : MUnaryInstruction(num)
+ {
+ MOZ_ASSERT(IsNumberType(num->type()));
+ MOZ_ASSERT(IsIntType(type));
+ specialization_ = type;
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Popcnt)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+};
+
+// Inline implementation of Math.sqrt().
+class MSqrt
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ MSqrt(MDefinition* num, MIRType type)
+ : MUnaryInstruction(num)
+ {
+ setResultType(type);
+ specialization_ = type;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Sqrt)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MSqrt)
+};
+
+class MCopySign
+ : public MBinaryInstruction,
+ public NoTypePolicy::Data
+{
+ MCopySign(MDefinition* lhs, MDefinition* rhs, MIRType type)
+ : MBinaryInstruction(lhs, rhs)
+ {
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(CopySign)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MCopySign)
+};
+
+// Inline implementation of atan2 (arctangent of y/x).
+class MAtan2
+ : public MBinaryInstruction,
+ public MixPolicy<DoublePolicy<0>, DoublePolicy<1> >::Data
+{
+ MAtan2(MDefinition* y, MDefinition* x)
+ : MBinaryInstruction(y, x)
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Atan2)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, y), (1, x))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MAtan2)
+};
+
+// Inline implementation of Math.hypot().
+class MHypot
+ : public MVariadicInstruction,
+ public AllDoublePolicy::Data
+{
+ MHypot()
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Hypot)
+ static MHypot* New(TempAllocator& alloc, const MDefinitionVector& vector);
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ bool canClone() const override {
+ return true;
+ }
+
+ MInstruction* clone(TempAllocator& alloc,
+ const MDefinitionVector& inputs) const override {
+ return MHypot::New(alloc, inputs);
+ }
+};
+
+// Inline implementation of Math.pow().
+class MPow
+ : public MBinaryInstruction,
+ public PowPolicy::Data
+{
+ MPow(MDefinition* input, MDefinition* power, MIRType powerType)
+ : MBinaryInstruction(input, power)
+ {
+ MOZ_ASSERT(powerType == MIRType::Double || powerType == MIRType::Int32);
+ specialization_ = powerType;
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Pow)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* input() const {
+ return lhs();
+ }
+ MDefinition* power() const {
+ return rhs();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ // Temporarily disable recovery to relieve fuzzer pressure. See bug 1188586.
+ return false;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MPow)
+};
+
+// Inline implementation of Math.pow(x, 0.5), which subtly differs from Math.sqrt(x).
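+// For example, Math.pow(-Infinity, 0.5) is Infinity while Math.sqrt(-Infinity)
+// is NaN, and Math.pow(-0, 0.5) is +0 while Math.sqrt(-0) is -0; the flags
+// below track when those operands can be ruled out.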
+class MPowHalf
+ : public MUnaryInstruction,
+ public DoublePolicy<0>::Data
+{
+ bool operandIsNeverNegativeInfinity_;
+ bool operandIsNeverNegativeZero_;
+ bool operandIsNeverNaN_;
+
+ explicit MPowHalf(MDefinition* input)
+ : MUnaryInstruction(input),
+ operandIsNeverNegativeInfinity_(false),
+ operandIsNeverNegativeZero_(false),
+ operandIsNeverNaN_(false)
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PowHalf)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool operandIsNeverNegativeInfinity() const {
+ return operandIsNeverNegativeInfinity_;
+ }
+ bool operandIsNeverNegativeZero() const {
+ return operandIsNeverNegativeZero_;
+ }
+ bool operandIsNeverNaN() const {
+ return operandIsNeverNaN_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void collectRangeInfoPreTrunc() override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MPowHalf)
+};
+
+// Inline implementation of Math.random().
+class MRandom : public MNullaryInstruction
+{
+ MRandom()
+ {
+ setResultType(MIRType::Double);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Random)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+ bool canRecoverOnBailout() const override {
+#ifdef JS_MORE_DETERMINISTIC
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ ALLOW_CLONE(MRandom)
+};
+
+class MMathFunction
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ public:
+ enum Function {
+ Log,
+ Sin,
+ Cos,
+ Exp,
+ Tan,
+ ACos,
+ ASin,
+ ATan,
+ Log10,
+ Log2,
+ Log1P,
+ ExpM1,
+ CosH,
+ SinH,
+ TanH,
+ ACosH,
+ ASinH,
+ ATanH,
+ Sign,
+ Trunc,
+ Cbrt,
+ Floor,
+ Ceil,
+ Round
+ };
+
+ private:
+ Function function_;
+ const MathCache* cache_;
+
+ // A nullptr cache means this function will neither access nor update the cache.
+ MMathFunction(MDefinition* input, Function function, const MathCache* cache)
+ : MUnaryInstruction(input), function_(function), cache_(cache)
+ {
+ setResultType(MIRType::Double);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(MathFunction)
+ TRIVIAL_NEW_WRAPPERS
+
+ Function function() const {
+ return function_;
+ }
+ const MathCache* cache() const {
+ return cache_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isMathFunction())
+ return false;
+ if (ins->toMathFunction()->function() != function())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ static const char* FunctionName(Function function);
+
+ bool isFloat32Commutative() const override {
+ return function_ == Floor || function_ == Ceil || function_ == Round;
+ }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ if (input()->type() == MIRType::SinCosDouble)
+ return false;
+ switch (function_) {
+ case Sin:
+ case Log:
+ case Round:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ALLOW_CLONE(MMathFunction)
+};
+
+class MAdd : public MBinaryArithInstruction
+{
+ MAdd(MDefinition* left, MDefinition* right)
+ : MBinaryArithInstruction(left, right)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ MAdd(MDefinition* left, MDefinition* right, MIRType type)
+ : MAdd(left, right)
+ {
+ specialization_ = type;
+ setResultType(type);
+ if (type == MIRType::Int32) {
+ setTruncateKind(Truncate);
+ setCommutative();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(Add)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isFloat32Commutative() const override { return true; }
+
+ double getIdentity() override {
+ return 0;
+ }
+
+ bool fallible() const;
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ ALLOW_CLONE(MAdd)
+};
+
+class MSub : public MBinaryArithInstruction
+{
+ MSub(MDefinition* left, MDefinition* right)
+ : MBinaryArithInstruction(left, right)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ MSub(MDefinition* left, MDefinition* right, MIRType type, bool mustPreserveNaN = false)
+ : MSub(left, right)
+ {
+ specialization_ = type;
+ setResultType(type);
+ setMustPreserveNaN(mustPreserveNaN);
+ if (type == MIRType::Int32)
+ setTruncateKind(Truncate);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Sub)
+ TRIVIAL_NEW_WRAPPERS
+
+ double getIdentity() override {
+ return 0;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ bool fallible() const;
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ ALLOW_CLONE(MSub)
+};
+
+class MMul : public MBinaryArithInstruction
+{
+ public:
+ enum Mode {
+ Normal,
+ Integer
+ };
+
+ private:
+ // Annotation: the result could be a negative zero,
+ // so we need to guard against this during execution.
+ bool canBeNegativeZero_;
+
+ Mode mode_;
+
+ MMul(MDefinition* left, MDefinition* right, MIRType type, Mode mode)
+ : MBinaryArithInstruction(left, right),
+ canBeNegativeZero_(true),
+ mode_(mode)
+ {
+ if (mode == Integer) {
+ // This implements the required behavior for Math.imul, which
+ // can never fail and always truncates its output to int32.
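+ // For example, Math.imul(0x7fffffff, 2) is -2: the product wraps to
+ // int32 instead of overflowing to a double.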
+ canBeNegativeZero_ = false;
+ setTruncateKind(Truncate);
+ setCommutative();
+ }
+ MOZ_ASSERT_IF(mode != Integer, mode == Normal);
+
+ if (type != MIRType::Value)
+ specialization_ = type;
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Mul)
+ static MMul* New(TempAllocator& alloc, MDefinition* left, MDefinition* right) {
+ return new(alloc) MMul(left, right, MIRType::Value, MMul::Normal);
+ }
+ static MMul* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type,
+ Mode mode = Normal)
+ {
+ return new(alloc) MMul(left, right, type, mode);
+ }
+ static MMul* NewWasm(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type,
+ Mode mode, bool mustPreserveNaN)
+ {
+ auto* ret = new(alloc) MMul(left, right, type, mode);
+ ret->setMustPreserveNaN(mustPreserveNaN);
+ return ret;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void analyzeEdgeCasesForward() override;
+ void analyzeEdgeCasesBackward() override;
+ void collectRangeInfoPreTrunc() override;
+
+ double getIdentity() override {
+ return 1;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isMul())
+ return false;
+
+ const MMul* mul = ins->toMul();
+ if (canBeNegativeZero_ != mul->canBeNegativeZero())
+ return false;
+
+ if (mode_ != mul->mode())
+ return false;
+
+ if (mustPreserveNaN() != mul->mustPreserveNaN())
+ return false;
+
+ return binaryCongruentTo(ins);
+ }
+
+ bool canOverflow() const;
+
+ bool canBeNegativeZero() const {
+ return canBeNegativeZero_;
+ }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ MOZ_MUST_USE bool updateForReplacement(MDefinition* ins) override;
+
+ bool fallible() const {
+ return canBeNegativeZero_ || canOverflow();
+ }
+
+ void setSpecialization(MIRType type) {
+ specialization_ = type;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ Mode mode() const { return mode_; }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ ALLOW_CLONE(MMul)
+};
+
+class MDiv : public MBinaryArithInstruction
+{
+ bool canBeNegativeZero_;
+ bool canBeNegativeOverflow_;
+ bool canBeDivideByZero_;
+ bool canBeNegativeDividend_;
+ bool unsigned_;
+ bool trapOnError_;
+ wasm::TrapOffset trapOffset_;
+
+ MDiv(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(left, right),
+ canBeNegativeZero_(true),
+ canBeNegativeOverflow_(true),
+ canBeDivideByZero_(true),
+ canBeNegativeDividend_(true),
+ unsigned_(false),
+ trapOnError_(false)
+ {
+ if (type != MIRType::Value)
+ specialization_ = type;
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Div)
+ static MDiv* New(TempAllocator& alloc, MDefinition* left, MDefinition* right) {
+ return new(alloc) MDiv(left, right, MIRType::Value);
+ }
+ static MDiv* New(TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type) {
+ return new(alloc) MDiv(left, right, type);
+ }
+ static MDiv* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type, bool unsignd, bool trapOnError = false,
+ wasm::TrapOffset trapOffset = wasm::TrapOffset(),
+ bool mustPreserveNaN = false)
+ {
+ auto* div = new(alloc) MDiv(left, right, type);
+ div->unsigned_ = unsignd;
+ div->trapOnError_ = trapOnError;
+ div->trapOffset_ = trapOffset;
+ if (trapOnError) {
+ div->setGuard(); // not removable because of possible side-effects.
+ div->setNotMovable();
+ }
+ div->setMustPreserveNaN(mustPreserveNaN);
+ if (type == MIRType::Int32)
+ div->setTruncateKind(Truncate);
+ return div;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void analyzeEdgeCasesForward() override;
+ void analyzeEdgeCasesBackward() override;
+
+ double getIdentity() override {
+ MOZ_CRASH("not used");
+ }
+
+ bool canBeNegativeZero() const {
+ return canBeNegativeZero_;
+ }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ bool canBeNegativeOverflow() const {
+ return canBeNegativeOverflow_;
+ }
+
+ bool canBeDivideByZero() const {
+ return canBeDivideByZero_;
+ }
+
+ bool canBeNegativeDividend() const {
+ // "Dividend" is an ambiguous concept for unsigned truncated
+ // division, because of the truncation procedure:
+ // ((x>>>0)/2)|0, for example, is transformed by
+ // MDiv::truncate into a node with lhs representing x (not
+ // x>>>0) and rhs representing the constant 2; in other words,
+ // the MIR node corresponds to a "cast operands to unsigned and
+ // divide" operation. In this case, is the dividend x or is it
+ // x>>>0? To resolve such ambiguities, we disallow the use of
+ // this method for unsigned division.
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool isUnsigned() const {
+ return unsigned_;
+ }
+
+ bool isTruncatedIndirectly() const {
+ return truncateKind() >= IndirectTruncate;
+ }
+
+ bool canTruncateInfinities() const {
+ return isTruncated();
+ }
+ bool canTruncateRemainder() const {
+ return isTruncated();
+ }
+ bool canTruncateOverflow() const {
+ return isTruncated() || isTruncatedIndirectly();
+ }
+ bool canTruncateNegativeZero() const {
+ return isTruncated() || isTruncatedIndirectly();
+ }
+
+ bool trapOnError() const {
+ return trapOnError_;
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(trapOnError_);
+ return trapOffset_;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool fallible() const;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ void collectRangeInfoPreTrunc() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!MBinaryArithInstruction::congruentTo(ins))
+ return false;
+ const MDiv* other = ins->toDiv();
+ MOZ_ASSERT(other->trapOnError() == trapOnError_);
+ return unsigned_ == other->isUnsigned();
+ }
+
+ ALLOW_CLONE(MDiv)
+};
+
+class MMod : public MBinaryArithInstruction
+{
+ bool unsigned_;
+ bool canBeNegativeDividend_;
+ bool canBePowerOfTwoDivisor_;
+ bool canBeDivideByZero_;
+ bool trapOnError_;
+ wasm::TrapOffset trapOffset_;
+
+ MMod(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(left, right),
+ unsigned_(false),
+ canBeNegativeDividend_(true),
+ canBePowerOfTwoDivisor_(true),
+ canBeDivideByZero_(true),
+ trapOnError_(false)
+ {
+ if (type != MIRType::Value)
+ specialization_ = type;
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Mod)
+ static MMod* New(TempAllocator& alloc, MDefinition* left, MDefinition* right) {
+ return new(alloc) MMod(left, right, MIRType::Value);
+ }
+ static MMod* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type, bool unsignd, bool trapOnError = false,
+ wasm::TrapOffset trapOffset = wasm::TrapOffset())
+ {
+ auto* mod = new(alloc) MMod(left, right, type);
+ mod->unsigned_ = unsignd;
+ mod->trapOnError_ = trapOnError;
+ mod->trapOffset_ = trapOffset;
+ if (trapOnError) {
+ mod->setGuard(); // not removable because of possible side-effects.
+ mod->setNotMovable();
+ }
+ if (type == MIRType::Int32)
+ mod->setTruncateKind(Truncate);
+ return mod;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ double getIdentity() override {
+ MOZ_CRASH("not used");
+ }
+
+ bool canBeNegativeDividend() const {
+ MOZ_ASSERT(specialization_ == MIRType::Int32 || specialization_ == MIRType::Int64);
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool canBeDivideByZero() const {
+ MOZ_ASSERT(specialization_ == MIRType::Int32 || specialization_ == MIRType::Int64);
+ return canBeDivideByZero_;
+ }
+
+ bool canBePowerOfTwoDivisor() const {
+ MOZ_ASSERT(specialization_ == MIRType::Int32);
+ return canBePowerOfTwoDivisor_;
+ }
+
+ void analyzeEdgeCasesForward() override;
+
+ bool isUnsigned() const {
+ return unsigned_;
+ }
+
+ bool trapOnError() const {
+ return trapOnError_;
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(trapOnError_);
+ return trapOffset_;
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return specialization_ < MIRType::Object;
+ }
+
+ bool fallible() const;
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+ void collectRangeInfoPreTrunc() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return MBinaryArithInstruction::congruentTo(ins) &&
+ unsigned_ == ins->toMod()->isUnsigned();
+ }
+
+ ALLOW_CLONE(MMod)
+};
+
+class MConcat
+ : public MBinaryInstruction,
+ public MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >::Data
+{
+ MConcat(MDefinition* left, MDefinition* right)
+ : MBinaryInstruction(left, right)
+ {
+ // At least one input must definitely be a string.
+ MOZ_ASSERT(left->type() == MIRType::String || right->type() == MIRType::String);
+
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Concat)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MConcat)
+};
+
+class MCharCodeAt
+ : public MBinaryInstruction,
+ public MixPolicy<StringPolicy<0>, IntPolicy<1> >::Data
+{
+ MCharCodeAt(MDefinition* str, MDefinition* index)
+ : MBinaryInstruction(str, index)
+ {
+ setMovable();
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CharCodeAt)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ // Strings are immutable, so there is no implicit dependency.
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MCharCodeAt)
+};
+
+class MFromCharCode
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ explicit MFromCharCode(MDefinition* code)
+ : MUnaryInstruction(code)
+ {
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FromCharCode)
+ TRIVIAL_NEW_WRAPPERS
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MFromCharCode)
+};
+
+class MFromCodePoint
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ explicit MFromCodePoint(MDefinition* codePoint)
+ : MUnaryInstruction(codePoint)
+ {
+ setGuard(); // throws on invalid code point
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FromCodePoint)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MSinCos
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ const MathCache* cache_;
+
+ MSinCos(MDefinition* input, const MathCache* cache)
+ : MUnaryInstruction(input), cache_(cache)
+ {
+ setResultType(MIRType::SinCosDouble);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SinCos)
+
+ static MSinCos* New(TempAllocator& alloc, MDefinition* input, const MathCache* cache)
+ {
+ return new(alloc) MSinCos(input, cache);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ const MathCache* cache() const {
+ return cache_;
+ }
+};
+
+class MStringSplit
+ : public MTernaryInstruction,
+ public MixPolicy<StringPolicy<0>, StringPolicy<1> >::Data
+{
+ MStringSplit(CompilerConstraintList* constraints, MDefinition* string, MDefinition* sep,
+ MConstant* templateObject)
+ : MTernaryInstruction(string, sep, templateObject)
+ {
+ setResultType(MIRType::Object);
+ setResultTypeSet(templateObject->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(StringSplit)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string), (1, separator))
+
+ JSObject* templateObject() const {
+ return &getOperand(2)->toConstant()->toObject();
+ }
+ ObjectGroup* group() const {
+ return templateObject()->group();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ virtual AliasSet getAliasSet() const override {
+ // Although this instruction returns a new array, we don't have to mark
+ // it as a store instruction; see also MNewArray.
+ return AliasSet::None();
+ }
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+// Returns the value to use as |this| value. See also ComputeThis and
+// BoxNonStrictThis in Interpreter.h.
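+// As a reminder of the semantics: in non-strict code a primitive |this| is
+// boxed into its wrapper object and undefined or null become the global's
+// |this| value; strict code leaves the value untouched.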
+class MComputeThis
+ : public MUnaryInstruction,
+ public BoxPolicy<0>::Data
+{
+ explicit MComputeThis(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ComputeThis)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ // Note: don't override getAliasSet: the thisValue hook can be effectful.
+};
+
+// Load an arrow function's |new.target| value.
+class MArrowNewTarget
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MArrowNewTarget(MDefinition* callee)
+ : MUnaryInstruction(callee)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrowNewTarget)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, callee))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ // An arrow function's |new.target| value is immutable.
+ return AliasSet::None();
+ }
+};
+
+class MPhi final
+ : public MDefinition,
+ public InlineListNode<MPhi>,
+ public NoTypePolicy::Data
+{
+ using InputVector = js::Vector<MUse, 2, JitAllocPolicy>;
+ InputVector inputs_;
+
+ TruncateKind truncateKind_;
+ bool hasBackedgeType_;
+ bool triedToSpecialize_;
+ bool isIterator_;
+ bool canProduceFloat32_;
+ bool canConsumeFloat32_;
+
+#ifdef DEBUG
+ bool specialized_;
+#endif
+
+ protected:
+ MUse* getUseFor(size_t index) override {
+ // Note: after the initial IonBuilder pass, it is OK to change phi
+ // operands such that they do not include the type sets of their
+ // operands. This can arise during e.g. value numbering, where
+ // definitions producing the same value may have different type sets.
+ MOZ_ASSERT(index < numOperands());
+ return &inputs_[index];
+ }
+ const MUse* getUseFor(size_t index) const override {
+ return &inputs_[index];
+ }
+
+ public:
+ INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(Phi)
+ virtual TypePolicy* typePolicy();
+ virtual MIRType typePolicySpecialization();
+
+ MPhi(TempAllocator& alloc, MIRType resultType)
+ : inputs_(alloc),
+ truncateKind_(NoTruncate),
+ hasBackedgeType_(false),
+ triedToSpecialize_(false),
+ isIterator_(false),
+ canProduceFloat32_(false),
+ canConsumeFloat32_(false)
+#ifdef DEBUG
+ , specialized_(false)
+#endif
+ {
+ setResultType(resultType);
+ }
+
+ static MPhi* New(TempAllocator& alloc, MIRType resultType = MIRType::Value) {
+ return new(alloc) MPhi(alloc, resultType);
+ }
+ static MPhi* New(TempAllocator::Fallible alloc, MIRType resultType = MIRType::Value) {
+ return new(alloc) MPhi(alloc.alloc, resultType);
+ }
+
+ void removeOperand(size_t index);
+ void removeAllOperands();
+
+ MDefinition* getOperand(size_t index) const override {
+ return inputs_[index].producer();
+ }
+ size_t numOperands() const override {
+ return inputs_.length();
+ }
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u >= &inputs_[0]);
+ MOZ_ASSERT(u <= &inputs_[numOperands() - 1]);
+ return u - &inputs_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ inputs_[index].replaceProducer(operand);
+ }
+ bool hasBackedgeType() const {
+ return hasBackedgeType_;
+ }
+ bool triedToSpecialize() const {
+ return triedToSpecialize_;
+ }
+ void specialize(MIRType type) {
+ triedToSpecialize_ = true;
+ setResultType(type);
+ }
+ bool specializeType(TempAllocator& alloc);
+
+#ifdef DEBUG
+ // Assert that this is a phi in a loop header with a unique predecessor and
+ // a unique backedge.
+ void assertLoopPhi() const;
+#else
+ void assertLoopPhi() const {}
+#endif
+
+ // Assuming this phi is in a loop header with a unique loop entry, return
+ // the phi operand along the loop entry.
+ MDefinition* getLoopPredecessorOperand() const {
+ assertLoopPhi();
+ return getOperand(0);
+ }
+
+ // Assuming this phi is in a loop header with a unique loop entry, return
+ // the phi operand along the loop backedge.
+ MDefinition* getLoopBackedgeOperand() const {
+ assertLoopPhi();
+ return getOperand(1);
+ }
+
+ // Whether this phi's type already includes information for def.
+ bool typeIncludes(MDefinition* def);
+
+ // Add types for this phi which speculate about new inputs that may come in
+ // via a loop backedge.
+ MOZ_MUST_USE bool addBackedgeType(TempAllocator& alloc, MIRType type,
+ TemporaryTypeSet* typeSet);
+
+ // Initializes the operands vector to the given capacity,
+ // permitting use of addInput() instead of addInputSlow().
+ MOZ_MUST_USE bool reserveLength(size_t length) {
+ return inputs_.reserve(length);
+ }
+
+ // Use only if capacity has been reserved by reserveLength
+ void addInput(MDefinition* ins) {
+ inputs_.infallibleEmplaceBack(ins, this);
+ }
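+
+ // A typical construction pattern (sketch; names are illustrative):
+ //   if (!phi->reserveLength(numPredecessors))
+ //       return false;
+ //   for (size_t i = 0; i < numPredecessors; i++)
+ //       phi->addInput(predecessorValues[i]);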
+
+ // Appends a new input to the input vector. May perform reallocation.
+ // Prefer reserveLength() and addInput() instead, where possible.
+ MOZ_MUST_USE bool addInputSlow(MDefinition* ins) {
+ return inputs_.emplaceBack(ins, this);
+ }
+
+ // Appends a new input to the input vector. Infallible because
+ // we know the input fits in the vector's inline storage.
+ void addInlineInput(MDefinition* ins) {
+ MOZ_ASSERT(inputs_.length() < InputVector::InlineLength);
+ MOZ_ALWAYS_TRUE(addInputSlow(ins));
+ }
+
+ // Update the type of this phi after adding |ins| as an input. Set
+ // |*ptypeChange| to true if the type changed.
+ bool checkForTypeChange(TempAllocator& alloc, MDefinition* ins, bool* ptypeChange);
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ MDefinition* foldsTernary(TempAllocator& alloc);
+ MDefinition* foldsFilterTypeSet();
+
+ bool congruentTo(const MDefinition* ins) const override;
+
+ bool isIterator() const {
+ return isIterator_;
+ }
+ void setIterator() {
+ isIterator_ = true;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ MDefinition* operandIfRedundant();
+
+ bool canProduceFloat32() const override {
+ return canProduceFloat32_;
+ }
+
+ void setCanProduceFloat32(bool can) {
+ canProduceFloat32_ = can;
+ }
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return canConsumeFloat32_;
+ }
+
+ void setCanConsumeFloat32(bool can) {
+ canConsumeFloat32_ = can;
+ }
+
+ TruncateKind operandTruncateKind(size_t index) const override;
+ bool needTruncation(TruncateKind kind) override;
+ void truncate() override;
+};
+
+// The goal of a Beta node is to split a def at a conditionally taken
+// branch, so that uses dominated by it have a different name.
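+// For example, in the true successor of a branch on (x > 0), uses of x can be
+// redirected to a Beta of x whose comparison_ range excludes non-positive
+// values, which lets range analysis refine those uses.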
+class MBeta
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ // This is the range induced by a comparison and branch in a preceding
+ // block. Note that this does not reflect any range constraints from
+ // the input value itself, so it may differ from this node's range()
+ // once that has been computed.
+ const Range* comparison_;
+
+ MBeta(MDefinition* val, const Range* comp)
+ : MUnaryInstruction(val),
+ comparison_(comp)
+ {
+ setResultType(val->type());
+ setResultTypeSet(val->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(Beta)
+ TRIVIAL_NEW_WRAPPERS
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+};
+
+// If the input evaluates to false (i.e. it is NaN, 0, or -0), 0 is returned; otherwise the input is returned.
+class MNaNToZero
+ : public MUnaryInstruction,
+ public DoublePolicy<0>::Data
+{
+ bool operandIsNeverNaN_;
+ bool operandIsNeverNegativeZero_;
+ explicit MNaNToZero(MDefinition* input)
+ : MUnaryInstruction(input), operandIsNeverNaN_(false), operandIsNeverNegativeZero_(false)
+ {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+ public:
+ INSTRUCTION_HEADER(NaNToZero)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool operandIsNeverNaN() const {
+ return operandIsNeverNaN_;
+ }
+
+ bool operandIsNeverNegativeZero() const {
+ return operandIsNeverNegativeZero_;
+ }
+
+ void collectRangeInfoPreTrunc() override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MNaNToZero)
+};
+
+// MIR representation of a Value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrValue
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ ptrdiff_t frameOffset_;
+
+ MOsrValue(MOsrEntry* entry, ptrdiff_t frameOffset)
+ : MUnaryInstruction(entry),
+ frameOffset_(frameOffset)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrValue)
+ TRIVIAL_NEW_WRAPPERS
+
+ ptrdiff_t frameOffset() const {
+ return frameOffset_;
+ }
+
+ MOsrEntry* entry() {
+ return getOperand(0)->toOsrEntry();
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// MIR representation of a JSObject environment chain pointer on the OSR BaselineFrame.
+// The pointer is indexed off of OsrFrameReg.
+class MOsrEnvironmentChain
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ explicit MOsrEnvironmentChain(MOsrEntry* entry)
+ : MUnaryInstruction(entry)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrEnvironmentChain)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() {
+ return getOperand(0)->toOsrEntry();
+ }
+};
+
+// MIR representation of a JSObject ArgumentsObject pointer on the OSR BaselineFrame.
+// The pointer is indexed off of OsrFrameReg.
+class MOsrArgumentsObject
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ explicit MOsrArgumentsObject(MOsrEntry* entry)
+ : MUnaryInstruction(entry)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrArgumentsObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() {
+ return getOperand(0)->toOsrEntry();
+ }
+};
+
+// MIR representation of the return value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrReturnValue
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ explicit MOsrReturnValue(MOsrEntry* entry)
+ : MUnaryInstruction(entry)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrReturnValue)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() {
+ return getOperand(0)->toOsrEntry();
+ }
+};
+
+class MBinarySharedStub
+ : public MBinaryInstruction,
+ public MixPolicy<BoxPolicy<0>, BoxPolicy<1> >::Data
+{
+ protected:
+ explicit MBinarySharedStub(MDefinition* left, MDefinition* right)
+ : MBinaryInstruction(left, right)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BinarySharedStub)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MUnarySharedStub
+ : public MUnaryInstruction,
+ public BoxPolicy<0>::Data
+{
+ explicit MUnarySharedStub(MDefinition* input)
+ : MUnaryInstruction(input)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(UnarySharedStub)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MNullarySharedStub
+ : public MNullaryInstruction
+{
+ explicit MNullarySharedStub()
+ : MNullaryInstruction()
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NullarySharedStub)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// Check the current frame for over-recursion past the global stack limit.
+class MCheckOverRecursed
+ : public MNullaryInstruction
+{
+ public:
+ INSTRUCTION_HEADER(CheckOverRecursed)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Check whether we need to fire the interrupt handler.
+class MInterruptCheck : public MNullaryInstruction
+{
+ MInterruptCheck() {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(InterruptCheck)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Directly jumps to the indicated trap, leaving Wasm code and reporting a
+// runtime error.
+
+class MWasmTrap
+ : public MAryControlInstruction<0, 0>,
+ public NoTypePolicy::Data
+{
+ wasm::Trap trap_;
+ wasm::TrapOffset trapOffset_;
+
+ explicit MWasmTrap(wasm::Trap trap, wasm::TrapOffset trapOffset)
+ : trap_(trap),
+ trapOffset_(trapOffset)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(WasmTrap)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ wasm::Trap trap() const { return trap_; }
+ wasm::TrapOffset trapOffset() const { return trapOffset_; }
+};
+
+// Checks if a value is JS_UNINITIALIZED_LEXICAL, bailing out if so and leaving
+// it to baseline to throw at the correct pc.
+class MLexicalCheck
+ : public MUnaryInstruction,
+ public BoxPolicy<0>::Data
+{
+ BailoutKind kind_;
+ explicit MLexicalCheck(MDefinition* input, BailoutKind kind = Bailout_UninitializedLexical)
+ : MUnaryInstruction(input),
+ kind_(kind)
+ {
+ setResultType(MIRType::Value);
+ setResultTypeSet(input->resultTypeSet());
+ setMovable();
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LexicalCheck)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ BailoutKind bailoutKind() const {
+ return kind_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+// Unconditionally throw a runtime lexical error (such as a use of an uninitialized let binding).
+class MThrowRuntimeLexicalError : public MNullaryInstruction
+{
+ unsigned errorNumber_;
+
+ explicit MThrowRuntimeLexicalError(unsigned errorNumber)
+ : errorNumber_(errorNumber)
+ {
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ThrowRuntimeLexicalError)
+ TRIVIAL_NEW_WRAPPERS
+
+ unsigned errorNumber() const {
+ return errorNumber_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// In the prologues of global and eval scripts, check for redeclarations.
+class MGlobalNameConflictsCheck : public MNullaryInstruction
+{
+ MGlobalNameConflictsCheck() {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GlobalNameConflictsCheck)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// If not defined, set a global variable to |undefined|.
+class MDefVar
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ CompilerPropertyName name_; // Target name to be defined.
+ unsigned attrs_; // Attributes to be set.
+
+ private:
+ MDefVar(PropertyName* name, unsigned attrs, MDefinition* envChain)
+ : MUnaryInstruction(envChain),
+ name_(name),
+ attrs_(attrs)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(DefVar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain))
+
+ PropertyName* name() const {
+ return name_;
+ }
+ unsigned attrs() const {
+ return attrs_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
+class MDefLexical
+ : public MNullaryInstruction
+{
+ CompilerPropertyName name_; // Target name to be defined.
+ unsigned attrs_; // Attributes to be set.
+
+ private:
+ MDefLexical(PropertyName* name, unsigned attrs)
+ : name_(name),
+ attrs_(attrs)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(DefLexical)
+ TRIVIAL_NEW_WRAPPERS
+
+ PropertyName* name() const {
+ return name_;
+ }
+ unsigned attrs() const {
+ return attrs_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
+class MDefFun
+ : public MBinaryInstruction,
+ public ObjectPolicy<0>::Data
+{
+ private:
+ MDefFun(MDefinition* fun, MDefinition* envChain)
+ : MBinaryInstruction(fun, envChain)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(DefFun)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, fun), (1, environmentChain))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
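+// A regular expression literal. Unless setDoNotClone() is called, the source
+// RegExpObject must be cloned for each evaluation (see mustClone()).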
+class MRegExp : public MNullaryInstruction
+{
+ CompilerGCPointer<RegExpObject*> source_;
+ bool mustClone_;
+
+ MRegExp(CompilerConstraintList* constraints, RegExpObject* source)
+ : source_(source),
+ mustClone_(true)
+ {
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, source));
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExp)
+ TRIVIAL_NEW_WRAPPERS
+
+ void setDoNotClone() {
+ mustClone_ = false;
+ }
+ bool mustClone() const {
+ return mustClone_;
+ }
+ RegExpObject* source() const {
+ return source_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(source_);
+ }
+};
+
+class MRegExpMatcher
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>,
+ StringPolicy<1>,
+ IntPolicy<2> >::Data
+{
+ private:
+
+ MRegExpMatcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+ : MAryInstruction<3>()
+ {
+ initOperand(0, regexp);
+ initOperand(1, string);
+ initOperand(2, lastIndex);
+
+ setMovable();
+ // May be object or null.
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExpMatcher)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MRegExpSearcher
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>,
+ StringPolicy<1>,
+ IntPolicy<2> >::Data
+{
+ private:
+
+ MRegExpSearcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+ : MAryInstruction<3>()
+ {
+ initOperand(0, regexp);
+ initOperand(1, string);
+ initOperand(2, lastIndex);
+
+ setMovable();
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExpSearcher)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MRegExpTester
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>,
+ StringPolicy<1>,
+ IntPolicy<2> >::Data
+{
+ private:
+
+ MRegExpTester(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+ : MAryInstruction<3>()
+ {
+ initOperand(0, regexp);
+ initOperand(1, string);
+ initOperand(2, lastIndex);
+
+ setMovable();
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExpTester)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+class MRegExpPrototypeOptimizable
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MRegExpPrototypeOptimizable(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExpPrototypeOptimizable)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MRegExpInstanceOptimizable
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+ explicit MRegExpInstanceOptimizable(MDefinition* object, MDefinition* proto)
+ : MBinaryInstruction(object, proto)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(RegExpInstanceOptimizable)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, proto))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
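+// Return the index of the first '$' character in |str|, or -1 if there is none
+// (used by the self-hosted RegExp replace code).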
+class MGetFirstDollarIndex
+ : public MUnaryInstruction,
+ public StringPolicy<0>::Data
+{
+ explicit MGetFirstDollarIndex(MDefinition* str)
+ : MUnaryInstruction(str)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetFirstDollarIndex)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, str))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+class MStringReplace
+ : public MTernaryInstruction,
+ public Mix3Policy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2> >::Data
+{
+ private:
+
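+    // Set when the pattern is known to be a flat (non-RegExp) string; see the
+    // assertion in canRecoverOnBailout() below.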
+ bool isFlatReplacement_;
+
+ MStringReplace(MDefinition* string, MDefinition* pattern, MDefinition* replacement)
+ : MTernaryInstruction(string, pattern, replacement), isFlatReplacement_(false)
+ {
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StringReplace)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string), (1, pattern), (2, replacement))
+
+ void setFlatReplacement() {
+ MOZ_ASSERT(!isFlatReplacement_);
+ isFlatReplacement_ = true;
+ }
+
+ bool isFlatReplacement() const {
+ return isFlatReplacement_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isStringReplace())
+ return false;
+ if (isFlatReplacement_ != ins->toStringReplace()->isFlatReplacement())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ if (isFlatReplacement_) {
+ MOZ_ASSERT(!pattern()->isRegExp());
+ return true;
+ }
+ return false;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MSubstr
+ : public MTernaryInstruction,
+ public Mix3Policy<StringPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
+{
+ private:
+
+ MSubstr(MDefinition* string, MDefinition* begin, MDefinition* length)
+ : MTernaryInstruction(string, begin, length)
+ {
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Substr)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string), (1, begin), (2, length))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+struct LambdaFunctionInfo
+{
+    // The function used in a lambda is the canonical original function in
+    // the script, and is immutable except for delazification. Record this
+    // information while still on the main thread to avoid races.
+ CompilerFunction fun;
+ uint16_t flags;
+ uint16_t nargs;
+ gc::Cell* scriptOrLazyScript;
+ bool singletonType;
+ bool useSingletonForClone;
+
+ explicit LambdaFunctionInfo(JSFunction* fun)
+ : fun(fun), flags(fun->flags()), nargs(fun->nargs()),
+ scriptOrLazyScript(fun->hasScript()
+ ? (gc::Cell*) fun->nonLazyScript()
+ : (gc::Cell*) fun->lazyScript()),
+ singletonType(fun->isSingleton()),
+ useSingletonForClone(ObjectGroup::useSingletonForClone(fun))
+ {}
+
+ bool appendRoots(MRootList& roots) const {
+ if (!roots.append(fun))
+ return false;
+ if (fun->hasScript())
+ return roots.append(fun->nonLazyScript());
+ return roots.append(fun->lazyScript());
+ }
+
+ private:
+ LambdaFunctionInfo(const LambdaFunctionInfo&) = delete;
+ void operator=(const LambdaFunctionInfo&) = delete;
+};
+
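+// Create the function object for a lambda expression: in the common case the
+// canonical function in info() is cloned with the given environment chain.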
+class MLambda
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ const LambdaFunctionInfo info_;
+
+ MLambda(CompilerConstraintList* constraints, MDefinition* envChain, MConstant* cst)
+ : MBinaryInstruction(envChain, cst), info_(&cst->toObject().as<JSFunction>())
+ {
+ setResultType(MIRType::Object);
+ if (!info().fun->isSingleton() && !ObjectGroup::useSingletonForClone(info().fun))
+ setResultTypeSet(MakeSingletonTypeSet(constraints, info().fun));
+ }
+
+ public:
+ INSTRUCTION_HEADER(Lambda)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain))
+
+ MConstant* functionOperand() const {
+ return getOperand(1)->toConstant();
+ }
+ const LambdaFunctionInfo& info() const {
+ return info_;
+ }
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return info_.appendRoots(roots);
+ }
+};
+
+class MLambdaArrow
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>>::Data
+{
+ const LambdaFunctionInfo info_;
+
+ MLambdaArrow(CompilerConstraintList* constraints, MDefinition* envChain,
+ MDefinition* newTarget_, JSFunction* fun)
+ : MBinaryInstruction(envChain, newTarget_), info_(fun)
+ {
+ setResultType(MIRType::Object);
+ MOZ_ASSERT(!ObjectGroup::useSingletonForClone(fun));
+ if (!fun->isSingleton())
+ setResultTypeSet(MakeSingletonTypeSet(constraints, fun));
+ }
+
+ public:
+ INSTRUCTION_HEADER(LambdaArrow)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain), (1, newTargetDef))
+
+ const LambdaFunctionInfo& info() const {
+ return info_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return info_.appendRoots(roots);
+ }
+};
+
+// Returns obj->slots.
+class MSlots
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MSlots(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Slots);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Slots)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MSlots)
+};
+
+// Returns obj->elements.
+class MElements
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool unboxed_;
+
+ explicit MElements(MDefinition* object, bool unboxed = false)
+ : MUnaryInstruction(object), unboxed_(unboxed)
+ {
+ setResultType(MIRType::Elements);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Elements)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool unboxed() const {
+ return unboxed_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toElements()->unboxed() == unboxed();
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MElements)
+};
+
+// A constant value for some object's typed array elements.
+class MConstantElements : public MNullaryInstruction
+{
+ SharedMem<void*> value_;
+
+ protected:
+ explicit MConstantElements(SharedMem<void*> v)
+ : value_(v)
+ {
+ setResultType(MIRType::Elements);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ConstantElements)
+ TRIVIAL_NEW_WRAPPERS
+
+ SharedMem<void*> value() const {
+ return value_;
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ HashNumber valueHash() const override {
+ return (HashNumber)(size_t) value_.asValue();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->isConstantElements() && ins->toConstantElements()->value() == value();
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ ALLOW_CLONE(MConstantElements)
+};
+
+// Passes through an object's elements, after ensuring they contain only doubles.
+class MConvertElementsToDoubles
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MConvertElementsToDoubles(MDefinition* elements)
+ : MUnaryInstruction(elements)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Elements);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ConvertElementsToDoubles)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ // This instruction can read and write to the elements' contents.
+ // However, it is alright to hoist this from loops which explicitly
+ // read or write to the elements: such reads and writes will use double
+ // values and can be reordered freely wrt this conversion, except that
+ // definite double loads must follow the conversion. The latter
+ // property is ensured by chaining this instruction with the elements
+ // themselves, in the same manner as MBoundsCheck.
+ return AliasSet::None();
+ }
+};
+
+// If |elements| has the CONVERT_DOUBLE_ELEMENTS flag, convert value to
+// double. Else return the original value.
+class MMaybeToDoubleElement
+ : public MBinaryInstruction,
+ public IntPolicy<1>::Data
+{
+ MMaybeToDoubleElement(MDefinition* elements, MDefinition* value)
+ : MBinaryInstruction(elements, value)
+ {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ setMovable();
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(MaybeToDoubleElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, value))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Passes through an object, after ensuring its elements are not copy-on-write.
+class MMaybeCopyElementsForWrite
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool checkNative_;
+
+ explicit MMaybeCopyElementsForWrite(MDefinition* object, bool checkNative)
+ : MUnaryInstruction(object), checkNative_(checkNative)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ setResultTypeSet(object->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(MaybeCopyElementsForWrite)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool checkNative() const {
+ return checkNative_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ checkNative() == ins->toMaybeCopyElementsForWrite()->checkNative();
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+#ifdef DEBUG
+ bool needsResumePoint() const override {
+ // This instruction is idempotent and does not change observable
+ // behavior, so does not need its own resume point.
+ return false;
+ }
+#endif
+
+};
+
+// Load the initialized length from an elements header.
+class MInitializedLength
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MInitializedLength(MDefinition* elements)
+ : MUnaryInstruction(elements)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(InitializedLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MInitializedLength)
+};
+
+// Store to the initialized length in an elements header. Note the input is an
+// *index*, one less than the desired length.
+class MSetInitializedLength
+ : public MAryInstruction<2>,
+ public NoTypePolicy::Data
+{
+ MSetInitializedLength(MDefinition* elements, MDefinition* index) {
+ initOperand(0, elements);
+ initOperand(1, index);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetInitializedLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MSetInitializedLength)
+};
+
+// Load the length from an unboxed array.
+class MUnboxedArrayLength
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MUnboxedArrayLength(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(UnboxedArrayLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MUnboxedArrayLength)
+};
+
+// Load the initialized length from an unboxed array.
+class MUnboxedArrayInitializedLength
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MUnboxedArrayInitializedLength(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(UnboxedArrayInitializedLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MUnboxedArrayInitializedLength)
+};
+
+// Increment the initialized length of an unboxed array object.
+class MIncrementUnboxedArrayInitializedLength
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MIncrementUnboxedArrayInitializedLength(MDefinition* obj)
+ : MUnaryInstruction(obj)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(IncrementUnboxedArrayInitializedLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MIncrementUnboxedArrayInitializedLength)
+};
+
+// Set the initialized length of an unboxed array object.
+class MSetUnboxedArrayInitializedLength
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MSetUnboxedArrayInitializedLength(MDefinition* obj, MDefinition* length)
+ : MBinaryInstruction(obj, length)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(SetUnboxedArrayInitializedLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, length))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MSetUnboxedArrayInitializedLength)
+};
+
+// Load the array length from an elements header.
+class MArrayLength
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MArrayLength(MDefinition* elements)
+ : MUnaryInstruction(elements)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrayLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MArrayLength)
+};
+
+// Store to the length in an elements header. Note the input is an *index*, one
+// less than the desired length.
+class MSetArrayLength
+ : public MAryInstruction<2>,
+ public NoTypePolicy::Data
+{
+ MSetArrayLength(MDefinition* elements, MDefinition* index) {
+ initOperand(0, elements);
+ initOperand(1, index);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetArrayLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+};
+
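+// Inlined step of a Map or Set iterator: write the next entry into |result| and
+// return a boolean indicating whether iteration has finished (this mirrors the
+// self-hosted iterator helpers).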
+class MGetNextEntryForIterator
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+ public:
+ enum Mode {
+ Map,
+ Set
+ };
+
+ private:
+ Mode mode_;
+
+ explicit MGetNextEntryForIterator(MDefinition* iter, MDefinition* result, Mode mode)
+ : MBinaryInstruction(iter, result), mode_(mode)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetNextEntryForIterator)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, iter), (1, result))
+
+ Mode mode() const {
+ return mode_;
+ }
+};
+
+// Read the length of a typed array.
+class MTypedArrayLength
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MTypedArrayLength(MDefinition* obj)
+ : MUnaryInstruction(obj)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypedArrayLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::TypedArrayLength);
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+};
+
+// Load a typed array's elements vector.
+class MTypedArrayElements
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MTypedArrayElements(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Elements);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypedArrayElements)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MTypedArrayElements)
+};
+
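+// Copy the elements of the |source| typed array into |target| starting at
+// |targetOffset|. The caller guarantees that the underlying memory ranges do
+// not overlap (hence "disjoint").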
+class MSetDisjointTypedElements
+ : public MTernaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MSetDisjointTypedElements(MDefinition* target, MDefinition* targetOffset,
+ MDefinition* source)
+ : MTernaryInstruction(target, targetOffset, source)
+ {
+ MOZ_ASSERT(target->type() == MIRType::Object);
+ MOZ_ASSERT(targetOffset->type() == MIRType::Int32);
+ MOZ_ASSERT(source->type() == MIRType::Object);
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetDisjointTypedElements)
+ NAMED_OPERANDS((0, target), (1, targetOffset), (2, source))
+
+ static MSetDisjointTypedElements*
+ New(TempAllocator& alloc, MDefinition* target, MDefinition* targetOffset,
+ MDefinition* source)
+ {
+ return new(alloc) MSetDisjointTypedElements(target, targetOffset, source);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+
+ ALLOW_CLONE(MSetDisjointTypedElements)
+};
+
+// Load a binary data object's "elements", which is just its opaque
+// binary data space. Eventually this should probably be
+// unified with `MTypedArrayElements`.
+class MTypedObjectElements
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool definitelyOutline_;
+
+ private:
+ explicit MTypedObjectElements(MDefinition* object, bool definitelyOutline)
+ : MUnaryInstruction(object),
+ definitelyOutline_(definitelyOutline)
+ {
+ setResultType(MIRType::Elements);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypedObjectElements)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool definitelyOutline() const {
+ return definitelyOutline_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isTypedObjectElements())
+ return false;
+ const MTypedObjectElements* other = ins->toTypedObjectElements();
+ if (other->definitelyOutline() != definitelyOutline())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Inlined version of the js::SetTypedObjectOffset() intrinsic.
+class MSetTypedObjectOffset
+ : public MBinaryInstruction,
+ public NoTypePolicy::Data
+{
+ private:
+ MSetTypedObjectOffset(MDefinition* object, MDefinition* offset)
+ : MBinaryInstruction(object, offset)
+ {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ MOZ_ASSERT(offset->type() == MIRType::Int32);
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetTypedObjectOffset)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, offset))
+
+ AliasSet getAliasSet() const override {
+ // This affects the result of MTypedObjectElements,
+ // which is described as a load of ObjectFields.
+ return AliasSet::Store(AliasSet::ObjectFields);
+ }
+};
+
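+// Keep |object| alive at this point: the instruction emits no code of its own,
+// but prevents the object from being optimized away while derived pointers
+// (such as its elements) are still in use.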
+class MKeepAliveObject
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MKeepAliveObject(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::None);
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(KeepAliveObject)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+};
+
+// Perform the ! (logical negation) operation.
+class MNot
+ : public MUnaryInstruction,
+ public TestPolicy::Data
+{
+ bool operandMightEmulateUndefined_;
+ bool operandIsNeverNaN_;
+
+ explicit MNot(MDefinition* input, CompilerConstraintList* constraints = nullptr)
+ : MUnaryInstruction(input),
+ operandMightEmulateUndefined_(true),
+ operandIsNeverNaN_(false)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ if (constraints)
+ cacheOperandMightEmulateUndefined(constraints);
+ }
+
+ void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
+
+ public:
+ static MNot* NewInt32(TempAllocator& alloc, MDefinition* input) {
+ MOZ_ASSERT(input->type() == MIRType::Int32 || input->type() == MIRType::Int64);
+ auto* ins = new(alloc) MNot(input);
+ ins->setResultType(MIRType::Int32);
+ return ins;
+ }
+
+ INSTRUCTION_HEADER(Not)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ void markNoOperandEmulatesUndefined() {
+ operandMightEmulateUndefined_ = false;
+ }
+ bool operandMightEmulateUndefined() const {
+ return operandMightEmulateUndefined_;
+ }
+ bool operandIsNeverNaN() const {
+ return operandIsNeverNaN_;
+ }
+
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void collectRangeInfoPreTrunc() override;
+
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+// Bail out if index + minimum < 0 or index + maximum >= length. The length used
+// in a bounds check must not be negative, or the wrong result may be computed
+// (unsigned comparisons may be used).
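+// For example, a hoisted check with minimum == 0 and maximum == 3 guards the
+// accesses at index, index + 1, ... index + 3 with a single instruction.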
+class MBoundsCheck
+ : public MBinaryInstruction,
+ public MixPolicy<IntPolicy<0>, IntPolicy<1>>::Data
+{
+ // Range over which to perform the bounds check, may be modified by GVN.
+ int32_t minimum_;
+ int32_t maximum_;
+ bool fallible_;
+
+ MBoundsCheck(MDefinition* index, MDefinition* length)
+ : MBinaryInstruction(index, length), minimum_(0), maximum_(0), fallible_(true)
+ {
+ setGuard();
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+
+ // Returns the checked index.
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BoundsCheck)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index), (1, length))
+
+ int32_t minimum() const {
+ return minimum_;
+ }
+ void setMinimum(int32_t n) {
+ MOZ_ASSERT(fallible_);
+ minimum_ = n;
+ }
+ int32_t maximum() const {
+ return maximum_;
+ }
+ void setMaximum(int32_t n) {
+ MOZ_ASSERT(fallible_);
+ maximum_ = n;
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isBoundsCheck())
+ return false;
+ const MBoundsCheck* other = ins->toBoundsCheck();
+ if (minimum() != other->minimum() || maximum() != other->maximum())
+ return false;
+ if (fallible() != other->fallible())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ virtual AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+ bool fallible() const {
+ return fallible_;
+ }
+ void collectRangeInfoPreTrunc() override;
+
+ ALLOW_CLONE(MBoundsCheck)
+};
+
+// Bail out if index < minimum.
+class MBoundsCheckLower
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ int32_t minimum_;
+ bool fallible_;
+
+ explicit MBoundsCheckLower(MDefinition* index)
+ : MUnaryInstruction(index), minimum_(0), fallible_(true)
+ {
+ setGuard();
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BoundsCheckLower)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index))
+
+ int32_t minimum() const {
+ return minimum_;
+ }
+ void setMinimum(int32_t n) {
+ minimum_ = n;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool fallible() const {
+ return fallible_;
+ }
+ void collectRangeInfoPreTrunc() override;
+};
+
+// Instructions which access an object's elements can either do so on a
+// definition accessing that elements pointer, or on the object itself, if its
+// elements are inline. In the latter case there must be an offset associated
+// with the access.
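+// Concretely: an MIRType::Object operand is only valid together with a non-zero
+// offsetAdjustment, while an MIRType::Elements operand may use any adjustment.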
+static inline bool
+IsValidElementsType(MDefinition* elements, int32_t offsetAdjustment)
+{
+ return elements->type() == MIRType::Elements ||
+ (elements->type() == MIRType::Object && offsetAdjustment != 0);
+}
+
+// Load a value from a dense array's element vector, doing a hole check if the
+// array is not known to be packed.
+class MLoadElement
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool needsHoleCheck_;
+ bool loadDoubles_;
+ int32_t offsetAdjustment_;
+
+ MLoadElement(MDefinition* elements, MDefinition* index,
+ bool needsHoleCheck, bool loadDoubles, int32_t offsetAdjustment = 0)
+ : MBinaryInstruction(elements, index),
+ needsHoleCheck_(needsHoleCheck),
+ loadDoubles_(loadDoubles),
+ offsetAdjustment_(offsetAdjustment)
+ {
+ if (needsHoleCheck) {
+ // Uses may be optimized away based on this instruction's result
+ // type. This means it's invalid to DCE this instruction, as we
+ // have to invalidate when we read a hole.
+ setGuard();
+ }
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ bool needsHoleCheck() const {
+ return needsHoleCheck_;
+ }
+ bool loadDoubles() const {
+ return loadDoubles_;
+ }
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool fallible() const {
+ return needsHoleCheck();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadElement())
+ return false;
+ const MLoadElement* other = ins->toLoadElement();
+ if (needsHoleCheck() != other->needsHoleCheck())
+ return false;
+ if (loadDoubles() != other->loadDoubles())
+ return false;
+ if (offsetAdjustment() != other->offsetAdjustment())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ AliasType mightAlias(const MDefinition* store) const override;
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+
+ ALLOW_CLONE(MLoadElement)
+};
+
+// Load a value from the elements vector for a dense native or unboxed array.
+// If the index is out-of-bounds, or the indexed slot has a hole, undefined is
+// returned instead.
+class MLoadElementHole
+ : public MTernaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ // Unboxed element type, JSVAL_TYPE_MAGIC for dense native elements.
+ JSValueType unboxedType_;
+
+ bool needsNegativeIntCheck_;
+ bool needsHoleCheck_;
+
+ MLoadElementHole(MDefinition* elements, MDefinition* index, MDefinition* initLength,
+ JSValueType unboxedType, bool needsHoleCheck)
+ : MTernaryInstruction(elements, index, initLength),
+ unboxedType_(unboxedType),
+ needsNegativeIntCheck_(true),
+ needsHoleCheck_(needsHoleCheck)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+
+ // Set the guard flag to make sure we bail when we see a negative
+ // index. We can clear this flag (and needsNegativeIntCheck_) in
+ // collectRangeInfoPreTrunc.
+ setGuard();
+
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(initLength->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, initLength))
+
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ bool needsNegativeIntCheck() const {
+ return needsNegativeIntCheck_;
+ }
+ bool needsHoleCheck() const {
+ return needsHoleCheck_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadElementHole())
+ return false;
+ const MLoadElementHole* other = ins->toLoadElementHole();
+ if (unboxedType() != other->unboxedType())
+ return false;
+ if (needsHoleCheck() != other->needsHoleCheck())
+ return false;
+ if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::BoxedOrUnboxedElements(unboxedType()));
+ }
+ void collectRangeInfoPreTrunc() override;
+
+ ALLOW_CLONE(MLoadElementHole)
+};
+
+class MLoadUnboxedObjectOrNull
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ public:
+ enum NullBehavior {
+ HandleNull,
+ BailOnNull,
+ NullNotPossible
+ };
+
+ private:
+ NullBehavior nullBehavior_;
+ int32_t offsetAdjustment_;
+
+ MLoadUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
+ NullBehavior nullBehavior, int32_t offsetAdjustment)
+ : MBinaryInstruction(elements, index),
+ nullBehavior_(nullBehavior),
+ offsetAdjustment_(offsetAdjustment)
+ {
+ if (nullBehavior == BailOnNull) {
+ // Don't eliminate loads which bail out on a null pointer, for the
+ // same reason as MLoadElement.
+ setGuard();
+ }
+ setResultType(nullBehavior == HandleNull ? MIRType::Value : MIRType::Object);
+ setMovable();
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadUnboxedObjectOrNull)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ NullBehavior nullBehavior() const {
+ return nullBehavior_;
+ }
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool fallible() const {
+ return nullBehavior() == BailOnNull;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadUnboxedObjectOrNull())
+ return false;
+ const MLoadUnboxedObjectOrNull* other = ins->toLoadUnboxedObjectOrNull();
+ if (nullBehavior() != other->nullBehavior())
+ return false;
+ if (offsetAdjustment() != other->offsetAdjustment())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+ AliasType mightAlias(const MDefinition* store) const override;
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MLoadUnboxedObjectOrNull)
+};
+
+class MLoadUnboxedString
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ int32_t offsetAdjustment_;
+
+ MLoadUnboxedString(MDefinition* elements, MDefinition* index, int32_t offsetAdjustment = 0)
+ : MBinaryInstruction(elements, index),
+ offsetAdjustment_(offsetAdjustment)
+ {
+ setResultType(MIRType::String);
+ setMovable();
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadUnboxedString)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadUnboxedString())
+ return false;
+ const MLoadUnboxedString* other = ins->toLoadUnboxedString();
+ if (offsetAdjustment() != other->offsetAdjustment())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+
+ ALLOW_CLONE(MLoadUnboxedString)
+};
+
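+// State shared by the dense-element store instructions below: the element type
+// the store has been specialized to, and whether a write barrier is needed.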
+class MStoreElementCommon
+{
+ MIRType elementType_;
+ bool needsBarrier_;
+
+ protected:
+ MStoreElementCommon()
+ : elementType_(MIRType::Value),
+ needsBarrier_(false)
+ { }
+
+ public:
+ MIRType elementType() const {
+ return elementType_;
+ }
+ void setElementType(MIRType elementType) {
+ MOZ_ASSERT(elementType != MIRType::None);
+ elementType_ = elementType;
+ }
+ bool needsBarrier() const {
+ return needsBarrier_;
+ }
+ void setNeedsBarrier() {
+ needsBarrier_ = true;
+ }
+};
+
+// Store a value to a dense array's element vector.
+class MStoreElement
+ : public MAryInstruction<3>,
+ public MStoreElementCommon,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<2> >::Data
+{
+ bool needsHoleCheck_;
+ int32_t offsetAdjustment_;
+
+ MStoreElement(MDefinition* elements, MDefinition* index, MDefinition* value,
+ bool needsHoleCheck, int32_t offsetAdjustment = 0)
+ {
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, value);
+ needsHoleCheck_ = needsHoleCheck;
+ offsetAdjustment_ = offsetAdjustment;
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::Element);
+ }
+ bool needsHoleCheck() const {
+ return needsHoleCheck_;
+ }
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool fallible() const {
+ return needsHoleCheck();
+ }
+
+ ALLOW_CLONE(MStoreElement)
+};
+
+// Like MStoreElement, but supports indexes >= initialized length, and can
+// handle unboxed arrays. The downside is that we cannot hoist the elements
+// vector and bounds check, since this instruction may update the (initialized)
+// length and reallocate the elements vector.
+class MStoreElementHole
+ : public MAryInstruction<4>,
+ public MStoreElementCommon,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
+{
+ JSValueType unboxedType_;
+
+ MStoreElementHole(MDefinition* object, MDefinition* elements,
+ MDefinition* index, MDefinition* value, JSValueType unboxedType)
+ : unboxedType_(unboxedType)
+ {
+ initOperand(0, object);
+ initOperand(1, elements);
+ initOperand(2, index);
+ initOperand(3, value);
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
+
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ AliasSet getAliasSet() const override {
+ // StoreElementHole can update the initialized length, the array length
+ // or reallocate obj->elements.
+ return AliasSet::Store(AliasSet::ObjectFields |
+ AliasSet::BoxedOrUnboxedElements(unboxedType()));
+ }
+
+ ALLOW_CLONE(MStoreElementHole)
+};
+
+// Try to store a value to a dense array's element vector. The store may fail if
+// the object is frozen. Cannot be used on an object that has extra indexed
+// properties.
+class MFallibleStoreElement
+ : public MAryInstruction<4>,
+ public MStoreElementCommon,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
+{
+ JSValueType unboxedType_;
+ bool strict_;
+
+ MFallibleStoreElement(MDefinition* object, MDefinition* elements,
+ MDefinition* index, MDefinition* value,
+ JSValueType unboxedType, bool strict)
+ : unboxedType_(unboxedType)
+ {
+ initOperand(0, object);
+ initOperand(1, elements);
+ initOperand(2, index);
+ initOperand(3, value);
+ strict_ = strict;
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FallibleStoreElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
+
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields |
+ AliasSet::BoxedOrUnboxedElements(unboxedType()));
+ }
+ bool strict() const {
+ return strict_;
+ }
+
+ ALLOW_CLONE(MFallibleStoreElement)
+};
+
+// Store an unboxed object or null pointer to a vector.
+class MStoreUnboxedObjectOrNull
+ : public MAryInstruction<4>,
+ public StoreUnboxedObjectOrNullPolicy::Data
+{
+ int32_t offsetAdjustment_;
+ bool preBarrier_;
+
+ MStoreUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
+ MDefinition* value, MDefinition* typedObj,
+ int32_t offsetAdjustment = 0, bool preBarrier = true)
+ : offsetAdjustment_(offsetAdjustment), preBarrier_(preBarrier)
+ {
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, value);
+ initOperand(3, typedObj);
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(typedObj->type() == MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreUnboxedObjectOrNull)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value), (3, typedObj))
+
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool preBarrier() const {
+ return preBarrier_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+
+ // For StoreUnboxedObjectOrNullPolicy.
+ void setValue(MDefinition* def) {
+ replaceOperand(2, def);
+ }
+
+ ALLOW_CLONE(MStoreUnboxedObjectOrNull)
+};
+
+// Store an unboxed string to a vector.
+class MStoreUnboxedString
+ : public MAryInstruction<3>,
+ public MixPolicy<SingleObjectPolicy, ConvertToStringPolicy<2> >::Data
+{
+ int32_t offsetAdjustment_;
+ bool preBarrier_;
+
+ MStoreUnboxedString(MDefinition* elements, MDefinition* index, MDefinition* value,
+ int32_t offsetAdjustment = 0, bool preBarrier = true)
+ : offsetAdjustment_(offsetAdjustment), preBarrier_(preBarrier)
+ {
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, value);
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreUnboxedString)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ bool preBarrier() const {
+ return preBarrier_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+
+ ALLOW_CLONE(MStoreUnboxedString)
+};
+
+// Passes through an object, after ensuring it is converted from an unboxed
+// object to a native representation.
+class MConvertUnboxedObjectToNative
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ CompilerObjectGroup group_;
+
+ explicit MConvertUnboxedObjectToNative(MDefinition* obj, ObjectGroup* group)
+ : MUnaryInstruction(obj),
+ group_(group)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ConvertUnboxedObjectToNative)
+ NAMED_OPERANDS((0, object))
+
+ static MConvertUnboxedObjectToNative* New(TempAllocator& alloc, MDefinition* obj,
+ ObjectGroup* group);
+
+ ObjectGroup* group() const {
+ return group_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ return ins->toConvertUnboxedObjectToNative()->group() == group();
+ }
+ AliasSet getAliasSet() const override {
+ // This instruction can read and write to all parts of the object, but
+ // is marked as non-effectful so it can be consolidated by LICM and GVN
+ // and avoid inhibiting other optimizations.
+ //
+ // This is valid to do because when unboxed objects might have a native
+ // group they can be converted to, we do not optimize accesses to the
+ // unboxed objects and do not guard on their group or shape (other than
+ // in this opcode).
+ //
+ // Later accesses can assume the object has a native representation
+ // and optimize accordingly. Those accesses cannot be reordered before
+ // this instruction, however. This is prevented by chaining this
+ // instruction with the object itself, in the same way as MBoundsCheck.
+ return AliasSet::None();
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(group_);
+ }
+};
+
+// Array.prototype.pop or Array.prototype.shift on a dense array.
+class MArrayPopShift
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ public:
+ enum Mode {
+ Pop,
+ Shift
+ };
+
+ private:
+ Mode mode_;
+ JSValueType unboxedType_;
+ bool needsHoleCheck_;
+ bool maybeUndefined_;
+
+ MArrayPopShift(MDefinition* object, Mode mode, JSValueType unboxedType,
+ bool needsHoleCheck, bool maybeUndefined)
+ : MUnaryInstruction(object), mode_(mode), unboxedType_(unboxedType),
+ needsHoleCheck_(needsHoleCheck), maybeUndefined_(maybeUndefined)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(ArrayPopShift)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool needsHoleCheck() const {
+ return needsHoleCheck_;
+ }
+ bool maybeUndefined() const {
+ return maybeUndefined_;
+ }
+    Mode mode() const {
+        return mode_;
+    }
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields |
+ AliasSet::BoxedOrUnboxedElements(unboxedType()));
+ }
+
+ ALLOW_CLONE(MArrayPopShift)
+};
+
+// Array.prototype.push on a dense array. Returns the new array length.
+class MArrayPush
+ : public MBinaryInstruction,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+ JSValueType unboxedType_;
+
+ MArrayPush(MDefinition* object, MDefinition* value, JSValueType unboxedType)
+ : MBinaryInstruction(object, value), unboxedType_(unboxedType)
+ {
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrayPush)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields |
+ AliasSet::BoxedOrUnboxedElements(unboxedType()));
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MArrayPush)
+};
+
+// Array.prototype.slice on a dense array.
+class MArraySlice
+ : public MTernaryInstruction,
+ public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
+{
+ CompilerObject templateObj_;
+ gc::InitialHeap initialHeap_;
+ JSValueType unboxedType_;
+
+ MArraySlice(CompilerConstraintList* constraints, MDefinition* obj,
+ MDefinition* begin, MDefinition* end,
+ JSObject* templateObj, gc::InitialHeap initialHeap, JSValueType unboxedType)
+ : MTernaryInstruction(obj, begin, end),
+ templateObj_(templateObj),
+ initialHeap_(initialHeap),
+ unboxedType_(unboxedType)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArraySlice)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, begin), (2, end))
+
+ JSObject* templateObj() const {
+ return templateObj_;
+ }
+
+ gc::InitialHeap initialHeap() const {
+ return initialHeap_;
+ }
+
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::BoxedOrUnboxedElements(unboxedType()) |
+ AliasSet::ObjectFields);
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObj_);
+ }
+};
+
+class MArrayJoin
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >::Data
+{
+ MArrayJoin(MDefinition* array, MDefinition* sep)
+ : MBinaryInstruction(array, sep)
+ {
+ setResultType(MIRType::String);
+ }
+ public:
+ INSTRUCTION_HEADER(ArrayJoin)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, array), (1, sep))
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+ virtual AliasSet getAliasSet() const override {
+ // Array.join might coerce the elements of the Array to strings. This
+        // coercion might cause the evaluation of some JavaScript code.
+ return AliasSet::Store(AliasSet::Any);
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+// All barriered operations - MCompareExchangeTypedArrayElement,
+// MExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as
+// well as MLoadUnboxedScalar and MStoreUnboxedScalar when they are
+// marked as requiring a memory barrier - have the following
+// attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads
+// and stores preceding the barriered operation from being moved to
+// after the barriered operation, and vice versa, and to prevent the
+// barriered operation from being removed or hoisted.
+
+enum MemoryBarrierRequirement
+{
+ DoesNotRequireMemoryBarrier,
+ DoesRequireMemoryBarrier
+};
+
+// Also see the comments at MemoryBarrierRequirement, above.
+
+// Load an unboxed scalar value from a typed array or other object.
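+// readType_ can differ from storageType_ only for SIMD reads (see setSimdRead),
+// where several consecutive elements are read as one value.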
+class MLoadUnboxedScalar
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ Scalar::Type storageType_;
+ Scalar::Type readType_;
+ unsigned numElems_; // used only for SIMD
+ bool requiresBarrier_;
+ int32_t offsetAdjustment_;
+ bool canonicalizeDoubles_;
+
+ MLoadUnboxedScalar(MDefinition* elements, MDefinition* index, Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+ int32_t offsetAdjustment = 0, bool canonicalizeDoubles = true)
+ : MBinaryInstruction(elements, index),
+ storageType_(storageType),
+ readType_(storageType),
+ numElems_(1),
+ requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+ offsetAdjustment_(offsetAdjustment),
+ canonicalizeDoubles_(canonicalizeDoubles)
+ {
+ setResultType(MIRType::Value);
+ if (requiresBarrier_)
+ setGuard(); // Not removable or movable
+ else
+ setMovable();
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadUnboxedScalar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ void setSimdRead(Scalar::Type type, unsigned numElems) {
+ readType_ = type;
+ numElems_ = numElems;
+ }
+ unsigned numElems() const {
+ return numElems_;
+ }
+ Scalar::Type readType() const {
+ return readType_;
+ }
+
+ Scalar::Type storageType() const {
+ return storageType_;
+ }
+ bool fallible() const {
+ // Bailout if the result does not fit in an int32.
+ return readType_ == Scalar::Uint32 && type() == MIRType::Int32;
+ }
+ bool requiresMemoryBarrier() const {
+ return requiresBarrier_;
+ }
+ bool canonicalizeDoubles() const {
+ return canonicalizeDoubles_;
+ }
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ void setOffsetAdjustment(int32_t offsetAdjustment) {
+ offsetAdjustment_ = offsetAdjustment;
+ }
+ AliasSet getAliasSet() const override {
+ // When a barrier is needed make the instruction effectful by
+ // giving it a "store" effect.
+ if (requiresBarrier_)
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (requiresBarrier_)
+ return false;
+ if (!ins->isLoadUnboxedScalar())
+ return false;
+ const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar();
+ if (storageType_ != other->storageType_)
+ return false;
+ if (readType_ != other->readType_)
+ return false;
+ if (numElems_ != other->numElems_)
+ return false;
+ if (offsetAdjustment() != other->offsetAdjustment())
+ return false;
+ if (canonicalizeDoubles() != other->canonicalizeDoubles())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool canProduceFloat32() const override { return storageType_ == Scalar::Float32; }
+
+ ALLOW_CLONE(MLoadUnboxedScalar)
+};
+
+// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
+class MLoadTypedArrayElementHole
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ Scalar::Type arrayType_;
+ bool allowDouble_;
+
+ MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index, Scalar::Type arrayType, bool allowDouble)
+ : MBinaryInstruction(object, index), arrayType_(arrayType), allowDouble_(allowDouble)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadTypedArrayElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, index))
+
+ Scalar::Type arrayType() const {
+ return arrayType_;
+ }
+ bool allowDouble() const {
+ return allowDouble_;
+ }
+ bool fallible() const {
+ return arrayType_ == Scalar::Uint32 && !allowDouble_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadTypedArrayElementHole())
+ return false;
+ const MLoadTypedArrayElementHole* other = ins->toLoadTypedArrayElementHole();
+ if (arrayType() != other->arrayType())
+ return false;
+ if (allowDouble() != other->allowDouble())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+ bool canProduceFloat32() const override { return arrayType_ == Scalar::Float32; }
+
+ ALLOW_CLONE(MLoadTypedArrayElementHole)
+};
+
+// Load a value fallibly or infallibly from a statically known typed array.
+class MLoadTypedArrayElementStatic
+ : public MUnaryInstruction,
+ public ConvertToInt32Policy<0>::Data
+{
+ MLoadTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr,
+ int32_t offset = 0, bool needsBoundsCheck = true)
+ : MUnaryInstruction(ptr), someTypedArray_(someTypedArray), offset_(offset),
+ needsBoundsCheck_(needsBoundsCheck), fallible_(true)
+ {
+        Scalar::Type type = accessType();
+ if (type == Scalar::Float32)
+ setResultType(MIRType::Float32);
+ else if (type == Scalar::Float64)
+ setResultType(MIRType::Double);
+ else
+ setResultType(MIRType::Int32);
+ }
+
+ CompilerObject someTypedArray_;
+
+    // An offset to be encoded in the load instruction, taking advantage of the
+    // addressing modes. This is only non-zero when the access is proven to be
+    // within bounds.
+ int32_t offset_;
+ bool needsBoundsCheck_;
+ bool fallible_;
+
+ public:
+ INSTRUCTION_HEADER(LoadTypedArrayElementStatic)
+ TRIVIAL_NEW_WRAPPERS
+
+ Scalar::Type accessType() const {
+ return someTypedArray_->as<TypedArrayObject>().type();
+ }
+ SharedMem<void*> base() const;
+ size_t length() const;
+
+ MDefinition* ptr() const { return getOperand(0); }
+ int32_t offset() const { return offset_; }
+ void setOffset(int32_t offset) { offset_ = offset; }
+ bool congruentTo(const MDefinition* ins) const override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+ void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
+
+ bool fallible() const {
+ return fallible_;
+ }
+
+ void setInfallible() {
+ fallible_ = false;
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool needTruncation(TruncateKind kind) override;
+ bool canProduceFloat32() const override { return accessType() == Scalar::Float32; }
+ void collectRangeInfoPreTrunc() override;
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(someTypedArray_);
+ }
+};
+
+// Base class for MIR ops that write unboxed scalar values.
+class StoreUnboxedScalarBase
+{
+ Scalar::Type writeType_;
+
+ protected:
+ explicit StoreUnboxedScalarBase(Scalar::Type writeType)
+ : writeType_(writeType)
+ {
+ MOZ_ASSERT(isIntegerWrite() || isFloatWrite() || isSimdWrite());
+ }
+
+ public:
+ void setWriteType(Scalar::Type type) {
+ writeType_ = type;
+ }
+ Scalar::Type writeType() const {
+ return writeType_;
+ }
+ bool isByteWrite() const {
+ return writeType_ == Scalar::Int8 ||
+ writeType_ == Scalar::Uint8 ||
+ writeType_ == Scalar::Uint8Clamped;
+ }
+ bool isIntegerWrite() const {
+        return isByteWrite() ||
+ writeType_ == Scalar::Int16 ||
+ writeType_ == Scalar::Uint16 ||
+ writeType_ == Scalar::Int32 ||
+ writeType_ == Scalar::Uint32;
+ }
+ bool isFloatWrite() const {
+ return writeType_ == Scalar::Float32 ||
+ writeType_ == Scalar::Float64;
+ }
+ bool isSimdWrite() const {
+ return Scalar::isSimdType(writeType());
+ }
+};
+
+// Store an unboxed scalar value to a typed array or other object.
+class MStoreUnboxedScalar
+ : public MTernaryInstruction,
+ public StoreUnboxedScalarBase,
+ public StoreUnboxedScalarPolicy::Data
+{
+ public:
+ enum TruncateInputKind {
+ DontTruncateInput,
+ TruncateInput
+ };
+
+ private:
+ Scalar::Type storageType_;
+
+ // Whether this store truncates out of range inputs, for use by range analysis.
+ TruncateInputKind truncateInput_;
+
+ bool requiresBarrier_;
+ int32_t offsetAdjustment_;
+ unsigned numElems_; // used only for SIMD
+
+ MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value,
+ Scalar::Type storageType, TruncateInputKind truncateInput,
+ MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+ int32_t offsetAdjustment = 0)
+ : MTernaryInstruction(elements, index, value),
+ StoreUnboxedScalarBase(storageType),
+ storageType_(storageType),
+ truncateInput_(truncateInput),
+ requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+ offsetAdjustment_(offsetAdjustment),
+ numElems_(1)
+ {
+ if (requiresBarrier_)
+ setGuard(); // Not removable or movable
+ else
+ setMovable();
+ MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreUnboxedScalar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ void setSimdWrite(Scalar::Type writeType, unsigned numElems) {
+ MOZ_ASSERT(Scalar::isSimdType(writeType));
+ setWriteType(writeType);
+ numElems_ = numElems;
+ }
+ unsigned numElems() const {
+ return numElems_;
+ }
+ Scalar::Type storageType() const {
+ return storageType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ TruncateInputKind truncateInput() const {
+ return truncateInput_;
+ }
+ bool requiresMemoryBarrier() const {
+ return requiresBarrier_;
+ }
+ int32_t offsetAdjustment() const {
+ return offsetAdjustment_;
+ }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(2) && writeType() == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MStoreUnboxedScalar)
+};
+
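+// Store a value to a typed array element, checking the index against the length
+// operand so that out-of-bounds writes are ignored.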
+class MStoreTypedArrayElementHole
+ : public MAryInstruction<4>,
+ public StoreUnboxedScalarBase,
+ public StoreTypedArrayHolePolicy::Data
+{
+ MStoreTypedArrayElementHole(MDefinition* elements, MDefinition* length, MDefinition* index,
+ MDefinition* value, Scalar::Type arrayType)
+ : MAryInstruction<4>(),
+ StoreUnboxedScalarBase(arrayType)
+ {
+ initOperand(0, elements);
+ initOperand(1, length);
+ initOperand(2, index);
+ initOperand(3, value);
+ setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreTypedArrayElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
+
+ Scalar::Type arrayType() const {
+ MOZ_ASSERT(!Scalar::isSimdType(writeType()),
+ "arrayType == writeType iff the write type isn't SIMD");
+ return writeType();
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(3) && arrayType() == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MStoreTypedArrayElementHole)
+};
+
+// Store a value infallibly to a statically known typed array.
+class MStoreTypedArrayElementStatic :
+ public MBinaryInstruction,
+ public StoreUnboxedScalarBase,
+ public StoreTypedArrayElementStaticPolicy::Data
+{
+ MStoreTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr, MDefinition* v,
+ int32_t offset = 0, bool needsBoundsCheck = true)
+ : MBinaryInstruction(ptr, v),
+ StoreUnboxedScalarBase(someTypedArray->as<TypedArrayObject>().type()),
+ someTypedArray_(someTypedArray),
+ offset_(offset), needsBoundsCheck_(needsBoundsCheck)
+ {}
+
+ CompilerObject someTypedArray_;
+
+    // An offset to be encoded in the store instruction, taking advantage of the
+    // addressing modes. This is only non-zero when the access is proven to be
+    // within bounds.
+ int32_t offset_;
+ bool needsBoundsCheck_;
+
+ public:
+ INSTRUCTION_HEADER(StoreTypedArrayElementStatic)
+ TRIVIAL_NEW_WRAPPERS
+
+ Scalar::Type accessType() const {
+ return writeType();
+ }
+
+ SharedMem<void*> base() const;
+ size_t length() const;
+
+ MDefinition* ptr() const { return getOperand(0); }
+ MDefinition* value() const { return getOperand(1); }
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+ void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
+ int32_t offset() const { return offset_; }
+ void setOffset(int32_t offset) { offset_ = offset; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(1) && accessType() == Scalar::Float32;
+ }
+ void collectRangeInfoPreTrunc() override;
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(someTypedArray_);
+ }
+};
+
+// Compute an "effective address", i.e., a compound computation of the form:
+// base + index * scale + displacement
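+// For example, scale = TimesFour and displacement = 8 yields base + index * 4 + 8.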
+class MEffectiveAddress
+ : public MBinaryInstruction,
+ public NoTypePolicy::Data
+{
+ MEffectiveAddress(MDefinition* base, MDefinition* index, Scale scale, int32_t displacement)
+ : MBinaryInstruction(base, index), scale_(scale), displacement_(displacement)
+ {
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ setMovable();
+ setResultType(MIRType::Int32);
+ }
+
+ Scale scale_;
+ int32_t displacement_;
+
+ public:
+ INSTRUCTION_HEADER(EffectiveAddress)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* base() const {
+ return lhs();
+ }
+ MDefinition* index() const {
+ return rhs();
+ }
+ Scale scale() const {
+ return scale_;
+ }
+ int32_t displacement() const {
+ return displacement_;
+ }
+
+ ALLOW_CLONE(MEffectiveAddress)
+};
+
+// Clamp input to range [0, 255] for Uint8ClampedArray.
+class MClampToUint8
+ : public MUnaryInstruction,
+ public ClampPolicy::Data
+{
+ explicit MClampToUint8(MDefinition* input)
+ : MUnaryInstruction(input)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ClampToUint8)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MClampToUint8)
+};
+
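+// Load a value from an object's fixed (in-object) slot.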
+class MLoadFixedSlot
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ size_t slot_;
+
+ protected:
+ MLoadFixedSlot(MDefinition* obj, size_t slot)
+ : MUnaryInstruction(obj), slot_(slot)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadFixedSlot)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ size_t slot() const {
+ return slot_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadFixedSlot())
+ return false;
+ if (slot() != ins->toLoadFixedSlot()->slot())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::FixedSlot);
+ }
+
+ AliasType mightAlias(const MDefinition* store) const override;
+
+ ALLOW_CLONE(MLoadFixedSlot)
+};
+
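+// Load a value from an object's fixed slot and unbox it to a specific type,
+// bailing out when a fallible unbox fails.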
+class MLoadFixedSlotAndUnbox
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ size_t slot_;
+ MUnbox::Mode mode_;
+ BailoutKind bailoutKind_;
+ protected:
+ MLoadFixedSlotAndUnbox(MDefinition* obj, size_t slot, MUnbox::Mode mode, MIRType type,
+ BailoutKind kind)
+ : MUnaryInstruction(obj), slot_(slot), mode_(mode), bailoutKind_(kind)
+ {
+ setResultType(type);
+ setMovable();
+ if (mode_ == MUnbox::TypeBarrier || mode_ == MUnbox::Fallible)
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadFixedSlotAndUnbox)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ size_t slot() const {
+ return slot_;
+ }
+ MUnbox::Mode mode() const {
+ return mode_;
+ }
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ bool fallible() const {
+ return mode_ != MUnbox::Infallible;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadFixedSlotAndUnbox() ||
+ slot() != ins->toLoadFixedSlotAndUnbox()->slot() ||
+ mode() != ins->toLoadFixedSlotAndUnbox()->mode())
+ {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::FixedSlot);
+ }
+
+ AliasType mightAlias(const MDefinition* store) const override;
+
+    ALLOW_CLONE(MLoadFixedSlotAndUnbox)
+};
+
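+// Store a value to an object's fixed (in-object) slot, with an optional GC barrier.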
+class MStoreFixedSlot
+ : public MBinaryInstruction,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+ bool needsBarrier_;
+ size_t slot_;
+
+ MStoreFixedSlot(MDefinition* obj, MDefinition* rval, size_t slot, bool barrier)
+ : MBinaryInstruction(obj, rval),
+ needsBarrier_(barrier),
+ slot_(slot)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(StoreFixedSlot)
+ NAMED_OPERANDS((0, object), (1, value))
+
+ static MStoreFixedSlot* New(TempAllocator& alloc, MDefinition* obj, size_t slot,
+ MDefinition* rval)
+ {
+ return new(alloc) MStoreFixedSlot(obj, rval, slot, false);
+ }
+ static MStoreFixedSlot* NewBarriered(TempAllocator& alloc, MDefinition* obj, size_t slot,
+ MDefinition* rval)
+ {
+ return new(alloc) MStoreFixedSlot(obj, rval, slot, true);
+ }
+
+ size_t slot() const {
+ return slot_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::FixedSlot);
+ }
+ bool needsBarrier() const {
+ return needsBarrier_;
+ }
+ void setNeedsBarrier(bool needsBarrier = true) {
+ needsBarrier_ = needsBarrier;
+ }
+
+ ALLOW_CLONE(MStoreFixedSlot)
+};
+
+typedef Vector<JSObject*, 4, JitAllocPolicy> ObjectVector;
+typedef Vector<bool, 4, JitAllocPolicy> BoolVector;
+
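+// Table of (ObjectGroup, JSFunction) pairs observed at a property access site,
+// used to guide polymorphic call inlining.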
+class InlinePropertyTable : public TempObject
+{
+ struct Entry : public TempObject {
+ CompilerObjectGroup group;
+ CompilerFunction func;
+
+ Entry(ObjectGroup* group, JSFunction* func)
+ : group(group), func(func)
+ { }
+ bool appendRoots(MRootList& roots) const {
+ return roots.append(group) && roots.append(func);
+ }
+ };
+
+ jsbytecode* pc_;
+ MResumePoint* priorResumePoint_;
+ Vector<Entry*, 4, JitAllocPolicy> entries_;
+
+ public:
+ InlinePropertyTable(TempAllocator& alloc, jsbytecode* pc)
+ : pc_(pc), priorResumePoint_(nullptr), entries_(alloc)
+ { }
+
+ void setPriorResumePoint(MResumePoint* resumePoint) {
+ MOZ_ASSERT(priorResumePoint_ == nullptr);
+ priorResumePoint_ = resumePoint;
+ }
+ bool hasPriorResumePoint() { return bool(priorResumePoint_); }
+ MResumePoint* takePriorResumePoint() {
+ MResumePoint* rp = priorResumePoint_;
+ priorResumePoint_ = nullptr;
+ return rp;
+ }
+
+ jsbytecode* pc() const {
+ return pc_;
+ }
+
+ MOZ_MUST_USE bool addEntry(TempAllocator& alloc, ObjectGroup* group, JSFunction* func) {
+ return entries_.append(new(alloc) Entry(group, func));
+ }
+
+ size_t numEntries() const {
+ return entries_.length();
+ }
+
+ ObjectGroup* getObjectGroup(size_t i) const {
+ MOZ_ASSERT(i < numEntries());
+ return entries_[i]->group;
+ }
+
+ JSFunction* getFunction(size_t i) const {
+ MOZ_ASSERT(i < numEntries());
+ return entries_[i]->func;
+ }
+
+ bool hasFunction(JSFunction* func) const;
+ bool hasObjectGroup(ObjectGroup* group) const;
+
+ TemporaryTypeSet* buildTypeSetForFunction(JSFunction* func) const;
+
+ // Remove targets that vetoed inlining from the InlinePropertyTable.
+ void trimTo(const ObjectVector& targets, const BoolVector& choiceSet);
+
+ // Ensure that the InlinePropertyTable's domain is a subset of |targets|.
+ void trimToTargets(const ObjectVector& targets);
+
+ bool appendRoots(MRootList& roots) const;
+};
+
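+// List node recording a (pc, script) location associated with a property cache.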
+class CacheLocationList : public InlineConcatList<CacheLocationList>
+{
+ public:
+ CacheLocationList()
+ : pc(nullptr),
+ script(nullptr)
+ { }
+
+ jsbytecode* pc;
+ JSScript* script;
+};
+
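+// Inline cache for reading a property; the id operand allows both named and
+// indexed accesses to share the same cache.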
+class MGetPropertyCache
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, CacheIdPolicy<1>>::Data
+{
+ bool idempotent_ : 1;
+ bool monitoredResult_ : 1;
+
+ CacheLocationList location_;
+
+ InlinePropertyTable* inlinePropertyTable_;
+
+ MGetPropertyCache(MDefinition* obj, MDefinition* id, bool monitoredResult)
+ : MBinaryInstruction(obj, id),
+ idempotent_(false),
+ monitoredResult_(monitoredResult),
+ location_(),
+ inlinePropertyTable_(nullptr)
+ {
+ setResultType(MIRType::Value);
+
+ // The cache will invalidate if there are objects with e.g. lookup or
+ // resolve hooks on the proto chain. setGuard ensures this check is not
+ // eliminated.
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetPropertyCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, idval))
+
+ InlinePropertyTable* initInlinePropertyTable(TempAllocator& alloc, jsbytecode* pc) {
+ MOZ_ASSERT(inlinePropertyTable_ == nullptr);
+ inlinePropertyTable_ = new(alloc) InlinePropertyTable(alloc, pc);
+ return inlinePropertyTable_;
+ }
+
+ void clearInlinePropertyTable() {
+ inlinePropertyTable_ = nullptr;
+ }
+
+ InlinePropertyTable* propTable() const {
+ return inlinePropertyTable_;
+ }
+
+ bool idempotent() const {
+ return idempotent_;
+ }
+ void setIdempotent() {
+ idempotent_ = true;
+ setMovable();
+ }
+ bool monitoredResult() const {
+ return monitoredResult_;
+ }
+ CacheLocationList& location() {
+ return location_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!idempotent_)
+ return false;
+ if (!ins->isGetPropertyCache())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ if (idempotent_) {
+ return AliasSet::Load(AliasSet::ObjectFields |
+ AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+ }
+ return AliasSet::Store(AliasSet::Any);
+ }
+
+ void setBlock(MBasicBlock* block) override;
+ MOZ_MUST_USE bool updateForReplacement(MDefinition* ins) override;
+
+ bool allowDoubleResult() const;
+
+ bool appendRoots(MRootList& roots) const override {
+ if (inlinePropertyTable_)
+ return inlinePropertyTable_->appendRoots(roots);
+ return true;
+ }
+};
+
+struct PolymorphicEntry {
+ // The group and/or shape to guard against.
+ ReceiverGuard receiver;
+
+ // The property to load, null for loads from unboxed properties.
+ Shape* shape;
+
+ bool appendRoots(MRootList& roots) const {
+ return roots.append(receiver) && roots.append(shape);
+ }
+};
+
+// Emit code to load a value from an object if it matches one of the receivers
+// observed by the baseline IC, bailing out otherwise.
+class MGetPropertyPolymorphic
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
+ CompilerPropertyName name_;
+
+ MGetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, PropertyName* name)
+ : MUnaryInstruction(obj),
+ receivers_(alloc),
+ name_(name)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetPropertyPolymorphic)
+ NAMED_OPERANDS((0, object))
+
+ static MGetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, PropertyName* name) {
+ return new(alloc) MGetPropertyPolymorphic(alloc, obj, name);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGetPropertyPolymorphic())
+ return false;
+ if (name() != ins->toGetPropertyPolymorphic()->name())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
+ PolymorphicEntry entry;
+ entry.receiver = receiver;
+ entry.shape = shape;
+ return receivers_.append(entry);
+ }
+ size_t numReceivers() const {
+ return receivers_.length();
+ }
+ const ReceiverGuard receiver(size_t i) const {
+ return receivers_[i].receiver;
+ }
+ Shape* shape(size_t i) const {
+ return receivers_[i].shape;
+ }
+ PropertyName* name() const {
+ return name_;
+ }
+ AliasSet getAliasSet() const override {
+ bool hasUnboxedLoad = false;
+ for (size_t i = 0; i < numReceivers(); i++) {
+ if (!shape(i)) {
+ hasUnboxedLoad = true;
+ break;
+ }
+ }
+ return AliasSet::Load(AliasSet::ObjectFields |
+ AliasSet::FixedSlot |
+ AliasSet::DynamicSlot |
+ (hasUnboxedLoad ? AliasSet::UnboxedElement : 0));
+ }
+
+ AliasType mightAlias(const MDefinition* store) const override;
+
+ bool appendRoots(MRootList& roots) const override;
+};
+
+// Emit code to store a value to an object's slots if its shape/group matches
+// one of the shapes/groups observed by the baseline IC, bailing out otherwise.
+class MSetPropertyPolymorphic
+ : public MBinaryInstruction,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+ Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
+ CompilerPropertyName name_;
+ bool needsBarrier_;
+
+ MSetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
+ PropertyName* name)
+ : MBinaryInstruction(obj, value),
+ receivers_(alloc),
+ name_(name),
+ needsBarrier_(false)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetPropertyPolymorphic)
+ NAMED_OPERANDS((0, object), (1, value))
+
+ static MSetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
+ PropertyName* name) {
+ return new(alloc) MSetPropertyPolymorphic(alloc, obj, value, name);
+ }
+
+ MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
+ PolymorphicEntry entry;
+ entry.receiver = receiver;
+ entry.shape = shape;
+ return receivers_.append(entry);
+ }
+ size_t numReceivers() const {
+ return receivers_.length();
+ }
+ const ReceiverGuard& receiver(size_t i) const {
+ return receivers_[i].receiver;
+ }
+ Shape* shape(size_t i) const {
+ return receivers_[i].shape;
+ }
+ PropertyName* name() const {
+ return name_;
+ }
+ bool needsBarrier() const {
+ return needsBarrier_;
+ }
+ void setNeedsBarrier() {
+ needsBarrier_ = true;
+ }
+ AliasSet getAliasSet() const override {
+ bool hasUnboxedStore = false;
+ for (size_t i = 0; i < numReceivers(); i++) {
+ if (!shape(i)) {
+ hasUnboxedStore = true;
+ break;
+ }
+ }
+ return AliasSet::Store(AliasSet::ObjectFields |
+ AliasSet::FixedSlot |
+ AliasSet::DynamicSlot |
+ (hasUnboxedStore ? AliasSet::UnboxedElement : 0));
+ }
+ bool appendRoots(MRootList& roots) const override;
+};
+
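+// Base class for control instructions that dispatch on the input function (or its
+// ObjectGroup) to one of several target blocks, with an optional fallback block.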
+class MDispatchInstruction
+ : public MControlInstruction,
+ public SingleObjectPolicy::Data
+{
+ // Map from JSFunction* -> MBasicBlock.
+ struct Entry {
+ JSFunction* func;
+ // If |func| has a singleton group, |funcGroup| is null. Otherwise,
+ // |funcGroup| holds the ObjectGroup for |func|, and dispatch guards
+ // on the group instead of directly on the function.
+ ObjectGroup* funcGroup;
+ MBasicBlock* block;
+
+ Entry(JSFunction* func, ObjectGroup* funcGroup, MBasicBlock* block)
+ : func(func), funcGroup(funcGroup), block(block)
+ { }
+ bool appendRoots(MRootList& roots) const {
+ return roots.append(func) && roots.append(funcGroup);
+ }
+ };
+ Vector<Entry, 4, JitAllocPolicy> map_;
+
+ // An optional fallback path that uses MCall.
+ MBasicBlock* fallback_;
+ MUse operand_;
+
+ void initOperand(size_t index, MDefinition* operand) {
+ MOZ_ASSERT(index == 0);
+ operand_.init(operand, this);
+ }
+
+ public:
+ NAMED_OPERANDS((0, input))
+ MDispatchInstruction(TempAllocator& alloc, MDefinition* input)
+ : map_(alloc), fallback_(nullptr)
+ {
+ initOperand(0, input);
+ }
+
+ protected:
+ MUse* getUseFor(size_t index) final override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+ const MUse* getUseFor(size_t index) const final override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+ MDefinition* getOperand(size_t index) const final override {
+ MOZ_ASSERT(index == 0);
+ return operand_.producer();
+ }
+ size_t numOperands() const final override {
+ return 1;
+ }
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u == getUseFor(0));
+ return 0;
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ MOZ_ASSERT(index == 0);
+ operand_.replaceProducer(operand);
+ }
+
+ public:
+ void setSuccessor(size_t i, MBasicBlock* successor) {
+ MOZ_ASSERT(i < numSuccessors());
+ if (i == map_.length())
+ fallback_ = successor;
+ else
+ map_[i].block = successor;
+ }
+ size_t numSuccessors() const final override {
+ return map_.length() + (fallback_ ? 1 : 0);
+ }
+ void replaceSuccessor(size_t i, MBasicBlock* successor) final override {
+ setSuccessor(i, successor);
+ }
+ MBasicBlock* getSuccessor(size_t i) const final override {
+ MOZ_ASSERT(i < numSuccessors());
+ if (i == map_.length())
+ return fallback_;
+ return map_[i].block;
+ }
+
+ public:
+ MOZ_MUST_USE bool addCase(JSFunction* func, ObjectGroup* funcGroup, MBasicBlock* block) {
+ return map_.append(Entry(func, funcGroup, block));
+ }
+ uint32_t numCases() const {
+ return map_.length();
+ }
+ JSFunction* getCase(uint32_t i) const {
+ return map_[i].func;
+ }
+ ObjectGroup* getCaseObjectGroup(uint32_t i) const {
+ return map_[i].funcGroup;
+ }
+ MBasicBlock* getCaseBlock(uint32_t i) const {
+ return map_[i].block;
+ }
+
+ bool hasFallback() const {
+ return bool(fallback_);
+ }
+ void addFallback(MBasicBlock* block) {
+ MOZ_ASSERT(!hasFallback());
+ fallback_ = block;
+ }
+ MBasicBlock* getFallback() const {
+ MOZ_ASSERT(hasFallback());
+ return fallback_;
+ }
+ bool appendRoots(MRootList& roots) const override;
+};
+
+// Polymorphic dispatch for inlining, keyed off incoming ObjectGroup.
+class MObjectGroupDispatch : public MDispatchInstruction
+{
+ // Map ObjectGroup (of CallProp's Target Object) -> JSFunction (yielded by the CallProp).
+ InlinePropertyTable* inlinePropertyTable_;
+
+ MObjectGroupDispatch(TempAllocator& alloc, MDefinition* input, InlinePropertyTable* table)
+ : MDispatchInstruction(alloc, input),
+ inlinePropertyTable_(table)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(ObjectGroupDispatch)
+
+ static MObjectGroupDispatch* New(TempAllocator& alloc, MDefinition* ins,
+ InlinePropertyTable* table)
+ {
+ return new(alloc) MObjectGroupDispatch(alloc, ins, table);
+ }
+
+ InlinePropertyTable* propTable() const {
+ return inlinePropertyTable_;
+ }
+ bool appendRoots(MRootList& roots) const override;
+};
+
+// Polymorphic dispatch for inlining, keyed off incoming JSFunction*.
+class MFunctionDispatch : public MDispatchInstruction
+{
+ MFunctionDispatch(TempAllocator& alloc, MDefinition* input)
+ : MDispatchInstruction(alloc, input)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(FunctionDispatch)
+
+ static MFunctionDispatch* New(TempAllocator& alloc, MDefinition* ins) {
+ return new(alloc) MFunctionDispatch(alloc, ins);
+ }
+ bool appendRoots(MRootList& roots) const override;
+};
+
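+// Inline cache resolving the environment object on which a name should be bound.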
+class MBindNameCache
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ CompilerPropertyName name_;
+ CompilerScript script_;
+ jsbytecode* pc_;
+
+ MBindNameCache(MDefinition* envChain, PropertyName* name, JSScript* script, jsbytecode* pc)
+ : MUnaryInstruction(envChain), name_(name), script_(script), pc_(pc)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BindNameCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain))
+
+ PropertyName* name() const {
+ return name_;
+ }
+ JSScript* script() const {
+ return script_;
+ }
+ jsbytecode* pc() const {
+ return pc_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+        // Don't append the script; all scripts are added anyway.
+ return roots.append(name_);
+ }
+};
+
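+// Find the nearest 'var' environment object on the environment chain.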
+class MCallBindVar
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MCallBindVar(MDefinition* envChain)
+ : MUnaryInstruction(envChain)
+ {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallBindVar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isCallBindVar())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Guard on an object's shape.
+class MGuardShape
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ CompilerShape shape_;
+ BailoutKind bailoutKind_;
+
+ MGuardShape(MDefinition* obj, Shape* shape, BailoutKind bailoutKind)
+ : MUnaryInstruction(obj),
+ shape_(shape),
+ bailoutKind_(bailoutKind)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ setResultTypeSet(obj->resultTypeSet());
+
+ // Disallow guarding on unboxed object shapes. The group is better to
+ // guard on, and guarding on the shape can interact badly with
+ // MConvertUnboxedObjectToNative.
+ MOZ_ASSERT(shape->getObjectClass() != &UnboxedPlainObject::class_);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardShape)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const Shape* shape() const {
+ return shape_;
+ }
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardShape())
+ return false;
+ if (shape() != ins->toGuardShape()->shape())
+ return false;
+ if (bailoutKind() != ins->toGuardShape()->bailoutKind())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(shape_);
+ }
+};
+
+// Bail if the object's shape or unboxed group is not in the input list.
+class MGuardReceiverPolymorphic
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ Vector<ReceiverGuard, 4, JitAllocPolicy> receivers_;
+
+ MGuardReceiverPolymorphic(TempAllocator& alloc, MDefinition* obj)
+ : MUnaryInstruction(obj),
+ receivers_(alloc)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ setResultTypeSet(obj->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardReceiverPolymorphic)
+ NAMED_OPERANDS((0, object))
+
+ static MGuardReceiverPolymorphic* New(TempAllocator& alloc, MDefinition* obj) {
+ return new(alloc) MGuardReceiverPolymorphic(alloc, obj);
+ }
+
+ MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver) {
+ return receivers_.append(receiver);
+ }
+ size_t numReceivers() const {
+ return receivers_.length();
+ }
+ const ReceiverGuard& receiver(size_t i) const {
+ return receivers_[i];
+ }
+
+ bool congruentTo(const MDefinition* ins) const override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ bool appendRoots(MRootList& roots) const override;
+
+};
+
+// Guard on an object's group, inclusively or exclusively.
+class MGuardObjectGroup
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ CompilerObjectGroup group_;
+ bool bailOnEquality_;
+ BailoutKind bailoutKind_;
+
+ MGuardObjectGroup(MDefinition* obj, ObjectGroup* group, bool bailOnEquality,
+ BailoutKind bailoutKind)
+ : MUnaryInstruction(obj),
+ group_(group),
+ bailOnEquality_(bailOnEquality),
+ bailoutKind_(bailoutKind)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+
+ // Unboxed groups which might be converted to natives can't be guarded
+ // on, due to MConvertUnboxedObjectToNative.
+ MOZ_ASSERT_IF(group->maybeUnboxedLayoutDontCheckGeneration(),
+ !group->unboxedLayoutDontCheckGeneration().nativeGroup());
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardObjectGroup)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const ObjectGroup* group() const {
+ return group_;
+ }
+ bool bailOnEquality() const {
+ return bailOnEquality_;
+ }
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardObjectGroup())
+ return false;
+ if (group() != ins->toGuardObjectGroup()->group())
+ return false;
+ if (bailOnEquality() != ins->toGuardObjectGroup()->bailOnEquality())
+ return false;
+ if (bailoutKind() != ins->toGuardObjectGroup()->bailoutKind())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(group_);
+ }
+};
+
+// Guard on an object's identity, inclusively or exclusively.
+class MGuardObjectIdentity
+ : public MBinaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool bailOnEquality_;
+
+ MGuardObjectIdentity(MDefinition* obj, MDefinition* expected, bool bailOnEquality)
+ : MBinaryInstruction(obj, expected),
+ bailOnEquality_(bailOnEquality)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardObjectIdentity)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, expected))
+
+ bool bailOnEquality() const {
+ return bailOnEquality_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardObjectIdentity())
+ return false;
+ if (bailOnEquality() != ins->toGuardObjectIdentity()->bailOnEquality())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Guard on an object's class.
+class MGuardClass
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ const Class* class_;
+
+ MGuardClass(MDefinition* obj, const Class* clasp)
+ : MUnaryInstruction(obj),
+ class_(clasp)
+ {
+ setGuard();
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const Class* getClass() const {
+ return class_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardClass())
+ return false;
+ if (getClass() != ins->toGuardClass()->getClass())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MGuardClass)
+};
+
+// Guard on the presence or absence of an unboxed object's expando.
+class MGuardUnboxedExpando
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ bool requireExpando_;
+ BailoutKind bailoutKind_;
+
+ MGuardUnboxedExpando(MDefinition* obj, bool requireExpando, BailoutKind bailoutKind)
+ : MUnaryInstruction(obj),
+ requireExpando_(requireExpando),
+ bailoutKind_(bailoutKind)
+ {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardUnboxedExpando)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool requireExpando() const {
+ return requireExpando_;
+ }
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins))
+ return false;
+ if (requireExpando() != ins->toGuardUnboxedExpando()->requireExpando())
+ return false;
+ return true;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Load an unboxed plain object's expando.
+class MLoadUnboxedExpando
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ private:
+ explicit MLoadUnboxedExpando(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadUnboxedExpando)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Load from vp[slot] (slots that are not inline in an object).
+class MLoadSlot
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ uint32_t slot_;
+
+ MLoadSlot(MDefinition* slots, uint32_t slot)
+ : MUnaryInstruction(slots),
+ slot_(slot)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(slots->type() == MIRType::Slots);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadSlot)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, slots))
+
+ uint32_t slot() const {
+ return slot_;
+ }
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadSlot())
+ return false;
+ if (slot() != ins->toLoadSlot()->slot())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ MOZ_ASSERT(slots()->type() == MIRType::Slots);
+ return AliasSet::Load(AliasSet::DynamicSlot);
+ }
+ AliasType mightAlias(const MDefinition* store) const override;
+
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MLoadSlot)
+};
+
+// Inline call to access a function's environment (scope chain).
+class MFunctionEnvironment
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MFunctionEnvironment(MDefinition* function)
+ : MUnaryInstruction(function)
+ {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(FunctionEnvironment)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, function))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ // A function's environment is fixed.
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Store to vp[slot] (slots that are not inline in an object).
+class MStoreSlot
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >::Data
+{
+ uint32_t slot_;
+ MIRType slotType_;
+ bool needsBarrier_;
+
+ MStoreSlot(MDefinition* slots, uint32_t slot, MDefinition* value, bool barrier)
+ : MBinaryInstruction(slots, value),
+ slot_(slot),
+ slotType_(MIRType::Value),
+ needsBarrier_(barrier)
+ {
+ MOZ_ASSERT(slots->type() == MIRType::Slots);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreSlot)
+ NAMED_OPERANDS((0, slots), (1, value))
+
+ static MStoreSlot* New(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
+ MDefinition* value)
+ {
+ return new(alloc) MStoreSlot(slots, slot, value, false);
+ }
+ static MStoreSlot* NewBarriered(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
+ MDefinition* value)
+ {
+ return new(alloc) MStoreSlot(slots, slot, value, true);
+ }
+
+ uint32_t slot() const {
+ return slot_;
+ }
+ MIRType slotType() const {
+ return slotType_;
+ }
+ void setSlotType(MIRType slotType) {
+ MOZ_ASSERT(slotType != MIRType::None);
+ slotType_ = slotType;
+ }
+ bool needsBarrier() const {
+ return needsBarrier_;
+ }
+ void setNeedsBarrier() {
+ needsBarrier_ = true;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::DynamicSlot);
+ }
+ void printOpcode(GenericPrinter& out) const override;
+
+ ALLOW_CLONE(MStoreSlot)
+};
+
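+// Inline cache for a name lookup on the environment chain (with a typeof variant).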
+class MGetNameCache
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ public:
+ enum AccessKind {
+ NAMETYPEOF,
+ NAME
+ };
+
+ private:
+ CompilerPropertyName name_;
+ AccessKind kind_;
+
+ MGetNameCache(MDefinition* obj, PropertyName* name, AccessKind kind)
+ : MUnaryInstruction(obj),
+ name_(name),
+ kind_(kind)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetNameCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, envObj))
+
+ PropertyName* name() const {
+ return name_;
+ }
+ AccessKind accessKind() const {
+ return kind_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
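+// Fetch a self-hosting intrinsic value by name.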
+class MCallGetIntrinsicValue : public MNullaryInstruction
+{
+ CompilerPropertyName name_;
+
+ explicit MCallGetIntrinsicValue(PropertyName* name)
+ : name_(name)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallGetIntrinsicValue)
+ TRIVIAL_NEW_WRAPPERS
+
+ PropertyName* name() const {
+ return name_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
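+// Base class for instructions assigning to a named property; carries the
+// property name and strictness flag.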
+class MSetPropertyInstruction : public MBinaryInstruction
+{
+ CompilerPropertyName name_;
+ bool strict_;
+
+ protected:
+ MSetPropertyInstruction(MDefinition* obj, MDefinition* value, PropertyName* name,
+ bool strict)
+ : MBinaryInstruction(obj, value),
+ name_(name), strict_(strict)
+ {}
+
+ public:
+ NAMED_OPERANDS((0, object), (1, value))
+ PropertyName* name() const {
+ return name_;
+ }
+ bool strict() const {
+ return strict_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
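+// Base class for instructions assigning to an element; carries the strictness flag.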
+class MSetElementInstruction
+ : public MTernaryInstruction
+{
+ bool strict_;
+ protected:
+ MSetElementInstruction(MDefinition* object, MDefinition* index, MDefinition* value, bool strict)
+ : MTernaryInstruction(object, index, value),
+ strict_(strict)
+ {
+ }
+
+ public:
+ NAMED_OPERANDS((0, object), (1, index), (2, value))
+ bool strict() const {
+ return strict_;
+ }
+};
+
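+// Implementation of the |delete val.name| operator.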
+class MDeleteProperty
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ CompilerPropertyName name_;
+ bool strict_;
+
+ protected:
+ MDeleteProperty(MDefinition* val, PropertyName* name, bool strict)
+ : MUnaryInstruction(val),
+ name_(name),
+ strict_(strict)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(DeleteProperty)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ PropertyName* name() const {
+ return name_;
+ }
+ bool strict() const {
+ return strict_;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
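+// Implementation of the |delete val[index]| operator.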
+class MDeleteElement
+ : public MBinaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ bool strict_;
+
+ MDeleteElement(MDefinition* value, MDefinition* index, bool strict)
+ : MBinaryInstruction(value, index),
+ strict_(strict)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(DeleteElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, index))
+
+ bool strict() const {
+ return strict_;
+ }
+};
+
+// Note: This uses CallSetElementPolicy to always box its second input,
+// ensuring we don't need two LIR instructions to lower this.
+class MCallSetProperty
+ : public MSetPropertyInstruction,
+ public CallSetElementPolicy::Data
+{
+ MCallSetProperty(MDefinition* obj, MDefinition* value, PropertyName* name, bool strict)
+ : MSetPropertyInstruction(obj, value, name, strict)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallSetProperty)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
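+// Inline cache for a property or element write.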
+class MSetPropertyCache
+ : public MTernaryInstruction,
+ public Mix3Policy<SingleObjectPolicy, CacheIdPolicy<1>, NoFloatPolicy<2>>::Data
+{
+ bool strict_ : 1;
+ bool needsTypeBarrier_ : 1;
+ bool guardHoles_ : 1;
+
+ MSetPropertyCache(MDefinition* obj, MDefinition* id, MDefinition* value, bool strict,
+ bool typeBarrier, bool guardHoles)
+ : MTernaryInstruction(obj, id, value),
+ strict_(strict),
+ needsTypeBarrier_(typeBarrier),
+ guardHoles_(guardHoles)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetPropertyCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, idval), (2, value))
+
+ bool needsTypeBarrier() const {
+ return needsTypeBarrier_;
+ }
+
+ bool guardHoles() const {
+ return guardHoles_;
+ }
+
+ bool strict() const {
+ return strict_;
+ }
+};
+
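+// VM call that fetches a named property from an arbitrary (boxed) value.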
+class MCallGetProperty
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ CompilerPropertyName name_;
+ bool idempotent_;
+
+ MCallGetProperty(MDefinition* value, PropertyName* name)
+ : MUnaryInstruction(value), name_(name),
+ idempotent_(false)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallGetProperty)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ PropertyName* name() const {
+ return name_;
+ }
+
+ // Constructors need to perform a GetProp on the function prototype.
+ // Since getters cannot be set on the prototype, fetching is non-effectful.
+ // The operation may be safely repeated in case of bailout.
+ void setIdempotent() {
+ idempotent_ = true;
+ }
+ AliasSet getAliasSet() const override {
+ if (!idempotent_)
+ return AliasSet::Store(AliasSet::Any);
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(name_);
+ }
+};
+
+// Inline call to handle lhs[rhs]. The first input is a Value so that this
+// instruction can handle both objects and strings.
+class MCallGetElement
+ : public MBinaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ MCallGetElement(MDefinition* lhs, MDefinition* rhs)
+ : MBinaryInstruction(lhs, rhs)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallGetElement)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
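+// VM call implementing the generic lhs[rhs] = value assignment.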
+class MCallSetElement
+ : public MSetElementInstruction,
+ public CallSetElementPolicy::Data
+{
+ MCallSetElement(MDefinition* object, MDefinition* index, MDefinition* value, bool strict)
+ : MSetElementInstruction(object, index, value, strict)
+ {
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallSetElement)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
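+// VM call that initializes the element at a known index of an array literal.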
+class MCallInitElementArray
+ : public MAryInstruction<2>,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+ uint32_t index_;
+
+ MCallInitElementArray(MDefinition* obj, uint32_t index, MDefinition* val)
+ : index_(index)
+ {
+ initOperand(0, obj);
+ initOperand(1, val);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallInitElementArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ uint32_t index() const {
+ return index_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
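+// Call a DOM setter (JSJitSetterOp), passing it the object and the new value.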
+class MSetDOMProperty
+ : public MAryInstruction<2>,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+ const JSJitSetterOp func_;
+
+ MSetDOMProperty(const JSJitSetterOp func, MDefinition* obj, MDefinition* val)
+ : func_(func)
+ {
+ initOperand(0, obj);
+ initOperand(1, val);
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetDOMProperty)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ JSJitSetterOp fun() const {
+ return func_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
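+// Call a DOM getter described by a JSJitInfo; movability and alias information
+// are taken from the jitinfo.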
+class MGetDOMProperty
+ : public MVariadicInstruction,
+ public ObjectPolicy<0>::Data
+{
+ const JSJitInfo* info_;
+
+ protected:
+ explicit MGetDOMProperty(const JSJitInfo* jitinfo)
+ : info_(jitinfo)
+ {
+ MOZ_ASSERT(jitinfo);
+ MOZ_ASSERT(jitinfo->type() == JSJitInfo::Getter);
+
+ // We are movable iff the jitinfo says we can be.
+ if (isDomMovable()) {
+ MOZ_ASSERT(jitinfo->aliasSet() != JSJitInfo::AliasEverything);
+ setMovable();
+ } else {
+ // If we're not movable, that means we shouldn't be DCEd either,
+ // because we might throw an exception when called, and getting rid
+ // of that is observable.
+ setGuard();
+ }
+
+ setResultType(MIRType::Value);
+ }
+
+ const JSJitInfo* info() const {
+ return info_;
+ }
+
+ MOZ_MUST_USE bool init(TempAllocator& alloc, MDefinition* obj, MDefinition* guard,
+ MDefinition* globalGuard) {
+ MOZ_ASSERT(obj);
+ // guard can be null.
+ // globalGuard can be null.
+ size_t operandCount = 1;
+ if (guard)
+ ++operandCount;
+ if (globalGuard)
+ ++operandCount;
+ if (!MVariadicInstruction::init(alloc, operandCount))
+ return false;
+ initOperand(0, obj);
+
+ size_t operandIndex = 1;
+        // Pin the guard, if we have one, as an operand in case we want to hoist later.
+ if (guard)
+ initOperand(operandIndex++, guard);
+
+ // And the same for the global guard, if we have one.
+ if (globalGuard)
+ initOperand(operandIndex, globalGuard);
+
+ return true;
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetDOMProperty)
+ NAMED_OPERANDS((0, object))
+
+ static MGetDOMProperty* New(TempAllocator& alloc, const JSJitInfo* info, MDefinition* obj,
+ MDefinition* guard, MDefinition* globalGuard)
+ {
+ auto* res = new(alloc) MGetDOMProperty(info);
+ if (!res || !res->init(alloc, obj, guard, globalGuard))
+ return nullptr;
+ return res;
+ }
+
+ JSJitGetterOp fun() const {
+ return info_->getter;
+ }
+ bool isInfallible() const {
+ return info_->isInfallible;
+ }
+ bool isDomMovable() const {
+ return info_->isMovable;
+ }
+ JSJitInfo::AliasSet domAliasSet() const {
+ return info_->aliasSet();
+ }
+ size_t domMemberSlotIndex() const {
+ MOZ_ASSERT(info_->isAlwaysInSlot || info_->isLazilyCachedInSlot);
+ return info_->slotIndex;
+ }
+ bool valueMayBeInSlot() const {
+ return info_->isLazilyCachedInSlot;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGetDOMProperty())
+ return false;
+
+ return congruentTo(ins->toGetDOMProperty());
+ }
+
+ bool congruentTo(const MGetDOMProperty* ins) const {
+ if (!isDomMovable())
+ return false;
+
+        // Checking the jitinfo is the same as checking the constant function.
+ if (!(info() == ins->info()))
+ return false;
+
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ JSJitInfo::AliasSet aliasSet = domAliasSet();
+ if (aliasSet == JSJitInfo::AliasNone)
+ return AliasSet::None();
+ if (aliasSet == JSJitInfo::AliasDOMSets)
+ return AliasSet::Load(AliasSet::DOMProperty);
+ MOZ_ASSERT(aliasSet == JSJitInfo::AliasEverything);
+ return AliasSet::Store(AliasSet::Any);
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+class MGetDOMMember : public MGetDOMProperty
+{
+ // We inherit everything from MGetDOMProperty except our
+ // possiblyCalls value and the congruentTo behavior.
+ explicit MGetDOMMember(const JSJitInfo* jitinfo)
+ : MGetDOMProperty(jitinfo)
+ {
+ setResultType(MIRTypeFromValueType(jitinfo->returnType()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetDOMMember)
+
+ static MGetDOMMember* New(TempAllocator& alloc, const JSJitInfo* info, MDefinition* obj,
+ MDefinition* guard, MDefinition* globalGuard)
+ {
+ auto* res = new(alloc) MGetDOMMember(info);
+ if (!res || !res->init(alloc, obj, guard, globalGuard))
+ return nullptr;
+ return res;
+ }
+
+ bool possiblyCalls() const override {
+ return false;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGetDOMMember())
+ return false;
+
+ return MGetDOMProperty::congruentTo(ins->toGetDOMMember());
+ }
+};
+
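+// Read a string's length.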
+class MStringLength
+ : public MUnaryInstruction,
+ public StringPolicy<0>::Data
+{
+ explicit MStringLength(MDefinition* string)
+ : MUnaryInstruction(string)
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+ public:
+ INSTRUCTION_HEADER(StringLength)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ // The string |length| property is immutable, so there is no
+ // implicit dependency.
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MStringLength)
+};
+
+// Inlined version of Math.floor().
+class MFloor
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ explicit MFloor(MDefinition* num)
+ : MUnaryInstruction(num)
+ {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Floor)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool isFloat32Commutative() const override {
+ return true;
+ }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MFloor)
+};
+
+// Inlined version of Math.ceil().
+class MCeil
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ explicit MCeil(MDefinition* num)
+ : MUnaryInstruction(num)
+ {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Ceil)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool isFloat32Commutative() const override {
+ return true;
+ }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ void computeRange(TempAllocator& alloc) override;
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MCeil)
+};
+
+// Inlined version of Math.round().
+class MRound
+ : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data
+{
+ explicit MRound(MDefinition* num)
+ : MUnaryInstruction(num)
+ {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Round)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool isFloat32Commutative() const override {
+ return true;
+ }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ return true;
+ }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MRound)
+};
+
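+// Create an iterator for the operand (used for for-in loops), with the given flags.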
+class MIteratorStart
+ : public MUnaryInstruction,
+ public BoxExceptPolicy<0, MIRType::Object>::Data
+{
+ uint8_t flags_;
+
+ MIteratorStart(MDefinition* obj, uint8_t flags)
+ : MUnaryInstruction(obj), flags_(flags)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(IteratorStart)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ uint8_t flags() const {
+ return flags_;
+ }
+};
+
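+// Advance an iterator and fetch its next value.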
+class MIteratorMore
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MIteratorMore(MDefinition* iter)
+ : MUnaryInstruction(iter)
+ {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(IteratorMore)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, iterator))
+
+};
+
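+// Test whether a value is the magic value indicating that iteration is complete.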
+class MIsNoIter
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ explicit MIsNoIter(MDefinition* def)
+ : MUnaryInstruction(def)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsNoIter)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
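+// Close an iterator when leaving the loop.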
+class MIteratorEnd
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MIteratorEnd(MDefinition* iter)
+ : MUnaryInstruction(iter)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(IteratorEnd)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, iterator))
+
+};
+
+// Implementation for 'in' operator.
+class MIn
+ : public MBinaryInstruction,
+ public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
+{
+ MIn(MDefinition* key, MDefinition* obj)
+ : MBinaryInstruction(key, obj)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(In)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+};
+
+
+// Test whether the index is in the array bounds or a hole.
+class MInArray
+ : public MQuaternaryInstruction,
+ public ObjectPolicy<3>::Data
+{
+ bool needsHoleCheck_;
+ bool needsNegativeIntCheck_;
+ JSValueType unboxedType_;
+
+ MInArray(MDefinition* elements, MDefinition* index,
+ MDefinition* initLength, MDefinition* object,
+ bool needsHoleCheck, JSValueType unboxedType)
+ : MQuaternaryInstruction(elements, index, initLength, object),
+ needsHoleCheck_(needsHoleCheck),
+ needsNegativeIntCheck_(true),
+ unboxedType_(unboxedType)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(initLength->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, initLength), (3, object))
+
+ bool needsHoleCheck() const {
+ return needsHoleCheck_;
+ }
+ bool needsNegativeIntCheck() const {
+ return needsNegativeIntCheck_;
+ }
+ JSValueType unboxedType() const {
+ return unboxedType_;
+ }
+ void collectRangeInfoPreTrunc() override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isInArray())
+ return false;
+ const MInArray* other = ins->toInArray();
+ if (needsHoleCheck() != other->needsHoleCheck())
+ return false;
+ if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
+ return false;
+ if (unboxedType() != other->unboxedType())
+ return false;
+ return congruentIfOperandsEqual(other);
+ }
+};
+
+// Implementation for instanceof operator with specific rhs.
+class MInstanceOf
+ : public MUnaryInstruction,
+ public InstanceOfPolicy::Data
+{
+ CompilerObject protoObj_;
+
+ MInstanceOf(MDefinition* obj, JSObject* proto)
+ : MUnaryInstruction(obj),
+ protoObj_(proto)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InstanceOf)
+ TRIVIAL_NEW_WRAPPERS
+
+ JSObject* prototypeObject() {
+ return protoObj_;
+ }
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(protoObj_);
+ }
+};
+
+// Implementation for instanceof operator with unknown rhs.
+class MCallInstanceOf
+ : public MBinaryInstruction,
+ public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
+{
+ MCallInstanceOf(MDefinition* obj, MDefinition* proto)
+ : MBinaryInstruction(obj, proto)
+ {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallInstanceOf)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MArgumentsLength : public MNullaryInstruction
+{
+ MArgumentsLength()
+ {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArgumentsLength)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ // Arguments |length| cannot be mutated by Ion Code.
+ return AliasSet::None();
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+};
+
+// This MIR instruction is used to read an argument from the frame's actual
+// arguments.
+class MGetFrameArgument
+ : public MUnaryInstruction,
+ public IntPolicy<0>::Data
+{
+ bool scriptHasSetArg_;
+
+ MGetFrameArgument(MDefinition* idx, bool scriptHasSetArg)
+ : MUnaryInstruction(idx),
+ scriptHasSetArg_(scriptHasSetArg)
+ {
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetFrameArgument)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ // If the script doesn't have any JSOP_SETARG ops, then this instruction is never
+ // aliased.
+ if (scriptHasSetArg_)
+ return AliasSet::Load(AliasSet::FrameArgument);
+ return AliasSet::None();
+ }
+};
+
+class MNewTarget : public MNullaryInstruction
+{
+ MNewTarget() : MNullaryInstruction() {
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewTarget)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// This MIR instruction is used to set an argument value in the frame.
+class MSetFrameArgument
+ : public MUnaryInstruction,
+ public NoFloatPolicy<0>::Data
+{
+ uint32_t argno_;
+
+ MSetFrameArgument(uint32_t argno, MDefinition* value)
+ : MUnaryInstruction(value),
+ argno_(argno)
+ {
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SetFrameArgument)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ uint32_t argno() const {
+ return argno_;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return false;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::FrameArgument);
+ }
+};
+
+class MRestCommon
+{
+ unsigned numFormals_;
+ CompilerGCPointer<ArrayObject*> templateObject_;
+
+ protected:
+ MRestCommon(unsigned numFormals, ArrayObject* templateObject)
+ : numFormals_(numFormals),
+ templateObject_(templateObject)
+ { }
+
+ public:
+ unsigned numFormals() const {
+ return numFormals_;
+ }
+ ArrayObject* templateObject() const {
+ return templateObject_;
+ }
+};
+
+class MRest
+ : public MUnaryInstruction,
+ public MRestCommon,
+ public IntPolicy<0>::Data
+{
+ MRest(CompilerConstraintList* constraints, MDefinition* numActuals, unsigned numFormals,
+ ArrayObject* templateObject)
+ : MUnaryInstruction(numActuals),
+ MRestCommon(numFormals, templateObject)
+ {
+ setResultType(MIRType::Object);
+ setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
+ }
+
+ public:
+ INSTRUCTION_HEADER(Rest)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, numActuals))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool possiblyCalls() const override {
+ return true;
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObject());
+ }
+};
+
+class MFilterTypeSet
+ : public MUnaryInstruction,
+ public FilterTypeSetPolicy::Data
+{
+ MFilterTypeSet(MDefinition* def, TemporaryTypeSet* types)
+ : MUnaryInstruction(def)
+ {
+ MOZ_ASSERT(!types->unknown());
+ setResultType(types->getKnownMIRType());
+ setResultTypeSet(types);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FilterTypeSet)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* def) const override {
+ return false;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ virtual bool neverHoist() const override {
+ return resultTypeSet()->empty();
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ bool isFloat32Commutative() const override {
+ return IsFloatingPointType(type());
+ }
+
+ bool canProduceFloat32() const override;
+ bool canConsumeFloat32(MUse* operand) const override;
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+};
+
+// Given a value, guard that the value is in a particular TypeSet, then returns
+// that value.
+class MTypeBarrier
+ : public MUnaryInstruction,
+ public TypeBarrierPolicy::Data
+{
+ BarrierKind barrierKind_;
+
+ MTypeBarrier(MDefinition* def, TemporaryTypeSet* types,
+ BarrierKind kind = BarrierKind::TypeSet)
+ : MUnaryInstruction(def),
+ barrierKind_(kind)
+ {
+ MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
+
+ MOZ_ASSERT(!types->unknown());
+ setResultType(types->getKnownMIRType());
+ setResultTypeSet(types);
+
+ setGuard();
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypeBarrier)
+ TRIVIAL_NEW_WRAPPERS
+
+ void printOpcode(GenericPrinter& out) const override;
+ bool congruentTo(const MDefinition* def) const override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ virtual bool neverHoist() const override {
+ return resultTypeSet()->empty();
+ }
+ BarrierKind barrierKind() const {
+ return barrierKind_;
+ }
+
+ bool alwaysBails() const {
+        // If the MIR type of the input doesn't agree with the MIR type of the
+        // barrier, we will definitely bail.
+ MIRType type = resultTypeSet()->getKnownMIRType();
+ if (type == MIRType::Value)
+ return false;
+ if (input()->type() == MIRType::Value)
+ return false;
+ if (input()->type() == MIRType::ObjectOrNull) {
+ // The ObjectOrNull optimization is only performed when the
+ // barrier's type is MIRType::Null.
+ MOZ_ASSERT(type == MIRType::Null);
+ return false;
+ }
+ return input()->type() != type;
+ }
+
+ ALLOW_CLONE(MTypeBarrier)
+};
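+
+// Example of the alwaysBails() logic above: if the barrier's known MIR type is
+// Int32 but the input is already typed as, say, String, the two types can
+// never agree, so the barrier is guaranteed to bail out.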
+
+// Like MTypeBarrier, guard that the value is in the given type set. This is
+// used before property writes to ensure the value being written is represented
+// in the property types for the object.
+class MMonitorTypes
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ const TemporaryTypeSet* typeSet_;
+ BarrierKind barrierKind_;
+
+ MMonitorTypes(MDefinition* def, const TemporaryTypeSet* types, BarrierKind kind)
+ : MUnaryInstruction(def),
+ typeSet_(types),
+ barrierKind_(kind)
+ {
+ MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
+
+ setGuard();
+ MOZ_ASSERT(!types->unknown());
+ }
+
+ public:
+ INSTRUCTION_HEADER(MonitorTypes)
+ TRIVIAL_NEW_WRAPPERS
+
+ const TemporaryTypeSet* typeSet() const {
+ return typeSet_;
+ }
+ BarrierKind barrierKind() const {
+ return barrierKind_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+// Given a value being written to another object, update the generational store
+// buffer if the value is in the nursery and the object is in the tenured heap.
+class MPostWriteBarrier : public MBinaryInstruction, public ObjectPolicy<0>::Data
+{
+ MPostWriteBarrier(MDefinition* obj, MDefinition* value)
+ : MBinaryInstruction(obj, value)
+ {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostWriteBarrier)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+        // During lowering, values that have neither object nor value MIR type
+        // are ignored; thus Float32 can show up at this point without any issue.
+ return use == getUseFor(1);
+ }
+#endif
+
+ ALLOW_CLONE(MPostWriteBarrier)
+};
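+
+// Rough example of when the barrier above is needed: for a store such as
+//   tenuredObj.prop = nurseryValue;   // names are illustrative
+// the nursery-allocated value becomes reachable from tenured memory, so
+// |tenuredObj| must be recorded in the generational store buffer.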
+
+// Given a value being written to another object's elements at the specified
+// index, update the generational store buffer if the value is in the nursery
+// and the object is in the tenured heap.
+class MPostWriteElementBarrier : public MTernaryInstruction
+ , public MixPolicy<ObjectPolicy<0>, IntPolicy<2>>::Data
+{
+ MPostWriteElementBarrier(MDefinition* obj, MDefinition* value, MDefinition* index)
+ : MTernaryInstruction(obj, value, index)
+ {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostWriteElementBarrier)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value), (2, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+        // During lowering, values that have neither object nor value MIR type
+        // are ignored; thus Float32 can show up at this point without any issue.
+ return use == getUseFor(1);
+ }
+#endif
+
+ ALLOW_CLONE(MPostWriteElementBarrier)
+};
+
+class MNewNamedLambdaObject : public MNullaryInstruction
+{
+ CompilerGCPointer<LexicalEnvironmentObject*> templateObj_;
+
+ explicit MNewNamedLambdaObject(LexicalEnvironmentObject* templateObj)
+ : MNullaryInstruction(),
+ templateObj_(templateObj)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewNamedLambdaObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ LexicalEnvironmentObject* templateObj() {
+ return templateObj_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObj_);
+ }
+};
+
+class MNewCallObjectBase : public MNullaryInstruction
+{
+ CompilerGCPointer<CallObject*> templateObj_;
+
+ protected:
+ explicit MNewCallObjectBase(CallObject* templateObj)
+ : MNullaryInstruction(),
+ templateObj_(templateObj)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ CallObject* templateObject() {
+ return templateObj_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObj_);
+ }
+};
+
+class MNewCallObject : public MNewCallObjectBase
+{
+ public:
+ INSTRUCTION_HEADER(NewCallObject)
+
+ explicit MNewCallObject(CallObject* templateObj)
+ : MNewCallObjectBase(templateObj)
+ {
+ MOZ_ASSERT(!templateObj->isSingleton());
+ }
+
+ static MNewCallObject*
+ New(TempAllocator& alloc, CallObject* templateObj)
+ {
+ return new(alloc) MNewCallObject(templateObj);
+ }
+};
+
+class MNewSingletonCallObject : public MNewCallObjectBase
+{
+ public:
+ INSTRUCTION_HEADER(NewSingletonCallObject)
+
+ explicit MNewSingletonCallObject(CallObject* templateObj)
+ : MNewCallObjectBase(templateObj)
+ {}
+
+ static MNewSingletonCallObject*
+ New(TempAllocator& alloc, CallObject* templateObj)
+ {
+ return new(alloc) MNewSingletonCallObject(templateObj);
+ }
+};
+
+class MNewStringObject :
+ public MUnaryInstruction,
+ public ConvertToStringPolicy<0>::Data
+{
+ CompilerObject templateObj_;
+
+ MNewStringObject(MDefinition* input, JSObject* templateObj)
+ : MUnaryInstruction(input),
+ templateObj_(templateObj)
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewStringObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ StringObject* templateObj() const;
+
+ bool appendRoots(MRootList& roots) const override {
+ return roots.append(templateObj_);
+ }
+};
+
+// This is an alias for MLoadFixedSlot.
+class MEnclosingEnvironment : public MLoadFixedSlot
+{
+ explicit MEnclosingEnvironment(MDefinition* obj)
+ : MLoadFixedSlot(obj, EnvironmentObject::enclosingEnvironmentSlot())
+ {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ static MEnclosingEnvironment* New(TempAllocator& alloc, MDefinition* obj) {
+ return new(alloc) MEnclosingEnvironment(obj);
+ }
+
+ AliasSet getAliasSet() const override {
+ // EnvironmentObject reserved slots are immutable.
+ return AliasSet::None();
+ }
+};
+
+// This is an element of a spaghetti stack used to represent the memory context
+// that has to be restored in case of a bailout.
+struct MStoreToRecover : public TempObject, public InlineSpaghettiStackNode<MStoreToRecover>
+{
+ MDefinition* operand;
+
+ explicit MStoreToRecover(MDefinition* operand)
+ : operand(operand)
+ { }
+};
+
+typedef InlineSpaghettiStack<MStoreToRecover> MStoresToRecoverList;
+
+// A resume point contains the information needed to reconstruct the Baseline
+// state from a position in the JIT. See the big comment near resumeAfter() in
+// IonBuilder.cpp.
+class MResumePoint final :
+ public MNode
+#ifdef DEBUG
+ , public InlineForwardListNode<MResumePoint>
+#endif
+{
+ public:
+ enum Mode {
+        ResumeAt,    // Resume at (just before) the current instruction.
+        ResumeAfter, // Resume after the current instruction.
+        Outer        // State before inlining.
+ };
+
+ private:
+ friend class MBasicBlock;
+ friend void AssertBasicGraphCoherency(MIRGraph& graph);
+
+ // List of stack slots needed to reconstruct the frame corresponding to the
+ // function which is compiled by IonBuilder.
+ FixedList<MUse> operands_;
+
+ // List of stores needed to reconstruct the content of objects which are
+ // emulated by EmulateStateOf variants.
+ MStoresToRecoverList stores_;
+
+ jsbytecode* pc_;
+ MInstruction* instruction_;
+ Mode mode_;
+
+ MResumePoint(MBasicBlock* block, jsbytecode* pc, Mode mode);
+ void inherit(MBasicBlock* state);
+
+ protected:
+ // Initializes operands_ to an empty array of a fixed length.
+ // The array may then be filled in by inherit().
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+
+ void clearOperand(size_t index) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUncheckedWithoutProducer(this);
+ }
+
+ MUse* getUseFor(size_t index) override {
+ return &operands_[index];
+ }
+ const MUse* getUseFor(size_t index) const override {
+ return &operands_[index];
+ }
+
+ public:
+ static MResumePoint* New(TempAllocator& alloc, MBasicBlock* block, jsbytecode* pc,
+ Mode mode);
+ static MResumePoint* New(TempAllocator& alloc, MBasicBlock* block, MResumePoint* model,
+ const MDefinitionVector& operands);
+ static MResumePoint* Copy(TempAllocator& alloc, MResumePoint* src);
+
+ MNode::Kind kind() const override {
+ return MNode::ResumePoint;
+ }
+ size_t numAllocatedOperands() const {
+ return operands_.length();
+ }
+ uint32_t stackDepth() const {
+ return numAllocatedOperands();
+ }
+ size_t numOperands() const override {
+ return numAllocatedOperands();
+ }
+ size_t indexOf(const MUse* u) const final override {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUnchecked(operand, this);
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final override {
+ operands_[index].replaceProducer(operand);
+ }
+
+ bool isObservableOperand(MUse* u) const;
+ bool isObservableOperand(size_t index) const;
+ bool isRecoverableOperand(MUse* u) const;
+
+ MDefinition* getOperand(size_t index) const override {
+ return operands_[index].producer();
+ }
+ jsbytecode* pc() const {
+ return pc_;
+ }
+ MResumePoint* caller() const;
+ uint32_t frameCount() const {
+ uint32_t count = 1;
+ for (MResumePoint* it = caller(); it; it = it->caller())
+ count++;
+ return count;
+ }
+ MInstruction* instruction() {
+ return instruction_;
+ }
+ void setInstruction(MInstruction* ins) {
+ MOZ_ASSERT(!instruction_);
+ instruction_ = ins;
+ }
+ // Only to be used by stealResumePoint.
+ void replaceInstruction(MInstruction* ins) {
+ MOZ_ASSERT(instruction_);
+ instruction_ = ins;
+ }
+ void resetInstruction() {
+ MOZ_ASSERT(instruction_);
+ instruction_ = nullptr;
+ }
+ Mode mode() const {
+ return mode_;
+ }
+
+ void releaseUses() {
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (operands_[i].hasProducer())
+ operands_[i].releaseProducer();
+ }
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    // Register a store instruction on the current resume point. This
+    // instruction will be recovered when bailing out. The |cache| argument can
+    // be any resume point; it is used to share memory if we are doing the same
+    // modification.
+ void addStore(TempAllocator& alloc, MDefinition* store, const MResumePoint* cache = nullptr);
+
+ MStoresToRecoverList::iterator storesBegin() const {
+ return stores_.begin();
+ }
+ MStoresToRecoverList::iterator storesEnd() const {
+ return stores_.end();
+ }
+
+ virtual void dump(GenericPrinter& out) const override;
+ virtual void dump() const override;
+};
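+
+// Note on frameCount() above: it counts this resume point plus all of its
+// callers, so a resume point inside a function inlined two levels deep reports
+// a frameCount() of 3.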
+
+class MIsCallable
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MIsCallable(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsCallable)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MIsConstructor
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ public:
+ explicit MIsConstructor(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ INSTRUCTION_HEADER(IsConstructor)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MIsObject
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MIsObject(MDefinition* object)
+ : MUnaryInstruction(object)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+ public:
+ INSTRUCTION_HEADER(IsObject)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MHasClass
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ const Class* class_;
+
+ MHasClass(MDefinition* object, const Class* clasp)
+ : MUnaryInstruction(object)
+ , class_(clasp)
+ {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(HasClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const Class* getClass() const {
+ return class_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isHasClass())
+ return false;
+ if (getClass() != ins->toHasClass()->getClass())
+ return false;
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+class MCheckReturn
+ : public MBinaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MCheckReturn(MDefinition* retVal, MDefinition* thisVal)
+ : MBinaryInstruction(retVal, thisVal)
+ {
+ setGuard();
+ setResultType(MIRType::Value);
+ setResultTypeSet(retVal->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(CheckReturn)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, returnValue), (1, thisValue))
+
+};
+
+// Increase the warm-up counter of the provided script upon execution and test
+// whether the warm-up counter surpasses the threshold. When it does, the
+// outermost script (i.e. not the inlined script) is recompiled.
+class MRecompileCheck : public MNullaryInstruction
+{
+ public:
+ enum RecompileCheckType {
+ RecompileCheck_OptimizationLevel,
+ RecompileCheck_Inlining
+ };
+
+ private:
+ JSScript* script_;
+ uint32_t recompileThreshold_;
+ bool forceRecompilation_;
+ bool increaseWarmUpCounter_;
+
+ MRecompileCheck(JSScript* script, uint32_t recompileThreshold, RecompileCheckType type)
+ : script_(script),
+ recompileThreshold_(recompileThreshold)
+ {
+ switch (type) {
+ case RecompileCheck_OptimizationLevel:
+ forceRecompilation_ = false;
+ increaseWarmUpCounter_ = true;
+ break;
+ case RecompileCheck_Inlining:
+ forceRecompilation_ = true;
+ increaseWarmUpCounter_ = false;
+ break;
+ default:
+ MOZ_CRASH("Unexpected recompile check type");
+ }
+
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(RecompileCheck)
+ TRIVIAL_NEW_WRAPPERS
+
+ JSScript* script() const {
+ return script_;
+ }
+
+ uint32_t recompileThreshold() const {
+ return recompileThreshold_;
+ }
+
+ bool forceRecompilation() const {
+ return forceRecompilation_;
+ }
+
+ bool increaseWarmUpCounter() const {
+ return increaseWarmUpCounter_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MAtomicIsLockFree
+ : public MUnaryInstruction,
+ public ConvertToInt32Policy<0>::Data
+{
+ explicit MAtomicIsLockFree(MDefinition* value)
+ : MUnaryInstruction(value)
+ {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicIsLockFree)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return true;
+ }
+
+ ALLOW_CLONE(MAtomicIsLockFree)
+};
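+
+// For illustration, MAtomicIsLockFree corresponds to Atomics.isLockFree(size).
+// When |size| is a constant, foldsTo() can replace the node with a boolean
+// constant; e.g. Atomics.isLockFree(4) is expected to be true on all supported
+// platforms.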
+
+// This applies to an object that is known to be a TypedArray; it bails out
+// if the object does not map a SharedArrayBuffer.
+
+class MGuardSharedTypedArray
+ : public MUnaryInstruction,
+ public SingleObjectPolicy::Data
+{
+ explicit MGuardSharedTypedArray(MDefinition* obj)
+ : MUnaryInstruction(obj)
+ {
+ setGuard();
+ setMovable();
+ }
+
+  public:
+ INSTRUCTION_HEADER(GuardSharedTypedArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MCompareExchangeTypedArrayElement
+ : public MAryInstruction<4>,
+ public Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3>>::Data
+{
+ Scalar::Type arrayType_;
+
+ explicit MCompareExchangeTypedArrayElement(MDefinition* elements, MDefinition* index,
+ Scalar::Type arrayType, MDefinition* oldval,
+ MDefinition* newval)
+ : arrayType_(arrayType)
+ {
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, oldval);
+ initOperand(3, newval);
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(CompareExchangeTypedArrayElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, oldval), (3, newval))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 ||
+ arrayType_ == Scalar::Uint8);
+ }
+ int oldvalOperand() {
+ return 2;
+ }
+ Scalar::Type arrayType() const {
+ return arrayType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
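+
+// For illustration, MCompareExchangeTypedArrayElement corresponds to a call
+// such as
+//   Atomics.compareExchange(int32Array, index, oldval, newval)
+// on an integer typed array; setGuard() above keeps the node alive even when
+// its result is unused, since the memory effect must still happen.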
+
+class MAtomicExchangeTypedArrayElement
+ : public MAryInstruction<3>,
+ public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>>::Data
+{
+ Scalar::Type arrayType_;
+
+ MAtomicExchangeTypedArrayElement(MDefinition* elements, MDefinition* index, MDefinition* value,
+ Scalar::Type arrayType)
+ : arrayType_(arrayType)
+ {
+ MOZ_ASSERT(arrayType <= Scalar::Uint32);
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, value);
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicExchangeTypedArrayElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 ||
+ arrayType_ == Scalar::Uint8);
+ }
+ Scalar::Type arrayType() const {
+ return arrayType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
+
+class MAtomicTypedArrayElementBinop
+ : public MAryInstruction<3>,
+ public Mix3Policy< ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2> >::Data
+{
+ private:
+ AtomicOp op_;
+ Scalar::Type arrayType_;
+
+ protected:
+ explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition* elements, MDefinition* index,
+ Scalar::Type arrayType, MDefinition* value)
+ : op_(op),
+ arrayType_(arrayType)
+ {
+ initOperand(0, elements);
+ initOperand(1, index);
+ initOperand(2, value);
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicTypedArrayElementBinop)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 ||
+ arrayType_ == Scalar::Uint8);
+ }
+ AtomicOp operation() const {
+ return op_;
+ }
+ Scalar::Type arrayType() const {
+ return arrayType_;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
+
+class MDebugger : public MNullaryInstruction
+{
+ public:
+ INSTRUCTION_HEADER(Debugger)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MCheckIsObj
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ uint8_t checkKind_;
+
+ explicit MCheckIsObj(MDefinition* toCheck, uint8_t checkKind)
+ : MUnaryInstruction(toCheck), checkKind_(checkKind)
+ {
+ setResultType(MIRType::Value);
+ setResultTypeSet(toCheck->resultTypeSet());
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(CheckIsObj)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, checkValue))
+
+ uint8_t checkKind() const { return checkKind_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+};
+
+class MCheckObjCoercible
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MCheckObjCoercible(MDefinition* toCheck)
+ : MUnaryInstruction(toCheck)
+ {
+ setGuard();
+ setResultType(MIRType::Value);
+ setResultTypeSet(toCheck->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(CheckObjCoercible)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, checkValue))
+};
+
+class MDebugCheckSelfHosted
+ : public MUnaryInstruction,
+ public BoxInputsPolicy::Data
+{
+ explicit MDebugCheckSelfHosted(MDefinition* toCheck)
+ : MUnaryInstruction(toCheck)
+ {
+ setGuard();
+ setResultType(MIRType::Value);
+ setResultTypeSet(toCheck->resultTypeSet());
+ }
+
+ public:
+ INSTRUCTION_HEADER(DebugCheckSelfHosted)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, checkValue))
+
+};
+
+class MAsmJSNeg
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ MAsmJSNeg(MDefinition* op, MIRType type)
+ : MUnaryInstruction(op)
+ {
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSNeg)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmBoundsCheck
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool redundant_;
+ wasm::TrapOffset trapOffset_;
+
+ explicit MWasmBoundsCheck(MDefinition* index, wasm::TrapOffset trapOffset)
+ : MUnaryInstruction(index),
+ redundant_(false),
+ trapOffset_(trapOffset)
+ {
+ setGuard(); // Effectful: throws for OOB.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBoundsCheck)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool isRedundant() const {
+ return redundant_;
+ }
+
+ void setRedundant(bool val) {
+ redundant_ = val;
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ return trapOffset_;
+ }
+};
+
+class MWasmAddOffset
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ uint32_t offset_;
+ wasm::TrapOffset trapOffset_;
+
+ MWasmAddOffset(MDefinition* base, uint32_t offset, wasm::TrapOffset trapOffset)
+ : MUnaryInstruction(base),
+ offset_(offset),
+ trapOffset_(trapOffset)
+ {
+ setGuard();
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmAddOffset)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+ wasm::TrapOffset trapOffset() const {
+ return trapOffset_;
+ }
+};
+
+class MWasmLoad
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ wasm::MemoryAccessDesc access_;
+
+ MWasmLoad(MDefinition* base, const wasm::MemoryAccessDesc& access, MIRType resultType)
+ : MUnaryInstruction(base),
+ access_(access)
+ {
+ setGuard();
+ setResultType(resultType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoad)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base))
+
+ const wasm::MemoryAccessDesc& access() const {
+ return access_;
+ }
+
+ AliasSet getAliasSet() const override {
+ // When a barrier is needed, make the instruction effectful by giving
+ // it a "store" effect.
+ if (access_.isAtomic())
+ return AliasSet::Store(AliasSet::WasmHeap);
+ return AliasSet::Load(AliasSet::WasmHeap);
+ }
+};
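+
+// For illustration, a wasm heap access with a constant offset may be built
+// roughly as the following sequence (details vary by platform and by which
+// checks can be elided):
+//   addr = WasmAddOffset(index, offset)   // traps on overflow
+//   WasmBoundsCheck(addr)                 // traps when out of bounds
+//   value = WasmLoad(addr, access)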
+
+class MWasmStore
+ : public MBinaryInstruction,
+ public NoTypePolicy::Data
+{
+ wasm::MemoryAccessDesc access_;
+
+ MWasmStore(MDefinition* base, const wasm::MemoryAccessDesc& access, MDefinition* value)
+ : MBinaryInstruction(base, value),
+ access_(access)
+ {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStore)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base), (1, value))
+
+ const wasm::MemoryAccessDesc& access() const {
+ return access_;
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MAsmJSMemoryAccess
+{
+ uint32_t offset_;
+ Scalar::Type accessType_;
+ bool needsBoundsCheck_;
+
+ public:
+ explicit MAsmJSMemoryAccess(Scalar::Type accessType)
+ : offset_(0),
+ accessType_(accessType),
+ needsBoundsCheck_(true)
+ {
+ MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t endOffset() const { return offset() + byteSize(); }
+ Scalar::Type accessType() const { return accessType_; }
+ unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+
+ wasm::MemoryAccessDesc access() const {
+ return wasm::MemoryAccessDesc(accessType_, Scalar::byteSize(accessType_), offset_,
+ mozilla::Nothing());
+ }
+
+ void removeBoundsCheck() { needsBoundsCheck_ = false; }
+ void setOffset(uint32_t o) { offset_ = o; }
+};
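+
+// Example of the accessors above: for a Float64 access (byteSize() == 8) with
+// setOffset(16), endOffset() evaluates to 24.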
+
+class MAsmJSLoadHeap
+ : public MUnaryInstruction,
+ public MAsmJSMemoryAccess,
+ public NoTypePolicy::Data
+{
+ MAsmJSLoadHeap(MDefinition* base, Scalar::Type accessType)
+ : MUnaryInstruction(base),
+ MAsmJSMemoryAccess(accessType)
+ {
+ setResultType(ScalarTypeToMIRType(accessType));
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSLoadHeap)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* base() const { return getOperand(0); }
+ void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
+
+ bool congruentTo(const MDefinition* ins) const override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::WasmHeap);
+ }
+ AliasType mightAlias(const MDefinition* def) const override;
+};
+
+class MAsmJSStoreHeap
+ : public MBinaryInstruction,
+ public MAsmJSMemoryAccess,
+ public NoTypePolicy::Data
+{
+ MAsmJSStoreHeap(MDefinition* base, Scalar::Type accessType, MDefinition* v)
+ : MBinaryInstruction(base, v),
+ MAsmJSMemoryAccess(accessType)
+ {}
+
+ public:
+ INSTRUCTION_HEADER(AsmJSStoreHeap)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* base() const { return getOperand(0); }
+ void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
+ MDefinition* value() const { return getOperand(1); }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MAsmJSCompareExchangeHeap
+ : public MQuaternaryInstruction,
+ public NoTypePolicy::Data
+{
+ wasm::MemoryAccessDesc access_;
+
+ MAsmJSCompareExchangeHeap(MDefinition* base, const wasm::MemoryAccessDesc& access,
+ MDefinition* oldv, MDefinition* newv, MDefinition* tls)
+ : MQuaternaryInstruction(base, oldv, newv, tls),
+ access_(access)
+ {
+ setGuard(); // Not removable
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSCompareExchangeHeap)
+ TRIVIAL_NEW_WRAPPERS
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+
+ MDefinition* base() const { return getOperand(0); }
+ MDefinition* oldValue() const { return getOperand(1); }
+ MDefinition* newValue() const { return getOperand(2); }
+ MDefinition* tls() const { return getOperand(3); }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MAsmJSAtomicExchangeHeap
+ : public MTernaryInstruction,
+ public NoTypePolicy::Data
+{
+ wasm::MemoryAccessDesc access_;
+
+ MAsmJSAtomicExchangeHeap(MDefinition* base, const wasm::MemoryAccessDesc& access,
+ MDefinition* value, MDefinition* tls)
+ : MTernaryInstruction(base, value, tls),
+ access_(access)
+ {
+ setGuard(); // Not removable
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)
+ TRIVIAL_NEW_WRAPPERS
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+
+ MDefinition* base() const { return getOperand(0); }
+ MDefinition* value() const { return getOperand(1); }
+ MDefinition* tls() const { return getOperand(2); }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MAsmJSAtomicBinopHeap
+ : public MTernaryInstruction,
+ public NoTypePolicy::Data
+{
+ AtomicOp op_;
+ wasm::MemoryAccessDesc access_;
+
+ MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const wasm::MemoryAccessDesc& access,
+ MDefinition* v, MDefinition* tls)
+ : MTernaryInstruction(base, v, tls),
+ op_(op),
+ access_(access)
+ {
+ setGuard(); // Not removable
+ setResultType(MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSAtomicBinopHeap)
+ TRIVIAL_NEW_WRAPPERS
+
+ AtomicOp operation() const { return op_; }
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+
+ MDefinition* base() const { return getOperand(0); }
+ MDefinition* value() const { return getOperand(1); }
+ MDefinition* tls() const { return getOperand(2); }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmLoadGlobalVar : public MNullaryInstruction
+{
+ MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant)
+ : globalDataOffset_(globalDataOffset), isConstant_(isConstant)
+ {
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ unsigned globalDataOffset_;
+ bool isConstant_;
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadGlobalVar)
+ TRIVIAL_NEW_WRAPPERS
+
+ unsigned globalDataOffset() const { return globalDataOffset_; }
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return isConstant_ ? AliasSet::None() : AliasSet::Load(AliasSet::WasmGlobalVar);
+ }
+
+ AliasType mightAlias(const MDefinition* def) const override;
+};
+
+class MWasmStoreGlobalVar
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ MWasmStoreGlobalVar(unsigned globalDataOffset, MDefinition* v)
+ : MUnaryInstruction(v), globalDataOffset_(globalDataOffset)
+ {}
+
+ unsigned globalDataOffset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreGlobalVar)
+ TRIVIAL_NEW_WRAPPERS
+
+ unsigned globalDataOffset() const { return globalDataOffset_; }
+ MDefinition* value() const { return getOperand(0); }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmGlobalVar);
+ }
+};
+
+class MWasmParameter : public MNullaryInstruction
+{
+ ABIArg abi_;
+
+ MWasmParameter(ABIArg abi, MIRType mirType)
+ : abi_(abi)
+ {
+ setResultType(mirType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmParameter)
+ TRIVIAL_NEW_WRAPPERS
+
+ ABIArg abi() const { return abi_; }
+};
+
+class MWasmReturn
+ : public MAryControlInstruction<2, 0>,
+ public NoTypePolicy::Data
+{
+ explicit MWasmReturn(MDefinition* ins, MDefinition* tlsPtr) {
+ initOperand(0, ins);
+ initOperand(1, tlsPtr);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReturn)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmReturnVoid
+ : public MAryControlInstruction<1, 0>,
+ public NoTypePolicy::Data
+{
+ explicit MWasmReturnVoid(MDefinition* tlsPtr) {
+ initOperand(0, tlsPtr);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReturnVoid)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmStackArg
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ MWasmStackArg(uint32_t spOffset, MDefinition* ins)
+ : MUnaryInstruction(ins),
+ spOffset_(spOffset)
+ {}
+
+ uint32_t spOffset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmStackArg)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, arg))
+
+ uint32_t spOffset() const {
+ return spOffset_;
+ }
+ void incrementOffset(uint32_t inc) {
+ spOffset_ += inc;
+ }
+};
+
+class MWasmCall final
+ : public MVariadicInstruction,
+ public NoTypePolicy::Data
+{
+ wasm::CallSiteDesc desc_;
+ wasm::CalleeDesc callee_;
+ FixedList<AnyRegister> argRegs_;
+ uint32_t spIncrement_;
+ uint32_t tlsStackOffset_;
+ ABIArg instanceArg_;
+
+ MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement,
+ uint32_t tlsStackOffset)
+ : desc_(desc),
+ callee_(callee),
+ spIncrement_(spIncrement),
+ tlsStackOffset_(tlsStackOffset)
+ { }
+
+ public:
+ INSTRUCTION_HEADER(WasmCall)
+
+ struct Arg {
+ AnyRegister reg;
+ MDefinition* def;
+ Arg(AnyRegister reg, MDefinition* def) : reg(reg), def(def) {}
+ };
+ typedef Vector<Arg, 8, SystemAllocPolicy> Args;
+
+ static const uint32_t DontSaveTls = UINT32_MAX;
+
+ static MWasmCall* New(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ const Args& args,
+ MIRType resultType,
+ uint32_t spIncrement,
+ uint32_t tlsStackOffset,
+ MDefinition* tableIndex = nullptr);
+
+ static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::SymbolicAddress builtin,
+ const ABIArg& instanceArg,
+ const Args& args,
+ MIRType resultType,
+ uint32_t spIncrement,
+ uint32_t tlsStackOffset);
+
+ size_t numArgs() const {
+ return argRegs_.length();
+ }
+ AnyRegister registerForArg(size_t index) const {
+ MOZ_ASSERT(index < numArgs());
+ return argRegs_[index];
+ }
+ const wasm::CallSiteDesc& desc() const {
+ return desc_;
+ }
+    const wasm::CalleeDesc& callee() const {
+ return callee_;
+ }
+ uint32_t spIncrement() const {
+ return spIncrement_;
+ }
+ bool saveTls() const {
+ return tlsStackOffset_ != DontSaveTls;
+ }
+ uint32_t tlsStackOffset() const {
+ MOZ_ASSERT(saveTls());
+ return tlsStackOffset_;
+ }
+
+ bool possiblyCalls() const override {
+ return true;
+ }
+
+ const ABIArg& instanceArg() const {
+ return instanceArg_;
+ }
+};
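+
+// Illustrative sketch of how a caller might build an MWasmCall (the argument
+// register and definitions are placeholders):
+//   MWasmCall::Args args;
+//   if (!args.append(MWasmCall::Arg(argReg, argDef)))
+//       return false;
+//   MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, MIRType::Int32,
+//                                    spIncrement, MWasmCall::DontSaveTls);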
+
+class MWasmSelect
+ : public MTernaryInstruction,
+ public NoTypePolicy::Data
+{
+    MWasmSelect(MDefinition* trueExpr, MDefinition* falseExpr, MDefinition* condExpr)
+ : MTernaryInstruction(trueExpr, falseExpr, condExpr)
+ {
+ MOZ_ASSERT(condExpr->type() == MIRType::Int32);
+ MOZ_ASSERT(trueExpr->type() == falseExpr->type());
+ setResultType(trueExpr->type());
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmSelect)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, trueExpr), (1, falseExpr), (2, condExpr))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MWasmSelect)
+};
+
+class MWasmReinterpret
+ : public MUnaryInstruction,
+ public NoTypePolicy::Data
+{
+ MWasmReinterpret(MDefinition* val, MIRType toType)
+ : MUnaryInstruction(val)
+ {
+ switch (val->type()) {
+ case MIRType::Int32: MOZ_ASSERT(toType == MIRType::Float32); break;
+ case MIRType::Float32: MOZ_ASSERT(toType == MIRType::Int32); break;
+ case MIRType::Double: MOZ_ASSERT(toType == MIRType::Int64); break;
+ case MIRType::Int64: MOZ_ASSERT(toType == MIRType::Double); break;
+ default: MOZ_CRASH("unexpected reinterpret conversion");
+ }
+ setMovable();
+ setResultType(toType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReinterpret)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MWasmReinterpret)
+};
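+
+// Example: reinterpreting the Float32 value 1.0f with MWasmReinterpret yields
+// the Int32 bit pattern 0x3f800000; the bits are preserved and only the MIR
+// type changes.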
+
+class MRotate
+ : public MBinaryInstruction,
+ public NoTypePolicy::Data
+{
+ bool isLeftRotate_;
+
+ MRotate(MDefinition* input, MDefinition* count, MIRType type, bool isLeftRotate)
+ : MBinaryInstruction(input, count), isLeftRotate_(isLeftRotate)
+ {
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Rotate)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input), (1, count))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::None();
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) && ins->toRotate()->isLeftRotate() == isLeftRotate_;
+ }
+
+ bool isLeftRotate() const {
+ return isLeftRotate_;
+ }
+
+ ALLOW_CLONE(MRotate)
+};
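+
+// Example: a 32-bit left rotate of 0x12345678 by 8 produces 0x34567812, while
+// the corresponding right rotate by 8 produces 0x78123456.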
+
+class MUnknownValue : public MNullaryInstruction
+{
+ protected:
+ MUnknownValue() {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(UnknownValue)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+#undef INSTRUCTION_HEADER
+
+void MUse::init(MDefinition* producer, MNode* consumer)
+{
+ MOZ_ASSERT(!consumer_, "Initializing MUse that already has a consumer");
+ MOZ_ASSERT(!producer_, "Initializing MUse that already has a producer");
+ initUnchecked(producer, consumer);
+}
+
+void MUse::initUnchecked(MDefinition* producer, MNode* consumer)
+{
+ MOZ_ASSERT(consumer, "Initializing to null consumer");
+ consumer_ = consumer;
+ producer_ = producer;
+ producer_->addUseUnchecked(this);
+}
+
+void MUse::initUncheckedWithoutProducer(MNode* consumer)
+{
+ MOZ_ASSERT(consumer, "Initializing to null consumer");
+ consumer_ = consumer;
+ producer_ = nullptr;
+}
+
+void MUse::replaceProducer(MDefinition* producer)
+{
+ MOZ_ASSERT(consumer_, "Resetting MUse without a consumer");
+ producer_->removeUse(this);
+ producer_ = producer;
+ producer_->addUse(this);
+}
+
+void MUse::releaseProducer()
+{
+ MOZ_ASSERT(consumer_, "Clearing MUse without a consumer");
+ producer_->removeUse(this);
+ producer_ = nullptr;
+}
+
+// Implement cast functions now that the compiler can see the inheritance.
+
+MDefinition*
+MNode::toDefinition()
+{
+ MOZ_ASSERT(isDefinition());
+ return (MDefinition*)this;
+}
+
+MResumePoint*
+MNode::toResumePoint()
+{
+ MOZ_ASSERT(isResumePoint());
+ return (MResumePoint*)this;
+}
+
+MInstruction*
+MDefinition::toInstruction()
+{
+ MOZ_ASSERT(!isPhi());
+ return (MInstruction*)this;
+}
+
+const MInstruction*
+MDefinition::toInstruction() const
+{
+ MOZ_ASSERT(!isPhi());
+ return (const MInstruction*)this;
+}
+
+MControlInstruction*
+MDefinition::toControlInstruction()
+{
+ MOZ_ASSERT(isControlInstruction());
+ return (MControlInstruction*)this;
+}
+
+MConstant*
+MDefinition::maybeConstantValue()
+{
+ MDefinition* op = this;
+ if (op->isBox())
+ op = op->toBox()->input();
+ if (op->isConstant())
+ return op->toConstant();
+ return nullptr;
+}
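+
+// For example, maybeConstantValue() above returns the underlying MConstant for
+// a Box(Constant(5)) chain and nullptr for any non-constant definition.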
+
+// Helper functions used to decide how to build MIR.
+
+bool ElementAccessIsDenseNative(CompilerConstraintList* constraints,
+ MDefinition* obj, MDefinition* id);
+JSValueType UnboxedArrayElementType(CompilerConstraintList* constraints, MDefinition* obj,
+ MDefinition* id);
+bool ElementAccessIsTypedArray(CompilerConstraintList* constraints,
+ MDefinition* obj, MDefinition* id,
+ Scalar::Type* arrayType);
+bool ElementAccessIsPacked(CompilerConstraintList* constraints, MDefinition* obj);
+bool ElementAccessMightBeCopyOnWrite(CompilerConstraintList* constraints, MDefinition* obj);
+bool ElementAccessMightBeFrozen(CompilerConstraintList* constraints, MDefinition* obj);
+bool ElementAccessHasExtraIndexedProperty(IonBuilder* builder, MDefinition* obj);
+MIRType DenseNativeElementType(CompilerConstraintList* constraints, MDefinition* obj);
+BarrierKind PropertyReadNeedsTypeBarrier(JSContext* propertycx,
+ CompilerConstraintList* constraints,
+ TypeSet::ObjectKey* key, PropertyName* name,
+ TemporaryTypeSet* observed, bool updateObserved);
+BarrierKind PropertyReadNeedsTypeBarrier(JSContext* propertycx,
+ CompilerConstraintList* constraints,
+ MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed);
+ResultWithOOM<BarrierKind>
+PropertyReadOnPrototypeNeedsTypeBarrier(IonBuilder* builder,
+ MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed);
+bool PropertyReadIsIdempotent(CompilerConstraintList* constraints,
+ MDefinition* obj, PropertyName* name);
+void AddObjectsForPropertyRead(MDefinition* obj, PropertyName* name,
+ TemporaryTypeSet* observed);
+bool CanWriteProperty(TempAllocator& alloc, CompilerConstraintList* constraints,
+ HeapTypeSetKey property, MDefinition* value,
+ MIRType implicitType = MIRType::None);
+bool PropertyWriteNeedsTypeBarrier(TempAllocator& alloc, CompilerConstraintList* constraints,
+ MBasicBlock* current, MDefinition** pobj,
+ PropertyName* name, MDefinition** pvalue,
+ bool canModify, MIRType implicitType = MIRType::None);
+bool ArrayPrototypeHasIndexedProperty(IonBuilder* builder, JSScript* script);
+bool TypeCanHaveExtraIndexedProperties(IonBuilder* builder, TemporaryTypeSet* types);
+
+inline MIRType
+MIRTypeForTypedArrayRead(Scalar::Type arrayType, bool observedDouble)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ return MIRType::Int32;
+ case Scalar::Uint32:
+ return observedDouble ? MIRType::Double : MIRType::Int32;
+ case Scalar::Float32:
+ return MIRType::Float32;
+ case Scalar::Float64:
+ return MIRType::Double;
+ default:
+ break;
+ }
+ MOZ_CRASH("Unknown typed array type");
+}
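+
+// For example, MIRTypeForTypedArrayRead(Scalar::Uint32, true) is
+// MIRType::Double, since uint32 reads may exceed the int32 range, while the
+// same call with |observedDouble| false yields MIRType::Int32.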
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIR_h */
diff --git a/js/src/jit/MIRGenerator.h b/js/src/jit/MIRGenerator.h
new file mode 100644
index 000000000..d184bab1a
--- /dev/null
+++ b/js/src/jit/MIRGenerator.h
@@ -0,0 +1,229 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MIRGenerator_h
+#define jit_MIRGenerator_h
+
+// This file declares the data structures used to build a control-flow graph
+// containing MIR.
+
+#include "mozilla/Atomics.h"
+
+#include <stdarg.h>
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitCompartment.h"
+#include "jit/MIR.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+class OptimizationInfo;
+
+class MIRGenerator
+{
+ public:
+ MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
+ TempAllocator* alloc, MIRGraph* graph,
+ const CompileInfo* info, const OptimizationInfo* optimizationInfo);
+
+ void initMinWasmHeapLength(uint32_t init) {
+ minWasmHeapLength_ = init;
+ }
+
+ TempAllocator& alloc() {
+ return *alloc_;
+ }
+ MIRGraph& graph() {
+ return *graph_;
+ }
+ MOZ_MUST_USE bool ensureBallast() {
+ return alloc().ensureBallast();
+ }
+ const JitRuntime* jitRuntime() const {
+ return GetJitContext()->runtime->jitRuntime();
+ }
+ const CompileInfo& info() const {
+ return *info_;
+ }
+ const OptimizationInfo& optimizationInfo() const {
+ return *optimizationInfo_;
+ }
+
+ template <typename T>
+ T* allocate(size_t count = 1) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes)))
+ return nullptr;
+ return static_cast<T*>(alloc().allocate(bytes));
+ }
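+
+    // For example, a pass needing temporary storage for |n| definitions might
+    // write (illustrative only):
+    //   MDefinition** storage = gen.allocate<MDefinition*>(n);
+    //   if (!storage)
+    //       return false;
+    // The memory comes from the compilation's TempAllocator.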
+
+    // Sets an error state and prints a message. Returns false so errors can be
+    // propagated up.
+ bool abort(const char* message, ...) MOZ_FORMAT_PRINTF(2, 3); // always returns false
+ bool abortFmt(const char* message, va_list ap); // always returns false
+
+ bool errored() const {
+ return error_;
+ }
+
+ MOZ_MUST_USE bool instrumentedProfiling() {
+ if (!instrumentedProfilingIsCached_) {
+ instrumentedProfiling_ = GetJitContext()->runtime->spsProfiler().enabled();
+ instrumentedProfilingIsCached_ = true;
+ }
+ return instrumentedProfiling_;
+ }
+
+ bool isProfilerInstrumentationEnabled() {
+ return !compilingWasm() && instrumentedProfiling();
+ }
+
+ bool isOptimizationTrackingEnabled() {
+ return isProfilerInstrumentationEnabled() && !info().isAnalysis();
+ }
+
+ bool safeForMinorGC() const {
+ return safeForMinorGC_;
+ }
+ void setNotSafeForMinorGC() {
+ safeForMinorGC_ = false;
+ }
+
+ // Whether the main thread is trying to cancel this build.
+ bool shouldCancel(const char* why) {
+ maybePause();
+ return cancelBuild_;
+ }
+ void cancel() {
+ cancelBuild_ = true;
+ }
+
+ void maybePause() {
+ if (pauseBuild_ && *pauseBuild_)
+ PauseCurrentHelperThread();
+ }
+ void setPauseFlag(mozilla::Atomic<bool, mozilla::Relaxed>* pauseBuild) {
+ pauseBuild_ = pauseBuild;
+ }
+
+ void disable() {
+ abortReason_ = AbortReason_Disable;
+ }
+ AbortReason abortReason() {
+ return abortReason_;
+ }
+
+ bool compilingWasm() const {
+ return info_->compilingWasm();
+ }
+
+ uint32_t wasmMaxStackArgBytes() const {
+ MOZ_ASSERT(compilingWasm());
+ return wasmMaxStackArgBytes_;
+ }
+ void initWasmMaxStackArgBytes(uint32_t n) {
+ MOZ_ASSERT(compilingWasm());
+ MOZ_ASSERT(wasmMaxStackArgBytes_ == 0);
+ wasmMaxStackArgBytes_ = n;
+ }
+ uint32_t minWasmHeapLength() const {
+ return minWasmHeapLength_;
+ }
+ void setPerformsCall() {
+ performsCall_ = true;
+ }
+ bool performsCall() const {
+ return performsCall_;
+ }
+    // Traverses the graph to find out whether there's any SIMD instruction.
+    // Costly, but the value is cached, so don't worry about calling it several
+    // times.
+ bool usesSimd();
+
+ bool modifiesFrameArguments() const {
+ return modifiesFrameArguments_;
+ }
+
+ typedef Vector<ObjectGroup*, 0, JitAllocPolicy> ObjectGroupVector;
+
+ // When abortReason() == AbortReason_PreliminaryObjects, all groups with
+ // preliminary objects which haven't been analyzed yet.
+ const ObjectGroupVector& abortedPreliminaryGroups() const {
+ return abortedPreliminaryGroups_;
+ }
+
+ public:
+ CompileCompartment* compartment;
+
+ protected:
+ const CompileInfo* info_;
+ const OptimizationInfo* optimizationInfo_;
+ TempAllocator* alloc_;
+ MIRGraph* graph_;
+ AbortReason abortReason_;
+ bool shouldForceAbort_; // Force AbortReason_Disable
+ ObjectGroupVector abortedPreliminaryGroups_;
+ bool error_;
+ mozilla::Atomic<bool, mozilla::Relaxed>* pauseBuild_;
+ mozilla::Atomic<bool, mozilla::Relaxed> cancelBuild_;
+
+ uint32_t wasmMaxStackArgBytes_;
+ bool performsCall_;
+ bool usesSimd_;
+ bool cachedUsesSimd_;
+
+ // Keep track of whether frame arguments are modified during execution.
+ // RegAlloc needs to know this as spilling values back to their register
+ // slots is not compatible with that.
+ bool modifiesFrameArguments_;
+
+ bool instrumentedProfiling_;
+ bool instrumentedProfilingIsCached_;
+ bool safeForMinorGC_;
+
+ void addAbortedPreliminaryGroup(ObjectGroup* group);
+
+ uint32_t minWasmHeapLength_;
+
+ void setForceAbort() {
+ shouldForceAbort_ = true;
+ }
+ bool shouldForceAbort() {
+ return shouldForceAbort_;
+ }
+
+#if defined(JS_ION_PERF)
+ WasmPerfSpewer wasmPerfSpewer_;
+
+ public:
+ WasmPerfSpewer& perfSpewer() { return wasmPerfSpewer_; }
+#endif
+
+ public:
+ const JitCompileOptions options;
+
+ private:
+ GraphSpewer gs_;
+
+ public:
+ GraphSpewer& graphSpewer() {
+ return gs_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIRGenerator_h */
diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
new file mode 100644
index 000000000..3a363a5bf
--- /dev/null
+++ b/js/src/jit/MIRGraph.cpp
@@ -0,0 +1,1750 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MIRGraph.h"
+
+#include "jit/BytecodeAnalysis.h"
+#include "jit/Ion.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace js::jit;
+using mozilla::Swap;
+
+MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
+ TempAllocator* alloc, MIRGraph* graph, const CompileInfo* info,
+ const OptimizationInfo* optimizationInfo)
+ : compartment(compartment),
+ info_(info),
+ optimizationInfo_(optimizationInfo),
+ alloc_(alloc),
+ graph_(graph),
+ abortReason_(AbortReason_NoAbort),
+ shouldForceAbort_(false),
+ abortedPreliminaryGroups_(*alloc_),
+ error_(false),
+ pauseBuild_(nullptr),
+ cancelBuild_(false),
+ wasmMaxStackArgBytes_(0),
+ performsCall_(false),
+ usesSimd_(false),
+ cachedUsesSimd_(false),
+ modifiesFrameArguments_(false),
+ instrumentedProfiling_(false),
+ instrumentedProfilingIsCached_(false),
+ safeForMinorGC_(true),
+ minWasmHeapLength_(0),
+ options(options),
+ gs_(alloc)
+{ }
+
+bool
+MIRGenerator::usesSimd()
+{
+ if (cachedUsesSimd_)
+ return usesSimd_;
+
+ cachedUsesSimd_ = true;
+ for (ReversePostorderIterator block = graph_->rpoBegin(),
+ end = graph_->rpoEnd();
+ block != end;
+ block++)
+ {
+ // It's fine to use MInstructionIterator here because we don't have to
+ // worry about Phis, since any reachable phi (or phi cycle) will have at
+ // least one instruction as an input.
+ for (MInstructionIterator inst = block->begin(); inst != block->end(); inst++) {
+ // Instructions that have SIMD inputs but not a SIMD type are fine
+ // to ignore, as their inputs are also reached at some point. By
+ // induction, at least one instruction with a SIMD type is reached
+ // at some point.
+ if (IsSimdType(inst->type())) {
+ MOZ_ASSERT(SupportsSimd);
+ usesSimd_ = true;
+ return true;
+ }
+ }
+ }
+ usesSimd_ = false;
+ return false;
+}
+
+bool
+MIRGenerator::abortFmt(const char* message, va_list ap)
+{
+ JitSpewVA(JitSpew_IonAbort, message, ap);
+ error_ = true;
+ return false;
+}
+
+bool
+MIRGenerator::abort(const char* message, ...)
+{
+ va_list ap;
+ va_start(ap, message);
+ abortFmt(message, ap);
+ va_end(ap);
+ return false;
+}
+
+void
+MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
+{
+ for (size_t i = 0; i < abortedPreliminaryGroups_.length(); i++) {
+ if (group == abortedPreliminaryGroups_[i])
+ return;
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!abortedPreliminaryGroups_.append(group))
+ oomUnsafe.crash("addAbortedPreliminaryGroup");
+}
+
+void
+MIRGraph::addBlock(MBasicBlock* block)
+{
+ MOZ_ASSERT(block);
+ block->setId(blockIdGen_++);
+ blocks_.pushBack(block);
+ numBlocks_++;
+}
+
+void
+MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block)
+{
+ block->setId(blockIdGen_++);
+ blocks_.insertAfter(at, block);
+ numBlocks_++;
+}
+
+void
+MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block)
+{
+ block->setId(blockIdGen_++);
+ blocks_.insertBefore(at, block);
+ numBlocks_++;
+}
+
+void
+MIRGraph::renumberBlocksAfter(MBasicBlock* at)
+{
+ MBasicBlockIterator iter = begin(at);
+ iter++;
+
+ uint32_t id = at->id();
+ for (; iter != end(); iter++)
+ iter->setId(++id);
+}
+
+void
+MIRGraph::removeBlocksAfter(MBasicBlock* start)
+{
+ MBasicBlockIterator iter(begin());
+ iter++;
+ while (iter != end()) {
+ MBasicBlock* block = *iter;
+ iter++;
+
+ if (block->id() <= start->id())
+ continue;
+
+ removeBlock(block);
+ }
+}
+
+void
+MIRGraph::removeBlock(MBasicBlock* block)
+{
+    // Remove a block from the graph. It will also clean up the block.
+
+ if (block == osrBlock_)
+ osrBlock_ = nullptr;
+
+ if (returnAccumulator_) {
+ size_t i = 0;
+ while (i < returnAccumulator_->length()) {
+ if ((*returnAccumulator_)[i] == block)
+ returnAccumulator_->erase(returnAccumulator_->begin() + i);
+ else
+ i++;
+ }
+ }
+
+ block->discardAllInstructions();
+ block->discardAllResumePoints();
+
+ // Note: phis are disconnected from the rest of the graph, but are not
+ // removed entirely. If the block being removed is a loop header then
+ // IonBuilder may need to access these phis to more quickly converge on the
+ // possible types in the graph. See IonBuilder::analyzeNewLoopTypes.
+ block->discardAllPhiOperands();
+
+ block->markAsDead();
+ blocks_.remove(block);
+ numBlocks_--;
+}
+
+void
+MIRGraph::removeBlockIncludingPhis(MBasicBlock* block)
+{
+ // removeBlock doesn't clear phis because of IonBuilder constraints. Here,
+ // we want to totally clear everything.
+ removeBlock(block);
+ block->discardAllPhis();
+}
+
+void
+MIRGraph::unmarkBlocks()
+{
+ for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++)
+ i->unmark();
+}
+
+MBasicBlock*
+MBasicBlock::New(MIRGraph& graph, BytecodeAnalysis* analysis, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site, Kind kind)
+{
+ MOZ_ASSERT(site->pc() != nullptr);
+
+ MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init())
+ return nullptr;
+
+ if (!block->inherit(graph.alloc(), analysis, pred, 0))
+ return nullptr;
+
+ return block;
+}
+
+MBasicBlock*
+MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site, Kind kind, uint32_t popped)
+{
+ MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init())
+ return nullptr;
+
+ if (!block->inherit(graph.alloc(), nullptr, pred, popped))
+ return nullptr;
+
+ return block;
+}
+
+MBasicBlock*
+MBasicBlock::NewWithResumePoint(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site,
+ MResumePoint* resumePoint)
+{
+ MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, NORMAL);
+
+ MOZ_ASSERT(!resumePoint->instruction());
+ resumePoint->block()->discardResumePoint(resumePoint, RefType_None);
+ resumePoint->block_ = block;
+ block->addResumePoint(resumePoint);
+ block->entryResumePoint_ = resumePoint;
+
+ if (!block->init())
+ return nullptr;
+
+ if (!block->inheritResumePoint(pred))
+ return nullptr;
+
+ return block;
+}
+
+MBasicBlock*
+MBasicBlock::NewPendingLoopHeader(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site,
+ unsigned stackPhiCount)
+{
+ MOZ_ASSERT(site->pc() != nullptr);
+
+ MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
+ if (!block->init())
+ return nullptr;
+
+ if (!block->inherit(graph.alloc(), nullptr, pred, 0, stackPhiCount))
+ return nullptr;
+
+ return block;
+}
+
+MBasicBlock*
+MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred, size_t predEdgeIdx, MBasicBlock* succ)
+{
+ MBasicBlock* split = nullptr;
+ if (!succ->pc()) {
+        // The successor has no PC; this is a wasm compilation.
+ split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
+ if (!split)
+ return nullptr;
+ } else {
+        // The successor has a PC; this is an IonBuilder compilation.
+ MResumePoint* succEntry = succ->entryResumePoint();
+
+ BytecodeSite* site = new(graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
+ split = new(graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);
+
+ if (!split->init())
+ return nullptr;
+
+        // A split edge is used to simplify the graph, so that we avoid having
+        // a predecessor with multiple successors as well as a successor with
+        // multiple predecessors on the same edge. As instructions can be moved
+        // into this split-edge block, we need to give it a resume point. To do
+        // so, we copy the entry resume point of the successor and filter the
+        // phis to keep only the inputs coming from the current edge.
+
+ // Propagate the caller resume point from the inherited block.
+ split->callerResumePoint_ = succ->callerResumePoint();
+
+        // Split-edge blocks are created after the interpreter stack emulation.
+        // Thus, there is no need to create slots.
+ split->stackPosition_ = succEntry->stackDepth();
+
+ // Create a resume point using our initial stack position.
+ MResumePoint* splitEntry = new(graph.alloc()) MResumePoint(split, succEntry->pc(),
+ MResumePoint::ResumeAt);
+ if (!splitEntry->init(graph.alloc()))
+ return nullptr;
+ split->entryResumePoint_ = splitEntry;
+
+        // The target entry resume point might have phi operands; keep only the
+        // phi operands coming from our edge.
+ size_t succEdgeIdx = succ->indexForPredecessor(pred);
+
+ for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
+ MDefinition* def = succEntry->getOperand(i);
+ // This early in the pipeline, we have no recover instructions in
+ // any entry resume point.
+ MOZ_ASSERT_IF(def->block() == succ, def->isPhi());
+ if (def->block() == succ)
+ def = def->toPhi()->getOperand(succEdgeIdx);
+
+ splitEntry->initOperand(i, def);
+ }
+
+        // The New variant used for wasm already appends the predecessor, so
+        // this line cannot be moved below, where the rest of the graph is
+        // modified.
+ if (!split->predecessors_.append(pred))
+ return nullptr;
+ }
+
+ split->setLoopDepth(succ->loopDepth());
+
+ // Insert the split edge block in-between.
+ split->end(MGoto::New(graph.alloc(), succ));
+
+ graph.insertBlockAfter(pred, split);
+
+ pred->replaceSuccessor(predEdgeIdx, split);
+ succ->replacePredecessor(pred, split);
+ return split;
+}
+
+MBasicBlock*
+MBasicBlock::New(MIRGraph& graph, const CompileInfo& info, MBasicBlock* pred, Kind kind)
+{
+ BytecodeSite* site = new(graph.alloc()) BytecodeSite();
+ MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init())
+ return nullptr;
+
+ if (pred) {
+ block->stackPosition_ = pred->stackPosition_;
+
+ if (block->kind_ == PENDING_LOOP_HEADER) {
+ size_t nphis = block->stackPosition_;
+
+ size_t nfree = graph.phiFreeListLength();
+
+ TempAllocator& alloc = graph.alloc();
+ MPhi* phis = nullptr;
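+            // Allocate in a single array any phis that the graph's phi free
+            // list cannot supply.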
+ if (nphis > nfree) {
+ phis = alloc.allocateArray<MPhi>(nphis - nfree);
+ if (!phis)
+ return nullptr;
+ }
+
+ // Note: Phis are inserted in the same order as the slots.
+ for (size_t i = 0; i < nphis; i++) {
+ MDefinition* predSlot = pred->getSlot(i);
+
+ MOZ_ASSERT(predSlot->type() != MIRType::Value);
+
+ MPhi* phi;
+ if (i < nfree)
+ phi = graph.takePhiFromFreeList();
+ else
+ phi = phis + (i - nfree);
+ new(phi) MPhi(alloc, predSlot->type());
+
+ phi->addInlineInput(predSlot);
+
+                // Append the phi to the block and bind it to the slot.
+ block->addPhi(phi);
+ block->setSlot(i, phi);
+ }
+ } else {
+ block->copySlots(pred);
+ }
+
+ if (!block->predecessors_.append(pred))
+ return nullptr;
+ }
+
+ return block;
+}
+
+MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info, BytecodeSite* site, Kind kind)
+ : unreachable_(false),
+ graph_(graph),
+ info_(info),
+ predecessors_(graph.alloc()),
+ stackPosition_(info_.firstStackSlot()),
+ numDominated_(0),
+ pc_(site->pc()),
+ lir_(nullptr),
+ callerResumePoint_(nullptr),
+ entryResumePoint_(nullptr),
+ outerResumePoint_(nullptr),
+ successorWithPhis_(nullptr),
+ positionInPhiSuccessor_(0),
+ loopDepth_(0),
+ kind_(kind),
+ mark_(false),
+ immediatelyDominated_(graph.alloc()),
+ immediateDominator_(nullptr),
+ trackedSite_(site),
+ hitCount_(0),
+ hitState_(HitState::NotDefined)
+#if defined (JS_ION_PERF)
+ , lineno_(0u),
+ columnIndex_(0u)
+#endif
+{
+}
+
+bool
+MBasicBlock::init()
+{
+ return slots_.init(graph_.alloc(), info_.nslots());
+}
+
+bool
+MBasicBlock::increaseSlots(size_t num)
+{
+ return slots_.growBy(graph_.alloc(), num);
+}
+
+bool
+MBasicBlock::ensureHasSlots(size_t num)
+{
+ size_t depth = stackDepth() + num;
+ if (depth > nslots()) {
+ if (!increaseSlots(depth - nslots()))
+ return false;
+ }
+ return true;
+}
+
+void
+MBasicBlock::copySlots(MBasicBlock* from)
+{
+ MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
+
+ MDefinition** thisSlots = slots_.begin();
+ MDefinition** fromSlots = from->slots_.begin();
+ for (size_t i = 0, e = stackPosition_; i < e; ++i)
+ thisSlots[i] = fromSlots[i];
+}
+
+bool
+MBasicBlock::inherit(TempAllocator& alloc, BytecodeAnalysis* analysis, MBasicBlock* pred,
+ uint32_t popped, unsigned stackPhiCount)
+{
+ if (pred) {
+ stackPosition_ = pred->stackPosition_;
+ MOZ_ASSERT(stackPosition_ >= popped);
+ stackPosition_ -= popped;
+ if (kind_ != PENDING_LOOP_HEADER)
+ copySlots(pred);
+ } else {
+ uint32_t stackDepth = analysis->info(pc()).stackDepth;
+ stackPosition_ = info().firstStackSlot() + stackDepth;
+ MOZ_ASSERT(stackPosition_ >= popped);
+ stackPosition_ -= popped;
+ }
+
+ MOZ_ASSERT(info_.nslots() >= stackPosition_);
+ MOZ_ASSERT(!entryResumePoint_);
+
+ // Propagate the caller resume point from the inherited block.
+ callerResumePoint_ = pred ? pred->callerResumePoint() : nullptr;
+
+ // Create a resume point using our initial stack state.
+ entryResumePoint_ = new(alloc) MResumePoint(this, pc(), MResumePoint::ResumeAt);
+ if (!entryResumePoint_->init(alloc))
+ return false;
+
+ if (pred) {
+ if (!predecessors_.append(pred))
+ return false;
+
+ if (kind_ == PENDING_LOOP_HEADER) {
+ size_t i = 0;
+ for (i = 0; i < info().firstStackSlot(); i++) {
+ MPhi* phi = MPhi::New(alloc.fallible());
+ if (!phi)
+ return false;
+ phi->addInlineInput(pred->getSlot(i));
+ addPhi(phi);
+ setSlot(i, phi);
+ entryResumePoint()->initOperand(i, phi);
+ }
+
+ MOZ_ASSERT(stackPhiCount <= stackDepth());
+ MOZ_ASSERT(info().firstStackSlot() <= stackDepth() - stackPhiCount);
+
+ // Avoid creating new phis for stack values that aren't part of the
+ // loop. Note that for loop headers that can OSR, all values on the
+ // stack are part of the loop.
+ for (; i < stackDepth() - stackPhiCount; i++) {
+ MDefinition* val = pred->getSlot(i);
+ setSlot(i, val);
+ entryResumePoint()->initOperand(i, val);
+ }
+
+ for (; i < stackDepth(); i++) {
+ MPhi* phi = MPhi::New(alloc.fallible());
+ if (!phi)
+ return false;
+ phi->addInlineInput(pred->getSlot(i));
+ addPhi(phi);
+ setSlot(i, phi);
+ entryResumePoint()->initOperand(i, phi);
+ }
+ } else {
+ for (size_t i = 0; i < stackDepth(); i++)
+ entryResumePoint()->initOperand(i, getSlot(i));
+ }
+ } else {
+ /*
+ * Don't leave the operands uninitialized for the caller, as it may not
+ * initialize them later on.
+ */
+ for (size_t i = 0; i < stackDepth(); i++)
+ entryResumePoint()->clearOperand(i);
+ }
+
+ return true;
+}
+
+bool
+MBasicBlock::inheritResumePoint(MBasicBlock* pred)
+{
+ // Copy slots from the resume point.
+ stackPosition_ = entryResumePoint_->stackDepth();
+ for (uint32_t i = 0; i < stackPosition_; i++)
+ slots_[i] = entryResumePoint_->getOperand(i);
+
+ MOZ_ASSERT(info_.nslots() >= stackPosition_);
+ MOZ_ASSERT(kind_ != PENDING_LOOP_HEADER);
+ MOZ_ASSERT(pred != nullptr);
+
+ callerResumePoint_ = pred->callerResumePoint();
+
+ if (!predecessors_.append(pred))
+ return false;
+
+ return true;
+}
+
+void
+MBasicBlock::inheritSlots(MBasicBlock* parent)
+{
+ stackPosition_ = parent->stackPosition_;
+ copySlots(parent);
+}
+
+bool
+MBasicBlock::initEntrySlots(TempAllocator& alloc)
+{
+ // Remove the previous resume point.
+ discardResumePoint(entryResumePoint_);
+
+ // Create a resume point using our initial stack state.
+ entryResumePoint_ = MResumePoint::New(alloc, this, pc(), MResumePoint::ResumeAt);
+ if (!entryResumePoint_)
+ return false;
+ return true;
+}
+
+MDefinition*
+MBasicBlock::getSlot(uint32_t index)
+{
+ MOZ_ASSERT(index < stackPosition_);
+ return slots_[index];
+}
+
+void
+MBasicBlock::initSlot(uint32_t slot, MDefinition* ins)
+{
+ slots_[slot] = ins;
+ if (entryResumePoint())
+ entryResumePoint()->initOperand(slot, ins);
+}
+
+void
+MBasicBlock::shimmySlots(int discardDepth)
+{
+ // Move all slots above the given depth down by one,
+ // overwriting the MDefinition at discardDepth.
+
+ MOZ_ASSERT(discardDepth < 0);
+ MOZ_ASSERT(stackPosition_ + discardDepth >= info_.firstStackSlot());
+
+ for (int i = discardDepth; i < -1; i++)
+ slots_[stackPosition_ + i] = slots_[stackPosition_ + i + 1];
+
+ --stackPosition_;
+}
+
+bool
+MBasicBlock::linkOsrValues(MStart* start)
+{
+ MResumePoint* res = start->resumePoint();
+
+ for (uint32_t i = 0; i < stackDepth(); i++) {
+ MDefinition* def = slots_[i];
+ MInstruction* cloneRp = nullptr;
+ if (i == info().environmentChainSlot()) {
+ if (def->isOsrEnvironmentChain())
+ cloneRp = def->toOsrEnvironmentChain();
+ } else if (i == info().returnValueSlot()) {
+ if (def->isOsrReturnValue())
+ cloneRp = def->toOsrReturnValue();
+ } else if (info().hasArguments() && i == info().argsObjSlot()) {
+ MOZ_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
+ MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType::Undefined);
+ if (def->isOsrArgumentsObject())
+ cloneRp = def->toOsrArgumentsObject();
+ } else {
+ MOZ_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() || def->isConstant() ||
+ def->isParameter());
+
+ // A constant Undefined can show up here for an argument slot when
+ // the function has an arguments object, but the argument in
+ // question is stored on the scope chain.
+ MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType::Undefined);
+
+ if (def->isOsrValue())
+ cloneRp = def->toOsrValue();
+ else if (def->isGetArgumentsObjectArg())
+ cloneRp = def->toGetArgumentsObjectArg();
+ else if (def->isParameter())
+ cloneRp = def->toParameter();
+ }
+
+ if (cloneRp) {
+ MResumePoint* clone = MResumePoint::Copy(graph().alloc(), res);
+ if (!clone)
+ return false;
+ cloneRp->setResumePoint(clone);
+ }
+ }
+
+ return true;
+}
+
+void
+MBasicBlock::setSlot(uint32_t slot, MDefinition* ins)
+{
+ slots_[slot] = ins;
+}
+
+void
+MBasicBlock::setVariable(uint32_t index)
+{
+ MOZ_ASSERT(stackPosition_ > info_.firstStackSlot());
+ setSlot(index, slots_[stackPosition_ - 1]);
+}
+
+void
+MBasicBlock::setArg(uint32_t arg)
+{
+ setVariable(info_.argSlot(arg));
+}
+
+void
+MBasicBlock::setLocal(uint32_t local)
+{
+ setVariable(info_.localSlot(local));
+}
+
+void
+MBasicBlock::setSlot(uint32_t slot)
+{
+ setVariable(slot);
+}
+
+void
+MBasicBlock::rewriteSlot(uint32_t slot, MDefinition* ins)
+{
+ setSlot(slot, ins);
+}
+
+void
+MBasicBlock::rewriteAtDepth(int32_t depth, MDefinition* ins)
+{
+ MOZ_ASSERT(depth < 0);
+ MOZ_ASSERT(stackPosition_ + depth >= info_.firstStackSlot());
+ rewriteSlot(stackPosition_ + depth, ins);
+}
+
+void
+MBasicBlock::push(MDefinition* ins)
+{
+ MOZ_ASSERT(stackPosition_ < nslots());
+ slots_[stackPosition_++] = ins;
+}
+
+void
+MBasicBlock::pushVariable(uint32_t slot)
+{
+ push(slots_[slot]);
+}
+
+void
+MBasicBlock::pushArg(uint32_t arg)
+{
+ pushVariable(info_.argSlot(arg));
+}
+
+void
+MBasicBlock::pushLocal(uint32_t local)
+{
+ pushVariable(info_.localSlot(local));
+}
+
+void
+MBasicBlock::pushSlot(uint32_t slot)
+{
+ pushVariable(slot);
+}
+
+MDefinition*
+MBasicBlock::pop()
+{
+ MOZ_ASSERT(stackPosition_ > info_.firstStackSlot());
+ return slots_[--stackPosition_];
+}
+
+void
+MBasicBlock::popn(uint32_t n)
+{
+ MOZ_ASSERT(stackPosition_ - n >= info_.firstStackSlot());
+ MOZ_ASSERT(stackPosition_ >= stackPosition_ - n);
+ stackPosition_ -= n;
+}
+
+MDefinition*
+MBasicBlock::environmentChain()
+{
+ return getSlot(info().environmentChainSlot());
+}
+
+MDefinition*
+MBasicBlock::argumentsObject()
+{
+ return getSlot(info().argsObjSlot());
+}
+
+void
+MBasicBlock::setEnvironmentChain(MDefinition* scopeObj)
+{
+ setSlot(info().environmentChainSlot(), scopeObj);
+}
+
+void
+MBasicBlock::setArgumentsObject(MDefinition* argsObj)
+{
+ setSlot(info().argsObjSlot(), argsObj);
+}
+
+void
+MBasicBlock::pick(int32_t depth)
+{
+    // pick takes an element and moves it to the top.
+ // pick(-2):
+ // A B C D E
+ // A B D C E [ swapAt(-2) ]
+ // A B D E C [ swapAt(-1) ]
+ for (; depth < 0; depth++)
+ swapAt(depth);
+}
+
+void
+MBasicBlock::swapAt(int32_t depth)
+{
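+    // swapAt(-1) exchanges the two topmost slots:
+    //   A B C D E
+    //   A B C E D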
+ uint32_t lhsDepth = stackPosition_ + depth - 1;
+ uint32_t rhsDepth = stackPosition_ + depth;
+
+ MDefinition* temp = slots_[lhsDepth];
+ slots_[lhsDepth] = slots_[rhsDepth];
+ slots_[rhsDepth] = temp;
+}
+
+MDefinition*
+MBasicBlock::peek(int32_t depth)
+{
+ MOZ_ASSERT(depth < 0);
+ MOZ_ASSERT(stackPosition_ + depth >= info_.firstStackSlot());
+ return getSlot(stackPosition_ + depth);
+}
+
+void
+MBasicBlock::discardLastIns()
+{
+ discard(lastIns());
+}
+
+MConstant*
+MBasicBlock::optimizedOutConstant(TempAllocator& alloc)
+{
+    // If the first instruction is an MConstant(MagicValue(JS_OPTIMIZED_OUT)),
+ // then reuse it.
+ MInstruction* ins = *begin();
+ if (ins->type() == MIRType::MagicOptimizedOut)
+ return ins->toConstant();
+
+ MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
+ insertBefore(ins, constant);
+ return constant;
+}
+
+void
+MBasicBlock::addFromElsewhere(MInstruction* ins)
+{
+ MOZ_ASSERT(ins->block() != this);
+
+ // Remove |ins| from its containing block.
+ ins->block()->instructions_.remove(ins);
+
+ // Add it to this block.
+ add(ins);
+}
+
+void
+MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins)
+{
+ // Remove |ins| from the current block.
+ MOZ_ASSERT(ins->block() == this);
+ instructions_.remove(ins);
+
+ // Insert into new block, which may be distinct.
+ // Uses and operands are untouched.
+ ins->setBlock(at->block());
+ at->block()->instructions_.insertBefore(at, ins);
+ ins->setTrackedSite(at->trackedSite());
+}
+
+MInstruction*
+MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore)
+{
+ MOZ_ASSERT(graph().osrBlock() != this,
+ "We are not supposed to add any instruction in OSR blocks.");
+
+ // Beta nodes and interrupt checks are required to be located at the
+ // beginnings of basic blocks, so we must insert new instructions after any
+ // such instructions.
+ MInstructionIterator insertIter = !ins || ins->isPhi()
+ ? begin()
+ : begin(ins->toInstruction());
+ while (insertIter->isBeta() ||
+ insertIter->isInterruptCheck() ||
+ insertIter->isConstant() ||
+ insertIter->isParameter() ||
+ (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout()))
+ {
+ insertIter++;
+ }
+
+ return *insertIter;
+}
+
+void
+MBasicBlock::discardResumePoint(MResumePoint* rp, ReferencesType refType /* = RefType_Default */)
+{
+ if (refType & RefType_DiscardOperands)
+ rp->releaseUses();
+#ifdef DEBUG
+ MResumePointIterator iter = resumePointsBegin();
+ while (*iter != rp) {
+ // We should reach it before reaching the end.
+ MOZ_ASSERT(iter != resumePointsEnd());
+ iter++;
+ }
+ resumePoints_.removeAt(iter);
+#endif
+}
+
+void
+MBasicBlock::prepareForDiscard(MInstruction* ins, ReferencesType refType /* = RefType_Default */)
+{
+ // Only remove instructions from the same basic block. This is needed for
+ // correctly removing the resume point if any.
+ MOZ_ASSERT(ins->block() == this);
+
+ MResumePoint* rp = ins->resumePoint();
+ if ((refType & RefType_DiscardResumePoint) && rp)
+ discardResumePoint(rp, refType);
+
+    // We must only assert that the instruction has no uses after removing its
+    // resume point's operands, as the instruction could be captured by its own
+    // resume point.
+ MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());
+
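+    // The instruction's own operands are released only when both the
+    // RefType_DiscardOperands and RefType_DiscardInstruction flags are given.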
+ const uint32_t InstructionOperands = RefType_DiscardOperands | RefType_DiscardInstruction;
+ if ((refType & InstructionOperands) == InstructionOperands) {
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++)
+ ins->releaseOperand(i);
+ }
+
+ ins->setDiscarded();
+}
+
+void
+MBasicBlock::discard(MInstruction* ins)
+{
+ prepareForDiscard(ins);
+ instructions_.remove(ins);
+}
+
+void
+MBasicBlock::discardIgnoreOperands(MInstruction* ins)
+{
+#ifdef DEBUG
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++)
+ MOZ_ASSERT(!ins->hasOperand(i));
+#endif
+
+ prepareForDiscard(ins, RefType_IgnoreOperands);
+ instructions_.remove(ins);
+}
+
+void
+MBasicBlock::discardDef(MDefinition* at)
+{
+ if (at->isPhi())
+ at->block_->discardPhi(at->toPhi());
+ else
+ at->block_->discard(at->toInstruction());
+}
+
+void
+MBasicBlock::discardAllInstructions()
+{
+ MInstructionIterator iter = begin();
+ discardAllInstructionsStartingAt(iter);
+}
+
+void
+MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter)
+{
+ while (iter != end()) {
+        // Discard the instruction's operands and resume point operands, and
+        // flag the instruction as discarded. We do not assert that there are
+        // no uses, as blocks might be removed in reverse postorder.
+ MInstruction* ins = *iter++;
+ prepareForDiscard(ins, RefType_DefaultNoAssert);
+ instructions_.remove(ins);
+ }
+}
+
+void
+MBasicBlock::discardAllPhiOperands()
+{
+ for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++)
+ iter->removeAllOperands();
+
+ for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end(); pred++)
+ (*pred)->clearSuccessorWithPhis();
+}
+
+void
+MBasicBlock::discardAllPhis()
+{
+ discardAllPhiOperands();
+ phis_.clear();
+}
+
+void
+MBasicBlock::discardAllResumePoints(bool discardEntry)
+{
+ if (outerResumePoint_)
+ clearOuterResumePoint();
+
+ if (discardEntry && entryResumePoint_)
+ clearEntryResumePoint();
+
+#ifdef DEBUG
+ if (!entryResumePoint()) {
+ MOZ_ASSERT(resumePointsEmpty());
+ } else {
+ MResumePointIterator iter(resumePointsBegin());
+ MOZ_ASSERT(iter != resumePointsEnd());
+ iter++;
+ MOZ_ASSERT(iter == resumePointsEnd());
+ }
+#endif
+}
+
+void
+MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins)
+{
+ MOZ_ASSERT(at->block() == this);
+ ins->setBlock(this);
+ graph().allocDefinitionId(ins);
+ instructions_.insertBefore(at, ins);
+ ins->setTrackedSite(at->trackedSite());
+}
+
+void
+MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins)
+{
+ MOZ_ASSERT(at->block() == this);
+ ins->setBlock(this);
+ graph().allocDefinitionId(ins);
+ instructions_.insertAfter(at, ins);
+ ins->setTrackedSite(at->trackedSite());
+}
+
+void
+MBasicBlock::insertAtEnd(MInstruction* ins)
+{
+ if (hasLastIns())
+ insertBefore(lastIns(), ins);
+ else
+ add(ins);
+}
+
+void
+MBasicBlock::add(MInstruction* ins)
+{
+ MOZ_ASSERT(!hasLastIns());
+ ins->setBlock(this);
+ graph().allocDefinitionId(ins);
+ instructions_.pushBack(ins);
+ ins->setTrackedSite(trackedSite_);
+}
+
+void
+MBasicBlock::end(MControlInstruction* ins)
+{
+ MOZ_ASSERT(!hasLastIns()); // Existing control instructions should be removed first.
+ MOZ_ASSERT(ins);
+ add(ins);
+}
+
+void
+MBasicBlock::addPhi(MPhi* phi)
+{
+ phis_.pushBack(phi);
+ phi->setBlock(this);
+ graph().allocDefinitionId(phi);
+}
+
+void
+MBasicBlock::discardPhi(MPhi* phi)
+{
+ MOZ_ASSERT(!phis_.empty());
+
+ phi->removeAllOperands();
+ phi->setDiscarded();
+
+ phis_.remove(phi);
+
+ if (phis_.empty()) {
+ for (MBasicBlock* pred : predecessors_)
+ pred->clearSuccessorWithPhis();
+ }
+}
+
+void
+MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins)
+{
+ // Find the previous resume point which would be used for bailing out.
+ MResumePoint* rp = nullptr;
+ for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
+ rp = iter->resumePoint();
+ if (rp)
+ break;
+ }
+
+ // If none, take the entry resume point.
+ if (!rp)
+ rp = entryResumePoint();
+
+    // The only blocks which do not have any entryResumePoint in Ion are the
+    // SplitEdge blocks. SplitEdge blocks only contain a Goto instruction
+    // before the Range Analysis phase. In adjustInputs, we only manipulate
+    // instructions which have a TypePolicy. So, as a Goto has no operands and
+    // no type policy, the entry resume point should exist.
+ MOZ_ASSERT(rp);
+
+    // Flag all operands as being potentially used.
+ while (rp) {
+ for (size_t i = 0, end = rp->numOperands(); i < end; i++)
+ rp->getOperand(i)->setUseRemovedUnchecked();
+ rp = rp->caller();
+ }
+}
+
+bool
+MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred)
+{
+ return addPredecessorPopN(alloc, pred, 0);
+}
+
+bool
+MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, uint32_t popped)
+{
+ MOZ_ASSERT(pred);
+ MOZ_ASSERT(predecessors_.length() > 0);
+
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);
+
+ for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
+ MDefinition* mine = getSlot(i);
+ MDefinition* other = pred->getSlot(i);
+
+ if (mine != other) {
+ // If the current instruction is a phi, and it was created in this
+ // basic block, then we have already placed this phi and should
+ // instead append to its operands.
+ if (mine->isPhi() && mine->block() == this) {
+ MOZ_ASSERT(predecessors_.length());
+ if (!mine->toPhi()->addInputSlow(other))
+ return false;
+ } else {
+ // Otherwise, create a new phi node.
+ MPhi* phi;
+ if (mine->type() == other->type())
+ phi = MPhi::New(alloc.fallible(), mine->type());
+ else
+ phi = MPhi::New(alloc.fallible());
+ if (!phi)
+ return false;
+ addPhi(phi);
+
+ // Prime the phi for each predecessor, so input(x) comes from
+ // predecessor(x).
+ if (!phi->reserveLength(predecessors_.length() + 1))
+ return false;
+
+ for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds; ++j) {
+ MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
+ phi->addInput(mine);
+ }
+ phi->addInput(other);
+
+ setSlot(i, phi);
+ if (entryResumePoint())
+ entryResumePoint()->replaceOperand(i, phi);
+ }
+ }
+ }
+
+ return predecessors_.append(pred);
+}
+
+void
+MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred, MBasicBlock* existingPred)
+{
+ MOZ_ASSERT(pred);
+ MOZ_ASSERT(predecessors_.length() > 0);
+
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(!pred->successorWithPhis());
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ if (!phisEmpty()) {
+ size_t existingPosition = indexForPredecessor(existingPred);
+ for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
+ if (!iter->addInputSlow(iter->getOperand(existingPosition)))
+ oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
+ }
+ }
+
+ if (!predecessors_.append(pred))
+ oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
+}
+
+bool
+MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred)
+{
+ // Predecessors must be finished.
+ MOZ_ASSERT(pred && pred->hasLastIns());
+ return predecessors_.append(pred);
+}
+
+bool
+MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child)
+{
+ return immediatelyDominated_.append(child);
+}
+
+void
+MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child)
+{
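+    // Replace the child with the last element and pop it; the order of the
+    // immediately-dominated set is not significant.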
+ for (size_t i = 0; ; ++i) {
+ MOZ_ASSERT(i < immediatelyDominated_.length(),
+ "Dominated block to remove not present");
+ if (immediatelyDominated_[i] == child) {
+ immediatelyDominated_[i] = immediatelyDominated_.back();
+ immediatelyDominated_.popBack();
+ return;
+ }
+ }
+}
+
+void
+MBasicBlock::assertUsesAreNotWithin(MUseIterator use, MUseIterator end)
+{
+#ifdef DEBUG
+ for (; use != end; use++) {
+ MOZ_ASSERT_IF(use->consumer()->isDefinition(),
+ use->consumer()->toDefinition()->block()->id() < id());
+ }
+#endif
+}
+
+AbortReason
+MBasicBlock::setBackedge(TempAllocator& alloc, MBasicBlock* pred)
+{
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(hasLastIns());
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());
+
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ bool hadTypeChange = false;
+
+ // Add exit definitions to each corresponding phi at the entry.
+ if (!inheritPhisFromBackedge(alloc, pred, &hadTypeChange))
+ return AbortReason_Alloc;
+
+ if (hadTypeChange) {
+ for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++)
+ phi->removeOperand(phi->numOperands() - 1);
+ return AbortReason_Disable;
+ }
+
+ // We are now a loop header proper
+ kind_ = LOOP_HEADER;
+
+ if (!predecessors_.append(pred))
+ return AbortReason_Alloc;
+
+ return AbortReason_NoAbort;
+}
+
+bool
+MBasicBlock::setBackedgeWasm(MBasicBlock* pred)
+{
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(hasLastIns());
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(stackDepth() == pred->stackDepth());
+
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ // Add exit definitions to each corresponding phi at the entry.
+ // Note: Phis are inserted in the same order as the slots. (see
+ // MBasicBlock::New)
+ size_t slot = 0;
+ for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
+ MPhi* entryDef = *phi;
+ MDefinition* exitDef = pred->getSlot(slot);
+
+ // Assert that we already placed phis for each slot.
+ MOZ_ASSERT(entryDef->block() == this);
+
+ // Assert that the phi already has the correct type.
+ MOZ_ASSERT(entryDef->type() == exitDef->type());
+ MOZ_ASSERT(entryDef->type() != MIRType::Value);
+
+ if (entryDef == exitDef) {
+ // If the exit def is the same as the entry def, make a redundant
+ // phi. Since loop headers have exactly two incoming edges, we
+ // know that that's just the first input.
+ //
+ // Note that we eliminate later rather than now, to avoid any
+ // weirdness around pending continue edges which might still hold
+ // onto phis.
+ exitDef = entryDef->getOperand(0);
+ }
+
+ // Phis always have room for 2 operands, so this can't fail.
+ MOZ_ASSERT(phi->numOperands() == 1);
+ entryDef->addInlineInput(exitDef);
+
+ MOZ_ASSERT(slot < pred->stackDepth());
+ setSlot(slot, entryDef);
+ }
+
+ // We are now a loop header proper
+ kind_ = LOOP_HEADER;
+
+ return predecessors_.append(pred);
+}
+
+void
+MBasicBlock::clearLoopHeader()
+{
+ MOZ_ASSERT(isLoopHeader());
+ kind_ = NORMAL;
+}
+
+void
+MBasicBlock::setLoopHeader(MBasicBlock* newBackedge)
+{
+ MOZ_ASSERT(!isLoopHeader());
+ kind_ = LOOP_HEADER;
+
+ size_t numPreds = numPredecessors();
+ MOZ_ASSERT(numPreds != 0);
+
+ size_t lastIndex = numPreds - 1;
+ size_t oldIndex = 0;
+ for (; ; ++oldIndex) {
+ MOZ_ASSERT(oldIndex < numPreds);
+ MBasicBlock* pred = getPredecessor(oldIndex);
+ if (pred == newBackedge)
+ break;
+ }
+
+ // Set the loop backedge to be the last element in predecessors_.
+ Swap(predecessors_[oldIndex], predecessors_[lastIndex]);
+
+ // If we have phis, reorder their operands accordingly.
+ if (!phisEmpty()) {
+ getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
+ getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
+ for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
+ MPhi* phi = *iter;
+ MDefinition* last = phi->getOperand(oldIndex);
+ MDefinition* old = phi->getOperand(lastIndex);
+ phi->replaceOperand(oldIndex, old);
+ phi->replaceOperand(lastIndex, last);
+ }
+ }
+
+ MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
+ MOZ_ASSERT(backedge() == newBackedge);
+}
+
+size_t
+MBasicBlock::numSuccessors() const
+{
+ MOZ_ASSERT(lastIns());
+ return lastIns()->numSuccessors();
+}
+
+MBasicBlock*
+MBasicBlock::getSuccessor(size_t index) const
+{
+ MOZ_ASSERT(lastIns());
+ return lastIns()->getSuccessor(index);
+}
+
+size_t
+MBasicBlock::getSuccessorIndex(MBasicBlock* block) const
+{
+ MOZ_ASSERT(lastIns());
+ for (size_t i = 0; i < numSuccessors(); i++) {
+ if (getSuccessor(i) == block)
+ return i;
+ }
+ MOZ_CRASH("Invalid successor");
+}
+
+size_t
+MBasicBlock::getPredecessorIndex(MBasicBlock* block) const
+{
+ for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
+ if (getPredecessor(i) == block)
+ return i;
+ }
+ MOZ_CRASH("Invalid predecessor");
+}
+
+void
+MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split)
+{
+ MOZ_ASSERT(lastIns());
+
+ // Note, during split-critical-edges, successors-with-phis is not yet set.
+ // During PAA, this case is handled before we enter.
+ MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));
+
+ lastIns()->replaceSuccessor(pos, split);
+}
+
+void
+MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split)
+{
+ for (size_t i = 0; i < numPredecessors(); i++) {
+ if (getPredecessor(i) == old) {
+ predecessors_[i] = split;
+
+#ifdef DEBUG
+ // The same block should not appear twice in the predecessor list.
+ for (size_t j = i; j < numPredecessors(); j++)
+ MOZ_ASSERT(predecessors_[j] != old);
+#endif
+
+ return;
+ }
+ }
+
+ MOZ_CRASH("predecessor was not found");
+}
+
+void
+MBasicBlock::clearDominatorInfo()
+{
+ setImmediateDominator(nullptr);
+ immediatelyDominated_.clear();
+ numDominated_ = 0;
+}
+
+void
+MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred, size_t predIndex)
+{
+ // If we're removing the last backedge, this is no longer a loop.
+ if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred)
+ clearLoopHeader();
+
+ // Adjust phis. Note that this can leave redundant phis behind.
+ // Don't adjust successorWithPhis() if we haven't constructed this
+ // information yet.
+ if (pred->successorWithPhis()) {
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
+ pred->clearSuccessorWithPhis();
+ for (size_t j = predIndex+1; j < numPredecessors(); j++)
+ getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
+ }
+
+ // Remove from pred list.
+ predecessors_.erase(predecessors_.begin() + predIndex);
+}
+
+void
+MBasicBlock::removePredecessor(MBasicBlock* pred)
+{
+ size_t predIndex = getPredecessorIndex(pred);
+
+ // Remove the phi operands.
+ for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter)
+ iter->removeOperand(predIndex);
+
+ // Now we can call the underlying function, which expects that phi
+ // operands have been removed.
+ removePredecessorWithoutPhiOperands(pred, predIndex);
+}
+
+void
+MBasicBlock::inheritPhis(MBasicBlock* header)
+{
+ MResumePoint* headerRp = header->entryResumePoint();
+ size_t stackDepth = headerRp->stackDepth();
+ for (size_t slot = 0; slot < stackDepth; slot++) {
+ MDefinition* exitDef = getSlot(slot);
+ MDefinition* loopDef = headerRp->getOperand(slot);
+ if (loopDef->block() != header) {
+ MOZ_ASSERT(loopDef->block()->id() < header->id());
+ MOZ_ASSERT(loopDef == exitDef);
+ continue;
+ }
+
+ // Phis are allocated by NewPendingLoopHeader.
+ MPhi* phi = loopDef->toPhi();
+ MOZ_ASSERT(phi->numOperands() == 2);
+
+ // The entry definition is always the leftmost input to the phi.
+ MDefinition* entryDef = phi->getOperand(0);
+
+ if (entryDef != exitDef)
+ continue;
+
+ // If the entryDef is the same as exitDef, then we must propagate the
+ // phi down to this successor. This chance was missed as part of
+ // setBackedge() because exits are not captured in resume points.
+ setSlot(slot, phi);
+ }
+}
+
+bool
+MBasicBlock::inheritPhisFromBackedge(TempAllocator& alloc, MBasicBlock* backedge, bool* hadTypeChange)
+{
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ size_t stackDepth = entryResumePoint()->stackDepth();
+ for (size_t slot = 0; slot < stackDepth; slot++) {
+ // Get the value stack-slot of the back edge.
+ MDefinition* exitDef = backedge->getSlot(slot);
+
+ // Get the value of the loop header.
+ MDefinition* loopDef = entryResumePoint()->getOperand(slot);
+ if (loopDef->block() != this) {
+            // If we are finishing a pending loop header, then we need to ensure
+            // that all operands are phis. This is usually the case, except for
+            // objects/arrays built with generators, in which case we share the
+            // same allocations across all blocks.
+ MOZ_ASSERT(loopDef->block()->id() < id());
+ MOZ_ASSERT(loopDef == exitDef);
+ continue;
+ }
+
+ // Phis are allocated by NewPendingLoopHeader.
+ MPhi* entryDef = loopDef->toPhi();
+ MOZ_ASSERT(entryDef->block() == this);
+
+ if (entryDef == exitDef) {
+ // If the exit def is the same as the entry def, make a redundant
+ // phi. Since loop headers have exactly two incoming edges, we
+ // know that that's just the first input.
+ //
+ // Note that we eliminate later rather than now, to avoid any
+ // weirdness around pending continue edges which might still hold
+ // onto phis.
+ exitDef = entryDef->getOperand(0);
+ }
+
+ bool typeChange = false;
+
+ if (!entryDef->addInputSlow(exitDef))
+ return false;
+ if (!entryDef->checkForTypeChange(alloc, exitDef, &typeChange))
+ return false;
+ *hadTypeChange |= typeChange;
+ setSlot(slot, entryDef);
+ }
+
+ return true;
+}
+
+bool
+MBasicBlock::specializePhis(TempAllocator& alloc)
+{
+ for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
+ MPhi* phi = *iter;
+ if (!phi->specializeType(alloc))
+ return false;
+ }
+ return true;
+}
+
+MTest*
+MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection)
+{
+ *pdirection = FALSE_BRANCH;
+
+ if (numPredecessors() != 1)
+ return nullptr;
+
+ MBasicBlock* dom = immediateDominator();
+ if (dom != getPredecessor(0))
+ return nullptr;
+
+ // Look for a trailing MTest branching to this block.
+ MInstruction* ins = dom->lastIns();
+ if (ins->isTest()) {
+ MTest* test = ins->toTest();
+
+ MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
+ if (test->ifTrue() == this && test->ifFalse() == this)
+ return nullptr;
+
+ *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
+ return test;
+ }
+
+ return nullptr;
+}
+
+MBasicBlock::BackupPoint::BackupPoint(MBasicBlock* current)
+ : current_(current),
+ lastBlock_(nullptr),
+ lastIns_(current->hasAnyIns() ? *current->rbegin() : nullptr),
+ stackPosition_(current->stackDepth()),
+ slots_()
+#ifdef DEBUG
+ , lastPhi_(!current->phisEmpty() ? *current->phis_.rbegin() : nullptr),
+ predecessorsCheckSum_(computePredecessorsCheckSum(current)),
+ instructionsCheckSum_(computeInstructionsCheckSum(current)),
+ id_(current->id()),
+ callerResumePoint_(current->callerResumePoint()),
+ entryResumePoint_(current->entryResumePoint())
+#endif
+{
+    // The block is not yet jumping into a block of an inlined function.
+ MOZ_ASSERT(current->outerResumePoint_ == nullptr);
+
+ // The CFG reconstruction might add blocks and move them around.
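+    // Remember the block with the highest id so that restore() can discard
+    // any blocks added after this point.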
+ uint32_t lastBlockId = 0;
+ PostorderIterator e = current->graph().poEnd();
+ for (PostorderIterator b = current->graph().poBegin(); b != e; ++b) {
+ if (lastBlockId <= b->id()) {
+ lastBlock_ = *b;
+ lastBlockId = b->id();
+ }
+ }
+ MOZ_ASSERT(lastBlock_);
+}
+
+bool
+MBasicBlock::BackupPoint::init(TempAllocator& alloc)
+{
+ if (!slots_.init(alloc, stackPosition_))
+ return false;
+ for (size_t i = 0, e = stackPosition_; i < e; ++i)
+ slots_[i] = current_->slots_[i];
+ return true;
+}
+
+#ifdef DEBUG
+uintptr_t
+MBasicBlock::BackupPoint::computePredecessorsCheckSum(MBasicBlock* block)
+{
+ uintptr_t hash = 0;
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ uintptr_t data = reinterpret_cast<uintptr_t>(pred);
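+        // sdbm-style mixing: equivalent to hash * 65599 + data.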
+ hash = data + (hash << 6) + (hash << 16) - hash;
+ }
+ return hash;
+}
+
+HashNumber
+MBasicBlock::BackupPoint::computeInstructionsCheckSum(MBasicBlock* block)
+{
+ HashNumber h = 0;
+ MOZ_ASSERT_IF(lastIns_, lastIns_->block() == block);
+ for (MInstructionIterator ins = block->begin(); ins != block->end(); ++ins) {
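+        // Combine each instruction's value hash with a Jenkins
+        // one-at-a-time style mixing step.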
+ h += ins->valueHash();
+ h += h << 10;
+ h ^= h >> 6;
+ }
+ return h;
+}
+#endif
+
+MBasicBlock*
+MBasicBlock::BackupPoint::restore()
+{
+ // No extra Phi got added.
+ MOZ_ASSERT((!current_->phisEmpty() ? *current_->phis_.rbegin() : nullptr) == lastPhi_);
+
+ MOZ_ASSERT_IF(lastIns_, lastIns_->block() == current_);
+ MOZ_ASSERT_IF(lastIns_, !lastIns_->isDiscarded());
+ MInstructionIterator lastIns(lastIns_ ? ++(current_->begin(lastIns_)) : current_->begin());
+ current_->discardAllInstructionsStartingAt(lastIns);
+ current_->clearOuterResumePoint();
+
+ MOZ_ASSERT(current_->slots_.length() >= stackPosition_);
+ if (current_->stackPosition_ != stackPosition_)
+ current_->setStackDepth(stackPosition_);
+ for (size_t i = 0, e = stackPosition_; i < e; ++i)
+ current_->slots_[i] = slots_[i];
+
+ MOZ_ASSERT(current_->id() == id_);
+ MOZ_ASSERT(predecessorsCheckSum_ == computePredecessorsCheckSum(current_));
+ MOZ_ASSERT(instructionsCheckSum_ == computeInstructionsCheckSum(current_));
+ MOZ_ASSERT(current_->callerResumePoint() == callerResumePoint_);
+ MOZ_ASSERT(current_->entryResumePoint() == entryResumePoint_);
+
+ current_->graph().removeBlocksAfter(lastBlock_);
+
+ return current_;
+}
+
+void
+MBasicBlock::dumpStack(GenericPrinter& out)
+{
+#ifdef DEBUG
+ out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
+ out.printf("-------------------------------------------\n");
+ for (uint32_t i = 0; i < stackPosition_; i++) {
+ out.printf(" %-3d", i);
+ out.printf(" %-16p\n", (void*)slots_[i]);
+ }
+#endif
+}
+
+void
+MBasicBlock::dumpStack()
+{
+ Fprinter out(stderr);
+ dumpStack(out);
+ out.finish();
+}
+
+void
+MIRGraph::dump(GenericPrinter& out)
+{
+#ifdef DEBUG
+ for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
+ iter->dump(out);
+ out.printf("\n");
+ }
+#endif
+}
+
+void
+MIRGraph::dump()
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+void
+MBasicBlock::dump(GenericPrinter& out)
+{
+#ifdef DEBUG
+ out.printf("block%u:%s%s%s\n", id(),
+ isLoopHeader() ? " (loop header)" : "",
+ unreachable() ? " (unreachable)" : "",
+ isMarked() ? " (marked)" : "");
+ if (MResumePoint* resume = entryResumePoint())
+ resume->dump(out);
+ for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++)
+ iter->dump(out);
+ for (MInstructionIterator iter(begin()); iter != end(); iter++)
+ iter->dump(out);
+#endif
+}
+
+void
+MBasicBlock::dump()
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
diff --git a/js/src/jit/MIRGraph.h b/js/src/jit/MIRGraph.h
new file mode 100644
index 000000000..b986218f4
--- /dev/null
+++ b/js/src/jit/MIRGraph.h
@@ -0,0 +1,1060 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MIRGraph_h
+#define jit_MIRGraph_h
+
+// This file declares the data structures used to build a control-flow graph
+// containing MIR.
+
+#include "jit/FixedList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/MIR.h"
+
+namespace js {
+namespace jit {
+
+class BytecodeAnalysis;
+class MBasicBlock;
+class MIRGraph;
+class MStart;
+
+class MDefinitionIterator;
+
+typedef InlineListIterator<MInstruction> MInstructionIterator;
+typedef InlineListReverseIterator<MInstruction> MInstructionReverseIterator;
+typedef InlineListIterator<MPhi> MPhiIterator;
+
+#ifdef DEBUG
+typedef InlineForwardListIterator<MResumePoint> MResumePointIterator;
+#endif
+
+class LBlock;
+
+class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
+{
+ public:
+ enum Kind {
+ NORMAL,
+ PENDING_LOOP_HEADER,
+ LOOP_HEADER,
+ SPLIT_EDGE,
+ DEAD
+ };
+
+ private:
+ MBasicBlock(MIRGraph& graph, const CompileInfo& info, BytecodeSite* site, Kind kind);
+ MOZ_MUST_USE bool init();
+ void copySlots(MBasicBlock* from);
+ MOZ_MUST_USE bool inherit(TempAllocator& alloc, BytecodeAnalysis* analysis, MBasicBlock* pred,
+ uint32_t popped, unsigned stackPhiCount = 0);
+ MOZ_MUST_USE bool inheritResumePoint(MBasicBlock* pred);
+ void assertUsesAreNotWithin(MUseIterator use, MUseIterator end);
+
+ // This block cannot be reached by any means.
+ bool unreachable_;
+
+ // Pushes a copy of a local variable or argument.
+ void pushVariable(uint32_t slot);
+
+ // Sets a variable slot to the top of the stack, correctly creating copies
+ // as needed.
+ void setVariable(uint32_t slot);
+
+ enum ReferencesType {
+ RefType_None = 0,
+
+ // Assert that the instruction is unused.
+ RefType_AssertNoUses = 1 << 0,
+
+        // Discard the operands of the resume point / instruction if the
+        // corresponding flags below are given too.
+ RefType_DiscardOperands = 1 << 1,
+ RefType_DiscardResumePoint = 1 << 2,
+ RefType_DiscardInstruction = 1 << 3,
+
+ // Discard operands of the instruction and its resume point.
+ RefType_DefaultNoAssert = RefType_DiscardOperands |
+ RefType_DiscardResumePoint |
+ RefType_DiscardInstruction,
+
+ // Discard everything and assert that the instruction is not used.
+ RefType_Default = RefType_AssertNoUses | RefType_DefaultNoAssert,
+
+ // Discard resume point operands only, without discarding the operands
+ // of the current instruction. Asserts that the instruction is unused.
+ RefType_IgnoreOperands = RefType_AssertNoUses |
+ RefType_DiscardOperands |
+ RefType_DiscardResumePoint
+ };
+
+ void discardResumePoint(MResumePoint* rp, ReferencesType refType = RefType_Default);
+
+    // Remove all references to an instruction such that it can be removed from
+    // the list of instructions, without keeping any dangling pointers to it.
+    // This includes the operands of the instruction, and its resume point if
+    // present.
+ void prepareForDiscard(MInstruction* ins, ReferencesType refType = RefType_Default);
+
+ public:
+ ///////////////////////////////////////////////////////
+ ////////// BEGIN GRAPH BUILDING INSTRUCTIONS //////////
+ ///////////////////////////////////////////////////////
+
+ // Creates a new basic block for a MIR generator. If |pred| is not nullptr,
+ // its slots and stack depth are initialized from |pred|.
+ static MBasicBlock* New(MIRGraph& graph, BytecodeAnalysis* analysis, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site, Kind kind);
+ static MBasicBlock* New(MIRGraph& graph, const CompileInfo& info, MBasicBlock* pred, Kind kind);
+ static MBasicBlock* NewPopN(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site, Kind kind, uint32_t popn);
+ static MBasicBlock* NewWithResumePoint(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site,
+ MResumePoint* resumePoint);
+ static MBasicBlock* NewPendingLoopHeader(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site,
+ unsigned loopStateSlots);
+ static MBasicBlock* NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
+ size_t predEdgeIdx, MBasicBlock* succ);
+
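+    // This check relies on dominated blocks being numbered with consecutive
+    // domIndex values starting at domIndex(); the unsigned subtraction also
+    // rejects blocks whose domIndex is smaller than ours.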
+ bool dominates(const MBasicBlock* other) const {
+ return other->domIndex() - domIndex() < numDominated();
+ }
+
+ void setId(uint32_t id) {
+ id_ = id;
+ }
+
+ // Mark this block (and only this block) as unreachable.
+ void setUnreachable() {
+ MOZ_ASSERT(!unreachable_);
+ setUnreachableUnchecked();
+ }
+ void setUnreachableUnchecked() {
+ unreachable_ = true;
+ }
+ bool unreachable() const {
+ return unreachable_;
+ }
+ // Move the definition to the top of the stack.
+ void pick(int32_t depth);
+
+    // Exchange the stack slot at the given depth with the one just below it.
+ void swapAt(int32_t depth);
+
+ // Gets the instruction associated with various slot types.
+ MDefinition* peek(int32_t depth);
+
+ MDefinition* environmentChain();
+ MDefinition* argumentsObject();
+
+ // Increase the number of slots available
+ MOZ_MUST_USE bool increaseSlots(size_t num);
+ MOZ_MUST_USE bool ensureHasSlots(size_t num);
+
+ // Initializes a slot value; must not be called for normal stack
+ // operations, as it will not create new SSA names for copies.
+ void initSlot(uint32_t index, MDefinition* ins);
+
+ // Discard the slot at the given depth, lowering all slots above.
+ void shimmySlots(int discardDepth);
+
+ // In an OSR block, set all MOsrValues to use the MResumePoint attached to
+ // the MStart.
+ MOZ_MUST_USE bool linkOsrValues(MStart* start);
+
+ // Sets the instruction associated with various slot types. The
+ // instruction must lie at the top of the stack.
+ void setLocal(uint32_t local);
+ void setArg(uint32_t arg);
+ void setSlot(uint32_t slot);
+ void setSlot(uint32_t slot, MDefinition* ins);
+
+ // Rewrites a slot directly, bypassing the stack transition. This should
+ // not be used under most circumstances.
+ void rewriteSlot(uint32_t slot, MDefinition* ins);
+
+ // Rewrites a slot based on its depth (same as argument to peek()).
+ void rewriteAtDepth(int32_t depth, MDefinition* ins);
+
+ // Tracks an instruction as being pushed onto the operand stack.
+ void push(MDefinition* ins);
+ void pushArg(uint32_t arg);
+ void pushLocal(uint32_t local);
+ void pushSlot(uint32_t slot);
+ void setEnvironmentChain(MDefinition* ins);
+ void setArgumentsObject(MDefinition* ins);
+
+ // Returns the top of the stack, then decrements the virtual stack pointer.
+ MDefinition* pop();
+ void popn(uint32_t n);
+
+ // Adds an instruction to this block's instruction list.
+ void add(MInstruction* ins);
+
+ // Marks the last instruction of the block; no further instructions
+ // can be added.
+ void end(MControlInstruction* ins);
+
+ // Adds a phi instruction, but does not set successorWithPhis.
+ void addPhi(MPhi* phi);
+
+ // Adds a resume point to this block.
+ void addResumePoint(MResumePoint* resume) {
+#ifdef DEBUG
+ resumePoints_.pushFront(resume);
+#endif
+ }
+
+ // Discard pre-allocated resume point.
+ void discardPreAllocatedResumePoint(MResumePoint* resume) {
+ MOZ_ASSERT(!resume->instruction());
+ discardResumePoint(resume);
+ }
+
+ // Adds a predecessor. Every predecessor must have the same exit stack
+ // depth as the entry state to this block. Adding a predecessor
+ // automatically creates phi nodes and rewrites uses as needed.
+ MOZ_MUST_USE bool addPredecessor(TempAllocator& alloc, MBasicBlock* pred);
+ MOZ_MUST_USE bool addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, uint32_t popped);
+
+ // Add a predecessor which won't introduce any new phis to this block.
+ // This may be called after the contents of this block have been built.
+ void addPredecessorSameInputsAs(MBasicBlock* pred, MBasicBlock* existingPred);
+
+ // Stranger utilities used for inlining.
+ MOZ_MUST_USE bool addPredecessorWithoutPhis(MBasicBlock* pred);
+ void inheritSlots(MBasicBlock* parent);
+ MOZ_MUST_USE bool initEntrySlots(TempAllocator& alloc);
+
+ // Replaces an edge for a given block with a new block. This is
+ // used for critical edge splitting.
+ //
+ // Note: If successorWithPhis is set, you must not be replacing it.
+ void replacePredecessor(MBasicBlock* old, MBasicBlock* split);
+ void replaceSuccessor(size_t pos, MBasicBlock* split);
+
+ // Removes `pred` from the predecessor list. If this block defines phis,
+ // removes the entry for `pred` and updates the indices of later entries.
+ // This may introduce redundant phis if the new block has fewer
+ // than two predecessors.
+ void removePredecessor(MBasicBlock* pred);
+
+ // A version of removePredecessor which expects that phi operands to
+ // |pred| have already been removed.
+ void removePredecessorWithoutPhiOperands(MBasicBlock* pred, size_t predIndex);
+
+ // Resets all the dominator info so that it can be recomputed.
+ void clearDominatorInfo();
+
+ // Sets a back edge. This places phi nodes and rewrites instructions within
+ // the current loop as necessary. If the backedge introduces new types for
+ // phis at the loop header, returns a disabling abort.
+ MOZ_MUST_USE AbortReason setBackedge(TempAllocator& alloc, MBasicBlock* block);
+ MOZ_MUST_USE bool setBackedgeWasm(MBasicBlock* block);
+
+ // Resets a LOOP_HEADER block to a NORMAL block. This is needed when
+ // optimizations remove the backedge.
+ void clearLoopHeader();
+
+ // Sets a block to a LOOP_HEADER block, with newBackedge as its backedge.
+ // This is needed when optimizations remove the normal entry to a loop
+ // with multiple entries.
+ void setLoopHeader(MBasicBlock* newBackedge);
+
+ // Propagates phis placed in a loop header down to this successor block.
+ void inheritPhis(MBasicBlock* header);
+
+    // Propagates backedge slots into the phi operands of the loop header.
+ MOZ_MUST_USE bool inheritPhisFromBackedge(TempAllocator& alloc, MBasicBlock* backedge,
+ bool* hadTypeChange);
+
+ // Compute the types for phis in this block according to their inputs.
+ MOZ_MUST_USE bool specializePhis(TempAllocator& alloc);
+
+ void insertBefore(MInstruction* at, MInstruction* ins);
+ void insertAfter(MInstruction* at, MInstruction* ins);
+
+ void insertAtEnd(MInstruction* ins);
+
+ // Add an instruction to this block, from elsewhere in the graph.
+ void addFromElsewhere(MInstruction* ins);
+
+ // Move an instruction. Movement may cross block boundaries.
+ void moveBefore(MInstruction* at, MInstruction* ins);
+
+ enum IgnoreTop {
+ IgnoreNone = 0,
+ IgnoreRecover = 1 << 0
+ };
+
+    // Locate the top of this block, where it is safe to insert a new
+    // instruction.
+ MInstruction* safeInsertTop(MDefinition* ins = nullptr, IgnoreTop ignore = IgnoreNone);
+
+ // Removes an instruction with the intention to discard it.
+ void discard(MInstruction* ins);
+ void discardLastIns();
+ void discardDef(MDefinition* def);
+ void discardAllInstructions();
+ void discardAllInstructionsStartingAt(MInstructionIterator iter);
+ void discardAllPhiOperands();
+ void discardAllPhis();
+ void discardAllResumePoints(bool discardEntry = true);
+
+ // Same as |void discard(MInstruction* ins)| but assuming that
+ // all operands are already discarded.
+ void discardIgnoreOperands(MInstruction* ins);
+
+ // Discards a phi instruction and updates predecessor successorWithPhis.
+ void discardPhi(MPhi* phi);
+
+    // Some instructions which guard against a MIRType value, or against a
+    // type expectation, should be considered as removing a potential branch
+    // where the guard does not hold. We need to register such instructions
+    // in order to do destructive optimizations correctly, such as Range
+    // Analysis.
+ void flagOperandsOfPrunedBranches(MInstruction* ins);
+
+ // Mark this block as having been removed from the graph.
+ void markAsDead() {
+ MOZ_ASSERT(kind_ != DEAD);
+ kind_ = DEAD;
+ }
+
+ ///////////////////////////////////////////////////////
+ /////////// END GRAPH BUILDING INSTRUCTIONS ///////////
+ ///////////////////////////////////////////////////////
+
+ MIRGraph& graph() {
+ return graph_;
+ }
+ const CompileInfo& info() const {
+ return info_;
+ }
+ jsbytecode* pc() const {
+ return pc_;
+ }
+ uint32_t nslots() const {
+ return slots_.length();
+ }
+ uint32_t id() const {
+ return id_;
+ }
+ uint32_t numPredecessors() const {
+ return predecessors_.length();
+ }
+
+ uint32_t domIndex() const {
+ MOZ_ASSERT(!isDead());
+ return domIndex_;
+ }
+ void setDomIndex(uint32_t d) {
+ domIndex_ = d;
+ }
+
+ MBasicBlock* getPredecessor(uint32_t i) const {
+ return predecessors_[i];
+ }
+ size_t indexForPredecessor(MBasicBlock* block) const {
+ // This should only be called before critical edge splitting.
+ MOZ_ASSERT(!block->successorWithPhis());
+
+ for (size_t i = 0; i < predecessors_.length(); i++) {
+ if (predecessors_[i] == block)
+ return i;
+ }
+ MOZ_CRASH();
+ }
+ bool hasAnyIns() const {
+ return !instructions_.empty();
+ }
+ bool hasLastIns() const {
+ return hasAnyIns() && instructions_.rbegin()->isControlInstruction();
+ }
+ MControlInstruction* lastIns() const {
+ MOZ_ASSERT(hasLastIns());
+ return instructions_.rbegin()->toControlInstruction();
+ }
+ // Find or allocate an optimized out constant.
+ MConstant* optimizedOutConstant(TempAllocator& alloc);
+ MPhiIterator phisBegin() const {
+ return phis_.begin();
+ }
+ MPhiIterator phisBegin(MPhi* at) const {
+ return phis_.begin(at);
+ }
+ MPhiIterator phisEnd() const {
+ return phis_.end();
+ }
+ bool phisEmpty() const {
+ return phis_.empty();
+ }
+#ifdef DEBUG
+ MResumePointIterator resumePointsBegin() const {
+ return resumePoints_.begin();
+ }
+ MResumePointIterator resumePointsEnd() const {
+ return resumePoints_.end();
+ }
+ bool resumePointsEmpty() const {
+ return resumePoints_.empty();
+ }
+#endif
+ MInstructionIterator begin() {
+ return instructions_.begin();
+ }
+ MInstructionIterator begin(MInstruction* at) {
+ MOZ_ASSERT(at->block() == this);
+ return instructions_.begin(at);
+ }
+ MInstructionIterator end() {
+ return instructions_.end();
+ }
+ MInstructionReverseIterator rbegin() {
+ return instructions_.rbegin();
+ }
+ MInstructionReverseIterator rbegin(MInstruction* at) {
+ MOZ_ASSERT(at->block() == this);
+ return instructions_.rbegin(at);
+ }
+ MInstructionReverseIterator rend() {
+ return instructions_.rend();
+ }
+ bool isLoopHeader() const {
+ return kind_ == LOOP_HEADER;
+ }
+ bool hasUniqueBackedge() const {
+ MOZ_ASSERT(isLoopHeader());
+ MOZ_ASSERT(numPredecessors() >= 2);
+ if (numPredecessors() == 2)
+ return true;
+ if (numPredecessors() == 3) // fixup block added by ValueNumbering phase.
+ return getPredecessor(1)->numPredecessors() == 0;
+ return false;
+ }
+ MBasicBlock* backedge() const {
+ MOZ_ASSERT(hasUniqueBackedge());
+ return getPredecessor(numPredecessors() - 1);
+ }
+ MBasicBlock* loopHeaderOfBackedge() const {
+ MOZ_ASSERT(isLoopBackedge());
+ return getSuccessor(numSuccessors() - 1);
+ }
+ MBasicBlock* loopPredecessor() const {
+ MOZ_ASSERT(isLoopHeader());
+ return getPredecessor(0);
+ }
+ bool isLoopBackedge() const {
+ if (!numSuccessors())
+ return false;
+ MBasicBlock* lastSuccessor = getSuccessor(numSuccessors() - 1);
+ return lastSuccessor->isLoopHeader() &&
+ lastSuccessor->hasUniqueBackedge() &&
+ lastSuccessor->backedge() == this;
+ }
+ bool isSplitEdge() const {
+ return kind_ == SPLIT_EDGE;
+ }
+ bool isDead() const {
+ return kind_ == DEAD;
+ }
+
+ uint32_t stackDepth() const {
+ return stackPosition_;
+ }
+ void setStackDepth(uint32_t depth) {
+ stackPosition_ = depth;
+ }
+ bool isMarked() const {
+ return mark_;
+ }
+ void mark() {
+ MOZ_ASSERT(!mark_, "Marking already-marked block");
+ markUnchecked();
+ }
+ void markUnchecked() {
+ mark_ = true;
+ }
+ void unmark() {
+        MOZ_ASSERT(mark_, "Unmarking unmarked block");
+ unmarkUnchecked();
+ }
+ void unmarkUnchecked() {
+ mark_ = false;
+ }
+
+ MBasicBlock* immediateDominator() const {
+ return immediateDominator_;
+ }
+
+ void setImmediateDominator(MBasicBlock* dom) {
+ immediateDominator_ = dom;
+ }
+
+ MTest* immediateDominatorBranch(BranchDirection* pdirection);
+
+ size_t numImmediatelyDominatedBlocks() const {
+ return immediatelyDominated_.length();
+ }
+
+ MBasicBlock* getImmediatelyDominatedBlock(size_t i) const {
+ return immediatelyDominated_[i];
+ }
+
+ MBasicBlock** immediatelyDominatedBlocksBegin() {
+ return immediatelyDominated_.begin();
+ }
+
+ MBasicBlock** immediatelyDominatedBlocksEnd() {
+ return immediatelyDominated_.end();
+ }
+
+ // Return the number of blocks dominated by this block. All blocks
+ // dominate at least themselves, so this will always be non-zero.
+ size_t numDominated() const {
+ MOZ_ASSERT(numDominated_ != 0);
+ return numDominated_;
+ }
+
+ void addNumDominated(size_t n) {
+ numDominated_ += n;
+ }
+
+ // Add |child| to this block's immediately-dominated set.
+ bool addImmediatelyDominatedBlock(MBasicBlock* child);
+
+ // Remove |child| from this block's immediately-dominated set.
+ void removeImmediatelyDominatedBlock(MBasicBlock* child);
+
+ // This function retrieves the internal instruction associated with a
+ // slot, and should not be used for normal stack operations. It is an
+ // internal helper that is also used to enhance spew.
+ MDefinition* getSlot(uint32_t index);
+
+ MResumePoint* entryResumePoint() const {
+ return entryResumePoint_;
+ }
+ void setEntryResumePoint(MResumePoint* rp) {
+ entryResumePoint_ = rp;
+ }
+ void clearEntryResumePoint() {
+ discardResumePoint(entryResumePoint_);
+ entryResumePoint_ = nullptr;
+ }
+ MResumePoint* outerResumePoint() const {
+ return outerResumePoint_;
+ }
+ void setOuterResumePoint(MResumePoint* outer) {
+ MOZ_ASSERT(!outerResumePoint_);
+ outerResumePoint_ = outer;
+ }
+ void clearOuterResumePoint() {
+ discardResumePoint(outerResumePoint_);
+ outerResumePoint_ = nullptr;
+ }
+ MResumePoint* callerResumePoint() const {
+ return callerResumePoint_;
+ }
+ void setCallerResumePoint(MResumePoint* caller) {
+ callerResumePoint_ = caller;
+ }
+ size_t numEntrySlots() const {
+ return entryResumePoint()->stackDepth();
+ }
+ MDefinition* getEntrySlot(size_t i) const {
+ MOZ_ASSERT(i < numEntrySlots());
+ return entryResumePoint()->getOperand(i);
+ }
+
+ LBlock* lir() const {
+ return lir_;
+ }
+ void assignLir(LBlock* lir) {
+ MOZ_ASSERT(!lir_);
+ lir_ = lir;
+ }
+
+ MBasicBlock* successorWithPhis() const {
+ return successorWithPhis_;
+ }
+ uint32_t positionInPhiSuccessor() const {
+ MOZ_ASSERT(successorWithPhis());
+ return positionInPhiSuccessor_;
+ }
+ void setSuccessorWithPhis(MBasicBlock* successor, uint32_t id) {
+ successorWithPhis_ = successor;
+ positionInPhiSuccessor_ = id;
+ }
+ void clearSuccessorWithPhis() {
+ successorWithPhis_ = nullptr;
+ }
+ size_t numSuccessors() const;
+ MBasicBlock* getSuccessor(size_t index) const;
+ size_t getSuccessorIndex(MBasicBlock*) const;
+ size_t getPredecessorIndex(MBasicBlock*) const;
+
+ void setLoopDepth(uint32_t loopDepth) {
+ loopDepth_ = loopDepth;
+ }
+ uint32_t loopDepth() const {
+ return loopDepth_;
+ }
+
+ bool strict() const {
+ return info_.script()->strict();
+ }
+
+ void dumpStack(GenericPrinter& out);
+ void dumpStack();
+
+ void dump(GenericPrinter& out);
+ void dump();
+
+ // Hit count
+ enum class HitState {
+        // No hit information is attached to this basic block.
+ NotDefined,
+
+ // The hit information is a raw counter. Note that due to inlining this
+ // counter is not guaranteed to be consistent over the graph.
+ Count,
+
+ // The hit information is a frequency, which is a form of normalized
+ // counter, where a hit-count can be compared against any previous block
+ // in the graph.
+ Frequency
+ };
+ HitState getHitState() const {
+ return hitState_;
+ }
+ void setHitCount(uint64_t count) {
+ hitCount_ = count;
+ hitState_ = HitState::Count;
+ }
+ uint64_t getHitCount() const {
+ MOZ_ASSERT(hitState_ == HitState::Count);
+ return hitCount_;
+ }
+
+    // Track bailouts by storing the current pc in the MIR instructions added
+    // at this point. This is also used for tracking calls and optimizations
+    // when profiling.
+ void updateTrackedSite(BytecodeSite* site) {
+ MOZ_ASSERT(site->tree() == trackedSite_->tree());
+ trackedSite_ = site;
+ }
+ BytecodeSite* trackedSite() const {
+ return trackedSite_;
+ }
+ jsbytecode* trackedPc() const {
+ return trackedSite_ ? trackedSite_->pc() : nullptr;
+ }
+ InlineScriptTree* trackedTree() const {
+ return trackedSite_ ? trackedSite_->tree() : nullptr;
+ }
+
+ // This class is used for reverting the graph within IonBuilder.
+ class BackupPoint {
+ friend MBasicBlock;
+
+ MBasicBlock* current_;
+ MBasicBlock* lastBlock_;
+ MInstruction* lastIns_;
+ uint32_t stackPosition_;
+ FixedList<MDefinition*> slots_;
+#ifdef DEBUG
+        // The following fields should remain identical during IonBuilder
+        // construction; they are used for assertions.
+ MPhi* lastPhi_;
+ uintptr_t predecessorsCheckSum_;
+ HashNumber instructionsCheckSum_;
+ uint32_t id_;
+ MResumePoint* callerResumePoint_;
+ MResumePoint* entryResumePoint_;
+
+ size_t computePredecessorsCheckSum(MBasicBlock* block);
+ HashNumber computeInstructionsCheckSum(MBasicBlock* block);
+#endif
+ public:
+ explicit BackupPoint(MBasicBlock* current);
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+ MBasicBlock* restore();
+ };
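+    // Usage sketch (the exact call sites live in IonBuilder; |current| and
+    // |alloc| below stand in for the caller's block pointer and allocator):
+    // snapshot the current block before a speculative build step, and restore
+    // it if the step has to be abandoned.
+    //
+    //   MBasicBlock::BackupPoint backup(current);
+    //   if (!backup.init(alloc))
+    //       return false;
+    //   // ... speculative graph building ...
+    //   current = backup.restore();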
+
+ friend BackupPoint;
+
+ private:
+ MIRGraph& graph_;
+ const CompileInfo& info_; // Each block originates from a particular script.
+ InlineList<MInstruction> instructions_;
+ Vector<MBasicBlock*, 1, JitAllocPolicy> predecessors_;
+ InlineList<MPhi> phis_;
+ FixedList<MDefinition*> slots_;
+ uint32_t stackPosition_;
+ uint32_t id_;
+ uint32_t domIndex_; // Index in the dominator tree.
+ uint32_t numDominated_;
+ jsbytecode* pc_;
+ LBlock* lir_;
+
+    // Copy of a dominator block's outerResumePoint_ which holds the state of
+    // the caller frame at the time of the call. If not null, this implies that
+    // this basic block corresponds to an inlined script.
+ MResumePoint* callerResumePoint_;
+
+ // Resume point holding baseline-like frame for the PC corresponding to the
+ // entry of this basic block.
+ MResumePoint* entryResumePoint_;
+
+ // Resume point holding baseline-like frame for the PC corresponding to the
+ // beginning of the call-site which is being inlined after this block.
+ MResumePoint* outerResumePoint_;
+
+#ifdef DEBUG
+ // Unordered list used to verify that all the resume points which are
+ // registered are correctly removed when a basic block is removed.
+ InlineForwardList<MResumePoint> resumePoints_;
+#endif
+
+ MBasicBlock* successorWithPhis_;
+ uint32_t positionInPhiSuccessor_;
+ uint32_t loopDepth_;
+ Kind kind_ : 8;
+
+ // Utility mark for traversal algorithms.
+ bool mark_;
+
+ Vector<MBasicBlock*, 1, JitAllocPolicy> immediatelyDominated_;
+ MBasicBlock* immediateDominator_;
+
+ BytecodeSite* trackedSite_;
+
+    // Record the number of times this block has been visited. Note that due to
+    // inlined scripts these numbers might not be continuous.
+ uint64_t hitCount_;
+ HitState hitState_;
+
+#if defined(JS_ION_PERF) || defined(DEBUG)
+ unsigned lineno_;
+ unsigned columnIndex_;
+
+ public:
+ void setLineno(unsigned l) { lineno_ = l; }
+ unsigned lineno() const { return lineno_; }
+ void setColumnIndex(unsigned c) { columnIndex_ = c; }
+ unsigned columnIndex() const { return columnIndex_; }
+#endif
+};
+
+typedef InlineListIterator<MBasicBlock> MBasicBlockIterator;
+typedef InlineListIterator<MBasicBlock> ReversePostorderIterator;
+typedef InlineListReverseIterator<MBasicBlock> PostorderIterator;
+
+typedef Vector<MBasicBlock*, 1, JitAllocPolicy> MIRGraphReturns;
+
+class MIRGraph
+{
+ InlineList<MBasicBlock> blocks_;
+ TempAllocator* alloc_;
+ MIRGraphReturns* returnAccumulator_;
+ uint32_t blockIdGen_;
+ uint32_t idGen_;
+ MBasicBlock* osrBlock_;
+
+ size_t numBlocks_;
+ bool hasTryBlock_;
+
+ InlineList<MPhi> phiFreeList_;
+ size_t phiFreeListLength_;
+
+ public:
+ explicit MIRGraph(TempAllocator* alloc)
+ : alloc_(alloc),
+ returnAccumulator_(nullptr),
+ blockIdGen_(0),
+ idGen_(0),
+ osrBlock_(nullptr),
+ numBlocks_(0),
+ hasTryBlock_(false),
+ phiFreeListLength_(0)
+ { }
+
+ TempAllocator& alloc() const {
+ return *alloc_;
+ }
+
+ void addBlock(MBasicBlock* block);
+ void insertBlockAfter(MBasicBlock* at, MBasicBlock* block);
+ void insertBlockBefore(MBasicBlock* at, MBasicBlock* block);
+
+ void renumberBlocksAfter(MBasicBlock* at);
+
+ void unmarkBlocks();
+
+ void setReturnAccumulator(MIRGraphReturns* accum) {
+ returnAccumulator_ = accum;
+ }
+ MIRGraphReturns* returnAccumulator() const {
+ return returnAccumulator_;
+ }
+
+ MOZ_MUST_USE bool addReturn(MBasicBlock* returnBlock) {
+ if (!returnAccumulator_)
+ return true;
+
+ return returnAccumulator_->append(returnBlock);
+ }
+
+ MBasicBlock* entryBlock() {
+ return *blocks_.begin();
+ }
+ MBasicBlockIterator begin() {
+ return blocks_.begin();
+ }
+ MBasicBlockIterator begin(MBasicBlock* at) {
+ return blocks_.begin(at);
+ }
+ MBasicBlockIterator end() {
+ return blocks_.end();
+ }
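+    // The block list is kept in reverse postorder (RPO): iterating it forward
+    // (rpoBegin/rpoEnd) yields RPO, and iterating it backward (poBegin/poEnd)
+    // yields postorder.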
+ PostorderIterator poBegin() {
+ return blocks_.rbegin();
+ }
+ PostorderIterator poBegin(MBasicBlock* at) {
+ return blocks_.rbegin(at);
+ }
+ PostorderIterator poEnd() {
+ return blocks_.rend();
+ }
+ ReversePostorderIterator rpoBegin() {
+ return blocks_.begin();
+ }
+ ReversePostorderIterator rpoBegin(MBasicBlock* at) {
+ return blocks_.begin(at);
+ }
+ ReversePostorderIterator rpoEnd() {
+ return blocks_.end();
+ }
+ void removeBlocksAfter(MBasicBlock* block);
+ void removeBlock(MBasicBlock* block);
+ void removeBlockIncludingPhis(MBasicBlock* block);
+ void moveBlockToEnd(MBasicBlock* block) {
+ MOZ_ASSERT(block->id());
+ blocks_.remove(block);
+ blocks_.pushBack(block);
+ }
+ void moveBlockBefore(MBasicBlock* at, MBasicBlock* block) {
+ MOZ_ASSERT(block->id());
+ blocks_.remove(block);
+ blocks_.insertBefore(at, block);
+ }
+ size_t numBlocks() const {
+ return numBlocks_;
+ }
+ uint32_t numBlockIds() const {
+ return blockIdGen_;
+ }
+ void allocDefinitionId(MDefinition* ins) {
+ ins->setId(idGen_++);
+ }
+ uint32_t getNumInstructionIds() {
+ return idGen_;
+ }
+ MResumePoint* entryResumePoint() {
+ return entryBlock()->entryResumePoint();
+ }
+
+ void copyIds(const MIRGraph& other) {
+ idGen_ = other.idGen_;
+ blockIdGen_ = other.blockIdGen_;
+ numBlocks_ = other.numBlocks_;
+ }
+
+ void setOsrBlock(MBasicBlock* osrBlock) {
+ MOZ_ASSERT(!osrBlock_);
+ osrBlock_ = osrBlock;
+ }
+ MBasicBlock* osrBlock() {
+ return osrBlock_;
+ }
+
+ bool hasTryBlock() const {
+ return hasTryBlock_;
+ }
+ void setHasTryBlock() {
+ hasTryBlock_ = true;
+ }
+
+ void dump(GenericPrinter& out);
+ void dump();
+
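+    // Discarded phis can be parked on a free list and reused later, avoiding
+    // repeated allocation of MPhi nodes.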
+ void addPhiToFreeList(MPhi* phi) {
+ phiFreeList_.pushBack(phi);
+ phiFreeListLength_++;
+ }
+ size_t phiFreeListLength() const {
+ return phiFreeListLength_;
+ }
+ MPhi* takePhiFromFreeList() {
+ MOZ_ASSERT(phiFreeListLength_ > 0);
+ phiFreeListLength_--;
+ return phiFreeList_.popBack();
+ }
+};
+
+class MDefinitionIterator
+{
+ friend class MBasicBlock;
+ friend class MNodeIterator;
+
+ private:
+ MBasicBlock* block_;
+ MPhiIterator phiIter_;
+ MInstructionIterator iter_;
+
+ bool atPhi() const {
+ return phiIter_ != block_->phisEnd();
+ }
+
+ MDefinition* getIns() {
+ if (atPhi())
+ return *phiIter_;
+ return *iter_;
+ }
+
+ bool more() const {
+ return atPhi() || (*iter_) != block_->lastIns();
+ }
+
+ public:
+ explicit MDefinitionIterator(MBasicBlock* block)
+ : block_(block),
+ phiIter_(block->phisBegin()),
+ iter_(block->begin())
+ { }
+
+ MDefinitionIterator operator ++() {
+ MOZ_ASSERT(more());
+ if (atPhi())
+ ++phiIter_;
+ else
+ ++iter_;
+ return *this;
+ }
+
+ MDefinitionIterator operator ++(int) {
+ MDefinitionIterator old(*this);
+ operator++ ();
+ return old;
+ }
+
+ explicit operator bool() const {
+ return more();
+ }
+
+ MDefinition* operator*() {
+ return getIns();
+ }
+
+ MDefinition* operator ->() {
+ return getIns();
+ }
+};
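+// Typical iteration pattern (sketch): MDefinitionIterator visits the phis of a
+// block first and then its instructions (excluding the final control
+// instruction), exposing both as MDefinition*.
+//
+//   for (MDefinitionIterator iter(block); iter; iter++) {
+//       MDefinition* def = *iter;
+//       // ... inspect def ...
+//   }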
+
+// Iterates over all resume points, phis, and instructions of an MBasicBlock.
+// Resume points are visited as long as the instruction which holds them is not
+// discarded.
+class MNodeIterator
+{
+ private:
+    // Last instruction which holds a resume point. To handle the entry resume
+    // point, it is initialized to the last instruction, assuming that the last
+    // instruction is not discarded before we visit it.
+ MInstruction* last_;
+
+ // Definition iterator which is one step ahead when visiting resume points.
+ // This is in order to avoid incrementing the iterator while it is settled
+ // on a discarded instruction.
+ MDefinitionIterator defIter_;
+
+ MBasicBlock* block() const {
+ return defIter_.block_;
+ }
+
+ bool atResumePoint() const {
+ return last_ && !last_->isDiscarded();
+ }
+
+ MNode* getNode() {
+ if (!atResumePoint())
+ return *defIter_;
+
+        // We use the last instruction as a sentinel to visit the entry resume
+        // point of the basic block before we even start iterating over the
+        // instruction list. Otherwise, last_ corresponds to the previous
+        // instruction.
+ if (last_ != block()->lastIns())
+ return last_->resumePoint();
+ return block()->entryResumePoint();
+ }
+
+ void next() {
+ if (!atResumePoint()) {
+ if (defIter_->isInstruction() && defIter_->toInstruction()->resumePoint()) {
+                // In theory the last instruction could hold a resume point,
+                // but in practice this does not happen.
+ MOZ_ASSERT(*defIter_ != block()->lastIns());
+ last_ = defIter_->toInstruction();
+ }
+
+ defIter_++;
+ } else {
+ last_ = nullptr;
+ }
+ }
+
+ bool more() const {
+ return defIter_ || atResumePoint();
+ }
+
+ public:
+ explicit MNodeIterator(MBasicBlock* block)
+ : last_(block->entryResumePoint() ? block->lastIns() : nullptr),
+ defIter_(block)
+ {
+ MOZ_ASSERT(bool(block->entryResumePoint()) == atResumePoint());
+
+        // We use the last instruction to check for the entry resume point, so
+        // assert that no control instruction has a resume point. If one did,
+        // we would need to handle that case in this iterator.
+ MOZ_ASSERT(!block->lastIns()->resumePoint());
+ }
+
+ MNodeIterator operator ++(int) {
+ MNodeIterator old(*this);
+ if (more())
+ next();
+ return old;
+ }
+
+ explicit operator bool() const {
+ return more();
+ }
+
+ MNode* operator*() {
+ return getNode();
+ }
+
+ MNode* operator ->() {
+ return getNode();
+ }
+
+};
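+// Typical iteration pattern (sketch): unlike MDefinitionIterator,
+// MNodeIterator also yields the resume points attached to the block and its
+// instructions, as MNode*.
+//
+//   for (MNodeIterator iter(block); iter; iter++) {
+//       MNode* node = *iter;
+//       // ... node is either a definition or a resume point ...
+//   }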
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIRGraph_h */
diff --git a/js/src/jit/MOpcodes.h b/js/src/jit/MOpcodes.h
new file mode 100644
index 000000000..74594cb35
--- /dev/null
+++ b/js/src/jit/MOpcodes.h
@@ -0,0 +1,349 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MOpcodes_h
+#define jit_MOpcodes_h
+
+namespace js {
+namespace jit {
+
+#define MIR_OPCODE_LIST(_) \
+ _(Constant) \
+ _(SimdBox) \
+ _(SimdUnbox) \
+ _(SimdValueX4) \
+ _(SimdSplat) \
+ _(SimdConstant) \
+ _(SimdConvert) \
+ _(SimdReinterpretCast) \
+ _(SimdExtractElement) \
+ _(SimdInsertElement) \
+ _(SimdSwizzle) \
+ _(SimdGeneralShuffle) \
+ _(SimdShuffle) \
+ _(SimdUnaryArith) \
+ _(SimdBinaryComp) \
+ _(SimdBinaryArith) \
+ _(SimdBinarySaturating) \
+ _(SimdBinaryBitwise) \
+ _(SimdShift) \
+ _(SimdSelect) \
+ _(SimdAllTrue) \
+ _(SimdAnyTrue) \
+ _(CloneLiteral) \
+ _(Parameter) \
+ _(Callee) \
+ _(IsConstructing) \
+ _(TableSwitch) \
+ _(Goto) \
+ _(Test) \
+ _(GotoWithFake) \
+ _(ObjectGroupDispatch) \
+ _(FunctionDispatch) \
+ _(Compare) \
+ _(Phi) \
+ _(Beta) \
+ _(NaNToZero) \
+ _(OsrValue) \
+ _(OsrEnvironmentChain) \
+ _(OsrReturnValue) \
+ _(OsrArgumentsObject) \
+ _(ReturnFromCtor) \
+ _(BinarySharedStub) \
+ _(UnarySharedStub) \
+ _(NullarySharedStub) \
+ _(CheckOverRecursed) \
+ _(DefVar) \
+ _(DefLexical) \
+ _(DefFun) \
+ _(CreateThis) \
+ _(CreateThisWithProto) \
+ _(CreateThisWithTemplate) \
+ _(CreateArgumentsObject) \
+ _(GetArgumentsObjectArg) \
+ _(SetArgumentsObjectArg) \
+ _(ComputeThis) \
+ _(Call) \
+ _(ApplyArgs) \
+ _(ApplyArray) \
+ _(ArraySplice) \
+ _(Bail) \
+ _(Unreachable) \
+ _(EncodeSnapshot) \
+ _(AssertFloat32) \
+ _(AssertRecoveredOnBailout) \
+ _(GetDynamicName) \
+ _(CallDirectEval) \
+ _(BitNot) \
+ _(TypeOf) \
+ _(ToAsync) \
+ _(ToId) \
+ _(BitAnd) \
+ _(BitOr) \
+ _(BitXor) \
+ _(Lsh) \
+ _(Rsh) \
+ _(Ursh) \
+ _(SignExtend) \
+ _(MinMax) \
+ _(Abs) \
+ _(Clz) \
+ _(Ctz) \
+ _(Popcnt) \
+ _(Sqrt) \
+ _(Atan2) \
+ _(Hypot) \
+ _(Pow) \
+ _(PowHalf) \
+ _(Random) \
+ _(MathFunction) \
+ _(Add) \
+ _(Sub) \
+ _(Mul) \
+ _(Div) \
+ _(Mod) \
+ _(Concat) \
+ _(CharCodeAt) \
+ _(FromCharCode) \
+ _(FromCodePoint) \
+ _(SinCos) \
+ _(StringSplit) \
+ _(Substr) \
+ _(Return) \
+ _(Throw) \
+ _(Box) \
+ _(Unbox) \
+ _(GuardObject) \
+ _(GuardString) \
+ _(PolyInlineGuard) \
+ _(AssertRange) \
+ _(ToDouble) \
+ _(ToFloat32) \
+ _(ToInt32) \
+ _(TruncateToInt32) \
+ _(WrapInt64ToInt32) \
+ _(ExtendInt32ToInt64) \
+ _(Int64ToFloatingPoint) \
+ _(ToString) \
+ _(ToObjectOrNull) \
+ _(NewArray) \
+ _(NewArrayCopyOnWrite) \
+ _(NewArrayDynamicLength) \
+ _(NewTypedArray) \
+ _(NewTypedArrayDynamicLength) \
+ _(NewObject) \
+ _(NewTypedObject) \
+ _(NewNamedLambdaObject) \
+ _(NewCallObject) \
+ _(NewSingletonCallObject) \
+ _(NewStringObject) \
+ _(ObjectState) \
+ _(ArrayState) \
+ _(InitElem) \
+ _(InitElemGetterSetter) \
+ _(MutateProto) \
+ _(InitProp) \
+ _(InitPropGetterSetter) \
+ _(Start) \
+ _(OsrEntry) \
+ _(Nop) \
+ _(LimitedTruncate) \
+ _(RegExp) \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(RegExpTester) \
+ _(RegExpPrototypeOptimizable) \
+ _(RegExpInstanceOptimizable) \
+ _(GetFirstDollarIndex) \
+ _(StringReplace) \
+ _(Lambda) \
+ _(LambdaArrow) \
+ _(KeepAliveObject) \
+ _(Slots) \
+ _(Elements) \
+ _(ConstantElements) \
+ _(ConvertElementsToDoubles) \
+ _(MaybeToDoubleElement) \
+ _(MaybeCopyElementsForWrite) \
+ _(LoadSlot) \
+ _(StoreSlot) \
+ _(FunctionEnvironment) \
+ _(FilterTypeSet) \
+ _(TypeBarrier) \
+ _(MonitorTypes) \
+ _(PostWriteBarrier) \
+ _(PostWriteElementBarrier) \
+ _(GetPropertyCache) \
+ _(GetPropertyPolymorphic) \
+ _(SetPropertyPolymorphic) \
+ _(BindNameCache) \
+ _(CallBindVar) \
+ _(GuardShape) \
+ _(GuardReceiverPolymorphic) \
+ _(GuardObjectGroup) \
+ _(GuardObjectIdentity) \
+ _(GuardClass) \
+ _(GuardUnboxedExpando) \
+ _(LoadUnboxedExpando) \
+ _(ArrayLength) \
+ _(SetArrayLength) \
+ _(GetNextEntryForIterator) \
+ _(TypedArrayLength) \
+ _(TypedArrayElements) \
+ _(SetDisjointTypedElements) \
+ _(TypedObjectDescr) \
+ _(TypedObjectElements) \
+ _(SetTypedObjectOffset) \
+ _(InitializedLength) \
+ _(SetInitializedLength) \
+ _(UnboxedArrayLength) \
+ _(UnboxedArrayInitializedLength) \
+ _(IncrementUnboxedArrayInitializedLength) \
+ _(SetUnboxedArrayInitializedLength) \
+ _(Not) \
+ _(BoundsCheck) \
+ _(BoundsCheckLower) \
+ _(InArray) \
+ _(LoadElement) \
+ _(LoadElementHole) \
+ _(LoadUnboxedScalar) \
+ _(LoadUnboxedObjectOrNull) \
+ _(LoadUnboxedString) \
+ _(StoreElement) \
+ _(StoreElementHole) \
+ _(FallibleStoreElement) \
+ _(StoreUnboxedScalar) \
+ _(StoreUnboxedObjectOrNull) \
+ _(StoreUnboxedString) \
+ _(ConvertUnboxedObjectToNative) \
+ _(ArrayPopShift) \
+ _(ArrayPush) \
+ _(ArraySlice) \
+ _(ArrayJoin) \
+ _(LoadTypedArrayElementHole) \
+ _(LoadTypedArrayElementStatic) \
+ _(StoreTypedArrayElementHole) \
+ _(StoreTypedArrayElementStatic) \
+ _(AtomicIsLockFree) \
+ _(GuardSharedTypedArray) \
+ _(CompareExchangeTypedArrayElement) \
+ _(AtomicExchangeTypedArrayElement) \
+ _(AtomicTypedArrayElementBinop) \
+ _(EffectiveAddress) \
+ _(ClampToUint8) \
+ _(LoadFixedSlot) \
+ _(LoadFixedSlotAndUnbox) \
+ _(StoreFixedSlot) \
+ _(CallGetProperty) \
+ _(GetNameCache) \
+ _(CallGetIntrinsicValue) \
+ _(CallGetElement) \
+ _(CallSetElement) \
+ _(CallSetProperty) \
+ _(CallInitElementArray) \
+ _(DeleteProperty) \
+ _(DeleteElement) \
+ _(SetPropertyCache) \
+ _(IteratorStart) \
+ _(IteratorMore) \
+ _(IsNoIter) \
+ _(IteratorEnd) \
+ _(StringLength) \
+ _(ArgumentsLength) \
+ _(GetFrameArgument) \
+ _(SetFrameArgument) \
+ _(RunOncePrologue) \
+ _(Rest) \
+ _(Floor) \
+ _(Ceil) \
+ _(Round) \
+ _(In) \
+ _(InstanceOf) \
+ _(CallInstanceOf) \
+ _(InterruptCheck) \
+ _(GetDOMProperty) \
+ _(GetDOMMember) \
+ _(SetDOMProperty) \
+ _(IsConstructor) \
+ _(IsCallable) \
+ _(IsObject) \
+ _(HasClass) \
+ _(CopySign) \
+ _(Rotate) \
+ _(NewDerivedTypedObject) \
+ _(RecompileCheck) \
+ _(UnknownValue) \
+ _(LexicalCheck) \
+ _(ThrowRuntimeLexicalError) \
+ _(GlobalNameConflictsCheck) \
+ _(Debugger) \
+ _(NewTarget) \
+ _(ArrowNewTarget) \
+ _(CheckReturn) \
+ _(CheckIsObj) \
+ _(CheckObjCoercible) \
+ _(DebugCheckSelfHosted) \
+ _(AsmJSNeg) \
+ _(AsmJSLoadHeap) \
+ _(AsmJSStoreHeap) \
+ _(AsmJSCompareExchangeHeap) \
+ _(AsmJSAtomicExchangeHeap) \
+ _(AsmJSAtomicBinopHeap) \
+ _(WasmBoundsCheck) \
+ _(WasmAddOffset) \
+ _(WasmLoad) \
+ _(WasmStore) \
+ _(WasmTrap) \
+ _(WasmTruncateToInt32) \
+ _(WasmUnsignedToDouble) \
+ _(WasmUnsignedToFloat32) \
+ _(WasmLoadGlobalVar) \
+ _(WasmStoreGlobalVar) \
+ _(WasmReturn) \
+ _(WasmReturnVoid) \
+ _(WasmParameter) \
+ _(WasmStackArg) \
+ _(WasmCall) \
+ _(WasmSelect) \
+ _(WasmReinterpret) \
+ _(WasmTruncateToInt64)
+
+// Forward declarations of MIR types.
+#define FORWARD_DECLARE(op) class M##op;
+ MIR_OPCODE_LIST(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
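+// MIR_OPCODE_LIST is an X-macro: consumers pass in a macro that is expanded
+// once per opcode, as in the forward declarations above. For illustration
+// only (the names below are hypothetical), a consumer could build an
+// enumeration like this:
+//
+//   #define DEFINE_OPCODE(op) op,
+//   enum class ExampleOpcode : uint16_t {
+//       MIR_OPCODE_LIST(DEFINE_OPCODE)
+//       Count
+//   };
+//   #undef DEFINE_OPCODE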
+
+class MDefinitionVisitor // interface, i.e. a pure abstract class
+{
+ public:
+#define VISIT_INS(op) virtual void visit##op(M##op*) = 0;
+ MIR_OPCODE_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+// MDefinition visitor which raises a Not Yet Implemented error for
+// visit functions that are not overridden.
+class MDefinitionVisitorDefaultNYI : public MDefinitionVisitor
+{
+ public:
+#define VISIT_INS(op) virtual void visit##op(M##op*) { MOZ_CRASH("NYI: " #op); }
+ MIR_OPCODE_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+// MDefinition visitor which ignores visit functions that are not overridden.
+class MDefinitionVisitorDefaultNoop : public MDefinitionVisitor
+{
+ public:
+#define VISIT_INS(op) virtual void visit##op(M##op*) { }
+ MIR_OPCODE_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MOpcodes_h */
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
new file mode 100644
index 000000000..8d7e14a0a
--- /dev/null
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -0,0 +1,819 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MacroAssembler_inl_h
+#define jit_MacroAssembler_inl_h
+
+#include "jit/MacroAssembler.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86-inl.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64-inl.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MacroAssembler-arm-inl.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MacroAssembler-arm64-inl.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MacroAssembler-mips32-inl.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MacroAssembler-mips64-inl.h"
+#elif !defined(JS_CODEGEN_NONE)
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Frame manipulation functions.
+
+uint32_t
+MacroAssembler::framePushed() const
+{
+ return framePushed_;
+}
+
+void
+MacroAssembler::setFramePushed(uint32_t framePushed)
+{
+ framePushed_ = framePushed;
+}
+
+void
+MacroAssembler::adjustFrame(int32_t value)
+{
+ MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
+ setFramePushed(framePushed_ + value);
+}
+
+void
+MacroAssembler::implicitPop(uint32_t bytes)
+{
+ MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
+ MOZ_ASSERT(bytes <= INT32_MAX);
+ adjustFrame(-int32_t(bytes));
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+CodeOffset
+MacroAssembler::PushWithPatch(ImmWord word)
+{
+ framePushed_ += sizeof(word.value);
+ return pushWithPatch(word);
+}
+
+CodeOffset
+MacroAssembler::PushWithPatch(ImmPtr imm)
+{
+ return PushWithPatch(ImmWord(uintptr_t(imm.value)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+void
+MacroAssembler::call(const wasm::CallSiteDesc& desc, const Register reg)
+{
+ CodeOffset l = call(reg);
+ append(desc, l, framePushed());
+}
+
+void
+MacroAssembler::call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex)
+{
+ CodeOffset l = callWithPatch();
+ append(desc, l, framePushed(), funcDefIndex);
+}
+
+void
+MacroAssembler::call(const wasm::CallSiteDesc& desc, wasm::Trap trap)
+{
+ CodeOffset l = callWithPatch();
+ append(desc, l, framePushed(), trap);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::passABIArg(Register reg)
+{
+ passABIArg(MoveOperand(reg), MoveOp::GENERAL);
+}
+
+void
+MacroAssembler::passABIArg(FloatRegister reg, MoveOp::Type type)
+{
+ passABIArg(MoveOperand(reg), type);
+}
+
+template <typename T> void
+MacroAssembler::callWithABI(const T& fun, MoveOp::Type result)
+{
+ AutoProfilerCallInstrumentation profiler(*this);
+ callWithABINoProfiler(fun, result);
+}
+
+void
+MacroAssembler::appendSignatureType(MoveOp::Type type)
+{
+#ifdef JS_SIMULATOR
+ signature_ <<= ArgType_Shift;
+ switch (type) {
+ case MoveOp::GENERAL: signature_ |= ArgType_General; break;
+ case MoveOp::DOUBLE: signature_ |= ArgType_Double; break;
+ case MoveOp::FLOAT32: signature_ |= ArgType_Float32; break;
+ default: MOZ_CRASH("Invalid argument type");
+ }
+#endif
+}
+
+ABIFunctionType
+MacroAssembler::signature() const
+{
+#ifdef JS_SIMULATOR
+#ifdef DEBUG
+ switch (signature_) {
+ case Args_General0:
+ case Args_General1:
+ case Args_General2:
+ case Args_General3:
+ case Args_General4:
+ case Args_General5:
+ case Args_General6:
+ case Args_General7:
+ case Args_General8:
+ case Args_Double_None:
+ case Args_Int_Double:
+ case Args_Float32_Float32:
+ case Args_Double_Double:
+ case Args_Double_Int:
+ case Args_Double_DoubleInt:
+ case Args_Double_DoubleDouble:
+ case Args_Double_IntDouble:
+ case Args_Int_IntDouble:
+ case Args_Int_DoubleIntInt:
+ case Args_Int_IntDoubleIntInt:
+ case Args_Double_DoubleDoubleDouble:
+ case Args_Double_DoubleDoubleDoubleDouble:
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+#endif // DEBUG
+
+ return ABIFunctionType(signature_);
+#else
+ // No simulator enabled.
+ MOZ_CRASH("Only available for making calls within a simulator.");
+#endif
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::callJitNoProfiler(Register callee)
+{
+#ifdef JS_USE_LINK_REGISTER
+ // The return address is pushed by the callee.
+ call(callee);
+#else
+ callAndPushReturnAddress(callee);
+#endif
+ return currentOffset();
+}
+
+uint32_t
+MacroAssembler::callJit(Register callee)
+{
+ AutoProfilerCallInstrumentation profiler(*this);
+ uint32_t ret = callJitNoProfiler(callee);
+ return ret;
+}
+
+uint32_t
+MacroAssembler::callJit(JitCode* callee)
+{
+ AutoProfilerCallInstrumentation profiler(*this);
+ call(callee);
+ return currentOffset();
+}
+
+void
+MacroAssembler::makeFrameDescriptor(Register frameSizeReg, FrameType type, uint32_t headerSize)
+{
+ // See JitFrames.h for a description of the frame descriptor format.
+ // The saved-frame bit is zero for new frames. See js::SavedStacks.
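+    // The resulting descriptor is:
+    //   (frameSize << FRAMESIZE_SHIFT)
+    //     | (EncodeFrameHeaderSize(headerSize) << FRAME_HEADER_SIZE_SHIFT)
+    //     | type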
+
+ lshiftPtr(Imm32(FRAMESIZE_SHIFT), frameSizeReg);
+
+ headerSize = EncodeFrameHeaderSize(headerSize);
+ orPtr(Imm32((headerSize << FRAME_HEADER_SIZE_SHIFT) | type), frameSizeReg);
+}
+
+void
+MacroAssembler::pushStaticFrameDescriptor(FrameType type, uint32_t headerSize)
+{
+ uint32_t descriptor = MakeFrameDescriptor(framePushed(), type, headerSize);
+ Push(Imm32(descriptor));
+}
+
+void
+MacroAssembler::PushCalleeToken(Register callee, bool constructing)
+{
+ if (constructing) {
+ orPtr(Imm32(CalleeToken_FunctionConstructing), callee);
+ Push(callee);
+ andPtr(Imm32(uint32_t(CalleeTokenMask)), callee);
+ } else {
+ static_assert(CalleeToken_Function == 0, "Non-constructing call requires no tagging");
+ Push(callee);
+ }
+}
+
+void
+MacroAssembler::loadFunctionFromCalleeToken(Address token, Register dest)
+{
+#ifdef DEBUG
+ Label ok;
+ loadPtr(token, dest);
+ andPtr(Imm32(uint32_t(~CalleeTokenMask)), dest);
+ branchPtr(Assembler::Equal, dest, Imm32(CalleeToken_Function), &ok);
+ branchPtr(Assembler::Equal, dest, Imm32(CalleeToken_FunctionConstructing), &ok);
+ assumeUnreachable("Unexpected CalleeToken tag");
+ bind(&ok);
+#endif
+ loadPtr(token, dest);
+ andPtr(Imm32(uint32_t(CalleeTokenMask)), dest);
+}
+
+uint32_t
+MacroAssembler::buildFakeExitFrame(Register scratch)
+{
+ mozilla::DebugOnly<uint32_t> initialDepth = framePushed();
+
+ pushStaticFrameDescriptor(JitFrame_IonJS, ExitFrameLayout::Size());
+ uint32_t retAddr = pushFakeReturnAddress(scratch);
+
+ MOZ_ASSERT(framePushed() == initialDepth + ExitFrameLayout::Size());
+ return retAddr;
+}
+
+// ===============================================================
+// Exit frame footer.
+
+void
+MacroAssembler::PushStubCode()
+{
+ // Make sure that we do not erase an existing self-reference.
+ MOZ_ASSERT(!hasSelfReference());
+ selfReferencePatch_ = PushWithPatch(ImmWord(-1));
+}
+
+void
+MacroAssembler::enterExitFrame(const VMFunction* f)
+{
+ linkExitFrame();
+    // Push the JitCode pointer, to keep the code alive while it is on the stack.
+ PushStubCode();
+ // Push VMFunction pointer, to mark arguments.
+ Push(ImmPtr(f));
+}
+
+void
+MacroAssembler::enterFakeExitFrame(enum ExitFrameTokenValues token)
+{
+ linkExitFrame();
+ Push(Imm32(token));
+ Push(ImmPtr(nullptr));
+}
+
+void
+MacroAssembler::enterFakeExitFrameForNative(bool isConstructing)
+{
+ enterFakeExitFrame(isConstructing ? ConstructNativeExitFrameLayoutToken
+ : CallNativeExitFrameLayoutToken);
+}
+
+void
+MacroAssembler::leaveExitFrame(size_t extraFrame)
+{
+ freeStack(ExitFooterFrame::Size() + extraFrame);
+}
+
+bool
+MacroAssembler::hasSelfReference() const
+{
+ return selfReferencePatch_.bound();
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(ImmPtr imm, Register dest)
+{
+ addPtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+void
+MacroAssembler::inc32(RegisterOrInt32Constant* key)
+{
+ if (key->isRegister())
+ add32(Imm32(1), key->reg());
+ else
+ key->bumpConstant(1);
+}
+
+void
+MacroAssembler::dec32(RegisterOrInt32Constant* key)
+{
+ if (key->isRegister())
+ add32(Imm32(-1), key->reg());
+ else
+ key->bumpConstant(-1);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch32(Condition cond, Register length, const RegisterOrInt32Constant& key,
+ Label* label)
+{
+ branch32Impl(cond, length, key, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& length, const RegisterOrInt32Constant& key,
+ Label* label)
+{
+ branch32Impl(cond, length, key, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branch32Impl(Condition cond, const T& length, const RegisterOrInt32Constant& key,
+ Label* label)
+{
+ if (key.isRegister())
+ branch32(cond, length, key.reg(), label);
+ else
+ branch32(cond, length, Imm32(key.constant()), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchIfFalseBool(Register reg, L label)
+{
+ // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
+ branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
+}
+
+void
+MacroAssembler::branchIfTrueBool(Register reg, Label* label)
+{
+ // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
+ branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
+}
+
+void
+MacroAssembler::branchIfRope(Register str, Label* label)
+{
+ Address flags(str, JSString::offsetOfFlags());
+ static_assert(JSString::ROPE_FLAGS == 0, "Rope type flags must be 0");
+ branchTest32(Assembler::Zero, flags, Imm32(JSString::TYPE_FLAGS_MASK), label);
+}
+
+void
+MacroAssembler::branchIfRopeOrExternal(Register str, Register temp, Label* label)
+{
+ Address flags(str, JSString::offsetOfFlags());
+ move32(Imm32(JSString::TYPE_FLAGS_MASK), temp);
+ and32(flags, temp);
+
+ static_assert(JSString::ROPE_FLAGS == 0, "Rope type flags must be 0");
+ branchTest32(Assembler::Zero, temp, temp, label);
+
+ branch32(Assembler::Equal, temp, Imm32(JSString::EXTERNAL_FLAGS), label);
+}
+
+void
+MacroAssembler::branchLatin1String(Register string, Label* label)
+{
+ branchTest32(Assembler::NonZero, Address(string, JSString::offsetOfFlags()),
+ Imm32(JSString::LATIN1_CHARS_BIT), label);
+}
+
+void
+MacroAssembler::branchTwoByteString(Register string, Label* label)
+{
+ branchTest32(Assembler::Zero, Address(string, JSString::offsetOfFlags()),
+ Imm32(JSString::LATIN1_CHARS_BIT), label);
+}
+
+void
+MacroAssembler::branchIfFunctionHasNoScript(Register fun, Label* label)
+{
+    // 16-bit loads are slow and unaligned 32-bit loads may be too, so
+    // perform an aligned 32-bit load and adjust the bitmask accordingly.
+ MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+ Address address(fun, JSFunction::offsetOfNargs());
+ int32_t bit = IMM32_16ADJ(JSFunction::INTERPRETED);
+ branchTest32(Assembler::Zero, address, Imm32(bit), label);
+}
+
+void
+MacroAssembler::branchIfInterpreted(Register fun, Label* label)
+{
+    // 16-bit loads are slow and unaligned 32-bit loads may be too, so
+    // perform an aligned 32-bit load and adjust the bitmask accordingly.
+ MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+ Address address(fun, JSFunction::offsetOfNargs());
+ int32_t bit = IMM32_16ADJ(JSFunction::INTERPRETED);
+ branchTest32(Assembler::NonZero, address, Imm32(bit), label);
+}
+
+void
+MacroAssembler::branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
+ Register scratch, Label* label)
+{
+    // 16-bit loads are slow and unaligned 32-bit loads may be too, so
+    // perform an aligned 32-bit load and adjust the bitmask accordingly.
+ MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+ Address address(fun, JSFunction::offsetOfNargs());
+ int32_t mask = IMM32_16ADJ(JSFunction::FUNCTION_KIND_MASK);
+ int32_t bit = IMM32_16ADJ(kind << JSFunction::FUNCTION_KIND_SHIFT);
+ load32(address, scratch);
+ and32(Imm32(mask), scratch);
+ branch32(cond, scratch, Imm32(bit), label);
+}
+
+void
+MacroAssembler::branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class* clasp,
+ Label* label)
+{
+ loadObjGroup(obj, scratch);
+ branchPtr(cond, Address(scratch, ObjectGroup::offsetOfClasp()), ImmPtr(clasp), label);
+}
+
+void
+MacroAssembler::branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label)
+{
+ branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), ImmGCPtr(shape), label);
+}
+
+void
+MacroAssembler::branchTestObjShape(Condition cond, Register obj, Register shape, Label* label)
+{
+ branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), shape, label);
+}
+
+void
+MacroAssembler::branchTestObjGroup(Condition cond, Register obj, ObjectGroup* group, Label* label)
+{
+ branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), ImmGCPtr(group), label);
+}
+
+void
+MacroAssembler::branchTestObjGroup(Condition cond, Register obj, Register group, Label* label)
+{
+ branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), group, label);
+}
+
+void
+MacroAssembler::branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
+ Label* slowCheck, Label* checked)
+{
+ // The branches to out-of-line code here implement a conservative version
+ // of the JSObject::isWrapper test performed in EmulatesUndefined. If none
+ // of the branches are taken, we can check class flags directly.
+ loadObjClass(objReg, scratch);
+ Address flags(scratch, Class::offsetOfFlags());
+
+ branchTestClassIsProxy(true, scratch, slowCheck);
+
+ Condition cond = truthy ? Assembler::Zero : Assembler::NonZero;
+ branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked);
+}
+
+void
+MacroAssembler::branchTestClassIsProxy(bool proxy, Register clasp, Label* label)
+{
+ branchTest32(proxy ? Assembler::NonZero : Assembler::Zero,
+ Address(clasp, Class::offsetOfFlags()),
+ Imm32(JSCLASS_IS_PROXY), label);
+}
+
+void
+MacroAssembler::branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label)
+{
+ loadObjClass(object, scratch);
+ branchTestClassIsProxy(proxy, scratch, label);
+}
+
+void
+MacroAssembler::branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
+ const void* handlerp, Label* label)
+{
+ Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
+ loadPtr(handlerAddr, scratch);
+ Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
+ branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
+}
+
+template <typename Value>
+void
+MacroAssembler::branchTestMIRType(Condition cond, const Value& val, MIRType type, Label* label)
+{
+ switch (type) {
+ case MIRType::Null: return branchTestNull(cond, val, label);
+ case MIRType::Undefined: return branchTestUndefined(cond, val, label);
+ case MIRType::Boolean: return branchTestBoolean(cond, val, label);
+ case MIRType::Int32: return branchTestInt32(cond, val, label);
+ case MIRType::String: return branchTestString(cond, val, label);
+ case MIRType::Symbol: return branchTestSymbol(cond, val, label);
+ case MIRType::Object: return branchTestObject(cond, val, label);
+ case MIRType::Double: return branchTestDouble(cond, val, label);
+ case MIRType::MagicOptimizedArguments: // Fall through.
+ case MIRType::MagicIsConstructing:
+ case MIRType::MagicHole: return branchTestMagic(cond, val, label);
+ default:
+ MOZ_CRASH("Bad MIRType");
+ }
+}
+
+void
+MacroAssembler::branchTestNeedsIncrementalBarrier(Condition cond, Label* label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ CompileZone* zone = GetJitContext()->compartment->zone();
+ AbsoluteAddress needsBarrierAddr(zone->addressOfNeedsIncrementalBarrier());
+ branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
+}
+
+void
+MacroAssembler::branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ branchTestValue(cond, val, MagicValue(why), label);
+}
+
+void
+MacroAssembler::branchDoubleNotInInt64Range(Address src, Register temp, Label* fail)
+{
+ // Tests if double is in [INT64_MIN; INT64_MAX] range
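+    // Only the high 32 bits of the double, which contain the sign and
+    // exponent, are examined: an unbiased exponent of 63 or more means the
+    // magnitude is at least 2^63 (or the value is NaN or Infinity).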
+ uint32_t EXPONENT_MASK = 0x7ff00000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 63) << EXPONENT_SHIFT;
+
+ load32(Address(src.base, src.offset + sizeof(int32_t)), temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::GreaterThanOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void
+MacroAssembler::branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail)
+{
+ // Note: returns failure on -0.0
+    // Tests if the double is in the [0; UINT64_MAX] range.
+    // The sign bit is included in the mask so that sign and exponent can be
+    // checked with a single comparison.
+ uint32_t EXPONENT_MASK = 0xfff00000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 64) << EXPONENT_SHIFT;
+
+ load32(Address(src.base, src.offset + sizeof(int32_t)), temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::AboveOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void
+MacroAssembler::branchFloat32NotInInt64Range(Address src, Register temp, Label* fail)
+{
+ // Tests if float is in [INT64_MIN; INT64_MAX] range
+ uint32_t EXPONENT_MASK = 0x7f800000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 63) << EXPONENT_SHIFT;
+
+ load32(src, temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::GreaterThanOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void
+MacroAssembler::branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail)
+{
+ // Note: returns failure on -0.0
+    // Tests if the float is in the [0; UINT64_MAX] range.
+    // The sign bit is included in the mask so that sign and exponent can be
+    // checked with a single comparison.
+ uint32_t EXPONENT_MASK = 0xff800000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 64) << EXPONENT_SHIFT;
+
+ load32(src, temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::AboveOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+// ========================================================================
+// Canonicalization primitives.
+void
+MacroAssembler::canonicalizeFloat(FloatRegister reg)
+{
+ Label notNaN;
+ branchFloat(DoubleOrdered, reg, reg, &notNaN);
+ loadConstantFloat32(float(JS::GenericNaN()), reg);
+ bind(&notNaN);
+}
+
+void
+MacroAssembler::canonicalizeFloatIfDeterministic(FloatRegister reg)
+{
+#ifdef JS_MORE_DETERMINISTIC
+ // See the comment in TypedArrayObjectTemplate::getIndexValue.
+ canonicalizeFloat(reg);
+#endif // JS_MORE_DETERMINISTIC
+}
+
+void
+MacroAssembler::canonicalizeDouble(FloatRegister reg)
+{
+ Label notNaN;
+ branchDouble(DoubleOrdered, reg, reg, &notNaN);
+ loadConstantDouble(JS::GenericNaN(), reg);
+ bind(&notNaN);
+}
+
+void
+MacroAssembler::canonicalizeDoubleIfDeterministic(FloatRegister reg)
+{
+#ifdef JS_MORE_DETERMINISTIC
+ // See the comment in TypedArrayObjectTemplate::getIndexValue.
+ canonicalizeDouble(reg);
+#endif // JS_MORE_DETERMINISTIC
+}
+
+// ========================================================================
+// Memory access primitives.
+template<class T> void
+MacroAssembler::storeDouble(FloatRegister src, const T& dest)
+{
+ canonicalizeDoubleIfDeterministic(src);
+ storeUncanonicalizedDouble(src, dest);
+}
+
+template void MacroAssembler::storeDouble(FloatRegister src, const Address& dest);
+template void MacroAssembler::storeDouble(FloatRegister src, const BaseIndex& dest);
+
+template<class T> void
+MacroAssembler::storeFloat32(FloatRegister src, const T& dest)
+{
+ canonicalizeFloatIfDeterministic(src);
+ storeUncanonicalizedFloat32(src, dest);
+}
+
+template void MacroAssembler::storeFloat32(FloatRegister src, const Address& dest);
+template void MacroAssembler::storeFloat32(FloatRegister src, const BaseIndex& dest);
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+#ifndef JS_CODEGEN_ARM64
+
+template <typename T>
+void
+MacroAssembler::branchTestStackPtr(Condition cond, T t, Label* label)
+{
+ branchTestPtr(cond, getStackPointer(), t, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchStackPtr(Condition cond, T rhs, Label* label)
+{
+ branchPtr(cond, getStackPointer(), rhs, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchStackPtrRhs(Condition cond, T lhs, Label* label)
+{
+ branchPtr(cond, lhs, getStackPointer(), label);
+}
+
+template <typename T> void
+MacroAssembler::addToStackPtr(T t)
+{
+ addPtr(t, getStackPointer());
+}
+
+template <typename T> void
+MacroAssembler::addStackPtrTo(T t)
+{
+ addPtr(getStackPointer(), t);
+}
+
+void
+MacroAssembler::reserveStack(uint32_t amount)
+{
+ subFromStackPtr(Imm32(amount));
+ adjustFrame(amount);
+}
+#endif // !JS_CODEGEN_ARM64
+
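+// Store the object in |src| as an ObjectValue, or store NullValue if |src| is
+// a null pointer.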
+template <typename T>
+void
+MacroAssembler::storeObjectOrNull(Register src, const T& dest)
+{
+ Label notNull, done;
+ branchTestPtr(Assembler::NonZero, src, src, &notNull);
+ storeValue(NullValue(), dest);
+ jump(&done);
+ bind(&notNull);
+ storeValue(JSVAL_TYPE_OBJECT, src, dest);
+ bind(&done);
+}
+
+void
+MacroAssembler::assertStackAlignment(uint32_t alignment, int32_t offset /* = 0 */)
+{
+#ifdef DEBUG
+ Label ok, bad;
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+
+ // Wrap around the offset to be a non-negative number.
+ offset %= alignment;
+ if (offset < 0)
+ offset += alignment;
+
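+    // After wrapping, the checks below assert that the low bits of the stack
+    // pointer are exactly |offset|, i.e. stackPointer % alignment == offset.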
+ // Test if each bit from offset is set.
+ uint32_t off = offset;
+ while (off) {
+ uint32_t lowestBit = 1 << mozilla::CountTrailingZeroes32(off);
+ branchTestStackPtr(Assembler::Zero, Imm32(lowestBit), &bad);
+ off ^= lowestBit;
+ }
+
+ // Check that all remaining bits are zero.
+ branchTestStackPtr(Assembler::Zero, Imm32((alignment - 1) ^ offset), &ok);
+
+ bind(&bad);
+ breakpoint();
+ bind(&ok);
+#endif
+}
+
+void
+MacroAssembler::storeCallBoolResult(Register reg)
+{
+ if (reg != ReturnReg)
+ mov(ReturnReg, reg);
+ // C++ compilers like to only use the bottom byte for bools, but we
+ // need to maintain the entire register.
+ and32(Imm32(0xFF), reg);
+}
+
+void
+MacroAssembler::storeCallInt32Result(Register reg)
+{
+#if JS_BITS_PER_WORD == 32
+ storeCallPointerResult(reg);
+#else
+ // Ensure the upper 32 bits are cleared.
+ move32(ReturnReg, reg);
+#endif
+}
+
+void
+MacroAssembler::storeCallResultValue(AnyRegister dest)
+{
+ unboxValue(JSReturnOperand, dest);
+}
+
+void
+MacroAssembler::storeCallResultValue(TypedOrValueRegister dest)
+{
+ if (dest.hasValue())
+ storeCallResultValue(dest.valueReg());
+ else
+ storeCallResultValue(dest.typedReg());
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MacroAssembler_inl_h */
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
new file mode 100644
index 000000000..f633b9b7b
--- /dev/null
+++ b/js/src/jit/MacroAssembler.cpp
@@ -0,0 +1,2980 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MacroAssembler-inl.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "jsfriendapi.h"
+#include "jsprf.h"
+
+#include "builtin/TypedObject.h"
+#include "gc/GCTrace.h"
+#include "jit/AtomicOp.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "js/Conversions.h"
+#include "js/GCAPI.h"
+#include "vm/TraceLogging.h"
+
+#include "jsobjinlines.h"
+
+#include "gc/Nursery-inl.h"
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/Interpreter-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+
+using mozilla::CheckedUint32;
+
+template <typename Source> void
+MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
+ Register scratch, Label* miss)
+{
+ MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
+ MOZ_ASSERT(!types->unknown());
+
+ Label matched;
+ TypeSet::Type tests[8] = {
+ TypeSet::Int32Type(),
+ TypeSet::UndefinedType(),
+ TypeSet::BooleanType(),
+ TypeSet::StringType(),
+ TypeSet::SymbolType(),
+ TypeSet::NullType(),
+ TypeSet::MagicArgType(),
+ TypeSet::AnyObjectType()
+ };
+
+ // The double type also implies Int32.
+ // So replace the int32 test with the double one.
+ if (types->hasType(TypeSet::DoubleType())) {
+ MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
+ tests[0] = TypeSet::DoubleType();
+ }
+
+ Register tag = extractTag(address, scratch);
+
+ // Emit all typed tests.
+ BranchType lastBranch;
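+    // Branches are emitted one step behind: the branch for a matched test is
+    // only emitted once the next test is recorded, so that the final pending
+    // branch can instead be inverted and redirected to |miss| below when it
+    // turns out to be the last check.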
+ for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
+ if (!types->hasType(tests[i]))
+ continue;
+
+ if (lastBranch.isInitialized())
+ lastBranch.emit(*this);
+ lastBranch = BranchType(Equal, tag, tests[i], &matched);
+ }
+
+ // If this is the last check, invert the last branch.
+ if (types->hasType(TypeSet::AnyObjectType()) || !types->getObjectCount()) {
+ if (!lastBranch.isInitialized()) {
+ jump(miss);
+ return;
+ }
+
+ lastBranch.invertCondition();
+ lastBranch.relink(miss);
+ lastBranch.emit(*this);
+
+ bind(&matched);
+ return;
+ }
+
+ if (lastBranch.isInitialized())
+ lastBranch.emit(*this);
+
+ // Test specific objects.
+ MOZ_ASSERT(scratch != InvalidReg);
+ branchTestObject(NotEqual, tag, miss);
+ if (kind != BarrierKind::TypeTagOnly) {
+ Register obj = extractObject(address, scratch);
+ guardObjectType(obj, types, scratch, miss);
+ } else {
+#ifdef DEBUG
+ Label fail;
+ Register obj = extractObject(address, scratch);
+ guardObjectType(obj, types, scratch, &fail);
+ jump(&matched);
+ bind(&fail);
+
+ if (obj == scratch)
+ extractObject(address, scratch);
+ guardTypeSetMightBeIncomplete(types, obj, scratch, &matched);
+
+ assumeUnreachable("Unexpected object type");
+#endif
+ }
+
+ bind(&matched);
+}
+
+template <typename TypeSet>
+void
+MacroAssembler::guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label)
+{
+ // Type set guards might miss when an object's group changes. In this case
+ // either its old group's properties will become unknown, or it will change
+ // to a native object with an original unboxed group. Jump to label if this
+ // might have happened for the input object.
+
+ if (types->unknownObject()) {
+ jump(label);
+ return;
+ }
+
+ loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+ load32(Address(scratch, ObjectGroup::offsetOfFlags()), scratch);
+ and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
+ branch32(Assembler::Equal,
+ scratch, Imm32(ObjectGroup::addendumOriginalUnboxedGroupValue()), label);
+
+ for (size_t i = 0; i < types->getObjectCount(); i++) {
+ if (JSObject* singleton = types->getSingletonNoBarrier(i)) {
+ movePtr(ImmGCPtr(singleton), scratch);
+ loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
+ } else if (ObjectGroup* group = types->getGroupNoBarrier(i)) {
+ movePtr(ImmGCPtr(group), scratch);
+ } else {
+ continue;
+ }
+ branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
+ Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
+ }
+}
+
+void
+MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
+ Register scratch, Label* miss)
+{
+ MOZ_ASSERT(!types->unknown());
+ MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
+ MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);
+
+ // Note: this method elides read barriers on values read from type sets, as
+ // this may be called off the main thread during Ion compilation. This is
+ // safe to do as the final JitCode object will be allocated during the
+ // incremental GC (or the compilation canceled before we start sweeping),
+ // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
+ // to trigger the barrier on the contents of type sets passed in here.
+ Label matched;
+
+ BranchGCPtr lastBranch;
+ MOZ_ASSERT(!lastBranch.isInitialized());
+ bool hasObjectGroups = false;
+ unsigned count = types->getObjectCount();
+ for (unsigned i = 0; i < count; i++) {
+ if (!types->getSingletonNoBarrier(i)) {
+ hasObjectGroups = hasObjectGroups || types->getGroupNoBarrier(i);
+ continue;
+ }
+
+ if (lastBranch.isInitialized()) {
+ comment("emit GC pointer checks");
+ lastBranch.emit(*this);
+ }
+
+ JSObject* object = types->getSingletonNoBarrier(i);
+ lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
+ }
+
+ if (hasObjectGroups) {
+ comment("has object groups");
+        // We may overwrite the obj register below, so emit the pending branch
+        // now: it depends on the previous value of the obj register, and a
+        // branch is definitely going to follow, so there is no need to invert
+        // the condition.
+ if (lastBranch.isInitialized())
+ lastBranch.emit(*this);
+ lastBranch = BranchGCPtr();
+
+        // Note: some platforms may give the same register for obj and scratch.
+        // Make sure the obj register is no longer needed once we write to
+        // scratch!
+ loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+
+ for (unsigned i = 0; i < count; i++) {
+ if (!types->getGroupNoBarrier(i))
+ continue;
+
+ if (lastBranch.isInitialized())
+ lastBranch.emit(*this);
+
+ ObjectGroup* group = types->getGroupNoBarrier(i);
+ lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(group), &matched);
+ }
+ }
+
+ if (!lastBranch.isInitialized()) {
+ jump(miss);
+ return;
+ }
+
+ lastBranch.invertCondition();
+ lastBranch.relink(miss);
+ lastBranch.emit(*this);
+
+ bind(&matched);
+}
+
+template void MacroAssembler::guardTypeSet(const Address& address, const TypeSet* types,
+ BarrierKind kind, Register scratch, Label* miss);
+template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
+ BarrierKind kind, Register scratch, Label* miss);
+template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, const TypeSet* types,
+ BarrierKind kind, Register scratch, Label* miss);
+
+template void MacroAssembler::guardTypeSetMightBeIncomplete(const TemporaryTypeSet* types,
+ Register obj, Register scratch,
+ Label* label);
+
+template<typename S, typename T>
+static void
+StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
+ unsigned numElems)
+{
+ switch (arrayType) {
+ case Scalar::Float32:
+ masm.storeFloat32(value, dest);
+ break;
+ case Scalar::Float64:
+ masm.storeDouble(value, dest);
+ break;
+ case Scalar::Float32x4:
+ switch (numElems) {
+ case 1:
+ masm.storeFloat32(value, dest);
+ break;
+ case 2:
+ masm.storeDouble(value, dest);
+ break;
+ case 3:
+ masm.storeFloat32x3(value, dest);
+ break;
+ case 4:
+ masm.storeUnalignedSimd128Float(value, dest);
+ break;
+ default: MOZ_CRASH("unexpected number of elements in simd write");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (numElems) {
+ case 1:
+ masm.storeInt32x1(value, dest);
+ break;
+ case 2:
+ masm.storeInt32x2(value, dest);
+ break;
+ case 3:
+ masm.storeInt32x3(value, dest);
+ break;
+ case 4:
+ masm.storeUnalignedSimd128Int(value, dest);
+ break;
+ default: MOZ_CRASH("unexpected number of elements in simd write");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(numElems == 16, "unexpected partial store");
+ masm.storeUnalignedSimd128Int(value, dest);
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(numElems == 8, "unexpected partial store");
+ masm.storeUnalignedSimd128Int(value, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+void
+MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+ const BaseIndex& dest, unsigned numElems)
+{
+ StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
+}
+void
+MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+ const Address& dest, unsigned numElems)
+{
+ StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
+}
+
+template<typename T>
+void
+MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
+ Label* fail, bool canonicalizeDoubles, unsigned numElems)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ load8SignExtend(src, dest.gpr());
+ break;
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ load8ZeroExtend(src, dest.gpr());
+ break;
+ case Scalar::Int16:
+ load16SignExtend(src, dest.gpr());
+ break;
+ case Scalar::Uint16:
+ load16ZeroExtend(src, dest.gpr());
+ break;
+ case Scalar::Int32:
+ load32(src, dest.gpr());
+ break;
+ case Scalar::Uint32:
+ if (dest.isFloat()) {
+ load32(src, temp);
+ convertUInt32ToDouble(temp, dest.fpu());
+ } else {
+ load32(src, dest.gpr());
+
+ // Bail out if the value doesn't fit into a signed int32 value. This
+ // is what allows MLoadUnboxedScalar to have a type() of
+ // MIRType::Int32 for UInt32 array loads.
+ branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
+ }
+ break;
+ case Scalar::Float32:
+ loadFloat32(src, dest.fpu());
+ canonicalizeFloat(dest.fpu());
+ break;
+ case Scalar::Float64:
+ loadDouble(src, dest.fpu());
+ if (canonicalizeDoubles)
+ canonicalizeDouble(dest.fpu());
+ break;
+ case Scalar::Int32x4:
+ switch (numElems) {
+ case 1:
+ loadInt32x1(src, dest.fpu());
+ break;
+ case 2:
+ loadInt32x2(src, dest.fpu());
+ break;
+ case 3:
+ loadInt32x3(src, dest.fpu());
+ break;
+ case 4:
+ loadUnalignedSimd128Int(src, dest.fpu());
+ break;
+ default: MOZ_CRASH("unexpected number of elements in SIMD load");
+ }
+ break;
+ case Scalar::Float32x4:
+ switch (numElems) {
+ case 1:
+ loadFloat32(src, dest.fpu());
+ break;
+ case 2:
+ loadDouble(src, dest.fpu());
+ break;
+ case 3:
+ loadFloat32x3(src, dest.fpu());
+ break;
+ case 4:
+ loadUnalignedSimd128Float(src, dest.fpu());
+ break;
+ default: MOZ_CRASH("unexpected number of elements in SIMD load");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(numElems == 16, "unexpected partial load");
+ loadUnalignedSimd128Int(src, dest.fpu());
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(numElems == 8, "unexpected partial load");
+ loadUnalignedSimd128Int(src, dest.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
+ Register temp, Label* fail, bool canonicalizeDoubles,
+ unsigned numElems);
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
+ Register temp, Label* fail, bool canonicalizeDoubles,
+ unsigned numElems);
+
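+// As above, but box the loaded element as a Value. Small integer types get an
+// int32 tag; a Uint32 that does not fit in an int32 is boxed as a double when
+// |allowDouble| is set and branches to |fail| otherwise; floats become doubles.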
+template<typename T>
+void
+MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest,
+ bool allowDouble, Register temp, Label* fail)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
+ tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
+ break;
+ case Scalar::Uint32:
+        // Don't clobber dest when we could fail; use temp instead.
+ load32(src, temp);
+ if (allowDouble) {
+ // If the value fits in an int32, store an int32 type tag.
+ // Else, convert the value to double and box it.
+ Label done, isDouble;
+ branchTest32(Assembler::Signed, temp, temp, &isDouble);
+ {
+ tagValue(JSVAL_TYPE_INT32, temp, dest);
+ jump(&done);
+ }
+ bind(&isDouble);
+ {
+ convertUInt32ToDouble(temp, ScratchDoubleReg);
+ boxDouble(ScratchDoubleReg, dest);
+ }
+ bind(&done);
+ } else {
+ // Bailout if the value does not fit in an int32.
+ branchTest32(Assembler::Signed, temp, temp, fail);
+ tagValue(JSVAL_TYPE_INT32, temp, dest);
+ }
+ break;
+ case Scalar::Float32:
+ loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloat32Reg), dest.scratchReg(),
+ nullptr);
+ convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
+ boxDouble(ScratchDoubleReg, dest);
+ break;
+ case Scalar::Float64:
+ loadFromTypedArray(arrayType, src, AnyRegister(ScratchDoubleReg), dest.scratchReg(),
+ nullptr);
+ boxDouble(ScratchDoubleReg, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, const ValueOperand& dest,
+ bool allowDouble, Register temp, Label* fail);
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, const ValueOperand& dest,
+ bool allowDouble, Register temp, Label* fail);
+
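+// Load a property stored in an unboxed object, converting the raw payload to
+// the representation expected by |output| (tagging it as a Value if needed).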
+template <typename T>
+void
+MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)
+{
+ switch (type) {
+ case JSVAL_TYPE_INT32: {
+ // Handle loading an int32 into a double reg.
+ if (output.type() == MIRType::Double) {
+ convertInt32ToDouble(address, output.typedReg().fpu());
+ break;
+ }
+ MOZ_FALLTHROUGH;
+ }
+
+ case JSVAL_TYPE_BOOLEAN:
+ case JSVAL_TYPE_STRING: {
+ Register outReg;
+ if (output.hasValue()) {
+ outReg = output.valueReg().scratchReg();
+ } else {
+ MOZ_ASSERT(output.type() == MIRTypeFromValueType(type));
+ outReg = output.typedReg().gpr();
+ }
+
+ switch (type) {
+ case JSVAL_TYPE_BOOLEAN:
+ load8ZeroExtend(address, outReg);
+ break;
+ case JSVAL_TYPE_INT32:
+ load32(address, outReg);
+ break;
+ case JSVAL_TYPE_STRING:
+ loadPtr(address, outReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ if (output.hasValue())
+ tagValue(type, outReg, output.valueReg());
+ break;
+ }
+
+ case JSVAL_TYPE_OBJECT:
+ if (output.hasValue()) {
+ Register scratch = output.valueReg().scratchReg();
+ loadPtr(address, scratch);
+
+ Label notNull, done;
+ branchPtr(Assembler::NotEqual, scratch, ImmWord(0), &notNull);
+
+ moveValue(NullValue(), output.valueReg());
+ jump(&done);
+
+ bind(&notNull);
+ tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
+
+ bind(&done);
+ } else {
+            // A null value is not possible here: otherwise the result would
+            // be a Value (either because null has been read before or because
+            // there is a barrier).
+ Register reg = output.typedReg().gpr();
+ loadPtr(address, reg);
+#ifdef DEBUG
+ Label ok;
+ branchTestPtr(Assembler::NonZero, reg, reg, &ok);
+ assumeUnreachable("Null not possible");
+ bind(&ok);
+#endif
+ }
+ break;
+
+ case JSVAL_TYPE_DOUBLE:
+ // Note: doubles in unboxed objects are not accessed through other
+ // views and do not need canonicalization.
+ if (output.hasValue())
+ loadValue(address, output.valueReg());
+ else
+ loadDouble(address, output.typedReg().fpu());
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+}
+
+template void
+MacroAssembler::loadUnboxedProperty(Address address, JSValueType type,
+ TypedOrValueRegister output);
+
+template void
+MacroAssembler::loadUnboxedProperty(BaseIndex address, JSValueType type,
+ TypedOrValueRegister output);
+
+static void
+StoreUnboxedFailure(MacroAssembler& masm, Label* failure)
+{
+ // Storing a value to an unboxed property is a fallible operation and
+ // the caller must provide a failure label if a particular unboxed store
+ // might fail. Sometimes, however, a store that cannot succeed (such as
+ // storing a string to an int32 property) will be marked as infallible.
+ // This can only happen if the code involved is unreachable.
+ if (failure)
+ masm.jump(failure);
+ else
+ masm.assumeUnreachable("Incompatible write to unboxed property");
+}
+
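+// Store |value| into an unboxed property of the given type. Mismatched inputs
+// either branch to |failure| or, when no failure label is provided, are
+// treated as unreachable (see StoreUnboxedFailure above).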
+template <typename T>
+void
+MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
+ const ConstantOrRegister& value, Label* failure)
+{
+ switch (type) {
+ case JSVAL_TYPE_BOOLEAN:
+ if (value.constant()) {
+ if (value.value().isBoolean())
+ store8(Imm32(value.value().toBoolean()), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else if (value.reg().hasTyped()) {
+ if (value.reg().type() == MIRType::Boolean)
+ store8(value.reg().typedReg().gpr(), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else {
+ if (failure)
+ branchTestBoolean(Assembler::NotEqual, value.reg().valueReg(), failure);
+ storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 1);
+ }
+ break;
+
+ case JSVAL_TYPE_INT32:
+ if (value.constant()) {
+ if (value.value().isInt32())
+ store32(Imm32(value.value().toInt32()), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else if (value.reg().hasTyped()) {
+ if (value.reg().type() == MIRType::Int32)
+ store32(value.reg().typedReg().gpr(), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else {
+ if (failure)
+ branchTestInt32(Assembler::NotEqual, value.reg().valueReg(), failure);
+ storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 4);
+ }
+ break;
+
+ case JSVAL_TYPE_DOUBLE:
+ if (value.constant()) {
+ if (value.value().isNumber()) {
+ loadConstantDouble(value.value().toNumber(), ScratchDoubleReg);
+ storeDouble(ScratchDoubleReg, address);
+ } else {
+ StoreUnboxedFailure(*this, failure);
+ }
+ } else if (value.reg().hasTyped()) {
+ if (value.reg().type() == MIRType::Int32) {
+ convertInt32ToDouble(value.reg().typedReg().gpr(), ScratchDoubleReg);
+ storeDouble(ScratchDoubleReg, address);
+ } else if (value.reg().type() == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), address);
+ } else {
+ StoreUnboxedFailure(*this, failure);
+ }
+ } else {
+ ValueOperand reg = value.reg().valueReg();
+ Label notInt32, end;
+ branchTestInt32(Assembler::NotEqual, reg, &notInt32);
+ int32ValueToDouble(reg, ScratchDoubleReg);
+ storeDouble(ScratchDoubleReg, address);
+ jump(&end);
+ bind(&notInt32);
+ if (failure)
+ branchTestDouble(Assembler::NotEqual, reg, failure);
+ storeValue(reg, address);
+ bind(&end);
+ }
+ break;
+
+ case JSVAL_TYPE_OBJECT:
+ if (value.constant()) {
+ if (value.value().isObjectOrNull())
+ storePtr(ImmGCPtr(value.value().toObjectOrNull()), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else if (value.reg().hasTyped()) {
+ MOZ_ASSERT(value.reg().type() != MIRType::Null);
+ if (value.reg().type() == MIRType::Object)
+ storePtr(value.reg().typedReg().gpr(), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else {
+ if (failure) {
+ Label ok;
+ branchTestNull(Assembler::Equal, value.reg().valueReg(), &ok);
+ branchTestObject(Assembler::NotEqual, value.reg().valueReg(), failure);
+ bind(&ok);
+ }
+ storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
+ }
+ break;
+
+ case JSVAL_TYPE_STRING:
+ if (value.constant()) {
+ if (value.value().isString())
+ storePtr(ImmGCPtr(value.value().toString()), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else if (value.reg().hasTyped()) {
+ if (value.reg().type() == MIRType::String)
+ storePtr(value.reg().typedReg().gpr(), address);
+ else
+ StoreUnboxedFailure(*this, failure);
+ } else {
+ if (failure)
+ branchTestString(Assembler::NotEqual, value.reg().valueReg(), failure);
+ storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
+ }
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+}
+
+template void
+MacroAssembler::storeUnboxedProperty(Address address, JSValueType type,
+ const ConstantOrRegister& value, Label* failure);
+
+template void
+MacroAssembler::storeUnboxedProperty(BaseIndex address, JSValueType type,
+ const ConstantOrRegister& value, Label* failure);
+
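+// Branch to |failure| unless |index| is below the capacity of the unboxed
+// array |obj|. Depending on the capacity-index bits, this compares against the
+// inline length or against an entry of the static CapacityArray.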
+void
+MacroAssembler::checkUnboxedArrayCapacity(Register obj, const RegisterOrInt32Constant& index,
+ Register temp, Label* failure)
+{
+ Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
+ Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
+
+ Label capacityIsIndex, done;
+ load32(initLengthAddr, temp);
+ branchTest32(Assembler::NonZero, temp, Imm32(UnboxedArrayObject::CapacityMask), &capacityIsIndex);
+ branch32(Assembler::BelowOrEqual, lengthAddr, index, failure);
+ jump(&done);
+ bind(&capacityIsIndex);
+
+ // Do a partial shift so that we can get an absolute offset from the base
+ // of CapacityArray to use.
+ JS_STATIC_ASSERT(sizeof(UnboxedArrayObject::CapacityArray[0]) == 4);
+ rshiftPtr(Imm32(UnboxedArrayObject::CapacityShift - 2), temp);
+ and32(Imm32(~0x3), temp);
+
+ addPtr(ImmPtr(&UnboxedArrayObject::CapacityArray), temp);
+ branch32(Assembler::BelowOrEqual, Address(temp, 0), index, failure);
+ bind(&done);
+}
+
+// Inlined version of gc::CheckAllocatorState that checks the bare essentials
+// and bails for anything that cannot be handled with our jit allocators.
+void
+MacroAssembler::checkAllocatorState(Label* fail)
+{
+ // Don't execute the inline path if we are tracing allocations,
+ // or when the memory profiler is enabled.
+ if (js::gc::TraceEnabled() || MemProfiler::enabled())
+ jump(fail);
+
+#ifdef JS_GC_ZEAL
+ // Don't execute the inline path if gc zeal or tracing are active.
+ branch32(Assembler::NotEqual,
+ AbsoluteAddress(GetJitContext()->runtime->addressOfGCZealModeBits()), Imm32(0),
+ fail);
+#endif
+
+ // Don't execute the inline path if the compartment has an object metadata callback,
+ // as the metadata to use for the object may vary between executions of the op.
+ if (GetJitContext()->compartment->hasAllocationMetadataBuilder())
+ jump(fail);
+}
+
+// Inline version of ShouldNurseryAllocate.
+bool
+MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap)
+{
+ // Note that Ion elides barriers on writes to objects known to be in the
+ // nursery, so any allocation that can be made into the nursery must be made
+ // into the nursery, even if the nursery is disabled. At runtime these will
+ // take the out-of-line path, which is required to insert a barrier for the
+ // initializing writes.
+ return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
+}
+
+// Inline version of Nursery::allocateObject. If the object has dynamic slots,
+// this fills in the slots_ pointer.
+void
+MacroAssembler::nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
+ size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
+{
+ MOZ_ASSERT(IsNurseryAllocable(allocKind));
+ MOZ_ASSERT(initialHeap != gc::TenuredHeap);
+
+ // We still need to allocate in the nursery, per the comment in
+ // shouldNurseryAllocate; however, we need to insert into the
+ // mallocedBuffers set, so bail to do the nursery allocation in the
+ // interpreter.
+ if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
+ jump(fail);
+ return;
+ }
+
+ // No explicit check for nursery.isEnabled() is needed, as the comparison
+ // with the nursery's end will always fail in such cases.
+ const Nursery& nursery = GetJitContext()->runtime->gcNursery();
+ int thingSize = int(gc::Arena::thingSize(allocKind));
+ int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
+ MOZ_ASSERT(totalSize % gc::CellSize == 0);
+ loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
+ computeEffectiveAddress(Address(result, totalSize), temp);
+ branchPtr(Assembler::Below, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
+ storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
+
+ if (nDynamicSlots) {
+ computeEffectiveAddress(Address(result, thingSize), temp);
+ storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
+ }
+}
+
+// Inlined version of FreeSpan::allocate. This does not fill in slots_.
+void
+MacroAssembler::freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
+{
+ CompileZone* zone = GetJitContext()->compartment->zone();
+ int thingSize = int(gc::Arena::thingSize(allocKind));
+
+ Label fallback;
+ Label success;
+
+ // Load the first and last offsets of |zone|'s free list for |allocKind|.
+ // If there is no room remaining in the span, fall back to get the next one.
+ loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
+ load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
+ load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
+ branch32(Assembler::AboveOrEqual, result, temp, &fallback);
+
+ // Bump the offset for the next allocation.
+ add32(Imm32(thingSize), result);
+ loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
+ store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
+ sub32(Imm32(thingSize), result);
+ addPtr(temp, result); // Turn the offset into a pointer.
+ jump(&success);
+
+ bind(&fallback);
+ // If there are no free spans left, we bail to finish the allocation. The
+ // interpreter will call the GC allocator to set up a new arena to allocate
+ // from, after which we can resume allocating in the jit.
+ branchTest32(Assembler::Zero, result, result, fail);
+ loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
+ addPtr(temp, result); // Turn the offset into a pointer.
+ Push(result);
+ // Update the free list to point to the next span (which may be empty).
+ load32(Address(result, 0), result);
+ store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
+ Pop(result);
+
+ bind(&success);
+}
+
+void
+MacroAssembler::callMallocStub(size_t nbytes, Register result, Label* fail)
+{
+ // This register must match the one in JitRuntime::generateMallocStub.
+ const Register regNBytes = CallTempReg0;
+
+ MOZ_ASSERT(nbytes > 0);
+ MOZ_ASSERT(nbytes <= INT32_MAX);
+
+ if (regNBytes != result)
+ push(regNBytes);
+ move32(Imm32(nbytes), regNBytes);
+ call(GetJitContext()->runtime->jitRuntime()->mallocStub());
+ if (regNBytes != result) {
+ movePtr(regNBytes, result);
+ pop(regNBytes);
+ }
+ branchTest32(Assembler::Zero, result, result, fail);
+}
+
+void
+MacroAssembler::callFreeStub(Register slots)
+{
+ // This register must match the one in JitRuntime::generateFreeStub.
+ const Register regSlots = CallTempReg0;
+
+ push(regSlots);
+ movePtr(slots, regSlots);
+ call(GetJitContext()->runtime->jitRuntime()->freeStub());
+ pop(regSlots);
+}
+
+// Inlined equivalent of gc::AllocateObject, without failure case handling.
+void
+MacroAssembler::allocateObject(Register result, Register temp, gc::AllocKind allocKind,
+ uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
+{
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+
+ checkAllocatorState(fail);
+
+ if (shouldNurseryAllocate(allocKind, initialHeap))
+ return nurseryAllocate(result, temp, allocKind, nDynamicSlots, initialHeap, fail);
+
+ if (!nDynamicSlots)
+ return freeListAllocate(result, temp, allocKind, fail);
+
+ callMallocStub(nDynamicSlots * sizeof(GCPtrValue), temp, fail);
+
+ Label failAlloc;
+ Label success;
+
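+    // The malloc'ed slots pointer lives in |temp|. Keep it alive on the stack
+    // across the free-list allocation, and free it if that allocation fails.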
+ push(temp);
+ freeListAllocate(result, temp, allocKind, &failAlloc);
+
+ pop(temp);
+ storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
+
+ jump(&success);
+
+ bind(&failAlloc);
+ pop(temp);
+ callFreeStub(temp);
+ jump(fail);
+
+ bind(&success);
+}
+
+void
+MacroAssembler::createGCObject(Register obj, Register temp, JSObject* templateObj,
+ gc::InitialHeap initialHeap, Label* fail, bool initContents,
+ bool convertDoubleElements)
+{
+ gc::AllocKind allocKind = templateObj->asTenured().getAllocKind();
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+
+ uint32_t nDynamicSlots = 0;
+ if (templateObj->isNative()) {
+ nDynamicSlots = templateObj->as<NativeObject>().numDynamicSlots();
+
+ // Arrays with copy on write elements do not need fixed space for an
+ // elements header. The template object, which owns the original
+ // elements, might have another allocation kind.
+ if (templateObj->as<NativeObject>().denseElementsAreCopyOnWrite())
+ allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
+ }
+
+ allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
+ initGCThing(obj, temp, templateObj, initContents, convertDoubleElements);
+}
+
+
+// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
+// Non-object allocation does not need to worry about slots, so can take a
+// simpler path.
+void
+MacroAssembler::allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
+{
+ checkAllocatorState(fail);
+ freeListAllocate(result, temp, allocKind, fail);
+}
+
+void
+MacroAssembler::newGCString(Register result, Register temp, Label* fail)
+{
+ allocateNonObject(result, temp, js::gc::AllocKind::STRING, fail);
+}
+
+void
+MacroAssembler::newGCFatInlineString(Register result, Register temp, Label* fail)
+{
+ allocateNonObject(result, temp, js::gc::AllocKind::FAT_INLINE_STRING, fail);
+}
+
+void
+MacroAssembler::copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
+ uint32_t start, uint32_t end)
+{
+ uint32_t nfixed = Min(templateObj->numFixedSlotsForCompilation(), end);
+ for (unsigned i = start; i < nfixed; i++)
+ storeValue(templateObj->getFixedSlot(i), Address(obj, NativeObject::getFixedSlotOffset(i)));
+}
+
+void
+MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
+ uint32_t start, uint32_t end, const Value& v)
+{
+ MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
+
+ if (start >= end)
+ return;
+
+#ifdef JS_NUNBOX32
+ // We only have a single spare register, so do the initialization as two
+ // strided writes of the tag and body.
+ Address addr = base;
+ move32(Imm32(v.toNunboxPayload()), temp);
+ for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
+ store32(temp, ToPayload(addr));
+
+ addr = base;
+ move32(Imm32(v.toNunboxTag()), temp);
+ for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
+ store32(temp, ToType(addr));
+#else
+ moveValue(v, temp);
+ for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue))
+ storePtr(temp, base);
+#endif
+}
+
+void
+MacroAssembler::fillSlotsWithUndefined(Address base, Register temp, uint32_t start, uint32_t end)
+{
+ fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
+}
+
+void
+MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp, uint32_t start, uint32_t end)
+{
+ fillSlotsWithConstantValue(base, temp, start, end, MagicValue(JS_UNINITIALIZED_LEXICAL));
+}
+
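+// Scan the template object's slots backwards to find where the trailing run
+// of undefined slots begins and, immediately before it, where any run of
+// uninitialized-lexical magic values begins.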
+static void
+FindStartOfUninitializedAndUndefinedSlots(NativeObject* templateObj, uint32_t nslots,
+ uint32_t* startOfUninitialized,
+ uint32_t* startOfUndefined)
+{
+ MOZ_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
+ MOZ_ASSERT(nslots > 0);
+
+ uint32_t first = nslots;
+ for (; first != 0; --first) {
+ if (templateObj->getSlot(first - 1) != UndefinedValue())
+ break;
+ }
+ *startOfUndefined = first;
+
+ if (first != 0 && IsUninitializedLexical(templateObj->getSlot(first - 1))) {
+ for (; first != 0; --first) {
+ if (!IsUninitializedLexical(templateObj->getSlot(first - 1)))
+ break;
+ }
+ *startOfUninitialized = first;
+ } else {
+ *startOfUninitialized = *startOfUndefined;
+ }
+}
+
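+// Out-of-line helper called through the ABI from initTypedArraySlots: sets the
+// typed array's length, then allocates and zeroes a nursery buffer for its
+// elements, leaving the private pointer null on failure.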
+static void
+AllocateObjectBufferWithInit(JSContext* cx, TypedArrayObject* obj, int32_t count)
+{
+ JS::AutoCheckCannotGC nogc(cx);
+
+ obj->initPrivate(nullptr);
+
+ // Negative numbers or zero will bail out to the slow path, which in turn will raise
+ // an invalid argument exception or create a correct object with zero elements.
+ if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
+ obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
+ return;
+ }
+
+ obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));
+ size_t nbytes;
+
+ switch (obj->type()) {
+#define CREATE_TYPED_ARRAY(T, N) \
+ case Scalar::N: \
+ MOZ_ALWAYS_TRUE(js::CalculateAllocSize<T>(count, &nbytes)); \
+ break;
+JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
+#undef CREATE_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid());
+
+ nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+ Nursery& nursery = cx->runtime()->gc.nursery;
+ void* buf = nursery.allocateBuffer(obj, nbytes);
+ if (buf) {
+ obj->initPrivate(buf);
+ memset(buf, 0, nbytes);
+ }
+}
+
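+// Set up a freshly allocated typed array's data pointer and elements: small
+// fixed-length arrays keep their (zeroed) data inline in the object, while
+// anything else calls AllocateObjectBufferWithInit through the ABI to get a
+// separate buffer, branching to |fail| if that allocation returns null.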
+void
+MacroAssembler::initTypedArraySlots(Register obj, Register temp, Register lengthReg,
+ LiveRegisterSet liveRegs, Label* fail,
+ TypedArrayObject* templateObj, TypedArrayLength lengthKind)
+{
+ MOZ_ASSERT(templateObj->hasPrivate());
+ MOZ_ASSERT(!templateObj->hasBuffer());
+
+ size_t dataSlotOffset = TypedArrayObject::dataOffset();
+ size_t dataOffset = TypedArrayObject::dataOffset() + sizeof(HeapSlot);
+
+ static_assert(TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
+ "fixed inline element data assumed to begin after the data slot");
+
+ // Initialise data elements to zero.
+ int32_t length = templateObj->length();
+ size_t nbytes = length * templateObj->bytesPerElement();
+
+ if (lengthKind == TypedArrayLength::Fixed && dataOffset + nbytes <= JSObject::MAX_BYTE_SIZE) {
+ MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
+
+ // Store data elements inside the remaining JSObject slots.
+ computeEffectiveAddress(Address(obj, dataOffset), temp);
+ storePtr(temp, Address(obj, dataSlotOffset));
+
+ // Write enough zero pointers into fixed data to zero every
+ // element. (This zeroes past the end of a byte count that's
+ // not a multiple of pointer size. That's okay, because fixed
+ // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
+ // and we won't inline unless the desired memory fits in that
+ // space.)
+ static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
+
+ size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char *);
+ for (size_t i = 0; i < numZeroPointers; i++)
+ storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char *)));
+#ifdef DEBUG
+ if (nbytes == 0)
+ store8(Imm32(TypedArrayObject::ZeroLengthArrayData), Address(obj, dataSlotOffset));
+#endif
+ } else {
+ if (lengthKind == TypedArrayLength::Fixed)
+ move32(Imm32(length), lengthReg);
+
+ // Allocate a buffer on the heap to store the data elements.
+ liveRegs.addUnchecked(temp);
+ liveRegs.addUnchecked(obj);
+ liveRegs.addUnchecked(lengthReg);
+ PushRegsInMask(liveRegs);
+ setupUnalignedABICall(temp);
+ loadJSContext(temp);
+ passABIArg(temp);
+ passABIArg(obj);
+ passABIArg(lengthReg);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateObjectBufferWithInit));
+ PopRegsInMask(liveRegs);
+
+        // Fail if the data elements pointer is still null.
+ branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
+ }
+}
+
+void
+MacroAssembler::initGCSlots(Register obj, Register temp, NativeObject* templateObj,
+ bool initContents)
+{
+ // Slots of non-array objects are required to be initialized.
+ // Use the values currently in the template object.
+ uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
+ if (nslots == 0)
+ return;
+
+ uint32_t nfixed = templateObj->numUsedFixedSlots();
+ uint32_t ndynamic = templateObj->numDynamicSlots();
+
+ // Attempt to group slot writes such that we minimize the amount of
+ // duplicated data we need to embed in code and load into registers. In
+ // general, most template object slots will be undefined except for any
+ // reserved slots. Since reserved slots come first, we split the object
+ // logically into independent non-UndefinedValue writes to the head and
+ // duplicated writes of UndefinedValue to the tail. For the majority of
+ // objects, the "tail" will be the entire slot range.
+ //
+ // The template object may be a CallObject, in which case we need to
+ // account for uninitialized lexical slots as well as undefined
+    // slots. Uninitialized lexical slots appear in CallObjects if the function
+    // has parameter expressions, in which case closed-over parameters have
+    // TDZ. Uninitialized slots come before undefined slots in CallObjects.
+ uint32_t startOfUninitialized = nslots;
+ uint32_t startOfUndefined = nslots;
+ FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots,
+ &startOfUninitialized, &startOfUndefined);
+ MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
+ MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
+ MOZ_ASSERT_IF(!templateObj->is<CallObject>(), startOfUninitialized == startOfUndefined);
+
+ // Copy over any preserved reserved slots.
+ copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
+
+ // Fill the rest of the fixed slots with undefined and uninitialized.
+ if (initContents) {
+ size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
+ fillSlotsWithUninitialized(Address(obj, offset), temp,
+ startOfUninitialized, Min(startOfUndefined, nfixed));
+
+ offset = NativeObject::getFixedSlotOffset(startOfUndefined);
+ fillSlotsWithUndefined(Address(obj, offset), temp,
+ startOfUndefined, nfixed);
+ }
+
+ if (ndynamic) {
+ // We are short one register to do this elegantly. Borrow the obj
+ // register briefly for our slots base address.
+ push(obj);
+ loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
+
+ // Fill uninitialized slots if necessary. Otherwise initialize all
+ // slots to undefined.
+ if (startOfUndefined > nfixed) {
+ MOZ_ASSERT(startOfUninitialized != startOfUndefined);
+ fillSlotsWithUninitialized(Address(obj, 0), temp, 0, startOfUndefined - nfixed);
+ size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
+ fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined - nfixed, ndynamic);
+ } else {
+ fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
+ }
+
+ pop(obj);
+ }
+}
+
+void
+MacroAssembler::initGCThing(Register obj, Register temp, JSObject* templateObj,
+ bool initContents, bool convertDoubleElements)
+{
+ // Fast initialization of an empty object returned by allocateObject().
+
+ storePtr(ImmGCPtr(templateObj->group()), Address(obj, JSObject::offsetOfGroup()));
+
+ if (Shape* shape = templateObj->maybeShape())
+ storePtr(ImmGCPtr(shape), Address(obj, ShapedObject::offsetOfShape()));
+
+ MOZ_ASSERT_IF(convertDoubleElements, templateObj->is<ArrayObject>());
+
+ if (templateObj->isNative()) {
+ NativeObject* ntemplate = &templateObj->as<NativeObject>();
+ MOZ_ASSERT_IF(!ntemplate->denseElementsAreCopyOnWrite(), !ntemplate->hasDynamicElements());
+
+ // If the object has dynamic slots, the slots member has already been
+ // filled in.
+ if (!ntemplate->hasDynamicSlots())
+ storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));
+
+ if (ntemplate->denseElementsAreCopyOnWrite()) {
+ storePtr(ImmPtr((const Value*) ntemplate->getDenseElements()),
+ Address(obj, NativeObject::offsetOfElements()));
+ } else if (ntemplate->is<ArrayObject>()) {
+ int elementsOffset = NativeObject::offsetOfFixedElements();
+
+ computeEffectiveAddress(Address(obj, elementsOffset), temp);
+ storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
+
+ // Fill in the elements header.
+ store32(Imm32(ntemplate->getDenseCapacity()),
+ Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
+ store32(Imm32(ntemplate->getDenseInitializedLength()),
+ Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
+ store32(Imm32(ntemplate->as<ArrayObject>().length()),
+ Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
+ store32(Imm32(convertDoubleElements
+ ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
+ : 0),
+ Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
+ MOZ_ASSERT(!ntemplate->hasPrivate());
+ } else if (ntemplate->is<ArgumentsObject>()) {
+ // The caller will initialize the reserved slots.
+ MOZ_ASSERT(!initContents);
+ MOZ_ASSERT(!ntemplate->hasPrivate());
+ storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
+ } else {
+ // If the target type could be a TypedArray that maps shared memory
+ // then this would need to store emptyObjectElementsShared in that case.
+ MOZ_ASSERT(!ntemplate->isSharedMemory());
+
+ storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
+
+ initGCSlots(obj, temp, ntemplate, initContents);
+
+ if (ntemplate->hasPrivate() && !ntemplate->is<TypedArrayObject>()) {
+ uint32_t nfixed = ntemplate->numFixedSlotsForCompilation();
+ storePtr(ImmPtr(ntemplate->getPrivate()),
+ Address(obj, NativeObject::getPrivateDataOffset(nfixed)));
+ }
+ }
+ } else if (templateObj->is<InlineTypedObject>()) {
+ JS::AutoAssertNoGC nogc; // off-thread, so cannot GC
+ size_t nbytes = templateObj->as<InlineTypedObject>().size();
+ const uint8_t* memory = templateObj->as<InlineTypedObject>().inlineTypedMem(nogc);
+
+ // Memcpy the contents of the template object to the new object.
+ size_t offset = 0;
+ while (nbytes) {
+ uintptr_t value = *(uintptr_t*)(memory + offset);
+ storePtr(ImmWord(value),
+ Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
+ nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
+ offset += sizeof(uintptr_t);
+ }
+ } else if (templateObj->is<UnboxedPlainObject>()) {
+ storePtr(ImmWord(0), Address(obj, UnboxedPlainObject::offsetOfExpando()));
+ if (initContents)
+ initUnboxedObjectContents(obj, &templateObj->as<UnboxedPlainObject>());
+ } else if (templateObj->is<UnboxedArrayObject>()) {
+ MOZ_ASSERT(templateObj->as<UnboxedArrayObject>().hasInlineElements());
+ int elementsOffset = UnboxedArrayObject::offsetOfInlineElements();
+ computeEffectiveAddress(Address(obj, elementsOffset), temp);
+ storePtr(temp, Address(obj, UnboxedArrayObject::offsetOfElements()));
+ store32(Imm32(templateObj->as<UnboxedArrayObject>().length()),
+ Address(obj, UnboxedArrayObject::offsetOfLength()));
+ uint32_t capacityIndex = templateObj->as<UnboxedArrayObject>().capacityIndex();
+ store32(Imm32(capacityIndex << UnboxedArrayObject::CapacityShift),
+ Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
+ } else {
+ MOZ_CRASH("Unknown object");
+ }
+
+#ifdef JS_GC_TRACE
+ RegisterSet regs = RegisterSet::Volatile();
+ PushRegsInMask(regs);
+ regs.takeUnchecked(obj);
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(obj);
+ movePtr(ImmGCPtr(templateObj->type()), temp);
+ passABIArg(temp);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::gc::TraceCreateObject));
+
+ PopRegsInMask(RegisterSet::Volatile());
+#endif
+}
+
+void
+MacroAssembler::initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject)
+{
+ const UnboxedLayout& layout = templateObject->layoutDontCheckGeneration();
+
+ // Initialize reference fields of the object, per UnboxedPlainObject::create.
+ if (const int32_t* list = layout.traceList()) {
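+        // The trace list holds -1-terminated runs of field offsets: string
+        // fields first, then object fields (the Value run is asserted to be
+        // empty below). Strings are initialized to the empty atom and objects
+        // to null.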
+ while (*list != -1) {
+ storePtr(ImmGCPtr(GetJitContext()->runtime->names().empty),
+ Address(object, UnboxedPlainObject::offsetOfData() + *list));
+ list++;
+ }
+ list++;
+ while (*list != -1) {
+ storePtr(ImmWord(0),
+ Address(object, UnboxedPlainObject::offsetOfData() + *list));
+ list++;
+ }
+ // Unboxed objects don't have Values to initialize.
+ MOZ_ASSERT(*(list + 1) == -1);
+ }
+}
+
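+// Emit the inline fast paths for a string equality comparison: identical
+// pointers, then a pointer compare for two atoms, then a length check. Any
+// case that needs a character-by-character comparison branches to |fail|.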
+void
+MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
+ Label* fail)
+{
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ Label done;
+ Label notPointerEqual;
+ // Fast path for identical strings.
+ branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
+ move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
+ jump(&done);
+
+ bind(&notPointerEqual);
+
+ Label notAtom;
+ // Optimize the equality operation to a pointer compare for two atoms.
+ Imm32 atomBit(JSString::ATOM_BIT);
+ branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()), atomBit, &notAtom);
+ branchTest32(Assembler::Zero, Address(right, JSString::offsetOfFlags()), atomBit, &notAtom);
+
+ cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
+ jump(&done);
+
+ bind(&notAtom);
+ // Strings of different length can never be equal.
+ loadStringLength(left, result);
+ branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()), result, fail);
+ move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::loadStringChars(Register str, Register dest)
+{
+ Label isInline, done;
+ branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::INLINE_CHARS_BIT), &isInline);
+
+ loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
+ jump(&done);
+
+ bind(&isInline);
+ computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);
+
+ bind(&done);
+}
+
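+// Load the character at |index| of |str| into |output|, handling both Latin-1
+// and two-byte string representations.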
+void
+MacroAssembler::loadStringChar(Register str, Register index, Register output)
+{
+ MOZ_ASSERT(str != output);
+ MOZ_ASSERT(index != output);
+
+ loadStringChars(str, output);
+
+ Label isLatin1, done;
+ branchLatin1String(str, &isLatin1);
+ load16ZeroExtend(BaseIndex(output, index, TimesTwo), output);
+ jump(&done);
+
+ bind(&isLatin1);
+ load8ZeroExtend(BaseIndex(output, index, TimesOne), output);
+
+ bind(&done);
+}
+
+static void
+BailoutReportOverRecursed(JSContext* cx)
+{
+ ReportOverRecursed(cx);
+}
+
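+// Shared tail for bailouts: dispatch on the return code from Bailout (enter
+// baseline, propagate an exception, or report over-recursion), then copy the
+// reconstructed baseline frames onto the stack and resume either through a
+// type-monitor IC chain or directly in baseline jitcode.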
+void
+MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
+{
+ enterExitFrame();
+
+ Label baseline;
+
+ // The return value from Bailout is tagged as:
+ // - 0x0: done (enter baseline)
+ // - 0x1: error (handle exception)
+ // - 0x2: overrecursed
+ JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
+ JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
+ JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);
+
+ branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
+ branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());
+
+ // Fall-through: overrecursed.
+ {
+ loadJSContext(ReturnReg);
+ setupUnalignedABICall(scratch);
+ passABIArg(ReturnReg);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, BailoutReportOverRecursed));
+ jump(exceptionLabel());
+ }
+
+ bind(&baseline);
+ {
+ // Prepare a register set for use in this case.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(getStackPointer()));
+ regs.take(bailoutInfo);
+
+ // Reset SP to the point where clobbering starts.
+ loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));
+
+ Register copyCur = regs.takeAny();
+ Register copyEnd = regs.takeAny();
+ Register temp = regs.takeAny();
+
+ // Copy data onto stack.
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
+ {
+ Label copyLoop;
+ Label endOfCopy;
+ bind(&copyLoop);
+ branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
+ subPtr(Imm32(4), copyCur);
+ subFromStackPtr(Imm32(4));
+ load32(Address(copyCur, 0), temp);
+ store32(temp, Address(getStackPointer(), 0));
+ jump(&copyLoop);
+ bind(&endOfCopy);
+ }
+
+ // Enter exit frame for the FinishBailoutToBaseline call.
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
+ load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
+ makeFrameDescriptor(temp, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ push(temp);
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
+ // No GC things to mark on the stack, push a bare token.
+ enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ // If monitorStub is non-null, handle resumeAddr appropriately.
+ Label noMonitor;
+ Label done;
+ branchPtr(Assembler::Equal,
+ Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
+ ImmPtr(nullptr),
+ &noMonitor);
+
+ //
+ // Resuming into a monitoring stub chain.
+ //
+ {
+ // Save needed values onto stack temporarily.
+ pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));
+
+ // Call a stub to free allocated memory and create arguments objects.
+ setupUnalignedABICall(temp);
+ passABIArg(bailoutInfo);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
+ branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
+
+ // Restore values where they need to be and resume execution.
+ AllocatableGeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
+ enterMonRegs.take(R0);
+ enterMonRegs.take(ICStubReg);
+ enterMonRegs.take(BaselineFrameReg);
+ enterMonRegs.takeUnchecked(ICTailCallReg);
+
+ pop(ICStubReg);
+ pop(ICTailCallReg);
+ pop(BaselineFrameReg);
+ popValue(R0);
+
+ // Discard exit frame.
+ addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ push(ICTailCallReg);
+#endif
+ jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+ }
+
+ //
+ // Resuming into main jitcode.
+ //
+ bind(&noMonitor);
+ {
+ // Save needed values onto stack temporarily.
+ pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
+ pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
+
+ // Call a stub to free allocated memory and create arguments objects.
+ setupUnalignedABICall(temp);
+ passABIArg(bailoutInfo);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
+ branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
+
+ // Restore values where they need to be and resume execution.
+ AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
+ enterRegs.take(R0);
+ enterRegs.take(R1);
+ enterRegs.take(BaselineFrameReg);
+ Register jitcodeReg = enterRegs.takeAny();
+
+ pop(jitcodeReg);
+ pop(BaselineFrameReg);
+ popValue(R1);
+ popValue(R0);
+
+ // Discard exit frame.
+ addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
+
+ jump(jitcodeReg);
+ }
+ }
+}
+
+void
+MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label* failure)
+{
+ loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
+ if (failure)
+ branchTestPtr(Assembler::Zero, dest, dest, failure);
+}
+
+void
+MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, Label* failure)
+{
+ loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
+ if (failure)
+ branchTestPtr(Assembler::Zero, dest, dest, failure);
+}
+
+void
+MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
+{
+ if (framePtr != dest)
+ movePtr(framePtr, dest);
+ subPtr(Imm32(BaselineFrame::Size()), dest);
+}
+
+void
+MacroAssembler::handleFailure()
+{
+    // Re-entry code is irrelevant because the exception will leave the
+    // running function and never come back.
+ JitCode* excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
+ jump(excTail);
+}
+
+#ifdef DEBUG
+static void
+AssumeUnreachable_(const char* output) {
+ MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
+}
+#endif
+
+void
+MacroAssembler::assumeUnreachable(const char* output)
+{
+#ifdef DEBUG
+ if (!IsCompilingWasm()) {
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssumeUnreachable_));
+
+ PopRegsInMask(save);
+ }
+#endif
+
+ breakpoint();
+}
+
+template<typename T>
+void
+MacroAssembler::assertTestInt32(Condition cond, const T& value, const char* output)
+{
+#ifdef DEBUG
+ Label ok;
+ branchTestInt32(cond, value, &ok);
+ assumeUnreachable(output);
+ bind(&ok);
+#endif
+}
+
+template void MacroAssembler::assertTestInt32(Condition, const Address&, const char*);
+
+static void
+Printf0_(const char* output) {
+ // Use stderr instead of stdout because this is only used for debug
+ // output. stderr is less likely to interfere with the program's normal
+ // output, and it's always unbuffered.
+ fprintf(stderr, "%s", output);
+}
+
+void
+MacroAssembler::printf(const char* output)
+{
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf0_));
+
+ PopRegsInMask(save);
+}
+
+static void
+Printf1_(const char* output, uintptr_t value) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ char* line = JS_sprintf_append(nullptr, output, value);
+ if (!line)
+ oomUnsafe.crash("OOM at masm.printf");
+ fprintf(stderr, "%s", line);
+ js_free(line);
+}
+
+void
+MacroAssembler::printf(const char* output, Register value)
+{
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+
+ regs.takeUnchecked(value);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ passABIArg(value);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf1_));
+
+ PopRegsInMask(save);
+}
+
+#ifdef JS_TRACE_LOGGING
+void
+MacroAssembler::tracelogStartId(Register logger, uint32_t textId, bool force)
+{
+ if (!force && !TraceLogTextIdEnabled(textId))
+ return;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ regs.takeUnchecked(logger);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(logger);
+ move32(Imm32(textId), temp);
+ passABIArg(temp);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));
+
+ PopRegsInMask(save);
+}
+
+void
+MacroAssembler::tracelogStartId(Register logger, Register textId)
+{
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ regs.takeUnchecked(logger);
+ regs.takeUnchecked(textId);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(logger);
+ passABIArg(textId);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));
+
+ PopRegsInMask(save);
+}
+
+void
+MacroAssembler::tracelogStartEvent(Register logger, Register event)
+{
+ void (&TraceLogFunc)(TraceLoggerThread*, const TraceLoggerEvent&) = TraceLogStartEvent;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ regs.takeUnchecked(logger);
+ regs.takeUnchecked(event);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(logger);
+ passABIArg(event);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogFunc));
+
+ PopRegsInMask(save);
+}
+
+void
+MacroAssembler::tracelogStopId(Register logger, uint32_t textId, bool force)
+{
+ if (!force && !TraceLogTextIdEnabled(textId))
+ return;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ regs.takeUnchecked(logger);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(logger);
+ move32(Imm32(textId), temp);
+ passABIArg(temp);
+
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));
+
+ PopRegsInMask(save);
+}
+
+void
+MacroAssembler::tracelogStopId(Register logger, Register textId)
+{
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ regs.takeUnchecked(logger);
+ regs.takeUnchecked(textId);
+
+ Register temp = regs.takeAnyGeneral();
+
+ setupUnalignedABICall(temp);
+ passABIArg(logger);
+ passABIArg(textId);
+ callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));
+
+ PopRegsInMask(save);
+}
+#endif
+
+void
+MacroAssembler::convertInt32ValueToDouble(const Address& address, Register scratch, Label* done)
+{
+ branchTestInt32(Assembler::NotEqual, address, done);
+ unboxInt32(address, scratch);
+ convertInt32ToDouble(scratch, ScratchDoubleReg);
+ storeDouble(ScratchDoubleReg, address);
+}
+
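+// Convert a boxed Value to a float or double in |output|: undefined becomes
+// NaN, null becomes 0, booleans and int32s are converted, doubles are unboxed.
+// Anything else (strings, objects, symbols) branches to |fail|.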
+void
+MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
+ Label* fail, MIRType outputType)
+{
+ Register tag = splitTagForTest(value);
+
+ Label isDouble, isInt32, isBool, isNull, done;
+
+ branchTestDouble(Assembler::Equal, tag, &isDouble);
+ branchTestInt32(Assembler::Equal, tag, &isInt32);
+ branchTestBoolean(Assembler::Equal, tag, &isBool);
+ branchTestNull(Assembler::Equal, tag, &isNull);
+ branchTestUndefined(Assembler::NotEqual, tag, fail);
+
+ // fall-through: undefined
+ loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
+ jump(&done);
+
+ bind(&isNull);
+ loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
+ jump(&done);
+
+ bind(&isBool);
+ boolValueToFloatingPoint(value, output, outputType);
+ jump(&done);
+
+ bind(&isInt32);
+ int32ValueToFloatingPoint(value, output, outputType);
+ jump(&done);
+
+ bind(&isDouble);
+ FloatRegister tmp = output;
+ if (outputType == MIRType::Float32 && hasMultiAlias())
+ tmp = ScratchDoubleReg;
+
+ unboxDouble(value, tmp);
+ if (outputType == MIRType::Float32)
+ convertDoubleToFloat32(tmp, output);
+
+ bind(&done);
+}
+
+bool
+MacroAssembler::convertValueToFloatingPoint(JSContext* cx, const Value& v, FloatRegister output,
+ Label* fail, MIRType outputType)
+{
+ if (v.isNumber() || v.isString()) {
+ double d;
+ if (v.isNumber())
+ d = v.toNumber();
+ else if (!StringToNumber(cx, v.toString(), &d))
+ return false;
+
+ loadConstantFloatingPoint(d, (float)d, output, outputType);
+ return true;
+ }
+
+ if (v.isBoolean()) {
+ if (v.toBoolean())
+ loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
+ else
+ loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
+ return true;
+ }
+
+ if (v.isNull()) {
+ loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
+ return true;
+ }
+
+ if (v.isUndefined()) {
+ loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
+ return true;
+ }
+
+ MOZ_ASSERT(v.isObject() || v.isSymbol());
+ jump(fail);
+ return true;
+}
+
+bool
+MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister output, Label* fail,
+ MIRType outputType)
+{
+ if (src.constant())
+ return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);
+
+ convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
+ return true;
+}
+
+void
+MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
+ Label* fail, MIRType outputType)
+{
+ MOZ_ASSERT(IsFloatingPointType(outputType));
+
+ if (src.hasValue()) {
+ convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
+ return;
+ }
+
+ bool outputIsDouble = outputType == MIRType::Double;
+ switch (src.type()) {
+ case MIRType::Null:
+ loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
+ break;
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
+ break;
+ case MIRType::Float32:
+ if (outputIsDouble) {
+ convertFloat32ToDouble(src.typedReg().fpu(), output);
+ } else {
+ if (src.typedReg().fpu() != output)
+ moveFloat32(src.typedReg().fpu(), output);
+ }
+ break;
+ case MIRType::Double:
+ if (outputIsDouble) {
+ if (src.typedReg().fpu() != output)
+ moveDouble(src.typedReg().fpu(), output);
+ } else {
+ convertDoubleToFloat32(src.typedReg().fpu(), output);
+ }
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ jump(fail);
+ break;
+ case MIRType::Undefined:
+ loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
+ break;
+ default:
+ MOZ_CRASH("Bad MIRType");
+ }
+}
+
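+// Slow path for truncating a double (or float) to an int32: widen a float
+// input if requested, then call JS::ToInt32 (or the wasm ToInt32 builtin)
+// through the ABI and store the call result in |dest|.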
+void
+MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
+ bool compilingWasm)
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (widenFloatToDouble) {
+ convertFloat32ToDouble(src, ScratchDoubleReg);
+ src = ScratchDoubleReg;
+ }
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ FloatRegister srcSingle;
+ if (widenFloatToDouble) {
+ MOZ_ASSERT(src.isSingle());
+ srcSingle = src;
+ src = src.asDouble();
+ push(srcSingle);
+ convertFloat32ToDouble(srcSingle, src);
+ }
+#else
+ // Also see below
+ MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
+#endif
+
+ MOZ_ASSERT(src.isDouble());
+
+ setupUnalignedABICall(dest);
+ passABIArg(src, MoveOp::DOUBLE);
+ if (compilingWasm)
+ callWithABI(wasm::SymbolicAddress::ToInt32);
+ else
+ callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+ storeCallInt32Result(dest);
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ // Nothing
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (widenFloatToDouble)
+ pop(srcSingle);
+#else
+ MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
+#endif
+}
+
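+// Convert a double to an int32 according to |behavior|: an exact conversion
+// (optionally rejecting -0), a truncation that may go through |truncateFail|,
+// or a clamp to uint8 using |temp| as scratch.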
+void
+MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
+ Label* truncateFail, Label* fail,
+ IntConversionBehavior behavior)
+{
+ switch (behavior) {
+ case IntConversion_Normal:
+ case IntConversion_NegativeZeroCheck:
+ convertDoubleToInt32(src, output, fail, behavior == IntConversion_NegativeZeroCheck);
+ break;
+ case IntConversion_Truncate:
+ branchTruncateDoubleMaybeModUint32(src, output, truncateFail ? truncateFail : fail);
+ break;
+ case IntConversion_ClampToUint8:
+ // Clamping clobbers the input register, so use a temp.
+ moveDouble(src, temp);
+ clampDoubleToUint8(temp, output);
+ break;
+ }
+}
+
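+// Convert a boxed Value to an int32 according to |behavior| and |conversion|.
+// Strings are only handled in truncating contexts, through the out-of-line
+// |handleStringEntry|/|handleStringRejoin| paths; objects always branch to
+// |fail|.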
+void
+MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
+ Label* handleStringEntry, Label* handleStringRejoin,
+ Label* truncateDoubleSlow,
+ Register stringReg, FloatRegister temp, Register output,
+ Label* fail, IntConversionBehavior behavior,
+ IntConversionInputKind conversion)
+{
+ Register tag = splitTagForTest(value);
+ bool handleStrings = (behavior == IntConversion_Truncate ||
+ behavior == IntConversion_ClampToUint8) &&
+ handleStringEntry &&
+ handleStringRejoin;
+
+ MOZ_ASSERT_IF(handleStrings, conversion == IntConversion_Any);
+
+ Label done, isInt32, isBool, isDouble, isNull, isString;
+
+ maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
+ if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
+ maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
+ maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);
+
+ if (conversion == IntConversion_Any) {
+ // If we are not truncating, we fail for anything that's not
+ // null. Otherwise we might be able to handle strings and objects.
+ switch (behavior) {
+ case IntConversion_Normal:
+ case IntConversion_NegativeZeroCheck:
+ branchTestNull(Assembler::NotEqual, tag, fail);
+ break;
+
+ case IntConversion_Truncate:
+ case IntConversion_ClampToUint8:
+ maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
+ if (handleStrings)
+ maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
+ maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
+ branchTestUndefined(Assembler::NotEqual, tag, fail);
+ break;
+ }
+ } else {
+ jump(fail);
+ }
+
+ // The value is null or undefined in truncation contexts - just emit 0.
+ if (isNull.used())
+ bind(&isNull);
+ mov(ImmWord(0), output);
+ jump(&done);
+
+ // Try converting a string into a double, then jump to the double case.
+ if (handleStrings) {
+ bind(&isString);
+ unboxString(value, stringReg);
+ jump(handleStringEntry);
+ }
+
+ // Try converting double into integer.
+ if (isDouble.used() || handleStrings) {
+ if (isDouble.used()) {
+ bind(&isDouble);
+ unboxDouble(value, temp);
+ }
+
+ if (handleStrings)
+ bind(handleStringRejoin);
+
+ convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
+ jump(&done);
+ }
+
+ // Just unbox a bool, the result is 0 or 1.
+ if (isBool.used()) {
+ bind(&isBool);
+ unboxBoolean(value, output);
+ jump(&done);
+ }
+
+ // Integers can be unboxed.
+ if (isInt32.used()) {
+ bind(&isInt32);
+ unboxInt32(value, output);
+ if (behavior == IntConversion_ClampToUint8)
+ clampIntToUint8(output);
+ }
+
+ bind(&done);
+}
+
+bool
+MacroAssembler::convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
+ IntConversionBehavior behavior)
+{
+ bool handleStrings = (behavior == IntConversion_Truncate ||
+ behavior == IntConversion_ClampToUint8);
+
+ if (v.isNumber() || (handleStrings && v.isString())) {
+ double d;
+ if (v.isNumber())
+ d = v.toNumber();
+ else if (!StringToNumber(cx, v.toString(), &d))
+ return false;
+
+ switch (behavior) {
+ case IntConversion_Normal:
+ case IntConversion_NegativeZeroCheck: {
+            // -0 is checked anyway if we have a constant value.
+ int i;
+ if (mozilla::NumberIsInt32(d, &i))
+ move32(Imm32(i), output);
+ else
+ jump(fail);
+ break;
+ }
+ case IntConversion_Truncate:
+ move32(Imm32(ToInt32(d)), output);
+ break;
+ case IntConversion_ClampToUint8:
+ move32(Imm32(ClampDoubleToUint8(d)), output);
+ break;
+ }
+
+ return true;
+ }
+
+ if (v.isBoolean()) {
+ move32(Imm32(v.toBoolean() ? 1 : 0), output);
+ return true;
+ }
+
+ if (v.isNull() || v.isUndefined()) {
+ move32(Imm32(0), output);
+ return true;
+ }
+
+ MOZ_ASSERT(v.isObject() || v.isSymbol());
+
+ jump(fail);
+ return true;
+}
+
+bool
+MacroAssembler::convertConstantOrRegisterToInt(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister temp, Register output,
+ Label* fail, IntConversionBehavior behavior)
+{
+ if (src.constant())
+ return convertValueToInt(cx, src.value(), output, fail, behavior);
+
+ convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
+ return true;
+}
+
+void
+MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
+ Register output, Label* fail,
+ IntConversionBehavior behavior)
+{
+ if (src.hasValue()) {
+ convertValueToInt(src.valueReg(), temp, output, fail, behavior);
+ return;
+ }
+
+ switch (src.type()) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ move32(Imm32(0), output);
+ break;
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ if (src.typedReg().gpr() != output)
+ move32(src.typedReg().gpr(), output);
+ if (src.type() == MIRType::Int32 && behavior == IntConversion_ClampToUint8)
+ clampIntToUint8(output);
+ break;
+ case MIRType::Double:
+ convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
+ break;
+ case MIRType::Float32:
+ // Conversion to Double simplifies implementation at the expense of performance.
+ convertFloat32ToDouble(src.typedReg().fpu(), temp);
+ convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
+ break;
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ jump(fail);
+ break;
+ default:
+ MOZ_CRASH("Bad MIRType");
+ }
+}
+
+void
+MacroAssembler::finish()
+{
+ if (failureLabel_.used()) {
+ bind(&failureLabel_);
+ handleFailure();
+ }
+
+ MacroAssemblerSpecific::finish();
+}
+
+void
+MacroAssembler::link(JitCode* code)
+{
+ MOZ_ASSERT(!oom());
+ linkSelfReference(code);
+ linkProfilerCallSites(code);
+}
+
+MacroAssembler::AutoProfilerCallInstrumentation::AutoProfilerCallInstrumentation(
+ MacroAssembler& masm
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
+{
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ if (!masm.emitProfilingInstrumentation_)
+ return;
+
+ Register reg = CallTempReg0;
+ Register reg2 = CallTempReg1;
+ masm.push(reg);
+ masm.push(reg2);
+
+ JitContext* icx = GetJitContext();
+ AbsoluteAddress profilingActivation(icx->runtime->addressOfProfilingActivation());
+
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
+ masm.loadPtr(profilingActivation, reg2);
+ masm.storePtr(reg, Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
+
+ masm.appendProfilerCallSite(label);
+
+ masm.pop(reg2);
+ masm.pop(reg);
+}
+
+void
+MacroAssembler::linkProfilerCallSites(JitCode* code)
+{
+ for (size_t i = 0; i < profilerCallSites_.length(); i++) {
+ CodeOffset offset = profilerCallSites_[i];
+ CodeLocationLabel location(code, offset);
+ PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
+ }
+}
+
+void
+MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
+{
+ if (JitStackValueAlignment == 1)
+ return;
+
+ // A JitFrameLayout is composed of the following:
+ // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ //
+ // We want to ensure that the |raddr| address is aligned.
+ // Which implies that we want to ensure that |this| is aligned.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // Which implies that |argN| is aligned if |nargs| is even, and offset by
+ // |sizeof(Value)| if |nargs| is odd.
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+
+ // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
+ // aligned if |nargs| is odd.
+
+ // if (nargs % 2 == 0) {
+ // if (sp % JitStackAlignment == 0)
+ // sp -= sizeof(Value);
+ // MOZ_ASSERT(sp % JitStackAlignment == JitStackAlignment - sizeof(Value));
+ // } else {
+ // sp = sp & ~(JitStackAlignment - 1);
+ // }
+ Label odd, end;
+ Label* maybeAssert = &end;
+#ifdef DEBUG
+ Label assert;
+ maybeAssert = &assert;
+#endif
+ assertStackAlignment(sizeof(Value), 0);
+ branchTestPtr(Assembler::NonZero, nargs, Imm32(1), &odd);
+ branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), maybeAssert);
+ subFromStackPtr(Imm32(sizeof(Value)));
+#ifdef DEBUG
+ bind(&assert);
+#endif
+ assertStackAlignment(JitStackAlignment, sizeof(Value));
+ jump(&end);
+ bind(&odd);
+ andToStackPtr(Imm32(~(JitStackAlignment - 1)));
+ bind(&end);
+}
+
+void
+MacroAssembler::alignJitStackBasedOnNArgs(uint32_t nargs)
+{
+ if (JitStackValueAlignment == 1)
+ return;
+
+ // A JitFrameLayout is composed of the following:
+ // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ //
+ // We want to ensure that the |raddr| address is aligned.
+ // Which implies that we want to ensure that |this| is aligned.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // Which implies that |argN| is aligned if |nargs| is even, and offset by
+ // |sizeof(Value)| if |nargs| is odd.
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+
+ // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
+ // aligned if |nargs| is odd.
+
+ assertStackAlignment(sizeof(Value), 0);
+ if (nargs % 2 == 0) {
+ Label end;
+ branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
+ subFromStackPtr(Imm32(sizeof(Value)));
+ bind(&end);
+ assertStackAlignment(JitStackAlignment, sizeof(Value));
+ } else {
+ andToStackPtr(Imm32(~(JitStackAlignment - 1)));
+ }
+}
+
+// ===============================================================
+
+MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
+ JSScript* script, jsbytecode* pc)
+ : framePushed_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ emitProfilingInstrumentation_(false)
+{
+ constructRoot(cx);
+ jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
+ alloc_.emplace(cx);
+ moveResolver_.setAllocator(*jitContext_->temp);
+#if defined(JS_CODEGEN_ARM)
+ initWithAllocator();
+ m_buffer.id = GetJitContext()->getNextAssemblerId();
+#elif defined(JS_CODEGEN_ARM64)
+ initWithAllocator();
+ armbuffer_.id = GetJitContext()->getNextAssemblerId();
+#endif
+ if (ion) {
+ setFramePushed(ion->frameSize());
+ if (pc && cx->runtime()->spsProfiler.enabled())
+ enableProfilingInstrumentation();
+ }
+}
+
+MacroAssembler::AfterICSaveLive
+MacroAssembler::icSaveLive(LiveRegisterSet& liveRegs)
+{
+ PushRegsInMask(liveRegs);
+ AfterICSaveLive aic(framePushed());
+ alignFrameForICArguments(aic);
+ return aic;
+}
+
+bool
+MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic)
+{
+ return buildOOLFakeExitFrame(fakeReturnAddr);
+}
+
+void
+MacroAssembler::icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic)
+{
+ restoreFrameAlignmentForICArguments(aic);
+ MOZ_ASSERT(framePushed() == aic.initialStack);
+ PopRegsInMask(liveRegs);
+}
+
+#ifndef JS_CODEGEN_ARM64
+void
+MacroAssembler::subFromStackPtr(Register reg)
+{
+ subPtr(reg, getStackPointer());
+}
+#endif // JS_CODEGEN_ARM64
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set)
+{
+ PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
+}
+
+void
+MacroAssembler::PopRegsInMask(LiveRegisterSet set)
+{
+ PopRegsInMaskIgnore(set, LiveRegisterSet());
+}
+
+void
+MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set)
+{
+ PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
+}
+
+void
+MacroAssembler::Push(jsid id, Register scratchReg)
+{
+ if (JSID_IS_GCTHING(id)) {
+ // If we're pushing a gcthing, then we can't just push the tagged jsid
+ // value since the GC won't have any idea that the push instruction
+        // carries a reference to a gcthing. We need to unpack the pointer,
+        // push it using ImmGCPtr, and then rematerialize the id at runtime.
+
+ if (JSID_IS_STRING(id)) {
+ JSString* str = JSID_TO_STRING(id);
+ MOZ_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
+ MOZ_ASSERT(JSID_TYPE_STRING == 0x0);
+ Push(ImmGCPtr(str));
+ } else {
+ MOZ_ASSERT(JSID_IS_SYMBOL(id));
+ JS::Symbol* sym = JSID_TO_SYMBOL(id);
+ movePtr(ImmGCPtr(sym), scratchReg);
+ orPtr(Imm32(JSID_TYPE_SYMBOL), scratchReg);
+ Push(scratchReg);
+ }
+ } else {
+ Push(ImmWord(JSID_BITS(id)));
+ }
+}
+
+void
+MacroAssembler::Push(TypedOrValueRegister v)
+{
+ if (v.hasValue()) {
+ Push(v.valueReg());
+ } else if (IsFloatingPointType(v.type())) {
+ FloatRegister reg = v.typedReg().fpu();
+ if (v.type() == MIRType::Float32) {
+ convertFloat32ToDouble(reg, ScratchDoubleReg);
+ reg = ScratchDoubleReg;
+ }
+ Push(reg);
+ } else {
+ Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
+ }
+}
+
+void
+MacroAssembler::Push(const ConstantOrRegister& v)
+{
+ if (v.constant())
+ Push(v.value());
+ else
+ Push(v.reg());
+}
+
+void
+MacroAssembler::Push(const ValueOperand& val)
+{
+ pushValue(val);
+ framePushed_ += sizeof(Value);
+}
+
+void
+MacroAssembler::Push(const Value& val)
+{
+ pushValue(val);
+ framePushed_ += sizeof(Value);
+}
+
+void
+MacroAssembler::Push(JSValueType type, Register reg)
+{
+ pushValue(type, reg);
+ framePushed_ += sizeof(Value);
+}
+
+void
+MacroAssembler::PushValue(const Address& addr)
+{
+ MOZ_ASSERT(addr.base != getStackPointer());
+ pushValue(addr);
+ framePushed_ += sizeof(Value);
+}
+
+void
+MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
+{
+ switch (rootType) {
+ case VMFunction::RootNone:
+ MOZ_CRASH("Handle must have root type");
+ case VMFunction::RootObject:
+ case VMFunction::RootString:
+ case VMFunction::RootPropertyName:
+ case VMFunction::RootFunction:
+ case VMFunction::RootCell:
+ Push(ImmPtr(nullptr));
+ break;
+ case VMFunction::RootValue:
+ Push(UndefinedValue());
+ break;
+ }
+}
+
+void
+MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
+ const ValueOperand& valueReg)
+{
+ switch (rootType) {
+ case VMFunction::RootNone:
+ MOZ_CRASH("Handle must have root type");
+ case VMFunction::RootObject:
+ case VMFunction::RootString:
+ case VMFunction::RootPropertyName:
+ case VMFunction::RootFunction:
+ case VMFunction::RootCell:
+ Pop(cellReg);
+ break;
+ case VMFunction::RootValue:
+ Pop(valueReg);
+ break;
+ }
+}
+
+void
+MacroAssembler::adjustStack(int amount)
+{
+ if (amount > 0)
+ freeStack(amount);
+ else if (amount < 0)
+ reserveStack(-amount);
+}
+
+void
+MacroAssembler::freeStack(uint32_t amount)
+{
+ MOZ_ASSERT(amount <= framePushed_);
+ if (amount)
+ addToStackPtr(Imm32(amount));
+ framePushed_ -= amount;
+}
+
+void
+MacroAssembler::freeStack(Register amount)
+{
+ addToStackPtr(amount);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupABICall()
+{
+#ifdef DEBUG
+ MOZ_ASSERT(!inCall_);
+ inCall_ = true;
+#endif
+
+#ifdef JS_SIMULATOR
+ signature_ = 0;
+#endif
+
+ // Reinitialize the ABIArg generator.
+ abiArgs_ = ABIArgGenerator();
+
+#if defined(JS_CODEGEN_ARM)
+ // On ARM, we need to know what ABI we are using, either in the
+ // simulator, or based on the configure flags.
+#if defined(JS_SIMULATOR_ARM)
+ abiArgs_.setUseHardFp(UseHardFpABI());
+#elif defined(JS_CODEGEN_ARM_HARDFP)
+ abiArgs_.setUseHardFp(true);
+#else
+ abiArgs_.setUseHardFp(false);
+#endif
+#endif
+
+#if defined(JS_CODEGEN_MIPS32)
+    // On MIPS, the system ABI uses general register pairs to encode double
+    // arguments, after one or two integer-like arguments. Unfortunately, the
+    // Lowering phase is not capable of expressing this at the moment, so we
+    // enforce the system ABI here.
+ abiArgs_.enforceO32ABI();
+#endif
+}
+
+void
+MacroAssembler::setupAlignedABICall()
+{
+ setupABICall();
+ dynamicAlignment_ = false;
+ assertStackAlignment(ABIStackAlignment);
+
+#if defined(JS_CODEGEN_ARM64)
+ MOZ_CRASH("Not supported on arm64");
+#endif
+}
+
+void
+MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
+{
+ MOZ_ASSERT(inCall_);
+ appendSignatureType(type);
+
+ ABIArg arg;
+ switch (type) {
+ case MoveOp::FLOAT32:
+ arg = abiArgs_.next(MIRType::Float32);
+ break;
+ case MoveOp::DOUBLE:
+ arg = abiArgs_.next(MIRType::Double);
+ break;
+ case MoveOp::GENERAL:
+ arg = abiArgs_.next(MIRType::Pointer);
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ MoveOperand to(*this, arg);
+ if (from == to)
+ return;
+
+ if (oom())
+ return;
+ propagateOOM(moveResolver_.addMove(from, to, type));
+}
+
+void
+MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result)
+{
+ appendSignatureType(result);
+#ifdef JS_SIMULATOR
+ fun = Simulator::RedirectNativeFunction(fun, signature());
+#endif
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(ImmPtr(fun));
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
+ call(imm);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Exit frame footer.
+
+void
+MacroAssembler::linkExitFrame()
+{
+ AbsoluteAddress jitTop(GetJitContext()->runtime->addressOfJitTop());
+ storeStackPtr(jitTop);
+}
+
+void
+MacroAssembler::linkSelfReference(JitCode* code)
+{
+    // If this code can transition to C++ code and witness a GC, then we need to store
+    // the JitCode onto the stack in order to GC it correctly. selfReferencePatch_ should
+    // be unset if the code never needed to push its JitCode*.
+ if (hasSelfReference()) {
+ PatchDataWithValueCheck(CodeLocationLabel(code, selfReferencePatch_),
+ ImmPtr(code),
+ ImmPtr((void*)-1));
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label)
+{
+    // 16-bit loads are slow and unaligned 32-bit loads may be too, so
+    // perform an aligned 32-bit load and adjust the bitmask accordingly.
+ MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+
+ // First, ensure it's a scripted function.
+ load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
+ int32_t bits = IMM32_16ADJ(JSFunction::INTERPRETED);
+ branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
+
+ // Check if the CONSTRUCTOR bit is set.
+ bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
+ branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
+}
+
+void
+MacroAssembler::maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label)
+{
+ if (!maybeDef || maybeDef->mightBeType(type)) {
+ switch (type) {
+ case MIRType::Null:
+ branchTestNull(Equal, tag, label);
+ break;
+ case MIRType::Boolean:
+ branchTestBoolean(Equal, tag, label);
+ break;
+ case MIRType::Int32:
+ branchTestInt32(Equal, tag, label);
+ break;
+ case MIRType::Double:
+ branchTestDouble(Equal, tag, label);
+ break;
+ case MIRType::String:
+ branchTestString(Equal, tag, label);
+ break;
+ case MIRType::Symbol:
+ branchTestSymbol(Equal, tag, label);
+ break;
+ case MIRType::Object:
+ branchTestObject(Equal, tag, label);
+ break;
+ default:
+ MOZ_CRASH("Unsupported type");
+ }
+ }
+}
+
+void
+MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
+{
+ // Load the callee, before the caller's registers are clobbered.
+ uint32_t globalDataOffset = callee.importGlobalDataOffset();
+ loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code), ABINonArgReg0);
+
+ MOZ_ASSERT(ABINonArgReg0 != WasmTlsReg, "by constraint");
+
+ // Switch to the callee's TLS and pinned registers and make the call.
+ loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls), WasmTlsReg);
+ loadWasmPinnedRegsFromTls();
+
+ call(desc, ABINonArgReg0);
+}
+
+void
+MacroAssembler::wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
+ wasm::SymbolicAddress builtin)
+{
+ MOZ_ASSERT(instanceArg != ABIArg());
+
+ if (instanceArg.kind() == ABIArg::GPR) {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), instanceArg.gpr());
+ } else if (instanceArg.kind() == ABIArg::Stack) {
+        // Safe to use ABINonArgReg0 since it's the last thing before the call.
+ Register scratch = ABINonArgReg0;
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
+ storePtr(scratch, Address(getStackPointer(), instanceArg.offsetFromArgBase()));
+ } else {
+ MOZ_CRASH("Unknown abi passing style for pointer");
+ }
+
+ call(builtin);
+}
+
+void
+MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
+{
+ Register scratch = WasmTableCallScratchReg;
+ Register index = WasmTableCallIndexReg;
+
+ if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
+        // asm.js tables require no signature check, have had their index masked
+        // into range (and thus need no bounds check), and cannot be external.
+ loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);
+ loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);
+ call(desc, scratch);
+ return;
+ }
+
+ MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);
+
+ // Write the sig-id into the ABI sig-id register.
+ wasm::SigIdDesc sigId = callee.wasmTableSigId();
+ switch (sigId.kind()) {
+ case wasm::SigIdDesc::Kind::Global:
+ loadWasmGlobalPtr(sigId.globalDataOffset(), WasmTableCallSigReg);
+ break;
+ case wasm::SigIdDesc::Kind::Immediate:
+ move32(Imm32(sigId.immediate()), WasmTableCallSigReg);
+ break;
+ case wasm::SigIdDesc::Kind::None:
+ break;
+ }
+
+ // WebAssembly throws if the index is out-of-bounds.
+ loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);
+
+ wasm::TrapOffset trapOffset(desc.lineOrBytecode());
+ wasm::TrapDesc oobTrap(trapOffset, wasm::Trap::OutOfBounds, framePushed());
+ branch32(Assembler::Condition::AboveOrEqual, index, scratch, oobTrap);
+
+ // Load the base pointer of the table.
+ loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);
+
+ // Load the callee from the table.
+ wasm::TrapDesc nullTrap(trapOffset, wasm::Trap::IndirectCallToNull, framePushed());
+ if (callee.wasmTableIsExternal()) {
+ static_assert(sizeof(wasm::ExternalTableElem) == 8 || sizeof(wasm::ExternalTableElem) == 16,
+ "elements of external tables are two words");
+ if (sizeof(wasm::ExternalTableElem) == 8) {
+ computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
+ } else {
+ lshift32(Imm32(4), index);
+ addPtr(index, scratch);
+ }
+
+ loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, tls)), WasmTlsReg);
+ branchTest32(Assembler::Zero, WasmTlsReg, WasmTlsReg, nullTrap);
+
+ loadWasmPinnedRegsFromTls();
+
+ loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, code)), scratch);
+ } else {
+ loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);
+ branchTest32(Assembler::Zero, scratch, scratch, nullTrap);
+ }
+
+ call(desc, scratch);
+}
+
+void
+MacroAssembler::wasmEmitTrapOutOfLineCode()
+{
+ for (const wasm::TrapSite& site : trapSites()) {
+ // Trap out-of-line codes are created for two kinds of trap sites:
+ // - jumps, which are bound directly to the trap out-of-line path
+ // - memory accesses, which can fault and then have control transferred
+ // to the out-of-line path directly via signal handler setting pc
+ switch (site.kind) {
+ case wasm::TrapSite::Jump: {
+ RepatchLabel jump;
+ jump.use(site.codeOffset);
+ bind(&jump);
+ break;
+ }
+ case wasm::TrapSite::MemoryAccess: {
+ append(wasm::MemoryAccess(site.codeOffset, currentOffset()));
+ break;
+ }
+ }
+
+ if (site.trap == wasm::Trap::IndirectCallBadSig) {
+ // The indirect call bad-signature trap is a special case for two
+ // reasons:
+ // - the check happens in the very first instructions of the
+            //    prologue, before the stack frame has been set up, which messes
+ // up everything (stack depth computations, unwinding)
+ // - the check happens in the callee while the trap should be
+ // reported at the caller's call_indirect
+ // To solve both problems at once, the out-of-line path (far) jumps
+ // directly to the trap exit stub. This takes advantage of the fact
+ // that there is already a CallSite for call_indirect and the
+ // current pre-prologue stack/register state.
+ append(wasm::TrapFarJump(site.trap, farJumpWithPatch()));
+ } else {
+ // Inherit the frame depth of the trap site. This value is captured
+ // by the wasm::CallSite to allow unwinding this frame.
+ setFramePushed(site.framePushed);
+
+ // Align the stack for a nullary call.
+ size_t alreadyPushed = sizeof(wasm::Frame) + framePushed();
+ size_t toPush = ABIArgGenerator().stackBytesConsumedSoFar();
+ if (size_t dec = StackDecrementForCall(ABIStackAlignment, alreadyPushed, toPush))
+ reserveStack(dec);
+
+ // Call the trap's exit, using the bytecode offset of the trap site.
+ // Note that this code is inside the same CodeRange::Function as the
+ // trap site so it's as if the trapping instruction called the
+ // trap-handling function. The frame iterator knows to skip the trap
+ // exit's frame so that unwinding begins at the frame and offset of
+ // the trapping instruction.
+ wasm::CallSiteDesc desc(site.bytecodeOffset, wasm::CallSiteDesc::TrapExit);
+ call(desc, site.trap);
+ }
+
+#ifdef DEBUG
+ // Traps do not return, so no need to freeStack().
+ breakpoint();
+#endif
+ }
+
+ // Ensure that the return address of the last emitted call above is always
+    // within this function's CodeRange, which is necessary for the stack
+ // iterator to find the right CodeRange while walking the stack.
+ breakpoint();
+
+ clearTrapSites();
+}
+
+//}}} check_macroassembler_style
+
+void
+MacroAssembler::BranchType::emit(MacroAssembler& masm)
+{
+ MOZ_ASSERT(isInitialized());
+ MIRType mirType = MIRType::None;
+
+ if (type_.isPrimitive()) {
+ if (type_.isMagicArguments())
+ mirType = MIRType::MagicOptimizedArguments;
+ else
+ mirType = MIRTypeFromValueType(type_.primitive());
+ } else if (type_.isAnyObject()) {
+ mirType = MIRType::Object;
+ } else {
+ MOZ_CRASH("Unknown conversion to mirtype");
+ }
+
+ if (mirType == MIRType::Double)
+ masm.branchTestNumber(cond(), reg(), jump());
+ else
+ masm.branchTestMIRType(cond(), reg(), mirType, jump());
+}
+
+void
+MacroAssembler::BranchGCPtr::emit(MacroAssembler& masm)
+{
+ MOZ_ASSERT(isInitialized());
+ masm.branchPtr(cond(), reg(), ptr_, jump());
+}
+
+namespace js {
+namespace jit {
+
+#ifdef DEBUG
+template <class RegisterType>
+AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
+ : RegisterType(reg), masm_(masm)
+{
+ masm.debugTrackedRegisters_.add(reg);
+}
+
+template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(MacroAssembler& masm, Register reg);
+template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(MacroAssembler& masm, FloatRegister reg);
+#endif // DEBUG
+
+#ifdef DEBUG
+template <class RegisterType>
+AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope()
+{
+ const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
+ masm_.debugTrackedRegisters_.take(reg);
+}
+
+template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
+template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
+#endif // DEBUG
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
new file mode 100644
index 000000000..b6616321c
--- /dev/null
+++ b/js/src/jit/MacroAssembler.h
@@ -0,0 +1,2233 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MacroAssembler_h
+#define jit_MacroAssembler_h
+
+#include "mozilla/MacroForEach.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MacroAssembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MacroAssembler-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MacroAssembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MacroAssembler-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/MacroAssembler-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+#include "jit/AtomicOp.h"
+#include "jit/IonInstrumentation.h"
+#include "jit/JitCompartment.h"
+#include "jit/VMFunctions.h"
+#include "vm/ProxyObject.h"
+#include "vm/Shape.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/UnboxedObject.h"
+
+using mozilla::FloatingPoint;
+
+// * How to read/write MacroAssembler method declarations:
+//
+// The following macros are made to avoid #ifdef around each method declaration
+// of the Macro Assembler, and they are also used as a hint on the location of
+// the implementations of each method. For example, the following declaration
+//
+// void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
+//
+// suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
+// x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
+//
+// - If there is no annotation, then there is only one generic definition in
+// MacroAssembler.cpp.
+//
+// - If the declaration is "inline", then the method definition(s) would be in
+// the "-inl.h" variant of the same file(s).
+//
+// The script check_macroassembler_style.py (check-masm target of the Makefile)
+// is used to verify that method definitions are matching the annotation added
+// to the method declarations. If there is any difference, then you either
+// forgot to define the method in one of the macro assembler, or you forgot to
+// update the annotation of the macro assembler declaration.
+//
+// Some convenient short-cuts are used to avoid repeating the same list of
+// architectures on each method declaration, such as PER_ARCH and
+// PER_SHARED_ARCH.
+
+# define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
+# define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
+
+// * How this macro works:
+//
+// DEFINED_ON is a macro which checks whether, for the current architecture, the
+// method is defined on the macro assembler or not.
+//
+// For each architecture, we have a macro named DEFINED_ON_arch. This macro is
+// empty if this is not the current architecture. Otherwise it must be set to
+// either "define" or "crash" (the latter only used for the none target so far).
+//
+// The DEFINED_ON macro maps the list of architecture names given as argument to
+// a list of macro names. For example,
+//
+// DEFINED_ON(arm, x86_shared)
+//
+// is expanded to
+//
+// DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
+//
+// which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
+// to
+//
+// define
+//
+// or if the JIT is disabled or set to no architecture to
+//
+// crash
+//
+// or to nothing, if the current architecture is not listed in the list of
+// arguments of DEFINED_ON. Note that only one of the DEFINED_ON_arch macros
+// contributes to the non-empty result, which is the macro of the current
+// architecture if it is listed in the arguments of DEFINED_ON.
+//
+// This result is appended to DEFINED_ON_RESULT_ before expanding the macro;
+// the result is either no annotation, a MOZ_CRASH(), or a "= delete"
+// annotation on the method declaration.
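+//
+// For illustration (a sketch based on the machinery defined below): a
+// declaration such as
+//
+//   void Push(const Operand op) DEFINED_ON(x86_shared);
+//
+// becomes a plain declaration on x86 and x64 (the definition lives in the
+// x86-shared implementation), a "= delete"d declaration on architectures that
+// are not listed, and an inline MOZ_CRASH() body when the JIT targets no
+// architecture (JS_CODEGEN_NONE).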
+
+# define DEFINED_ON_x86
+# define DEFINED_ON_x64
+# define DEFINED_ON_x86_shared
+# define DEFINED_ON_arm
+# define DEFINED_ON_arm64
+# define DEFINED_ON_mips32
+# define DEFINED_ON_mips64
+# define DEFINED_ON_mips_shared
+# define DEFINED_ON_none
+
+// Specialize for each architecture.
+#if defined(JS_CODEGEN_X86)
+# undef DEFINED_ON_x86
+# define DEFINED_ON_x86 define
+# undef DEFINED_ON_x86_shared
+# define DEFINED_ON_x86_shared define
+#elif defined(JS_CODEGEN_X64)
+# undef DEFINED_ON_x64
+# define DEFINED_ON_x64 define
+# undef DEFINED_ON_x86_shared
+# define DEFINED_ON_x86_shared define
+#elif defined(JS_CODEGEN_ARM)
+# undef DEFINED_ON_arm
+# define DEFINED_ON_arm define
+#elif defined(JS_CODEGEN_ARM64)
+# undef DEFINED_ON_arm64
+# define DEFINED_ON_arm64 define
+#elif defined(JS_CODEGEN_MIPS32)
+# undef DEFINED_ON_mips32
+# define DEFINED_ON_mips32 define
+# undef DEFINED_ON_mips_shared
+# define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_MIPS64)
+# undef DEFINED_ON_mips64
+# define DEFINED_ON_mips64 define
+# undef DEFINED_ON_mips_shared
+# define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_NONE)
+# undef DEFINED_ON_none
+# define DEFINED_ON_none crash
+#else
+# error "Unknown architecture!"
+#endif
+
+# define DEFINED_ON_RESULT_crash { MOZ_CRASH(); }
+# define DEFINED_ON_RESULT_define
+# define DEFINED_ON_RESULT_ = delete
+
+# define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) \
+ Macro ## Result
+# define DEFINED_ON_DISPATCH_RESULT(...) \
+ DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
+
+// We need to let the evaluation of MOZ_FOR_EACH terminate.
+# define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
+ DEFINED_ON_DISPATCH_RESULT ParenResult
+# define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS_3 (ParenResult)
+# define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS_2 (ParenResult)
+
+# define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_ ## Arch
+# define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS( \
+ (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
+
+# define DEFINED_ON(...) \
+ DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
+
+# define PER_ARCH DEFINED_ON(ALL_ARCH)
+# define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
+
+
+#if MOZ_LITTLE_ENDIAN
+#define IMM32_16ADJ(X) X << 16
+#else
+#define IMM32_16ADJ(X) X
+#endif
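+
+// Illustrative note (an inference from how IMM32_16ADJ is used, e.g. in
+// branchIfNotInterpretedConstructor): when a 16-bit flags field stored two
+// bytes past an aligned offset is read with an aligned 32-bit load of that
+// offset, the field ends up in the upper half-word on little-endian targets,
+// so its bit constants must be shifted left by 16 before testing; on
+// big-endian targets it ends up in the lower half-word and no shift is needed.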
+
+namespace js {
+namespace jit {
+
+// Defined in JitFrames.h
+enum ExitFrameTokenValues;
+
+// The public entrypoint for emitting assembly. Note that a MacroAssembler can
+// use cx->lifoAlloc, so take care not to interleave masm use with other
+// lifoAlloc use if one will be destroyed before the other.
+class MacroAssembler : public MacroAssemblerSpecific
+{
+ MacroAssembler* thisFromCtor() {
+ return this;
+ }
+
+ public:
+ class AutoRooter : public JS::AutoGCRooter
+ {
+ MacroAssembler* masm_;
+
+ public:
+ AutoRooter(JSContext* cx, MacroAssembler* masm)
+ : JS::AutoGCRooter(cx, IONMASM),
+ masm_(masm)
+ { }
+
+ MacroAssembler* masm() const {
+ return masm_;
+ }
+ };
+
+ /*
+ * Base class for creating a branch.
+ */
+ class Branch
+ {
+ bool init_;
+ Condition cond_;
+ Label* jump_;
+ Register reg_;
+
+ public:
+ Branch()
+ : init_(false),
+ cond_(Equal),
+ jump_(nullptr),
+ reg_(Register::FromCode(0)) // Quell compiler warnings.
+ { }
+
+ Branch(Condition cond, Register reg, Label* jump)
+ : init_(true),
+ cond_(cond),
+ jump_(jump),
+ reg_(reg)
+ { }
+
+ bool isInitialized() const {
+ return init_;
+ }
+
+ Condition cond() const {
+ return cond_;
+ }
+
+ Label* jump() const {
+ return jump_;
+ }
+
+ Register reg() const {
+ return reg_;
+ }
+
+ void invertCondition() {
+ cond_ = InvertCondition(cond_);
+ }
+
+ void relink(Label* jump) {
+ jump_ = jump;
+ }
+
+ virtual void emit(MacroAssembler& masm) = 0;
+ };
+
+ /*
+ * Creates a branch based on a specific TypeSet::Type.
+ * Note: emits number test (int/double) for TypeSet::DoubleType()
+ */
+ class BranchType : public Branch
+ {
+ TypeSet::Type type_;
+
+ public:
+ BranchType()
+ : Branch(),
+ type_(TypeSet::UnknownType())
+ { }
+
+ BranchType(Condition cond, Register reg, TypeSet::Type type, Label* jump)
+ : Branch(cond, reg, jump),
+ type_(type)
+ { }
+
+ void emit(MacroAssembler& masm);
+ };
+
+ /*
+ * Creates a branch based on a GCPtr.
+ */
+ class BranchGCPtr : public Branch
+ {
+ ImmGCPtr ptr_;
+
+ public:
+ BranchGCPtr()
+ : Branch(),
+ ptr_(ImmGCPtr(nullptr))
+ { }
+
+ BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label* jump)
+ : Branch(cond, reg, jump),
+ ptr_(ptr)
+ { }
+
+ void emit(MacroAssembler& masm);
+ };
+
+ mozilla::Maybe<AutoRooter> autoRooter_;
+ mozilla::Maybe<JitContext> jitContext_;
+ mozilla::Maybe<AutoJitContextAlloc> alloc_;
+
+ private:
+ // Labels for handling exceptions and failures.
+ NonAssertingLabel failureLabel_;
+
+ public:
+ MacroAssembler()
+ : framePushed_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ emitProfilingInstrumentation_(false)
+ {
+ JitContext* jcx = GetJitContext();
+ JSContext* cx = jcx->cx;
+ if (cx)
+ constructRoot(cx);
+
+ if (!jcx->temp) {
+ MOZ_ASSERT(cx);
+ alloc_.emplace(cx);
+ }
+
+ moveResolver_.setAllocator(*jcx->temp);
+
+#if defined(JS_CODEGEN_ARM)
+ initWithAllocator();
+ m_buffer.id = jcx->getNextAssemblerId();
+#elif defined(JS_CODEGEN_ARM64)
+ initWithAllocator();
+ armbuffer_.id = jcx->getNextAssemblerId();
+#endif
+ }
+
+ // This constructor should only be used when there is no JitContext active
+ // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
+ explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
+ JSScript* script = nullptr, jsbytecode* pc = nullptr);
+
+ // wasm compilation handles its own JitContext-pushing
+ struct WasmToken {};
+ explicit MacroAssembler(WasmToken, TempAllocator& alloc)
+ : framePushed_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ emitProfilingInstrumentation_(false)
+ {
+ moveResolver_.setAllocator(alloc);
+
+#if defined(JS_CODEGEN_ARM)
+ initWithAllocator();
+ m_buffer.id = 0;
+#elif defined(JS_CODEGEN_ARM64)
+ initWithAllocator();
+ armbuffer_.id = 0;
+#endif
+ }
+
+ void constructRoot(JSContext* cx) {
+ autoRooter_.emplace(cx, this);
+ }
+
+ MoveResolver& moveResolver() {
+ return moveResolver_;
+ }
+
+ size_t instructionsSize() const {
+ return size();
+ }
+
+ //{{{ check_macroassembler_style
+ public:
+ // ===============================================================
+ // MacroAssembler high-level usage.
+
+ // Flushes the assembly buffer, on platforms that need it.
+ void flush() PER_SHARED_ARCH;
+
+ // Add a comment that is visible in the pretty printed assembly code.
+ void comment(const char* msg) PER_SHARED_ARCH;
+
+ // ===============================================================
+ // Frame manipulation functions.
+
+ inline uint32_t framePushed() const;
+ inline void setFramePushed(uint32_t framePushed);
+ inline void adjustFrame(int32_t value);
+
+ // Adjust the frame, to account for implicit modification of the stack
+    // pointer, such that the callee can remove arguments on behalf of the
+    // caller.
+ inline void implicitPop(uint32_t bytes);
+
+ private:
+ // This field is used to statically (at compilation time) emulate a frame
+ // pointer by keeping track of stack manipulations.
+ //
+ // It is maintained by all stack manipulation functions below.
+ uint32_t framePushed_;
+
+ public:
+ // ===============================================================
+ // Stack manipulation functions.
+
+ void PushRegsInMask(LiveRegisterSet set)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+ void PushRegsInMask(LiveGeneralRegisterSet set);
+
+ void PopRegsInMask(LiveRegisterSet set);
+ void PopRegsInMask(LiveGeneralRegisterSet set);
+ void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ void Push(const Operand op) DEFINED_ON(x86_shared);
+ void Push(Register reg) PER_SHARED_ARCH;
+ void Push(Register reg1, Register reg2, Register reg3, Register reg4) DEFINED_ON(arm64);
+ void Push(const Imm32 imm) PER_SHARED_ARCH;
+ void Push(const ImmWord imm) PER_SHARED_ARCH;
+ void Push(const ImmPtr imm) PER_SHARED_ARCH;
+ void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
+ void Push(FloatRegister reg) PER_SHARED_ARCH;
+ void Push(jsid id, Register scratchReg);
+ void Push(TypedOrValueRegister v);
+ void Push(const ConstantOrRegister& v);
+ void Push(const ValueOperand& val);
+ void Push(const Value& val);
+ void Push(JSValueType type, Register reg);
+ void PushValue(const Address& addr);
+ void PushEmptyRooted(VMFunction::RootType rootType);
+ inline CodeOffset PushWithPatch(ImmWord word);
+ inline CodeOffset PushWithPatch(ImmPtr imm);
+
+ void Pop(const Operand op) DEFINED_ON(x86_shared);
+ void Pop(Register reg) PER_SHARED_ARCH;
+ void Pop(FloatRegister t) PER_SHARED_ARCH;
+ void Pop(const ValueOperand& val) PER_SHARED_ARCH;
+ void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
+
+ // Move the stack pointer based on the requested amount.
+ void adjustStack(int amount);
+ void freeStack(uint32_t amount);
+
+ // Warning: This method does not update the framePushed() counter.
+ void freeStack(Register amount);
+
+ private:
+ // ===============================================================
+ // Register allocation fields.
+#ifdef DEBUG
+ friend AutoRegisterScope;
+ friend AutoFloatRegisterScope;
+ // Used to track register scopes for debug builds.
+ // Manipulated by the AutoGenericRegisterScope class.
+ AllocatableRegisterSet debugTrackedRegisters_;
+#endif // DEBUG
+
+ public:
+ // ===============================================================
+ // Simple call functions.
+
+ CodeOffset call(Register reg) PER_SHARED_ARCH;
+ CodeOffset call(Label* label) PER_SHARED_ARCH;
+ void call(const Address& addr) DEFINED_ON(x86_shared);
+ void call(ImmWord imm) PER_SHARED_ARCH;
+ // Call a target native function, which is neither traceable nor movable.
+ void call(ImmPtr imm) PER_SHARED_ARCH;
+ void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
+ // Call a target JitCode, which must be traceable, and may be movable.
+ void call(JitCode* c) PER_SHARED_ARCH;
+
+ inline void call(const wasm::CallSiteDesc& desc, const Register reg);
+ inline void call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
+ inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
+
+ CodeOffset callWithPatch() PER_SHARED_ARCH;
+ void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
+
+ // Push the return address and make a call. On platforms where this function
+ // is not defined, push the link register (pushReturnAddress) at the entry
+ // point of the callee.
+ void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
+ void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
+
+ void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
+ void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
+
+ public:
+ // ===============================================================
+ // Patchable near/far jumps.
+
+ // "Far jumps" provide the ability to jump to any uint32_t offset from any
+ // other uint32_t offset without using a constant pool (thus returning a
+ // simple CodeOffset instead of a CodeOffsetJump).
+ CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
+ void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
+ static void repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset) PER_SHARED_ARCH;
+
+ // Emit a nop that can be patched to and from a nop and a jump with an int8
+ // relative displacement.
+ CodeOffset nopPatchableToNearJump() PER_SHARED_ARCH;
+ static void patchNopToNearJump(uint8_t* jump, uint8_t* target) PER_SHARED_ARCH;
+ static void patchNearJumpToNop(uint8_t* jump) PER_SHARED_ARCH;
+
+ public:
+ // ===============================================================
+ // ABI function calls.
+
+    // Set up a call to C/C++ code, given the assumption that framePushed
+    // accurately defines the state of the stack, and that the top of the stack
+ // was properly aligned. Note that this only supports cdecl.
+ void setupAlignedABICall(); // CRASH_ON(arm64)
+
+    // Set up an ABI call for when the alignment is not known. This may need a
+ // scratch register.
+ void setupUnalignedABICall(Register scratch) PER_ARCH;
+
+ // Arguments must be assigned to a C/C++ call in order. They are moved
+ // in parallel immediately before performing the call. This process may
+ // temporarily use more stack, in which case esp-relative addresses will be
+ // automatically adjusted. It is extremely important that esp-relative
+ // addresses are computed *after* setupABICall(). Furthermore, no
+ // operations should be emitted while setting arguments.
+ void passABIArg(const MoveOperand& from, MoveOp::Type type);
+ inline void passABIArg(Register reg);
+ inline void passABIArg(FloatRegister reg, MoveOp::Type type);
+
+ template <typename T>
+ inline void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL);
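+
+    // A minimal usage sketch (the register and function-pointer names below
+    // are hypothetical, for illustration only):
+    //
+    //     masm.setupUnalignedABICall(scratch);
+    //     masm.passABIArg(arg0Reg);
+    //     masm.passABIArg(arg1Reg);
+    //     masm.callWithABI(funcPtr);  // funcPtr: void* to the C/C++ function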
+
+ private:
+ // Reinitialize the variables which have to be cleared before making a call
+ // with callWithABI.
+ void setupABICall();
+
+ // Reserve the stack and resolve the arguments move.
+ void callWithABIPre(uint32_t* stackAdjust, bool callFromWasm = false) PER_ARCH;
+
+ // Emits a call to a C/C++ function, resolving all argument moves.
+ void callWithABINoProfiler(void* fun, MoveOp::Type result);
+ void callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result);
+ void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
+ void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
+
+ // Restore the stack to its state before the setup function call.
+ void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) PER_ARCH;
+
+ // Create the signature to be able to decode the arguments of a native
+ // function, when calling a function within the simulator.
+ inline void appendSignatureType(MoveOp::Type type);
+ inline ABIFunctionType signature() const;
+
+ // Private variables used to handle moves between registers given as
+ // arguments to passABIArg and the list of ABI registers expected for the
+ // signature of the function.
+ MoveResolver moveResolver_;
+
+    // Architecture-specific implementation which specifies how registers and
+    // stack offsets are used for calling a function.
+ ABIArgGenerator abiArgs_;
+
+#ifdef DEBUG
+    // Flag used to assert that we use ABI functions in the right context.
+ bool inCall_;
+#endif
+
+    // If set by setupUnalignedABICall, then callWithABI will pop the stack
+    // register which was saved on the stack.
+ bool dynamicAlignment_;
+
+#ifdef JS_SIMULATOR
+ // The signature is used to accumulate all types of arguments which are used
+ // by the caller. This is used by the simulators to decode the arguments
+ // properly, and cast the function pointer to the right type.
+ uint32_t signature_;
+#endif
+
+ public:
+ // ===============================================================
+ // Jit Frames.
+ //
+ // These functions are used to build the content of the Jit frames. See
+ // CommonFrameLayout class, and all its derivatives. The content should be
+    // pushed in the opposite order from the fields of the structures, such that
+ // the structures can be used to interpret the content of the stack.
+
+ // Call the Jit function, and push the return address (or let the callee
+ // push the return address).
+ //
+ // These functions return the offset of the return address, in order to use
+ // the return address to index the safepoints, which are used to list all
+ // live registers.
+ inline uint32_t callJitNoProfiler(Register callee);
+ inline uint32_t callJit(Register callee);
+ inline uint32_t callJit(JitCode* code);
+
+ // The frame descriptor is the second field of all Jit frames, pushed before
+ // calling the Jit function. It is a composite value defined in JitFrames.h
+ inline void makeFrameDescriptor(Register frameSizeReg, FrameType type, uint32_t headerSize);
+
+ // Push the frame descriptor, based on the statically known framePushed.
+ inline void pushStaticFrameDescriptor(FrameType type, uint32_t headerSize);
+
+    // Push the callee token of a JSFunction whose pointer is stored in the
+    // |callee| register. The callee token is packed with a |constructing| flag
+    // which corresponds to whether the JS function is called with "new" or
+    // not.
+ inline void PushCalleeToken(Register callee, bool constructing);
+
+ // Unpack a callee token located at the |token| address, and return the
+ // JSFunction pointer in the |dest| register.
+ inline void loadFunctionFromCalleeToken(Address token, Register dest);
+
+ // This function emulates a call by pushing an exit frame on the stack,
+ // except that the fake-function is inlined within the body of the caller.
+ //
+ // This function assumes that the current frame is an IonJS frame.
+ //
+ // This function returns the offset of the /fake/ return address, in order to use
+ // the return address to index the safepoints, which are used to list all
+ // live registers.
+ //
+ // This function should be balanced with a call to adjustStack, to pop the
+ // exit frame and emulate the return statement of the inlined function.
+ inline uint32_t buildFakeExitFrame(Register scratch);
+
+ private:
+ // This function is used by buildFakeExitFrame to push a fake return address
+ // on the stack. This fake return address should never be used for resuming
+    // any execution, and can even be an invalid pointer into the instruction
+    // stream, as long as it does not alias any other return address.
+ uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
+
+ public:
+ // ===============================================================
+ // Exit frame footer.
+ //
+ // When calling outside the Jit we push an exit frame. To mark the stack
+ // correctly, we have to push additional information, called the Exit frame
+ // footer, which is used to identify how the stack is marked.
+ //
+ // See JitFrames.h, and MarkJitExitFrame in JitFrames.cpp.
+
+ // If the current piece of code might be garbage collected, then the exit
+ // frame footer must contain a pointer to the current JitCode, such that the
+    // garbage collector can keep the code alive as long as this code is on the
+ // stack. This function pushes a placeholder which is replaced when the code
+ // is linked.
+ inline void PushStubCode();
+
+ // Return true if the code contains a self-reference which needs to be
+ // patched when the code is linked.
+ inline bool hasSelfReference() const;
+
+ // Push stub code and the VMFunction pointer.
+ inline void enterExitFrame(const VMFunction* f = nullptr);
+
+ // Push an exit frame token to identify which fake exit frame this footer
+ // corresponds to.
+ inline void enterFakeExitFrame(enum ExitFrameTokenValues token);
+
+ // Push an exit frame token for a native call.
+ inline void enterFakeExitFrameForNative(bool isConstructing);
+
+ // Pop ExitFrame footer in addition to the extra frame.
+ inline void leaveExitFrame(size_t extraFrame = 0);
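+
+    // A sketch of the expected pairing, for illustration only (the VMFunction
+    // |f| and the intervening call are hypothetical):
+    //
+    //     masm.enterExitFrame(&f);   // push footer: stub code + VMFunction*
+    //     ... call out of the Jit ...
+    //     masm.leaveExitFrame();     // pop the footer again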
+
+ private:
+ // Save the top of the stack into PerThreadData::jitTop of the main thread,
+ // which should be the location of the latest exit frame.
+ void linkExitFrame();
+
+ // Patch the value of PushStubCode with the pointer to the finalized code.
+ void linkSelfReference(JitCode* code);
+
+ // If the JitCode that created this assembler needs to transition into the VM,
+ // we want to store the JitCode on the stack in order to mark it during a GC.
+ // This is a reference to a patch location where the JitCode* will be written.
+ CodeOffset selfReferencePatch_;
+
+ public:
+ // ===============================================================
+ // Move instructions
+
+ inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void move64(Register64 src, Register64 dest) PER_ARCH;
+
+ inline void moveFloat32ToGPR(FloatRegister src, Register dest) PER_SHARED_ARCH;
+ inline void moveGPRToFloat32(Register src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
+ inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;
+
+ // ===============================================================
+ // Logical instructions
+
+ inline void not32(Register reg) PER_SHARED_ARCH;
+
+ inline void and32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
+ inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+ inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
+
+ inline void andPtr(Register src, Register dest) PER_ARCH;
+ inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;
+
+ inline void or32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+
+ inline void orPtr(Register src, Register dest) PER_ARCH;
+ inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void and64(Register64 src, Register64 dest) PER_ARCH;
+ inline void or64(Register64 src, Register64 dest) PER_ARCH;
+ inline void xor64(Register64 src, Register64 dest) PER_ARCH;
+
+ inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+
+ inline void xorPtr(Register src, Register dest) PER_ARCH;
+ inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void and64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+ inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+ inline void xor64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+
+ // ===============================================================
+ // Arithmetic functions
+
+ inline void add32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86_shared);
+
+ inline void addPtr(Register src, Register dest) PER_ARCH;
+ inline void addPtr(Register src1, Register src2, Register dest) DEFINED_ON(arm64);
+ inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
+ inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
+ inline void addPtr(ImmPtr imm, Register dest);
+ inline void addPtr(Imm32 imm, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+ inline void addPtr(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86, x64);
+ inline void addPtr(const Address& src, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+
+ inline void add64(Register64 src, Register64 dest) PER_ARCH;
+ inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void add64(Imm64 imm, Register64 dest) DEFINED_ON(x86, x64, arm, mips32, mips64);
+ inline void add64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+
+ inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);
+
+ inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
+ inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+
+ inline void subPtr(Register src, Register dest) PER_ARCH;
+ inline void subPtr(Register src, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+ inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
+ inline void subPtr(const Address& addr, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+
+ inline void sub64(Register64 src, Register64 dest) PER_ARCH;
+ inline void sub64(Imm64 imm, Register64 dest) DEFINED_ON(x86, x64, arm, mips32, mips64);
+ inline void sub64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+
+ inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ // On x86-shared, srcDest must be eax and edx will be clobbered.
+ inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
+
+ inline void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) DEFINED_ON(arm64);
+
+ inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
+ inline void mul64(const Operand& src, const Register64& dest, const Register temp)
+ DEFINED_ON(x64, mips64);
+ inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
+ inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
+ DEFINED_ON(x86, x64, arm, mips32, mips64);
+ inline void mul64(const Register64& src, const Register64& dest, const Register temp)
+ PER_ARCH;
+
+ inline void mulBy3(Register src, Register dest) PER_ARCH;
+
+ inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+
+ // Perform an integer division, returning the integer part rounded toward zero.
+ // rhs must not be zero, and the division must not overflow.
+ //
+ // On x86_shared, srcDest must be eax and edx will be clobbered.
+ // On ARM, the chip must have hardware division instructions.
+ inline void quotient32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;
+
+ // Perform an integer division, returning the remainder part.
+ // rhs must not be zero, and the division must not overflow.
+ //
+ // On x86_shared, srcDest must be eax and edx will be clobbered.
+ // On ARM, the chip must have hardware division instructions.
+ inline void remainder32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;
+
+ inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void inc32(RegisterOrInt32Constant* key);
+ inline void inc64(AbsoluteAddress dest) PER_ARCH;
+
+ inline void dec32(RegisterOrInt32Constant* key);
+
+ inline void neg32(Register reg) PER_SHARED_ARCH;
+ inline void neg64(Register64 reg) DEFINED_ON(x86, x64, arm, mips32, mips64);
+
+ inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;
+
+ inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;
+
+ inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void sqrtFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ // srcDest = {min,max}{Float32,Double}(srcDest, other)
+ // For min and max, handle NaN specially if handleNaN is true.
+
+ inline void minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
+ inline void minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
+
+ inline void maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
+ inline void maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
+
+ // ===============================================================
+ // Shift functions
+
+ // For shift-by-register there may be platform-specific
+ // variations, for example, x86 will perform the shift mod 32 but
+ // ARM will perform the shift mod 256.
+ //
+ // For shift-by-immediate the platform assembler may restrict the
+ // immediate, for example, the ARM assembler requires the count
+ // for 32-bit shifts to be in the range [0,31].
+
+ inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+
+ inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void rshiftPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
+ inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;
+
+ // On x86_shared these have the constraint that shift must be in CL.
+ inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32Arithmetic(Register shift, Register srcDest) PER_SHARED_ARCH;
+
+ inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
+ inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
+ inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
+
+ // ===============================================================
+ // Rotation functions
+ // Note: - on x86 and x64 the count register must be in CL.
+ // - on x64 the temp register should be InvalidReg.
+
+ inline void rotateLeft(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
+ inline void rotateLeft(Register count, Register input, Register dest) PER_SHARED_ARCH;
+ inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
+ inline void rotateLeft64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
+ inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
+ DEFINED_ON(x86, x64, arm, mips32, mips64);
+ inline void rotateLeft64(Register count, Register64 input, Register64 dest, Register temp)
+ PER_ARCH;
+
+ inline void rotateRight(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
+ inline void rotateRight(Register count, Register input, Register dest) PER_SHARED_ARCH;
+ inline void rotateRight64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
+ inline void rotateRight64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
+ inline void rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
+ DEFINED_ON(x86, x64, arm, mips32, mips64);
+ inline void rotateRight64(Register count, Register64 input, Register64 dest, Register temp)
+ PER_ARCH;
+
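+ // A minimal usage sketch (hypothetical operands): rotate a 64-bit value by
+ // a variable count. On x86/x64 |count| must be in CL; the temp is
+ // InvalidReg on x64 and a real scratch register elsewhere:
+ //
+ //   masm.rotateLeft64(count, input64, dest64, temp);
+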
+ // ===============================================================
+ // Bit counting functions
+
+ // knownNotZero may be true only if the src is known not to be zero.
+ inline void clz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;
+ inline void ctz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;
+
+ inline void clz64(Register64 src, Register dest) PER_ARCH;
+ inline void ctz64(Register64 src, Register dest) PER_ARCH;
+
+ // On x86_shared, temp may be Invalid only if the chip has the POPCNT instruction.
+ // On ARM, temp may never be Invalid.
+ inline void popcnt32(Register src, Register dest, Register temp) PER_SHARED_ARCH;
+
+ // temp may be Invalid only if the chip has the POPCNT instruction.
+ inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
+
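+ // A minimal usage sketch (hypothetical registers): count leading zeroes
+ // and set bits of a 32-bit value. |temp| may be InvalidReg only when the
+ // chip has POPCNT (never on ARM):
+ //
+ //   masm.clz32(src, dest, /* knownNotZero = */ false);
+ //   masm.popcnt32(src, dest, temp);
+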
+ // ===============================================================
+ // Condition functions
+
+ template <typename T1, typename T2>
+ inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+
+ template <typename T1, typename T2>
+ inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+ PER_ARCH;
+
+ // ===============================================================
+ // Branch functions
+
+ template <class L>
+ inline void branch32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branch32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
+ inline void branch32(Condition cond, Register length, const RegisterOrInt32Constant& key,
+ Label* label);
+
+ inline void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) PER_SHARED_ARCH;
+ inline void branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+ inline void branch32(Condition cond, const Address& length, const RegisterOrInt32Constant& key,
+ Label* label);
+
+ inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+ inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs, Label* label)
+ DEFINED_ON(x86_shared);
+ inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+
+ inline void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) DEFINED_ON(x86_shared);
+ inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) DEFINED_ON(x86_shared);
+
+ inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ // The supported conditions are Equal, NotEqual, LessThan(orEqual), GreaterThan(orEqual),
+ // Below(orEqual) and Above(orEqual).
+ // When a fail label is not defined the branch falls through to the next
+ // instruction; otherwise it jumps to the fail label.
+ inline void branch64(Condition cond, Register64 lhs, Imm64 val, Label* success,
+ Label* fail = nullptr) PER_ARCH;
+ inline void branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success,
+ Label* fail = nullptr) DEFINED_ON(x86, x64, arm, mips32, mips64);
+ // On x86 and x64, both the Equal and NotEqual conditions are allowed for the
+ // branch64 variants with Address as lhs. On other platforms only NotEqual is allowed.
+ inline void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) PER_ARCH;
+
+ // Compare the value at |lhs| with the value at |rhs|. The scratch
+ // register *must not* be the base of |lhs| or |rhs|.
+ inline void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label) PER_ARCH;
+
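+ // A minimal usage sketch (hypothetical label and register): branch when a
+ // 64-bit register equals a constant, falling through otherwise because no
+ // fail label is given:
+ //
+ //   Label isZero;
+ //   masm.branch64(Assembler::Equal, reg64, Imm64(0), &isZero);
+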
+ template <class L>
+ inline void branchPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchPtr(Condition cond, const Address& lhs, Register rhs, L label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;
+
+ inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+ inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ template <typename T>
+ inline CodeOffsetJump branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;
+ template <typename T>
+ inline CodeOffsetJump branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;
+
+ void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+ void branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp, Label* label)
+ DEFINED_ON(x86);
+ void branchValueIsNurseryObject(Condition cond, const Address& address, Register temp, Label* label) PER_ARCH;
+ void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;
+
+ // This function compares a Value (lhs), which holds a private pointer boxed
+ // inside a js::Value, with a raw pointer (rhs).
+ inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) PER_ARCH;
+
+ inline void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ // Truncate a double/float32 to int32, jumping to the failure label when the
+ // value does not fit in an int32. This particular variant is allowed to return
+ // the value modulo 2**32, which isn't implemented on all architectures.
+ // E.g. the x64 variants will do this only in the int64_t range.
+ inline void branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+ inline void branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
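+ // A minimal usage sketch (hypothetical registers and label): |fail| is
+ // taken when the inline truncation cannot produce a result, and on some
+ // platforms the result may be the input modulo 2**32:
+ //
+ //   masm.branchTruncateDoubleMaybeModUint32(input, output, &fail);
+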
+ // Truncate a double/float32 to intptr and when it doesn't fit jump to the failure label.
+ inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(x86, x64);
+ inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(x86, x64);
+
+ // Truncate a double/float32 to int32 and when it doesn't fit jump to the failure label.
+ inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+ inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ inline void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchDoubleNotInInt64Range(Address src, Register temp, Label* fail);
+ inline void branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail);
+ inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail);
+ inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail);
+
+ template <typename T, typename L>
+ inline void branchAdd32(Condition cond, T src, Register dest, L label) PER_SHARED_ARCH;
+ template <typename T>
+ inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
+
+ inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchTest32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branchTest32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
+ inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+
+ template <class L>
+ inline void branchTestPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
+ inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+ inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label) PER_ARCH;
+
+ // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
+ template <class L>
+ inline void branchIfFalseBool(Register reg, L label);
+
+ // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
+ inline void branchIfTrueBool(Register reg, Label* label);
+
+ inline void branchIfRope(Register str, Label* label);
+ inline void branchIfRopeOrExternal(Register str, Register temp, Label* label);
+
+ inline void branchLatin1String(Register string, Label* label);
+ inline void branchTwoByteString(Register string, Label* label);
+
+ inline void branchIfFunctionHasNoScript(Register fun, Label* label);
+ inline void branchIfInterpreted(Register fun, Label* label);
+
+ inline void branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
+ Register scratch, Label* label);
+
+ void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label);
+
+ inline void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class* clasp,
+ Label* label);
+ inline void branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label);
+ inline void branchTestObjShape(Condition cond, Register obj, Register shape, Label* label);
+ inline void branchTestObjGroup(Condition cond, Register obj, ObjectGroup* group, Label* label);
+ inline void branchTestObjGroup(Condition cond, Register obj, Register group, Label* label);
+
+ inline void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
+ Label* slowCheck, Label* checked);
+
+ inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
+
+ inline void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label);
+
+ inline void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
+ const void* handlerp, Label* label);
+
+ template <typename Value>
+ inline void branchTestMIRType(Condition cond, const Value& val, MIRType type, Label* label);
+
+ // Emit a branch testing the type tag, but only if the definition might
+ // actually be of that type.
+ void maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label);
+
+ inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
+
+ // Perform a type-test on the tag of a Value (32-bit boxing), or on the tagged
+ // value (64-bit boxing).
+ inline void branchTestUndefined(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, Register tag, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+ inline void branchTestNumber(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestString(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestSymbol(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestPrimitive(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+ inline void branchTestMagic(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
+
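+ // A minimal usage sketch, assuming the tag has already been extracted into
+ // a hypothetical register |tag|:
+ //
+ //   Label isInt32, isDouble;
+ //   masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
+ //   masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+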
+ // Perform a type-test on a Value, addressed by Address or BaseIndex, or
+ // loaded into ValueOperand.
+ // The BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
+ // All variants clobber the ScratchReg on arm64.
+ inline void branchTestUndefined(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestInt32(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestDouble(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestBoolean(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestString(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestString(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestNull(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ // Clobbers the ScratchReg on x64.
+ inline void branchTestObject(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestGCThing(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestGCThing(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+
+ inline void branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestMagic(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
+ inline void branchTestMagic(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branchTestMagic(Condition cond, const ValueOperand& value, L label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ inline void branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) PER_ARCH;
+
+ inline void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
+ Label* label);
+
+ void branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) PER_ARCH;
+
+ // Checks whether the given Value evaluates to true or false in a condition.
+ // The type of the value should match the type of the method.
+ inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+ inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) PER_SHARED_ARCH;
+ inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label) PER_ARCH;
+ inline void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+
+ private:
+
+ // Implementation for branch* methods.
+ template <typename T>
+ inline void branch32Impl(Condition cond, const T& length, const RegisterOrInt32Constant& key,
+ Label* label);
+
+ template <typename T, typename S, typename L>
+ inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
+ DEFINED_ON(x86_shared);
+
+ void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
+ DEFINED_ON(x86);
+ template <typename T>
+ void branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp, Label* label)
+ DEFINED_ON(arm64, mips64, x64);
+
+ template <typename T>
+ inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestGCThingImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T, class L>
+ inline void branchTestMagicImpl(Condition cond, const T& t, L label)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ public:
+ // ========================================================================
+ // Canonicalization primitives.
+ inline void canonicalizeDouble(FloatRegister reg);
+ inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);
+
+ inline void canonicalizeFloat(FloatRegister reg);
+ inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
+
+ inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
+ DEFINED_ON(x86_shared);
+
+ public:
+ // ========================================================================
+ // Memory access primitives.
+ inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+ inline void storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+ inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
+ DEFINED_ON(x86_shared);
+
+ template<class T>
+ inline void storeDouble(FloatRegister src, const T& dest);
+
+ inline void storeUncanonicalizedFloat32(FloatRegister src, const Address& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+ inline void storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+ inline void storeUncanonicalizedFloat32(FloatRegister src, const Operand& dest)
+ DEFINED_ON(x86_shared);
+
+ template<class T>
+ inline void storeFloat32(FloatRegister src, const T& dest);
+
+ inline void storeFloat32x3(FloatRegister src, const Address& dest) PER_SHARED_ARCH;
+ inline void storeFloat32x3(FloatRegister src, const BaseIndex& dest) PER_SHARED_ARCH;
+
+ template <typename T>
+ void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const T& dest,
+ MIRType slotType) PER_ARCH;
+
+ inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
+
+ public:
+ // ========================================================================
+ // Truncate floating point.
+
+ // Undefined behaviour when truncation is outside Int64 range.
+ // Needs a temp register if SSE3 is not present.
+ inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
+ DEFINED_ON(x86_shared);
+ inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+ DEFINED_ON(x86, x64);
+ inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
+ DEFINED_ON(x86_shared);
+ inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+ DEFINED_ON(x86, x64);
+
+ public:
+ // ========================================================================
+ // wasm support
+
+ // Emit a bounds check against the (dynamically-patched) wasm bounds check
+ // limit, jumping to 'label' if 'cond' holds.
+ template <class L>
+ inline void wasmBoundsCheck(Condition cond, Register index, L label) PER_ARCH;
+
+ // Called after compilation completes to patch the given limit into the
+ // given instruction's immediate.
+ static inline void wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) PER_ARCH;
+
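+ // A minimal usage sketch (hypothetical register, label and patch address):
+ //
+ //   masm.wasmBoundsCheck(Assembler::AboveOrEqual, index, &outOfBounds);
+ //   // ... later, after compilation, once the limit is known:
+ //   MacroAssembler::wasmPatchBoundsCheck(patchAt, boundsCheckLimit);
+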
+ // On x86, each instruction adds its own wasm::MemoryAccesses to the
+ // wasm::MemoryAccessVector (there can be multiple when i64 is involved).
+ // On x64, only some asm.js accesses need a wasm::MemoryAccess, so the caller
+ // is responsible for adding them instead.
+ void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
+ void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
+ void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
+ void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
+
+ // wasm specific methods, used in both the wasm baseline compiler and ion.
+ void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
+ void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
+ void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
+
+ void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
+ void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
+ void outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
+
+ void outOfLineWasmTruncateDoubleToInt64(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
+ void outOfLineWasmTruncateFloat32ToInt64(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
+
+ // This function takes care of loading the callee's TLS and pinned regs but
+ // it is the caller's responsibility to save/restore TLS or pinned regs.
+ void wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
+
+ // WasmTableCallIndexReg must contain the index of the indirect call.
+ void wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
+
+ // This function takes care of loading the pointer to the current instance
+ // as the implicit first argument. It preserves TLS and pinned registers.
+ // (TLS & pinned regs are non-volatile registers in the system ABI).
+ void wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
+ wasm::SymbolicAddress builtin);
+
+ // Emit the out-of-line trap code to which trapping jumps/branches are
+ // bound. This should be called once per function after all other codegen,
+ // including "normal" OutOfLineCode.
+ void wasmEmitTrapOutOfLineCode();
+
+ public:
+ // ========================================================================
+ // Clamping functions.
+
+ inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
+
+ //}}} check_macroassembler_style
+ public:
+
+ // Emits a test of a value against all types in a TypeSet. A scratch
+ // register is required.
+ template <typename Source>
+ void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind, Register scratch, Label* miss);
+
+ void guardObjectType(Register obj, const TypeSet* types, Register scratch, Label* miss);
+
+ template <typename TypeSet>
+ void guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label);
+
+ void loadObjShape(Register objReg, Register dest) {
+ loadPtr(Address(objReg, ShapedObject::offsetOfShape()), dest);
+ }
+ void loadObjGroup(Register objReg, Register dest) {
+ loadPtr(Address(objReg, JSObject::offsetOfGroup()), dest);
+ }
+ void loadBaseShape(Register objReg, Register dest) {
+ loadObjShape(objReg, dest);
+ loadPtr(Address(dest, Shape::offsetOfBase()), dest);
+ }
+ void loadObjClass(Register objReg, Register dest) {
+ loadObjGroup(objReg, dest);
+ loadPtr(Address(dest, ObjectGroup::offsetOfClasp()), dest);
+ }
+
+ void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
+ loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
+ }
+
+ void loadObjProto(Register obj, Register dest) {
+ loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
+ loadPtr(Address(dest, ObjectGroup::offsetOfProto()), dest);
+ }
+
+ void loadStringLength(Register str, Register dest) {
+ load32(Address(str, JSString::offsetOfLength()), dest);
+ }
+
+ void loadStringChars(Register str, Register dest);
+ void loadStringChar(Register str, Register index, Register output);
+
+ void loadJSContext(Register dest) {
+ movePtr(ImmPtr(GetJitContext()->runtime->getJSContext()), dest);
+ }
+ void loadJitActivation(Register dest) {
+ loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfActivation()), dest);
+ }
+ void loadWasmActivationFromTls(Register dest) {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
+ loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
+ }
+ void loadWasmActivationFromSymbolicAddress(Register dest) {
+ movePtr(wasm::SymbolicAddress::Context, dest);
+ loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
+ }
+
+ template<typename T>
+ void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
+ if (dest.hasValue())
+ loadValue(src, dest.valueReg());
+ else
+ loadUnboxedValue(src, dest.type(), dest.typedReg());
+ }
+
+ template<typename T>
+ void loadElementTypedOrValue(const T& src, TypedOrValueRegister dest, bool holeCheck,
+ Label* hole) {
+ if (dest.hasValue()) {
+ loadValue(src, dest.valueReg());
+ if (holeCheck)
+ branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
+ } else {
+ if (holeCheck)
+ branchTestMagic(Assembler::Equal, src, hole);
+ loadUnboxedValue(src, dest.type(), dest.typedReg());
+ }
+ }
+
+ template <typename T>
+ void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
+ if (src.hasValue()) {
+ storeValue(src.valueReg(), dest);
+ } else if (IsFloatingPointType(src.type())) {
+ FloatRegister reg = src.typedReg().fpu();
+ if (src.type() == MIRType::Float32) {
+ convertFloat32ToDouble(reg, ScratchDoubleReg);
+ reg = ScratchDoubleReg;
+ }
+ storeDouble(reg, dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
+ }
+ }
+
+ template <typename T>
+ inline void storeObjectOrNull(Register src, const T& dest);
+
+ template <typename T>
+ void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
+ if (src.constant())
+ storeValue(src.value(), dest);
+ else
+ storeTypedOrValue(src.reg(), dest);
+ }
+
+ void storeCallPointerResult(Register reg) {
+ if (reg != ReturnReg)
+ mov(ReturnReg, reg);
+ }
+
+ inline void storeCallBoolResult(Register reg);
+ inline void storeCallInt32Result(Register reg);
+
+ void storeCallFloatResult(FloatRegister reg) {
+ if (reg != ReturnDoubleReg)
+ moveDouble(ReturnDoubleReg, reg);
+ }
+
+ inline void storeCallResultValue(AnyRegister dest);
+
+ void storeCallResultValue(ValueOperand dest) {
+#if defined(JS_NUNBOX32)
+ // Reshuffle the return registers used for a call result to store into
+ // dest, using ReturnReg as a scratch register if necessary. This must
+ // only be called after returning from a call, at a point when the
+ // return register is not live. XXX it would be better to allow wrappers
+ // to store the return value to different places.
+ if (dest.typeReg() == JSReturnReg_Data) {
+ if (dest.payloadReg() == JSReturnReg_Type) {
+ // swap the two registers.
+ mov(JSReturnReg_Type, ReturnReg);
+ mov(JSReturnReg_Data, JSReturnReg_Type);
+ mov(ReturnReg, JSReturnReg_Data);
+ } else {
+ mov(JSReturnReg_Data, dest.payloadReg());
+ mov(JSReturnReg_Type, dest.typeReg());
+ }
+ } else {
+ mov(JSReturnReg_Type, dest.typeReg());
+ mov(JSReturnReg_Data, dest.payloadReg());
+ }
+#elif defined(JS_PUNBOX64)
+ if (dest.valueReg() != JSReturnReg)
+ mov(JSReturnReg, dest.valueReg());
+#else
+#error "Bad architecture"
+#endif
+ }
+
+ inline void storeCallResultValue(TypedOrValueRegister dest);
+
+ template <typename T>
+ Register extractString(const T& source, Register scratch) {
+ return extractObject(source, scratch);
+ }
+ using MacroAssemblerSpecific::store32;
+ void store32(const RegisterOrInt32Constant& key, const Address& dest) {
+ if (key.isRegister())
+ store32(key.reg(), dest);
+ else
+ store32(Imm32(key.constant()), dest);
+ }
+
+ template <typename T>
+ void callPreBarrier(const T& address, MIRType type) {
+ Label done;
+
+ if (type == MIRType::Value)
+ branchTestGCThing(Assembler::NotEqual, address, &done);
+
+ Push(PreBarrierReg);
+ computeEffectiveAddress(address, PreBarrierReg);
+
+ const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
+ JitCode* preBarrier = rt->preBarrier(type);
+
+ call(preBarrier);
+ Pop(PreBarrierReg);
+
+ bind(&done);
+ }
+
+ template <typename T>
+ void patchableCallPreBarrier(const T& address, MIRType type) {
+ Label done;
+
+ // All barriers are off by default.
+ // They are enabled if necessary at the end of CodeGenerator::generate().
+ CodeOffset nopJump = toggledJump(&done);
+ writePrebarrierOffset(nopJump);
+
+ callPreBarrier(address, type);
+ jump(&done);
+
+ haltingAlign(8);
+ bind(&done);
+ }
+
+ template<typename T>
+ void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
+ bool canonicalizeDoubles = true, unsigned numElems = 0);
+
+ template<typename T>
+ void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
+ Register temp, Label* fail);
+
+ template<typename S, typename T>
+ void storeToTypedIntArray(Scalar::Type arrayType, const S& value, const T& dest) {
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ store8(value, dest);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ store16(value, dest);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ store32(value, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+ }
+
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
+ unsigned numElems = 0);
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
+ unsigned numElems = 0);
+
+ // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
+ template <typename T>
+ void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
+
+ // Store a property to an UnboxedPlainObject, without triggering barriers.
+ // If failure is null, the value definitely has a type suitable for storing
+ // in the property.
+ template <typename T>
+ void storeUnboxedProperty(T address, JSValueType type,
+ const ConstantOrRegister& value, Label* failure);
+
+ void checkUnboxedArrayCapacity(Register obj, const RegisterOrInt32Constant& index,
+ Register temp, Label* failure);
+
+ Register extractString(const Address& address, Register scratch) {
+ return extractObject(address, scratch);
+ }
+ Register extractString(const ValueOperand& value, Register scratch) {
+ return extractObject(value, scratch);
+ }
+
+ using MacroAssemblerSpecific::extractTag;
+ Register extractTag(const TypedOrValueRegister& reg, Register scratch) {
+ if (reg.hasValue())
+ return extractTag(reg.valueReg(), scratch);
+ mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
+ return scratch;
+ }
+
+ using MacroAssemblerSpecific::extractObject;
+ Register extractObject(const TypedOrValueRegister& reg, Register scratch) {
+ if (reg.hasValue())
+ return extractObject(reg.valueReg(), scratch);
+ MOZ_ASSERT(reg.type() == MIRType::Object);
+ return reg.typedReg().gpr();
+ }
+
+ // Inline version of js_TypedArray_uint8_clamp_double.
+ // This function clobbers the input register.
+ void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;
+
+ using MacroAssemblerSpecific::ensureDouble;
+
+ template <typename S>
+ void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ branchTestDouble(Assembler::Equal, source, &isDouble);
+ branchTestInt32(Assembler::NotEqual, source, failure);
+
+ convertInt32ToDouble(source, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+ }
+
+ // Inline allocation.
+ private:
+ void checkAllocatorState(Label* fail);
+ bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap);
+ void nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
+ size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
+ void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
+ void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
+ uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
+ void allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
+ void copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
+ uint32_t start, uint32_t end);
+ void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start, uint32_t end,
+ const Value& v);
+ void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start, uint32_t end);
+ void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start, uint32_t end);
+
+ void initGCSlots(Register obj, Register temp, NativeObject* templateObj, bool initContents);
+
+ public:
+ void callMallocStub(size_t nbytes, Register result, Label* fail);
+ void callFreeStub(Register slots);
+ void createGCObject(Register result, Register temp, JSObject* templateObj,
+ gc::InitialHeap initialHeap, Label* fail, bool initContents = true,
+ bool convertDoubleElements = false);
+
+ void initGCThing(Register obj, Register temp, JSObject* templateObj,
+ bool initContents = true, bool convertDoubleElements = false);
+ void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
+ LiveRegisterSet liveRegs, Label* fail,
+ TypedArrayObject* templateObj, TypedArrayLength lengthKind);
+
+ void initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject);
+
+ void newGCString(Register result, Register temp, Label* fail);
+ void newGCFatInlineString(Register result, Register temp, Label* fail);
+
+ // Compares two strings for equality based on the JSOp.
+ // This checks for identical pointers, atoms and lengths, and fails for everything else.
+ void compareStrings(JSOp op, Register left, Register right, Register result,
+ Label* fail);
+
+ public:
+ // Generates code used to complete a bailout.
+ void generateBailoutTail(Register scratch, Register bailoutInfo);
+
+ public:
+#ifndef JS_CODEGEN_ARM64
+ // StackPointer manipulation functions.
+ // On ARM64, the StackPointer is implemented as two synchronized registers.
+ // Code shared across platforms must use these functions to be valid.
+ template <typename T> inline void addToStackPtr(T t);
+ template <typename T> inline void addStackPtrTo(T t);
+
+ void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64);
+ void subFromStackPtr(Register reg);
+
+ template <typename T>
+ void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); }
+
+ template <typename T>
+ void andToStackPtr(T t) { andPtr(t, getStackPointer()); }
+ template <typename T>
+ void andStackPtrTo(T t) { andPtr(getStackPointer(), t); }
+
+ template <typename T>
+ void moveToStackPtr(T t) { movePtr(t, getStackPointer()); }
+ template <typename T>
+ void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }
+
+ template <typename T>
+ void loadStackPtr(T t) { loadPtr(t, getStackPointer()); }
+ template <typename T>
+ void storeStackPtr(T t) { storePtr(getStackPointer(), t); }
+
+ // StackPointer testing functions.
+ // On ARM64, sp can function as the zero register depending on context.
+ // Code shared across platforms must use these functions to be valid.
+ template <typename T>
+ inline void branchTestStackPtr(Condition cond, T t, Label* label);
+ template <typename T>
+ inline void branchStackPtr(Condition cond, T rhs, Label* label);
+ template <typename T>
+ inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);
+
+ // Move the stack pointer based on the requested amount.
+ inline void reserveStack(uint32_t amount);
+#else // !JS_CODEGEN_ARM64
+ void reserveStack(uint32_t amount);
+#endif
+
+ public:
+ void enableProfilingInstrumentation() {
+ emitProfilingInstrumentation_ = true;
+ }
+
+ private:
+ // This class is used to surround call sites throughout the assembler. It is
+ // used by the callWithABI and callJit functions, except those suffixed with
+ // NoProfiler.
+ class AutoProfilerCallInstrumentation {
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+
+ public:
+ explicit AutoProfilerCallInstrumentation(MacroAssembler& masm
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
+ ~AutoProfilerCallInstrumentation() {}
+ };
+ friend class AutoProfilerCallInstrumentation;
+
+ void appendProfilerCallSite(CodeOffset label) {
+ propagateOOM(profilerCallSites_.append(label));
+ }
+
+ // Fix up the code pointers to be written for locations where profilerCallSite
+ // emitted moves of RIP to a register.
+ void linkProfilerCallSites(JitCode* code);
+
+ // This field is used to manage profiling instrumentation output. If
+ // enabled, instrumentation will be emitted around call sites.
+ bool emitProfilingInstrumentation_;
+
+ // Record locations of the call sites.
+ Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
+
+ public:
+ void loadBaselineOrIonRaw(Register script, Register dest, Label* failure);
+ void loadBaselineOrIonNoArgCheck(Register callee, Register dest, Label* failure);
+
+ void loadBaselineFramePtr(Register framePtr, Register dest);
+
+ void pushBaselineFramePtr(Register framePtr, Register scratch) {
+ loadBaselineFramePtr(framePtr, scratch);
+ push(scratch);
+ }
+
+ void PushBaselineFramePtr(Register framePtr, Register scratch) {
+ loadBaselineFramePtr(framePtr, scratch);
+ Push(scratch);
+ }
+
+ private:
+ void handleFailure();
+
+ public:
+ Label* exceptionLabel() {
+ // Exceptions are currently handled the same way as sequential failures.
+ return &failureLabel_;
+ }
+
+ Label* failureLabel() {
+ return &failureLabel_;
+ }
+
+ void finish();
+ void link(JitCode* code);
+
+ void assumeUnreachable(const char* output);
+
+ template<typename T>
+ void assertTestInt32(Condition cond, const T& value, const char* output);
+
+ void printf(const char* output);
+ void printf(const char* output, Register value);
+
+#ifdef JS_TRACE_LOGGING
+ void tracelogStartId(Register logger, uint32_t textId, bool force = false);
+ void tracelogStartId(Register logger, Register textId);
+ void tracelogStartEvent(Register logger, Register event);
+ void tracelogStopId(Register logger, uint32_t textId, bool force = false);
+ void tracelogStopId(Register logger, Register textId);
+#endif
+
+#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
+ MOZ_ASSERT(IsFloatingPointType(type)); \
+ if (type == MIRType::Double) \
+ method##Double(arg1d, arg2); \
+ else \
+ method##Float32(arg1f, arg2); \
+
+ void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
+ }
+ void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
+ }
+ void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
+ }
+ void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
+ }
+
+#undef DISPATCH_FLOATING_POINT_OP
+
+ void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label* fail,
+ MIRType outputType);
+ MOZ_MUST_USE bool convertValueToFloatingPoint(JSContext* cx, const Value& v,
+ FloatRegister output, Label* fail,
+ MIRType outputType);
+ MOZ_MUST_USE bool convertConstantOrRegisterToFloatingPoint(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister output, Label* fail,
+ MIRType outputType);
+ void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
+ Label* fail, MIRType outputType);
+
+ void outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
+ bool compilingWasm);
+
+ void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
+ void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
+ convertValueToFloatingPoint(value, output, fail, MIRType::Double);
+ }
+ MOZ_MUST_USE bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output,
+ Label* fail) {
+ return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Double);
+ }
+ MOZ_MUST_USE bool convertConstantOrRegisterToDouble(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister output, Label* fail)
+ {
+ return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Double);
+ }
+ void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label* fail) {
+ convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Double);
+ }
+
+ void convertValueToFloat(ValueOperand value, FloatRegister output, Label* fail) {
+ convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
+ }
+ MOZ_MUST_USE bool convertValueToFloat(JSContext* cx, const Value& v, FloatRegister output,
+ Label* fail) {
+ return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Float32);
+ }
+ MOZ_MUST_USE bool convertConstantOrRegisterToFloat(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister output, Label* fail)
+ {
+ return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Float32);
+ }
+ void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label* fail) {
+ convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Float32);
+ }
+
+ enum IntConversionBehavior {
+ IntConversion_Normal,
+ IntConversion_NegativeZeroCheck,
+ IntConversion_Truncate,
+ IntConversion_ClampToUint8,
+ };
+
+ enum IntConversionInputKind {
+ IntConversion_NumbersOnly,
+ IntConversion_NumbersOrBoolsOnly,
+ IntConversion_Any
+ };
+
+ //
+ // Functions for converting values to int.
+ //
+ void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
+ Label* truncateFail, Label* fail, IntConversionBehavior behavior);
+
+ // Strings may be handled by providing labels to jump to when the behavior
+ // is truncation or clamping. The subroutine, usually an OOL call, is
+ // passed the unboxed string in |stringReg| and should convert it to a
+ // double, storing the result into |temp|.
+ void convertValueToInt(ValueOperand value, MDefinition* input,
+ Label* handleStringEntry, Label* handleStringRejoin,
+ Label* truncateDoubleSlow,
+ Register stringReg, FloatRegister temp, Register output,
+ Label* fail, IntConversionBehavior behavior,
+ IntConversionInputKind conversion = IntConversion_Any);
+ void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label* fail,
+ IntConversionBehavior behavior)
+ {
+ convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
+ fail, behavior);
+ }
+ MOZ_MUST_USE bool convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
+ IntConversionBehavior behavior);
+ MOZ_MUST_USE bool convertConstantOrRegisterToInt(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister temp, Register output,
+ Label* fail, IntConversionBehavior behavior);
+ void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
+ Label* fail, IntConversionBehavior behavior);
+
+ //
+ // Convenience functions for converting values to int32.
+ //
+ void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail,
+ bool negativeZeroCheck)
+ {
+ convertValueToInt(value, temp, output, fail, negativeZeroCheck
+ ? IntConversion_NegativeZeroCheck
+ : IntConversion_Normal);
+ }
+ void convertValueToInt32(ValueOperand value, MDefinition* input,
+ FloatRegister temp, Register output, Label* fail,
+ bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
+ {
+ convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
+ negativeZeroCheck
+ ? IntConversion_NegativeZeroCheck
+ : IntConversion_Normal,
+ conversion);
+ }
+ MOZ_MUST_USE bool convertValueToInt32(JSContext* cx, const Value& v, Register output,
+ Label* fail, bool negativeZeroCheck)
+ {
+ return convertValueToInt(cx, v, output, fail, negativeZeroCheck
+ ? IntConversion_NegativeZeroCheck
+ : IntConversion_Normal);
+ }
+ MOZ_MUST_USE bool convertConstantOrRegisterToInt32(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister temp, Register output,
+ Label* fail, bool negativeZeroCheck)
+ {
+ return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
+ ? IntConversion_NegativeZeroCheck
+ : IntConversion_Normal);
+ }
+ void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
+ Label* fail, bool negativeZeroCheck)
+ {
+ convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
+ ? IntConversion_NegativeZeroCheck
+ : IntConversion_Normal);
+ }
+
+ //
+ // Convenience functions for truncating values to int32.
+ //
+ void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
+ convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
+ }
+ void truncateValueToInt32(ValueOperand value, MDefinition* input,
+ Label* handleStringEntry, Label* handleStringRejoin,
+ Label* truncateDoubleSlow,
+ Register stringReg, FloatRegister temp, Register output, Label* fail)
+ {
+ convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
+ stringReg, temp, output, fail, IntConversion_Truncate);
+ }
+ void truncateValueToInt32(ValueOperand value, MDefinition* input,
+ FloatRegister temp, Register output, Label* fail)
+ {
+ convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
+ IntConversion_Truncate);
+ }
+ MOZ_MUST_USE bool truncateValueToInt32(JSContext* cx, const Value& v, Register output,
+ Label* fail) {
+ return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
+ }
+ MOZ_MUST_USE bool truncateConstantOrRegisterToInt32(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister temp, Register output,
+ Label* fail)
+ {
+ return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
+ }
+ void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
+ Label* fail)
+ {
+ convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
+ }
+
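+ // A minimal usage sketch (hypothetical operands): truncate a boxed Value
+ // to int32 with truncating (ToInt32-style) semantics, jumping to |fail|
+ // for inputs the inline path cannot handle:
+ //
+ //   masm.truncateValueToInt32(value, floatTemp, output, &fail);
+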
+ // Convenience functions for clamping values to uint8.
+ void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
+ convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
+ }
+ void clampValueToUint8(ValueOperand value, MDefinition* input,
+ Label* handleStringEntry, Label* handleStringRejoin,
+ Register stringReg, FloatRegister temp, Register output, Label* fail)
+ {
+ convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
+ stringReg, temp, output, fail, IntConversion_ClampToUint8);
+ }
+ void clampValueToUint8(ValueOperand value, MDefinition* input,
+ FloatRegister temp, Register output, Label* fail)
+ {
+ convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
+ IntConversion_ClampToUint8);
+ }
+ MOZ_MUST_USE bool clampValueToUint8(JSContext* cx, const Value& v, Register output,
+ Label* fail) {
+ return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
+ }
+ MOZ_MUST_USE bool clampConstantOrRegisterToUint8(JSContext* cx,
+ const ConstantOrRegister& src,
+ FloatRegister temp, Register output,
+ Label* fail)
+ {
+ return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
+ IntConversion_ClampToUint8);
+ }
+ void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
+ Label* fail)
+ {
+ convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
+ }
+
+ public:
+ class AfterICSaveLive {
+ friend class MacroAssembler;
+ explicit AfterICSaveLive(uint32_t initialStack)
+#ifdef JS_DEBUG
+ : initialStack(initialStack)
+#endif
+ {}
+
+ public:
+#ifdef JS_DEBUG
+ uint32_t initialStack;
+#endif
+ uint32_t alignmentPadding;
+ };
+
+ void alignFrameForICArguments(AfterICSaveLive& aic) PER_ARCH;
+ void restoreFrameAlignmentForICArguments(AfterICSaveLive& aic) PER_ARCH;
+
+ AfterICSaveLive icSaveLive(LiveRegisterSet& liveRegs);
+ MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic);
+ void icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic);
+
+ // Align the stack pointer based on the number of arguments pushed on the
+ // stack, such that the JitFrameLayout is correctly aligned on the
+ // JitStackAlignment.
+ void alignJitStackBasedOnNArgs(Register nargs);
+ void alignJitStackBasedOnNArgs(uint32_t nargs);
+
+ inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
+};
+
+static inline Assembler::DoubleCondition
+JSOpToDoubleCondition(JSOp op)
+{
+ switch (op) {
+ case JSOP_EQ:
+ case JSOP_STRICTEQ:
+ return Assembler::DoubleEqual;
+ case JSOP_NE:
+ case JSOP_STRICTNE:
+ return Assembler::DoubleNotEqualOrUnordered;
+ case JSOP_LT:
+ return Assembler::DoubleLessThan;
+ case JSOP_LE:
+ return Assembler::DoubleLessThanOrEqual;
+ case JSOP_GT:
+ return Assembler::DoubleGreaterThan;
+ case JSOP_GE:
+ return Assembler::DoubleGreaterThanOrEqual;
+ default:
+ MOZ_CRASH("Unexpected comparison operation");
+ }
+}
+
+// Note: the op may have been inverted during lowering (to put constants in a
+// position where they can be immediates), so it is important to use the
+// lir->jsop() instead of the mir->jsop() when it is present.
+static inline Assembler::Condition
+JSOpToCondition(JSOp op, bool isSigned)
+{
+ if (isSigned) {
+ switch (op) {
+ case JSOP_EQ:
+ case JSOP_STRICTEQ:
+ return Assembler::Equal;
+ case JSOP_NE:
+ case JSOP_STRICTNE:
+ return Assembler::NotEqual;
+ case JSOP_LT:
+ return Assembler::LessThan;
+ case JSOP_LE:
+ return Assembler::LessThanOrEqual;
+ case JSOP_GT:
+ return Assembler::GreaterThan;
+ case JSOP_GE:
+ return Assembler::GreaterThanOrEqual;
+ default:
+ MOZ_CRASH("Unrecognized comparison operation");
+ }
+ } else {
+ switch (op) {
+ case JSOP_EQ:
+ case JSOP_STRICTEQ:
+ return Assembler::Equal;
+ case JSOP_NE:
+ case JSOP_STRICTNE:
+ return Assembler::NotEqual;
+ case JSOP_LT:
+ return Assembler::Below;
+ case JSOP_LE:
+ return Assembler::BelowOrEqual;
+ case JSOP_GT:
+ return Assembler::Above;
+ case JSOP_GE:
+ return Assembler::AboveOrEqual;
+ default:
+ MOZ_CRASH("Unrecognized comparison operation");
+ }
+ }
+}
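+
+// For example, JSOpToCondition(JSOP_LT, /* isSigned = */ true) yields
+// Assembler::LessThan, while the unsigned form yields Assembler::Below.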
+
+static inline size_t
+StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
+{
+ return bytesToPush +
+ ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
+}
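+
+// Worked example: with a 16-byte alignment, 8 bytes already pushed and 20
+// bytes to push, the decrement is 20 + ComputeByteAlignment(28, 16) = 24,
+// leaving the total pushed size (8 + 24 = 32) a multiple of 16.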
+
+static inline MIRType
+ToMIRType(MIRType t)
+{
+ return t;
+}
+
+template <class VecT>
+class ABIArgIter
+{
+ ABIArgGenerator gen_;
+ const VecT& types_;
+ unsigned i_;
+
+ void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }
+
+ public:
+ explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
+ void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
+ bool done() const { return i_ == types_.length(); }
+
+ ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
+ ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }
+
+ unsigned index() const { MOZ_ASSERT(!done()); return i_; }
+ MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
+ uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
+};
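+
+// A minimal usage sketch, assuming a hypothetical |args| vector of MIRType
+// (MIRTypeVector) and a hypothetical per-argument consumer:
+//
+//   for (ABIArgIter<MIRTypeVector> iter(args); !iter.done(); iter++)
+//       consumeArg(*iter, iter.mirType());   // hypothetical helper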
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MacroAssembler_h */
diff --git a/js/src/jit/MoveEmitter.h b/js/src/jit/MoveEmitter.h
new file mode 100644
index 000000000..133ba5a74
--- /dev/null
+++ b/js/src/jit/MoveEmitter.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveEmitter_h
+#define jit_MoveEmitter_h
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "jit/x86-shared/MoveEmitter-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MoveEmitter-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MoveEmitter-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MoveEmitter-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MoveEmitter-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/MoveEmitter-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+#endif /* jit_MoveEmitter_h */
diff --git a/js/src/jit/MoveResolver.cpp b/js/src/jit/MoveResolver.cpp
new file mode 100644
index 000000000..5fd6c7bd5
--- /dev/null
+++ b/js/src/jit/MoveResolver.cpp
@@ -0,0 +1,321 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MoveResolver.h"
+
+#include "mozilla/Attributes.h"
+
+#include "jit/MacroAssembler.h"
+#include "jit/RegisterSets.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg)
+{
+ switch (arg.kind()) {
+ case ABIArg::GPR:
+ kind_ = REG;
+ code_ = arg.gpr().code();
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ kind_ = REG_PAIR;
+ code_ = arg.evenGpr().code();
+ MOZ_ASSERT(code_ % 2 == 0);
+ MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
+ break;
+#endif
+ case ABIArg::FPU:
+ kind_ = FLOAT_REG;
+ code_ = arg.fpu().code();
+ break;
+ case ABIArg::Stack:
+ kind_ = MEMORY;
+ code_ = masm.getStackPointer().code();
+ disp_ = arg.offsetFromArgBase();
+ break;
+ }
+}
+
+MoveResolver::MoveResolver()
+ : numCycles_(0), curCycles_(0)
+{
+}
+
+void
+MoveResolver::resetState()
+{
+ numCycles_ = 0;
+ curCycles_ = 0;
+}
+
+bool
+MoveResolver::addMove(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type)
+{
+ // Assert that we're not doing no-op moves.
+ MOZ_ASSERT(!(from == to));
+ PendingMove* pm = movePool_.allocate();
+ if (!pm)
+ return false;
+ new (pm) PendingMove(from, to, type);
+ pending_.pushBack(pm);
+ return true;
+}
+
+// Given move (A -> B), this function attempts to find any move (B -> *) in the
+// pending move list, and returns the first one.
+MoveResolver::PendingMove*
+MoveResolver::findBlockingMove(const PendingMove* last)
+{
+ for (PendingMoveIterator iter = pending_.begin(); iter != pending_.end(); iter++) {
+ PendingMove* other = *iter;
+
+ if (other->from().aliases(last->to())) {
+ // We now have pairs in the form (A -> X) (X -> Y). The second pair
+ // blocks the move in the first pair, so return it.
+ return other;
+ }
+ }
+
+ // No blocking moves found.
+ return nullptr;
+}
+
+// Given move (A -> B), this function attempts to find any move (B -> *) in the
+// move list iterator, and returns the first one.
+// N.B. It is unclear whether a single move can complete more than one cycle,
+// so to be conservative this function operates on iterators, letting the
+// caller process all moves that start a cycle.
+MoveResolver::PendingMove*
+MoveResolver::findCycledMove(PendingMoveIterator* iter, PendingMoveIterator end, const PendingMove* last)
+{
+ for (; *iter != end; (*iter)++) {
+ PendingMove* other = **iter;
+ if (other->from().aliases(last->to())) {
+ // We now have pairs in the form (A -> X) (X -> Y). The second pair
+ // blocks the move in the first pair, so return it.
+ (*iter)++;
+ return other;
+ }
+ }
+ // No blocking moves found.
+ return nullptr;
+}
+
+bool
+MoveResolver::resolve()
+{
+ resetState();
+ orderedMoves_.clear();
+
+ InlineList<PendingMove> stack;
+
+ // This is a depth-first-search without recursion, which tries to find
+ // cycles in a list of moves.
+ //
+ // Algorithm.
+ //
+ // S = Traversal stack.
+ // P = Pending move list.
+ // O = Ordered list of moves.
+ //
+ // As long as there are pending moves in P:
+ // Let |root| be any pending move removed from P
+ // Add |root| to the traversal stack.
+ // As long as S is not empty:
+ // Let |L| be the most recent move added to S.
+ //
+ // Find any pending move M whose source is L's destination, thus
+ // preventing L's move until M has completed.
+ //
+ // If a move M was found,
+ // Remove M from the pending list.
+ // If M's destination is |root|,
+ // Annotate M and |root| as cycles.
+ // Add M to S.
+ // Do not add M to O, since M may have other conflicting moves in P
+ // that have not yet been processed.
+ // Otherwise,
+ // Add M to S.
+ // Otherwise,
+ // Remove L from S.
+ // Add L to O.
+ //
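+ // For example (illustrative only): for the swap {A -> B, B -> A}, suppose
+ // B -> A is removed from P first and becomes |root|. A -> B blocks it and
+ // also closes the cycle back to |root|, so B -> A is marked as the cycle's
+ // end, A -> B as its beginning, and A -> B is pushed on S. Nothing blocks
+ // A -> B, so the stack unwinds and O ends up as [A -> B (cycle begin),
+ // B -> A (cycle end)], leaving the move emitter to preserve B's original
+ // value across the first move.
+ //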
+ while (!pending_.empty()) {
+ PendingMove* pm = pending_.popBack();
+
+ // Add this pending move to the cycle detection stack.
+ stack.pushBack(pm);
+
+ while (!stack.empty()) {
+ PendingMove* blocking = findBlockingMove(stack.peekBack());
+
+ if (blocking) {
+ PendingMoveIterator stackiter = stack.begin();
+ PendingMove* cycled = findCycledMove(&stackiter, stack.end(), blocking);
+ if (cycled) {
+ // Find the cycle's start. Cycles are annotated at both ends:
+ // the stack move whose source is overwritten by |blocking| is
+ // marked as the cycle's end, and |blocking| itself as its
+ // beginning. A single move chain can complete more than one
+ // cycle, so find them all.
+ do {
+ cycled->setCycleEnd(curCycles_);
+ cycled = findCycledMove(&stackiter, stack.end(), blocking);
+ } while (cycled);
+
+ blocking->setCycleBegin(pm->type(), curCycles_);
+ curCycles_++;
+ pending_.remove(blocking);
+ stack.pushBack(blocking);
+ } else {
+ // This is a new link in the move chain, so keep
+ // searching for a cycle.
+ pending_.remove(blocking);
+ stack.pushBack(blocking);
+ }
+ } else {
+ // Otherwise, pop the last move on the search stack because it's
+ // complete and not participating in a cycle. The resulting
+ // move can safely be added to the ordered move list.
+ PendingMove* done = stack.popBack();
+ if (!addOrderedMove(*done))
+ return false;
+ movePool_.free(done);
+ }
+ }
+ // Once the pending list is empty, it is certain that all previous
+ // cycles cannot conflict with future cycles, so reset the counter of
+ // pending cycles, while keeping a high-water mark.
+ if (numCycles_ < curCycles_)
+ numCycles_ = curCycles_;
+ curCycles_ = 0;
+ }
+
+ return true;
+}
+
+bool
+MoveResolver::addOrderedMove(const MoveOp& move)
+{
+ // Sometimes the register allocator generates move groups where multiple
+ // moves have the same source. Try to optimize these cases when the source
+ // is in memory and the target of one of the moves is in a register.
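+ // For example (operands are illustrative): if [sp + 8] -> r0 is already in
+ // the list and [sp + 8] -> [sp + 16] is being added, the new move is
+ // recorded as r0 -> [sp + 16] immediately after [sp + 8] -> r0, avoiding a
+ // second load of the shared memory source.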
+ MOZ_ASSERT(!move.from().aliases(move.to()));
+
+ if (!move.from().isMemory() || move.isCycleBegin() || move.isCycleEnd())
+ return orderedMoves_.append(move);
+
+ // Look for an earlier move with the same source, where no intervening move
+ // touches either the source or destination of the new move.
+ for (int i = orderedMoves_.length() - 1; i >= 0; i--) {
+ const MoveOp& existing = orderedMoves_[i];
+
+ if (existing.from() == move.from() &&
+ !existing.to().aliases(move.to()) &&
+ existing.type() == move.type() &&
+ !existing.isCycleBegin() &&
+ !existing.isCycleEnd())
+ {
+ MoveOp* after = orderedMoves_.begin() + i + 1;
+ if (existing.to().isGeneralReg() || existing.to().isFloatReg()) {
+ MoveOp nmove(existing.to(), move.to(), move.type());
+ return orderedMoves_.insert(after, nmove);
+ } else if (move.to().isGeneralReg() || move.to().isFloatReg()) {
+ MoveOp nmove(move.to(), existing.to(), move.type());
+ orderedMoves_[i] = move;
+ return orderedMoves_.insert(after, nmove);
+ }
+ }
+
+ if (existing.aliases(move))
+ break;
+ }
+
+ return orderedMoves_.append(move);
+}
+
+void
+MoveResolver::reorderMove(size_t from, size_t to)
+{
+ MOZ_ASSERT(from != to);
+
+ MoveOp op = orderedMoves_[from];
+ if (from < to) {
+ for (size_t i = from; i < to; i++)
+ orderedMoves_[i] = orderedMoves_[i + 1];
+ } else {
+ for (size_t i = from; i > to; i--)
+ orderedMoves_[i] = orderedMoves_[i - 1];
+ }
+ orderedMoves_[to] = op;
+}
+
+void
+MoveResolver::sortMemoryToMemoryMoves()
+{
+ // Try to reorder memory->memory moves so that they are executed right
+ // before a move that clobbers some register. This will allow the move
+ // emitter to use that clobbered register as a scratch register for the
+ // memory->memory move, if necessary.
+ for (size_t i = 0; i < orderedMoves_.length(); i++) {
+ const MoveOp& base = orderedMoves_[i];
+ if (!base.from().isMemory() || !base.to().isMemory())
+ continue;
+ if (base.type() != MoveOp::GENERAL && base.type() != MoveOp::INT32)
+ continue;
+
+ // Look for an earlier move clobbering a register.
+ bool found = false;
+ for (int j = i - 1; j >= 0; j--) {
+ const MoveOp& previous = orderedMoves_[j];
+ if (previous.aliases(base) || previous.isCycleBegin() || previous.isCycleEnd())
+ break;
+
+ if (previous.to().isGeneralReg()) {
+ reorderMove(i, j);
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ continue;
+
+ // Look for a later move clobbering a register.
+ if (i + 1 < orderedMoves_.length()) {
+ bool found = false, skippedRegisterUse = false;
+ for (size_t j = i + 1; j < orderedMoves_.length(); j++) {
+ const MoveOp& later = orderedMoves_[j];
+ if (later.aliases(base) || later.isCycleBegin() || later.isCycleEnd())
+ break;
+
+ if (later.to().isGeneralReg()) {
+ if (skippedRegisterUse) {
+ reorderMove(i, j);
+ found = true;
+ } else {
+ // There is no move that uses a register between the
+ // original memory->memory move and this move that
+ // clobbers a register. The move should already be able
+ // to use a scratch register, so don't shift anything
+ // around.
+ }
+ break;
+ }
+
+ if (later.from().isGeneralReg())
+ skippedRegisterUse = true;
+ }
+
+ if (found) {
+ // Redo the search for memory->memory moves at the current
+ // index, so we don't skip the move just shifted back.
+ i--;
+ }
+ }
+ }
+}
diff --git a/js/src/jit/MoveResolver.h b/js/src/jit/MoveResolver.h
new file mode 100644
index 000000000..fad2ba9e3
--- /dev/null
+++ b/js/src/jit/MoveResolver.h
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveResolver_h
+#define jit_MoveResolver_h
+
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+
+// This is similar to Operand, but carries more information. We're also not
+// guaranteed that Operand looks like this on all ISAs.
+class MoveOperand
+{
+ public:
+ enum Kind {
+ // A register in the "integer", aka "general purpose", class.
+ REG,
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ // Two consecutive "integer" registers (aka "general purpose"). The even
+ // register contains the lower part, the odd register has the high bits
+ // of the content.
+ REG_PAIR,
+#endif
+ // A register in the "float" register class.
+ FLOAT_REG,
+ // A memory region.
+ MEMORY,
+ // The address of a memory region.
+ EFFECTIVE_ADDRESS
+ };
+
+ private:
+ Kind kind_;
+ uint32_t code_;
+ int32_t disp_;
+
+ public:
+ MoveOperand()
+ { }
+ explicit MoveOperand(Register reg) : kind_(REG), code_(reg.code())
+ { }
+ explicit MoveOperand(FloatRegister reg) : kind_(FLOAT_REG), code_(reg.code())
+ { }
+ MoveOperand(Register reg, int32_t disp, Kind kind = MEMORY)
+ : kind_(kind),
+ code_(reg.code()),
+ disp_(disp)
+ {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+
+ // The effective address of a zero-offset operand is just the base
+ // register, so treat it as a plain register operand.
+ if (disp == 0 && kind_ == EFFECTIVE_ADDRESS)
+ kind_ = REG;
+ }
+ MoveOperand(MacroAssembler& masm, const ABIArg& arg);
+ MoveOperand(const MoveOperand& other)
+ : kind_(other.kind_),
+ code_(other.code_),
+ disp_(other.disp_)
+ { }
+ bool isFloatReg() const {
+ return kind_ == FLOAT_REG;
+ }
+ bool isGeneralReg() const {
+ return kind_ == REG;
+ }
+ bool isGeneralRegPair() const {
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ return kind_ == REG_PAIR;
+#else
+ return false;
+#endif
+ }
+ bool isMemory() const {
+ return kind_ == MEMORY;
+ }
+ bool isEffectiveAddress() const {
+ return kind_ == EFFECTIVE_ADDRESS;
+ }
+ bool isMemoryOrEffectiveAddress() const {
+ return isMemory() || isEffectiveAddress();
+ }
+ Register reg() const {
+ MOZ_ASSERT(isGeneralReg());
+ return Register::FromCode(code_);
+ }
+ Register evenReg() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(code_);
+ }
+ Register oddReg() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(code_ + 1);
+ }
+ FloatRegister floatReg() const {
+ MOZ_ASSERT(isFloatReg());
+ return FloatRegister::FromCode(code_);
+ }
+ Register base() const {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+ return Register::FromCode(code_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+ return disp_;
+ }
+
+ bool aliases(MoveOperand other) const {
+
+ // Aliasing between memory operands and registers is not handled
+ // presently, but MEMORY and EFFECTIVE_ADDRESS only appear in controlled
+ // circumstances in the trampoline code, which ensures these cases never
+ // come up.
+
+ MOZ_ASSERT_IF(isMemoryOrEffectiveAddress() && other.isGeneralReg(),
+ base() != other.reg());
+ MOZ_ASSERT_IF(other.isMemoryOrEffectiveAddress() && isGeneralReg(),
+ other.base() != reg());
+
+ // Check if one of the operands is a register pair; in that case we
+ // have to check it against any other register or register pair.
+ if (isGeneralRegPair() || other.isGeneralRegPair()) {
+ if (isGeneralRegPair() && other.isGeneralRegPair()) {
+ // Assume that register pairs are aligned on even registers.
+ MOZ_ASSERT(!evenReg().aliases(other.oddReg()));
+ MOZ_ASSERT(!oddReg().aliases(other.evenReg()));
+ // Register pairs are composed of consecutive registers, so if the
+ // first registers alias each other, then the second registers alias
+ // each other too.
+ MOZ_ASSERT(evenReg().aliases(other.evenReg()) == oddReg().aliases(other.oddReg()));
+ return evenReg().aliases(other.evenReg());
+ } else if (other.isGeneralReg()) {
+ MOZ_ASSERT(isGeneralRegPair());
+ return evenReg().aliases(other.reg()) ||
+ oddReg().aliases(other.reg());
+ } else if (isGeneralReg()) {
+ MOZ_ASSERT(other.isGeneralRegPair());
+ return other.evenReg().aliases(reg()) ||
+ other.oddReg().aliases(reg());
+ }
+ return false;
+ }
+
+ if (kind_ != other.kind_)
+ return false;
+ if (kind_ == FLOAT_REG)
+ return floatReg().aliases(other.floatReg());
+ if (code_ != other.code_)
+ return false;
+ if (isMemoryOrEffectiveAddress())
+ return disp_ == other.disp_;
+ return true;
+ }
+
+ bool operator ==(const MoveOperand& other) const {
+ if (kind_ != other.kind_)
+ return false;
+ if (code_ != other.code_)
+ return false;
+ if (isMemoryOrEffectiveAddress())
+ return disp_ == other.disp_;
+ return true;
+ }
+ bool operator !=(const MoveOperand& other) const {
+ return !operator==(other);
+ }
+};
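+
+// Illustrative constructions (register names are placeholders): MoveOperand(r0)
+// names the register itself, MoveOperand(sp, 8) names the memory word at
+// [sp + 8], and MoveOperand(sp, 8, MoveOperand::EFFECTIVE_ADDRESS) names the
+// address sp + 8 rather than its contents.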
+
+// This represents a move operation.
+class MoveOp
+{
+ protected:
+ MoveOperand from_;
+ MoveOperand to_;
+ bool cycleBegin_;
+ bool cycleEnd_;
+ int cycleBeginSlot_;
+ int cycleEndSlot_;
+ public:
+ enum Type {
+ GENERAL,
+ INT32,
+ FLOAT32,
+ DOUBLE,
+ SIMD128INT,
+ SIMD128FLOAT
+ };
+
+ protected:
+ Type type_;
+
+ // If cycleBegin_ is true, endCycleType_ is the type of the move at the end
+ // of the cycle. For example, given these moves:
+ // INT32 move a -> b
+ // GENERAL move b -> a
+ // the move resolver starts by copying b into a temporary location, so that
+ // the last move can read it. This copy needs to use type GENERAL.
+ Type endCycleType_;
+
+ public:
+ MoveOp()
+ { }
+ MoveOp(const MoveOperand& from, const MoveOperand& to, Type type)
+ : from_(from),
+ to_(to),
+ cycleBegin_(false),
+ cycleEnd_(false),
+ cycleBeginSlot_(-1),
+ cycleEndSlot_(-1),
+ type_(type)
+ { }
+
+ bool isCycleBegin() const {
+ return cycleBegin_;
+ }
+ bool isCycleEnd() const {
+ return cycleEnd_;
+ }
+ uint32_t cycleBeginSlot() const {
+ MOZ_ASSERT(cycleBeginSlot_ != -1);
+ return cycleBeginSlot_;
+ }
+ uint32_t cycleEndSlot() const {
+ MOZ_ASSERT(cycleEndSlot_ != -1);
+ return cycleEndSlot_;
+ }
+ const MoveOperand& from() const {
+ return from_;
+ }
+ const MoveOperand& to() const {
+ return to_;
+ }
+ Type type() const {
+ return type_;
+ }
+ Type endCycleType() const {
+ MOZ_ASSERT(isCycleBegin());
+ return endCycleType_;
+ }
+ bool aliases(const MoveOperand& op) const {
+ return from().aliases(op) || to().aliases(op);
+ }
+ bool aliases(const MoveOp& other) const {
+ return aliases(other.from()) || aliases(other.to());
+ }
+};
+
+class MoveResolver
+{
+ private:
+ struct PendingMove
+ : public MoveOp,
+ public TempObject,
+ public InlineListNode<PendingMove>
+ {
+ PendingMove()
+ { }
+ PendingMove(const MoveOperand& from, const MoveOperand& to, Type type)
+ : MoveOp(from, to, type)
+ { }
+
+ void setCycleBegin(Type endCycleType, int cycleSlot) {
+ MOZ_ASSERT(!cycleBegin_);
+ cycleBegin_ = true;
+ cycleBeginSlot_ = cycleSlot;
+ endCycleType_ = endCycleType;
+ }
+ void setCycleEnd(int cycleSlot) {
+ MOZ_ASSERT(!cycleEnd_);
+ cycleEnd_ = true;
+ cycleEndSlot_ = cycleSlot;
+ }
+ };
+
+ typedef InlineList<MoveResolver::PendingMove>::iterator PendingMoveIterator;
+
+ private:
+ js::Vector<MoveOp, 16, SystemAllocPolicy> orderedMoves_;
+ int numCycles_;
+ int curCycles_;
+ TempObjectPool<PendingMove> movePool_;
+
+ InlineList<PendingMove> pending_;
+
+ PendingMove* findBlockingMove(const PendingMove* last);
+ PendingMove* findCycledMove(PendingMoveIterator* stack, PendingMoveIterator end, const PendingMove* first);
+ MOZ_MUST_USE bool addOrderedMove(const MoveOp& move);
+ void reorderMove(size_t from, size_t to);
+
+ // Internal reset function. Does not clear lists.
+ void resetState();
+
+ public:
+ MoveResolver();
+
+ // Resolves a move group into a list of ordered moves. These moves must
+ // be executed in the order provided. Some moves may indicate that they
+ // participate in a cycle. For every cycle there are two such moves, and it
+ // is guaranteed that cycles do not nest inside each other in the list.
+ //
+ // After calling addMove() for each parallel move, resolve() performs the
+ // cycle resolution algorithm. Calling addMove() again resets the resolver.
+ MOZ_MUST_USE bool addMove(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type);
+ MOZ_MUST_USE bool resolve();
+ void sortMemoryToMemoryMoves();
+
+ size_t numMoves() const {
+ return orderedMoves_.length();
+ }
+ const MoveOp& getMove(size_t i) const {
+ return orderedMoves_[i];
+ }
+ uint32_t numCycles() const {
+ return numCycles_;
+ }
+ void setAllocator(TempAllocator& alloc) {
+ movePool_.setAllocator(alloc);
+ }
+};
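+
+// Typical use (a minimal sketch; |alloc|, the operands, and the consumer of
+// getMove() are placeholders, not part of this interface):
+//
+//   MoveResolver resolver;
+//   resolver.setAllocator(alloc);
+//   if (!resolver.addMove(MoveOperand(r0), MoveOperand(r1), MoveOp::GENERAL))
+//       return false;
+//   if (!resolver.resolve())
+//       return false;
+//   resolver.sortMemoryToMemoryMoves();
+//   for (size_t i = 0; i < resolver.numMoves(); i++)
+//       ... emit resolver.getMove(i) ...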
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MoveResolver_h */
diff --git a/js/src/jit/OptimizationTracking.cpp b/js/src/jit/OptimizationTracking.cpp
new file mode 100644
index 000000000..308def041
--- /dev/null
+++ b/js/src/jit/OptimizationTracking.cpp
@@ -0,0 +1,1305 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/OptimizationTracking.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jsprf.h"
+
+#include "ds/Sort.h"
+#include "jit/IonBuilder.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "js/TrackedOptimizationInfo.h"
+
+#include "vm/ObjectGroup-inl.h"
+#include "vm/TypeInference-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+using mozilla::Some;
+using mozilla::Nothing;
+
+using JS::TrackedStrategy;
+using JS::TrackedOutcome;
+using JS::TrackedTypeSite;
+using JS::ForEachTrackedOptimizationAttemptOp;
+using JS::ForEachTrackedOptimizationTypeInfoOp;
+
+bool
+TrackedOptimizations::trackTypeInfo(OptimizationTypeInfo&& ty)
+{
+ return types_.append(mozilla::Move(ty));
+}
+
+bool
+TrackedOptimizations::trackAttempt(TrackedStrategy strategy)
+{
+ OptimizationAttempt attempt(strategy, TrackedOutcome::GenericFailure);
+ currentAttempt_ = attempts_.length();
+ return attempts_.append(attempt);
+}
+
+void
+TrackedOptimizations::amendAttempt(uint32_t index)
+{
+ currentAttempt_ = index;
+}
+
+void
+TrackedOptimizations::trackOutcome(TrackedOutcome outcome)
+{
+ attempts_[currentAttempt_].setOutcome(outcome);
+}
+
+void
+TrackedOptimizations::trackSuccess()
+{
+ attempts_[currentAttempt_].setOutcome(TrackedOutcome::GenericSuccess);
+}
+
+template <class Vec>
+static bool
+VectorContentsMatch(const Vec* xs, const Vec* ys)
+{
+ if (xs->length() != ys->length())
+ return false;
+ for (auto x = xs->begin(), y = ys->begin(); x != xs->end(); x++, y++) {
+ MOZ_ASSERT(y != ys->end());
+ if (*x != *y)
+ return false;
+ }
+ return true;
+}
+
+bool
+TrackedOptimizations::matchTypes(const TempOptimizationTypeInfoVector& other) const
+{
+ return VectorContentsMatch(&types_, &other);
+}
+
+bool
+TrackedOptimizations::matchAttempts(const TempOptimizationAttemptsVector& other) const
+{
+ return VectorContentsMatch(&attempts_, &other);
+}
+
+JS_PUBLIC_API(const char*)
+JS::TrackedStrategyString(TrackedStrategy strategy)
+{
+ switch (strategy) {
+#define STRATEGY_CASE(name) \
+ case TrackedStrategy::name: \
+ return #name;
+ TRACKED_STRATEGY_LIST(STRATEGY_CASE)
+#undef STRATEGY_CASE
+
+ default:
+ MOZ_CRASH("bad strategy");
+ }
+}
+
+JS_PUBLIC_API(const char*)
+JS::TrackedOutcomeString(TrackedOutcome outcome)
+{
+ switch (outcome) {
+#define OUTCOME_CASE(name) \
+ case TrackedOutcome::name: \
+ return #name;
+ TRACKED_OUTCOME_LIST(OUTCOME_CASE)
+#undef OUTCOME_CASE
+
+ default:
+ MOZ_CRASH("bad outcome");
+ }
+}
+
+JS_PUBLIC_API(const char*)
+JS::TrackedTypeSiteString(TrackedTypeSite site)
+{
+ switch (site) {
+#define TYPESITE_CASE(name) \
+ case TrackedTypeSite::name: \
+ return #name;
+ TRACKED_TYPESITE_LIST(TYPESITE_CASE)
+#undef TYPESITE_CASE
+
+ default:
+ MOZ_CRASH("bad type site");
+ }
+}
+
+void
+SpewTempOptimizationTypeInfoVector(const TempOptimizationTypeInfoVector* types,
+ const char* indent = nullptr)
+{
+#ifdef JS_JITSPEW
+ for (const OptimizationTypeInfo* t = types->begin(); t != types->end(); t++) {
+ JitSpewStart(JitSpew_OptimizationTracking, " %s%s of type %s, type set",
+ indent ? indent : "",
+ TrackedTypeSiteString(t->site()), StringFromMIRType(t->mirType()));
+ for (uint32_t i = 0; i < t->types().length(); i++)
+ JitSpewCont(JitSpew_OptimizationTracking, " %s", TypeSet::TypeString(t->types()[i]));
+ JitSpewFin(JitSpew_OptimizationTracking);
+ }
+#endif
+}
+
+void
+SpewTempOptimizationAttemptsVector(const TempOptimizationAttemptsVector* attempts,
+ const char* indent = nullptr)
+{
+#ifdef JS_JITSPEW
+ for (const OptimizationAttempt* a = attempts->begin(); a != attempts->end(); a++) {
+ JitSpew(JitSpew_OptimizationTracking, " %s%s: %s", indent ? indent : "",
+ TrackedStrategyString(a->strategy()), TrackedOutcomeString(a->outcome()));
+ }
+#endif
+}
+
+void
+TrackedOptimizations::spew() const
+{
+#ifdef JS_JITSPEW
+ SpewTempOptimizationTypeInfoVector(&types_);
+ SpewTempOptimizationAttemptsVector(&attempts_);
+#endif
+}
+
+bool
+OptimizationTypeInfo::trackTypeSet(TemporaryTypeSet* typeSet)
+{
+ if (!typeSet)
+ return true;
+ return typeSet->enumerateTypes(&types_);
+}
+
+bool
+OptimizationTypeInfo::trackType(TypeSet::Type type)
+{
+ return types_.append(type);
+}
+
+bool
+OptimizationTypeInfo::operator ==(const OptimizationTypeInfo& other) const
+{
+ return site_ == other.site_ && mirType_ == other.mirType_ &&
+ VectorContentsMatch(&types_, &other.types_);
+}
+
+bool
+OptimizationTypeInfo::operator !=(const OptimizationTypeInfo& other) const
+{
+ return !(*this == other);
+}
+
+static inline HashNumber
+CombineHash(HashNumber h, HashNumber n)
+{
+ h += n;
+ h += (h << 10);
+ h ^= (h >> 6);
+ return h;
+}
+
+static inline HashNumber
+HashType(TypeSet::Type ty)
+{
+ if (ty.isObjectUnchecked())
+ return PointerHasher<TypeSet::ObjectKey*, 3>::hash(ty.objectKey());
+ return HashNumber(ty.raw());
+}
+
+static HashNumber
+HashTypeList(const TempTypeList& types)
+{
+ HashNumber h = 0;
+ for (uint32_t i = 0; i < types.length(); i++)
+ h = CombineHash(h, HashType(types[i]));
+ return h;
+}
+
+HashNumber
+OptimizationTypeInfo::hash() const
+{
+ return ((HashNumber(site_) << 24) + (HashNumber(mirType_) << 16)) ^ HashTypeList(types_);
+}
+
+template <class Vec>
+static HashNumber
+HashVectorContents(const Vec* xs, HashNumber h)
+{
+ for (auto x = xs->begin(); x != xs->end(); x++)
+ h = CombineHash(h, x->hash());
+ return h;
+}
+
+/* static */ HashNumber
+UniqueTrackedOptimizations::Key::hash(const Lookup& lookup)
+{
+ HashNumber h = HashVectorContents(lookup.types, 0);
+ h = HashVectorContents(lookup.attempts, h);
+ h += (h << 3);
+ h ^= (h >> 11);
+ h += (h << 15);
+ return h;
+}
+
+/* static */ bool
+UniqueTrackedOptimizations::Key::match(const Key& key, const Lookup& lookup)
+{
+ return VectorContentsMatch(key.attempts, lookup.attempts) &&
+ VectorContentsMatch(key.types, lookup.types);
+}
+
+bool
+UniqueTrackedOptimizations::add(const TrackedOptimizations* optimizations)
+{
+ MOZ_ASSERT(!sorted());
+ Key key;
+ key.types = &optimizations->types_;
+ key.attempts = &optimizations->attempts_;
+ AttemptsMap::AddPtr p = map_.lookupForAdd(key);
+ if (p) {
+ p->value().frequency++;
+ return true;
+ }
+ Entry entry;
+ entry.index = UINT8_MAX;
+ entry.frequency = 1;
+ return map_.add(p, key, entry);
+}
+
+struct FrequencyComparator
+{
+ bool operator()(const UniqueTrackedOptimizations::SortEntry& a,
+ const UniqueTrackedOptimizations::SortEntry& b,
+ bool* lessOrEqualp)
+ {
+ *lessOrEqualp = b.frequency <= a.frequency;
+ return true;
+ }
+};
+
+bool
+UniqueTrackedOptimizations::sortByFrequency(JSContext* cx)
+{
+ MOZ_ASSERT(!sorted());
+
+ JitSpew(JitSpew_OptimizationTracking, "=> Sorting unique optimizations by frequency");
+
+ // Sort by frequency.
+ Vector<SortEntry> entries(cx);
+ for (AttemptsMap::Range r = map_.all(); !r.empty(); r.popFront()) {
+ SortEntry entry;
+ entry.types = r.front().key().types;
+ entry.attempts = r.front().key().attempts;
+ entry.frequency = r.front().value().frequency;
+ if (!entries.append(entry))
+ return false;
+ }
+
+ // The compact table stores indices as uint8_t, so they must fit in a
+ // byte. In practice each script has fewer than UINT8_MAX unique
+ // optimization attempts.
+ if (entries.length() >= UINT8_MAX - 1)
+ return false;
+
+ Vector<SortEntry> scratch(cx);
+ if (!scratch.resize(entries.length()))
+ return false;
+
+ FrequencyComparator comparator;
+ MOZ_ALWAYS_TRUE(MergeSort(entries.begin(), entries.length(), scratch.begin(), comparator));
+
+ // Update map entries' indices.
+ for (size_t i = 0; i < entries.length(); i++) {
+ Key key;
+ key.types = entries[i].types;
+ key.attempts = entries[i].attempts;
+ AttemptsMap::Ptr p = map_.lookup(key);
+ MOZ_ASSERT(p);
+ p->value().index = sorted_.length();
+
+ JitSpew(JitSpew_OptimizationTracking, " Entry %" PRIuSIZE " has frequency %" PRIu32,
+ sorted_.length(), p->value().frequency);
+
+ if (!sorted_.append(entries[i]))
+ return false;
+ }
+
+ return true;
+}
+
+uint8_t
+UniqueTrackedOptimizations::indexOf(const TrackedOptimizations* optimizations) const
+{
+ MOZ_ASSERT(sorted());
+ Key key;
+ key.types = &optimizations->types_;
+ key.attempts = &optimizations->attempts_;
+ AttemptsMap::Ptr p = map_.lookup(key);
+ MOZ_ASSERT(p);
+ MOZ_ASSERT(p->value().index != UINT8_MAX);
+ return p->value().index;
+}
+
+// Assigns each unique tracked type an index; outputs a compact list.
+class jit::UniqueTrackedTypes
+{
+ public:
+ struct TypeHasher
+ {
+ typedef TypeSet::Type Lookup;
+
+ static HashNumber hash(const Lookup& ty) { return HashType(ty); }
+ static bool match(const TypeSet::Type& ty1, const TypeSet::Type& ty2) { return ty1 == ty2; }
+ };
+
+ private:
+ // Map of unique TypeSet::Types to indices.
+ typedef HashMap<TypeSet::Type, uint8_t, TypeHasher> TypesMap;
+ TypesMap map_;
+
+ Vector<TypeSet::Type, 1> list_;
+
+ public:
+ explicit UniqueTrackedTypes(JSContext* cx)
+ : map_(cx),
+ list_(cx)
+ { }
+
+ bool init() { return map_.init(); }
+ bool getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp);
+
+ uint32_t count() const { MOZ_ASSERT(map_.count() == list_.length()); return list_.length(); }
+ bool enumerate(TypeSet::TypeList* types) const;
+};
+
+bool
+UniqueTrackedTypes::getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp)
+{
+ TypesMap::AddPtr p = map_.lookupForAdd(ty);
+ if (p) {
+ *indexp = p->value();
+ return true;
+ }
+
+ // Indices are stored as uint8_t, so they must fit in a byte. In practice
+ // each script has fewer than UINT8_MAX unique observed types.
+ if (count() >= UINT8_MAX)
+ return false;
+
+ uint8_t index = (uint8_t) count();
+ if (!map_.add(p, ty, index))
+ return false;
+ if (!list_.append(ty))
+ return false;
+ *indexp = index;
+ return true;
+}
+
+bool
+UniqueTrackedTypes::enumerate(TypeSet::TypeList* types) const
+{
+ return types->append(list_.begin(), list_.end());
+}
+
+void
+IonTrackedOptimizationsRegion::unpackHeader()
+{
+ CompactBufferReader reader(start_, end_);
+ startOffset_ = reader.readUnsigned();
+ endOffset_ = reader.readUnsigned();
+ rangesStart_ = reader.currentPosition();
+ MOZ_ASSERT(startOffset_ < endOffset_);
+}
+
+void
+IonTrackedOptimizationsRegion::RangeIterator::readNext(uint32_t* startOffset, uint32_t* endOffset,
+ uint8_t* index)
+{
+ MOZ_ASSERT(more());
+
+ CompactBufferReader reader(cur_, end_);
+
+ // The very first entry isn't delta-encoded.
+ if (cur_ == start_) {
+ *startOffset = firstStartOffset_;
+ *endOffset = prevEndOffset_ = reader.readUnsigned();
+ *index = reader.readByte();
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+ return;
+ }
+
+ // Otherwise, read a delta.
+ uint32_t startDelta, length;
+ ReadDelta(reader, &startDelta, &length, index);
+ *startOffset = prevEndOffset_ + startDelta;
+ *endOffset = prevEndOffset_ = *startOffset + length;
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+}
+
+Maybe<uint8_t>
+JitcodeGlobalEntry::IonEntry::trackedOptimizationIndexAtAddr(JSRuntime *rt, void* ptr,
+ uint32_t* entryOffsetOut)
+{
+ MOZ_ASSERT(hasTrackedOptimizations());
+ MOZ_ASSERT(containsPointer(ptr));
+ uint32_t ptrOffset = ((uint8_t*) ptr) - ((uint8_t*) nativeStartAddr());
+ Maybe<IonTrackedOptimizationsRegion> region = optsRegionTable_->findRegion(ptrOffset);
+ if (region.isNothing())
+ return Nothing();
+ return region->findIndex(ptrOffset, entryOffsetOut);
+}
+
+void
+JitcodeGlobalEntry::IonEntry::forEachOptimizationAttempt(JSRuntime *rt, uint8_t index,
+ ForEachTrackedOptimizationAttemptOp& op)
+{
+ trackedOptimizationAttempts(index).forEach(op);
+}
+
+void
+JitcodeGlobalEntry::IonEntry::forEachOptimizationTypeInfo(JSRuntime *rt, uint8_t index,
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
+{
+ trackedOptimizationTypeInfo(index).forEach(op, allTrackedTypes());
+}
+
+void
+IonTrackedOptimizationsAttempts::forEach(ForEachTrackedOptimizationAttemptOp& op)
+{
+ CompactBufferReader reader(start_, end_);
+ const uint8_t* cur = start_;
+ while (cur != end_) {
+ TrackedStrategy strategy = TrackedStrategy(reader.readUnsigned());
+ TrackedOutcome outcome = TrackedOutcome(reader.readUnsigned());
+ MOZ_ASSERT(strategy < TrackedStrategy::Count);
+ MOZ_ASSERT(outcome < TrackedOutcome::Count);
+ op(strategy, outcome);
+ cur = reader.currentPosition();
+ MOZ_ASSERT(cur <= end_);
+ }
+}
+
+void
+IonTrackedOptimizationsTypeInfo::forEach(ForEachOp& op, const IonTrackedTypeVector* allTypes)
+{
+ CompactBufferReader reader(start_, end_);
+ const uint8_t* cur = start_;
+ while (cur != end_) {
+ TrackedTypeSite site = JS::TrackedTypeSite(reader.readUnsigned());
+ MOZ_ASSERT(site < JS::TrackedTypeSite::Count);
+ MIRType mirType = MIRType(reader.readUnsigned());
+ uint32_t length = reader.readUnsigned();
+ for (uint32_t i = 0; i < length; i++)
+ op.readType((*allTypes)[reader.readByte()]);
+ op(site, mirType);
+ cur = reader.currentPosition();
+ MOZ_ASSERT(cur <= end_);
+ }
+}
+
+Maybe<uint8_t>
+IonTrackedOptimizationsRegion::findIndex(uint32_t offset, uint32_t* entryOffsetOut) const
+{
+ if (offset <= startOffset_ || offset > endOffset_)
+ return Nothing();
+
+ // Linear search through the run.
+ RangeIterator iter = ranges();
+ while (iter.more()) {
+ uint32_t startOffset, endOffset;
+ uint8_t index;
+ iter.readNext(&startOffset, &endOffset, &index);
+ if (startOffset < offset && offset <= endOffset) {
+ *entryOffsetOut = endOffset;
+ return Some(index);
+ }
+ }
+ return Nothing();
+}
+
+Maybe<IonTrackedOptimizationsRegion>
+IonTrackedOptimizationsRegionTable::findRegion(uint32_t offset) const
+{
+ // For two contiguous regions, e.g., [i, j] and [j, k], an offset exactly
+ // at j will be associated with [i, j] instead of [j, k]. An offset
+ // exactly at j is often a return address from a younger frame, in which
+ // case the next region, despite starting at j, has not yet logically
+ // started execution.
+
+ static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
+ uint32_t regions = numEntries();
+ MOZ_ASSERT(regions > 0);
+
+ // For small numbers of regions, do linear search.
+ if (regions <= LINEAR_SEARCH_THRESHOLD) {
+ for (uint32_t i = 0; i < regions; i++) {
+ IonTrackedOptimizationsRegion region = entry(i);
+ if (region.startOffset() < offset && offset <= region.endOffset()) {
+ return Some(entry(i));
+ }
+ }
+ return Nothing();
+ }
+
+ // Otherwise, do binary search.
+ uint32_t i = 0;
+ while (regions > 1) {
+ uint32_t step = regions / 2;
+ uint32_t mid = i + step;
+ IonTrackedOptimizationsRegion region = entry(mid);
+
+ if (offset <= region.startOffset()) {
+ // Entry is below mid.
+ regions = step;
+ } else if (offset > region.endOffset()) {
+ // Entry is above mid.
+ i = mid;
+ regions -= step;
+ } else {
+ // Entry is in mid.
+ return Some(entry(i));
+ }
+ }
+ return Nothing();
+}
+
+/* static */ uint32_t
+IonTrackedOptimizationsRegion::ExpectedRunLength(const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end)
+{
+ MOZ_ASSERT(start < end);
+
+ // A run always has at least 1 entry, which is not delta encoded.
+ uint32_t runLength = 1;
+ uint32_t prevEndOffset = start->endOffset.offset();
+
+ for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
+ uint32_t startOffset = entry->startOffset.offset();
+ uint32_t endOffset = entry->endOffset.offset();
+ uint32_t startDelta = startOffset - prevEndOffset;
+ uint32_t length = endOffset - startOffset;
+
+ if (!IsDeltaEncodeable(startDelta, length))
+ break;
+
+ runLength++;
+ if (runLength == MAX_RUN_LENGTH)
+ break;
+
+ prevEndOffset = endOffset;
+ }
+
+ return runLength;
+}
+
+void
+OptimizationAttempt::writeCompact(CompactBufferWriter& writer) const
+{
+ writer.writeUnsigned((uint32_t) strategy_);
+ writer.writeUnsigned((uint32_t) outcome_);
+}
+
+bool
+OptimizationTypeInfo::writeCompact(JSContext* cx, CompactBufferWriter& writer,
+ UniqueTrackedTypes& uniqueTypes) const
+{
+ writer.writeUnsigned((uint32_t) site_);
+ writer.writeUnsigned((uint32_t) mirType_);
+ writer.writeUnsigned(types_.length());
+ for (uint32_t i = 0; i < types_.length(); i++) {
+ uint8_t index;
+ if (!uniqueTypes.getIndexOf(cx, types_[i], &index))
+ return false;
+ writer.writeByte(index);
+ }
+ return true;
+}
+
+/* static */ void
+IonTrackedOptimizationsRegion::ReadDelta(CompactBufferReader& reader,
+ uint32_t* startDelta, uint32_t* length,
+ uint8_t* index)
+{
+ // 2 bytes
+ // SSSS-SSSL LLLL-LII0
+ const uint32_t firstByte = reader.readByte();
+ const uint32_t secondByte = reader.readByte();
+ if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8;
+ *startDelta = encVal >> ENC1_START_DELTA_SHIFT;
+ *length = (encVal >> ENC1_LENGTH_SHIFT) & ENC1_LENGTH_MAX;
+ *index = (encVal >> ENC1_INDEX_SHIFT) & ENC1_INDEX_MAX;
+ MOZ_ASSERT(*length != 0);
+ return;
+ }
+
+ // 3 bytes
+ // SSSS-SSSS SSSS-LLLL LLII-II01
+ const uint32_t thirdByte = reader.readByte();
+ if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
+ *startDelta = encVal >> ENC2_START_DELTA_SHIFT;
+ *length = (encVal >> ENC2_LENGTH_SHIFT) & ENC2_LENGTH_MAX;
+ *index = (encVal >> ENC2_INDEX_SHIFT) & ENC2_INDEX_MAX;
+ MOZ_ASSERT(*length != 0);
+ return;
+ }
+
+ // 4 bytes
+ // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
+ const uint32_t fourthByte = reader.readByte();
+ if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
+ *startDelta = encVal >> ENC3_START_DELTA_SHIFT;
+ *length = (encVal >> ENC3_LENGTH_SHIFT) & ENC3_LENGTH_MAX;
+ *index = (encVal >> ENC3_INDEX_SHIFT) & ENC3_INDEX_MAX;
+ MOZ_ASSERT(*length != 0);
+ return;
+ }
+
+ // 5 bytes
+ // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
+ MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
+ uint64_t fifthByte = reader.readByte();
+ uint64_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24 |
+ fifthByte << 32;
+ *startDelta = encVal >> ENC4_START_DELTA_SHIFT;
+ *length = (encVal >> ENC4_LENGTH_SHIFT) & ENC4_LENGTH_MAX;
+ *index = (encVal >> ENC4_INDEX_SHIFT) & ENC4_INDEX_MAX;
+ MOZ_ASSERT(*length != 0);
+}
+
+/* static */ void
+IonTrackedOptimizationsRegion::WriteDelta(CompactBufferWriter& writer,
+ uint32_t startDelta, uint32_t length,
+ uint8_t index)
+{
+ // 2 bytes
+ // SSSS-SSSL LLLL-LII0
+ if (startDelta <= ENC1_START_DELTA_MAX &&
+ length <= ENC1_LENGTH_MAX &&
+ index <= ENC1_INDEX_MAX)
+ {
+ uint16_t val = ENC1_MASK_VAL |
+ (startDelta << ENC1_START_DELTA_SHIFT) |
+ (length << ENC1_LENGTH_SHIFT) |
+ (index << ENC1_INDEX_SHIFT);
+ writer.writeByte(val & 0xff);
+ writer.writeByte((val >> 8) & 0xff);
+ return;
+ }
+
+ // 3 bytes
+ // SSSS-SSSS SSSS-LLLL LLII-II01
+ if (startDelta <= ENC2_START_DELTA_MAX &&
+ length <= ENC2_LENGTH_MAX &&
+ index <= ENC2_INDEX_MAX)
+ {
+ uint32_t val = ENC2_MASK_VAL |
+ (startDelta << ENC2_START_DELTA_SHIFT) |
+ (length << ENC2_LENGTH_SHIFT) |
+ (index << ENC2_INDEX_SHIFT);
+ writer.writeByte(val & 0xff);
+ writer.writeByte((val >> 8) & 0xff);
+ writer.writeByte((val >> 16) & 0xff);
+ return;
+ }
+
+ // 4 bytes
+ // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
+ if (startDelta <= ENC3_START_DELTA_MAX &&
+ length <= ENC3_LENGTH_MAX)
+ {
+ // index always fits because it's a uint8_t; change this if
+ // ENC3_INDEX_MAX changes.
+ MOZ_ASSERT(ENC3_INDEX_MAX == UINT8_MAX);
+ uint32_t val = ENC3_MASK_VAL |
+ (startDelta << ENC3_START_DELTA_SHIFT) |
+ (length << ENC3_LENGTH_SHIFT) |
+ (index << ENC3_INDEX_SHIFT);
+ writer.writeByte(val & 0xff);
+ writer.writeByte((val >> 8) & 0xff);
+ writer.writeByte((val >> 16) & 0xff);
+ writer.writeByte((val >> 24) & 0xff);
+ return;
+ }
+
+ // 5 bytes
+ // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
+ if (startDelta <= ENC4_START_DELTA_MAX &&
+ length <= ENC4_LENGTH_MAX)
+ {
+ // index always fits because it's a uint8_t; change this if
+ // ENC4_INDEX_MAX changes.
+ MOZ_ASSERT(ENC4_INDEX_MAX == UINT8_MAX);
+ uint64_t val = ENC4_MASK_VAL |
+ (((uint64_t) startDelta) << ENC4_START_DELTA_SHIFT) |
+ (((uint64_t) length) << ENC4_LENGTH_SHIFT) |
+ (((uint64_t) index) << ENC4_INDEX_SHIFT);
+ writer.writeByte(val & 0xff);
+ writer.writeByte((val >> 8) & 0xff);
+ writer.writeByte((val >> 16) & 0xff);
+ writer.writeByte((val >> 24) & 0xff);
+ writer.writeByte((val >> 32) & 0xff);
+ return;
+ }
+
+ MOZ_CRASH("startDelta,length,index triple too large to encode.");
+}
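+
+// A worked example of the 2-byte form, reading the shifts off the
+// SSSS-SSSL LLLL-LII0 layout above (low tag bit 0): startDelta = 10,
+// length = 5, index = 1 encodes as (10 << 9) | (5 << 3) | (1 << 1) = 0x142a,
+// emitted as the bytes 0x2a, 0x14; ReadDelta reassembles 0x142a and recovers
+// the same triple.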
+
+/* static */ bool
+IonTrackedOptimizationsRegion::WriteRun(CompactBufferWriter& writer,
+ const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end,
+ const UniqueTrackedOptimizations& unique)
+{
+ // Write the header, which is the range that this whole run encompasses.
+ JitSpew(JitSpew_OptimizationTracking, " Header: [%" PRIuSIZE ", %" PRIuSIZE "]",
+ start->startOffset.offset(), (end - 1)->endOffset.offset());
+ writer.writeUnsigned(start->startOffset.offset());
+ writer.writeUnsigned((end - 1)->endOffset.offset());
+
+ // Write the first entry of the run, which is not delta-encoded.
+ JitSpew(JitSpew_OptimizationTracking,
+ " [%6" PRIuSIZE ", %6" PRIuSIZE "] vector %3u, offset %4" PRIuSIZE,
+ start->startOffset.offset(), start->endOffset.offset(),
+ unique.indexOf(start->optimizations), writer.length());
+ uint32_t prevEndOffset = start->endOffset.offset();
+ writer.writeUnsigned(prevEndOffset);
+ writer.writeByte(unique.indexOf(start->optimizations));
+
+ // Delta encode the run.
+ for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
+ uint32_t startOffset = entry->startOffset.offset();
+ uint32_t endOffset = entry->endOffset.offset();
+
+ uint32_t startDelta = startOffset - prevEndOffset;
+ uint32_t length = endOffset - startOffset;
+ uint8_t index = unique.indexOf(entry->optimizations);
+
+ JitSpew(JitSpew_OptimizationTracking,
+ " [%6u, %6u] delta [+%5u, +%5u] vector %3u, offset %4" PRIuSIZE,
+ startOffset, endOffset, startDelta, length, index, writer.length());
+
+ WriteDelta(writer, startDelta, length, index);
+
+ prevEndOffset = endOffset;
+ }
+
+ if (writer.oom())
+ return false;
+
+ return true;
+}
+
+static bool
+WriteOffsetsTable(CompactBufferWriter& writer, const Vector<uint32_t, 16>& offsets,
+ uint32_t* tableOffsetp)
+{
+ // 4-byte align for the uint32s.
+ uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
+ if (padding == sizeof(uint32_t))
+ padding = 0;
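+ // E.g. a writer length of 13 needs 3 bytes of padding, while a length that
+ // is already a multiple of 4 needs none.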
+ JitSpew(JitSpew_OptimizationTracking, " Padding %u byte%s",
+ padding, padding == 1 ? "" : "s");
+ for (uint32_t i = 0; i < padding; i++)
+ writer.writeByte(0);
+
+ // Record the start of the table to compute reverse offsets for entries.
+ uint32_t tableOffset = writer.length();
+
+ // Write how many bytes were padded and numEntries.
+ writer.writeNativeEndianUint32_t(padding);
+ writer.writeNativeEndianUint32_t(offsets.length());
+
+ // Write entry offset table.
+ for (size_t i = 0; i < offsets.length(); i++) {
+ JitSpew(JitSpew_OptimizationTracking, " Entry %" PRIuSIZE " reverse offset %u",
+ i, tableOffset - padding - offsets[i]);
+ writer.writeNativeEndianUint32_t(tableOffset - padding - offsets[i]);
+ }
+
+ if (writer.oom())
+ return false;
+
+ *tableOffsetp = tableOffset;
+ return true;
+}
+
+static JSFunction*
+MaybeConstructorFromType(TypeSet::Type ty)
+{
+ if (ty.isUnknown() || ty.isAnyObject() || !ty.isGroup())
+ return nullptr;
+ ObjectGroup* obj = ty.group();
+ TypeNewScript* newScript = obj->newScript();
+ if (!newScript && obj->maybeUnboxedLayout())
+ newScript = obj->unboxedLayout().newScript();
+ return newScript ? newScript->function() : nullptr;
+}
+
+static void
+InterpretedFunctionFilenameAndLineNumber(JSFunction* fun, const char** filename,
+ Maybe<unsigned>* lineno)
+{
+ if (fun->hasScript()) {
+ *filename = fun->nonLazyScript()->maybeForwardedScriptSource()->filename();
+ *lineno = Some((unsigned) fun->nonLazyScript()->lineno());
+ } else if (fun->lazyScriptOrNull()) {
+ *filename = fun->lazyScript()->maybeForwardedScriptSource()->filename();
+ *lineno = Some((unsigned) fun->lazyScript()->lineno());
+ } else {
+ *filename = "(self-hosted builtin)";
+ *lineno = Nothing();
+ }
+}
+
+static void
+SpewConstructor(TypeSet::Type ty, JSFunction* constructor)
+{
+#ifdef JS_JITSPEW
+ if (!constructor->isInterpreted()) {
+ JitSpew(JitSpew_OptimizationTracking, " Unique type %s has native constructor",
+ TypeSet::TypeString(ty));
+ return;
+ }
+
+ char buf[512];
+ if (constructor->displayAtom())
+ PutEscapedString(buf, 512, constructor->displayAtom(), 0);
+ else
+ snprintf(buf, mozilla::ArrayLength(buf), "??");
+
+ const char* filename;
+ Maybe<unsigned> lineno;
+ InterpretedFunctionFilenameAndLineNumber(constructor, &filename, &lineno);
+
+ JitSpew(JitSpew_OptimizationTracking, " Unique type %s has constructor %s (%s:%u)",
+ TypeSet::TypeString(ty), buf, filename, lineno.isSome() ? *lineno : 0);
+#endif
+}
+
+static void
+SpewAllocationSite(TypeSet::Type ty, JSScript* script, uint32_t offset)
+{
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_OptimizationTracking, " Unique type %s has alloc site %s:%u",
+ TypeSet::TypeString(ty), script->filename(),
+ PCToLineNumber(script, script->offsetToPC(offset)));
+#endif
+}
+
+bool
+jit::WriteIonTrackedOptimizationsTable(JSContext* cx, CompactBufferWriter& writer,
+ const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end,
+ const UniqueTrackedOptimizations& unique,
+ uint32_t* numRegions,
+ uint32_t* regionTableOffsetp,
+ uint32_t* typesTableOffsetp,
+ uint32_t* optimizationTableOffsetp,
+ IonTrackedTypeVector* allTypes)
+{
+ MOZ_ASSERT(unique.sorted());
+
+#ifdef JS_JITSPEW
+ // Spew training data, which may be fed into a script to determine a good
+ // encoding strategy.
+ if (JitSpewEnabled(JitSpew_OptimizationTracking)) {
+ JitSpewStart(JitSpew_OptimizationTracking, "=> Training data: ");
+ for (const NativeToTrackedOptimizations* entry = start; entry != end; entry++) {
+ JitSpewCont(JitSpew_OptimizationTracking, "%" PRIuSIZE ",%" PRIuSIZE ",%u ",
+ entry->startOffset.offset(), entry->endOffset.offset(),
+ unique.indexOf(entry->optimizations));
+ }
+ JitSpewFin(JitSpew_OptimizationTracking);
+ }
+#endif
+
+ Vector<uint32_t, 16> offsets(cx);
+ const NativeToTrackedOptimizations* entry = start;
+
+ // Write out region payloads, partitioned into runs.
+ JitSpew(JitSpew_Profiling, "=> Writing regions");
+ while (entry != end) {
+ uint32_t runLength = IonTrackedOptimizationsRegion::ExpectedRunLength(entry, end);
+ JitSpew(JitSpew_OptimizationTracking,
+ " Run at entry %" PRIuSIZE ", length %" PRIu32 ", offset %" PRIuSIZE,
+ size_t(entry - start), runLength, writer.length());
+
+ if (!offsets.append(writer.length()))
+ return false;
+
+ if (!IonTrackedOptimizationsRegion::WriteRun(writer, entry, entry + runLength, unique))
+ return false;
+
+ entry += runLength;
+ }
+
+ // Write out the table indexing into the payloads. 4-byte align for the uint32s.
+ if (!WriteOffsetsTable(writer, offsets, regionTableOffsetp))
+ return false;
+
+ *numRegions = offsets.length();
+
+ // Clear offsets so that it may be reused below for the unique
+ // optimizations table.
+ offsets.clear();
+
+ const UniqueTrackedOptimizations::SortedVector& vec = unique.sortedVector();
+ JitSpew(JitSpew_OptimizationTracking, "=> Writing unique optimizations table with %" PRIuSIZE " entr%s",
+ vec.length(), vec.length() == 1 ? "y" : "ies");
+
+ // Write out type info payloads.
+ UniqueTrackedTypes uniqueTypes(cx);
+ if (!uniqueTypes.init())
+ return false;
+
+ for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
+ const TempOptimizationTypeInfoVector* v = p->types;
+ JitSpew(JitSpew_OptimizationTracking,
+ " Type info entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
+ size_t(p - vec.begin()), v->length(), writer.length());
+ SpewTempOptimizationTypeInfoVector(v, " ");
+
+ if (!offsets.append(writer.length()))
+ return false;
+
+ for (const OptimizationTypeInfo* t = v->begin(); t != v->end(); t++) {
+ if (!t->writeCompact(cx, writer, uniqueTypes))
+ return false;
+ }
+ }
+
+ // Enumerate the unique types, and pull out any 'new' script constructor
+ // functions and allocation site information. We do this during linking
+ // instead of during profiling to avoid touching compartment tables during
+ // profiling. Additionally, TypeNewScript is subject to GC in the
+ // meantime.
+ TypeSet::TypeList uniqueTypeList;
+ if (!uniqueTypes.enumerate(&uniqueTypeList))
+ return false;
+ for (uint32_t i = 0; i < uniqueTypeList.length(); i++) {
+ TypeSet::Type ty = uniqueTypeList[i];
+ if (JSFunction* constructor = MaybeConstructorFromType(ty)) {
+ if (!allTypes->append(IonTrackedTypeWithAddendum(ty, constructor)))
+ return false;
+ SpewConstructor(ty, constructor);
+ } else {
+ JSScript* script;
+ uint32_t offset;
+ if (!ty.isUnknown() && !ty.isAnyObject() && ty.isGroup() &&
+ ObjectGroup::findAllocationSite(cx, ty.group(), &script, &offset))
+ {
+ if (!allTypes->append(IonTrackedTypeWithAddendum(ty, script, offset)))
+ return false;
+ SpewAllocationSite(ty, script, offset);
+ } else {
+ if (!allTypes->append(IonTrackedTypeWithAddendum(ty)))
+ return false;
+ }
+ }
+ }
+
+ if (!WriteOffsetsTable(writer, offsets, typesTableOffsetp))
+ return false;
+ offsets.clear();
+
+ // Write out attempts payloads.
+ for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
+ const TempOptimizationAttemptsVector* v = p->attempts;
+ JitSpew(JitSpew_OptimizationTracking,
+ " Attempts entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
+ size_t(p - vec.begin()), v->length(), writer.length());
+ SpewTempOptimizationAttemptsVector(v, " ");
+
+ if (!offsets.append(writer.length()))
+ return false;
+
+ for (const OptimizationAttempt* a = v->begin(); a != v->end(); a++)
+ a->writeCompact(writer);
+ }
+
+ return WriteOffsetsTable(writer, offsets, optimizationTableOffsetp);
+}
+
+
+BytecodeSite*
+IonBuilder::maybeTrackedOptimizationSite(jsbytecode* pc)
+{
+ // BytecodeSites that track optimizations need to be 1-1 with the pc
+ // when optimization tracking is enabled, so that all MIR generated by
+ // a single pc is tracked in one place, even across basic blocks.
+ //
+ // Alternatively, we could make all BytecodeSites 1-1 with the pc, but
+ // there is no real need as optimization tracking is a toggled
+ // feature.
+ //
+ // Since sites that track optimizations should be sparse, just do a
+ // reverse linear search, as we're most likely advancing in pc.
+ MOZ_ASSERT(isOptimizationTrackingEnabled());
+ for (size_t i = trackedOptimizationSites_.length(); i != 0; i--) {
+ BytecodeSite* site = trackedOptimizationSites_[i - 1];
+ if (site->pc() == pc) {
+ MOZ_ASSERT(site->tree() == info().inlineScriptTree());
+ return site;
+ }
+ }
+ return nullptr;
+}
+
+void
+IonBuilder::startTrackingOptimizations()
+{
+ if (isOptimizationTrackingEnabled()) {
+ BytecodeSite* site = maybeTrackedOptimizationSite(current->trackedSite()->pc());
+
+ if (!site) {
+ site = current->trackedSite();
+ site->setOptimizations(new(alloc()) TrackedOptimizations(alloc()));
+ // OOMs are handled as if optimization tracking were turned off.
+ if (!trackedOptimizationSites_.append(site))
+ site = nullptr;
+ } else if (site->hasOptimizations()) {
+ // The same bytecode may be visited multiple times (see
+ // restartLoop). Only the last time matters, so clear any previous
+ // tracked optimizations.
+ site->optimizations()->clear();
+ }
+
+ // The case of !site->hasOptimizations() means we had an OOM when
+ // previously attempting to track optimizations. Leave
+ // site->optimizations_ nullptr to leave optimization tracking off.
+
+ if (site)
+ current->updateTrackedSite(site);
+ }
+}
+
+void
+IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, MIRType mirType,
+ TemporaryTypeSet* typeSet)
+{
+ BytecodeSite* site = current->trackedSite();
+ // OOMs are handled as if optimization tracking were turned off.
+ OptimizationTypeInfo typeInfo(alloc(), kind, mirType);
+ if (!typeInfo.trackTypeSet(typeSet)) {
+ site->setOptimizations(nullptr);
+ return;
+ }
+ if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
+ site->setOptimizations(nullptr);
+}
+
+void
+IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, JSObject* obj)
+{
+ BytecodeSite* site = current->trackedSite();
+ // OOMs are handled as if optimization tracking were turned off.
+ OptimizationTypeInfo typeInfo(alloc(), kind, MIRType::Object);
+ if (!typeInfo.trackType(TypeSet::ObjectType(obj)))
+ return;
+ if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
+ site->setOptimizations(nullptr);
+}
+
+void
+IonBuilder::trackTypeInfoUnchecked(CallInfo& callInfo)
+{
+ MDefinition* thisArg = callInfo.thisArg();
+ trackTypeInfoUnchecked(TrackedTypeSite::Call_This, thisArg->type(), thisArg->resultTypeSet());
+
+ for (uint32_t i = 0; i < callInfo.argc(); i++) {
+ MDefinition* arg = callInfo.getArg(i);
+ trackTypeInfoUnchecked(TrackedTypeSite::Call_Arg, arg->type(), arg->resultTypeSet());
+ }
+
+ TemporaryTypeSet* returnTypes = getInlineReturnTypeSet();
+ trackTypeInfoUnchecked(TrackedTypeSite::Call_Return, returnTypes->getKnownMIRType(),
+ returnTypes);
+}
+
+void
+IonBuilder::trackOptimizationAttemptUnchecked(TrackedStrategy strategy)
+{
+ BytecodeSite* site = current->trackedSite();
+ // OOMs are handled as if optimization tracking were turned off.
+ if (!site->optimizations()->trackAttempt(strategy))
+ site->setOptimizations(nullptr);
+}
+
+void
+IonBuilder::amendOptimizationAttemptUnchecked(uint32_t index)
+{
+ const BytecodeSite* site = current->trackedSite();
+ site->optimizations()->amendAttempt(index);
+}
+
+void
+IonBuilder::trackOptimizationOutcomeUnchecked(TrackedOutcome outcome)
+{
+ const BytecodeSite* site = current->trackedSite();
+ site->optimizations()->trackOutcome(outcome);
+}
+
+void
+IonBuilder::trackOptimizationSuccessUnchecked()
+{
+ const BytecodeSite* site = current->trackedSite();
+ site->optimizations()->trackSuccess();
+}
+
+void
+IonBuilder::trackInlineSuccessUnchecked(InliningStatus status)
+{
+ if (status == InliningStatus_Inlined)
+ trackOptimizationOutcome(TrackedOutcome::Inlined);
+}
+
+static JSFunction*
+FunctionFromTrackedType(const IonTrackedTypeWithAddendum& tracked)
+{
+ if (tracked.hasConstructor())
+ return tracked.constructor;
+
+ TypeSet::Type ty = tracked.type;
+
+ if (ty.isSingleton()) {
+ JSObject* obj = ty.singleton();
+ return obj->is<JSFunction>() ? &obj->as<JSFunction>() : nullptr;
+ }
+
+ return ty.group()->maybeInterpretedFunction();
+}
+
+void
+IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::readType(const IonTrackedTypeWithAddendum& tracked)
+{
+ TypeSet::Type ty = tracked.type;
+
+ if (ty.isPrimitive() || ty.isUnknown() || ty.isAnyObject()) {
+ op_.readType("primitive", TypeSet::NonObjectTypeString(ty), nullptr, Nothing());
+ return;
+ }
+
+ char buf[512];
+ const uint32_t bufsize = mozilla::ArrayLength(buf);
+
+ if (JSFunction* fun = FunctionFromTrackedType(tracked)) {
+ // The displayAtom is useful for identifying both native and
+ // interpreted functions.
+ char* name = nullptr;
+ if (fun->displayAtom()) {
+ PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
+ name = buf;
+ }
+
+ if (fun->isNative()) {
+ //
+ // Try printing out the displayAtom of the native function and the
+ // absolute address of the native function pointer.
+ //
+ // Note that this address is not usable without knowing the
+ // starting address at which our shared library is loaded. Shared
+ // library information is exposed by the profiler. If this address
+ // needs to be symbolicated manually (e.g., when it is gotten via
+ // debug spewing of all optimization information), it needs to be
+ // converted to an offset from the beginning of the shared library
+ // for use with utilities like `addr2line` on Linux and `atos` on
+ // OS X. Converting to an offset may be done via dladdr():
+ //
+ // void* addr = JS_FUNC_TO_DATA_PTR(void*, fun->native());
+ // uintptr_t offset;
+ // Dl_info info;
+ // if (dladdr(addr, &info) != 0)
+ // offset = uintptr_t(addr) - uintptr_t(info.dli_fbase);
+ //
+ char locationBuf[20];
+ if (!name) {
+ uintptr_t addr = JS_FUNC_TO_DATA_PTR(uintptr_t, fun->native());
+ snprintf(locationBuf, mozilla::ArrayLength(locationBuf), "%" PRIxPTR, addr);
+ }
+ op_.readType("native", name, name ? nullptr : locationBuf, Nothing());
+ return;
+ }
+
+ const char* filename;
+ Maybe<unsigned> lineno;
+ InterpretedFunctionFilenameAndLineNumber(fun, &filename, &lineno);
+ op_.readType(tracked.constructor ? "constructor" : "function",
+ name, filename, lineno);
+ return;
+ }
+
+ const char* className = ty.objectKey()->clasp()->name;
+ snprintf(buf, bufsize, "[object %s]", className);
+
+ if (tracked.hasAllocationSite()) {
+ JSScript* script = tracked.script;
+ op_.readType("alloc site", buf,
+ script->maybeForwardedScriptSource()->filename(),
+ Some(PCToLineNumber(script, script->offsetToPC(tracked.offset))));
+ return;
+ }
+
+ if (ty.isGroup()) {
+ op_.readType("prototype", buf, nullptr, Nothing());
+ return;
+ }
+
+ op_.readType("singleton", buf, nullptr, Nothing());
+}
+
+void
+IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::operator()(JS::TrackedTypeSite site,
+ MIRType mirType)
+{
+ op_(site, StringFromMIRType(mirType));
+}
+
+typedef JS::ForEachProfiledFrameOp::FrameHandle FrameHandle;
+
+void
+FrameHandle::updateHasTrackedOptimizations()
+{
+ // All inlined frames will have the same optimization information by
+ // virtue of sharing the JitcodeGlobalEntry, but such information is
+ // only interpretable on the youngest frame.
+ if (depth() != 0)
+ return;
+ if (!entry_.hasTrackedOptimizations())
+ return;
+
+ uint32_t entryOffset;
+ optsIndex_ = entry_.trackedOptimizationIndexAtAddr(rt_, addr_, &entryOffset);
+ if (optsIndex_.isSome())
+ canonicalAddr_ = (void*)(((uint8_t*) entry_.nativeStartAddr()) + entryOffset);
+}
+
+JS_PUBLIC_API(void)
+FrameHandle::forEachOptimizationAttempt(ForEachTrackedOptimizationAttemptOp& op,
+ JSScript** scriptOut, jsbytecode** pcOut) const
+{
+ MOZ_ASSERT(optsIndex_.isSome());
+ entry_.forEachOptimizationAttempt(rt_, *optsIndex_, op);
+ entry_.youngestFrameLocationAtAddr(rt_, addr_, scriptOut, pcOut);
+}
+
+JS_PUBLIC_API(void)
+FrameHandle::forEachOptimizationTypeInfo(ForEachTrackedOptimizationTypeInfoOp& op) const
+{
+ MOZ_ASSERT(optsIndex_.isSome());
+ IonTrackedOptimizationsTypeInfo::ForEachOpAdapter adapter(op);
+ entry_.forEachOptimizationTypeInfo(rt_, *optsIndex_, adapter);
+}
diff --git a/js/src/jit/OptimizationTracking.h b/js/src/jit/OptimizationTracking.h
new file mode 100644
index 000000000..a37c6377c
--- /dev/null
+++ b/js/src/jit/OptimizationTracking.h
@@ -0,0 +1,575 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_OptimizationTracking_h
+#define jit_OptimizationTracking_h
+
+#include "mozilla/Maybe.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/CompileInfo.h"
+#include "jit/JitAllocPolicy.h"
+#include "js/TrackedOptimizationInfo.h"
+#include "vm/TypeInference.h"
+
+namespace js {
+
+namespace jit {
+
+struct NativeToTrackedOptimizations;
+
+class OptimizationAttempt
+{
+ JS::TrackedStrategy strategy_;
+ JS::TrackedOutcome outcome_;
+
+ public:
+ OptimizationAttempt(JS::TrackedStrategy strategy, JS::TrackedOutcome outcome)
+ : strategy_(strategy),
+ outcome_(outcome)
+ { }
+
+ void setOutcome(JS::TrackedOutcome outcome) { outcome_ = outcome; }
+ bool succeeded() const { return outcome_ >= JS::TrackedOutcome::GenericSuccess; }
+ bool failed() const { return outcome_ < JS::TrackedOutcome::GenericSuccess; }
+ JS::TrackedStrategy strategy() const { return strategy_; }
+ JS::TrackedOutcome outcome() const { return outcome_; }
+
+ bool operator ==(const OptimizationAttempt& other) const {
+ return strategy_ == other.strategy_ && outcome_ == other.outcome_;
+ }
+ bool operator !=(const OptimizationAttempt& other) const {
+ return strategy_ != other.strategy_ || outcome_ != other.outcome_;
+ }
+ HashNumber hash() const {
+ return (HashNumber(strategy_) << 8) + HashNumber(outcome_);
+ }
+
+ void writeCompact(CompactBufferWriter& writer) const;
+};
+
+typedef Vector<OptimizationAttempt, 4, JitAllocPolicy> TempOptimizationAttemptsVector;
+typedef Vector<TypeSet::Type, 1, JitAllocPolicy> TempTypeList;
+
+class UniqueTrackedTypes;
+
+class OptimizationTypeInfo
+{
+ JS::TrackedTypeSite site_;
+ MIRType mirType_;
+ TempTypeList types_;
+
+ public:
+ OptimizationTypeInfo(OptimizationTypeInfo&& other)
+ : site_(other.site_),
+ mirType_(other.mirType_),
+ types_(mozilla::Move(other.types_))
+ { }
+
+ OptimizationTypeInfo(TempAllocator& alloc, JS::TrackedTypeSite site, MIRType mirType)
+ : site_(site),
+ mirType_(mirType),
+ types_(alloc)
+ { }
+
+ MOZ_MUST_USE bool trackTypeSet(TemporaryTypeSet* typeSet);
+ MOZ_MUST_USE bool trackType(TypeSet::Type type);
+
+ JS::TrackedTypeSite site() const { return site_; }
+ MIRType mirType() const { return mirType_; }
+ const TempTypeList& types() const { return types_; }
+
+ bool operator ==(const OptimizationTypeInfo& other) const;
+ bool operator !=(const OptimizationTypeInfo& other) const;
+
+ HashNumber hash() const;
+
+ MOZ_MUST_USE bool writeCompact(JSContext* cx, CompactBufferWriter& writer,
+ UniqueTrackedTypes& uniqueTypes) const;
+};
+
+typedef Vector<OptimizationTypeInfo, 1, JitAllocPolicy> TempOptimizationTypeInfoVector;
+
+// Tracks the optimization attempts made at a bytecode location.
+class TrackedOptimizations : public TempObject
+{
+ friend class UniqueTrackedOptimizations;
+ TempOptimizationTypeInfoVector types_;
+ TempOptimizationAttemptsVector attempts_;
+ uint32_t currentAttempt_;
+
+ public:
+ explicit TrackedOptimizations(TempAllocator& alloc)
+ : types_(alloc),
+ attempts_(alloc),
+ currentAttempt_(UINT32_MAX)
+ { }
+
+ void clear() {
+ types_.clear();
+ attempts_.clear();
+ currentAttempt_ = UINT32_MAX;
+ }
+
+ MOZ_MUST_USE bool trackTypeInfo(OptimizationTypeInfo&& ty);
+
+ MOZ_MUST_USE bool trackAttempt(JS::TrackedStrategy strategy);
+ void amendAttempt(uint32_t index);
+ void trackOutcome(JS::TrackedOutcome outcome);
+ void trackSuccess();
+
+ bool matchTypes(const TempOptimizationTypeInfoVector& other) const;
+ bool matchAttempts(const TempOptimizationAttemptsVector& other) const;
+
+ void spew() const;
+};
+
+// Assigns each unique sequence of optimization attempts an index; outputs a
+// compact table.
+class UniqueTrackedOptimizations
+{
+ public:
+ struct SortEntry
+ {
+ const TempOptimizationTypeInfoVector* types;
+ const TempOptimizationAttemptsVector* attempts;
+ uint32_t frequency;
+ };
+ typedef Vector<SortEntry, 4> SortedVector;
+
+ private:
+ struct Key
+ {
+ const TempOptimizationTypeInfoVector* types;
+ const TempOptimizationAttemptsVector* attempts;
+
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup& lookup);
+ static bool match(const Key& key, const Lookup& lookup);
+ static void rekey(Key& key, const Key& newKey) {
+ key = newKey;
+ }
+ };
+
+ struct Entry
+ {
+ uint8_t index;
+ uint32_t frequency;
+ };
+
+ // Map of unique (TempOptimizationTypeInfoVector,
+ // TempOptimizationAttemptsVector) pairs to indices.
+ typedef HashMap<Key, Entry, Key> AttemptsMap;
+ AttemptsMap map_;
+
+ // TempOptimizationAttemptsVectors sorted by frequency.
+ SortedVector sorted_;
+
+ public:
+ explicit UniqueTrackedOptimizations(JSContext* cx)
+ : map_(cx),
+ sorted_(cx)
+ { }
+
+ MOZ_MUST_USE bool init() { return map_.init(); }
+ MOZ_MUST_USE bool add(const TrackedOptimizations* optimizations);
+
+ MOZ_MUST_USE bool sortByFrequency(JSContext* cx);
+ bool sorted() const { return !sorted_.empty(); }
+ uint32_t count() const { MOZ_ASSERT(sorted()); return sorted_.length(); }
+ const SortedVector& sortedVector() const { MOZ_ASSERT(sorted()); return sorted_; }
+ uint8_t indexOf(const TrackedOptimizations* optimizations) const;
+};
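+
+// A sketch of the expected writer-side flow (the iteration over bytecode
+// sites is illustrative, not actual code):
+//
+// UniqueTrackedOptimizations unique(cx);
+// if (!unique.init())
+// return false;
+// for each site's TrackedOptimizations* opts:
+// if (!unique.add(opts))
+// return false;
+// if (!unique.sortByFrequency(cx))
+// return false;
+// // When writing the compact tables, map each site back to its index:
+// uint8_t index = unique.indexOf(opts);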
+
+// A compact table of tracked optimization information. Pictorially,
+//
+// +------------------------------------------------+
+// | Region 1 | |
+// |------------------------------------------------| |
+// | Region 2 | |
+// |------------------------------------------------| |-- PayloadR of list-of-list of
+// | ... | | range triples (see below)
+// |------------------------------------------------| |
+// | Region M | |
+// +================================================+ <- IonTrackedOptimizationsRegionTable
+// | uint32_t numRegions_ = M | |
+// +------------------------------------------------+ |
+// | Region 1 | |
+// | uint32_t regionOffset = size(PayloadR) | |
+// +------------------------------------------------+ |-- Table
+// | ... | |
+// +------------------------------------------------+ |
+// | Region M | |
+// | uint32_t regionOffset | |
+// +================================================+
+// | Optimization type info 1 | |
+// |------------------------------------------------| |
+// | Optimization type info 2 | |-- PayloadT of list of
+// |------------------------------------------------| | OptimizationTypeInfo in
+// | ... | | order of decreasing frequency
+// |------------------------------------------------| |
+// | Optimization type info N | |
+// +================================================+ <- IonTrackedOptimizationsTypesTable
+// | uint32_t numEntries_ = N | |
+// +------------------------------------------------+ |
+// | Optimization type info 1 | |
+// | uint32_t entryOffset = size(PayloadT) | |
+// +------------------------------------------------+ |-- Table
+// | ... | |
+// +------------------------------------------------+ |
+// | Optimization type info N | |
+// | uint32_t entryOffset | |
+// +================================================+
+// | Optimization attempts 1 | |
+// |------------------------------------------------| |
+// | Optimization attempts 2 | |-- PayloadA of list of
+// |------------------------------------------------| | OptimizationAttempts in
+// | ... | | order of decreasing frequency
+// |------------------------------------------------| |
+// | Optimization attempts N | |
+// +================================================+ <- IonTrackedOptimizationsAttemptsTable
+// | uint32_t numEntries_ = N | |
+// +------------------------------------------------+ |
+// | Optimization attempts 1 | |
+// | uint32_t entryOffset = size(PayloadA) | |
+// +------------------------------------------------+ |-- Table
+// | ... | |
+// +------------------------------------------------+ |
+// | Optimization attempts N | |
+// | uint32_t entryOffset | |
+// +------------------------------------------------+
+//
+// Abstractly, each region in the PayloadR section is a list of triples of the
+// following, in order of ascending startOffset:
+//
+// (startOffset, endOffset, optimization attempts index)
+//
+// The range [startOffset, endOffset) is the set of native machine code
+// offsets to which the optimization attempts referred to by the index apply.
+//
+// Concretely, each region starts with a header of:
+//
+// { startOffset : 32, endOffset : 32 }
+//
+// followed by an (endOffset, index) pair, then by the delta-encoded variant
+// triples described below.
+//
+// Each list of type infos in the PayloadT section is a list of triples:
+//
+// (kind, MIR type, type set)
+//
+// The type set itself is stored separately in another vector; what is encoded
+// here is the (offset, length) pair needed to index into that vector.
+//
+// Each list of optimization attempts in the PayloadA section is a list of
+// pairs:
+//
+// (strategy, outcome)
+//
+// The tail tables for PayloadR, PayloadT, and PayloadA all use reverse
+// offsets from the table pointers.
+
+class IonTrackedOptimizationsRegion
+{
+ const uint8_t* start_;
+ const uint8_t* end_;
+
+ // Unpacked state.
+ uint32_t startOffset_;
+ uint32_t endOffset_;
+ const uint8_t* rangesStart_;
+
+ void unpackHeader();
+
+ public:
+ IonTrackedOptimizationsRegion(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end),
+ startOffset_(0), endOffset_(0), rangesStart_(nullptr)
+ {
+ MOZ_ASSERT(start < end);
+ unpackHeader();
+ }
+
+ // Offsets for the entire range that this region covers.
+ //
+ // This range, like the delta-encoded ranges inside it, is half-open at
+ // the ending address: [startOffset, endOffset).
+ uint32_t startOffset() const { return startOffset_; }
+ uint32_t endOffset() const { return endOffset_; }
+
+ class RangeIterator
+ {
+ const uint8_t* cur_;
+ const uint8_t* start_;
+ const uint8_t* end_;
+
+ uint32_t firstStartOffset_;
+ uint32_t prevEndOffset_;
+
+ public:
+ RangeIterator(const uint8_t* start, const uint8_t* end, uint32_t startOffset)
+ : cur_(start), start_(start), end_(end),
+ firstStartOffset_(startOffset), prevEndOffset_(0)
+ { }
+
+ bool more() const { return cur_ < end_; }
+ void readNext(uint32_t* startOffset, uint32_t* endOffset, uint8_t* index);
+ };
+
+ RangeIterator ranges() const { return RangeIterator(rangesStart_, end_, startOffset_); }
+
+ // Find the index of tracked optimization info (e.g., type info and
+ // attempts) at a native code offset.
+ mozilla::Maybe<uint8_t> findIndex(uint32_t offset, uint32_t* entryOffsetOut) const;
+
+ // For the variants below, S stands for startDelta, L for length, and I
+ // for index. These were automatically generated from training on the
+ // Octane benchmark.
+ //
+ // byte 1 byte 0
+ // SSSS-SSSL LLLL-LII0
+ // startDelta max 127, length max 63, index max 3
+
+ static const uint32_t ENC1_MASK = 0x1;
+ static const uint32_t ENC1_MASK_VAL = 0x0;
+
+ static const uint32_t ENC1_START_DELTA_MAX = 0x7f;
+ static const uint32_t ENC1_START_DELTA_SHIFT = 9;
+
+ static const uint32_t ENC1_LENGTH_MAX = 0x3f;
+ static const uint32_t ENC1_LENGTH_SHIFT = 3;
+
+ static const uint32_t ENC1_INDEX_MAX = 0x3;
+ static const uint32_t ENC1_INDEX_SHIFT = 1;
+
+ // byte 2 byte 1 byte 0
+ // SSSS-SSSS SSSS-LLLL LLII-II01
+ // startDelta max 4095, length max 63, index max 15
+
+ static const uint32_t ENC2_MASK = 0x3;
+ static const uint32_t ENC2_MASK_VAL = 0x1;
+
+ static const uint32_t ENC2_START_DELTA_MAX = 0xfff;
+ static const uint32_t ENC2_START_DELTA_SHIFT = 12;
+
+ static const uint32_t ENC2_LENGTH_MAX = 0x3f;
+ static const uint32_t ENC2_LENGTH_SHIFT = 6;
+
+ static const uint32_t ENC2_INDEX_MAX = 0xf;
+ static const uint32_t ENC2_INDEX_SHIFT = 2;
+
+ // byte 3 byte 2 byte 1 byte 0
+ // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
+ // startDelta max 2047, length max 1023, index max 255
+
+ static const uint32_t ENC3_MASK = 0x7;
+ static const uint32_t ENC3_MASK_VAL = 0x3;
+
+ static const uint32_t ENC3_START_DELTA_MAX = 0x7ff;
+ static const uint32_t ENC3_START_DELTA_SHIFT = 21;
+
+ static const uint32_t ENC3_LENGTH_MAX = 0x3ff;
+ static const uint32_t ENC3_LENGTH_SHIFT = 11;
+
+ static const uint32_t ENC3_INDEX_MAX = 0xff;
+ static const uint32_t ENC3_INDEX_SHIFT = 3;
+
+ // byte 4 byte 3 byte 2 byte 1 byte 0
+ // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
+ // startDelta max 32767, length max 16383, index max 255
+
+ static const uint32_t ENC4_MASK = 0x7;
+ static const uint32_t ENC4_MASK_VAL = 0x7;
+
+ static const uint32_t ENC4_START_DELTA_MAX = 0x7fff;
+ static const uint32_t ENC4_START_DELTA_SHIFT = 25;
+
+ static const uint32_t ENC4_LENGTH_MAX = 0x3fff;
+ static const uint32_t ENC4_LENGTH_SHIFT = 11;
+
+ static const uint32_t ENC4_INDEX_MAX = 0xff;
+ static const uint32_t ENC4_INDEX_SHIFT = 3;
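+
+ // For example (an illustrative value derived from the ENC1 constants
+ // above): the triple (startDelta = 10, length = 5, index = 1) fits ENC1
+ // and packs as
+ //
+ // (10 << ENC1_START_DELTA_SHIFT) | (5 << ENC1_LENGTH_SHIFT) |
+ // (1 << ENC1_INDEX_SHIFT) | ENC1_MASK_VAL == 0x142a,
+ //
+ // which would be written low byte first (0x2a, then 0x14), so that a
+ // reader can pick the encoding by testing the mask bits of the first
+ // byte it reads: here (0x2a & ENC1_MASK) == ENC1_MASK_VAL.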
+
+ static bool IsDeltaEncodeable(uint32_t startDelta, uint32_t length) {
+ MOZ_ASSERT(length != 0);
+ return startDelta <= ENC4_START_DELTA_MAX && length <= ENC4_LENGTH_MAX;
+ }
+
+ static const uint32_t MAX_RUN_LENGTH = 100;
+
+ static uint32_t ExpectedRunLength(const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end);
+
+ static void ReadDelta(CompactBufferReader& reader, uint32_t* startDelta, uint32_t* length,
+ uint8_t* index);
+ static void WriteDelta(CompactBufferWriter& writer, uint32_t startDelta, uint32_t length,
+ uint8_t index);
+ static MOZ_MUST_USE bool WriteRun(CompactBufferWriter& writer,
+ const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end,
+ const UniqueTrackedOptimizations& unique);
+};
+
+class IonTrackedOptimizationsAttempts
+{
+ const uint8_t* start_;
+ const uint8_t* end_;
+
+ public:
+ IonTrackedOptimizationsAttempts(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end)
+ {
+ // Cannot be empty.
+ MOZ_ASSERT(start < end);
+ }
+
+ void forEach(JS::ForEachTrackedOptimizationAttemptOp& op);
+};
+
+struct IonTrackedTypeWithAddendum
+{
+ TypeSet::Type type;
+
+ enum HasAddendum {
+ HasNothing,
+ HasAllocationSite,
+ HasConstructor
+ };
+ HasAddendum hasAddendum;
+
+ // If type is a type object and is tied to a site, the script and pc are
+ // resolved early and stored below. This is done to avoid accessing the
+ // compartment at profiling time.
+ union {
+ struct {
+ JSScript* script;
+ uint32_t offset;
+ };
+ JSFunction* constructor;
+ };
+
+ explicit IonTrackedTypeWithAddendum(TypeSet::Type type)
+ : type(type),
+ hasAddendum(HasNothing)
+ { }
+
+ IonTrackedTypeWithAddendum(TypeSet::Type type, JSScript* script, uint32_t offset)
+ : type(type),
+ hasAddendum(HasAllocationSite),
+ script(script),
+ offset(offset)
+ { }
+
+ IonTrackedTypeWithAddendum(TypeSet::Type type, JSFunction* constructor)
+ : type(type),
+ hasAddendum(HasConstructor),
+ constructor(constructor)
+ { }
+
+ bool hasAllocationSite() const { return hasAddendum == HasAllocationSite; }
+ bool hasConstructor() const { return hasAddendum == HasConstructor; }
+};
+
+typedef Vector<IonTrackedTypeWithAddendum, 1, SystemAllocPolicy> IonTrackedTypeVector;
+
+class IonTrackedOptimizationsTypeInfo
+{
+ const uint8_t* start_;
+ const uint8_t* end_;
+
+ public:
+ IonTrackedOptimizationsTypeInfo(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end)
+ {
+ // Can be empty; i.e., no type info was tracked.
+ }
+
+ bool empty() const { return start_ == end_; }
+
+ // Unlike IonTrackedOptimizationsAttempts,
+ // JS::ForEachTrackedOptimizationTypeInfoOp cannot be used directly. The
+ // internal API needs to deal with engine-internal data structures (e.g.,
+ // TypeSet::Type) directly.
+ //
+ // An adapter is provided below.
+ struct ForEachOp
+ {
+ virtual void readType(const IonTrackedTypeWithAddendum& tracked) = 0;
+ virtual void operator()(JS::TrackedTypeSite site, MIRType mirType) = 0;
+ };
+
+ class ForEachOpAdapter : public ForEachOp
+ {
+ JS::ForEachTrackedOptimizationTypeInfoOp& op_;
+
+ public:
+ explicit ForEachOpAdapter(JS::ForEachTrackedOptimizationTypeInfoOp& op)
+ : op_(op)
+ { }
+
+ void readType(const IonTrackedTypeWithAddendum& tracked) override;
+ void operator()(JS::TrackedTypeSite site, MIRType mirType) override;
+ };
+
+ void forEach(ForEachOp& op, const IonTrackedTypeVector* allTypes);
+};
+
+template <class Entry>
+class IonTrackedOptimizationsOffsetsTable
+{
+ uint32_t padding_;
+ uint32_t numEntries_;
+ uint32_t entryOffsets_[1];
+
+ protected:
+ const uint8_t* payloadEnd() const {
+ return (uint8_t*)(this) - padding_;
+ }
+
+ public:
+ uint32_t numEntries() const { return numEntries_; }
+ uint32_t entryOffset(uint32_t index) const {
+ MOZ_ASSERT(index < numEntries());
+ return entryOffsets_[index];
+ }
+
+ Entry entry(uint32_t index) const {
+ const uint8_t* start = payloadEnd() - entryOffset(index);
+ const uint8_t* end = payloadEnd();
+ if (index < numEntries() - 1)
+ end -= entryOffset(index + 1);
+ return Entry(start, end);
+ }
+};
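+
+// For example (illustrative numbers): with numEntries_ == 2 and
+// entryOffsets_ == { 0x40, 0x20 }, entry(0) spans
+// [payloadEnd() - 0x40, payloadEnd() - 0x20) and entry(1) spans
+// [payloadEnd() - 0x20, payloadEnd()); that is, entry offsets count
+// backwards from payloadEnd(), which lies padding_ bytes before the table
+// object itself.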
+
+class IonTrackedOptimizationsRegionTable
+ : public IonTrackedOptimizationsOffsetsTable<IonTrackedOptimizationsRegion>
+{
+ public:
+ mozilla::Maybe<IonTrackedOptimizationsRegion> findRegion(uint32_t offset) const;
+
+ const uint8_t* payloadStart() const { return payloadEnd() - entryOffset(0); }
+};
+
+typedef IonTrackedOptimizationsOffsetsTable<IonTrackedOptimizationsAttempts>
+ IonTrackedOptimizationsAttemptsTable;
+
+typedef IonTrackedOptimizationsOffsetsTable<IonTrackedOptimizationsTypeInfo>
+ IonTrackedOptimizationsTypesTable;
+
+MOZ_MUST_USE bool
+WriteIonTrackedOptimizationsTable(JSContext* cx, CompactBufferWriter& writer,
+ const NativeToTrackedOptimizations* start,
+ const NativeToTrackedOptimizations* end,
+ const UniqueTrackedOptimizations& unique,
+ uint32_t* numRegions, uint32_t* regionTableOffsetp,
+ uint32_t* typesTableOffsetp, uint32_t* attemptsTableOffsetp,
+ IonTrackedTypeVector* allTypes);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_OptimizationTracking_h
diff --git a/js/src/jit/PcScriptCache.h b/js/src/jit/PcScriptCache.h
new file mode 100644
index 000000000..eed32f01c
--- /dev/null
+++ b/js/src/jit/PcScriptCache.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_PcScriptCache_h
+#define jit_PcScriptCache_h
+
+// Defines a fixed-size hash table solely for the purpose of caching jit::GetPcScript().
+// One cache is attached to each JSRuntime; it functions as if cleared on GC.
+
+struct JSRuntime;
+
+namespace js {
+namespace jit {
+
+struct PcScriptCacheEntry
+{
+ uint8_t* returnAddress; // Key into the hash table.
+ jsbytecode* pc; // Cached PC.
+ JSScript* script; // Cached script.
+};
+
+struct PcScriptCache
+{
+ static const uint32_t Length = 73;
+
+ // GC number at the time the cache was filled or created.
+ // Storing and checking against this number allows us to not bother
+ // clearing this cache on every GC -- only when actually necessary.
+ uint64_t gcNumber;
+
+ // List of cache entries.
+ mozilla::Array<PcScriptCacheEntry, Length> entries;
+
+ void clear(uint64_t gcNumber) {
+ for (uint32_t i = 0; i < Length; i++)
+ entries[i].returnAddress = nullptr;
+ this->gcNumber = gcNumber;
+ }
+
+ // Get a value from the cache. May lazily clear the cache if a GC has
+ // occurred since it was last filled.
+ MOZ_MUST_USE bool get(JSRuntime* rt, uint32_t hash, uint8_t* addr,
+ JSScript** scriptRes, jsbytecode** pcRes)
+ {
+ // If a GC occurred, lazily clear the cache now.
+ if (gcNumber != rt->gc.gcNumber()) {
+ clear(rt->gc.gcNumber());
+ return false;
+ }
+
+ if (entries[hash].returnAddress != addr)
+ return false;
+
+ *scriptRes = entries[hash].script;
+ if (pcRes)
+ *pcRes = entries[hash].pc;
+
+ return true;
+ }
+
+ void add(uint32_t hash, uint8_t* addr, jsbytecode* pc, JSScript* script) {
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT(pc);
+ MOZ_ASSERT(script);
+ entries[hash].returnAddress = addr;
+ entries[hash].pc = pc;
+ entries[hash].script = script;
+ }
+
+ static uint32_t Hash(uint8_t* addr) {
+ uint32_t key = (uint32_t)((uintptr_t)addr);
+ return ((key >> 3) * 2654435761u) % Length;
+ }
+};
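+
+// A sketch of the intended lookup pattern in jit::GetPcScript() (`cache`,
+// `retAddr`, and the recomputation step are illustrative placeholders):
+//
+// PcScriptCache* cache = ...; // the per-runtime cache
+// uint32_t hash = PcScriptCache::Hash(retAddr);
+// if (cache->get(rt, hash, retAddr, &script, &pc))
+// return; // cache hit
+// ... walk the JIT frame to recover script and pc ...
+// cache->add(hash, retAddr, pc, script);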
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_PcScriptCache_h */
diff --git a/js/src/jit/PerfSpewer.cpp b/js/src/jit/PerfSpewer.cpp
new file mode 100644
index 000000000..cb80d04aa
--- /dev/null
+++ b/js/src/jit/PerfSpewer.cpp
@@ -0,0 +1,340 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/PerfSpewer.h"
+
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#ifdef XP_UNIX
+# include <unistd.h>
+#endif
+
+#ifdef JS_ION_PERF
+# include "jit/JitSpewer.h"
+# include "jit/LIR.h"
+# include "jit/MIR.h"
+# include "jit/MIRGraph.h"
+#endif
+
+#include "vm/MutexIDs.h"
+
+// perf expects its data to be in a file named /tmp/perf-PID.map, but for
+// Android and B2G the map files are written to /data/local/tmp/perf-PID.map.
+//
+// Except that Android 4.3 no longer allows the browser to write to
+// /data/local/tmp/, so we also try /sdcard/.
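+//
+// Each line written to the map file follows the format perf expects for JIT
+// symbol maps:
+//
+// <start address in hex> <size in hex> <symbol name>
+//
+// (See writeProfile() below for the exact symbol names emitted. As an
+// illustration of the workflow, not a prescription: run the shell with
+// IONPERF=func under `perf record`, then `perf report` resolves JIT
+// addresses using /tmp/perf-PID.map.)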
+
+#ifndef PERF_SPEW_DIR
+# if defined(__ANDROID__)
+# define PERF_SPEW_DIR "/data/local/tmp/"
+# define PERF_SPEW_DIR_2 "/sdcard/"
+# else
+# define PERF_SPEW_DIR "/tmp/"
+# endif
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+#define PERF_MODE_NONE 1
+#define PERF_MODE_FUNC 2
+#define PERF_MODE_BLOCK 3
+
+#ifdef JS_ION_PERF
+
+static uint32_t PerfMode = 0;
+
+static bool PerfChecked = false;
+
+static FILE* PerfFilePtr = nullptr;
+
+static js::Mutex* PerfMutex;
+
+static bool
+openPerfMap(const char* dir)
+{
+ const ssize_t bufferSize = 256;
+ char filenameBuffer[bufferSize];
+
+ if (snprintf(filenameBuffer, bufferSize, "%sperf-%d.map", dir, getpid()) >= bufferSize)
+ return false;
+
+ MOZ_ASSERT(!PerfFilePtr);
+ PerfFilePtr = fopen(filenameBuffer, "a");
+
+ if (!PerfFilePtr)
+ return false;
+
+ return true;
+}
+
+void
+js::jit::CheckPerf() {
+ if (!PerfChecked) {
+ const char* env = getenv("IONPERF");
+ if (env == nullptr) {
+ PerfMode = PERF_MODE_NONE;
+ fprintf(stderr, "Warning: JIT perf reporting requires IONPERF set to \"block\" or \"func\". ");
+ fprintf(stderr, "Perf mapping will be deactivated.\n");
+ } else if (!strcmp(env, "none")) {
+ PerfMode = PERF_MODE_NONE;
+ } else if (!strcmp(env, "block")) {
+ PerfMode = PERF_MODE_BLOCK;
+ } else if (!strcmp(env, "func")) {
+ PerfMode = PERF_MODE_FUNC;
+ } else {
+ fprintf(stderr, "Use IONPERF=func to record at function granularity\n");
+ fprintf(stderr, "Use IONPERF=block to record at basic block granularity\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, "Be advised that using IONPERF will cause all scripts\n");
+ fprintf(stderr, "to be leaked.\n");
+ exit(0);
+ }
+
+ if (PerfMode != PERF_MODE_NONE) {
+ PerfMutex = js_new<js::Mutex>(mutexid::PerfSpewer);
+ if (!PerfMutex)
+ MOZ_CRASH("failed to allocate PerfMutex");
+
+ if (openPerfMap(PERF_SPEW_DIR)) {
+ PerfChecked = true;
+ return;
+ }
+
+#if defined(__ANDROID__)
+ if (openPerfMap(PERF_SPEW_DIR_2)) {
+ PerfChecked = true;
+ return;
+ }
+#endif
+ fprintf(stderr, "Failed to open perf map file. Disabling IONPERF.\n");
+ PerfMode = PERF_MODE_NONE;
+ }
+ PerfChecked = true;
+ }
+}
+
+bool
+js::jit::PerfBlockEnabled() {
+ MOZ_ASSERT(PerfMode);
+ return PerfMode == PERF_MODE_BLOCK;
+}
+
+bool
+js::jit::PerfFuncEnabled() {
+ MOZ_ASSERT(PerfMode);
+ return PerfMode == PERF_MODE_FUNC;
+}
+
+static bool
+lockPerfMap(void)
+{
+ if (!PerfEnabled())
+ return false;
+
+ PerfMutex->lock();
+
+ MOZ_ASSERT(PerfFilePtr);
+ return true;
+}
+
+static void
+unlockPerfMap()
+{
+ MOZ_ASSERT(PerfFilePtr);
+ fflush(PerfFilePtr);
+ PerfMutex->unlock();
+}
+
+uint32_t PerfSpewer::nextFunctionIndex = 0;
+
+bool
+PerfSpewer::startBasicBlock(MBasicBlock* blk,
+ MacroAssembler& masm)
+{
+ if (!PerfBlockEnabled())
+ return true;
+
+ const char* filename = blk->info().script()->filename();
+ unsigned lineNumber, columnNumber;
+ if (blk->pc()) {
+ lineNumber = PCToLineNumber(blk->info().script(),
+ blk->pc(),
+ &columnNumber);
+ } else {
+ lineNumber = 0;
+ columnNumber = 0;
+ }
+ Record r(filename, lineNumber, columnNumber, blk->id());
+ masm.bind(&r.start);
+ return basicBlocks_.append(r);
+}
+
+bool
+PerfSpewer::endBasicBlock(MacroAssembler& masm)
+{
+ if (!PerfBlockEnabled())
+ return true;
+
+ masm.bind(&basicBlocks_.back().end);
+ return true;
+}
+
+bool
+PerfSpewer::noteEndInlineCode(MacroAssembler& masm)
+{
+ if (!PerfBlockEnabled())
+ return true;
+
+ masm.bind(&endInlineCode);
+ return true;
+}
+
+void
+PerfSpewer::writeProfile(JSScript* script,
+ JitCode* code,
+ MacroAssembler& masm)
+{
+ if (PerfFuncEnabled()) {
+ if (!lockPerfMap())
+ return;
+
+ uint32_t thisFunctionIndex = nextFunctionIndex++;
+
+ size_t size = code->instructionsSize();
+ if (size > 0) {
+ fprintf(PerfFilePtr, "%p %" PRIxSIZE " %s:%" PRIuSIZE ": Func%02d\n",
+ code->raw(),
+ size,
+ script->filename(),
+ script->lineno(),
+ thisFunctionIndex);
+ }
+ unlockPerfMap();
+ return;
+ }
+
+ if (PerfBlockEnabled() && basicBlocks_.length() > 0) {
+ if (!lockPerfMap())
+ return;
+
+ uint32_t thisFunctionIndex = nextFunctionIndex++;
+ uintptr_t funcStart = uintptr_t(code->raw());
+ uintptr_t funcEndInlineCode = funcStart + endInlineCode.offset();
+ uintptr_t funcEnd = funcStart + code->instructionsSize();
+
+ // The function begins with the prologue, which precedes the first basic block.
+ size_t prologueSize = basicBlocks_[0].start.offset();
+
+ if (prologueSize > 0) {
+ fprintf(PerfFilePtr, "%" PRIxSIZE " %" PRIxSIZE " %s:%" PRIuSIZE ": Func%02d-Prologue\n",
+ funcStart, prologueSize, script->filename(), script->lineno(), thisFunctionIndex);
+ }
+
+ uintptr_t cur = funcStart + prologueSize;
+ for (uint32_t i = 0; i < basicBlocks_.length(); i++) {
+ Record& r = basicBlocks_[i];
+
+ uintptr_t blockStart = funcStart + r.start.offset();
+ uintptr_t blockEnd = funcStart + r.end.offset();
+
+ MOZ_ASSERT(cur <= blockStart);
+ if (cur < blockStart) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxPTR " %s:%" PRIuSIZE ": Func%02d-Block?\n",
+ cur, blockStart - cur,
+ script->filename(), script->lineno(),
+ thisFunctionIndex);
+ }
+ cur = blockEnd;
+
+ size_t size = blockEnd - blockStart;
+
+ if (size > 0) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxSIZE " %s:%d:%d: Func%02d-Block%d\n",
+ blockStart, size,
+ r.filename, r.lineNumber, r.columnNumber,
+ thisFunctionIndex, r.id);
+ }
+ }
+
+ MOZ_ASSERT(cur <= funcEndInlineCode);
+ if (cur < funcEndInlineCode) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxPTR " %s:%" PRIuSIZE ": Func%02d-Epilogue\n",
+ cur, funcEndInlineCode - cur,
+ script->filename(), script->lineno(),
+ thisFunctionIndex);
+ }
+
+ MOZ_ASSERT(funcEndInlineCode <= funcEnd);
+ if (funcEndInlineCode < funcEnd) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxPTR " %s:%" PRIuSIZE ": Func%02d-OOL\n",
+ funcEndInlineCode, funcEnd - funcEndInlineCode,
+ script->filename(), script->lineno(),
+ thisFunctionIndex);
+ }
+
+ unlockPerfMap();
+ return;
+ }
+}
+
+void
+js::jit::writePerfSpewerBaselineProfile(JSScript* script, JitCode* code)
+{
+ if (!PerfEnabled())
+ return;
+
+ if (!lockPerfMap())
+ return;
+
+ size_t size = code->instructionsSize();
+ if (size > 0) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxSIZE " %s:%" PRIuSIZE ": Baseline\n",
+ reinterpret_cast<uintptr_t>(code->raw()),
+ size, script->filename(), script->lineno());
+ }
+
+ unlockPerfMap();
+}
+
+void
+js::jit::writePerfSpewerJitCodeProfile(JitCode* code, const char* msg)
+{
+ if (!code || !PerfEnabled())
+ return;
+
+ if (!lockPerfMap())
+ return;
+
+ size_t size = code->instructionsSize();
+ if (size > 0) {
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxSIZE " %s (%p 0x%" PRIxSIZE ")\n",
+ reinterpret_cast<uintptr_t>(code->raw()),
+ size, msg, code->raw(), size);
+ }
+
+ unlockPerfMap();
+}
+
+void
+js::jit::writePerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size,
+ const char* filename, unsigned lineno, unsigned colIndex,
+ const char* funcName)
+{
+ if (!PerfFuncEnabled() || size == 0U)
+ return;
+
+ if (!lockPerfMap())
+ return;
+
+ fprintf(PerfFilePtr, "%" PRIxPTR " %" PRIxPTR " %s:%u:%u: Function %s\n",
+ base, size, filename, lineno, colIndex, funcName);
+
+ unlockPerfMap();
+}
+
+#endif // defined (JS_ION_PERF)
diff --git a/js/src/jit/PerfSpewer.h b/js/src/jit/PerfSpewer.h
new file mode 100644
index 000000000..4fd449746
--- /dev/null
+++ b/js/src/jit/PerfSpewer.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_PerfSpewer_h
+#define jit_PerfSpewer_h
+
+#ifdef JS_ION_PERF
+# include <stdio.h>
+# include "jit/MacroAssembler.h"
+#endif
+
+namespace js {
+namespace jit {
+
+class MBasicBlock;
+class MacroAssembler;
+
+#ifdef JS_ION_PERF
+void CheckPerf();
+bool PerfBlockEnabled();
+bool PerfFuncEnabled();
+static inline bool PerfEnabled() {
+ return PerfBlockEnabled() || PerfFuncEnabled();
+}
+#else
+static inline void CheckPerf() {}
+static inline bool PerfBlockEnabled() { return false; }
+static inline bool PerfFuncEnabled() { return false; }
+static inline bool PerfEnabled() { return false; }
+#endif
+
+#ifdef JS_ION_PERF
+
+struct Record {
+ const char* filename;
+ unsigned lineNumber;
+ unsigned columnNumber;
+ uint32_t id;
+ Label start, end;
+ size_t startOffset, endOffset;
+
+ Record(const char* filename,
+ unsigned lineNumber,
+ unsigned columnNumber,
+ uint32_t id)
+ : filename(filename), lineNumber(lineNumber),
+ columnNumber(columnNumber), id(id),
+ startOffset(0u), endOffset(0u)
+ {}
+};
+
+typedef Vector<Record, 1, SystemAllocPolicy> BasicBlocksVector;
+
+class PerfSpewer
+{
+ protected:
+ static uint32_t nextFunctionIndex;
+
+ public:
+ Label endInlineCode;
+
+ protected:
+ BasicBlocksVector basicBlocks_;
+
+ public:
+ virtual MOZ_MUST_USE bool startBasicBlock(MBasicBlock* blk, MacroAssembler& masm);
+ virtual MOZ_MUST_USE bool endBasicBlock(MacroAssembler& masm);
+ MOZ_MUST_USE bool noteEndInlineCode(MacroAssembler& masm);
+
+ void writeProfile(JSScript* script, JitCode* code, MacroAssembler& masm);
+};
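+
+// A sketch of how a code generator is expected to drive a PerfSpewer (the
+// loop is illustrative, not actual code):
+//
+// PerfSpewer perf;
+// for each MBasicBlock* block, in emission order:
+// if (!perf.startBasicBlock(block, masm))
+// return false;
+// ... emit the block's code ...
+// if (!perf.endBasicBlock(masm))
+// return false;
+// if (!perf.noteEndInlineCode(masm)) // before out-of-line code
+// return false;
+// perf.writeProfile(script, code, masm); // after the code is linked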
+
+void writePerfSpewerBaselineProfile(JSScript* script, JitCode* code);
+void writePerfSpewerJitCodeProfile(JitCode* code, const char* msg);
+
+// wasm doesn't support block annotations.
+class WasmPerfSpewer : public PerfSpewer
+{
+ public:
+ MOZ_MUST_USE bool startBasicBlock(MBasicBlock* blk, MacroAssembler& masm) { return true; }
+ MOZ_MUST_USE bool endBasicBlock(MacroAssembler& masm) { return true; }
+};
+
+void writePerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size, const char* filename,
+ unsigned lineno, unsigned colIndex, const char* funcName);
+
+#endif // JS_ION_PERF
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_PerfSpewer_h */
diff --git a/js/src/jit/ProcessExecutableMemory.cpp b/js/src/jit/ProcessExecutableMemory.cpp
new file mode 100644
index 000000000..71c2ab0dc
--- /dev/null
+++ b/js/src/jit/ProcessExecutableMemory.cpp
@@ -0,0 +1,656 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ProcessExecutableMemory.h"
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include "jsfriendapi.h"
+#include "jsmath.h"
+#include "jsutil.h"
+#include "jswin.h"
+
+#include <errno.h>
+
+#include "gc/Memory.h"
+#include "threading/LockGuard.h"
+#include "threading/Mutex.h"
+#include "vm/MutexIDs.h"
+
+#ifdef XP_WIN
+# include "mozilla/StackWalk_windows.h"
+# include "mozilla/WindowsVersion.h"
+#else
+# include <sys/mman.h>
+# include <unistd.h>
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+#ifdef XP_WIN
+static void*
+ComputeRandomAllocationAddress()
+{
+ /*
+ * Inspiration is V8's OS::Allocate in platform-win32.cc.
+ *
+ * VirtualAlloc takes 64K chunks out of the virtual address space, so we
+ * keep the low 16 bits clear (64 KiB alignment).
+ *
+ * x86: V8 comments say that keeping addresses in the [64MiB, 1GiB) range
+ * tries to avoid system default DLL mapping space. In the end, we get 13
+ * bits of randomness in our selection.
+ * x64: [2GiB, 4TiB), with 25 bits of randomness.
+ */
+# ifdef HAVE_64BIT_BUILD
+ static const uintptr_t base = 0x0000000080000000;
+ static const uintptr_t mask = 0x000003ffffff0000;
+# elif defined(_M_IX86) || defined(__i386__)
+ static const uintptr_t base = 0x04000000;
+ static const uintptr_t mask = 0x3fff0000;
+# else
+# error "Unsupported architecture"
+# endif
+
+ uint64_t rand = js::GenerateRandomSeed();
+ return (void*) (base | (rand & mask));
+}
+
+# ifdef HAVE_64BIT_BUILD
+static js::JitExceptionHandler sJitExceptionHandler;
+
+JS_FRIEND_API(void)
+js::SetJitExceptionHandler(JitExceptionHandler handler)
+{
+ MOZ_ASSERT(!sJitExceptionHandler);
+ sJitExceptionHandler = handler;
+}
+
+// From documentation for UNWIND_INFO on
+// http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx
+struct UnwindInfo
+{
+ uint8_t version : 3;
+ uint8_t flags : 5;
+ uint8_t sizeOfPrologue;
+ uint8_t countOfUnwindCodes;
+ uint8_t frameRegister : 4;
+ uint8_t frameOffset : 4;
+ ULONG exceptionHandler;
+};
+
+static const unsigned ThunkLength = 12;
+
+struct ExceptionHandlerRecord
+{
+ RUNTIME_FUNCTION runtimeFunction;
+ UnwindInfo unwindInfo;
+ uint8_t thunk[ThunkLength];
+};
+
+// This function must match the function pointer type PEXCEPTION_HANDLER
+// mentioned in:
+// http://msdn.microsoft.com/en-us/library/ssa62fwe.aspx.
+// This type is rather elusive in documentation; Wine is the best I've found:
+// http://source.winehq.org/source/include/winnt.h
+static DWORD
+ExceptionHandler(PEXCEPTION_RECORD exceptionRecord, _EXCEPTION_REGISTRATION_RECORD*,
+ PCONTEXT context, _EXCEPTION_REGISTRATION_RECORD**)
+{
+ return sJitExceptionHandler(exceptionRecord, context);
+}
+
+// For an explanation of the problem being solved here, see
+// SetJitExceptionFilter in jsfriendapi.h.
+static bool
+RegisterExecutableMemory(void* p, size_t bytes, size_t pageSize)
+{
+ if (!VirtualAlloc(p, pageSize, MEM_COMMIT, PAGE_READWRITE))
+ MOZ_CRASH();
+
+ ExceptionHandlerRecord* r = reinterpret_cast<ExceptionHandlerRecord*>(p);
+
+ // All these fields are specified to be offsets from the base of the
+ // executable code (which is 'p'), even if they have 'Address' in their
+ // names. In particular, exceptionHandler is a ULONG offset which is a
+ // 32-bit integer. Since 'p' can be farther than INT32_MAX away from
+ // sJitExceptionHandler, we must generate a little thunk inside the
+ // record. The record is put on its own page so that we can take away write
+ // access to protect against accidental clobbering.
+
+ r->runtimeFunction.BeginAddress = pageSize;
+ r->runtimeFunction.EndAddress = (DWORD)bytes;
+ r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindInfo);
+
+ r->unwindInfo.version = 1;
+ r->unwindInfo.flags = UNW_FLAG_EHANDLER;
+ r->unwindInfo.sizeOfPrologue = 0;
+ r->unwindInfo.countOfUnwindCodes = 0;
+ r->unwindInfo.frameRegister = 0;
+ r->unwindInfo.frameOffset = 0;
+ r->unwindInfo.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);
+
+ // mov imm64, rax
+ r->thunk[0] = 0x48;
+ r->thunk[1] = 0xb8;
+ void* handler = JS_FUNC_TO_DATA_PTR(void*, ExceptionHandler);
+ memcpy(&r->thunk[2], &handler, 8);
+
+ // jmp rax
+ r->thunk[10] = 0xff;
+ r->thunk[11] = 0xe0;
+
+ DWORD oldProtect;
+ if (!VirtualProtect(p, pageSize, PAGE_EXECUTE_READ, &oldProtect))
+ MOZ_CRASH();
+
+ // XXX NB: The profiler believes this function is only called from the main
+ // thread. If that ever becomes untrue, the profiler must be updated
+ // immediately.
+ AcquireStackWalkWorkaroundLock();
+
+ bool success = RtlAddFunctionTable(&r->runtimeFunction, 1, reinterpret_cast<DWORD64>(p));
+
+ ReleaseStackWalkWorkaroundLock();
+
+ return success;
+}
+
+static void
+UnregisterExecutableMemory(void* p, size_t bytes, size_t pageSize)
+{
+ ExceptionHandlerRecord* r = reinterpret_cast<ExceptionHandlerRecord*>(p);
+
+ // XXX NB: The profiler believes this function is only called from the main
+ // thread. If that ever becomes untrue, the profiler must be updated
+ // immediately.
+ AcquireStackWalkWorkaroundLock();
+
+ RtlDeleteFunctionTable(&r->runtimeFunction);
+
+ ReleaseStackWalkWorkaroundLock();
+}
+# endif
+
+static void*
+ReserveProcessExecutableMemory(size_t bytes)
+{
+# ifdef HAVE_64BIT_BUILD
+ size_t pageSize = gc::SystemPageSize();
+ if (sJitExceptionHandler)
+ bytes += pageSize;
+# endif
+
+ void* p = nullptr;
+ for (size_t i = 0; i < 10; i++) {
+ void* randomAddr = ComputeRandomAllocationAddress();
+ p = VirtualAlloc(randomAddr, bytes, MEM_RESERVE, PAGE_NOACCESS);
+ if (p)
+ break;
+ }
+
+ if (!p) {
+ // Try again without randomization.
+ p = VirtualAlloc(nullptr, bytes, MEM_RESERVE, PAGE_NOACCESS);
+ if (!p)
+ return nullptr;
+ }
+
+# ifdef HAVE_64BIT_BUILD
+ if (sJitExceptionHandler) {
+ if (!RegisterExecutableMemory(p, bytes, pageSize)) {
+ VirtualFree(p, 0, MEM_RELEASE);
+ return nullptr;
+ }
+
+ p = (uint8_t*)p + pageSize;
+ }
+# endif
+
+ return p;
+}
+
+static void
+DeallocateProcessExecutableMemory(void* addr, size_t bytes)
+{
+# ifdef HAVE_64BIT_BUILD
+ if (sJitExceptionHandler) {
+ size_t pageSize = gc::SystemPageSize();
+ addr = (uint8_t*)addr - pageSize;
+ UnregisterExecutableMemory(addr, bytes, pageSize);
+ }
+# endif
+
+ VirtualFree(addr, 0, MEM_RELEASE);
+}
+
+static DWORD
+ProtectionSettingToFlags(ProtectionSetting protection)
+{
+ switch (protection) {
+ case ProtectionSetting::Protected: return PAGE_NOACCESS;
+ case ProtectionSetting::Writable: return PAGE_READWRITE;
+ case ProtectionSetting::Executable: return PAGE_EXECUTE_READ;
+ }
+ MOZ_CRASH();
+}
+
+static void
+CommitPages(void* addr, size_t bytes, ProtectionSetting protection)
+{
+ if (!VirtualAlloc(addr, bytes, MEM_COMMIT, ProtectionSettingToFlags(protection)))
+ MOZ_CRASH("CommitPages failed");
+}
+
+static void
+DecommitPages(void* addr, size_t bytes)
+{
+ if (!VirtualFree(addr, bytes, MEM_DECOMMIT))
+ MOZ_CRASH("DecommitPages failed");
+}
+#else // !XP_WIN
+static void*
+ComputeRandomAllocationAddress()
+{
+ uint64_t rand = js::GenerateRandomSeed();
+
+# ifdef HAVE_64BIT_BUILD
+ // x64 CPUs have a 48-bit address space and on some platforms the OS will
+ // give us access to 47 bits, so to be safe we right shift by 18 to leave
+ // 46 bits.
+ rand >>= 18;
+# else
+ // On 32-bit, right shift by 34 to leave 30 bits, range [0, 1GiB). Then add
+ // 512MiB to get range [512MiB, 1.5GiB), or [0x20000000, 0x60000000). This
+ // is based on V8 comments in platform-posix.cc saying this range is
+ // relatively unpopulated across a variety of kernels.
+ rand >>= 34;
+ rand += 512 * 1024 * 1024;
+# endif
+
+ // Ensure page alignment.
+ uintptr_t mask = ~uintptr_t(gc::SystemPageSize() - 1);
+ return (void*) uintptr_t(rand & mask);
+}
+
+static void*
+ReserveProcessExecutableMemory(size_t bytes)
+{
+ // Note that randomAddr is just a hint: if the address is not available
+ // mmap will pick a different address.
+ void* randomAddr = ComputeRandomAllocationAddress();
+ void* p = MozTaggedAnonymousMmap(randomAddr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON,
+ -1, 0, "js-executable-memory");
+ if (p == MAP_FAILED)
+ return nullptr;
+ return p;
+}
+
+static void
+DeallocateProcessExecutableMemory(void* addr, size_t bytes)
+{
+ mozilla::DebugOnly<int> result = munmap(addr, bytes);
+ MOZ_ASSERT(!result || errno == ENOMEM);
+}
+
+static unsigned
+ProtectionSettingToFlags(ProtectionSetting protection)
+{
+ switch (protection) {
+ case ProtectionSetting::Protected: return PROT_NONE;
+ case ProtectionSetting::Writable: return PROT_READ | PROT_WRITE;
+ case ProtectionSetting::Executable: return PROT_READ | PROT_EXEC;
+ }
+ MOZ_CRASH();
+}
+
+static void
+CommitPages(void* addr, size_t bytes, ProtectionSetting protection)
+{
+ void* p = MozTaggedAnonymousMmap(addr, bytes, ProtectionSettingToFlags(protection),
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON,
+ -1, 0, "js-executable-memory");
+ MOZ_RELEASE_ASSERT(addr == p);
+}
+
+static void
+DecommitPages(void* addr, size_t bytes)
+{
+ // Use mmap with MAP_FIXED and PROT_NONE. Inspired by jemalloc's
+ // pages_decommit.
+ void* p = MozTaggedAnonymousMmap(addr, bytes, PROT_NONE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON,
+ -1, 0, "js-executable-memory");
+ MOZ_RELEASE_ASSERT(addr == p);
+}
+#endif
+
+template <size_t NumBits>
+class PageBitSet
+{
+ using WordType = uint32_t;
+ static const size_t BitsPerWord = sizeof(WordType) * 8;
+
+ static_assert((NumBits % BitsPerWord) == 0,
+ "NumBits must be a multiple of BitsPerWord");
+ static const size_t NumWords = NumBits / BitsPerWord;
+
+ mozilla::Array<WordType, NumWords> words_;
+
+ uint32_t indexToWord(uint32_t index) const {
+ MOZ_ASSERT(index < NumBits);
+ return index / BitsPerWord;
+ }
+ WordType indexToBit(uint32_t index) const {
+ MOZ_ASSERT(index < NumBits);
+ return WordType(1) << (index % BitsPerWord);
+ }
+
+ public:
+ void init() {
+ mozilla::PodArrayZero(words_);
+ }
+ bool contains(size_t index) const {
+ uint32_t word = indexToWord(index);
+ return words_[word] & indexToBit(index);
+ }
+ void insert(size_t index) {
+ MOZ_ASSERT(!contains(index));
+ uint32_t word = indexToWord(index);
+ words_[word] |= indexToBit(index);
+ }
+ void remove(size_t index) {
+ MOZ_ASSERT(contains(index));
+ uint32_t word = indexToWord(index);
+ words_[word] &= ~indexToBit(index);
+ }
+
+#ifdef DEBUG
+ bool empty() const {
+ for (size_t i = 0; i < NumWords; i++) {
+ if (words_[i] != 0)
+ return false;
+ }
+ return true;
+ }
+#endif
+};
+
+// Limit on the number of bytes of executable memory to prevent JIT spraying
+// attacks.
+#if JS_BITS_PER_WORD == 32
+static const size_t MaxCodeBytesPerProcess = 128 * 1024 * 1024;
+#else
+static const size_t MaxCodeBytesPerProcess = 1 * 1024 * 1024 * 1024;
+#endif
+
+// Per-process executable memory allocator. It reserves a block of memory of
+// MaxCodeBytesPerProcess bytes, then allocates/deallocates pages from that.
+//
+// This has a number of benefits compared to raw mmap/VirtualAlloc:
+//
+// * More resilient against certain attacks.
+//
+// * Behaves more consistently across platforms: it avoids the 64K granularity
+// issues on Windows, for instance.
+//
+// * On x64, near jumps can be used for jumps to other JIT pages.
+//
+// * On Win64, we have to register the exception handler only once (at process
+// startup). This saves some memory and avoids RtlAddFunctionTable profiler
+// deadlocks.
+class ProcessExecutableMemory
+{
+ static_assert((MaxCodeBytesPerProcess % ExecutableCodePageSize) == 0,
+ "MaxCodeBytesPerProcess must be a multiple of ExecutableCodePageSize");
+ static const size_t MaxCodePages = MaxCodeBytesPerProcess / ExecutableCodePageSize;
+
+ // Start of the MaxCodeBytesPerProcess memory block or nullptr if
+ // uninitialized. Note that this is NOT guaranteed to be aligned to
+ // ExecutableCodePageSize.
+ uint8_t* base_;
+
+ // The fields below should only be accessed while we hold the lock.
+ Mutex lock_;
+
+ // pagesAllocated_ is an Atomic so that bytesAllocated does not have to
+ // take the lock.
+ mozilla::Atomic<size_t, mozilla::ReleaseAcquire> pagesAllocated_;
+
+ // Page where we should try to allocate next.
+ size_t cursor_;
+
+ mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> rng_;
+ PageBitSet<MaxCodePages> pages_;
+
+ public:
+ ProcessExecutableMemory()
+ : base_(nullptr),
+ lock_(mutexid::ProcessExecutableRegion),
+ pagesAllocated_(0),
+ cursor_(0),
+ rng_(),
+ pages_()
+ {}
+
+ MOZ_MUST_USE bool init() {
+ pages_.init();
+
+ MOZ_RELEASE_ASSERT(!initialized());
+ MOZ_RELEASE_ASSERT(gc::SystemPageSize() <= ExecutableCodePageSize);
+
+ void* p = ReserveProcessExecutableMemory(MaxCodeBytesPerProcess);
+ if (!p)
+ return false;
+
+ base_ = static_cast<uint8_t*>(p);
+
+ mozilla::Array<uint64_t, 2> seed;
+ GenerateXorShift128PlusSeed(seed);
+ rng_.emplace(seed[0], seed[1]);
+ return true;
+ }
+
+ bool initialized() const {
+ return base_ != nullptr;
+ }
+
+ size_t bytesAllocated() const {
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+ return pagesAllocated_ * ExecutableCodePageSize;
+ }
+
+ void release() {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(pages_.empty());
+ MOZ_ASSERT(pagesAllocated_ == 0);
+ DeallocateProcessExecutableMemory(base_, MaxCodeBytesPerProcess);
+ base_ = nullptr;
+ rng_.reset();
+ MOZ_ASSERT(!initialized());
+ }
+
+ void assertValidAddress(void* p, size_t bytes) const {
+ MOZ_RELEASE_ASSERT(p >= base_ &&
+ uintptr_t(p) + bytes <= uintptr_t(base_) + MaxCodeBytesPerProcess);
+ }
+
+ void* allocate(size_t bytes, ProtectionSetting protection);
+ void deallocate(void* addr, size_t bytes);
+};
+
+void*
+ProcessExecutableMemory::allocate(size_t bytes, ProtectionSetting protection)
+{
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(bytes > 0);
+ MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);
+
+ size_t numPages = bytes / ExecutableCodePageSize;
+
+ // Take the lock and try to allocate.
+ void* p = nullptr;
+ {
+ LockGuard<Mutex> guard(lock_);
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+
+ // Check if we have enough pages available.
+ if (pagesAllocated_ + numPages >= MaxCodePages)
+ return nullptr;
+
+ MOZ_ASSERT(bytes <= MaxCodeBytesPerProcess);
+
+ // Maybe skip a page to make allocations less predictable.
+ size_t page = cursor_ + (rng_.ref().next() % 2);
+
+ for (size_t i = 0; i < MaxCodePages; i++) {
+ // Make sure page + numPages - 1 is a valid index.
+ if (page + numPages > MaxCodePages)
+ page = 0;
+
+ bool available = true;
+ for (size_t j = 0; j < numPages; j++) {
+ if (pages_.contains(page + j)) {
+ available = false;
+ break;
+ }
+ }
+ if (!available) {
+ page++;
+ continue;
+ }
+
+ // Mark the pages as unavailable.
+ for (size_t j = 0; j < numPages; j++)
+ pages_.insert(page + j);
+
+ pagesAllocated_ += numPages;
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+
+ // If we allocated a small number of pages, move cursor_ to the
+ // next page. We don't do this for larger allocations to avoid
+ // skipping a large number of small holes.
+ if (numPages <= 2)
+ cursor_ = page + numPages;
+
+ p = base_ + page * ExecutableCodePageSize;
+ break;
+ }
+ if (!p)
+ return nullptr;
+ }
+
+ // Commit the pages after releasing the lock.
+ CommitPages(p, bytes, protection);
+ return p;
+}
+
+void
+ProcessExecutableMemory::deallocate(void* addr, size_t bytes)
+{
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT((uintptr_t(addr) % gc::SystemPageSize()) == 0);
+ MOZ_ASSERT(bytes > 0);
+ MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);
+
+ assertValidAddress(addr, bytes);
+
+ size_t firstPage = (static_cast<uint8_t*>(addr) - base_) / ExecutableCodePageSize;
+ size_t numPages = bytes / ExecutableCodePageSize;
+
+ // Decommit before taking the lock.
+ DecommitPages(addr, bytes);
+
+ LockGuard<Mutex> guard(lock_);
+ MOZ_ASSERT(numPages <= pagesAllocated_);
+ pagesAllocated_ -= numPages;
+
+ for (size_t i = 0; i < numPages; i++)
+ pages_.remove(firstPage + i);
+
+ // Move the cursor back so we can reuse pages instead of fragmenting the
+ // whole region.
+ if (firstPage < cursor_)
+ cursor_ = firstPage;
+}
+
+static ProcessExecutableMemory execMemory;
+
+void*
+js::jit::AllocateExecutableMemory(size_t bytes, ProtectionSetting protection)
+{
+ return execMemory.allocate(bytes, protection);
+}
+
+void
+js::jit::DeallocateExecutableMemory(void* addr, size_t bytes)
+{
+ execMemory.deallocate(addr, bytes);
+}
+
+bool
+js::jit::InitProcessExecutableMemory()
+{
+ return execMemory.init();
+}
+
+void
+js::jit::ReleaseProcessExecutableMemory()
+{
+ execMemory.release();
+}
+
+bool
+js::jit::CanLikelyAllocateMoreExecutableMemory()
+{
+ // Use a 16 MB buffer.
+ static const size_t BufferSize = 16 * 1024 * 1024;
+
+ MOZ_ASSERT(execMemory.bytesAllocated() <= MaxCodeBytesPerProcess);
+
+ return execMemory.bytesAllocated() + BufferSize <= MaxCodeBytesPerProcess;
+}
+
+bool
+js::jit::ReprotectRegion(void* start, size_t size, ProtectionSetting protection)
+{
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ size_t pageSize = gc::SystemPageSize();
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
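+ // For example (illustrative numbers, assuming a 4096-byte system page):
+ // start = 0x7012 with size = 0x100 gives pageStart = 0x7000 and an
+ // adjusted size of 0x112, which rounds up to 0x1000, so the whole page
+ // [0x7000, 0x8000) is reprotected.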
+
+ MOZ_ASSERT((uintptr_t(pageStart) % pageSize) == 0);
+
+ execMemory.assertValidAddress(pageStart, size);
+
+#ifdef XP_WIN
+ DWORD oldProtect;
+ DWORD flags = ProtectionSettingToFlags(protection);
+ if (!VirtualProtect(pageStart, size, flags, &oldProtect))
+ return false;
+#else
+ unsigned flags = ProtectionSettingToFlags(protection);
+ if (mprotect(pageStart, size, flags))
+ return false;
+#endif
+
+ execMemory.assertValidAddress(pageStart, size);
+ return true;
+}
diff --git a/js/src/jit/ProcessExecutableMemory.h b/js/src/jit/ProcessExecutableMemory.h
new file mode 100644
index 000000000..078ce7cb7
--- /dev/null
+++ b/js/src/jit/ProcessExecutableMemory.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ProcessExecutableMemory_h
+#define jit_ProcessExecutableMemory_h
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+// Executable code is allocated in 64K chunks. ExecutableAllocator uses pools
+// that are at least this big. Code we allocate does not necessarily have 64K
+// alignment though.
+static const size_t ExecutableCodePageSize = 64 * 1024;
+
+enum class ProtectionSetting {
+ Protected, // Not readable, writable, or executable.
+ Writable,
+ Executable,
+};
+
+extern MOZ_MUST_USE bool ReprotectRegion(void* start, size_t size, ProtectionSetting protection);
+
+// Functions called at process start-up/shutdown to initialize/release the
+// executable memory region.
+extern MOZ_MUST_USE bool InitProcessExecutableMemory();
+extern void ReleaseProcessExecutableMemory();
+
+// Allocate/deallocate executable pages.
+extern void* AllocateExecutableMemory(size_t bytes, ProtectionSetting protection);
+extern void DeallocateExecutableMemory(void* addr, size_t bytes);
+
+// Returns true if we can allocate a few more MB of executable code without
+// hitting our code limit. This function can be used to stop compiling things
+// that are optional (like Baseline and Ion code) when we're about to reach the
+// limit, so we are less likely to OOM or crash. Note that the limit is
+// per-process, so other threads can also allocate code after we call this
+// function.
+extern bool CanLikelyAllocateMoreExecutableMemory();
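+
+// A sketch of typical use (illustrative only; `code` and `codeBytes` are
+// placeholders, and real callers normally go through ExecutableAllocator):
+//
+// void* p = AllocateExecutableMemory(ExecutableCodePageSize,
+// ProtectionSetting::Writable);
+// if (!p)
+// return nullptr;
+// memcpy(p, code, codeBytes);
+// if (!ReprotectRegion(p, codeBytes, ProtectionSetting::Executable))
+// return nullptr;
+// // ... execute the code ...
+// DeallocateExecutableMemory(p, ExecutableCodePageSize);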
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_ProcessExecutableMemory_h
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
new file mode 100644
index 000000000..95484c249
--- /dev/null
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -0,0 +1,3634 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RangeAnalysis.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Ion.h"
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/TypedArrayCommon.h"
+
+#include "jsopcodeinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Abs;
+using mozilla::CountLeadingZeroes32;
+using mozilla::NumberEqualsInt32;
+using mozilla::ExponentComponent;
+using mozilla::FloorLog2;
+using mozilla::IsInfinite;
+using mozilla::IsNaN;
+using mozilla::IsNegative;
+using mozilla::IsNegativeZero;
+using mozilla::NegativeInfinity;
+using mozilla::PositiveInfinity;
+using mozilla::Swap;
+using JS::GenericNaN;
+using JS::ToInt32;
+
+// This algorithm is based on the paper "Eliminating Range Checks Using
+// Static Single Assignment Form" by Gough and Klaeren.
+//
+// We associate a range object with each SSA name, and the ranges are consulted
+// in order to determine whether overflow is possible for arithmetic
+// computations.
+//
+// An important source of range information that requires care to take
+// advantage of is conditional control flow. Consider the code below:
+//
+// if (x < 0) {
+// y = x + 2000000000;
+// } else {
+// if (x < 1000000000) {
+// y = x * 2;
+// } else {
+// y = x - 3000000000;
+// }
+// }
+//
+// The arithmetic operations in this code cannot overflow, but it is not
+// sufficient to simply associate each name with a range, since the information
+// differs between basic blocks. The traditional dataflow approach would be
+// to associate ranges with (name, basic block) pairs. This solution is not
+// satisfying, since we lose the benefit of SSA form: in SSA form, each
+// definition has a unique name, so there is no need to track information about
+// the control flow of the program.
+//
+// The approach used here is to add a new form of pseudo operation called a
+// beta node, which associates range information with a value. These beta
+// instructions take one argument and additionally have an auxiliary constant
+// range associated with them. Operationally, beta nodes are just copies, but
+// the invariant expressed by beta node copies is that the output will fall
+// inside the range given by the beta node. Gough and Klaeren refer to SSA
+// extended with these beta nodes as XSA form. The following shows the example
+// code transformed into XSA form:
+//
+// if (x < 0) {
+// x1 = Beta(x, [INT_MIN, -1]);
+// y1 = x1 + 2000000000;
+// } else {
+// x2 = Beta(x, [0, INT_MAX]);
+// if (x2 < 1000000000) {
+// x3 = Beta(x2, [INT_MIN, 999999999]);
+// y2 = x3*2;
+// } else {
+// x4 = Beta(x2, [1000000000, INT_MAX]);
+// y3 = x4 - 3000000000;
+// }
+// y4 = Phi(y2, y3);
+// }
+// y = Phi(y1, y4);
+//
+// We insert beta nodes for the purposes of range analysis (they might also be
+// usefully used for other forms of bounds check elimination) and remove them
+// after range analysis is performed. The remaining compiler phases do not ever
+// encounter beta nodes.
+
+static bool
+IsDominatedUse(MBasicBlock* block, MUse* use)
+{
+ MNode* n = use->consumer();
+ bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
+
+ if (isPhi) {
+ MPhi* phi = n->toDefinition()->toPhi();
+ return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
+ }
+
+ return block->dominates(n->block());
+}
+
+static inline void
+SpewRange(MDefinition* def)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None && def->range()) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ def->printName(out);
+ out.printf(" has range ");
+ def->range()->dump(out);
+ }
+#endif
+}
+
+static inline void
+SpewTruncate(MDefinition* def, MDefinition::TruncateKind kind, bool shouldClone)
+{
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Range)) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ out.printf("truncating ");
+ def->printName(out);
+ out.printf(" (kind: %s, clone: %d)\n", MDefinition::TruncateKindString(kind), shouldClone);
+ }
+#endif
+}
+
+TempAllocator&
+RangeAnalysis::alloc() const
+{
+ return graph_.alloc();
+}
+
+void
+RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig, MDefinition* dom,
+ MBasicBlock* block)
+{
+ for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) {
+ MUse* use = *i++;
+ if (use->consumer() != dom && IsDominatedUse(block, use))
+ use->replaceProducer(dom);
+ }
+}
+
+bool
+RangeAnalysis::addBetaNodes()
+{
+ JitSpew(JitSpew_Range, "Adding beta nodes");
+
+ for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
+ MBasicBlock* block = *i;
+ JitSpew(JitSpew_Range, "Looking at block %d", block->id());
+
+ BranchDirection branch_dir;
+ MTest* test = block->immediateDominatorBranch(&branch_dir);
+
+ if (!test || !test->getOperand(0)->isCompare())
+ continue;
+
+ MCompare* compare = test->getOperand(0)->toCompare();
+
+ if (!compare->isNumericComparison())
+ continue;
+
+ // TODO: support unsigned comparisons
+ if (compare->compareType() == MCompare::Compare_UInt32)
+ continue;
+
+ MDefinition* left = compare->getOperand(0);
+ MDefinition* right = compare->getOperand(1);
+ double bound;
+ double conservativeLower = NegativeInfinity<double>();
+ double conservativeUpper = PositiveInfinity<double>();
+ MDefinition* val = nullptr;
+
+ JSOp jsop = compare->jsop();
+
+ if (branch_dir == FALSE_BRANCH) {
+ jsop = NegateCompareOp(jsop);
+ conservativeLower = GenericNaN();
+ conservativeUpper = GenericNaN();
+ }
+
+ MConstant* leftConst = left->maybeConstantValue();
+ MConstant* rightConst = right->maybeConstantValue();
+ if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
+ bound = leftConst->numberToDouble();
+ val = right;
+ jsop = ReverseCompareOp(jsop);
+ } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
+ bound = rightConst->numberToDouble();
+ val = left;
+ } else if (left->type() == MIRType::Int32 && right->type() == MIRType::Int32) {
+ MDefinition* smaller = nullptr;
+ MDefinition* greater = nullptr;
+ if (jsop == JSOP_LT) {
+ smaller = left;
+ greater = right;
+ } else if (jsop == JSOP_GT) {
+ smaller = right;
+ greater = left;
+ }
+ if (smaller && greater) {
+ if (!alloc().ensureBallast())
+ return false;
+
+ MBeta* beta;
+ beta = MBeta::New(alloc(), smaller,
+ Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(smaller, beta, block);
+ JitSpew(JitSpew_Range, "Adding beta node for smaller %d", smaller->id());
+ beta = MBeta::New(alloc(), greater,
+ Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(greater, beta, block);
+ JitSpew(JitSpew_Range, "Adding beta node for greater %d", greater->id());
+ }
+ continue;
+ } else {
+ continue;
+ }
+
+        // At this point, one of the operands of the compare is a constant, and
+ // val is the other operand.
+ MOZ_ASSERT(val);
+
+ Range comp;
+ switch (jsop) {
+ case JSOP_LE:
+ comp.setDouble(conservativeLower, bound);
+ break;
+ case JSOP_LT:
+ // For integers, if x < c, the upper bound of x is c-1.
+ if (val->type() == MIRType::Int32) {
+ int32_t intbound;
+ if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
+ bound = intbound;
+ }
+ comp.setDouble(conservativeLower, bound);
+
+ // Negative zero is not less than zero.
+ if (bound == 0)
+ comp.refineToExcludeNegativeZero();
+ break;
+ case JSOP_GE:
+ comp.setDouble(bound, conservativeUpper);
+ break;
+ case JSOP_GT:
+ // For integers, if x > c, the lower bound of x is c+1.
+ if (val->type() == MIRType::Int32) {
+ int32_t intbound;
+ if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
+ bound = intbound;
+ }
+ comp.setDouble(bound, conservativeUpper);
+
+ // Negative zero is not greater than zero.
+ if (bound == 0)
+ comp.refineToExcludeNegativeZero();
+ break;
+ case JSOP_STRICTEQ:
+ // A strict comparison can test for things other than numeric value.
+ if (!compare->isNumericComparison())
+ continue;
+ // Otherwise fall through to handle JSOP_STRICTEQ the same as JSOP_EQ.
+ MOZ_FALLTHROUGH;
+ case JSOP_EQ:
+ comp.setDouble(bound, bound);
+ break;
+ case JSOP_STRICTNE:
+ // A strict comparison can test for things other than numeric value.
+ if (!compare->isNumericComparison())
+ continue;
+ // Otherwise fall through to handle JSOP_STRICTNE the same as JSOP_NE.
+ MOZ_FALLTHROUGH;
+ case JSOP_NE:
+ // Negative zero is not not-equal to zero.
+ if (bound == 0) {
+ comp.refineToExcludeNegativeZero();
+ break;
+ }
+            continue; // We could compute [-\inf, bound-1] U [bound+1, \inf],
+                      // but we only represent contiguous ranges.
+ default:
+ continue;
+ }
+
+ if (JitSpewEnabled(JitSpew_Range)) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Adding beta node for %d with range ", val->id());
+ comp.dump(out);
+ }
+
+ if (!alloc().ensureBallast())
+ return false;
+
+ MBeta* beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(val, beta, block);
+ }
+
+ return true;
+}
+
+bool
+RangeAnalysis::removeBetaNodes()
+{
+ JitSpew(JitSpew_Range, "Removing beta nodes");
+
+ for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
+ MBasicBlock* block = *i;
+ for (MDefinitionIterator iter(*i); iter; ) {
+ MDefinition* def = *iter++;
+ if (def->isBeta()) {
+ MDefinition* op = def->getOperand(0);
+ JitSpew(JitSpew_Range, "Removing beta node %d for %d",
+ def->id(), op->id());
+ def->justReplaceAllUsesWith(op);
+ block->discardDef(def);
+ } else {
+ // We only place Beta nodes at the beginning of basic
+ // blocks, so if we see something else, we can move on
+ // to the next block.
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+void
+SymbolicBound::dump(GenericPrinter& out) const
+{
+ if (loop)
+ out.printf("[loop] ");
+ sum.dump(out);
+}
+
+void
+SymbolicBound::dump() const
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+// Test whether the given range's exponent tells us anything that its lower
+// and upper bound values don't.
+static bool
+IsExponentInteresting(const Range* r)
+{
+ // If it lacks either a lower or upper bound, the exponent is interesting.
+ if (!r->hasInt32Bounds())
+ return true;
+
+ // Otherwise if there's no fractional part, the lower and upper bounds,
+ // which are integers, are perfectly precise.
+ if (!r->canHaveFractionalPart())
+ return false;
+
+ // Otherwise, if the bounds are conservatively rounded across a power-of-two
+ // boundary, the exponent may imply a tighter range.
+ return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
+}
+
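+// Print a compact description of the range: e.g. "I[0, 255]" for an integer
+// range with known bounds; special values such as NaN, the infinities, and
+// -0 are appended in a trailing "(U ...)" clause when they are possible.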
+void
+Range::dump(GenericPrinter& out) const
+{
+ assertInvariants();
+
+ // Floating-point or Integer subset.
+ if (canHaveFractionalPart_)
+ out.printf("F");
+ else
+ out.printf("I");
+
+ out.printf("[");
+
+ if (!hasInt32LowerBound_)
+ out.printf("?");
+ else
+ out.printf("%d", lower_);
+ if (symbolicLower_) {
+ out.printf(" {");
+ symbolicLower_->dump(out);
+ out.printf("}");
+ }
+
+ out.printf(", ");
+
+ if (!hasInt32UpperBound_)
+ out.printf("?");
+ else
+ out.printf("%d", upper_);
+ if (symbolicUpper_) {
+ out.printf(" {");
+ symbolicUpper_->dump(out);
+ out.printf("}");
+ }
+
+ out.printf("]");
+
+ bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
+ bool includesNegativeInfinity = max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
+ bool includesPositiveInfinity = max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
+ bool includesNegativeZero = canBeNegativeZero_;
+
+ if (includesNaN ||
+ includesNegativeInfinity ||
+ includesPositiveInfinity ||
+ includesNegativeZero)
+ {
+ out.printf(" (");
+ bool first = true;
+ if (includesNaN) {
+ if (first)
+ first = false;
+ else
+ out.printf(" ");
+ out.printf("U NaN");
+ }
+ if (includesNegativeInfinity) {
+ if (first)
+ first = false;
+ else
+ out.printf(" ");
+ out.printf("U -Infinity");
+ }
+ if (includesPositiveInfinity) {
+ if (first)
+ first = false;
+ else
+ out.printf(" ");
+ out.printf("U Infinity");
+ }
+ if (includesNegativeZero) {
+ if (first)
+ first = false;
+ else
+ out.printf(" ");
+ out.printf("U -0");
+ }
+ out.printf(")");
+ }
+ if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this))
+ out.printf(" (< pow(2, %d+1))", max_exponent_);
+}
+
+void
+Range::dump() const
+{
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+Range*
+Range::intersect(TempAllocator& alloc, const Range* lhs, const Range* rhs, bool* emptyRange)
+{
+ *emptyRange = false;
+
+ if (!lhs && !rhs)
+ return nullptr;
+
+ if (!lhs)
+ return new(alloc) Range(*rhs);
+ if (!rhs)
+ return new(alloc) Range(*lhs);
+
+ int32_t newLower = Max(lhs->lower_, rhs->lower_);
+ int32_t newUpper = Min(lhs->upper_, rhs->upper_);
+
+ // If upper < lower, then we have conflicting constraints. Consider:
+ //
+ // if (x < 0) {
+ // if (x > 0) {
+ // [Some code.]
+ // }
+ // }
+ //
+ // In this case, the block is unreachable.
+ if (newUpper < newLower) {
+ // If both ranges can be NaN, the result can still be NaN.
+ if (!lhs->canBeNaN() || !rhs->canBeNaN())
+ *emptyRange = true;
+ return nullptr;
+ }
+
+ bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
+ bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ &&
+ rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ &&
+ rhs->canBeNegativeZero_);
+
+ uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);
+
+    // NaN is a special value which is neither greater than infinity nor less than
+ // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
+ // can end up thinking we have both a lower and upper bound, even though NaN
+ // is still possible. In this case, just be conservative, since any case where
+ // we can have NaN is not especially interesting.
+ if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN)
+ return nullptr;
+
+ // If one of the ranges has a fractional part and the other doesn't, it's
+ // possible that we will have computed a newExponent that's more precise
+ // than our newLower and newUpper. This is unusual, so we handle it here
+ // instead of in optimize().
+ //
+ // For example, consider the range F[0,1.5]. Range analysis represents the
+ // lower and upper bound as integers, so we'd actually have
+ // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
+ // more precise upper bound than the integer upper bound.
+ //
+ // When intersecting such a range with an integer range, the fractional part
+ // of the range is dropped. The max exponent of 0 remains valid, so the
+ // upper bound needs to be adjusted to 1.
+ //
+ // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
+ // the naive intersection is I[2,2], but since the max exponent tells us
+ // that the value is always less than 2, the intersection is actually empty.
+ if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
+ (lhs->canHaveFractionalPart() &&
+ newHasInt32LowerBound && newHasInt32UpperBound &&
+ newLower == newUpper))
+ {
+ refineInt32BoundsByExponent(newExponent,
+ &newLower, &newHasInt32LowerBound,
+ &newUpper, &newHasInt32UpperBound);
+
+ // If we're intersecting two ranges that don't overlap, this could also
+ // push the bounds past each other, since the actual intersection is
+ // the empty set.
+ if (newLower > newUpper) {
+ *emptyRange = true;
+ return nullptr;
+ }
+ }
+
+ return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ newExponent);
+}
+
+void
+Range::unionWith(const Range* other)
+{
+ int32_t newLower = Min(lower_, other->lower_);
+ int32_t newUpper = Max(upper_, other->upper_);
+
+ bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_;
+ bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_;
+
+ FractionalPartFlag newCanHaveFractionalPart =
+ FractionalPartFlag(canHaveFractionalPart_ ||
+ other->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(canBeNegativeZero_ ||
+ other->canBeNegativeZero_);
+
+ uint16_t newExponent = Max(max_exponent_, other->max_exponent_);
+
+ rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ newExponent);
+}
+
+Range::Range(const MDefinition* def)
+ : symbolicLower_(nullptr),
+ symbolicUpper_(nullptr)
+{
+ if (const Range* other = def->range()) {
+ // The instruction has range information; use it.
+ *this = *other;
+
+ // Simulate the effect of converting the value to its type.
+        // Note: we cannot clamp here, since ranges aren't allowed to shrink
+        // and truncation can increase the range again. So we do a wrapAround
+        // to mimic a possible truncation.
+ switch (def->type()) {
+ case MIRType::Int32:
+ // MToInt32 cannot truncate. So we can safely clamp.
+ if (def->isToInt32())
+ clampToInt32();
+ else
+ wrapAroundToInt32();
+ break;
+ case MIRType::Boolean:
+ wrapAroundToBoolean();
+ break;
+ case MIRType::None:
+ MOZ_CRASH("Asking for the range of an instruction with no value");
+ default:
+ break;
+ }
+ } else {
+ // Otherwise just use type information. We can trust the type here
+ // because we don't care what value the instruction actually produces,
+ // but what value we might get after we get past the bailouts.
+ switch (def->type()) {
+ case MIRType::Int32:
+ setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
+ break;
+ case MIRType::Boolean:
+ setInt32(0, 1);
+ break;
+ case MIRType::None:
+ MOZ_CRASH("Asking for the range of an instruction with no value");
+ default:
+ setUnknown();
+ break;
+ }
+ }
+
+ // As a special case, MUrsh is permitted to claim a result type of
+ // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
+ // bailouts. If range analysis hasn't ruled out values in
+ // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
+ // use as either a uint32 or an int32.
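+    // For example, a result of 0xFFFFFFFF reads back as -1 when reinterpreted
+    // as an int32, so the int32 lower bound is conservatively widened to
+    // INT32_MIN.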
+ if (!hasInt32UpperBound() &&
+ def->isUrsh() &&
+ def->toUrsh()->bailoutsDisabled() &&
+ def->type() != MIRType::Int64)
+ {
+ lower_ = INT32_MIN;
+ }
+
+ assertInvariants();
+}
+
+static uint16_t
+ExponentImpliedByDouble(double d)
+{
+ // Handle the special values.
+ if (IsNaN(d))
+ return Range::IncludesInfinityAndNaN;
+ if (IsInfinite(d))
+ return Range::IncludesInfinity;
+
+ // Otherwise take the exponent part and clamp it at zero, since the Range
+ // class doesn't track fractional ranges.
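+    // For example, ExponentComponent(3.5) is 1 (3.5 == 1.75 * 2^1), so we
+    // return 1; for 0.25 the component is -2, which clamps to 0.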
+ return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
+}
+
+void
+Range::setDouble(double l, double h)
+{
+ MOZ_ASSERT(!(l > h));
+
+ // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
+ if (l >= INT32_MIN && l <= INT32_MAX) {
+ lower_ = int32_t(::floor(l));
+ hasInt32LowerBound_ = true;
+ } else if (l >= INT32_MAX) {
+ lower_ = INT32_MAX;
+ hasInt32LowerBound_ = true;
+ } else {
+ lower_ = INT32_MIN;
+ hasInt32LowerBound_ = false;
+ }
+ if (h >= INT32_MIN && h <= INT32_MAX) {
+ upper_ = int32_t(::ceil(h));
+ hasInt32UpperBound_ = true;
+ } else if (h <= INT32_MIN) {
+ upper_ = INT32_MIN;
+ hasInt32UpperBound_ = true;
+ } else {
+ upper_ = INT32_MAX;
+ hasInt32UpperBound_ = false;
+ }
+
+ // Infer max_exponent_.
+ uint16_t lExp = ExponentImpliedByDouble(l);
+ uint16_t hExp = ExponentImpliedByDouble(h);
+ max_exponent_ = Max(lExp, hExp);
+
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+
+ // Infer the canHaveFractionalPart_ setting. We can have a
+ // fractional part if the range crosses through the neighborhood of zero. We
+ // won't have a fractional value if the value is always beyond the point at
+ // which double precision can't represent fractional values.
+ uint16_t minExp = Min(lExp, hExp);
+ bool includesNegative = IsNaN(l) || l < 0;
+ bool includesPositive = IsNaN(h) || h > 0;
+ bool crossesZero = includesNegative && includesPositive;
+ if (crossesZero || minExp < MaxTruncatableExponent)
+ canHaveFractionalPart_ = IncludesFractionalParts;
+
+ // Infer the canBeNegativeZero_ setting. We can have a negative zero if
+    // the range includes zero.
+ if (!(l > 0) && !(h < 0))
+ canBeNegativeZero_ = IncludesNegativeZero;
+
+ optimize();
+}
+
+void
+Range::setDoubleSingleton(double d)
+{
+ setDouble(d, d);
+
+    // The setDouble call above uses double comparisons, which treat negative
+    // zero as equal to zero. We're aiming for a minimum range, so we can
+    // clear the negative zero flag if the value isn't actually negative zero.
+ if (!IsNegativeZero(d))
+ canBeNegativeZero_ = ExcludesNegativeZero;
+
+ assertInvariants();
+}
+
+static inline bool
+MissingAnyInt32Bounds(const Range* lhs, const Range* rhs)
+{
+ return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
+}
+
+Range*
+Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
+ if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
+ l = NoInt32LowerBound;
+
+ int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
+ if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
+ h = NoInt32UpperBound;
+
+ // The exponent is at most one greater than the greater of the operands'
+ // exponents, except for NaN and infinity cases.
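+    // For example, if both operands have max_exponent_ == 3 (|v| < 2^4), the
+    // sum satisfies |l + r| < 2^5, so an exponent of 4 is sufficient.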
+ uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
+ if (e <= Range::MaxFiniteExponent)
+ ++e;
+
+ // Infinity + -Infinity is NaN.
+ if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
+ e = Range::IncludesInfinityAndNaN;
+
+ return new(alloc) Range(l, h,
+ FractionalPartFlag(lhs->canHaveFractionalPart() ||
+ rhs->canHaveFractionalPart()),
+ NegativeZeroFlag(lhs->canBeNegativeZero() &&
+ rhs->canBeNegativeZero()),
+ e);
+}
+
+Range*
+Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
+ if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
+ l = NoInt32LowerBound;
+
+ int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
+ if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
+ h = NoInt32UpperBound;
+
+ // The exponent is at most one greater than the greater of the operands'
+ // exponents, except for NaN and infinity cases.
+ uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
+ if (e <= Range::MaxFiniteExponent)
+ ++e;
+
+ // Infinity - Infinity is NaN.
+ if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
+ e = Range::IncludesInfinityAndNaN;
+
+ return new(alloc) Range(l, h,
+ FractionalPartFlag(lhs->canHaveFractionalPart() ||
+ rhs->canHaveFractionalPart()),
+ NegativeZeroFlag(lhs->canBeNegativeZero() &&
+ rhs->canBeZero()),
+ e);
+}
+
+Range*
+Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+
+ // If both numbers can be negative, result can be negative in the whole range
+ if (lhs->lower() < 0 && rhs->lower() < 0)
+ return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));
+
+    // At most one of the operands can be negative:
+    // - the result can't be negative,
+    // - the upper bound is the minimum of both upper bounds,
+ int32_t lower = 0;
+ int32_t upper = Min(lhs->upper(), rhs->upper());
+
+    // EXCEPT if one operand can be negative: anding with a negative value can
+    // preserve the non-negative operand entirely, so the upper bound is that
+    // operand's upper bound. For example, -1 & 5 = 5.
+ if (lhs->lower() < 0)
+ upper = rhs->upper();
+ if (rhs->lower() < 0)
+ upper = lhs->upper();
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
+
+Range*
+Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ // When one operand is always 0 or always -1, it's a special case where we
+ // can compute a fully precise result. Handling these up front also
+ // protects the code below from calling CountLeadingZeroes32 with a zero
+ // operand or from shifting an int32_t by 32.
+ if (lhs->lower() == lhs->upper()) {
+ if (lhs->lower() == 0)
+ return new(alloc) Range(*rhs);
+ if (lhs->lower() == -1)
+ return new(alloc) Range(*lhs);
+ }
+ if (rhs->lower() == rhs->upper()) {
+ if (rhs->lower() == 0)
+ return new(alloc) Range(*lhs);
+ if (rhs->lower() == -1)
+ return new(alloc) Range(*rhs);
+ }
+
+ // The code below uses CountLeadingZeroes32, which has undefined behavior
+ // if its operand is 0. We rely on the code above to protect it.
+ MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
+ MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
+ MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
+ MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
+
+ int32_t lower = INT32_MIN;
+ int32_t upper = INT32_MAX;
+
+ if (lhs->lower() >= 0 && rhs->lower() >= 0) {
+ // Both operands are non-negative, so the result won't be less than either.
+ lower = Max(lhs->lower(), rhs->lower());
+ // The result will have leading zeros where both operands have leading zeros.
+        // CountLeadingZeroes32 of a non-negative int32 is at least 1, to
+        // account for the sign bit.
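+        // For example, with lhs = [1, 5] and rhs = [2, 12], the lower bound
+        // is 2 and the smaller leading-zero count (that of 12) gives
+        // UINT32_MAX >> 28 == 15 as the upper bound, which covers 5 | 12 == 13.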
+ upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
+ CountLeadingZeroes32(rhs->upper())));
+ } else {
+ // The result will have leading ones where either operand has leading ones.
+ if (lhs->upper() < 0) {
+ unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
+ lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
+ upper = -1;
+ }
+ if (rhs->upper() < 0) {
+ unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
+ lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
+ upper = -1;
+ }
+ }
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
+
+Range*
+Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ int32_t lhsLower = lhs->lower();
+ int32_t lhsUpper = lhs->upper();
+ int32_t rhsLower = rhs->lower();
+ int32_t rhsUpper = rhs->upper();
+ bool invertAfter = false;
+
+ // If either operand is negative, bitwise-negate it, and arrange to negate
+ // the result; ~((~x)^y) == x^y. If both are negative the negations on the
+ // result cancel each other out; effectively this is (~x)^(~y) == x^y.
+ // These transformations reduce the number of cases we have to handle below.
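+    // For example, an lhs range of [-8, -3] becomes [~(-3), ~(-8)] == [2, 7]
+    // after negation, and invertAfter records that the result must be negated
+    // back at the end.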
+ if (lhsUpper < 0) {
+ lhsLower = ~lhsLower;
+ lhsUpper = ~lhsUpper;
+ Swap(lhsLower, lhsUpper);
+ invertAfter = !invertAfter;
+ }
+ if (rhsUpper < 0) {
+ rhsLower = ~rhsLower;
+ rhsUpper = ~rhsUpper;
+ Swap(rhsLower, rhsUpper);
+ invertAfter = !invertAfter;
+ }
+
+ // Handle cases where lhs or rhs is always zero specially, because they're
+ // easy cases where we can be perfectly precise, and because it protects the
+ // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
+ // undefined behavior.
+ int32_t lower = INT32_MIN;
+ int32_t upper = INT32_MAX;
+ if (lhsLower == 0 && lhsUpper == 0) {
+ upper = rhsUpper;
+ lower = rhsLower;
+ } else if (rhsLower == 0 && rhsUpper == 0) {
+ upper = lhsUpper;
+ lower = lhsLower;
+ } else if (lhsLower >= 0 && rhsLower >= 0) {
+ // Both operands are non-negative. The result will be non-negative.
+ lower = 0;
+ // To compute the upper value, take each operand's upper value and
+ // set all bits that don't correspond to leading zero bits in the
+ // other to one. For each one, this gives an upper bound for the
+ // result, so we can take the minimum between the two.
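+        // For example, with lhsUpper == 4 and rhsUpper == 17 the masks are 7
+        // and 31, so upper = Min(17 | 7, 4 | 31) == Min(23, 31) == 23.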
+ unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
+ unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
+ upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
+ lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
+ }
+
+ // If we bitwise-negated one (but not both) of the operands above, apply the
+ // bitwise-negate to the result, completing ~((~x)^y) == x^y.
+ if (invertAfter) {
+ lower = ~lower;
+ upper = ~upper;
+ Swap(lower, upper);
+ }
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
+
+Range*
+Range::not_(TempAllocator& alloc, const Range* op)
+{
+ MOZ_ASSERT(op->isInt32());
+ return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
+}
+
+Range*
+Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
+ rhs->canHaveFractionalPart_);
+
+ NegativeZeroFlag newMayIncludeNegativeZero =
+ NegativeZeroFlag((lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
+ (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));
+
+ uint16_t exponent;
+ if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
+ // Two finite values.
+ exponent = lhs->numBits() + rhs->numBits() - 1;
+ if (exponent > Range::MaxFiniteExponent)
+ exponent = Range::IncludesInfinity;
+ } else if (!lhs->canBeNaN() &&
+ !rhs->canBeNaN() &&
+ !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
+ !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN()))
+ {
+ // Two values that multiplied together won't produce a NaN.
+ exponent = Range::IncludesInfinity;
+ } else {
+ // Could be anything.
+ exponent = Range::IncludesInfinityAndNaN;
+ }
+
+ if (MissingAnyInt32Bounds(lhs, rhs))
+ return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ exponent);
+ int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
+ int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
+ int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
+ int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
+ return new(alloc) Range(
+ Min( Min(a, b), Min(c, d) ),
+ Max( Max(a, b), Max(c, d) ),
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ exponent);
+}
+
+Range*
+Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ int32_t shift = c & 0x1f;
+
+    // If the shift doesn't lose bits or shift bits into the sign bit, we
+ // can simply compute the correct range by shifting.
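+    // For example, a bound of 5 with shift 3 round-trips exactly (5 << 3 is
+    // 40, which shifts back to 5), whereas a bound of 1 << 29 with shift 3
+    // loses bits, so we fall back to the conservative range below.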
+ if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
+ (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
+ {
+ return Range::NewInt32Range(alloc,
+ uint32_t(lhs->lower()) << shift,
+ uint32_t(lhs->upper()) << shift);
+ }
+
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+}
+
+Range*
+Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ int32_t shift = c & 0x1f;
+ return Range::NewInt32Range(alloc,
+ lhs->lower() >> shift,
+ lhs->upper() >> shift);
+}
+
+Range*
+Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c)
+{
+ // ursh's left operand is uint32, not int32, but for range analysis we
+ // currently approximate it as int32. We assume here that the range has
+ // already been adjusted accordingly by our callers.
+ MOZ_ASSERT(lhs->isInt32());
+
+ int32_t shift = c & 0x1f;
+
+ // If the value is always non-negative or always negative, we can simply
+ // compute the correct range by shifting.
+ if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
+ return Range::NewUInt32Range(alloc,
+ uint32_t(lhs->lower()) >> shift,
+ uint32_t(lhs->upper()) >> shift);
+ }
+
+ // Otherwise return the most general range after the shift.
+ return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
+}
+
+Range*
+Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+}
+
+Range*
+Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+
+ // Canonicalize the shift range to 0 to 31.
+ int32_t shiftLower = rhs->lower();
+ int32_t shiftUpper = rhs->upper();
+ if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
+ shiftLower = 0;
+ shiftUpper = 31;
+ } else {
+ shiftLower &= 0x1f;
+ shiftUpper &= 0x1f;
+ if (shiftLower > shiftUpper) {
+ shiftLower = 0;
+ shiftUpper = 31;
+ }
+ }
+ MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);
+
+    // The lhs bounds are signed, thus the minimum is the lower bound shifted
+    // by the smallest shift if it is negative, or by the biggest shift
+    // otherwise; and the opposite for the maximum.
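+    // For example, with lhs = [-20, 40] and a shift range of [1, 3], the
+    // result range is [-20 >> 1, 40 >> 1] == [-10, 20].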
+ int32_t lhsLower = lhs->lower();
+ int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
+ int32_t lhsUpper = lhs->upper();
+ int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;
+
+ return Range::NewInt32Range(alloc, min, max);
+}
+
+Range*
+Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ // ursh's left operand is uint32, not int32, but for range analysis we
+ // currently approximate it as int32. We assume here that the range has
+ // already been adjusted accordingly by our callers.
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
+}
+
+Range*
+Range::abs(TempAllocator& alloc, const Range* op)
+{
+ int32_t l = op->lower_;
+ int32_t u = op->upper_;
+ FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;
+
+ // Abs never produces a negative zero.
+ NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;
+
+ return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
+ true,
+ Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
+ op->hasInt32Bounds() && l != INT32_MIN,
+ canHaveFractionalPart,
+ canBeNegativeZero,
+ op->max_exponent_);
+}
+
+Range*
+Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ // If either operand is NaN, the result is NaN.
+ if (lhs->canBeNaN() || rhs->canBeNaN())
+ return nullptr;
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
+ rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ ||
+ rhs->canBeNegativeZero_);
+
+ return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
+ lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
+ Min(lhs->upper_, rhs->upper_),
+ lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ Max(lhs->max_exponent_, rhs->max_exponent_));
+}
+
+Range*
+Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs)
+{
+ // If either operand is NaN, the result is NaN.
+ if (lhs->canBeNaN() || rhs->canBeNaN())
+ return nullptr;
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
+ rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ ||
+ rhs->canBeNegativeZero_);
+
+ return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
+ lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
+ Max(lhs->upper_, rhs->upper_),
+ lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ Max(lhs->max_exponent_, rhs->max_exponent_));
+}
+
+Range*
+Range::floor(TempAllocator& alloc, const Range* op)
+{
+ Range* copy = new(alloc) Range(*op);
+    // Decrement the lower bound of the copied range if op has a fractional
+    // part and its lower bound is Int32-defined. We avoid decrementing when
+    // op has a fractional part but lower_ >= JSVAL_INT_MAX.
+ if (op->canHaveFractionalPart() && op->hasInt32LowerBound())
+ copy->setLowerInit(int64_t(copy->lower_) - 1);
+
+    // Also refine max_exponent_ because floor may have decremented the int
+    // value. If we have int32-defined bounds, just deduce it from those
+    // bounds. Otherwise the value's max_exponent_ may have changed; since we
+    // want to maintain an over-estimate, we increment it when we can.
+    if (copy->hasInt32Bounds())
+        copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
+    else if (copy->max_exponent_ < MaxFiniteExponent)
+        copy->max_exponent_++;
+
+ copy->canHaveFractionalPart_ = ExcludesFractionalParts;
+ copy->assertInvariants();
+ return copy;
+}
+
+Range*
+Range::ceil(TempAllocator& alloc, const Range* op)
+{
+ Range* copy = new(alloc) Range(*op);
+
+    // We need to refine max_exponent_ because ceil may have incremented the
+    // int value. If we have int32 bounds defined, just deduce it using those
+    // bounds. Otherwise we can simply increment the exponent, as we are
+    // looking to maintain an over-estimate.
+ if (copy->hasInt32Bounds())
+ copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
+ else if (copy->max_exponent_ < MaxFiniteExponent)
+ copy->max_exponent_++;
+
+ copy->canHaveFractionalPart_ = ExcludesFractionalParts;
+ copy->assertInvariants();
+ return copy;
+}
+
+Range*
+Range::sign(TempAllocator& alloc, const Range* op)
+{
+ if (op->canBeNaN())
+ return nullptr;
+
+ return new(alloc) Range(Max(Min(op->lower_, 1), -1),
+ Max(Min(op->upper_, 1), -1),
+ Range::ExcludesFractionalParts,
+ NegativeZeroFlag(op->canBeNegativeZero()),
+ 0);
+}
+
+Range*
+Range::NaNToZero(TempAllocator& alloc, const Range *op)
+{
+ Range* copy = new(alloc) Range(*op);
+ if (copy->canBeNaN()) {
+ copy->max_exponent_ = Range::IncludesInfinity;
+ if (!copy->canBeZero()) {
+ Range zero;
+ zero.setDoubleSingleton(0);
+ copy->unionWith(&zero);
+ }
+ }
+ copy->refineToExcludeNegativeZero();
+ return copy;
+}
+
+bool
+Range::negativeZeroMul(const Range* lhs, const Range* rhs)
+{
+ // The result can only be negative zero if both sides are finite and they
+ // have differing signs.
+ return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
+ (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
+}
+
+bool
+Range::update(const Range* other)
+{
+ bool changed =
+ lower_ != other->lower_ ||
+ hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
+ upper_ != other->upper_ ||
+ hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
+ canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
+ canBeNegativeZero_ != other->canBeNegativeZero_ ||
+ max_exponent_ != other->max_exponent_;
+ if (changed) {
+ lower_ = other->lower_;
+ hasInt32LowerBound_ = other->hasInt32LowerBound_;
+ upper_ = other->upper_;
+ hasInt32UpperBound_ = other->hasInt32UpperBound_;
+ canHaveFractionalPart_ = other->canHaveFractionalPart_;
+ canBeNegativeZero_ = other->canBeNegativeZero_;
+ max_exponent_ = other->max_exponent_;
+ assertInvariants();
+ }
+
+ return changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range Computation for MIR Nodes
+///////////////////////////////////////////////////////////////////////////////
+
+void
+MPhi::computeRange(TempAllocator& alloc)
+{
+ if (type() != MIRType::Int32 && type() != MIRType::Double)
+ return;
+
+ Range* range = nullptr;
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (getOperand(i)->block()->unreachable()) {
+ JitSpew(JitSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
+ continue;
+ }
+
+ // Peek at the pre-bailout range so we can take a short-cut; if any of
+ // the operands has an unknown range, this phi has an unknown range.
+ if (!getOperand(i)->range())
+ return;
+
+ Range input(getOperand(i));
+
+ if (range)
+ range->unionWith(&input);
+ else
+ range = new(alloc) Range(input);
+ }
+
+ setRange(range);
+}
+
+void
+MBeta::computeRange(TempAllocator& alloc)
+{
+ bool emptyRange = false;
+
+ Range opRange(getOperand(0));
+ Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
+ if (emptyRange) {
+ JitSpew(JitSpew_Range, "Marking block for inst %d unreachable", id());
+ block()->setUnreachableUnchecked();
+ } else {
+ setRange(range);
+ }
+}
+
+void
+MConstant::computeRange(TempAllocator& alloc)
+{
+ if (isTypeRepresentableAsDouble()) {
+ double d = numberToDouble();
+ setRange(Range::NewDoubleSingletonRange(alloc, d));
+ } else if (type() == MIRType::Boolean) {
+ bool b = toBoolean();
+ setRange(Range::NewInt32Range(alloc, b, b));
+ }
+}
+
+void
+MCharCodeAt::computeRange(TempAllocator& alloc)
+{
+ // ECMA 262 says that the integer will be non-negative and at most 65535.
+ setRange(Range::NewInt32Range(alloc, 0, 65535));
+}
+
+void
+MClampToUint8::computeRange(TempAllocator& alloc)
+{
+ setRange(Range::NewUInt32Range(alloc, 0, 255));
+}
+
+void
+MBitAnd::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::and_(alloc, &left, &right));
+}
+
+void
+MBitOr::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::or_(alloc, &left, &right));
+}
+
+void
+MBitXor::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::xor_(alloc, &left, &right));
+}
+
+void
+MBitNot::computeRange(TempAllocator& alloc)
+{
+ Range op(getOperand(0));
+ op.wrapAroundToInt32();
+
+ setRange(Range::not_(alloc, &op));
+}
+
+void
+MLsh::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::lsh(alloc, &left, c));
+ return;
+ }
+
+ right.wrapAroundToShiftCount();
+ setRange(Range::lsh(alloc, &left, &right));
+}
+
+void
+MRsh::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::rsh(alloc, &left, c));
+ return;
+ }
+
+ right.wrapAroundToShiftCount();
+ setRange(Range::rsh(alloc, &left, &right));
+}
+
+void
+MUrsh::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+
+ // ursh can be thought of as converting its left operand to uint32, or it
+ // can be thought of as converting its left operand to int32, and then
+ // reinterpreting the int32 bits as a uint32 value. Both approaches yield
+ // the same result. Since we lack support for full uint32 ranges, we use
+ // the second interpretation, though it does cause us to be conservative.
+ left.wrapAroundToInt32();
+ right.wrapAroundToShiftCount();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::ursh(alloc, &left, c));
+ } else {
+ setRange(Range::ursh(alloc, &left, &right));
+ }
+
+ MOZ_ASSERT(range()->lower() >= 0);
+}
+
+void
+MAbs::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
+ return;
+
+ Range other(getOperand(0));
+ Range* next = Range::abs(alloc, &other);
+ if (implicitTruncate_)
+ next->wrapAroundToInt32();
+ setRange(next);
+}
+
+void
+MFloor::computeRange(TempAllocator& alloc)
+{
+ Range other(getOperand(0));
+ setRange(Range::floor(alloc, &other));
+}
+
+void
+MCeil::computeRange(TempAllocator& alloc)
+{
+ Range other(getOperand(0));
+ setRange(Range::ceil(alloc, &other));
+}
+
+void
+MClz::computeRange(TempAllocator& alloc)
+{
+ if (type() != MIRType::Int32)
+ return;
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void
+MCtz::computeRange(TempAllocator& alloc)
+{
+ if (type() != MIRType::Int32)
+ return;
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void
+MPopcnt::computeRange(TempAllocator& alloc)
+{
+ if (type() != MIRType::Int32)
+ return;
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void
+MMinMax::computeRange(TempAllocator& alloc)
+{
+ if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
+ return;
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
+}
+
+void
+MAdd::computeRange(TempAllocator& alloc)
+{
+ if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
+ return;
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ Range* next = Range::add(alloc, &left, &right);
+ if (isTruncated())
+ next->wrapAroundToInt32();
+ setRange(next);
+}
+
+void
+MSub::computeRange(TempAllocator& alloc)
+{
+ if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
+ return;
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ Range* next = Range::sub(alloc, &left, &right);
+ if (isTruncated())
+ next->wrapAroundToInt32();
+ setRange(next);
+}
+
+void
+MMul::computeRange(TempAllocator& alloc)
+{
+ if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
+ return;
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ if (canBeNegativeZero())
+ canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
+ Range* next = Range::mul(alloc, &left, &right);
+ if (!next->canBeNegativeZero())
+ canBeNegativeZero_ = false;
+ // Truncated multiplications could overflow in both directions
+ if (isTruncated())
+ next->wrapAroundToInt32();
+ setRange(next);
+}
+
+void
+MMod::computeRange(TempAllocator& alloc)
+{
+ if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
+ return;
+ Range lhs(getOperand(0));
+ Range rhs(getOperand(1));
+
+ // If either operand is a NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
+ return;
+
+ // If RHS can be zero, the result can be NaN.
+ if (rhs.lower() <= 0 && rhs.upper() >= 0)
+ return;
+
+ // If both operands are non-negative integers, we can optimize this to an
+ // unsigned mod.
+ if (specialization() == MIRType::Int32 && rhs.lower() > 0) {
+ bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
+ rhs.canHaveFractionalPart();
+ // It is not possible to check that lhs.lower() >= 0, since the range
+ // of a ursh with rhs a 0 constant is wrapped around the int32 range in
+ // Range::Range(). However, IsUint32Type() will only return true for
+ // nodes that lie in the range [0, UINT32_MAX].
+ bool hasUint32s = IsUint32Type(getOperand(0)) &&
+ getOperand(1)->type() == MIRType::Int32 &&
+ (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
+ if (!hasDoubles || hasUint32s)
+ unsigned_ = true;
+ }
+
+ // For unsigned mod, we have to convert both operands to unsigned.
+ // Note that we handled the case of a zero rhs above.
+ if (unsigned_) {
+ // The result of an unsigned mod will never be unsigned-greater than
+ // either operand.
+ uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper());
+ uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper());
+
+ // If either range crosses through -1 as a signed value, it could be
+ // the maximum unsigned value when interpreted as unsigned. If the range
+ // doesn't include -1, then the simple max value we computed above is
+ // correct.
+ if (lhs.lower() <= -1 && lhs.upper() >= -1)
+ lhsBound = UINT32_MAX;
+ if (rhs.lower() <= -1 && rhs.upper() >= -1)
+ rhsBound = UINT32_MAX;
+
+ // The result will never be equal to the rhs, and we shouldn't have
+ // any rounding to worry about.
+ MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
+ --rhsBound;
+
+ // This gives us two upper bounds, so we can take the best one.
+ setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
+ return;
+ }
+
+ // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
+ // First, the absolute value of the result will always be less than the
+ // absolute value of rhs. (And if rhs is zero, the result is NaN).
+ int64_t a = Abs<int64_t>(rhs.lower());
+ int64_t b = Abs<int64_t>(rhs.upper());
+ if (a == 0 && b == 0)
+ return;
+ int64_t rhsAbsBound = Max(a, b);
+
+ // If the value is known to be integer, less-than abs(rhs) is equivalent
+ // to less-than-or-equal abs(rhs)-1. This is important for being able to
+ // say that the result of x%256 is an 8-bit unsigned number.
+ if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
+ --rhsAbsBound;
+
+ // Next, the absolute value of the result will never be greater than the
+ // absolute value of lhs.
+ int64_t lhsAbsBound = Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));
+
+ // This gives us two upper bounds, so we can take the best one.
+ int64_t absBound = Min(lhsAbsBound, rhsAbsBound);
+
+ // Now consider the sign of the result.
+ // If lhs is non-negative, the result will be non-negative.
+ // If lhs is non-positive, the result will be non-positive.
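+    // For example, in JS (-7) % 3 evaluates to -1, taking the sign of the
+    // dividend.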
+ int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
+ int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
+
+ Range::FractionalPartFlag newCanHaveFractionalPart =
+ Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
+ rhs.canHaveFractionalPart());
+
+ // If the lhs can have the sign bit set and we can return a zero, it'll be a
+ // negative zero.
+ Range::NegativeZeroFlag newMayIncludeNegativeZero =
+ Range::NegativeZeroFlag(lhs.canHaveSignBitSet());
+
+ setRange(new(alloc) Range(lower, upper,
+ newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ Min(lhs.exponent(), rhs.exponent())));
+}
+
+void
+MDiv::computeRange(TempAllocator& alloc)
+{
+ if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
+ return;
+ Range lhs(getOperand(0));
+ Range rhs(getOperand(1));
+
+ // If either operand is a NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
+ return;
+
+ // Something simple for now: When dividing by a positive rhs, the result
+ // won't be further from zero than lhs.
+ if (lhs.lower() >= 0 && rhs.lower() >= 1) {
+ setRange(new(alloc) Range(0, lhs.upper(),
+ Range::IncludesFractionalParts,
+ Range::IncludesNegativeZero,
+ lhs.exponent()));
+ } else if (unsigned_ && rhs.lower() >= 1) {
+ // We shouldn't set the unsigned flag if the inputs can have
+ // fractional parts.
+ MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
+ // We shouldn't set the unsigned flag if the inputs can be
+ // negative zero.
+ MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
+ // Unsigned division by a non-zero rhs will return a uint32 value.
+ setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
+ }
+}
+
+void
+MSqrt::computeRange(TempAllocator& alloc)
+{
+ Range input(getOperand(0));
+
+    // If the operand is NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!input.hasInt32Bounds())
+ return;
+
+ // Sqrt of a negative non-zero value is NaN.
+ if (input.lower() < 0)
+ return;
+
+ // Something simple for now: When taking the sqrt of a positive value, the
+ // result won't be further from zero than the input.
+ // And, sqrt of an integer may have a fractional part.
+ setRange(new(alloc) Range(0, input.upper(),
+ Range::IncludesFractionalParts,
+ input.canBeNegativeZero(),
+ input.exponent()));
+}
+
+void
+MToDouble::computeRange(TempAllocator& alloc)
+{
+ setRange(new(alloc) Range(getOperand(0)));
+}
+
+void
+MToFloat32::computeRange(TempAllocator& alloc)
+{
+}
+
+void
+MTruncateToInt32::computeRange(TempAllocator& alloc)
+{
+ Range* output = new(alloc) Range(getOperand(0));
+ output->wrapAroundToInt32();
+ setRange(output);
+}
+
+void
+MToInt32::computeRange(TempAllocator& alloc)
+{
+ // No clamping since this computes the range *before* bailouts.
+ setRange(new(alloc) Range(getOperand(0)));
+}
+
+void
+MLimitedTruncate::computeRange(TempAllocator& alloc)
+{
+ Range* output = new(alloc) Range(input());
+ setRange(output);
+}
+
+void
+MFilterTypeSet::computeRange(TempAllocator& alloc)
+{
+ setRange(new(alloc) Range(getOperand(0)));
+}
+
+static Range*
+GetTypedArrayRange(TempAllocator& alloc, Scalar::Type type)
+{
+ switch (type) {
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
+ case Scalar::Uint16:
+ return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
+ case Scalar::Uint32:
+ return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
+
+ case Scalar::Int8:
+ return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
+ case Scalar::Int16:
+ return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
+ case Scalar::Int32:
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+
+ case Scalar::Int64:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ case Scalar::MaxTypedArrayViewType:
+ break;
+ }
+ return nullptr;
+}
+
+void
+MLoadUnboxedScalar::computeRange(TempAllocator& alloc)
+{
+ // We have an Int32 type and if this is a UInt32 load it may produce a value
+ // outside of our range, but we have a bailout to handle those cases.
+ setRange(GetTypedArrayRange(alloc, readType()));
+}
+
+void
+MLoadTypedArrayElementStatic::computeRange(TempAllocator& alloc)
+{
+ // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
+ // don't have to worry about it returning a value outside our type.
+ MOZ_ASSERT(someTypedArray_->as<TypedArrayObject>().type() != Scalar::Uint32);
+
+ setRange(GetTypedArrayRange(alloc, someTypedArray_->as<TypedArrayObject>().type()));
+}
+
+void
+MArrayLength::computeRange(TempAllocator& alloc)
+{
+ // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
+ // nodes when the value is known to be int32 (see the
+ // OBJECT_FLAG_LENGTH_OVERFLOW flag).
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+}
+
+void
+MInitializedLength::computeRange(TempAllocator& alloc)
+{
+ setRange(Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
+}
+
+void
+MTypedArrayLength::computeRange(TempAllocator& alloc)
+{
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+}
+
+void
+MStringLength::computeRange(TempAllocator& alloc)
+{
+ static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
+ "NewUInt32Range requires a uint32 value");
+ setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
+}
+
+void
+MArgumentsLength::computeRange(TempAllocator& alloc)
+{
+    // This is a conservative upper bound on what |TooManyActualArguments|
+ // checks. If exceeded, Ion will not be entered in the first place.
+ static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
+ "NewUInt32Range requires a uint32 value");
+ setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
+}
+
+void
+MBoundsCheck::computeRange(TempAllocator& alloc)
+{
+ // Just transfer the incoming index range to the output. The length() is
+ // also interesting, but it is handled as a bailout check, and we're
+ // computing a pre-bailout range here.
+ setRange(new(alloc) Range(index()));
+}
+
+void
+MArrayPush::computeRange(TempAllocator& alloc)
+{
+ // MArrayPush returns the new array length.
+ setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
+}
+
+void
+MMathFunction::computeRange(TempAllocator& alloc)
+{
+ Range opRange(getOperand(0));
+ switch (function()) {
+ case Sin:
+ case Cos:
+ if (!opRange.canBeInfiniteOrNaN())
+ setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
+ break;
+ case Sign:
+ setRange(Range::sign(alloc, &opRange));
+ break;
+ default:
+ break;
+ }
+}
+
+void
+MRandom::computeRange(TempAllocator& alloc)
+{
+ Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
+
+ // Random never returns negative zero.
+ r->refineToExcludeNegativeZero();
+
+ setRange(r);
+}
+
+void
+MNaNToZero::computeRange(TempAllocator& alloc)
+{
+ Range other(input());
+ setRange(Range::NaNToZero(alloc, &other));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range Analysis
+///////////////////////////////////////////////////////////////////////////////
+
+bool
+RangeAnalysis::analyzeLoop(MBasicBlock* header)
+{
+ MOZ_ASSERT(header->hasUniqueBackedge());
+
+ // Try to compute an upper bound on the number of times the loop backedge
+ // will be taken. Look for tests that dominate the backedge and which have
+ // an edge leaving the loop body.
+ MBasicBlock* backedge = header->backedge();
+
+ // Ignore trivial infinite loops.
+ if (backedge == header)
+ return true;
+
+ bool canOsr;
+ size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
+
+ // Ignore broken loops.
+ if (numBlocks == 0)
+ return true;
+
+ LoopIterationBound* iterationBound = nullptr;
+
+ MBasicBlock* block = backedge;
+ do {
+ BranchDirection direction;
+ MTest* branch = block->immediateDominatorBranch(&direction);
+
+ if (block == block->immediateDominator())
+ break;
+
+ block = block->immediateDominator();
+
+ if (branch) {
+ direction = NegateBranchDirection(direction);
+ MBasicBlock* otherBlock = branch->branchSuccessor(direction);
+ if (!otherBlock->isMarked()) {
+ if (!alloc().ensureBallast())
+ return false;
+ iterationBound =
+ analyzeLoopIterationCount(header, branch, direction);
+ if (iterationBound)
+ break;
+ }
+ }
+ } while (block != header);
+
+ if (!iterationBound) {
+ UnmarkLoopBlocks(graph_, header);
+ return true;
+ }
+
+ if (!loopIterationBounds.append(iterationBound))
+ return false;
+
+#ifdef DEBUG
+ if (JitSpewEnabled(JitSpew_Range)) {
+ Sprinter sp(GetJitContext()->cx);
+ if (!sp.init())
+ return false;
+ iterationBound->boundSum.dump(sp);
+ JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
+ sp.string());
+ }
+#endif
+
+ // Try to compute symbolic bounds for the phi nodes at the head of this
+ // loop, expressed in terms of the iteration bound just computed.
+
+ for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
+ analyzeLoopPhi(header, iterationBound, *iter);
+
+ if (!mir->compilingWasm()) {
+ // Try to hoist any bounds checks from the loop using symbolic bounds.
+
+ Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+ if (!block->isMarked())
+ continue;
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* def = *iter;
+ if (def->isBoundsCheck() && def->isMovable()) {
+ if (!alloc().ensureBallast())
+ return false;
+ if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
+ if (!hoistedChecks.append(def->toBoundsCheck()))
+ return false;
+ }
+ }
+ }
+ }
+
+ // Note: replace all uses of the original bounds check with the
+ // actual index. This is usually done during bounds check elimination,
+ // but in this case it's safe to do it here since the load/store is
+ // definitely not loop-invariant, so we will never move it before
+ // one of the bounds checks we just added.
+ for (size_t i = 0; i < hoistedChecks.length(); i++) {
+ MBoundsCheck* ins = hoistedChecks[i];
+ ins->replaceAllUsesWith(ins->index());
+ ins->block()->discard(ins);
+ }
+ }
+
+ UnmarkLoopBlocks(graph_, header);
+ return true;
+}
+
+// Unbox beta nodes in order to hoist instructions properly, and not be limited
+// by the beta nodes which are added after each branch.
+static inline MDefinition*
+DefinitionOrBetaInputDefinition(MDefinition* ins)
+{
+ while (ins->isBeta())
+ ins = ins->toBeta()->input();
+ return ins;
+}
+
+LoopIterationBound*
+RangeAnalysis::analyzeLoopIterationCount(MBasicBlock* header,
+ MTest* test, BranchDirection direction)
+{
+ SimpleLinearSum lhs(nullptr, 0);
+ MDefinition* rhs;
+ bool lessEqual;
+ if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
+ return nullptr;
+
+ // Ensure the rhs is a loop invariant term.
+ if (rhs && rhs->block()->isMarked()) {
+ if (lhs.term && lhs.term->block()->isMarked())
+ return nullptr;
+ MDefinition* temp = lhs.term;
+ lhs.term = rhs;
+ rhs = temp;
+ if (!SafeSub(0, lhs.constant, &lhs.constant))
+ return nullptr;
+ lessEqual = !lessEqual;
+ }
+
+ MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
+
+ // Ensure the lhs is a phi node from the start of the loop body.
+ if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
+ return nullptr;
+
+ // Check that the value of the lhs changes by a constant amount with each
+ // loop iteration. This requires that the lhs be written in every loop
+ // iteration with a value that is a constant difference from its value at
+ // the start of the iteration.
+
+ if (lhs.term->toPhi()->numOperands() != 2)
+ return nullptr;
+
+ // The first operand of the phi should be the lhs' value at the start of
+ // the first executed iteration, and not a value written which could
+ // replace the second operand below during the middle of execution.
+ MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
+ if (lhsInitial->block()->isMarked())
+ return nullptr;
+
+ // The second operand of the phi should be a value written by an add/sub
+ // in every loop iteration, i.e. in a block which dominates the backedge.
+ MDefinition* lhsWrite =
+ DefinitionOrBetaInputDefinition(lhs.term->toPhi()->getLoopBackedgeOperand());
+ if (!lhsWrite->isAdd() && !lhsWrite->isSub())
+ return nullptr;
+ if (!lhsWrite->block()->isMarked())
+ return nullptr;
+ MBasicBlock* bb = header->backedge();
+ for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
+ if (bb != lhsWrite->block())
+ return nullptr;
+
+ SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
+
+ // Check that the value of the lhs at the backedge is of the form
+ // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
+ // of the iteration, and not that written to lhs in a previous iteration,
+ // as such a previous value could not appear directly in the addition:
+ // it could not be stored in lhs as the lhs add/sub executes in every
+ // iteration, and if it were stored in another variable its use here would
+ // be as an operand to a phi node for that variable.
+ if (lhsModified.term != lhs.term)
+ return nullptr;
+
+ LinearSum iterationBound(alloc());
+ LinearSum currentIteration(alloc());
+
+ if (lhsModified.constant == 1 && !lessEqual) {
+ // The value of lhs is 'initial(lhs) + iterCount' and this will end
+ // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
+ // on the number of backedges executed is:
+ //
+ // initial(lhs) + iterCount + lhsN == rhs
+ // iterCount == rhs - initial(lhs) - lhsN
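+ //
+ // As a concrete illustration (assuming the exit test is recognized in
+ // this form by ExtractLinearInequality): for a counted loop such as
+ // 'for (i = 0; i < n; i++)', lhs.term is the phi for i, lhsN == 0 and
+ // initial(lhs) == 0, while rhs is n. The bound built below is then
+ // n - 0 - 0, i.e. at most n backedges, and currentIteration is i - 0,
+ // the number of backedges already taken.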
+
+ if (rhs) {
+ if (!iterationBound.add(rhs, 1))
+ return nullptr;
+ }
+ if (!iterationBound.add(lhsInitial, -1))
+ return nullptr;
+
+ int32_t lhsConstant;
+ if (!SafeSub(0, lhs.constant, &lhsConstant))
+ return nullptr;
+ if (!iterationBound.add(lhsConstant))
+ return nullptr;
+
+ if (!currentIteration.add(lhs.term, 1))
+ return nullptr;
+ if (!currentIteration.add(lhsInitial, -1))
+ return nullptr;
+ } else if (lhsModified.constant == -1 && lessEqual) {
+ // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
+ // case, an upper bound on the number of backedges executed is:
+ //
+ // initial(lhs) - iterCount + lhsN == rhs
+ // iterCount == initial(lhs) - rhs + lhsN
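+ //
+ // For illustration, a down-counting loop such as
+ // 'for (i = n; i > m; i--)' exits once i <= m, so lhs.term is the phi
+ // for i, lhsN == 0, rhs is m and initial(lhs) is n, giving a bound of
+ // n - m + 0 backedges under the same assumptions as above.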
+
+ if (!iterationBound.add(lhsInitial, 1))
+ return nullptr;
+ if (rhs) {
+ if (!iterationBound.add(rhs, -1))
+ return nullptr;
+ }
+ if (!iterationBound.add(lhs.constant))
+ return nullptr;
+
+ if (!currentIteration.add(lhsInitial, 1))
+ return nullptr;
+ if (!currentIteration.add(lhs.term, -1))
+ return nullptr;
+ } else {
+ return nullptr;
+ }
+
+ return new(alloc()) LoopIterationBound(header, test, iterationBound, currentIteration);
+}
+
+void
+RangeAnalysis::analyzeLoopPhi(MBasicBlock* header, LoopIterationBound* loopBound, MPhi* phi)
+{
+ // Given a bound on the number of backedges taken, compute an upper and
+ // lower bound for a phi node that may change by a constant amount each
+ // iteration. Unlike for the case when computing the iteration bound
+ // itself, the phi does not need to change the same amount every iteration,
+ // but is required to change at most N and be either nondecreasing or
+ // nonincreasing.
+
+ MOZ_ASSERT(phi->numOperands() == 2);
+
+ MDefinition* initial = phi->getLoopPredecessorOperand();
+ if (initial->block()->isMarked())
+ return;
+
+ SimpleLinearSum modified = ExtractLinearSum(phi->getLoopBackedgeOperand());
+
+ if (modified.term != phi || modified.constant == 0)
+ return;
+
+ if (!phi->range())
+ phi->setRange(new(alloc()) Range(phi));
+
+ LinearSum initialSum(alloc());
+ if (!initialSum.add(initial, 1))
+ return;
+
+ // The phi may change by N each iteration, and is either nondecreasing or
+ // nonincreasing. initial(phi) is either a lower or upper bound for the
+ // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
+ // at all points within the loop, provided that loopBound >= 0.
+ //
+ // We are more interested, however, in the bound for phi at points
+ // dominated by the loop bound's test; if the test dominates e.g. a bounds
+ // check we want to hoist from the loop, using the value of the phi at the
+ // head of the loop for this will usually be too imprecise to hoist the
+ // check. These points will execute only if the backedge executes at least
+ // one more time (as the test passed and the test dominates the backedge),
+ // so we know both that loopBound >= 1 and that the phi's value has changed
+ // at most loopBound - 1 times. Thus, another upper or lower bound for the
+ // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
+ // ensure that loopBound >= 0.
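+ //
+ // For illustration, with the bound n computed above for
+ // 'for (i = 0; i < n; i++)' and the phi for i changing by N == 1 each
+ // iteration, initial(phi) == 0 is the symbolic lower bound and
+ // 0 + (n - 1) * 1 == n - 1 is the symbolic upper bound at points
+ // dominated by the test, which is exactly what a bounds check on a[i]
+ // inside the loop body needs.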
+
+ LinearSum limitSum(loopBound->boundSum);
+ if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
+ return;
+
+ int32_t negativeConstant;
+ if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
+ return;
+
+ Range* initRange = initial->range();
+ if (modified.constant > 0) {
+ if (initRange && initRange->hasInt32LowerBound())
+ phi->range()->refineLower(initRange->lower());
+ phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
+ phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
+ } else {
+ if (initRange && initRange->hasInt32UpperBound())
+ phi->range()->refineUpper(initRange->upper());
+ phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
+ phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
+ }
+
+ JitSpew(JitSpew_Range, "added symbolic range on %d", phi->id());
+ SpewRange(phi);
+}
+
+// Whether bound is valid at the specified bounds check instruction in a loop,
+// and may be used to hoist ins.
+static inline bool
+SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins, const SymbolicBound* bound)
+{
+ if (!bound->loop)
+ return true;
+ if (ins->block() == header)
+ return false;
+ MBasicBlock* bb = ins->block()->immediateDominator();
+ while (bb != header && bb != bound->loop->test->block())
+ bb = bb->immediateDominator();
+ return bb == bound->loop->test->block();
+}
+
+bool
+RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header, MBoundsCheck* ins)
+{
+ // The bounds check's length must be loop invariant.
+ MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
+ if (length->block()->isMarked())
+ return false;
+
+ // The bounds check's index should not be loop invariant (else we would
+ // already have hoisted it during LICM).
+ SimpleLinearSum index = ExtractLinearSum(ins->index());
+ if (!index.term || !index.term->block()->isMarked())
+ return false;
+
+ // Check for a symbolic lower and upper bound on the index. If either
+ // condition depends on an iteration bound for the loop, only hoist if
+ // the bounds check is dominated by the iteration bound's test.
+ if (!index.term->range())
+ return false;
+ const SymbolicBound* lower = index.term->range()->symbolicLower();
+ if (!lower || !SymbolicBoundIsValid(header, ins, lower))
+ return false;
+ const SymbolicBound* upper = index.term->range()->symbolicUpper();
+ if (!upper || !SymbolicBoundIsValid(header, ins, upper))
+ return false;
+
+ MBasicBlock* preLoop = header->loopPredecessor();
+ MOZ_ASSERT(!preLoop->isMarked());
+
+ MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
+ if (!lowerTerm)
+ return false;
+
+ MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
+ if (!upperTerm)
+ return false;
+
+ // We are checking that index + indexConstant >= 0, and know that
+ // index >= lowerTerm + lowerConstant. Thus, check that:
+ //
+ // lowerTerm + lowerConstant + indexConstant >= 0
+ // lowerTerm >= -lowerConstant - indexConstant
+
+ int32_t lowerConstant = 0;
+ if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
+ return false;
+ if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
+ return false;
+
+ // We are checking that index < boundsLength, and know that
+ // index <= upperTerm + upperConstant. Thus, check that:
+ //
+ // upperTerm + upperConstant < boundsLength
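+ //
+ // For illustration, for a[i] inside 'for (i = 0; i < n; i++)', using
+ // the symbolic bounds from analyzeLoopPhi (lower 0, upper n - 1), the
+ // hoisted checks in the preheader reduce to '0 >= 0' for the lower
+ // bound and 'n - 1 < length' for the upper bound, both loop invariant,
+ // so the per-iteration bounds check can then be discarded by the
+ // caller.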
+
+ int32_t upperConstant = index.constant;
+ if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
+ return false;
+
+ // Hoist the loop invariant lower bounds checks.
+ MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
+ lowerCheck->setMinimum(lowerConstant);
+ lowerCheck->computeRange(alloc());
+ lowerCheck->collectRangeInfoPreTrunc();
+ preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
+
+ // Hoist the loop invariant upper bounds checks.
+ if (upperTerm != length || upperConstant >= 0) {
+ MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
+ upperCheck->setMinimum(upperConstant);
+ upperCheck->setMaximum(upperConstant);
+ upperCheck->computeRange(alloc());
+ upperCheck->collectRangeInfoPreTrunc();
+ preLoop->insertBefore(preLoop->lastIns(), upperCheck);
+ }
+
+ return true;
+}
+
+bool
+RangeAnalysis::analyze()
+{
+ JitSpew(JitSpew_Range, "Doing range propagation");
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+ // No blocks are supposed to be unreachable, except when we have an OSR
+ // block, in which case the Value Numbering phase adds fixup blocks which
+ // are unreachable.
+ MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
+
+ // If the block's immediate dominator is unreachable, the block is
+ // unreachable. Iterating in RPO, we'll always see the immediate
+ // dominator before the block.
+ if (block->immediateDominator()->unreachable()) {
+ block->setUnreachableUnchecked();
+ continue;
+ }
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* def = *iter;
+ if (!alloc().ensureBallast())
+ return false;
+
+ def->computeRange(alloc());
+ JitSpew(JitSpew_Range, "computing range on %d", def->id());
+ SpewRange(def);
+ }
+
+ // Beta node range analysis may have marked this block unreachable. If
+ // so, it's no longer interesting to continue processing it.
+ if (block->unreachable())
+ continue;
+
+ if (block->isLoopHeader()) {
+ if (!analyzeLoop(block))
+ return false;
+ }
+
+ // First pass at collecting range info - while the beta nodes are still
+ // around and before truncation.
+ for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
+ iter->collectRangeInfoPreTrunc();
+ }
+
+ return true;
+}
+
+bool
+RangeAnalysis::addRangeAssertions()
+{
+ if (!JitOptions.checkRangeAnalysis)
+ return true;
+
+ // Check the computed range for this instruction, if the option is set. Note
+ // that this code is quite invasive; it adds numerous additional
+ // instructions for each MInstruction with a computed range, and it uses
+ // registers, so it also affects register allocation.
+ for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+
+ // Do not add assertions in unreachable blocks.
+ if (block->unreachable())
+ continue;
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* ins = *iter;
+
+ // Perform range checking for all numeric and numeric-like types.
+ if (!IsNumberType(ins->type()) &&
+ ins->type() != MIRType::Boolean &&
+ ins->type() != MIRType::Value)
+ {
+ continue;
+ }
+
+ // MIsNoIter is fused with the MTest that follows it and emitted as
+ // LIsNoIterAndBranch. Skip it to avoid complicating MIsNoIter
+ // lowering.
+ if (ins->isIsNoIter())
+ continue;
+
+ Range r(ins);
+
+ MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
+
+ // Don't insert assertions if there's nothing interesting to assert.
+ if (r.isUnknown() || (ins->type() == MIRType::Int32 && r.isUnknownInt32()))
+ continue;
+
+ // Don't add a use to an instruction that is recovered on bailout.
+ if (ins->isRecoveredOnBailout())
+ continue;
+
+ if (!alloc().ensureBallast())
+ return false;
+ MAssertRange* guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));
+
+ // Beta nodes and interrupt checks are required to be located at the
+ // beginnings of basic blocks, so we must insert range assertions
+ // after any such instructions.
+ MInstruction* insertAt = nullptr;
+ if (block->graph().osrBlock() == block)
+ insertAt = ins->toInstruction();
+ else
+ insertAt = block->safeInsertTop(ins);
+
+ if (insertAt == *iter)
+ block->insertAfter(insertAt, guard);
+ else
+ block->insertBefore(insertAt, guard);
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range based Truncation
+///////////////////////////////////////////////////////////////////////////////
+
+void
+Range::clampToInt32()
+{
+ if (isInt32())
+ return;
+ int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
+ int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
+ setInt32(l, h);
+}
+
+void
+Range::wrapAroundToInt32()
+{
+ if (!hasInt32Bounds()) {
+ setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
+ } else if (canHaveFractionalPart()) {
+ // Clearing the fractional field may provide an opportunity to refine
+ // lower_ or upper_.
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ refineInt32BoundsByExponent(max_exponent_,
+ &lower_, &hasInt32LowerBound_,
+ &upper_, &hasInt32UpperBound_);
+
+ assertInvariants();
+ } else {
+ // If nothing else, we can clear the negative zero flag.
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ }
+ MOZ_ASSERT(isInt32());
+}
+
+void
+Range::wrapAroundToShiftCount()
+{
+ wrapAroundToInt32();
+ if (lower() < 0 || upper() >= 32)
+ setInt32(0, 31);
+}
+
+void
+Range::wrapAroundToBoolean()
+{
+ wrapAroundToInt32();
+ if (!isBoolean())
+ setInt32(0, 1);
+ MOZ_ASSERT(isBoolean());
+}
+
+bool
+MDefinition::needTruncation(TruncateKind kind)
+{
+ // No procedure defined for truncating this instruction.
+ return false;
+}
+
+void
+MDefinition::truncate()
+{
+ MOZ_CRASH("No procedure defined for truncating this instruction.");
+}
+
+bool
+MConstant::needTruncation(TruncateKind kind)
+{
+ return IsFloatingPointType(type());
+}
+
+void
+MConstant::truncate()
+{
+ MOZ_ASSERT(needTruncation(Truncate));
+
+ // Truncate the double to int, since all uses truncate it.
+ int32_t res = ToInt32(numberToDouble());
+ payload_.asBits = 0;
+ payload_.i32 = res;
+ setResultType(MIRType::Int32);
+ if (range())
+ range()->setInt32(res, res);
+}
+
+bool
+MPhi::needTruncation(TruncateKind kind)
+{
+ if (type() == MIRType::Double || type() == MIRType::Int32) {
+ truncateKind_ = kind;
+ return true;
+ }
+
+ return false;
+}
+
+void
+MPhi::truncate()
+{
+ setResultType(MIRType::Int32);
+ if (truncateKind_ >= IndirectTruncate && range())
+ range()->wrapAroundToInt32();
+}
+
+bool
+MAdd::needTruncation(TruncateKind kind)
+{
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void
+MAdd::truncate()
+{
+ MOZ_ASSERT(needTruncation(truncateKind()));
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+ if (truncateKind() >= IndirectTruncate && range())
+ range()->wrapAroundToInt32();
+}
+
+bool
+MSub::needTruncation(TruncateKind kind)
+{
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void
+MSub::truncate()
+{
+ MOZ_ASSERT(needTruncation(truncateKind()));
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+ if (truncateKind() >= IndirectTruncate && range())
+ range()->wrapAroundToInt32();
+}
+
+bool
+MMul::needTruncation(TruncateKind kind)
+{
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void
+MMul::truncate()
+{
+ MOZ_ASSERT(needTruncation(truncateKind()));
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+ if (truncateKind() >= IndirectTruncate) {
+ setCanBeNegativeZero(false);
+ if (range())
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool
+MDiv::needTruncation(TruncateKind kind)
+{
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void
+MDiv::truncate()
+{
+ MOZ_ASSERT(needTruncation(truncateKind()));
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+
+ // Divisions where the lhs and rhs are unsigned and the result is
+ // truncated can be lowered more efficiently.
+ if (unsignedOperands()) {
+ replaceWithUnsignedOperands();
+ unsigned_ = true;
+ }
+}
+
+bool
+MMod::needTruncation(TruncateKind kind)
+{
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void
+MMod::truncate()
+{
+ // As for division, handle unsigned modulus with a truncated result.
+ MOZ_ASSERT(needTruncation(truncateKind()));
+ specialization_ = MIRType::Int32;
+ setResultType(MIRType::Int32);
+
+ if (unsignedOperands()) {
+ replaceWithUnsignedOperands();
+ unsigned_ = true;
+ }
+}
+
+bool
+MToDouble::needTruncation(TruncateKind kind)
+{
+ MOZ_ASSERT(type() == MIRType::Double);
+ setTruncateKind(kind);
+
+ return true;
+}
+
+void
+MToDouble::truncate()
+{
+ MOZ_ASSERT(needTruncation(truncateKind()));
+
+ // We use the return type to flag that this MToDouble should be replaced by
+ // a MTruncateToInt32 when modifying the graph.
+ setResultType(MIRType::Int32);
+ if (truncateKind() >= IndirectTruncate) {
+ if (range())
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool
+MLoadTypedArrayElementStatic::needTruncation(TruncateKind kind)
+{
+ // IndirectTruncate is not possible, since this returns 'undefined'
+ // on an out-of-bounds read. Doing arithmetic on 'undefined' gives wrong
+ // results. So only set infallible if explicitly truncated.
+ if (kind == Truncate)
+ setInfallible();
+
+ return false;
+}
+
+bool
+MLimitedTruncate::needTruncation(TruncateKind kind)
+{
+ setTruncateKind(kind);
+ setResultType(MIRType::Int32);
+ if (kind >= IndirectTruncate && range())
+ range()->wrapAroundToInt32();
+ return false;
+}
+
+bool
+MCompare::needTruncation(TruncateKind kind)
+{
+ // If we're compiling wasm, don't try to optimize the comparison type, as
+ // the code presumably is already using the type it wants. Also, wasm
+ // doesn't support bailouts, so we wouldn't be able to rely on
+ // TruncateAfterBailouts to convert our inputs.
+ if (block()->info().compilingWasm())
+ return false;
+
+ if (!isDoubleComparison())
+ return false;
+
+ // If both operands are naturally in the int32 range, we can convert from
+ // a double comparison to an int32 comparison.
+ if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32())
+ return false;
+
+ return true;
+}
+
+void
+MCompare::truncate()
+{
+ compareType_ = Compare_Int32;
+
+ // Truncating the operands won't change their value because we don't force a
+ // truncation, but it will change their type, which we need because we
+ // now expect integer inputs.
+ truncateOperands_ = true;
+}
+
+MDefinition::TruncateKind
+MDefinition::operandTruncateKind(size_t index) const
+{
+ // Generic routine: We don't know anything.
+ return NoTruncate;
+}
+
+MDefinition::TruncateKind
+MPhi::operandTruncateKind(size_t index) const
+{
+ // The truncation applied to a phi is effectively applied to the phi's
+ // operands.
+ return truncateKind_;
+}
+
+MDefinition::TruncateKind
+MTruncateToInt32::operandTruncateKind(size_t index) const
+{
+ // This operator is an explicit truncate to int32.
+ return Truncate;
+}
+
+MDefinition::TruncateKind
+MBinaryBitwiseInstruction::operandTruncateKind(size_t index) const
+{
+ // The bitwise operators truncate to int32.
+ return Truncate;
+}
+
+MDefinition::TruncateKind
+MLimitedTruncate::operandTruncateKind(size_t index) const
+{
+ return Min(truncateKind(), truncateLimit_);
+}
+
+MDefinition::TruncateKind
+MAdd::operandTruncateKind(size_t index) const
+{
+ // This operator is doing some arithmetic. If its result is truncated,
+ // it's an indirect truncate for its operands.
+ return Min(truncateKind(), IndirectTruncate);
+}
+
+MDefinition::TruncateKind
+MSub::operandTruncateKind(size_t index) const
+{
+ // See the comment in MAdd::operandTruncateKind.
+ return Min(truncateKind(), IndirectTruncate);
+}
+
+MDefinition::TruncateKind
+MMul::operandTruncateKind(size_t index) const
+{
+ // See the comment in MAdd::operandTruncateKind.
+ return Min(truncateKind(), IndirectTruncate);
+}
+
+MDefinition::TruncateKind
+MToDouble::operandTruncateKind(size_t index) const
+{
+ // MToDouble propagates its truncate kind to its operand.
+ return truncateKind();
+}
+
+MDefinition::TruncateKind
+MStoreUnboxedScalar::operandTruncateKind(size_t index) const
+{
+ // Some receiver objects, such as typed arrays, will truncate out-of-range integer inputs.
+ return (truncateInput() && index == 2 && isIntegerWrite()) ? Truncate : NoTruncate;
+}
+
+MDefinition::TruncateKind
+MStoreTypedArrayElementHole::operandTruncateKind(size_t index) const
+{
+ // An integer store truncates the stored value.
+ return index == 3 && isIntegerWrite() ? Truncate : NoTruncate;
+}
+
+MDefinition::TruncateKind
+MStoreTypedArrayElementStatic::operandTruncateKind(size_t index) const
+{
+ // An integer store truncates the stored value.
+ return index == 1 && isIntegerWrite() ? Truncate : NoTruncate;
+}
+
+MDefinition::TruncateKind
+MDiv::operandTruncateKind(size_t index) const
+{
+ return Min(truncateKind(), TruncateAfterBailouts);
+}
+
+MDefinition::TruncateKind
+MMod::operandTruncateKind(size_t index) const
+{
+ return Min(truncateKind(), TruncateAfterBailouts);
+}
+
+MDefinition::TruncateKind
+MCompare::operandTruncateKind(size_t index) const
+{
+ // If we're doing an int32 comparison on operands which were previously
+ // floating-point, convert them!
+ MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
+ return truncateOperands_ ? TruncateAfterBailouts : NoTruncate;
+}
+
+static bool
+TruncateTest(TempAllocator& alloc, MTest* test)
+{
+ // If all possible inputs to the test are either int32 or boolean,
+ // convert those inputs to int32 so that an int32 test can be performed.
+
+ if (test->input()->type() != MIRType::Value)
+ return true;
+
+ if (!test->input()->isPhi() || !test->input()->hasOneDefUse() || test->input()->isImplicitlyUsed())
+ return true;
+
+ MPhi* phi = test->input()->toPhi();
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* def = phi->getOperand(i);
+ if (!def->isBox())
+ return true;
+ MDefinition* inner = def->getOperand(0);
+ if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32)
+ return true;
+ }
+
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* inner = phi->getOperand(i)->getOperand(0);
+ if (inner->type() != MIRType::Int32) {
+ if (!alloc.ensureBallast())
+ return false;
+ MBasicBlock* block = inner->block();
+ inner = MToInt32::New(alloc, inner);
+ block->insertBefore(block->lastIns(), inner->toInstruction());
+ }
+ MOZ_ASSERT(inner->type() == MIRType::Int32);
+ phi->replaceOperand(i, inner);
+ }
+
+ phi->setResultType(MIRType::Int32);
+ return true;
+}
+
+// Truncating an instruction's result is an optimization which implies
+// knowing all uses of that instruction. This implies that if one of
+// the uses got removed, then Range Analysis is no longer allowed to make
+// any modification which can change the result, especially if the
+// result can be observed.
+//
+// This corner case can easily be understood with UCE examples, but it
+// might also happen with type inference assumptions. Note: type
+// inference implicitly adds branches where other types might be
+// flowing in.
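+//
+// For illustration, if a multiplication feeds only a bitand (a truncated
+// use) plus a resume point left over from a branch that UCE removed,
+// truncating the multiplication to int32 could make that resume point
+// observe a wrong value on bailout; the clone created below keeps an
+// untruncated, recover-on-bailout copy for such captured uses.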
+static bool
+CloneForDeadBranches(TempAllocator& alloc, MInstruction* candidate)
+{
+ // Compare returns a boolean so it doesn't have to be recovered on bailout
+ // because the output would remain correct.
+ if (candidate->isCompare())
+ return true;
+
+ MOZ_ASSERT(candidate->canClone());
+ if (!alloc.ensureBallast())
+ return false;
+
+ MDefinitionVector operands(alloc);
+ size_t end = candidate->numOperands();
+ if (!operands.reserve(end))
+ return false;
+ for (size_t i = 0; i < end; ++i)
+ operands.infallibleAppend(candidate->getOperand(i));
+
+ MInstruction* clone = candidate->clone(alloc, operands);
+ clone->setRange(nullptr);
+
+ // Set UseRemoved flag on the cloned instruction in order to chain recover
+ // instruction for the bailout path.
+ clone->setUseRemovedUnchecked();
+
+ candidate->block()->insertBefore(candidate, clone);
+
+ if (!candidate->maybeConstantValue()) {
+ MOZ_ASSERT(clone->canRecoverOnBailout());
+ clone->setRecoveredOnBailout();
+ }
+
+ // Replace the candidate by its recovered on bailout clone within recovered
+ // instructions and resume points operands.
+ for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd(); ) {
+ MUse* use = *i++;
+ MNode* ins = use->consumer();
+ if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout())
+ continue;
+
+ use->replaceProducer(clone);
+ }
+
+ return true;
+}
+
+// Examine all the users of |candidate| and determine the most aggressive
+// truncate kind that satisfies all of them.
+static MDefinition::TruncateKind
+ComputeRequestedTruncateKind(MDefinition* candidate, bool* shouldClone)
+{
+ bool isCapturedResult = false; // Check if used by a recovered instruction or a resume point.
+ bool isObservableResult = false; // Check if it can be read from another frame.
+ bool isRecoverableResult = true; // Check if it can safely be reconstructed.
+ bool hasUseRemoved = candidate->isUseRemoved();
+
+ MDefinition::TruncateKind kind = MDefinition::Truncate;
+ for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) {
+ if (use->consumer()->isResumePoint()) {
+ // Truncation is a destructive optimization; as such, we need to pay
+ // attention to removed branches and prevent destructive
+ // optimizations if we have no alternative. (See the UseRemoved
+ // flag.)
+ isCapturedResult = true;
+ isObservableResult = isObservableResult ||
+ use->consumer()->toResumePoint()->isObservableOperand(*use);
+ isRecoverableResult = isRecoverableResult &&
+ use->consumer()->toResumePoint()->isRecoverableOperand(*use);
+ continue;
+ }
+
+ MDefinition* consumer = use->consumer()->toDefinition();
+ if (consumer->isRecoveredOnBailout()) {
+ isCapturedResult = true;
+ hasUseRemoved = hasUseRemoved || consumer->isUseRemoved();
+ continue;
+ }
+
+ MDefinition::TruncateKind consumerKind = consumer->operandTruncateKind(consumer->indexOf(*use));
+ kind = Min(kind, consumerKind);
+ if (kind == MDefinition::NoTruncate)
+ break;
+ }
+
+ // We cannot do full truncation on guarded instructions.
+ if (candidate->isGuard() || candidate->isGuardRangeBailouts())
+ kind = Min(kind, MDefinition::TruncateAfterBailouts);
+
+ // If the value naturally produces an int32 value (before bailout checks)
+ // that needs no conversion, we don't have to worry about resume points
+ // seeing truncated values.
+ bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
+
+ // If the instruction is explicitly truncated (not indirectly) by all its
+ // uses and if it has no removed uses, then we can safely encode its
+ // truncated result as part of the resume point operands. This is safe,
+ // because even if we resume with a truncated double, the next baseline
+ // instruction operating on this instruction is going to be a no-op.
+ //
+ // Note that if the result can be observed from another frame, then this
+ // optimization is not safe.
+ bool safeToConvert = kind == MDefinition::Truncate && !hasUseRemoved && !isObservableResult;
+
+ // If the candidate instruction appears as operand of a resume point or a
+ // recover instruction, and we have to truncate its result, then we might
+ // have to either recover the result during the bailout, or avoid the
+ // truncation.
+ if (isCapturedResult && needsConversion && !safeToConvert) {
+
+ // If the result can be recovered from all the resume points (not needed
+ // for iterating over the inlined frames), and this instruction can be
+ // recovered on bailout, then we can clone it and use the cloned
+ // instruction to encode the recover instruction. Otherwise, we should
+ // keep the original result and bail out if the value is not in the int32
+ // range.
+ if (!JitOptions.disableRecoverIns && isRecoverableResult && candidate->canRecoverOnBailout())
+ *shouldClone = true;
+ else
+ kind = Min(kind, MDefinition::TruncateAfterBailouts);
+ }
+
+ return kind;
+}
+
+static MDefinition::TruncateKind
+ComputeTruncateKind(MDefinition* candidate, bool* shouldClone)
+{
+ // Compare operations might coerce their inputs to int32 if the ranges
+ // are correct, so we do not need to check if all uses are coerced.
+ if (candidate->isCompare())
+ return MDefinition::TruncateAfterBailouts;
+
+ // Set the truncated flag if range analysis ensures that it has no
+ // rounding errors and no fractional part. Note that we can't use
+ // the MDefinition Range constructor, because we need to know if
+ // the value will have rounding errors before any bailout checks.
+ const Range* r = candidate->range();
+ bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
+
+ // Special case integer division and modulo: a/b can be infinite, and a%b
+ // can be NaN but cannot actually have rounding errors induced by truncation.
+ if ((candidate->isDiv() || candidate->isMod()) &&
+ static_cast<const MBinaryArithInstruction *>(candidate)->specialization() == MIRType::Int32)
+ {
+ canHaveRoundingErrors = false;
+ }
+
+ if (canHaveRoundingErrors)
+ return MDefinition::NoTruncate;
+
+ // Ensure all observable uses are truncated.
+ return ComputeRequestedTruncateKind(candidate, shouldClone);
+}
+
+static void
+RemoveTruncatesOnOutput(MDefinition* truncated)
+{
+ // Compare returns a boolean so it doesn't have any output truncates.
+ if (truncated->isCompare())
+ return;
+
+ MOZ_ASSERT(truncated->type() == MIRType::Int32);
+ MOZ_ASSERT(Range(truncated).isInt32());
+
+ for (MUseDefIterator use(truncated); use; use++) {
+ MDefinition* def = use.def();
+ if (!def->isTruncateToInt32() && !def->isToInt32())
+ continue;
+
+ def->replaceAllUsesWith(truncated);
+ }
+}
+
+static void
+AdjustTruncatedInputs(TempAllocator& alloc, MDefinition* truncated)
+{
+ MBasicBlock* block = truncated->block();
+ for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
+ MDefinition::TruncateKind kind = truncated->operandTruncateKind(i);
+ if (kind == MDefinition::NoTruncate)
+ continue;
+
+ MDefinition* input = truncated->getOperand(i);
+ if (input->type() == MIRType::Int32)
+ continue;
+
+ if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
+ truncated->replaceOperand(i, input->getOperand(0));
+ } else {
+ MInstruction* op;
+ if (kind == MDefinition::TruncateAfterBailouts)
+ op = MToInt32::New(alloc, truncated->getOperand(i));
+ else
+ op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
+
+ if (truncated->isPhi()) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ pred->insertBefore(pred->lastIns(), op);
+ } else {
+ block->insertBefore(truncated->toInstruction(), op);
+ }
+ truncated->replaceOperand(i, op);
+ }
+ }
+
+ if (truncated->isToDouble()) {
+ truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
+ block->discard(truncated->toToDouble());
+ }
+}
+
+// Iterate backward over all instructions and attempt to truncate operations
+// for each instruction which respects the following list of predicates: it has
+// been analyzed by range analysis, the range has no rounding errors, and all
+// of its uses are truncating the result.
+//
+// If the truncation of the operation is successful, then the instruction is
+// queued; the graph is later updated to restore type correctness by
+// converting the operands that need to be truncated.
+//
+// We iterate backward because it is likely that a truncated operation truncates
+// some of its operands.
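+//
+// For illustration, in '(a + b) | 0' the bitor requests Truncate for the add
+// (MBinaryBitwiseInstruction::operandTruncateKind), so, absent other
+// observable uses, the add itself is truncated to an int32 operation and in
+// turn requests IndirectTruncate for a and b (MAdd::operandTruncateKind),
+// which AdjustTruncatedInputs satisfies by inserting MTruncateToInt32
+// conversions where needed.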
+bool
+RangeAnalysis::truncate()
+{
+ JitSpew(JitSpew_Range, "Do range-base truncation (backward loop)");
+
+ // Automatic truncation is disabled for wasm because the truncation logic
+ // is based on IonMonkey, which assumes that we can bail out if the truncation
+ // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
+ // any automatic truncations.
+ MOZ_ASSERT(!mir->compilingWasm());
+
+ Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
+
+ for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
+ for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
+ if (iter->isRecoveredOnBailout())
+ continue;
+
+ if (iter->type() == MIRType::None) {
+ if (iter->isTest()) {
+ if (!TruncateTest(alloc(), iter->toTest()))
+ return false;
+ }
+ continue;
+ }
+
+ // Remember all bitop instructions for folding after range analysis.
+ switch (iter->op()) {
+ case MDefinition::Op_BitAnd:
+ case MDefinition::Op_BitOr:
+ case MDefinition::Op_BitXor:
+ case MDefinition::Op_Lsh:
+ case MDefinition::Op_Rsh:
+ case MDefinition::Op_Ursh:
+ if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter)))
+ return false;
+ break;
+ default:;
+ }
+
+ bool shouldClone = false;
+ MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
+ if (kind == MDefinition::NoTruncate)
+ continue;
+
+ // Range Analysis is sometimes eager to do optimizations, even if we
+ // are not able to truncate an instruction. In such a case, we
+ // speculatively compile the instruction to an int32 instruction
+ // while adding a guard. This is what is implied by
+ // TruncateAfterBailouts.
+ //
+ // If we already experienced an overflow bailout while executing
+ // code within the current JSScript, we no longer attempt to make
+ // this kind of eager optimization.
+ if (kind <= MDefinition::TruncateAfterBailouts && block->info().hadOverflowBailout())
+ continue;
+
+ // Truncate this instruction if possible.
+ if (!iter->needTruncation(kind))
+ continue;
+
+ SpewTruncate(*iter, kind, shouldClone);
+
+ // If needed, clone the current instruction to keep it for the
+ // bailout path. This gives us the ability to truncate instructions
+ // even after the removal of branches.
+ if (shouldClone && !CloneForDeadBranches(alloc(), *iter))
+ return false;
+
+ iter->truncate();
+
+ // Delay updates of inputs/outputs to avoid creating nodes which
+ // would be removed by the truncation of subsequent operations.
+ iter->setInWorklist();
+ if (!worklist.append(*iter))
+ return false;
+ }
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
+ bool shouldClone = false;
+ MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
+ if (kind == MDefinition::NoTruncate)
+ continue;
+
+ // Truncate this phi if possible.
+ if (shouldClone || !iter->needTruncation(kind))
+ continue;
+
+ SpewTruncate(*iter, kind, shouldClone);
+
+ iter->truncate();
+
+ // Delay updates of inputs/outputs to avoid creating nodes which
+ // would be removed by the truncation of subsequent operations.
+ iter->setInWorklist();
+ if (!worklist.append(*iter))
+ return false;
+ }
+ }
+
+ // Update inputs/outputs of truncated instructions.
+ JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
+ while (!worklist.empty()) {
+ if (!alloc().ensureBallast())
+ return false;
+ MDefinition* def = worklist.popCopy();
+ def->setNotInWorklist();
+ RemoveTruncatesOnOutput(def);
+ AdjustTruncatedInputs(alloc(), def);
+ }
+
+ return true;
+}
+
+bool
+RangeAnalysis::removeUnnecessaryBitops()
+{
+ // Note: This operation changes the semantics of the program in a way which
+ // only works with Int32. Recover Instructions added by the Sink phase
+ // expect the MIR Graph to still have valid flow as if these were double
+ // operations instead of Int32 operations. Thus, this phase should be
+ // executed after the Sink phase, and before DCE.
+
+ // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
+ // input. This is done after range analysis rather than during GVN as the
+ // presence of the bitop can change which instructions are truncated.
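+ //
+ // For illustration, if range analysis proves x is already an int32 value,
+ // '(x | 0)' folds back to x via foldUnnecessaryBitop(), and the bitor then
+ // only survives as a recover instruction for bailout paths.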
+ for (size_t i = 0; i < bitops.length(); i++) {
+ MBinaryBitwiseInstruction* ins = bitops[i];
+ if (ins->isRecoveredOnBailout())
+ continue;
+
+ MDefinition* folded = ins->foldUnnecessaryBitop();
+ if (folded != ins) {
+ ins->replaceAllLiveUsesWith(folded);
+ ins->setRecoveredOnBailout();
+ }
+ }
+
+ bitops.clear();
+ return true;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Collect Range information of operands
+///////////////////////////////////////////////////////////////////////////////
+
+void
+MInArray::collectRangeInfoPreTrunc()
+{
+ Range indexRange(index());
+ if (indexRange.isFiniteNonNegative())
+ needsNegativeIntCheck_ = false;
+}
+
+void
+MLoadElementHole::collectRangeInfoPreTrunc()
+{
+ Range indexRange(index());
+ if (indexRange.isFiniteNonNegative()) {
+ needsNegativeIntCheck_ = false;
+ setNotGuard();
+ }
+}
+
+void
+MLoadTypedArrayElementStatic::collectRangeInfoPreTrunc()
+{
+ Range range(ptr());
+
+ if (range.hasInt32LowerBound() && range.hasInt32UpperBound()) {
+ int64_t offset = this->offset();
+ int64_t lower = range.lower() + offset;
+ int64_t upper = range.upper() + offset;
+ int64_t length = this->length();
+ if (lower >= 0 && upper < length)
+ setNeedsBoundsCheck(false);
+ }
+}
+
+void
+MStoreTypedArrayElementStatic::collectRangeInfoPreTrunc()
+{
+ Range range(ptr());
+
+ if (range.hasInt32LowerBound() && range.hasInt32UpperBound()) {
+ int64_t offset = this->offset();
+ int64_t lower = range.lower() + offset;
+ int64_t upper = range.upper() + offset;
+ int64_t length = this->length();
+ if (lower >= 0 && upper < length)
+ setNeedsBoundsCheck(false);
+ }
+}
+
+void
+MClz::collectRangeInfoPreTrunc()
+{
+ Range inputRange(input());
+ if (!inputRange.canBeZero())
+ operandIsNeverZero_ = true;
+}
+
+void
+MCtz::collectRangeInfoPreTrunc()
+{
+ Range inputRange(input());
+ if (!inputRange.canBeZero())
+ operandIsNeverZero_ = true;
+}
+
+void
+MDiv::collectRangeInfoPreTrunc()
+{
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+ // Test if Dividend is non-negative.
+ if (lhsRange.isFiniteNonNegative())
+ canBeNegativeDividend_ = false;
+
+ // Try removing divide by zero check.
+ if (!rhsRange.canBeZero())
+ canBeDivideByZero_ = false;
+
+ // If lhsRange does not contain INT32_MIN, the negative overflow
+ // check can be skipped.
+ if (!lhsRange.contains(INT32_MIN))
+ canBeNegativeOverflow_ = false;
+
+ // Likewise if rhsRange does not contain -1.
+ if (!rhsRange.contains(-1))
+ canBeNegativeOverflow_ = false;
+
+ // If lhsRange does not contain a zero,
+ // the negative zero check can be skipped.
+ if (!lhsRange.canBeZero())
+ canBeNegativeZero_ = false;
+
+ // If rhsRange >= 0, the negative zero check can be skipped.
+ if (rhsRange.isFiniteNonNegative())
+ canBeNegativeZero_ = false;
+}
+
+void
+MMul::collectRangeInfoPreTrunc()
+{
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+ // If lhsRange contains only positive values, we can skip the negative zero check.
+ if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero())
+ setCanBeNegativeZero(false);
+
+ // Likewise for rhsRange.
+ if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero())
+ setCanBeNegativeZero(false);
+
+ // If rhsRange and lhsRange contain only non-negative integers,
+ // we can skip the negative zero check.
+ if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative())
+ setCanBeNegativeZero(false);
+
+ // If rhsRange and lhsRange are both negative, we can skip the negative zero check.
+ if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative())
+ setCanBeNegativeZero(false);
+}
+
+void
+MMod::collectRangeInfoPreTrunc()
+{
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+ if (lhsRange.isFiniteNonNegative())
+ canBeNegativeDividend_ = false;
+ if (!rhsRange.canBeZero())
+ canBeDivideByZero_ = false;
+
+}
+
+void
+MToInt32::collectRangeInfoPreTrunc()
+{
+ Range inputRange(input());
+ if (!inputRange.canBeNegativeZero())
+ canBeNegativeZero_ = false;
+}
+
+void
+MBoundsCheck::collectRangeInfoPreTrunc()
+{
+ Range indexRange(index());
+ Range lengthRange(length());
+ if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound())
+ return;
+ if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN())
+ return;
+
+ int64_t indexLower = indexRange.lower();
+ int64_t indexUpper = indexRange.upper();
+ int64_t lengthLower = lengthRange.lower();
+ int64_t min = minimum();
+ int64_t max = maximum();
+
+ if (indexLower + min >= 0 && indexUpper + max < lengthLower)
+ fallible_ = false;
+}
+
+void
+MBoundsCheckLower::collectRangeInfoPreTrunc()
+{
+ Range indexRange(index());
+ if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_)
+ fallible_ = false;
+}
+
+void
+MCompare::collectRangeInfoPreTrunc()
+{
+ if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN())
+ operandsAreNeverNaN_ = true;
+}
+
+void
+MNot::collectRangeInfoPreTrunc()
+{
+ if (!Range(input()).canBeNaN())
+ operandIsNeverNaN_ = true;
+}
+
+void
+MPowHalf::collectRangeInfoPreTrunc()
+{
+ Range inputRange(input());
+ if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
+ operandIsNeverNegativeInfinity_ = true;
+ if (!inputRange.canBeNegativeZero())
+ operandIsNeverNegativeZero_ = true;
+ if (!inputRange.canBeNaN())
+ operandIsNeverNaN_ = true;
+}
+
+void
+MUrsh::collectRangeInfoPreTrunc()
+{
+ if (specialization_ == MIRType::Int64)
+ return;
+
+ Range lhsRange(lhs()), rhsRange(rhs());
+
+ // As in MUrsh::computeRange(), convert the inputs.
+ lhsRange.wrapAroundToInt32();
+ rhsRange.wrapAroundToShiftCount();
+
+ // If the most significant bit of our result is always going to be zero,
+ // we can optimize by disabling bailout checks for enforcing an int32 range.
+ if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1)
+ bailoutsDisabled_ = true;
+}
+
+static bool
+DoesMaskMatchRange(int32_t mask, Range& range)
+{
+ // Check that the range is non-negative, because a bitand such as `(-3) & 0xff`
+ // can't be eliminated.
+ if (range.lower() >= 0) {
+ MOZ_ASSERT(range.isInt32());
+ // Check that the mask value has all bits set given the range upper bound. Note that the
+ // upper bound does not have to be exactly the mask value. For example, consider `x &
+ // 0xfff` where `x` is a uint8. That expression can still be optimized to `x`.
+ int bits = 1 + FloorLog2(range.upper());
+ uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
+ if ((mask & maskNeeded) == maskNeeded)
+ return true;
+ }
+
+ return false;
+}
+
+void
+MBinaryBitwiseInstruction::collectRangeInfoPreTrunc()
+{
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+ if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
+ DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange))
+ {
+ maskMatchesRightRange = true;
+ }
+
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
+ DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange))
+ {
+ maskMatchesLeftRange = true;
+ }
+}
+
+void
+MNaNToZero::collectRangeInfoPreTrunc()
+{
+ Range inputRange(input());
+
+ if (!inputRange.canBeNaN())
+ operandIsNeverNaN_ = true;
+ if (!inputRange.canBeNegativeZero())
+ operandIsNeverNegativeZero_ = true;
+}
+
+bool
+RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode)
+{
+ *shouldRemoveDeadCode = false;
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+
+ if (!block->unreachable())
+ continue;
+
+ // Filter out unreachable fake entries.
+ if (block->numPredecessors() == 0) {
+ // Ignore fixup blocks added by the Value Numbering phase, in order
+ // to keep the dominator tree as-is when we have OSR blocks which are
+ // no longer reachable from the main entry point of the graph.
+ MOZ_ASSERT(graph_.osrBlock());
+ continue;
+ }
+
+ MControlInstruction* cond = block->getPredecessor(0)->lastIns();
+ if (!cond->isTest())
+ continue;
+
+ // Replace the condition of the test control instruction by a constant
+ // chosen based on which of the successors has the unreachable flag, which
+ // is added by MBeta::computeRange on its own block.
+ MTest* test = cond->toTest();
+ MDefinition* condition = test->input();
+
+ // If the false-branch is unreachable, then the test condition must be true.
+ // If the true-branch is unreachable, then the test condition must be false.
+ MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
+ bool value = block == test->ifFalse();
+ MConstant* constant = MConstant::New(alloc().fallible(), BooleanValue(value));
+ if (!constant)
+ return false;
+
+ condition->setGuardRangeBailoutsUnchecked();
+
+ test->block()->insertBefore(test, constant);
+
+ test->replaceOperand(0, constant);
+ JitSpew(JitSpew_Range, "Update condition of %d to reflect unreachable branches.",
+ test->id());
+
+ *shouldRemoveDeadCode = true;
+ }
+
+ return tryRemovingGuards();
+}
+
+bool
+RangeAnalysis::tryRemovingGuards()
+{
+ MDefinitionVector guards(alloc());
+
+ for (ReversePostorderIterator block = graph_.rpoBegin(); block != graph_.rpoEnd(); block++) {
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ if (!iter->isGuardRangeBailouts())
+ continue;
+
+ iter->setInWorklist();
+ if (!guards.append(*iter))
+ return false;
+ }
+ }
+
+ // Flag all fallible instructions which were indirectly used in the
+ // computation of the condition, such that we do not ignore
+ // bailout-paths which are used to shrink the input range of the
+ // operands of the condition.
+ for (size_t i = 0; i < guards.length(); i++) {
+ MDefinition* guard = guards[i];
+
+ // If this ins is a guard even without guardRangeBailouts,
+ // there is no reason to try removing the guardRangeBailouts flag.
+ guard->setNotGuardRangeBailouts();
+ if (!DeadIfUnused(guard)) {
+ guard->setGuardRangeBailouts();
+ continue;
+ }
+ guard->setGuardRangeBailouts();
+
+ if (!guard->isPhi()) {
+ if (!guard->range())
+ continue;
+
+ // Filter the range of the instruction based on its MIRType.
+ Range typeFilteredRange(guard);
+
+ // If the output range is updated by adding the inner range,
+ // then the MIRType acts as an effectful filter. As we do not know
+ // whether this filtered Range might change the result of the
+ // previous comparison, we have to keep this instruction as a guard
+ // because it has to bail out in order to restrict the Range to its
+ // MIRType.
+ if (typeFilteredRange.update(guard->range()))
+ continue;
+ }
+
+ guard->setNotGuardRangeBailouts();
+
+ // Propagate the guard to its operands.
+ for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
+ MDefinition* operand = guard->getOperand(op);
+
+ // Already marked.
+ if (operand->isInWorklist())
+ continue;
+
+ MOZ_ASSERT(!operand->isGuardRangeBailouts());
+
+ operand->setInWorklist();
+ operand->setGuardRangeBailouts();
+ if (!guards.append(operand))
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < guards.length(); i++) {
+ MDefinition* guard = guards[i];
+ guard->setNotInWorklist();
+ }
+
+ return true;
+}
diff --git a/js/src/jit/RangeAnalysis.h b/js/src/jit/RangeAnalysis.h
new file mode 100644
index 000000000..913f59152
--- /dev/null
+++ b/js/src/jit/RangeAnalysis.h
@@ -0,0 +1,711 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RangeAnalysis_h
+#define jit_RangeAnalysis_h
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/MIR.h"
+
+// windows.h defines those, which messes with the definitions below.
+#undef min
+#undef max
+
+namespace js {
+namespace jit {
+
+class MBasicBlock;
+class MIRGraph;
+
+// An upper bound computed on the number of backedges a loop will take.
+// This count only includes backedges taken while running Ion code: for OSR
+// loops, this will exclude iterations that executed in the interpreter or in
+// baseline compiled code.
+struct LoopIterationBound : public TempObject
+{
+ // Loop for which this bound applies.
+ MBasicBlock* header;
+
+ // Test from which this bound was derived; after executing exactly 'bound'
+ // times this test will exit the loop. Code in the loop body which this
+ // test dominates (will include the backedge) will execute at most 'bound'
+ // times. Other code in the loop will execute at most '1 + Max(bound, 0)'
+ // times.
+ MTest* test;
+
+ // Symbolic bound computed for the number of backedge executions. The terms
+ // in this bound are all loop invariant.
+ LinearSum boundSum;
+
+ // Linear sum for the number of iterations already executed, at the start
+ // of the loop header. This will use loop invariant terms and header phis.
+ LinearSum currentSum;
+
+ LoopIterationBound(MBasicBlock* header, MTest* test, LinearSum boundSum, LinearSum currentSum)
+ : header(header), test(test),
+ boundSum(boundSum), currentSum(currentSum)
+ {
+ }
+};
+
+typedef Vector<LoopIterationBound*, 0, SystemAllocPolicy> LoopIterationBoundVector;
+
+// A symbolic upper or lower bound computed for a term.
+struct SymbolicBound : public TempObject
+{
+ private:
+ SymbolicBound(LoopIterationBound* loop, LinearSum sum)
+ : loop(loop), sum(sum)
+ {
+ }
+
+ public:
+ // Any loop iteration bound from which this was derived.
+ //
+ // If non-nullptr, then 'sum' is only valid within the loop body, at
+ // points dominated by the loop bound's test (see LoopIterationBound).
+ //
+ // If nullptr, then 'sum' is always valid.
+ LoopIterationBound* loop;
+
+ static SymbolicBound* New(TempAllocator& alloc, LoopIterationBound* loop, LinearSum sum) {
+ return new(alloc) SymbolicBound(loop, sum);
+ }
+
+ // Computed symbolic bound, see above.
+ LinearSum sum;
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+};
+
+class RangeAnalysis
+{
+ protected:
+ bool blockDominates(MBasicBlock* b, MBasicBlock* b2);
+ void replaceDominatedUsesWith(MDefinition* orig, MDefinition* dom,
+ MBasicBlock* block);
+
+ protected:
+ MIRGenerator* mir;
+ MIRGraph& graph_;
+ Vector<MBinaryBitwiseInstruction*, 16, SystemAllocPolicy> bitops;
+
+ TempAllocator& alloc() const;
+
+ public:
+ RangeAnalysis(MIRGenerator* mir, MIRGraph& graph) :
+ mir(mir), graph_(graph) {}
+ MOZ_MUST_USE bool addBetaNodes();
+ MOZ_MUST_USE bool analyze();
+ MOZ_MUST_USE bool addRangeAssertions();
+ MOZ_MUST_USE bool removeBetaNodes();
+ MOZ_MUST_USE bool prepareForUCE(bool* shouldRemoveDeadCode);
+ MOZ_MUST_USE bool tryRemovingGuards();
+ MOZ_MUST_USE bool truncate();
+ MOZ_MUST_USE bool removeUnnecessaryBitops();
+
+ // Any iteration bounds discovered for loops in the graph.
+ LoopIterationBoundVector loopIterationBounds;
+
+ private:
+ MOZ_MUST_USE bool analyzeLoop(MBasicBlock* header);
+ LoopIterationBound* analyzeLoopIterationCount(MBasicBlock* header,
+ MTest* test, BranchDirection direction);
+ void analyzeLoopPhi(MBasicBlock* header, LoopIterationBound* loopBound, MPhi* phi);
+ MOZ_MUST_USE bool tryHoistBoundsCheck(MBasicBlock* header, MBoundsCheck* ins);
+};
+
+class Range : public TempObject {
+ public:
+ // Int32 values are signed. INT32_MAX is pow(2,31)-1 and INT32_MIN is -pow(2,31),
+ // so the greatest exponent we need is 31.
+ static const uint16_t MaxInt32Exponent = 31;
+
+ // UInt32 values are unsigned. UINT32_MAX is pow(2,32)-1, so it's the greatest
+ // value that has an exponent of 31.
+ static const uint16_t MaxUInt32Exponent = 31;
+
+ // Maximal exponent under which we have no precision loss on double
+ // operations. A double has 52 bits of mantissa (53 bits of precision), so
+ // 2^53+1 cannot be represented without loss.
+ static const uint16_t MaxTruncatableExponent = mozilla::FloatingPoint<double>::kExponentShift;
+
+ // Maximum exponent for finite values.
+ static const uint16_t MaxFiniteExponent = mozilla::FloatingPoint<double>::kExponentBias;
+
+ // A special exponent value representing all non-NaN values. This
+ // includes finite values and the infinities.
+ static const uint16_t IncludesInfinity = MaxFiniteExponent + 1;
+
+ // A special exponent value representing all possible double-precision
+ // values. This includes finite values, the infinities, and NaNs.
+ static const uint16_t IncludesInfinityAndNaN = UINT16_MAX;
+
+ // This range class uses int32_t ranges, but has several interfaces which
+ // use int64_t, which either holds an int32_t value or one of the following
+ // special values, which denote a value beyond the int32 range and
+ // potentially including infinity or NaN. These special values are
+ // guaranteed to compare greater than, and less than, respectively, any
+ // int32_t value.
+ static const int64_t NoInt32UpperBound = int64_t(JSVAL_INT_MAX) + 1;
+ static const int64_t NoInt32LowerBound = int64_t(JSVAL_INT_MIN) - 1;
+
+ enum FractionalPartFlag {
+ ExcludesFractionalParts = false,
+ IncludesFractionalParts = true
+ };
+ enum NegativeZeroFlag {
+ ExcludesNegativeZero = false,
+ IncludesNegativeZero = true
+ };
+
+ private:
+ // Absolute ranges.
+ //
+ // We represent ranges where the endpoints can be in the set:
+ // {-infty} U [INT_MIN, INT_MAX] U {infty}. A bound of +/-
+ // infty means that the value may have overflowed in that
+ // direction. When computing the range of an integer
+ // instruction, the ranges of the operands can be clamped to
+ // [INT_MIN, INT_MAX], since if they had overflowed they would
+ // no longer be integers. This is important for optimizations
+ // and somewhat subtle.
+ //
+ // N.B.: All of the operations that compute new ranges based
+ // on existing ranges will ignore the hasInt32*Bound_ flags of the
+ // input ranges; that is, they implicitly clamp the ranges of
+ // the inputs to [INT_MIN, INT_MAX]. Therefore, while our range might
+ // be unbounded (and could overflow), when using this information to
+ // propagate through other ranges, we disregard this fact; if that code
+ // executes, then the overflow did not occur, so we may safely assume
+ // that the range is [INT_MIN, INT_MAX] instead.
+ //
+ // To facilitate this trick, we maintain the invariants that:
+ // 1) hasInt32LowerBound_ == false implies lower_ == JSVAL_INT_MIN
+ // 2) hasInt32UpperBound_ == false implies upper_ == JSVAL_INT_MAX
+ //
+ // As a second and less precise range analysis, we represent the maximal
+ // exponent taken by a value. The exponent is calculated by taking the
+ // absolute value and looking at the position of the highest bit. All
+ // exponent computations have to be over-estimations of the actual result.
+ // On Int32 ranges this over-approximation is rectified.
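+ //
+ // For illustration, under these invariants a value known only to be
+ // non-negative is represented with lower_ == 0, upper_ == JSVAL_INT_MAX
+ // and hasInt32UpperBound_ == false, plus a max_exponent_ that
+ // over-estimates its magnitude (e.g. IncludesInfinityAndNaN if nothing
+ // more is known).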
+
+ int32_t lower_;
+ int32_t upper_;
+
+ bool hasInt32LowerBound_;
+ bool hasInt32UpperBound_;
+
+ FractionalPartFlag canHaveFractionalPart_ : 1;
+ NegativeZeroFlag canBeNegativeZero_ : 1;
+ uint16_t max_exponent_;
+
+ // Any symbolic lower or upper bound computed for this term.
+ const SymbolicBound* symbolicLower_;
+ const SymbolicBound* symbolicUpper_;
+
+ // This function simply makes several MOZ_ASSERTs to verify the internal
+ // consistency of this range.
+ void assertInvariants() const {
+ // Basic sanity :).
+ MOZ_ASSERT(lower_ <= upper_);
+
+ // When hasInt32LowerBound_ or hasInt32UpperBound_ are false, we set
+ // lower_ and upper_ to these specific values as it simplifies the
+ // implementation in some places.
+ MOZ_ASSERT_IF(!hasInt32LowerBound_, lower_ == JSVAL_INT_MIN);
+ MOZ_ASSERT_IF(!hasInt32UpperBound_, upper_ == JSVAL_INT_MAX);
+
+ // max_exponent_ must be one of three possible things.
+ MOZ_ASSERT(max_exponent_ <= MaxFiniteExponent ||
+ max_exponent_ == IncludesInfinity ||
+ max_exponent_ == IncludesInfinityAndNaN);
+
+ // Forbid the max_exponent_ field from implying better bounds for
+ // lower_/upper_ fields. We have to add 1 to the max_exponent_ when
+        // canHaveFractionalPart_ is true in order to accommodate
+ // fractional offsets. For example, 2147483647.9 is greater than
+ // INT32_MAX, so a range containing that value will have
+ // hasInt32UpperBound_ set to false, however that value also has
+ // exponent 30, which is strictly less than MaxInt32Exponent. For
+ // another example, 1.9 has an exponent of 0 but requires upper_ to be
+ // at least 2, which has exponent 1.
+ mozilla::DebugOnly<uint32_t> adjustedExponent = max_exponent_ +
+ (canHaveFractionalPart_ ? 1 : 0);
+ MOZ_ASSERT_IF(!hasInt32LowerBound_ || !hasInt32UpperBound_,
+ adjustedExponent >= MaxInt32Exponent);
+ MOZ_ASSERT(adjustedExponent >= mozilla::FloorLog2(mozilla::Abs(upper_)));
+ MOZ_ASSERT(adjustedExponent >= mozilla::FloorLog2(mozilla::Abs(lower_)));
+
+ // The following are essentially static assertions, but FloorLog2 isn't
+ // trivially suitable for constexpr :(.
+ MOZ_ASSERT(mozilla::FloorLog2(JSVAL_INT_MIN) == MaxInt32Exponent);
+ MOZ_ASSERT(mozilla::FloorLog2(JSVAL_INT_MAX) == 30);
+ MOZ_ASSERT(mozilla::FloorLog2(UINT32_MAX) == MaxUInt32Exponent);
+ MOZ_ASSERT(mozilla::FloorLog2(0) == 0);
+ }
+
+ // Set the lower_ and hasInt32LowerBound_ values.
+ void setLowerInit(int64_t x) {
+ if (x > JSVAL_INT_MAX) {
+ lower_ = JSVAL_INT_MAX;
+ hasInt32LowerBound_ = true;
+ } else if (x < JSVAL_INT_MIN) {
+ lower_ = JSVAL_INT_MIN;
+ hasInt32LowerBound_ = false;
+ } else {
+ lower_ = int32_t(x);
+ hasInt32LowerBound_ = true;
+ }
+ }
+ // Set the upper_ and hasInt32UpperBound_ values.
+ void setUpperInit(int64_t x) {
+ if (x > JSVAL_INT_MAX) {
+ upper_ = JSVAL_INT_MAX;
+ hasInt32UpperBound_ = false;
+ } else if (x < JSVAL_INT_MIN) {
+ upper_ = JSVAL_INT_MIN;
+ hasInt32UpperBound_ = true;
+ } else {
+ upper_ = int32_t(x);
+ hasInt32UpperBound_ = true;
+ }
+ }
+
+ // Compute the least exponent value that would be compatible with the
+ // values of lower() and upper().
+ //
+ // Note:
+ // exponent of JSVAL_INT_MIN == 31
+ // exponent of JSVAL_INT_MAX == 30
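+    //
+    // A small worked example (hypothetical bounds, for illustration):
+    //     lower() == -5, upper() == 100
+    //     max == Max(Abs(-5), Abs(100)) == 100
+    //     FloorLog2(100) == 6, so the implied exponent is 6.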
+ uint16_t exponentImpliedByInt32Bounds() const {
+        // The exponent of |max| is the position of its highest set bit, and
+        // the number of bits needed to encode |max| is that exponent plus one.
+ uint32_t max = Max(mozilla::Abs(lower()), mozilla::Abs(upper()));
+ uint16_t result = mozilla::FloorLog2(max);
+ MOZ_ASSERT(result == (max == 0 ? 0 : mozilla::ExponentComponent(double(max))));
+ return result;
+ }
+
+ // When converting a range which contains fractional values to a range
+ // containing only integers, the old max_exponent_ value may imply a better
+ // lower and/or upper bound than was previously available, because they no
+ // longer need to be conservative about fractional offsets and the ends of
+ // the range.
+ //
+ // Given an exponent value and pointers to the lower and upper bound values,
+    // this function refines the lower and upper bound values to the tightest
+ // bound for integer values implied by the exponent.
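+    //
+    // A worked sketch with a hypothetical exponent: for e == 3 the largest
+    // possible absolute value is pow(2, 3 + 1) - 1 == 15, so the upper bound
+    // is clamped to at most 15, the lower bound to at least -15, and both
+    // int32 bound flags are set.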
+ static void refineInt32BoundsByExponent(uint16_t e,
+ int32_t* l, bool* lb,
+ int32_t* h, bool* hb)
+ {
+ if (e < MaxInt32Exponent) {
+ // pow(2, max_exponent_+1)-1 to compute a maximum absolute value.
+ int32_t limit = (uint32_t(1) << (e + 1)) - 1;
+ *h = Min(*h, limit);
+ *l = Max(*l, -limit);
+ *hb = true;
+ *lb = true;
+ }
+ }
+
+ // If the value of any of the fields implies a stronger possible value for
+ // any other field, update that field to the stronger value. The range must
+ // be completely valid before and it is guaranteed to be kept valid.
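+    //
+    // For instance (hypothetical field values): a range with lower_ == 3,
+    // upper_ == 3 that still claims IncludesFractionalParts is tightened to
+    // ExcludesFractionalParts, and a range [1, 5] that still claims
+    // IncludesNegativeZero drops that flag because it cannot contain zero.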
+ void optimize() {
+ assertInvariants();
+
+ if (hasInt32Bounds()) {
+ // Examine lower() and upper(), and if they imply a better exponent
+ // bound than max_exponent_, set that value as the new
+ // max_exponent_.
+ uint16_t newExponent = exponentImpliedByInt32Bounds();
+ if (newExponent < max_exponent_) {
+ max_exponent_ = newExponent;
+ assertInvariants();
+ }
+
+ // If we have a completely precise range, the value is an integer,
+ // since we can only represent integers.
+ if (canHaveFractionalPart_ && lower_ == upper_) {
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ assertInvariants();
+ }
+ }
+
+ // If the range doesn't include zero, it doesn't include negative zero.
+ if (canBeNegativeZero_ && !canBeZero()) {
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ assertInvariants();
+ }
+ }
+
+ // Set the range fields to the given raw values.
+ void rawInitialize(int32_t l, bool lb, int32_t h, bool hb,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero,
+ uint16_t e)
+ {
+ lower_ = l;
+ upper_ = h;
+ hasInt32LowerBound_ = lb;
+ hasInt32UpperBound_ = hb;
+ canHaveFractionalPart_ = canHaveFractionalPart;
+ canBeNegativeZero_ = canBeNegativeZero;
+ max_exponent_ = e;
+ optimize();
+ }
+
+ // Construct a range from the given raw values.
+ Range(int32_t l, bool lb, int32_t h, bool hb,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero,
+ uint16_t e)
+ : symbolicLower_(nullptr),
+ symbolicUpper_(nullptr)
+ {
+ rawInitialize(l, lb, h, hb, canHaveFractionalPart, canBeNegativeZero, e);
+ }
+
+ public:
+ Range()
+ : symbolicLower_(nullptr),
+ symbolicUpper_(nullptr)
+ {
+ setUnknown();
+ }
+
+ Range(int64_t l, int64_t h,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero,
+ uint16_t e)
+ : symbolicLower_(nullptr),
+ symbolicUpper_(nullptr)
+ {
+ set(l, h, canHaveFractionalPart, canBeNegativeZero, e);
+ }
+
+ Range(const Range& other)
+ : lower_(other.lower_),
+ upper_(other.upper_),
+ hasInt32LowerBound_(other.hasInt32LowerBound_),
+ hasInt32UpperBound_(other.hasInt32UpperBound_),
+ canHaveFractionalPart_(other.canHaveFractionalPart_),
+ canBeNegativeZero_(other.canBeNegativeZero_),
+ max_exponent_(other.max_exponent_),
+ symbolicLower_(nullptr),
+ symbolicUpper_(nullptr)
+ {
+ assertInvariants();
+ }
+
+ // Construct a range from the given MDefinition. This differs from the
+ // MDefinition's range() method in that it describes the range of values
+ // *after* any bailout checks.
+ explicit Range(const MDefinition* def);
+
+ static Range* NewInt32Range(TempAllocator& alloc, int32_t l, int32_t h) {
+ return new(alloc) Range(l, h, ExcludesFractionalParts, ExcludesNegativeZero, MaxInt32Exponent);
+ }
+
+ // Construct an int32 range containing just i. This is just a convenience
+ // wrapper around NewInt32Range.
+ static Range* NewInt32SingletonRange(TempAllocator& alloc, int32_t i) {
+ return NewInt32Range(alloc, i, i);
+ }
+
+ static Range* NewUInt32Range(TempAllocator& alloc, uint32_t l, uint32_t h) {
+ // For now, just pass them to the constructor as int64_t values.
+ // They'll become unbounded if they're not in the int32_t range.
+ return new(alloc) Range(l, h, ExcludesFractionalParts, ExcludesNegativeZero, MaxUInt32Exponent);
+ }
+
+ // Construct a range containing values >= l and <= h. Note that this
+ // function treats negative zero as equal to zero, as >= and <= do. If the
+ // range includes zero, it is assumed to include negative zero too.
+ static Range* NewDoubleRange(TempAllocator& alloc, double l, double h) {
+ if (mozilla::IsNaN(l) && mozilla::IsNaN(h))
+ return nullptr;
+
+ Range* r = new(alloc) Range();
+ r->setDouble(l, h);
+ return r;
+ }
+
+ // Construct the strictest possible range containing d, or null if d is NaN.
+ // This function treats negative zero as distinct from zero, since this
+    // makes the strictest possible range containing zero a range which
+ // contains one value rather than two.
+ static Range* NewDoubleSingletonRange(TempAllocator& alloc, double d) {
+ if (mozilla::IsNaN(d))
+ return nullptr;
+
+ Range* r = new(alloc) Range();
+ r->setDoubleSingleton(d);
+ return r;
+ }
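+
+    // A minimal usage sketch of these factory helpers, assuming a
+    // TempAllocator |alloc| is in scope (illustrative only):
+    //
+    //     Range* byteRange = Range::NewInt32Range(alloc, 0, 255);
+    //     Range* one = Range::NewInt32SingletonRange(alloc, 1);
+    //     Range* unit = Range::NewDoubleRange(alloc, 0.0, 1.0);
+    //     // NewDoubleSingletonRange(alloc, d) returns nullptr only for NaN d.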
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+ MOZ_MUST_USE bool update(const Range* other);
+
+ // Unlike the other operations, unionWith is an in-place
+ // modification. This is to avoid a bunch of useless extra
+ // copying when chaining together unions when handling Phi
+ // nodes.
+ void unionWith(const Range* other);
+ static Range* intersect(TempAllocator& alloc, const Range* lhs, const Range* rhs,
+ bool* emptyRange);
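+
+    // A brief sketch of the two flavours above, assuming two ranges |a| and
+    // |b| and a TempAllocator |alloc| (illustrative only):
+    //
+    //     a->unionWith(b);   // |a| is widened in place to cover both inputs.
+    //     bool empty;
+    //     Range* meet = Range::intersect(alloc, a, b, &empty);
+    //     // |meet| may be null; |empty| reports a provably empty intersection.
+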
+ static Range* add(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* sub(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* mul(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* and_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* or_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* not_(TempAllocator& alloc, const Range* op);
+ static Range* lsh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* rsh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* ursh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* abs(TempAllocator& alloc, const Range* op);
+ static Range* min(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* max(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* floor(TempAllocator& alloc, const Range* op);
+ static Range* ceil(TempAllocator& alloc, const Range* op);
+ static Range* sign(TempAllocator& alloc, const Range* op);
+ static Range* NaNToZero(TempAllocator& alloc, const Range* op);
+
+ static MOZ_MUST_USE bool negativeZeroMul(const Range* lhs, const Range* rhs);
+
+ bool isUnknownInt32() const {
+ return isInt32() && lower() == INT32_MIN && upper() == INT32_MAX;
+ }
+
+ bool isUnknown() const {
+ return !hasInt32LowerBound_ &&
+ !hasInt32UpperBound_ &&
+ canHaveFractionalPart_ &&
+ canBeNegativeZero_ &&
+ max_exponent_ == IncludesInfinityAndNaN;
+ }
+
+ bool hasInt32LowerBound() const {
+ return hasInt32LowerBound_;
+ }
+ bool hasInt32UpperBound() const {
+ return hasInt32UpperBound_;
+ }
+
+ // Test whether the value is known to be within [INT32_MIN,INT32_MAX].
+ // Note that this does not necessarily mean the value is an integer.
+ bool hasInt32Bounds() const {
+ return hasInt32LowerBound() && hasInt32UpperBound();
+ }
+
+ // Test whether the value is known to be representable as an int32.
+ bool isInt32() const {
+ return hasInt32Bounds() &&
+ !canHaveFractionalPart_ &&
+ !canBeNegativeZero_;
+ }
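+
+    // For example (illustrative): a range describing doubles in [0.0, 1.0]
+    // has hasInt32Bounds() == true (bounds 0 and 1) but isInt32() == false,
+    // because its values may still have a fractional part.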
+
+ // Test whether the given value is known to be either 0 or 1.
+ bool isBoolean() const {
+ return lower() >= 0 && upper() <= 1 &&
+ !canHaveFractionalPart_ &&
+ !canBeNegativeZero_;
+ }
+
+ bool canHaveRoundingErrors() const {
+ return canHaveFractionalPart_ ||
+ canBeNegativeZero_ ||
+ max_exponent_ >= MaxTruncatableExponent;
+ }
+
+ // Test if an integer x belongs to the range.
+ bool contains(int32_t x) const {
+ return x >= lower_ && x <= upper_;
+ }
+
+ // Test whether the range contains zero (of either sign).
+ bool canBeZero() const {
+ return contains(0);
+ }
+
+ // Test whether the range contains NaN values.
+ bool canBeNaN() const {
+ return max_exponent_ == IncludesInfinityAndNaN;
+ }
+
+ // Test whether the range contains infinities or NaN values.
+ bool canBeInfiniteOrNaN() const {
+ return max_exponent_ >= IncludesInfinity;
+ }
+
+ FractionalPartFlag canHaveFractionalPart() const {
+ return canHaveFractionalPart_;
+ }
+
+ NegativeZeroFlag canBeNegativeZero() const {
+ return canBeNegativeZero_;
+ }
+
+ uint16_t exponent() const {
+ MOZ_ASSERT(!canBeInfiniteOrNaN());
+ return max_exponent_;
+ }
+
+ uint16_t numBits() const {
+ return exponent() + 1; // 2^0 -> 1
+ }
+
+ // Return the lower bound. Asserts that the value has an int32 bound.
+ int32_t lower() const {
+ MOZ_ASSERT(hasInt32LowerBound());
+ return lower_;
+ }
+
+ // Return the upper bound. Asserts that the value has an int32 bound.
+ int32_t upper() const {
+ MOZ_ASSERT(hasInt32UpperBound());
+ return upper_;
+ }
+
+    // Test whether all values in this range are finite and negative.
+ bool isFiniteNegative() const {
+ return upper_ < 0 && !canBeInfiniteOrNaN();
+ }
+
+    // Test whether all values in this range are finite and non-negative.
+ bool isFiniteNonNegative() const {
+ return lower_ >= 0 && !canBeInfiniteOrNaN();
+ }
+
+ // Test whether a value in this range can possibly be a finite
+ // negative value. Note that "negative zero" is not considered negative.
+ bool canBeFiniteNegative() const {
+ return lower_ < 0;
+ }
+
+ // Test whether a value in this range can possibly be a finite
+ // non-negative value.
+ bool canBeFiniteNonNegative() const {
+ return upper_ >= 0;
+ }
+
+ // Test whether a value in this range can have the sign bit set (not
+ // counting NaN, where the sign bit is meaningless).
+ bool canHaveSignBitSet() const {
+ return !hasInt32LowerBound() || canBeFiniteNegative() || canBeNegativeZero();
+ }
+
+ // Set this range to have a lower bound not less than x.
+ void refineLower(int32_t x) {
+ assertInvariants();
+ hasInt32LowerBound_ = true;
+ lower_ = Max(lower_, x);
+ optimize();
+ }
+
+ // Set this range to have an upper bound not greater than x.
+ void refineUpper(int32_t x) {
+ assertInvariants();
+ hasInt32UpperBound_ = true;
+ upper_ = Min(upper_, x);
+ optimize();
+ }
+
+ // Set this range to exclude negative zero.
+ void refineToExcludeNegativeZero() {
+ assertInvariants();
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ optimize();
+ }
+
+ void setInt32(int32_t l, int32_t h) {
+ hasInt32LowerBound_ = true;
+ hasInt32UpperBound_ = true;
+ lower_ = l;
+ upper_ = h;
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ max_exponent_ = exponentImpliedByInt32Bounds();
+ assertInvariants();
+ }
+
+ // Set this range to include values >= l and <= h. Note that this
+ // function treats negative zero as equal to zero, as >= and <= do. If the
+ // range includes zero, it is assumed to include negative zero too.
+ void setDouble(double l, double h);
+
+ // Set this range to the narrowest possible range containing d.
+ // This function treats negative zero as distinct from zero, since this
+    // makes the narrowest possible range containing zero a range which
+ // contains one value rather than two.
+ void setDoubleSingleton(double d);
+
+ void setUnknown() {
+ set(NoInt32LowerBound, NoInt32UpperBound,
+ IncludesFractionalParts,
+ IncludesNegativeZero,
+ IncludesInfinityAndNaN);
+ MOZ_ASSERT(isUnknown());
+ }
+
+ void set(int64_t l, int64_t h,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero,
+ uint16_t e)
+ {
+ max_exponent_ = e;
+ canHaveFractionalPart_ = canHaveFractionalPart;
+ canBeNegativeZero_ = canBeNegativeZero;
+ setLowerInit(l);
+ setUpperInit(h);
+ optimize();
+ }
+
+ // Make the lower end of this range at least INT32_MIN, and make
+ // the upper end of this range at most INT32_MAX.
+ void clampToInt32();
+
+ // If this range exceeds int32_t range, at either or both ends, change
+ // it to int32_t range. Otherwise do nothing.
+ void wrapAroundToInt32();
+
+ // If this range exceeds [0, 32) range, at either or both ends, change
+ // it to the [0, 32) range. Otherwise do nothing.
+ void wrapAroundToShiftCount();
+
+ // If this range exceeds [0, 1] range, at either or both ends, change
+ // it to the [0, 1] range. Otherwise do nothing.
+ void wrapAroundToBoolean();
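+
+    // For example (hypothetical range): used as a shift count, a range of
+    // [16, 40] exceeds [0, 32) and is therefore replaced by [0, 31] by
+    // wrapAroundToShiftCount().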
+
+ const SymbolicBound* symbolicLower() const {
+ return symbolicLower_;
+ }
+ const SymbolicBound* symbolicUpper() const {
+ return symbolicUpper_;
+ }
+
+ void setSymbolicLower(SymbolicBound* bound) {
+ symbolicLower_ = bound;
+ }
+ void setSymbolicUpper(SymbolicBound* bound) {
+ symbolicUpper_ = bound;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RangeAnalysis_h */
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
new file mode 100644
index 000000000..13bf9224b
--- /dev/null
+++ b/js/src/jit/Recover.cpp
@@ -0,0 +1,1694 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Recover.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsmath.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+#include "builtin/RegExp.h"
+#include "builtin/SIMD.h"
+#include "builtin/TypedObject.h"
+
+#include "gc/Heap.h"
+
+#include "jit/JitFrameIterator.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/VMFunctions.h"
+#include "vm/Interpreter.h"
+#include "vm/String.h"
+
+#include "vm/Interpreter-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool
+MNode::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_CRASH("This instruction is not serializable");
+}
+
+void
+RInstruction::readRecoverData(CompactBufferReader& reader, RInstructionStorage* raw)
+{
+ uint32_t op = reader.readUnsigned();
+ switch (Opcode(op)) {
+# define MATCH_OPCODES_(op) \
+ case Recover_##op: \
+ static_assert(sizeof(R##op) <= sizeof(RInstructionStorage), \
+ "Storage space is too small to decode R" #op " instructions."); \
+ new (raw->addr()) R##op(reader); \
+ break;
+
+ RECOVER_OPCODE_LIST(MATCH_OPCODES_)
+# undef MATCH_OPCODES_
+
+ case Recover_Invalid:
+ default:
+ MOZ_CRASH("Bad decoding of the previous instruction?");
+ }
+}
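+
+// For a single opcode such as BitNot, the MATCH_OPCODES_ expansion above is
+// equivalent to the following case (shown here only for illustration):
+//
+//     case Recover_BitNot:
+//       static_assert(sizeof(RBitNot) <= sizeof(RInstructionStorage),
+//                     "Storage space is too small to decode RBitNot instructions.");
+//       new (raw->addr()) RBitNot(reader);
+//       break;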
+
+bool
+MResumePoint::writeRecoverData(CompactBufferWriter& writer) const
+{
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ResumePoint));
+
+ MBasicBlock* bb = block();
+ JSFunction* fun = bb->info().funMaybeLazy();
+ JSScript* script = bb->info().script();
+ uint32_t exprStack = stackDepth() - bb->info().ninvoke();
+
+#ifdef DEBUG
+    // Ensure that all snapshots which are encoded can safely be used for
+ // bailouts.
+ if (GetJitContext()->cx) {
+ uint32_t stackDepth;
+ bool reachablePC;
+ jsbytecode* bailPC = pc();
+
+ if (mode() == MResumePoint::ResumeAfter)
+ bailPC = GetNextPc(pc());
+
+ if (!ReconstructStackDepth(GetJitContext()->cx, script,
+ bailPC, &stackDepth, &reachablePC))
+ {
+ return false;
+ }
+
+ if (reachablePC) {
+ if (JSOp(*bailPC) == JSOP_FUNCALL) {
+                // For fun.call(this, ...); the reconstructed stack depth will
+                // include the |this| value. When inlining, it is not included,
+                // so exprStackSlots will be one less.
+ MOZ_ASSERT(stackDepth - exprStack <= 1);
+ } else if (JSOp(*bailPC) != JSOP_FUNAPPLY &&
+ !IsGetPropPC(bailPC) && !IsSetPropPC(bailPC))
+ {
+                // For fun.apply({}, arguments) the reconstructed stack depth
+                // will be 4, but it could be that we inlined the funapply. In
+                // that case exprStackSlots will hold the real arguments in the
+                // slots and not be 4.
+
+ // With accessors, we have different stack depths depending on
+ // whether or not we inlined the accessor, as the inlined stack
+ // contains a callee function that should never have been there
+ // and we might just be capturing an uneventful property site,
+ // in which case there won't have been any violence.
+ MOZ_ASSERT(exprStack == stackDepth);
+ }
+ }
+ }
+#endif
+
+    // Test that we honor the maximum number of arguments at all times. This is
+    // a sanity check, not an algorithm limit, so the check might be a bit too
+    // loose. +4 to account for scope chain, return value, this value and maybe
+    // arguments_object.
+ MOZ_ASSERT(CountArgSlots(script, fun) < SNAPSHOT_MAX_NARGS + 4);
+
+#ifdef JS_JITSPEW
+ uint32_t implicit = StartArgSlot(script);
+#endif
+ uint32_t formalArgs = CountArgSlots(script, fun);
+ uint32_t nallocs = formalArgs + script->nfixed() + exprStack;
+
+ JitSpew(JitSpew_IonSnapshots, "Starting frame; implicit %u, formals %u, fixed %" PRIuSIZE ", exprs %u",
+ implicit, formalArgs - implicit, script->nfixed(), exprStack);
+
+ uint32_t pcoff = script->pcToOffset(pc());
+ JitSpew(JitSpew_IonSnapshots, "Writing pc offset %u, nslots %u", pcoff, nallocs);
+ writer.writeUnsigned(pcoff);
+ writer.writeUnsigned(nallocs);
+ return true;
+}
+
+RResumePoint::RResumePoint(CompactBufferReader& reader)
+{
+ pcOffset_ = reader.readUnsigned();
+ numOperands_ = reader.readUnsigned();
+ JitSpew(JitSpew_IonSnapshots, "Read RResumePoint (pc offset %u, nslots %u)",
+ pcOffset_, numOperands_);
+}
+
+bool
+RResumePoint::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ MOZ_CRASH("This instruction is not recoverable.");
+}
+
+bool
+MBitNot::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitNot));
+ return true;
+}
+
+RBitNot::RBitNot(CompactBufferReader& reader)
+{ }
+
+bool
+RBitNot::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue operand(cx, iter.read());
+
+ int32_t result;
+ if (!js::BitNot(cx, operand, &result))
+ return false;
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
+
+bool
+MBitAnd::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitAnd));
+ return true;
+}
+
+RBitAnd::RBitAnd(CompactBufferReader& reader)
+{ }
+
+bool
+RBitAnd::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ int32_t result;
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitAnd(cx, lhs, rhs, &result))
+ return false;
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
+
+bool
+MBitOr::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitOr));
+ return true;
+}
+
+RBitOr::RBitOr(CompactBufferReader& reader)
+{}
+
+bool
+RBitOr::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ int32_t result;
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitOr(cx, lhs, rhs, &result))
+ return false;
+
+ RootedValue asValue(cx, js::Int32Value(result));
+ iter.storeInstructionResult(asValue);
+ return true;
+}
+
+bool
+MBitXor::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitXor));
+ return true;
+}
+
+RBitXor::RBitXor(CompactBufferReader& reader)
+{ }
+
+bool
+RBitXor::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+
+ int32_t result;
+ if (!js::BitXor(cx, lhs, rhs, &result))
+ return false;
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
+
+bool
+MLsh::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Lsh));
+ return true;
+}
+
+RLsh::RLsh(CompactBufferReader& reader)
+{}
+
+bool
+RLsh::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ int32_t result;
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitLsh(cx, lhs, rhs, &result))
+ return false;
+
+ RootedValue asValue(cx, js::Int32Value(result));
+ iter.storeInstructionResult(asValue);
+ return true;
+}
+
+bool
+MRsh::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Rsh));
+ return true;
+}
+
+RRsh::RRsh(CompactBufferReader& reader)
+{ }
+
+bool
+RRsh::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ int32_t result;
+ if (!js::BitRsh(cx, lhs, rhs, &result))
+ return false;
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
+
+bool
+MUrsh::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ursh));
+ return true;
+}
+
+RUrsh::RUrsh(CompactBufferReader& reader)
+{ }
+
+bool
+RUrsh::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ RootedValue result(cx);
+ if (!js::UrshOperation(cx, lhs, rhs, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MSignExtend::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_SignExtend));
+ MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
+ writer.writeByte(uint8_t(mode_));
+ return true;
+}
+
+RSignExtend::RSignExtend(CompactBufferReader& reader)
+{
+ mode_ = reader.readByte();
+}
+
+bool
+RSignExtend::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue operand(cx, iter.read());
+
+ int32_t result;
+ switch (MSignExtend::Mode(mode_)) {
+ case MSignExtend::Byte:
+ if (!js::SignExtendOperation<int8_t>(cx, operand, &result))
+ return false;
+ break;
+ case MSignExtend::Half:
+ if (!js::SignExtendOperation<int16_t>(cx, operand, &result))
+ return false;
+ break;
+ }
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
+
+bool
+MAdd::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Add));
+ writer.writeByte(specialization_ == MIRType::Float32);
+ return true;
+}
+
+RAdd::RAdd(CompactBufferReader& reader)
+{
+ isFloatOperation_ = reader.readByte();
+}
+
+bool
+RAdd::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::AddValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MSub::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Sub));
+ writer.writeByte(specialization_ == MIRType::Float32);
+ return true;
+}
+
+RSub::RSub(CompactBufferReader& reader)
+{
+ isFloatOperation_ = reader.readByte();
+}
+
+bool
+RSub::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::SubValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MMul::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Mul));
+ writer.writeByte(specialization_ == MIRType::Float32);
+ MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
+ writer.writeByte(uint8_t(mode_));
+ return true;
+}
+
+RMul::RMul(CompactBufferReader& reader)
+{
+ isFloatOperation_ = reader.readByte();
+ mode_ = reader.readByte();
+}
+
+bool
+RMul::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (MMul::Mode(mode_) == MMul::Normal) {
+ if (!js::MulValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ // MIRType::Float32 is a specialization embedding the fact that the
+ // result is rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
+ return false;
+ } else {
+ MOZ_ASSERT(MMul::Mode(mode_) == MMul::Integer);
+ if (!js::math_imul_handle(cx, lhs, rhs, &result))
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MDiv::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Div));
+ writer.writeByte(specialization_ == MIRType::Float32);
+ return true;
+}
+
+RDiv::RDiv(CompactBufferReader& reader)
+{
+ isFloatOperation_ = reader.readByte();
+}
+
+bool
+RDiv::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::DivValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MMod::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Mod));
+ return true;
+}
+
+RMod::RMod(CompactBufferReader& reader)
+{ }
+
+bool
+RMod::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::ModValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNot::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Not));
+ return true;
+}
+
+RNot::RNot(CompactBufferReader& reader)
+{ }
+
+bool
+RNot::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ result.setBoolean(!ToBoolean(v));
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MConcat::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Concat));
+ return true;
+}
+
+RConcat::RConcat(CompactBufferReader& reader)
+{}
+
+bool
+RConcat::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::AddValues(cx, &lhs, &rhs, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+RStringLength::RStringLength(CompactBufferReader& reader)
+{}
+
+bool
+RStringLength::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!operand.isObject());
+ if (!js::GetLengthProperty(operand, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MStringLength::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringLength));
+ return true;
+}
+
+bool
+MArgumentsLength::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ArgumentsLength));
+ return true;
+}
+
+RArgumentsLength::RArgumentsLength(CompactBufferReader& reader)
+{ }
+
+bool
+RArgumentsLength::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue result(cx);
+
+ result.setInt32(iter.readOuterNumActualArgs());
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MFloor::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Floor));
+ return true;
+}
+
+RFloor::RFloor(CompactBufferReader& reader)
+{ }
+
+bool
+RFloor::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::math_floor_handle(cx, v, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MCeil::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ceil));
+ return true;
+}
+
+RCeil::RCeil(CompactBufferReader& reader)
+{ }
+
+bool
+RCeil::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::math_ceil_handle(cx, v, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MRound::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Round));
+ return true;
+}
+
+RRound::RRound(CompactBufferReader& reader)
+{}
+
+bool
+RRound::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue arg(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!arg.isObject());
+    if (!js::math_round_handle(cx, arg, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MCharCodeAt::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_CharCodeAt));
+ return true;
+}
+
+RCharCodeAt::RCharCodeAt(CompactBufferReader& reader)
+{}
+
+bool
+RCharCodeAt::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedString lhs(cx, iter.read().toString());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::str_charCodeAt_impl(cx, lhs, rhs, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MFromCharCode::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_FromCharCode));
+ return true;
+}
+
+RFromCharCode::RFromCharCode(CompactBufferReader& reader)
+{}
+
+bool
+RFromCharCode::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!operand.isObject());
+ if (!js::str_fromCharCode_one_arg(cx, operand, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MPow::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Pow));
+ return true;
+}
+
+RPow::RPow(CompactBufferReader& reader)
+{ }
+
+bool
+RPow::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue base(cx, iter.read());
+ RootedValue power(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(base.isNumber() && power.isNumber());
+ if (!js::math_pow_handle(cx, base, power, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MPowHalf::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_PowHalf));
+ return true;
+}
+
+RPowHalf::RPowHalf(CompactBufferReader& reader)
+{ }
+
+bool
+RPowHalf::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue base(cx, iter.read());
+ RootedValue power(cx);
+ RootedValue result(cx);
+ power.setNumber(0.5);
+
+ MOZ_ASSERT(base.isNumber());
+ if (!js::math_pow_handle(cx, base, power, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MMinMax::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_MinMax));
+ writer.writeByte(isMax_);
+ return true;
+}
+
+RMinMax::RMinMax(CompactBufferReader& reader)
+{
+ isMax_ = reader.readByte();
+}
+
+bool
+RMinMax::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue a(cx, iter.read());
+ RootedValue b(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::minmax_impl(cx, isMax_, a, b, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MAbs::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Abs));
+ return true;
+}
+
+RAbs::RAbs(CompactBufferReader& reader)
+{ }
+
+bool
+RAbs::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::math_abs_handle(cx, v, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MSqrt::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Sqrt));
+ writer.writeByte(type() == MIRType::Float32);
+ return true;
+}
+
+RSqrt::RSqrt(CompactBufferReader& reader)
+{
+ isFloatOperation_ = reader.readByte();
+}
+
+bool
+RSqrt::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue num(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(num.isNumber());
+ if (!math_sqrt_handle(cx, num, &result))
+ return false;
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MAtan2::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Atan2));
+ return true;
+}
+
+RAtan2::RAtan2(CompactBufferReader& reader)
+{ }
+
+bool
+RAtan2::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue y(cx, iter.read());
+ RootedValue x(cx, iter.read());
+ RootedValue result(cx);
+
+    if (!math_atan2_handle(cx, y, x, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MHypot::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Hypot));
+ writer.writeUnsigned(uint32_t(numOperands()));
+ return true;
+}
+
+RHypot::RHypot(CompactBufferReader& reader)
+ : numOperands_(reader.readUnsigned())
+{ }
+
+bool
+RHypot::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ JS::AutoValueVector vec(cx);
+
+ if (!vec.reserve(numOperands_))
+ return false;
+
+    for (uint32_t i = 0; i < numOperands_; ++i)
+ vec.infallibleAppend(iter.read());
+
+ RootedValue result(cx);
+
+    if (!js::math_hypot_handle(cx, vec, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MMathFunction::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ switch (function_) {
+ case Round:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Round));
+ return true;
+ case Sin:
+ case Log:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_MathFunction));
+ writer.writeByte(function_);
+ return true;
+ default:
+ MOZ_CRASH("Unknown math function.");
+ }
+}
+
+RMathFunction::RMathFunction(CompactBufferReader& reader)
+{
+ function_ = reader.readByte();
+}
+
+bool
+RMathFunction::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ switch (function_) {
+ case MMathFunction::Sin: {
+ RootedValue arg(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::math_sin_handle(cx, arg, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+ }
+ case MMathFunction::Log: {
+ RootedValue arg(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::math_log_handle(cx, arg, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+ }
+ default:
+ MOZ_CRASH("Unknown math function.");
+ }
+}
+
+bool
+MRandom::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(this->canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Random));
+ return true;
+}
+
+RRandom::RRandom(CompactBufferReader& reader)
+{}
+
+bool
+RRandom::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ iter.storeInstructionResult(DoubleValue(math_random_impl(cx)));
+ return true;
+}
+
+bool
+MStringSplit::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringSplit));
+ return true;
+}
+
+RStringSplit::RStringSplit(CompactBufferReader& reader)
+{}
+
+bool
+RStringSplit::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedString str(cx, iter.read().toString());
+ RootedString sep(cx, iter.read().toString());
+ RootedObjectGroup group(cx, iter.read().toObject().group());
+ RootedValue result(cx);
+
+ JSObject* res = str_split_string(cx, group, str, sep, INT32_MAX);
+ if (!res)
+ return false;
+
+ result.setObject(*res);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNaNToZero::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NaNToZero));
+ return true;
+}
+
+RNaNToZero::RNaNToZero(CompactBufferReader& reader)
+{ }
+
+bool
+RNaNToZero::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+ MOZ_ASSERT(v.isDouble() || v.isInt32());
+
+ // x ? x : 0.0
+ if (ToBoolean(v))
+ result = v;
+ else
+ result.setDouble(0.0);
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MRegExpMatcher::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_RegExpMatcher));
+ return true;
+}
+
+RRegExpMatcher::RRegExpMatcher(CompactBufferReader& reader)
+{}
+
+bool
+RRegExpMatcher::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject regexp(cx, &iter.read().toObject());
+ RootedString input(cx, iter.read().toString());
+ int32_t lastIndex = iter.read().toInt32();
+
+ RootedValue result(cx);
+ if (!RegExpMatcherRaw(cx, regexp, input, lastIndex, nullptr, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MRegExpSearcher::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_RegExpSearcher));
+ return true;
+}
+
+RRegExpSearcher::RRegExpSearcher(CompactBufferReader& reader)
+{}
+
+bool
+RRegExpSearcher::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject regexp(cx, &iter.read().toObject());
+ RootedString input(cx, iter.read().toString());
+ int32_t lastIndex = iter.read().toInt32();
+
+ int32_t result;
+ if (!RegExpSearcherRaw(cx, regexp, input, lastIndex, nullptr, &result))
+ return false;
+
+ RootedValue resultVal(cx);
+ resultVal.setInt32(result);
+ iter.storeInstructionResult(resultVal);
+ return true;
+}
+
+bool
+MRegExpTester::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_RegExpTester));
+ return true;
+}
+
+RRegExpTester::RRegExpTester(CompactBufferReader& reader)
+{ }
+
+bool
+RRegExpTester::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedString string(cx, iter.read().toString());
+ RootedObject regexp(cx, &iter.read().toObject());
+ int32_t lastIndex = iter.read().toInt32();
+ int32_t endIndex;
+
+ if (!js::RegExpTesterRaw(cx, regexp, string, lastIndex, &endIndex))
+ return false;
+
+ RootedValue result(cx);
+ result.setInt32(endIndex);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MTypeOf::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_TypeOf));
+ return true;
+}
+
+RTypeOf::RTypeOf(CompactBufferReader& reader)
+{ }
+
+bool
+RTypeOf::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+
+ RootedValue result(cx, StringValue(TypeOfOperation(v, cx->runtime())));
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MToDouble::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ToDouble));
+ return true;
+}
+
+RToDouble::RToDouble(CompactBufferReader& reader)
+{ }
+
+bool
+RToDouble::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!v.isObject());
+ MOZ_ASSERT(!v.isSymbol());
+
+ double dbl;
+ if (!ToNumber(cx, v, &dbl))
+ return false;
+
+ result.setDouble(dbl);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MToFloat32::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ToFloat32));
+ return true;
+}
+
+RToFloat32::RToFloat32(CompactBufferReader& reader)
+{ }
+
+bool
+RToFloat32::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!v.isObject());
+ if (!RoundFloat32(cx, v, &result))
+ return false;
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MTruncateToInt32::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_TruncateToInt32));
+ return true;
+}
+
+RTruncateToInt32::RTruncateToInt32(CompactBufferReader& reader)
+{ }
+
+bool
+RTruncateToInt32::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue value(cx, iter.read());
+ RootedValue result(cx);
+
+ int32_t trunc;
+ if (!JS::ToInt32(cx, value, &trunc))
+ return false;
+
+ result.setInt32(trunc);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNewObject::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewObject));
+ MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
+ writer.writeByte(uint8_t(mode_));
+ return true;
+}
+
+RNewObject::RNewObject(CompactBufferReader& reader)
+{
+ mode_ = MNewObject::Mode(reader.readByte());
+}
+
+bool
+RNewObject::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject templateObject(cx, &iter.read().toObject());
+ RootedValue result(cx);
+ JSObject* resultObject = nullptr;
+
+ // See CodeGenerator::visitNewObjectVMCall
+ switch (mode_) {
+ case MNewObject::ObjectLiteral:
+ resultObject = NewObjectOperationWithTemplate(cx, templateObject);
+ break;
+ case MNewObject::ObjectCreate:
+ resultObject = ObjectCreateWithTemplate(cx, templateObject.as<PlainObject>());
+ break;
+ }
+
+ if (!resultObject)
+ return false;
+
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNewTypedArray::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewTypedArray));
+ return true;
+}
+
+RNewTypedArray::RNewTypedArray(CompactBufferReader& reader)
+{
+}
+
+bool
+RNewTypedArray::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject templateObject(cx, &iter.read().toObject());
+ RootedValue result(cx);
+
+ uint32_t length = templateObject.as<TypedArrayObject>()->length();
+ JSObject* resultObject = TypedArrayCreateWithTemplate(cx, templateObject, length);
+ if (!resultObject)
+ return false;
+
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNewArray::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewArray));
+ writer.writeUnsigned(length());
+ return true;
+}
+
+RNewArray::RNewArray(CompactBufferReader& reader)
+{
+ count_ = reader.readUnsigned();
+}
+
+bool
+RNewArray::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject templateObject(cx, &iter.read().toObject());
+ RootedValue result(cx);
+ RootedObjectGroup group(cx, templateObject->group());
+
+ JSObject* resultObject = NewFullyAllocatedArrayTryUseGroup(cx, group, count_);
+ if (!resultObject)
+ return false;
+
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MNewDerivedTypedObject::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewDerivedTypedObject));
+ return true;
+}
+
+RNewDerivedTypedObject::RNewDerivedTypedObject(CompactBufferReader& reader)
+{ }
+
+bool
+RNewDerivedTypedObject::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ Rooted<TypeDescr*> descr(cx, &iter.read().toObject().as<TypeDescr>());
+ Rooted<TypedObject*> owner(cx, &iter.read().toObject().as<TypedObject>());
+ int32_t offset = iter.read().toInt32();
+
+ JSObject* obj = OutlineTypedObject::createDerived(cx, descr, owner, offset);
+ if (!obj)
+ return false;
+
+ RootedValue result(cx, ObjectValue(*obj));
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MCreateThisWithTemplate::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_CreateThisWithTemplate));
+ return true;
+}
+
+RCreateThisWithTemplate::RCreateThisWithTemplate(CompactBufferReader& reader)
+{
+}
+
+bool
+RCreateThisWithTemplate::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject templateObject(cx, &iter.read().toObject());
+
+ // See CodeGenerator::visitCreateThisWithTemplate
+ JSObject* resultObject = NewObjectOperationWithTemplate(cx, templateObject);
+ if (!resultObject)
+ return false;
+
+ RootedValue result(cx);
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MLambda::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Lambda));
+ return true;
+}
+
+RLambda::RLambda(CompactBufferReader& reader)
+{
+}
+
+bool
+RLambda::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject scopeChain(cx, &iter.read().toObject());
+ RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+
+ JSObject* resultObject = js::Lambda(cx, fun, scopeChain);
+ if (!resultObject)
+ return false;
+
+ RootedValue result(cx);
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MSimdBox::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_SimdBox));
+ static_assert(unsigned(SimdType::Count) < 0x100, "assuming SimdType fits in 8 bits");
+ writer.writeByte(uint8_t(simdType()));
+ return true;
+}
+
+RSimdBox::RSimdBox(CompactBufferReader& reader)
+{
+ type_ = reader.readByte();
+}
+
+bool
+RSimdBox::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ JSObject* resultObject = nullptr;
+ RValueAllocation a = iter.readAllocation();
+ MOZ_ASSERT(iter.allocationReadable(a));
+ MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG, a.fpuReg().isSimd128());
+ const FloatRegisters::RegisterContent* raw = iter.floatAllocationPointer(a);
+ switch (SimdType(type_)) {
+ case SimdType::Bool8x16:
+ resultObject = js::CreateSimd<Bool8x16>(cx, (const Bool8x16::Elem*) raw);
+ break;
+ case SimdType::Int8x16:
+ resultObject = js::CreateSimd<Int8x16>(cx, (const Int8x16::Elem*) raw);
+ break;
+ case SimdType::Uint8x16:
+ resultObject = js::CreateSimd<Uint8x16>(cx, (const Uint8x16::Elem*) raw);
+ break;
+ case SimdType::Bool16x8:
+ resultObject = js::CreateSimd<Bool16x8>(cx, (const Bool16x8::Elem*) raw);
+ break;
+ case SimdType::Int16x8:
+ resultObject = js::CreateSimd<Int16x8>(cx, (const Int16x8::Elem*) raw);
+ break;
+ case SimdType::Uint16x8:
+ resultObject = js::CreateSimd<Uint16x8>(cx, (const Uint16x8::Elem*) raw);
+ break;
+ case SimdType::Bool32x4:
+ resultObject = js::CreateSimd<Bool32x4>(cx, (const Bool32x4::Elem*) raw);
+ break;
+ case SimdType::Int32x4:
+ resultObject = js::CreateSimd<Int32x4>(cx, (const Int32x4::Elem*) raw);
+ break;
+ case SimdType::Uint32x4:
+ resultObject = js::CreateSimd<Uint32x4>(cx, (const Uint32x4::Elem*) raw);
+ break;
+ case SimdType::Float32x4:
+ resultObject = js::CreateSimd<Float32x4>(cx, (const Float32x4::Elem*) raw);
+ break;
+ case SimdType::Float64x2:
+ MOZ_CRASH("NYI, RSimdBox of Float64x2");
+ break;
+ case SimdType::Bool64x2:
+ MOZ_CRASH("NYI, RSimdBox of Bool64x2");
+ break;
+ case SimdType::Count:
+ MOZ_CRASH("RSimdBox of Count is unreachable");
+ }
+
+ if (!resultObject)
+ return false;
+
+ RootedValue result(cx);
+ result.setObject(*resultObject);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MObjectState::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ObjectState));
+ writer.writeUnsigned(numSlots());
+ return true;
+}
+
+RObjectState::RObjectState(CompactBufferReader& reader)
+{
+ numSlots_ = reader.readUnsigned();
+}
+
+bool
+RObjectState::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedObject object(cx, &iter.read().toObject());
+ RootedValue val(cx);
+
+ if (object->is<UnboxedPlainObject>()) {
+ const UnboxedLayout& layout = object->as<UnboxedPlainObject>().layout();
+
+ RootedId id(cx);
+ RootedValue receiver(cx, ObjectValue(*object));
+ const UnboxedLayout::PropertyVector& properties = layout.properties();
+ for (size_t i = 0; i < properties.length(); i++) {
+ val = iter.read();
+
+ // This is the default placeholder value of MObjectState, when no
+ // properties are defined yet.
+ if (val.isUndefined())
+ continue;
+
+ id = NameToId(properties[i].name);
+ ObjectOpResult result;
+
+ // SetProperty can only fail due to OOM.
+ if (!SetProperty(cx, object, id, val, receiver, result))
+ return false;
+ if (!result)
+ return result.reportError(cx, object, id);
+ }
+ } else {
+ RootedNativeObject nativeObject(cx, &object->as<NativeObject>());
+ MOZ_ASSERT(nativeObject->slotSpan() == numSlots());
+
+ for (size_t i = 0; i < numSlots(); i++) {
+ val = iter.read();
+ nativeObject->setSlot(i, val);
+ }
+ }
+
+ val.setObject(*object);
+ iter.storeInstructionResult(val);
+ return true;
+}
+
+bool
+MArrayState::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ArrayState));
+ writer.writeUnsigned(numElements());
+ return true;
+}
+
+RArrayState::RArrayState(CompactBufferReader& reader)
+{
+ numElements_ = reader.readUnsigned();
+}
+
+bool
+RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue result(cx);
+ ArrayObject* object = &iter.read().toObject().as<ArrayObject>();
+ uint32_t initLength = iter.read().toInt32();
+
+ object->setDenseInitializedLength(initLength);
+ for (size_t index = 0; index < numElements(); index++) {
+ Value val = iter.read();
+
+ if (index >= initLength) {
+ MOZ_ASSERT(val.isUndefined());
+ continue;
+ }
+
+ object->initDenseElement(index, val);
+ }
+
+ result.setObject(*object);
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MAssertRecoveredOnBailout::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ MOZ_RELEASE_ASSERT(input()->isRecoveredOnBailout() == mustBeRecovered_,
+ "assertRecoveredOnBailout failed during compilation");
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_AssertRecoveredOnBailout));
+ return true;
+}
+
+RAssertRecoveredOnBailout::RAssertRecoveredOnBailout(CompactBufferReader& reader)
+{ }
+
+bool
+RAssertRecoveredOnBailout::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue result(cx);
+ iter.read(); // skip the unused operand.
+ result.setUndefined();
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool
+MStringReplace::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringReplace));
+ writer.writeByte(isFlatReplacement_);
+ return true;
+}
+
+RStringReplace::RStringReplace(CompactBufferReader& reader)
+{
+ isFlatReplacement_ = reader.readByte();
+}
+
+bool
+RStringReplace::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedString string(cx, iter.read().toString());
+ RootedString pattern(cx, iter.read().toString());
+ RootedString replace(cx, iter.read().toString());
+
+    JSString* result = isFlatReplacement_
+                       ? js::str_flat_replace_string(cx, string, pattern, replace)
+                       : js::str_replace_string_raw(cx, string, pattern, replace);
+
+ if (!result)
+ return false;
+
+ iter.storeInstructionResult(StringValue(result));
+ return true;
+}
+
+bool
+MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const
+{
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_AtomicIsLockFree));
+ return true;
+}
+
+RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader)
+{ }
+
+bool
+RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const
+{
+ RootedValue operand(cx, iter.read());
+ MOZ_ASSERT(operand.isInt32());
+
+ int32_t result;
+ if (!js::AtomicIsLockFree(cx, operand, &result))
+ return false;
+
+ RootedValue rootedResult(cx, js::Int32Value(result));
+ iter.storeInstructionResult(rootedResult);
+ return true;
+}
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
new file mode 100644
index 000000000..0a70c1acc
--- /dev/null
+++ b/js/src/jit/Recover.h
@@ -0,0 +1,692 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Recover_h
+#define jit_Recover_h
+
+#include "mozilla/Attributes.h"
+
+#include "jsarray.h"
+
+#include "jit/MIR.h"
+#include "jit/Snapshots.h"
+
+struct JSContext;
+
+namespace js {
+namespace jit {
+
+// This file contains all recover instructions.
+//
+// A recover instruction is the equivalent of a MIR instruction, executed before
+// the reconstruction of a baseline frame. Recover instructions are used by
+// resume points to fill in the values which are not produced by the code
+// compiled by IonMonkey. For example, if a value is optimized away by
+// IonMonkey, but required by Baseline, then we should have a recover
+// instruction to fill in the missing baseline frame slot.
+//
+// Recover instructions are executed either during a bailout, or under a call
+// when the stack frame is introspected. If the stack is introspected, then any
+// use of a recover instruction must lead to an invalidation of the code.
+//
+// For each MIR instruction where |canRecoverOnBailout| might return true, we
+// have a RInstruction of the same name.
+//
+// Recover instructions are encoded by the code generator into a compact buffer
+// (RecoverWriter). The MIR instruction method |writeRecoverData| should write a
+// tag in the |CompactBufferWriter| which is used by
+// |RInstruction::readRecoverData| to dispatch to the right Recover
+// instruction. Then |writeRecoverData| writes any local fields which are
+// necessary for the execution of the |recover| method. These fields are decoded
+// by the Recover instruction constructor which has a |CompactBufferReader| as
+// argument. The constructor of the Recover instruction should follow the same
+// sequence as the |writeRecoverData| method of the MIR instruction.
+//
+// Recover instructions are decoded by the |SnapshotIterator| (RecoverReader),
+// which is given as an argument to the |recover| methods, in order to read the
+// operands. The number of operands read should be the same as the result of
+// |numOperands|, which corresponds to the number of operands of the MIR
+// instruction. Operands should be decoded in the same order as the operands of
+// the MIR instruction.
+//
+// The result of the |recover| method should either be a failure, or a value
+// stored on the |SnapshotIterator|, by using the |storeInstructionResult|
+// method.
+
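+// For illustration only (a sketch, not part of this header), a hypothetical
+// MFoo/RFoo pair with one operand and one encoded field |count_| would follow
+// the contract above roughly like this:
+//
+//   bool MFoo::writeRecoverData(CompactBufferWriter& writer) const {
+//       MOZ_ASSERT(canRecoverOnBailout());
+//       writer.writeUnsigned(uint32_t(RInstruction::Recover_Foo)); // Tag first.
+//       writer.writeUnsigned(count_);                              // Then local fields.
+//       return true;
+//   }
+//
+//   RFoo::RFoo(CompactBufferReader& reader) {
+//       count_ = reader.readUnsigned();         // Same order as writeRecoverData.
+//   }
+//
+//   bool RFoo::recover(JSContext* cx, SnapshotIterator& iter) const {
+//       RootedValue operand(cx, iter.read());   // One read per MIR operand.
+//       RootedValue result(cx);
+//       // ... compute |result| from |operand| and |count_| ...
+//       iter.storeInstructionResult(result);    // Publish the recovered value.
+//       return true;
+//   }
+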
+#define RECOVER_OPCODE_LIST(_) \
+ _(ResumePoint) \
+ _(BitNot) \
+ _(BitAnd) \
+ _(BitOr) \
+ _(BitXor) \
+ _(Lsh) \
+ _(Rsh) \
+ _(Ursh) \
+ _(SignExtend) \
+ _(Add) \
+ _(Sub) \
+ _(Mul) \
+ _(Div) \
+ _(Mod) \
+ _(Not) \
+ _(Concat) \
+ _(StringLength) \
+ _(ArgumentsLength) \
+ _(Floor) \
+ _(Ceil) \
+ _(Round) \
+ _(CharCodeAt) \
+ _(FromCharCode) \
+ _(Pow) \
+ _(PowHalf) \
+ _(MinMax) \
+ _(Abs) \
+ _(Sqrt) \
+ _(Atan2) \
+ _(Hypot) \
+ _(MathFunction) \
+ _(Random) \
+ _(StringSplit) \
+ _(NaNToZero) \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(RegExpTester) \
+ _(StringReplace) \
+ _(TypeOf) \
+ _(ToDouble) \
+ _(ToFloat32) \
+ _(TruncateToInt32) \
+ _(NewObject) \
+ _(NewTypedArray) \
+ _(NewArray) \
+ _(NewDerivedTypedObject) \
+ _(CreateThisWithTemplate) \
+ _(Lambda) \
+ _(SimdBox) \
+ _(ObjectState) \
+ _(ArrayState) \
+ _(AtomicIsLockFree) \
+ _(AssertRecoveredOnBailout)
+
+class RResumePoint;
+class SnapshotIterator;
+
+class RInstruction
+{
+ public:
+ enum Opcode
+ {
+# define DEFINE_OPCODES_(op) Recover_##op,
+ RECOVER_OPCODE_LIST(DEFINE_OPCODES_)
+# undef DEFINE_OPCODES_
+ Recover_Invalid
+ };
+
+ virtual Opcode opcode() const = 0;
+
+    // Unlike the MIR, there is no need to add more methods, as every other
+    // instruction is well abstracted under the |recover| method.
+ bool isResumePoint() const {
+ return opcode() == Recover_ResumePoint;
+ }
+ inline const RResumePoint* toResumePoint() const;
+
+ // Number of allocations which are encoded in the Snapshot for recovering
+ // the current instruction.
+ virtual uint32_t numOperands() const = 0;
+
+ // Function used to recover the value computed by this instruction. This
+ // function reads its arguments from the allocations listed on the snapshot
+ // iterator and stores its returned value on the snapshot iterator too.
+ virtual MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const = 0;
+
+ // Decode an RInstruction on top of the reserved storage space, based on the
+ // tag written by the writeRecoverData function of the corresponding MIR
+ // instruction.
+ static void readRecoverData(CompactBufferReader& reader, RInstructionStorage* raw);
+};
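+
+// A simplified sketch (not the literal definition, which lives in Recover.cpp)
+// of how readRecoverData dispatches on the tag written by writeRecoverData and
+// constructs the matching recover instruction in-place over |raw|:
+//
+//   uint32_t op = reader.readUnsigned();
+//   switch (Opcode(op)) {
+//     case Recover_BitNot: new (/* storage of |raw| */) RBitNot(reader); break;
+//     // ... one case per entry of RECOVER_OPCODE_LIST ...
+//     default:             MOZ_CRASH("bad recover instruction tag");
+//   }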
+
+#define RINSTRUCTION_HEADER_(op) \
+ private: \
+ friend class RInstruction; \
+ explicit R##op(CompactBufferReader& reader); \
+ \
+ public: \
+ Opcode opcode() const { \
+ return RInstruction::Recover_##op; \
+ }
+
+#define RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp) \
+ RINSTRUCTION_HEADER_(op) \
+ virtual uint32_t numOperands() const { \
+ return numOp; \
+ }
+
+#ifdef DEBUG
+# define RINSTRUCTION_HEADER_NUM_OP_(op, numOp) \
+ RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp) \
+    static_assert(M##op::staticNumOperands == numOp, "The recover instruction's numOperands should match the MIR instruction's numOperands");
+#else
+# define RINSTRUCTION_HEADER_NUM_OP_(op, numOp) \
+ RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp)
+#endif
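+
+// For example, RINSTRUCTION_HEADER_NUM_OP_(BitNot, 1) expands, in non-DEBUG
+// builds, to roughly:
+//
+//   private:
+//     friend class RInstruction;
+//     explicit RBitNot(CompactBufferReader& reader);
+//
+//   public:
+//     Opcode opcode() const {
+//         return RInstruction::Recover_BitNot;
+//     }
+//     virtual uint32_t numOperands() const {
+//         return 1;
+//     }
+//
+// DEBUG builds additionally static_assert that the declared operand count
+// matches MBitNot::staticNumOperands.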
+
+class RResumePoint final : public RInstruction
+{
+ private:
+ uint32_t pcOffset_; // Offset from script->code.
+ uint32_t numOperands_; // Number of slots.
+
+ public:
+ RINSTRUCTION_HEADER_(ResumePoint)
+
+ uint32_t pcOffset() const {
+ return pcOffset_;
+ }
+ virtual uint32_t numOperands() const {
+ return numOperands_;
+ }
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RBitNot final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitNot, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RBitAnd final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitAnd, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RBitOr final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitOr, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RBitXor final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitXor, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RLsh final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Lsh, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRsh final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Rsh, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RUrsh final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Ursh, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RSignExtend final : public RInstruction
+{
+ private:
+ uint8_t mode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(SignExtend, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RAdd final : public RInstruction
+{
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Add, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RSub final : public RInstruction
+{
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Sub, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RMul final : public RInstruction
+{
+ private:
+ bool isFloatOperation_;
+ uint8_t mode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Mul, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RDiv final : public RInstruction
+{
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Div, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RMod final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Mod, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNot final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Not, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RConcat final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Concat, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RStringLength final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringLength, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RArgumentsLength final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ArgumentsLength, 0)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RFloor final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Floor, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RCeil final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Ceil, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRound final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Round, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RCharCodeAt final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(CharCodeAt, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RFromCharCode final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(FromCharCode, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RPow final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Pow, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RPowHalf final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(PowHalf, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RMinMax final : public RInstruction
+{
+ private:
+ bool isMax_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(MinMax, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RAbs final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Abs, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RSqrt final : public RInstruction
+{
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Sqrt, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RAtan2 final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Atan2, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RHypot final : public RInstruction
+{
+ private:
+ uint32_t numOperands_;
+
+ public:
+ RINSTRUCTION_HEADER_(Hypot)
+
+ virtual uint32_t numOperands() const {
+ return numOperands_;
+ }
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RMathFunction final : public RInstruction
+{
+ private:
+ uint8_t function_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(MathFunction, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRandom final : public RInstruction
+{
+  public:
+    RINSTRUCTION_HEADER_NUM_OP_(Random, 0)
+
+    MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RStringSplit final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringSplit, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNaNToZero final : public RInstruction
+{
+ public:
+    RINSTRUCTION_HEADER_NUM_OP_(NaNToZero, 1)
+
+    MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRegExpMatcher final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(RegExpMatcher, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRegExpSearcher final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(RegExpSearcher, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RRegExpTester final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(RegExpTester, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RStringReplace final : public RInstruction
+{
+ private:
+ bool isFlatReplacement_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringReplace, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RTypeOf final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(TypeOf, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RToDouble final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ToDouble, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RToFloat32 final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ToFloat32, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RTruncateToInt32 final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(TruncateToInt32, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNewObject final : public RInstruction
+{
+ private:
+ MNewObject::Mode mode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewObject, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNewTypedArray final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewTypedArray, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNewArray final : public RInstruction
+{
+ private:
+ uint32_t count_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewArray, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RNewDerivedTypedObject final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewDerivedTypedObject, 3)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RCreateThisWithTemplate final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(CreateThisWithTemplate, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RLambda final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Lambda, 2)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RSimdBox final : public RInstruction
+{
+ private:
+ uint8_t type_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(SimdBox, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RObjectState final : public RInstruction
+{
+ private:
+ uint32_t numSlots_; // Number of slots.
+
+ public:
+ RINSTRUCTION_HEADER_(ObjectState)
+
+ uint32_t numSlots() const {
+ return numSlots_;
+ }
+ virtual uint32_t numOperands() const {
+ // +1 for the object.
+ return numSlots() + 1;
+ }
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RArrayState final : public RInstruction
+{
+ private:
+ uint32_t numElements_;
+
+ public:
+ RINSTRUCTION_HEADER_(ArrayState)
+
+ uint32_t numElements() const {
+ return numElements_;
+ }
+ virtual uint32_t numOperands() const {
+ // +1 for the array.
+        // +1 for the initialized length.
+ return numElements() + 2;
+ }
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RAtomicIsLockFree final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(AtomicIsLockFree, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+class RAssertRecoveredOnBailout final : public RInstruction
+{
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(AssertRecoveredOnBailout, 1)
+
+ MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const;
+};
+
+#undef RINSTRUCTION_HEADER_
+#undef RINSTRUCTION_HEADER_NUM_OP_
+#undef RINSTRUCTION_HEADER_NUM_OP_MAIN
+
+const RResumePoint*
+RInstruction::toResumePoint() const
+{
+ MOZ_ASSERT(isResumePoint());
+ return static_cast<const RResumePoint*>(this);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Recover_h */
diff --git a/js/src/jit/RegisterAllocator.cpp b/js/src/jit/RegisterAllocator.cpp
new file mode 100644
index 000000000..0ed1480fe
--- /dev/null
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -0,0 +1,614 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RegisterAllocator.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool
+AllocationIntegrityState::record()
+{
+ // Ignore repeated record() calls.
+ if (!instructions.empty())
+ return true;
+
+ if (!instructions.appendN(InstructionInfo(), graph.numInstructions()))
+ return false;
+
+ if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
+ return false;
+
+ if (!blocks.reserve(graph.numBlocks()))
+ return false;
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ blocks.infallibleAppend(BlockInfo());
+ LBlock* block = graph.getBlock(i);
+ MOZ_ASSERT(block->mir()->id() == i);
+
+ BlockInfo& blockInfo = blocks[i];
+ if (!blockInfo.phis.reserve(block->numPhis()))
+ return false;
+
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ blockInfo.phis.infallibleAppend(InstructionInfo());
+ InstructionInfo& info = blockInfo.phis[j];
+ LPhi* phi = block->getPhi(j);
+ MOZ_ASSERT(phi->numDefs() == 1);
+ uint32_t vreg = phi->getDef(0)->virtualRegister();
+ virtualRegisters[vreg] = phi->getDef(0);
+ if (!info.outputs.append(*phi->getDef(0)))
+ return false;
+ for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
+ if (!info.inputs.append(*phi->getOperand(k)))
+ return false;
+ }
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+ InstructionInfo& info = instructions[ins->id()];
+
+ for (size_t k = 0; k < ins->numTemps(); k++) {
+ if (!ins->getTemp(k)->isBogusTemp()) {
+ uint32_t vreg = ins->getTemp(k)->virtualRegister();
+ virtualRegisters[vreg] = ins->getTemp(k);
+ }
+ if (!info.temps.append(*ins->getTemp(k)))
+ return false;
+ }
+ for (size_t k = 0; k < ins->numDefs(); k++) {
+ if (!ins->getDef(k)->isBogusTemp()) {
+ uint32_t vreg = ins->getDef(k)->virtualRegister();
+ virtualRegisters[vreg] = ins->getDef(k);
+ }
+ if (!info.outputs.append(*ins->getDef(k)))
+ return false;
+ }
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ if (!info.inputs.append(**alloc))
+ return false;
+ }
+ }
+ }
+
+ return seen.init();
+}
+
+bool
+AllocationIntegrityState::check(bool populateSafepoints)
+{
+ MOZ_ASSERT(!instructions.empty());
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc))
+ dump();
+#endif
+#ifdef DEBUG
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+
+ // Check that all instruction inputs and outputs have been assigned an allocation.
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
+ MOZ_ASSERT(!alloc->isUse());
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ MOZ_ASSERT(!def->output()->isUse());
+
+ LDefinition oldDef = instructions[ins->id()].outputs[i];
+ MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
+ *def->output() == *ins->getOperand(oldDef.getReusedInput()));
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
+
+ LDefinition oldTemp = instructions[ins->id()].temps[i];
+ MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
+ *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
+ }
+ }
+ }
+#endif
+
+ // Check that the register assignment and move groups preserve the original
+ // semantics of the virtual registers. Each virtual register has a single
+ // write (owing to the SSA representation), but the allocation may move the
+ // written value around between registers and memory locations along
+ // different paths through the script.
+ //
+ // For each use of an allocation, follow the physical value which is read
+ // backward through the script, along all paths to the value's virtual
+ // register's definition.
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+ const InstructionInfo& info = instructions[ins->id()];
+
+ LSafepoint* safepoint = ins->safepoint();
+ if (safepoint) {
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (ins->getTemp(i)->isBogusTemp())
+ continue;
+ uint32_t vreg = info.temps[i].virtualRegister();
+ LAllocation* alloc = ins->getTemp(i)->output();
+ if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
+ return false;
+ }
+ MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints,
+ safepoint->liveRegs().emptyFloat() &&
+ safepoint->liveRegs().emptyGeneral());
+ }
+
+ size_t inputIndex = 0;
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ LAllocation oldInput = info.inputs[inputIndex++];
+ if (!oldInput.isUse())
+ continue;
+
+ uint32_t vreg = oldInput.toUse()->virtualRegister();
+
+ if (safepoint && !oldInput.toUse()->usedAtStart()) {
+ if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
+ return false;
+ }
+
+ // Start checking at the previous instruction, in case this
+ // instruction reuses its input register for an output.
+ LInstructionReverseIterator riter = block->rbegin(ins);
+ riter++;
+ if (!checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints))
+ return false;
+
+ while (!worklist.empty()) {
+ IntegrityItem item = worklist.popCopy();
+ if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc,
+ populateSafepoints)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool
+AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
+ uint32_t vreg, LAllocation alloc, bool populateSafepoints)
+{
+ for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) {
+ ins = *iter;
+
+ // Follow values through assignments in move groups. All assignments in
+ // a move group are considered to happen simultaneously, so stop after
+ // the first matching move is found.
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+ if (group->getMove(i).to() == alloc) {
+ alloc = group->getMove(i).from();
+ break;
+ }
+ }
+ }
+
+ const InstructionInfo& info = instructions[ins->id()];
+
+ // Make sure the physical location being tracked is not clobbered by
+ // another instruction, and that if the originating vreg definition is
+ // found that it is writing to the tracked location.
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->isBogusTemp())
+ continue;
+ if (info.outputs[i].virtualRegister() == vreg) {
+ MOZ_ASSERT(*def->output() == alloc);
+
+ // Found the original definition, done scanning.
+ return true;
+ } else {
+ MOZ_ASSERT(*def->output() != alloc);
+ }
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp())
+ MOZ_ASSERT(*temp->output() != alloc);
+ }
+
+ if (ins->safepoint()) {
+ if (!checkSafepointAllocation(ins, vreg, alloc, populateSafepoints))
+ return false;
+ }
+ }
+
+ // Phis are effectless, but change the vreg we are tracking. Check if there
+ // is one which produced this vreg. We need to follow back through the phi
+ // inputs as it is not guaranteed the register allocator filled in physical
+ // allocations for the inputs and outputs of the phis.
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
+ LPhi* phi = block->getPhi(i);
+ if (info.outputs[0].virtualRegister() == vreg) {
+ for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
+ uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
+ LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
+ if (!addPredecessor(predecessor, newvreg, alloc))
+ return false;
+ }
+ return true;
+ }
+ }
+
+ // No phi which defined the vreg we are tracking, follow back through all
+ // predecessors with the existing vreg.
+ for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
+ LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
+ if (!addPredecessor(predecessor, vreg, alloc))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
+ uint32_t vreg, LAllocation alloc,
+ bool populateSafepoints)
+{
+ LSafepoint* safepoint = ins->safepoint();
+ MOZ_ASSERT(safepoint);
+
+ if (ins->isCall() && alloc.isRegister())
+ return true;
+
+ if (alloc.isRegister()) {
+ AnyRegister reg = alloc.toRegister();
+ if (populateSafepoints)
+ safepoint->addLiveRegister(reg);
+
+ MOZ_ASSERT(safepoint->liveRegs().has(reg));
+ }
+
+ // The |this| argument slot is implicitly included in all safepoints.
+ if (alloc.isArgument() && alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value))
+ return true;
+
+ LDefinition::Type type = virtualRegisters[vreg]
+ ? virtualRegisters[vreg]->type()
+ : LDefinition::GENERAL;
+
+ switch (type) {
+ case LDefinition::OBJECT:
+ if (populateSafepoints) {
+ JitSpew(JitSpew_RegAlloc, "Safepoint object v%u i%u %s",
+ vreg, ins->id(), alloc.toString().get());
+ if (!safepoint->addGcPointer(alloc))
+ return false;
+ }
+ MOZ_ASSERT(safepoint->hasGcPointer(alloc));
+ break;
+ case LDefinition::SLOTS:
+ if (populateSafepoints) {
+ JitSpew(JitSpew_RegAlloc, "Safepoint slots v%u i%u %s",
+ vreg, ins->id(), alloc.toString().get());
+ if (!safepoint->addSlotsOrElementsPointer(alloc))
+ return false;
+ }
+ MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
+ break;
+#ifdef JS_NUNBOX32
+    // Do not assert that safepoint information for nunbox types is complete,
+    // because if the vregs for a value's components are copied in multiple
+    // places then the safepoint information may not reflect all copies. All
+    // copies of payloads must be reflected, however, for generational GC.
+ case LDefinition::TYPE:
+ if (populateSafepoints) {
+ JitSpew(JitSpew_RegAlloc, "Safepoint type v%u i%u %s",
+ vreg, ins->id(), alloc.toString().get());
+ if (!safepoint->addNunboxType(vreg, alloc))
+ return false;
+ }
+ break;
+ case LDefinition::PAYLOAD:
+ if (populateSafepoints) {
+ JitSpew(JitSpew_RegAlloc, "Safepoint payload v%u i%u %s",
+ vreg, ins->id(), alloc.toString().get());
+ if (!safepoint->addNunboxPayload(vreg, alloc))
+ return false;
+ }
+ MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
+ break;
+#else
+ case LDefinition::BOX:
+ if (populateSafepoints) {
+ JitSpew(JitSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
+ vreg, ins->id(), alloc.toString().get());
+ if (!safepoint->addBoxedValue(alloc))
+ return false;
+ }
+ MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool
+AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg, LAllocation alloc)
+{
+ // There is no need to reanalyze if we have already seen this predecessor.
+ // We share the seen allocations across analysis of each use, as there will
+ // likely be common ground between different uses of the same vreg.
+ IntegrityItem item;
+ item.block = block;
+ item.vreg = vreg;
+ item.alloc = alloc;
+ item.index = seen.count();
+
+ IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
+ if (p)
+ return true;
+ if (!seen.add(p, item))
+ return false;
+
+ return worklist.append(item);
+}
+
+void
+AllocationIntegrityState::dump()
+{
+#ifdef DEBUG
+ fprintf(stderr, "Register Allocation Integrity State:\n");
+
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ MBasicBlock* mir = block->mir();
+
+ fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
+ for (size_t i = 0; i < mir->numSuccessors(); i++)
+ fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
+ fprintf(stderr, "\n");
+
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ const InstructionInfo& info = blocks[blockIndex].phis[i];
+ LPhi* phi = block->getPhi(i);
+ CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
+ CodePosition output(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);
+
+ fprintf(stderr, "[%u,%u Phi] [def %s] ",
+ input.bits(),
+ output.bits(),
+ phi->getDef(0)->toString().get());
+ for (size_t j = 0; j < phi->numOperands(); j++)
+ fprintf(stderr, " [use %s]", info.inputs[j].toString().get());
+ fprintf(stderr, "\n");
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+ const InstructionInfo& info = instructions[ins->id()];
+
+ CodePosition input(ins->id(), CodePosition::INPUT);
+ CodePosition output(ins->id(), CodePosition::OUTPUT);
+
+ fprintf(stderr, "[");
+ if (input != CodePosition::MIN)
+ fprintf(stderr, "%u,%u ", input.bits(), output.bits());
+ fprintf(stderr, "%s]", ins->opName());
+
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+ fprintf(stderr, " [%s -> %s]",
+ group->getMove(i).from().toString().get(),
+ group->getMove(i).to().toString().get());
+ }
+ fprintf(stderr, "\n");
+ continue;
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++)
+ fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp())
+ fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
+ temp->toString().get());
+ }
+
+ size_t index = 0;
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ fprintf(stderr, " [use %s", info.inputs[index++].toString().get());
+ if (!alloc->isConstant())
+ fprintf(stderr, " %s", alloc->toString().get());
+ fprintf(stderr, "]");
+ }
+
+ fprintf(stderr, "\n");
+ }
+ }
+
+ // Print discovered allocations at the ends of blocks, in the order they
+ // were discovered.
+
+ Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
+ if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
+ fprintf(stderr, "OOM while dumping allocations\n");
+ return;
+ }
+
+ for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
+ IntegrityItem item = iter.front();
+ seenOrdered[item.index] = item;
+ }
+
+ if (!seenOrdered.empty()) {
+ fprintf(stderr, "Intermediate Allocations:\n");
+
+ for (size_t i = 0; i < seenOrdered.length(); i++) {
+ IntegrityItem item = seenOrdered[i];
+ fprintf(stderr, " block %u reg v%u alloc %s\n",
+ item.block->mir()->id(), item.vreg, item.alloc.toString().get());
+ }
+ }
+
+ fprintf(stderr, "\n");
+#endif
+}
+
+const CodePosition CodePosition::MAX(UINT_MAX);
+const CodePosition CodePosition::MIN(0);
+
+bool
+RegisterAllocator::init()
+{
+ if (!insData.init(mir, graph.numInstructions()))
+ return false;
+
+ if (!entryPositions.reserve(graph.numBlocks()) || !exitPositions.reserve(graph.numBlocks()))
+ return false;
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+ for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
+ insData[ins->id()] = *ins;
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ insData[phi->id()] = phi;
+ }
+
+ CodePosition entry = block->numPhis() != 0
+ ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
+ : inputOf(block->firstInstructionWithId());
+ CodePosition exit = outputOf(block->lastInstructionWithId());
+
+ MOZ_ASSERT(block->mir()->id() == i);
+ entryPositions.infallibleAppend(entry);
+ exitPositions.infallibleAppend(exit);
+ }
+
+ return true;
+}
+
+LMoveGroup*
+RegisterAllocator::getInputMoveGroup(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->fixReuseMoves());
+ if (ins->inputMoves())
+ return ins->inputMoves();
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setInputMoves(moves);
+ ins->block()->insertBefore(ins, moves);
+ return moves;
+}
+
+LMoveGroup*
+RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins)
+{
+ if (ins->fixReuseMoves())
+ return ins->fixReuseMoves();
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setFixReuseMoves(moves);
+ ins->block()->insertBefore(ins, moves);
+ return moves;
+}
+
+LMoveGroup*
+RegisterAllocator::getMoveGroupAfter(LInstruction* ins)
+{
+ if (ins->movesAfter())
+ return ins->movesAfter();
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setMovesAfter(moves);
+
+ ins->block()->insertAfter(ins, moves);
+ return moves;
+}
+
+void
+RegisterAllocator::dumpInstructions()
+{
+#ifdef JS_JITSPEW
+ fprintf(stderr, "Instructions:\n");
+
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ MBasicBlock* mir = block->mir();
+
+ fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
+ for (size_t i = 0; i < mir->numSuccessors(); i++)
+ fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
+ fprintf(stderr, "\n");
+
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ LPhi* phi = block->getPhi(i);
+
+ fprintf(stderr, "[%u,%u Phi] [def %s]",
+ inputOf(phi).bits(),
+ outputOf(phi).bits(),
+ phi->getDef(0)->toString().get());
+ for (size_t j = 0; j < phi->numOperands(); j++)
+ fprintf(stderr, " [use %s]", phi->getOperand(j)->toString().get());
+ fprintf(stderr, "\n");
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+
+ fprintf(stderr, "[");
+ if (ins->id() != 0)
+ fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
+ fprintf(stderr, "%s]", ins->opName());
+
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+                    // Use two printfs, as LAllocation::toString is not reentrant.
+ fprintf(stderr, " [%s", group->getMove(i).from().toString().get());
+ fprintf(stderr, " -> %s]", group->getMove(i).to().toString().get());
+ }
+ fprintf(stderr, "\n");
+ continue;
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++)
+ fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp())
+ fprintf(stderr, " [temp %s]", temp->toString().get());
+ }
+
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ if (!alloc->isBogus())
+ fprintf(stderr, " [use %s]", alloc->toString().get());
+ }
+
+ fprintf(stderr, "\n");
+ }
+ }
+ fprintf(stderr, "\n");
+#endif // JS_JITSPEW
+}
diff --git a/js/src/jit/RegisterAllocator.h b/js/src/jit/RegisterAllocator.h
new file mode 100644
index 000000000..4ff16d523
--- /dev/null
+++ b/js/src/jit/RegisterAllocator.h
@@ -0,0 +1,375 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RegisterAllocator_h
+#define jit_RegisterAllocator_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+// Generic structures and functions for use by register allocators.
+
+namespace js {
+namespace jit {
+
+class LIRGenerator;
+
+// Structure for running a liveness analysis on a finished register allocation.
+// This analysis can be used for two purposes:
+//
+// - Check the integrity of the allocation, i.e. that the reads and writes of
+// physical values preserve the semantics of the original virtual registers.
+//
+// - Populate safepoints with live registers, GC thing and value data, to
+// streamline the process of prototyping new allocators.
+struct AllocationIntegrityState
+{
+ explicit AllocationIntegrityState(LIRGraph& graph)
+ : graph(graph)
+ {}
+
+ // Record all virtual registers in the graph. This must be called before
+ // register allocation, to pick up the original LUses.
+ MOZ_MUST_USE bool record();
+
+ // Perform the liveness analysis on the graph, and assert on an invalid
+ // allocation. This must be called after register allocation, to pick up
+ // all assigned physical values. If populateSafepoints is specified,
+ // safepoints will be filled in with liveness information.
+ MOZ_MUST_USE bool check(bool populateSafepoints);
+
+ private:
+
+ LIRGraph& graph;
+
+ // For all instructions and phis in the graph, keep track of the virtual
+ // registers for all inputs and outputs of the nodes. These are overwritten
+ // in place during register allocation. This information is kept on the
+ // side rather than in the instructions and phis themselves to avoid
+ // debug-builds-only bloat in the size of the involved structures.
+
+ struct InstructionInfo {
+ Vector<LAllocation, 2, SystemAllocPolicy> inputs;
+ Vector<LDefinition, 0, SystemAllocPolicy> temps;
+ Vector<LDefinition, 1, SystemAllocPolicy> outputs;
+
+ InstructionInfo()
+ { }
+
+ InstructionInfo(const InstructionInfo& o)
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!inputs.appendAll(o.inputs) ||
+ !temps.appendAll(o.temps) ||
+ !outputs.appendAll(o.outputs))
+ {
+ oomUnsafe.crash("InstructionInfo::InstructionInfo");
+ }
+ }
+ };
+ Vector<InstructionInfo, 0, SystemAllocPolicy> instructions;
+
+ struct BlockInfo {
+ Vector<InstructionInfo, 5, SystemAllocPolicy> phis;
+ BlockInfo() {}
+ BlockInfo(const BlockInfo& o) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!phis.appendAll(o.phis))
+ oomUnsafe.crash("BlockInfo::BlockInfo");
+ }
+ };
+ Vector<BlockInfo, 0, SystemAllocPolicy> blocks;
+
+ Vector<LDefinition*, 20, SystemAllocPolicy> virtualRegisters;
+
+ // Describes a correspondence that should hold at the end of a block.
+ // The value which was written to vreg in the original LIR should be
+ // physically stored in alloc after the register allocation.
+ struct IntegrityItem
+ {
+ LBlock* block;
+ uint32_t vreg;
+ LAllocation alloc;
+
+ // Order of insertion into seen, for sorting.
+ uint32_t index;
+
+ typedef IntegrityItem Lookup;
+ static HashNumber hash(const IntegrityItem& item) {
+ HashNumber hash = item.alloc.hash();
+ hash = mozilla::RotateLeft(hash, 4) ^ item.vreg;
+ hash = mozilla::RotateLeft(hash, 4) ^ HashNumber(item.block->mir()->id());
+ return hash;
+ }
+ static bool match(const IntegrityItem& one, const IntegrityItem& two) {
+ return one.block == two.block
+ && one.vreg == two.vreg
+ && one.alloc == two.alloc;
+ }
+ };
+
+ // Items still to be processed.
+ Vector<IntegrityItem, 10, SystemAllocPolicy> worklist;
+
+ // Set of all items that have already been processed.
+ typedef HashSet<IntegrityItem, IntegrityItem, SystemAllocPolicy> IntegrityItemSet;
+ IntegrityItemSet seen;
+
+ MOZ_MUST_USE bool checkIntegrity(LBlock* block, LInstruction* ins, uint32_t vreg,
+ LAllocation alloc, bool populateSafepoints);
+ MOZ_MUST_USE bool checkSafepointAllocation(LInstruction* ins, uint32_t vreg, LAllocation alloc,
+ bool populateSafepoints);
+ MOZ_MUST_USE bool addPredecessor(LBlock* block, uint32_t vreg, LAllocation alloc);
+
+ void dump();
+};
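+
+// A sketch of the intended use (the |allocator| object and the error handling
+// here are placeholders for the example):
+//
+//   AllocationIntegrityState integrity(graph);
+//   if (!integrity.record())        // Before allocation, to pick up the LUses.
+//       return false;
+//   if (!allocator.go())            // Run the register allocator.
+//       return false;
+//   if (!integrity.check(/* populateSafepoints = */ false))
+//       return false;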
+
+// Represents a position in the instruction stream with better-than-instruction
+// precision.
+//
+// An issue comes up when performing register allocation as to how to represent
+// information such as "this register is only needed for the input of
+// this instruction, it can be clobbered in the output". Just having ranges
+// of instruction IDs is insufficiently expressive to denote all possibilities.
+// This class solves this issue by associating an extra bit with the instruction
+// ID which indicates whether the position is the input half or output half of
+// an instruction.
+class CodePosition
+{
+ private:
+ constexpr explicit CodePosition(uint32_t bits)
+ : bits_(bits)
+ { }
+
+ static const unsigned int INSTRUCTION_SHIFT = 1;
+ static const unsigned int SUBPOSITION_MASK = 1;
+ uint32_t bits_;
+
+ public:
+ static const CodePosition MAX;
+ static const CodePosition MIN;
+
+ // This is the half of the instruction this code position represents, as
+ // described in the huge comment above.
+ enum SubPosition {
+ INPUT,
+ OUTPUT
+ };
+
+ constexpr CodePosition() : bits_(0)
+ { }
+
+ CodePosition(uint32_t instruction, SubPosition where) {
+ MOZ_ASSERT(instruction < 0x80000000u);
+ MOZ_ASSERT(((uint32_t)where & SUBPOSITION_MASK) == (uint32_t)where);
+ bits_ = (instruction << INSTRUCTION_SHIFT) | (uint32_t)where;
+ }
+
+ uint32_t ins() const {
+ return bits_ >> INSTRUCTION_SHIFT;
+ }
+
+ uint32_t bits() const {
+ return bits_;
+ }
+
+ SubPosition subpos() const {
+ return (SubPosition)(bits_ & SUBPOSITION_MASK);
+ }
+
+ bool operator <(CodePosition other) const {
+ return bits_ < other.bits_;
+ }
+
+ bool operator <=(CodePosition other) const {
+ return bits_ <= other.bits_;
+ }
+
+ bool operator !=(CodePosition other) const {
+ return bits_ != other.bits_;
+ }
+
+ bool operator ==(CodePosition other) const {
+ return bits_ == other.bits_;
+ }
+
+ bool operator >(CodePosition other) const {
+ return bits_ > other.bits_;
+ }
+
+ bool operator >=(CodePosition other) const {
+ return bits_ >= other.bits_;
+ }
+
+ uint32_t operator -(CodePosition other) const {
+ MOZ_ASSERT(bits_ >= other.bits_);
+ return bits_ - other.bits_;
+ }
+
+ CodePosition previous() const {
+ MOZ_ASSERT(*this != MIN);
+ return CodePosition(bits_ - 1);
+ }
+ CodePosition next() const {
+ MOZ_ASSERT(*this != MAX);
+ return CodePosition(bits_ + 1);
+ }
+};
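+
+// For example, with INSTRUCTION_SHIFT == 1 and INPUT == 0 / OUTPUT == 1, an
+// instruction with id 5 encodes as:
+//
+//   CodePosition input(5, CodePosition::INPUT);    // bits() == 10, ins() == 5
+//   CodePosition output(5, CodePosition::OUTPUT);  // bits() == 11, ins() == 5
+//   MOZ_ASSERT(input < output);
+//
+// so the input half of an instruction always compares less than its output half.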
+
+// Mapping from instruction ids (or CodePositions) to the corresponding LNode in
+// the graph, filled in by RegisterAllocator::init().
+class InstructionDataMap
+{
+ FixedList<LNode*> insData_;
+
+ public:
+ InstructionDataMap()
+ : insData_()
+ { }
+
+ MOZ_MUST_USE bool init(MIRGenerator* gen, uint32_t numInstructions) {
+ if (!insData_.init(gen->alloc(), numInstructions))
+ return false;
+ memset(&insData_[0], 0, sizeof(LNode*) * numInstructions);
+ return true;
+ }
+
+ LNode*& operator[](CodePosition pos) {
+ return operator[](pos.ins());
+ }
+ LNode* const& operator[](CodePosition pos) const {
+ return operator[](pos.ins());
+ }
+ LNode*& operator[](uint32_t ins) {
+ return insData_[ins];
+ }
+ LNode* const& operator[](uint32_t ins) const {
+ return insData_[ins];
+ }
+};
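+
+// For example (a sketch; |pos| is a hypothetical variable), entries are written
+// by id during init() and may later be looked up by either index type:
+//
+//   insData[ins->id()] = *ins;            // During RegisterAllocator::init().
+//   LNode* node = insData[pos];           // |pos| may be a raw id or a CodePosition.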
+
+// Common superclass for register allocators.
+class RegisterAllocator
+{
+ void operator=(const RegisterAllocator&) = delete;
+ RegisterAllocator(const RegisterAllocator&) = delete;
+
+ protected:
+ // Context
+ MIRGenerator* mir;
+ LIRGenerator* lir;
+ LIRGraph& graph;
+
+    // Pool of all registers that should be considered allocatable
+ AllocatableRegisterSet allRegisters_;
+
+ // Computed data
+ InstructionDataMap insData;
+ Vector<CodePosition, 12, SystemAllocPolicy> entryPositions;
+ Vector<CodePosition, 12, SystemAllocPolicy> exitPositions;
+
+ RegisterAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
+ : mir(mir),
+ lir(lir),
+ graph(graph),
+ allRegisters_(RegisterSet::All())
+ {
+ if (mir->compilingWasm()) {
+#if defined(JS_CODEGEN_X64)
+ allRegisters_.take(AnyRegister(HeapReg));
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ allRegisters_.take(AnyRegister(HeapReg));
+ allRegisters_.take(AnyRegister(GlobalReg));
+#elif defined(JS_CODEGEN_ARM64)
+ allRegisters_.take(AnyRegister(HeapReg));
+ allRegisters_.take(AnyRegister(HeapLenReg));
+ allRegisters_.take(AnyRegister(GlobalReg));
+#endif
+ } else {
+ if (FramePointer != InvalidReg && mir->instrumentedProfiling())
+ allRegisters_.take(AnyRegister(FramePointer));
+ }
+ }
+
+ MOZ_MUST_USE bool init();
+
+ TempAllocator& alloc() const {
+ return mir->alloc();
+ }
+
+ CodePosition outputOf(const LNode* ins) const {
+ return ins->isPhi()
+ ? outputOf(ins->toPhi())
+ : outputOf(ins->toInstruction());
+ }
+ CodePosition outputOf(const LPhi* ins) const {
+ // All phis in a block write their outputs after all of them have
+ // read their inputs. Consequently, it doesn't make sense to talk
+ // about code positions in the middle of a series of phis.
+ LBlock* block = ins->block();
+ return CodePosition(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);
+ }
+ CodePosition outputOf(const LInstruction* ins) const {
+ return CodePosition(ins->id(), CodePosition::OUTPUT);
+ }
+ CodePosition inputOf(const LNode* ins) const {
+ return ins->isPhi()
+ ? inputOf(ins->toPhi())
+ : inputOf(ins->toInstruction());
+ }
+ CodePosition inputOf(const LPhi* ins) const {
+ // All phis in a block read their inputs before any of them write their
+ // outputs. Consequently, it doesn't make sense to talk about code
+ // positions in the middle of a series of phis.
+ return CodePosition(ins->block()->getPhi(0)->id(), CodePosition::INPUT);
+ }
+ CodePosition inputOf(const LInstruction* ins) const {
+ return CodePosition(ins->id(), CodePosition::INPUT);
+ }
+ CodePosition entryOf(const LBlock* block) {
+ return entryPositions[block->mir()->id()];
+ }
+ CodePosition exitOf(const LBlock* block) {
+ return exitPositions[block->mir()->id()];
+ }
+
+ LMoveGroup* getInputMoveGroup(LInstruction* ins);
+ LMoveGroup* getFixReuseMoveGroup(LInstruction* ins);
+ LMoveGroup* getMoveGroupAfter(LInstruction* ins);
+
+ CodePosition minimalDefEnd(LNode* ins) {
+ // Compute the shortest interval that captures vregs defined by ins.
+ // Watch for instructions that are followed by an OSI point.
+ // If moves are introduced between the instruction and the OSI point then
+ // safepoint information for the instruction may be incorrect.
+ while (true) {
+ LNode* next = insData[ins->id() + 1];
+ if (!next->isOsiPoint())
+ break;
+ ins = next;
+ }
+
+ return outputOf(ins);
+ }
+
+ void dumpInstructions();
+};
+
+static inline AnyRegister
+GetFixedRegister(const LDefinition* def, const LUse* use)
+{
+ return def->isFloatReg()
+ ? AnyRegister(FloatRegister::FromCode(use->registerCode()))
+ : AnyRegister(Register::FromCode(use->registerCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RegisterAllocator_h */
diff --git a/js/src/jit/RegisterSets.h b/js/src/jit/RegisterSets.h
new file mode 100644
index 000000000..0a4045dd7
--- /dev/null
+++ b/js/src/jit/RegisterSets.h
@@ -0,0 +1,1333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RegisterSets_h
+#define jit_RegisterSets_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/JitAllocPolicy.h"
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+struct AnyRegister {
+ typedef uint32_t Code;
+
+ static const uint32_t Total = Registers::Total + FloatRegisters::Total;
+ static const uint32_t Invalid = UINT_MAX;
+
+ private:
+ Code code_;
+
+ public:
+ AnyRegister() = default;
+
+ explicit AnyRegister(Register gpr) {
+ code_ = gpr.code();
+ }
+ explicit AnyRegister(FloatRegister fpu) {
+ code_ = fpu.code() + Registers::Total;
+ }
+ static AnyRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Total);
+ AnyRegister r;
+ r.code_ = i;
+ return r;
+ }
+ bool isFloat() const {
+ return code_ >= Registers::Total;
+ }
+ Register gpr() const {
+ MOZ_ASSERT(!isFloat());
+ return Register::FromCode(code_);
+ }
+ FloatRegister fpu() const {
+ MOZ_ASSERT(isFloat());
+ return FloatRegister::FromCode(code_ - Registers::Total);
+ }
+ bool operator ==(AnyRegister other) const {
+ return code_ == other.code_;
+ }
+ bool operator !=(AnyRegister other) const {
+ return code_ != other.code_;
+ }
+ const char* name() const {
+ return isFloat() ? fpu().name() : gpr().name();
+ }
+ Code code() const {
+ return code_;
+ }
+ bool volatile_() const {
+ return isFloat() ? fpu().volatile_() : gpr().volatile_();
+ }
+ AnyRegister aliased(uint32_t aliasIdx) const {
+ AnyRegister ret;
+ if (isFloat()) {
+ FloatRegister fret;
+ fpu().aliased(aliasIdx, &fret);
+ ret = AnyRegister(fret);
+ } else {
+ Register gret;
+ gpr().aliased(aliasIdx, &gret);
+ ret = AnyRegister(gret);
+ }
+ MOZ_ASSERT_IF(aliasIdx == 0, ret == *this);
+ return ret;
+ }
+ uint32_t numAliased() const {
+ if (isFloat())
+ return fpu().numAliased();
+ return gpr().numAliased();
+ }
+ bool aliases(const AnyRegister& other) const {
+ if (isFloat() && other.isFloat())
+ return fpu().aliases(other.fpu());
+ if (!isFloat() && !other.isFloat())
+ return gpr().aliases(other.gpr());
+ return false;
+ }
+    // Do the two registers hold the same type of data (e.g. both float32, both gpr)?
+    bool isCompatibleReg(const AnyRegister other) const {
+ if (isFloat() && other.isFloat())
+ return fpu().equiv(other.fpu());
+ if (!isFloat() && !other.isFloat())
+ return true;
+ return false;
+ }
+
+};
+
+// Registers to hold a boxed value. Uses one register on 64 bit
+// platforms, two registers on 32 bit platforms.
+class ValueOperand
+{
+#if defined(JS_NUNBOX32)
+ Register type_;
+ Register payload_;
+
+ public:
+ constexpr ValueOperand(Register type, Register payload)
+ : type_(type), payload_(payload)
+ { }
+
+ Register typeReg() const {
+ return type_;
+ }
+ Register payloadReg() const {
+ return payload_;
+ }
+ bool aliases(Register reg) const {
+ return type_ == reg || payload_ == reg;
+ }
+ Register scratchReg() const {
+ return payloadReg();
+ }
+ bool operator==(const ValueOperand& o) const {
+ return type_ == o.type_ && payload_ == o.payload_;
+ }
+ bool operator!=(const ValueOperand& o) const {
+ return !(*this == o);
+ }
+
+#elif defined(JS_PUNBOX64)
+ Register value_;
+
+ public:
+ explicit constexpr ValueOperand(Register value)
+ : value_(value)
+ { }
+
+ Register valueReg() const {
+ return value_;
+ }
+ bool aliases(Register reg) const {
+ return value_ == reg;
+ }
+ Register scratchReg() const {
+ return valueReg();
+ }
+ bool operator==(const ValueOperand& o) const {
+ return value_ == o.value_;
+ }
+ bool operator!=(const ValueOperand& o) const {
+ return !(*this == o);
+ }
+#endif
+
+ ValueOperand() = default;
+};
+
+// Registers to hold either a typed or an untyped value.
+class TypedOrValueRegister
+{
+ // Type of value being stored.
+ MIRType type_;
+
+ union U {
+ AnyRegister typed;
+ ValueOperand value;
+ } data;
+
+ public:
+
+ TypedOrValueRegister() = default;
+
+ TypedOrValueRegister(MIRType type, AnyRegister reg)
+ : type_(type)
+ {
+ data.typed = reg;
+ }
+
+ MOZ_IMPLICIT TypedOrValueRegister(ValueOperand value)
+ : type_(MIRType::Value)
+ {
+ data.value = value;
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+
+ bool hasTyped() const {
+ return type() != MIRType::None && type() != MIRType::Value;
+ }
+
+ bool hasValue() const {
+ return type() == MIRType::Value;
+ }
+
+ AnyRegister typedReg() const {
+ MOZ_ASSERT(hasTyped());
+ return data.typed;
+ }
+
+ ValueOperand valueReg() const {
+ MOZ_ASSERT(hasValue());
+ return data.value;
+ }
+
+ AnyRegister scratchReg() {
+ if (hasValue())
+ return AnyRegister(valueReg().scratchReg());
+ return typedReg();
+ }
+};
+
+// A constant value, or registers to hold a typed/untyped value.
+class ConstantOrRegister
+{
+ // Whether a constant value is being stored.
+ bool constant_;
+
+ // Space to hold either a Value or a TypedOrValueRegister.
+ union U {
+ Value constant;
+ TypedOrValueRegister reg;
+ } data;
+
+ const Value& dataValue() const {
+ MOZ_ASSERT(constant());
+ return data.constant;
+ }
+ void setDataValue(const Value& value) {
+ MOZ_ASSERT(constant());
+ data.constant = value;
+ }
+ const TypedOrValueRegister& dataReg() const {
+ MOZ_ASSERT(!constant());
+ return data.reg;
+ }
+ void setDataReg(const TypedOrValueRegister& reg) {
+ MOZ_ASSERT(!constant());
+ data.reg = reg;
+ }
+
+ public:
+
+ ConstantOrRegister()
+ {}
+
+ MOZ_IMPLICIT ConstantOrRegister(const Value& value)
+ : constant_(true)
+ {
+ setDataValue(value);
+ }
+
+ MOZ_IMPLICIT ConstantOrRegister(TypedOrValueRegister reg)
+ : constant_(false)
+ {
+ setDataReg(reg);
+ }
+
+ bool constant() const {
+ return constant_;
+ }
+
+ const Value& value() const {
+ return dataValue();
+ }
+
+ const TypedOrValueRegister& reg() const {
+ return dataReg();
+ }
+};
+
+struct RegisterOrInt32Constant {
+ bool isRegister_;
+ union {
+ Register reg_;
+ int32_t constant_;
+ };
+
+ explicit RegisterOrInt32Constant(Register reg)
+ : isRegister_(true), reg_(reg)
+ { }
+
+ explicit RegisterOrInt32Constant(int32_t index)
+ : isRegister_(false), constant_(index)
+ { }
+
+ inline void bumpConstant(int diff) {
+ MOZ_ASSERT(!isRegister_);
+ constant_ += diff;
+ }
+ inline Register reg() const {
+ MOZ_ASSERT(isRegister_);
+ return reg_;
+ }
+ inline int32_t constant() const {
+ MOZ_ASSERT(!isRegister_);
+ return constant_;
+ }
+ inline bool isRegister() const {
+ return isRegister_;
+ }
+ inline bool isConstant() const {
+ return !isRegister_;
+ }
+};
+
+template <typename T>
+class TypedRegisterSet
+{
+ public:
+ typedef T RegType;
+ typedef typename T::SetType SetType;
+
+ private:
+ SetType bits_;
+
+ public:
+ explicit constexpr TypedRegisterSet(SetType bits)
+ : bits_(bits)
+ { }
+
+ constexpr TypedRegisterSet() : bits_(0)
+ { }
+ constexpr TypedRegisterSet(const TypedRegisterSet<T>& set) : bits_(set.bits_)
+ { }
+
+ static inline TypedRegisterSet All() {
+ return TypedRegisterSet(T::Codes::AllocatableMask);
+ }
+ static inline TypedRegisterSet Intersect(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs) {
+ return TypedRegisterSet(lhs.bits_ & rhs.bits_);
+ }
+ static inline TypedRegisterSet Union(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs) {
+ return TypedRegisterSet(lhs.bits_ | rhs.bits_);
+ }
+ static inline TypedRegisterSet Not(const TypedRegisterSet& in) {
+ return TypedRegisterSet(~in.bits_ & T::Codes::AllocatableMask);
+ }
+ static inline TypedRegisterSet Subtract(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs)
+ {
+ return TypedRegisterSet(lhs.bits_ & ~rhs.bits_);
+ }
+ static inline TypedRegisterSet VolatileNot(const TypedRegisterSet& in) {
+ const SetType allocatableVolatile =
+ T::Codes::AllocatableMask & T::Codes::VolatileMask;
+ return TypedRegisterSet(~in.bits_ & allocatableVolatile);
+ }
+ static inline TypedRegisterSet Volatile() {
+ return TypedRegisterSet(T::Codes::AllocatableMask & T::Codes::VolatileMask);
+ }
+ static inline TypedRegisterSet NonVolatile() {
+ return TypedRegisterSet(T::Codes::AllocatableMask & T::Codes::NonVolatileMask);
+ }
+
+ bool empty() const {
+ return !bits_;
+ }
+ void clear() {
+ bits_ = 0;
+ }
+
+ bool hasRegisterIndex(T reg) const {
+ return !!(bits_ & (SetType(1) << reg.code()));
+ }
+ bool hasAllocatable(T reg) const {
+ return !(~bits_ & reg.alignedOrDominatedAliasedSet());
+ }
+
+ void addRegisterIndex(T reg) {
+ bits_ |= (SetType(1) << reg.code());
+ }
+ void addAllocatable(T reg) {
+ bits_ |= reg.alignedOrDominatedAliasedSet();
+ }
+
+
+ void takeRegisterIndex(T reg) {
+ bits_ &= ~(SetType(1) << reg.code());
+ }
+ void takeAllocatable(T reg) {
+ bits_ &= ~reg.alignedOrDominatedAliasedSet();
+ }
+
+ T getAny() const {
+ // The choice of first or last here is mostly arbitrary, as they are
+ // about the same speed on popular architectures. We choose first, as
+ // it has the advantage of using the "lower" registers more often. These
+ // registers are sometimes more efficient (e.g. optimized encodings for
+ // EAX on x86).
+ return getFirst();
+ }
+ T getFirst() const {
+ MOZ_ASSERT(!empty());
+ return T::FromCode(T::FirstBit(bits_));
+ }
+ T getLast() const {
+ MOZ_ASSERT(!empty());
+ int ireg = T::LastBit(bits_);
+ return T::FromCode(ireg);
+ }
+
+ SetType bits() const {
+ return bits_;
+ }
+ uint32_t size() const {
+ return T::SetSize(bits_);
+ }
+ bool operator ==(const TypedRegisterSet<T>& other) const {
+ return other.bits_ == bits_;
+ }
+ TypedRegisterSet<T> reduceSetForPush() const {
+ return T::ReduceSetForPush(*this);
+ }
+ uint32_t getPushSizeInBytes() const {
+ return T::GetPushSizeInBytes(*this);
+ }
+};
+
+typedef TypedRegisterSet<Register> GeneralRegisterSet;
+typedef TypedRegisterSet<FloatRegister> FloatRegisterSet;
+
+class AnyRegisterIterator;
+
+class RegisterSet {
+ GeneralRegisterSet gpr_;
+ FloatRegisterSet fpu_;
+
+ friend class AnyRegisterIterator;
+
+ public:
+ RegisterSet()
+ { }
+ constexpr RegisterSet(const GeneralRegisterSet& gpr, const FloatRegisterSet& fpu)
+ : gpr_(gpr),
+ fpu_(fpu)
+ { }
+ static inline RegisterSet All() {
+ return RegisterSet(GeneralRegisterSet::All(), FloatRegisterSet::All());
+ }
+ static inline RegisterSet Intersect(const RegisterSet& lhs, const RegisterSet& rhs) {
+ return RegisterSet(GeneralRegisterSet::Intersect(lhs.gpr_, rhs.gpr_),
+ FloatRegisterSet::Intersect(lhs.fpu_, rhs.fpu_));
+ }
+ static inline RegisterSet Union(const RegisterSet& lhs, const RegisterSet& rhs) {
+ return RegisterSet(GeneralRegisterSet::Union(lhs.gpr_, rhs.gpr_),
+ FloatRegisterSet::Union(lhs.fpu_, rhs.fpu_));
+ }
+ static inline RegisterSet Not(const RegisterSet& in) {
+ return RegisterSet(GeneralRegisterSet::Not(in.gpr_),
+ FloatRegisterSet::Not(in.fpu_));
+ }
+ static inline RegisterSet VolatileNot(const RegisterSet& in) {
+ return RegisterSet(GeneralRegisterSet::VolatileNot(in.gpr_),
+ FloatRegisterSet::VolatileNot(in.fpu_));
+ }
+ static inline RegisterSet Volatile() {
+ return RegisterSet(GeneralRegisterSet::Volatile(), FloatRegisterSet::Volatile());
+ }
+
+ bool empty() const {
+ return fpu_.empty() && gpr_.empty();
+ }
+ void clear() {
+ fpu_.clear();
+ gpr_.clear();
+ }
+ bool emptyGeneral() const {
+ return gpr_.empty();
+ }
+ bool emptyFloat() const {
+ return fpu_.empty();
+ }
+ constexpr GeneralRegisterSet gprs() const {
+ return gpr_;
+ }
+ GeneralRegisterSet& gprs() {
+ return gpr_;
+ }
+ constexpr FloatRegisterSet fpus() const {
+ return fpu_;
+ }
+ FloatRegisterSet& fpus() {
+ return fpu_;
+ }
+ bool operator ==(const RegisterSet& other) const {
+ return other.gpr_ == gpr_ && other.fpu_ == fpu_;
+ }
+
+};
+
+// There are 2 use cases for register sets:
+//
+// 1. To serve as a pool of allocatable registers. This is useful for working
+// on the code produced by some stub where free registers are available, or
+// when we can release some registers.
+//
+// 2. To serve as a list of typed registers. This is useful for working with
+// live registers and to manipulate them with the proper instructions. This
+// is used by the register allocator to fill the Safepoints.
+//
+// These 2 use cases can be used on top of 3 different backend representations
+// of register sets, which are either GeneralRegisterSet, FloatRegisterSet, or
+// RegisterSet (for both). These classes are used to store the bit sets to
+// represent each register.
+//
+// Each use case defines an Accessor class, such as AllocatableSetAccessors or
+// LiveSetAccessors, which is parameterized with the type of the register
+// set. These accessors are in charge of manipulating the register set in a
+// consistent way.
+//
+// The CommonRegSet class is used to wrap the accessors with convenient
+// shortcuts which are built on top of the accessors.
+//
+// Then, to avoid too many levels of complexity while using these interfaces,
+// shortcut templates are created to make it easy to distinguish between a
+// register set used for allocating registers and a register set used for making
+// a collection of allocated (live) registers.
+//
+// This separation exists to prevent mixing LiveSet and AllocatableSet
+// manipulations of the same register set, and ensure safety while avoiding
+// false positives.
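+//
+// As a rough usage sketch (illustrative only, not taken from any particular
+// caller), the two flavors are typically used as follows:
+//
+//   // Carving scratch registers out of a pool of free registers.
+//   AllocatableGeneralRegisterSet pool(GeneralRegisterSet::All());
+//   Register scratch = pool.takeAny();
+//
+//   // Recording which registers are live and must be preserved.
+//   LiveGeneralRegisterSet live;
+//   live.add(scratch);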
+
+template <typename RegisterSet>
+class AllocatableSet;
+
+template <typename RegisterSet>
+class LiveSet;
+
+// Base accessor classes have the minimal set of raw methods to manipulate the
+// register set given as a parameter in a consistent manner. These methods are:
+//
+// - has: Returns whether all the bits needed to take a register are present.
+//
+// - takeUnchecked: Subtracts the bits used to represent the register in the
+// register set.
+//
+// - addUnchecked: Adds the bits used to represent the register in the
+// register set.
+
+// The AllocatableSet accessors are used to make a pool of unused
+// registers. Taking or adding registers should consider the aliasing rules of
+// the architecture. For example, on ARM, the following piece of code should
+// work fine, knowing that the double register |d0| is composed of float
+// registers |s0| and |s1|:
+//
+// AllocatableFloatRegisterSet regs;
+// regs.add(s0);
+// regs.add(s1);
+// // d0 is now available.
+// regs.take(d0);
+//
+// These accessors are useful for allocating registers within the functions used
+// to generate stubs, trampolines, and inline caches (BaselineIC, IonCache).
+template <typename Set>
+class AllocatableSetAccessors
+{
+ public:
+ typedef Set RegSet;
+ typedef typename RegSet::RegType RegType;
+ typedef typename RegSet::SetType SetType;
+
+ protected:
+ RegSet set_;
+
+ public:
+ AllocatableSetAccessors() : set_() {}
+ explicit constexpr AllocatableSetAccessors(SetType set) : set_(set) {}
+ explicit constexpr AllocatableSetAccessors(RegSet set) : set_(set) {}
+
+ bool has(RegType reg) const {
+ return set_.hasAllocatable(reg);
+ }
+
+ void addUnchecked(RegType reg) {
+ set_.addAllocatable(reg);
+ }
+
+ void takeUnchecked(RegType reg) {
+ set_.takeAllocatable(reg);
+ }
+};
+
+// Specialization of the AllocatableSet accessors for the RegisterSet aggregate.
+template <>
+class AllocatableSetAccessors<RegisterSet>
+{
+ public:
+ typedef RegisterSet RegSet;
+ typedef AnyRegister RegType;
+ typedef char SetType;
+
+ protected:
+ RegisterSet set_;
+
+ public:
+ AllocatableSetAccessors() : set_() {}
+ explicit constexpr AllocatableSetAccessors(SetType) = delete;
+ explicit constexpr AllocatableSetAccessors(RegisterSet set) : set_(set) {}
+
+ bool has(Register reg) const {
+ return set_.gprs().hasAllocatable(reg);
+ }
+ bool has(FloatRegister reg) const {
+ return set_.fpus().hasAllocatable(reg);
+ }
+
+ void addUnchecked(Register reg) {
+ set_.gprs().addAllocatable(reg);
+ }
+ void addUnchecked(FloatRegister reg) {
+ set_.fpus().addAllocatable(reg);
+ }
+
+ void takeUnchecked(Register reg) {
+ set_.gprs().takeAllocatable(reg);
+ }
+ void takeUnchecked(FloatRegister reg) {
+ set_.fpus().takeAllocatable(reg);
+ }
+};
+
+
+// The LiveSet accessors are used to collect a list of allocated
+// registers. Taking or adding a register should *not* consider the aliases, as
+// we care about interpreting the registers with the correct type. For example,
+// on x64, where one float register can be interpreted as a Simd128, a Double,
+// or a Float, adding xmm0 as a Simd128 does not make the register available
+// as a Double.
+//
+// LiveFloatRegisterSet regs;
+// regs.add(xmm0.asSimd128());
+// regs.take(xmm0); // Assert!
+//
+// These accessors are useful for recording the result of a register allocator,
+// such as what the Backtracking allocator does on the Safepoints.
+template <typename Set>
+class LiveSetAccessors
+{
+ public:
+ typedef Set RegSet;
+ typedef typename RegSet::RegType RegType;
+ typedef typename RegSet::SetType SetType;
+
+ protected:
+ RegSet set_;
+
+ public:
+ LiveSetAccessors() : set_() {}
+ explicit constexpr LiveSetAccessors(SetType set) : set_(set) {}
+ explicit constexpr LiveSetAccessors(RegSet set) : set_(set) {}
+
+ bool has(RegType reg) const {
+ return set_.hasRegisterIndex(reg);
+ }
+
+ void addUnchecked(RegType reg) {
+ set_.addRegisterIndex(reg);
+ }
+
+ void takeUnchecked(RegType reg) {
+ set_.takeRegisterIndex(reg);
+ }
+};
+
+// Specialization of the LiveSet accessors for the RegisterSet aggregate.
+template <>
+class LiveSetAccessors<RegisterSet>
+{
+ public:
+ typedef RegisterSet RegSet;
+ typedef AnyRegister RegType;
+ typedef char SetType;
+
+ protected:
+ RegisterSet set_;
+
+ public:
+ LiveSetAccessors() : set_() {}
+ explicit constexpr LiveSetAccessors(SetType) = delete;
+ explicit constexpr LiveSetAccessors(RegisterSet set) : set_(set) {}
+
+ bool has(Register reg) const {
+ return set_.gprs().hasRegisterIndex(reg);
+ }
+ bool has(FloatRegister reg) const {
+ return set_.fpus().hasRegisterIndex(reg);
+ }
+
+ void addUnchecked(Register reg) {
+ set_.gprs().addRegisterIndex(reg);
+ }
+ void addUnchecked(FloatRegister reg) {
+ set_.fpus().addRegisterIndex(reg);
+ }
+
+ void takeUnchecked(Register reg) {
+ set_.gprs().takeRegisterIndex(reg);
+ }
+ void takeUnchecked(FloatRegister reg) {
+ set_.fpus().takeRegisterIndex(reg);
+ }
+};
+
+#define DEFINE_ACCESSOR_CONSTRUCTORS_(REGSET) \
+ typedef typename Parent::RegSet RegSet; \
+ typedef typename Parent::RegType RegType; \
+ typedef typename Parent::SetType SetType; \
+ \
+ constexpr REGSET() : Parent() {} \
+ explicit constexpr REGSET(SetType set) : Parent(set) {} \
+ explicit constexpr REGSET(RegSet set) : Parent(set) {}
+
+// This class adds checked accessors on top of the unchecked variants defined by
+// AllocatableSet and LiveSet accessors. It also defines interfaces which are
+// specialized to the register set implementation, such as |getAny| and
+// |takeAny| variants.
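+//
+// For instance (an illustrative sketch; |set| stands for any of the register
+// sets defined below and |reg| for a register of the matching type):
+//
+//   set.add(reg);           // Checked: asserts that |reg| was not already present.
+//   set.addUnchecked(reg);  // Unchecked: no assertion about the current contents.
+//   set.take(reg);          // Checked: asserts that |reg| was present.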
+template <class Accessors, typename Set>
+class SpecializedRegSet : public Accessors
+{
+ typedef Accessors Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(SpecializedRegSet)
+
+ SetType bits() const {
+ return this->Parent::set_.bits();
+ }
+
+ using Parent::has;
+
+ using Parent::addUnchecked;
+ void add(RegType reg) {
+ MOZ_ASSERT(!has(reg));
+ addUnchecked(reg);
+ }
+
+ using Parent::takeUnchecked;
+ void take(RegType reg) {
+ MOZ_ASSERT(has(reg));
+ takeUnchecked(reg);
+ }
+
+ RegType getAny() const {
+ return this->Parent::set_.getAny();
+ }
+ RegType getFirst() const {
+ return this->Parent::set_.getFirst();
+ }
+ RegType getLast() const {
+ return this->Parent::set_.getLast();
+ }
+
+ RegType getAnyExcluding(RegType preclude) {
+ if (!has(preclude))
+ return getAny();
+
+ take(preclude);
+ RegType result = getAny();
+ add(preclude);
+ return result;
+ }
+
+ RegType takeAny() {
+ RegType reg = getAny();
+ take(reg);
+ return reg;
+ }
+ RegType takeFirst() {
+ RegType reg = getFirst();
+ take(reg);
+ return reg;
+ }
+ RegType takeLast() {
+ RegType reg = getLast();
+ take(reg);
+ return reg;
+ }
+
+ ValueOperand takeAnyValue() {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(takeAny(), takeAny());
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(takeAny());
+#else
+#error "Bad architecture"
+#endif
+ }
+
+ bool aliases(ValueOperand v) const {
+#ifdef JS_NUNBOX32
+ return has(v.typeReg()) || has(v.payloadReg());
+#else
+ return has(v.valueReg());
+#endif
+ }
+
+ RegType takeAnyExcluding(RegType preclude) {
+ RegType reg = getAnyExcluding(preclude);
+ take(reg);
+ return reg;
+ }
+};
+
+// Specialization of the accessors for the RegisterSet aggregate.
+template <class Accessors>
+class SpecializedRegSet<Accessors, RegisterSet> : public Accessors
+{
+ typedef Accessors Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(SpecializedRegSet)
+
+ GeneralRegisterSet gprs() const {
+ return this->Parent::set_.gprs();
+ }
+ GeneralRegisterSet& gprs() {
+ return this->Parent::set_.gprs();
+ }
+ FloatRegisterSet fpus() const {
+ return this->Parent::set_.fpus();
+ }
+ FloatRegisterSet& fpus() {
+ return this->Parent::set_.fpus();
+ }
+
+ bool emptyGeneral() const {
+ return this->Parent::set_.emptyGeneral();
+ }
+ bool emptyFloat() const {
+ return this->Parent::set_.emptyFloat();
+ }
+
+
+ using Parent::has;
+ bool has(AnyRegister reg) const {
+ return reg.isFloat() ? has(reg.fpu()) : has(reg.gpr());
+ }
+
+
+ using Parent::addUnchecked;
+ void addUnchecked(AnyRegister reg) {
+ if (reg.isFloat())
+ addUnchecked(reg.fpu());
+ else
+ addUnchecked(reg.gpr());
+ }
+
+ void add(Register reg) {
+ MOZ_ASSERT(!has(reg));
+ addUnchecked(reg);
+ }
+ void add(FloatRegister reg) {
+ MOZ_ASSERT(!has(reg));
+ addUnchecked(reg);
+ }
+ void add(AnyRegister reg) {
+ if (reg.isFloat())
+ add(reg.fpu());
+ else
+ add(reg.gpr());
+ }
+
+ using Parent::takeUnchecked;
+ void takeUnchecked(AnyRegister reg) {
+ if (reg.isFloat())
+ takeUnchecked(reg.fpu());
+ else
+ takeUnchecked(reg.gpr());
+ }
+
+ void take(Register reg) {
+ MOZ_ASSERT(has(reg));
+ takeUnchecked(reg);
+ }
+ void take(FloatRegister reg) {
+ MOZ_ASSERT(has(reg));
+ takeUnchecked(reg);
+ }
+ void take(AnyRegister reg) {
+ if (reg.isFloat())
+ take(reg.fpu());
+ else
+ take(reg.gpr());
+ }
+
+ Register getAnyGeneral() const {
+ return this->Parent::set_.gprs().getAny();
+ }
+ FloatRegister getAnyFloat() const {
+ return this->Parent::set_.fpus().getAny();
+ }
+
+ Register takeAnyGeneral() {
+ Register reg = getAnyGeneral();
+ take(reg);
+ return reg;
+ }
+ FloatRegister takeAnyFloat() {
+ FloatRegister reg = getAnyFloat();
+ take(reg);
+ return reg;
+ }
+ ValueOperand takeAnyValue() {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(takeAnyGeneral(), takeAnyGeneral());
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(takeAnyGeneral());
+#else
+#error "Bad architecture"
+#endif
+ }
+};
+
+
+// Interface which is common to all register set implementations. It overloads
+// |add|, |take| and |takeUnchecked| methods for types such as |ValueOperand|
+// and |TypedOrValueRegister|.
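+//
+// For example (an illustrative sketch; |val| stands for any ValueOperand):
+//
+//   LiveRegisterSet live;
+//   live.add(val);   // On NUNBOX32 this adds both the payload and type
+//                    // registers; on PUNBOX64 it adds the single value register.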
+template <class Accessors, typename Set>
+class CommonRegSet : public SpecializedRegSet<Accessors, Set>
+{
+ typedef SpecializedRegSet<Accessors, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(CommonRegSet)
+
+ RegSet set() const {
+ return this->Parent::set_;
+ }
+ RegSet& set() {
+ return this->Parent::set_;
+ }
+
+ bool empty() const {
+ return this->Parent::set_.empty();
+ }
+ void clear() {
+ this->Parent::set_.clear();
+ }
+
+ using Parent::add;
+ void add(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ add(value.payloadReg());
+ add(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ add(value.valueReg());
+#else
+#error "Bad architecture"
+#endif
+ }
+ void add(TypedOrValueRegister reg) {
+ if (reg.hasValue())
+ add(reg.valueReg());
+ else if (reg.hasTyped())
+ add(reg.typedReg());
+ }
+
+ using Parent::take;
+ void take(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ take(value.payloadReg());
+ take(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ take(value.valueReg());
+#else
+#error "Bad architecture"
+#endif
+ }
+ void take(TypedOrValueRegister reg) {
+ if (reg.hasValue())
+ take(reg.valueReg());
+ else if (reg.hasTyped())
+ take(reg.typedReg());
+ }
+
+ using Parent::takeUnchecked;
+ void takeUnchecked(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ takeUnchecked(value.payloadReg());
+ takeUnchecked(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ takeUnchecked(value.valueReg());
+#else
+#error "Bad architecture"
+#endif
+ }
+ void takeUnchecked(TypedOrValueRegister reg) {
+ if (reg.hasValue())
+ takeUnchecked(reg.valueReg());
+ else if (reg.hasTyped())
+ takeUnchecked(reg.typedReg());
+ }
+};
+
+
+// These classes do not provide any additional members; they only use their
+// constructors to forward to the common interface for all register sets. The
+// only benefit of these classes is to provide user-friendly names.
+template <typename Set>
+class LiveSet : public CommonRegSet<LiveSetAccessors<Set>, Set>
+{
+ typedef CommonRegSet<LiveSetAccessors<Set>, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(LiveSet)
+};
+
+template <typename Set>
+class AllocatableSet : public CommonRegSet<AllocatableSetAccessors<Set>, Set>
+{
+ typedef CommonRegSet<AllocatableSetAccessors<Set>, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(AllocatableSet)
+
+ LiveSet<Set> asLiveSet() const {
+ return LiveSet<Set>(this->set());
+ }
+};
+
+#define DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(REGSET) \
+ typedef Parent::RegSet RegSet; \
+ typedef Parent::RegType RegType; \
+ typedef Parent::SetType SetType; \
+ \
+ constexpr REGSET() : Parent() {} \
+ explicit constexpr REGSET(SetType) = delete; \
+ explicit constexpr REGSET(RegSet set) : Parent(set) {} \
+ constexpr REGSET(GeneralRegisterSet gpr, FloatRegisterSet fpu) \
+ : Parent(RegisterSet(gpr, fpu)) \
+ {} \
+ REGSET(REGSET<GeneralRegisterSet> gpr, REGSET<FloatRegisterSet> fpu) \
+ : Parent(RegisterSet(gpr.set(), fpu.set())) \
+ {}
+
+template <>
+class LiveSet<RegisterSet>
+ : public CommonRegSet<LiveSetAccessors<RegisterSet>, RegisterSet>
+{
+ // Note: We have to provide a qualified name for LiveSetAccessors, as it is
+ // interpreted as being the specialized class name inherited from the parent
+ // class specialization.
+ typedef CommonRegSet<jit::LiveSetAccessors<RegisterSet>, RegisterSet> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(LiveSet)
+};
+
+template <>
+class AllocatableSet<RegisterSet>
+ : public CommonRegSet<AllocatableSetAccessors<RegisterSet>, RegisterSet>
+{
+ // Note: We have to provide a qualified name for AllocatableSetAccessors, as
+ // it is interpreted as being the specialized class name inherited from the
+ // parent class specialization.
+ typedef CommonRegSet<jit::AllocatableSetAccessors<RegisterSet>, RegisterSet> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(AllocatableSet)
+
+ LiveSet<RegisterSet> asLiveSet() const {
+ return LiveSet<RegisterSet>(this->set());
+ }
+};
+
+#undef DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_
+#undef DEFINE_ACCESSOR_CONSTRUCTORS_
+
+typedef AllocatableSet<GeneralRegisterSet> AllocatableGeneralRegisterSet;
+typedef AllocatableSet<FloatRegisterSet> AllocatableFloatRegisterSet;
+typedef AllocatableSet<RegisterSet> AllocatableRegisterSet;
+
+typedef LiveSet<GeneralRegisterSet> LiveGeneralRegisterSet;
+typedef LiveSet<FloatRegisterSet> LiveFloatRegisterSet;
+typedef LiveSet<RegisterSet> LiveRegisterSet;
+
+// iterates in whatever order happens to be convenient.
+// Use TypedRegisterBackwardIterator or TypedRegisterForwardIterator if a
+// specific order is required.
+template <typename T>
+class TypedRegisterIterator
+{
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterIterator(TypedRegisterSet<T> regset) : regset_(regset)
+ { }
+ explicit TypedRegisterIterator(LiveSet<TypedRegisterSet<T>> regset) : regset_(regset)
+ { }
+ TypedRegisterIterator(const TypedRegisterIterator& other) : regset_(other.regset_)
+ { }
+
+ bool more() const {
+ return !regset_.empty();
+ }
+ TypedRegisterIterator<T>& operator ++() {
+ regset_.takeAny();
+ return *this;
+ }
+ T operator*() const {
+ return regset_.getAny();
+ }
+};
+
+// iterates backwards, that is, rn to r0
+template <typename T>
+class TypedRegisterBackwardIterator
+{
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterBackwardIterator(TypedRegisterSet<T> regset) : regset_(regset)
+ { }
+ explicit TypedRegisterBackwardIterator(LiveSet<TypedRegisterSet<T>> regset) : regset_(regset)
+ { }
+ TypedRegisterBackwardIterator(const TypedRegisterBackwardIterator& other)
+ : regset_(other.regset_)
+ { }
+
+ bool more() const {
+ return !regset_.empty();
+ }
+ TypedRegisterBackwardIterator<T>& operator ++() {
+ regset_.takeLast();
+ return *this;
+ }
+ T operator*() const {
+ return regset_.getLast();
+ }
+};
+
+// iterates forwards, that is r0 to rn
+template <typename T>
+class TypedRegisterForwardIterator
+{
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterForwardIterator(TypedRegisterSet<T> regset) : regset_(regset)
+ { }
+ explicit TypedRegisterForwardIterator(LiveSet<TypedRegisterSet<T>> regset) : regset_(regset)
+ { }
+ TypedRegisterForwardIterator(const TypedRegisterForwardIterator& other) : regset_(other.regset_)
+ { }
+
+ bool more() const {
+ return !regset_.empty();
+ }
+ TypedRegisterForwardIterator<T>& operator ++() {
+ regset_.takeFirst();
+ return *this;
+ }
+ T operator*() const {
+ return regset_.getFirst();
+ }
+};
+
+typedef TypedRegisterIterator<Register> GeneralRegisterIterator;
+typedef TypedRegisterIterator<FloatRegister> FloatRegisterIterator;
+typedef TypedRegisterBackwardIterator<Register> GeneralRegisterBackwardIterator;
+typedef TypedRegisterBackwardIterator<FloatRegister> FloatRegisterBackwardIterator;
+typedef TypedRegisterForwardIterator<Register> GeneralRegisterForwardIterator;
+typedef TypedRegisterForwardIterator<FloatRegister> FloatRegisterForwardIterator;
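+
+// A typical iteration over a register set looks like the following (an
+// illustrative sketch, not taken from a particular caller):
+//
+//   for (GeneralRegisterForwardIterator iter(regs); iter.more(); ++iter)
+//       use(*iter);
+//
+// where |regs| is a GeneralRegisterSet or LiveGeneralRegisterSet and |use|
+// stands for whatever per-register work the caller needs.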
+
+class AnyRegisterIterator
+{
+ GeneralRegisterIterator geniter_;
+ FloatRegisterIterator floatiter_;
+
+ public:
+ AnyRegisterIterator()
+ : geniter_(GeneralRegisterSet::All()), floatiter_(FloatRegisterSet::All())
+ { }
+ AnyRegisterIterator(GeneralRegisterSet genset, FloatRegisterSet floatset)
+ : geniter_(genset), floatiter_(floatset)
+ { }
+ explicit AnyRegisterIterator(const RegisterSet& set)
+ : geniter_(set.gpr_), floatiter_(set.fpu_)
+ { }
+ explicit AnyRegisterIterator(const LiveSet<RegisterSet>& set)
+ : geniter_(set.gprs()), floatiter_(set.fpus())
+ { }
+ AnyRegisterIterator(const AnyRegisterIterator& other)
+ : geniter_(other.geniter_), floatiter_(other.floatiter_)
+ { }
+ bool more() const {
+ return geniter_.more() || floatiter_.more();
+ }
+ AnyRegisterIterator& operator ++() {
+ if (geniter_.more())
+ ++geniter_;
+ else
+ ++floatiter_;
+ return *this;
+ }
+ AnyRegister operator*() const {
+ if (geniter_.more())
+ return AnyRegister(*geniter_);
+ return AnyRegister(*floatiter_);
+ }
+};
+
+class ABIArg
+{
+ public:
+ enum Kind {
+ GPR,
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ GPR_PAIR,
+#endif
+ FPU,
+ Stack
+ };
+
+ private:
+ Kind kind_;
+ union {
+ Register::Code gpr_;
+ FloatRegister::Code fpu_;
+ uint32_t offset_;
+ } u;
+
+ public:
+ ABIArg() : kind_(Kind(-1)) { u.offset_ = -1; }
+ explicit ABIArg(Register gpr) : kind_(GPR) { u.gpr_ = gpr.code(); }
+ explicit ABIArg(Register gprLow, Register gprHigh)
+ {
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ kind_ = GPR_PAIR;
+#else
+ MOZ_CRASH("Unsupported type of ABI argument.");
+#endif
+ u.gpr_ = gprLow.code();
+ MOZ_ASSERT(u.gpr_ % 2 == 0);
+ MOZ_ASSERT(u.gpr_ + 1 == gprHigh.code());
+ }
+ explicit ABIArg(FloatRegister fpu) : kind_(FPU) { u.fpu_ = fpu.code(); }
+ explicit ABIArg(uint32_t offset) : kind_(Stack) { u.offset_ = offset; }
+
+ Kind kind() const { return kind_; }
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ bool isGeneralRegPair() const { return kind_ == GPR_PAIR; }
+#else
+ bool isGeneralRegPair() const { return false; }
+#endif
+
+ Register gpr() const {
+ MOZ_ASSERT(kind() == GPR);
+ return Register::FromCode(u.gpr_);
+ }
+ Register64 gpr64() const {
+#ifdef JS_PUNBOX64
+ return Register64(gpr());
+#else
+ return Register64(oddGpr(), evenGpr());
+#endif
+ }
+ Register evenGpr() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(u.gpr_);
+ }
+ Register oddGpr() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(u.gpr_ + 1);
+ }
+ FloatRegister fpu() const { MOZ_ASSERT(kind() == FPU); return FloatRegister::FromCode(u.fpu_); }
+ uint32_t offsetFromArgBase() const { MOZ_ASSERT(kind() == Stack); return u.offset_; }
+
+ bool argInRegister() const { return kind() != Stack; }
+ AnyRegister reg() const { return kind_ == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
+
+ bool operator==(const ABIArg& rhs) const {
+ if (kind_ != rhs.kind_)
+ return false;
+
+ switch((int8_t)kind_) {
+ case GPR: return u.gpr_ == rhs.u.gpr_;
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case GPR_PAIR: return u.gpr_ == rhs.u.gpr_;
+#endif
+ case FPU: return u.fpu_ == rhs.u.fpu_;
+ case Stack: return u.offset_ == rhs.u.offset_;
+ case -1: return true;
+ default: MOZ_CRASH("Invalid value for ABIArg kind");
+ }
+ }
+
+ bool operator!=(const ABIArg& rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+// Get the set of registers which should be saved by a block of code which
+// clobbers all registers besides |unused|, but does not clobber floating point
+// registers.
+inline LiveGeneralRegisterSet
+SavedNonVolatileRegisters(AllocatableGeneralRegisterSet unused)
+{
+ LiveGeneralRegisterSet result;
+
+ for (GeneralRegisterIterator iter(GeneralRegisterSet::NonVolatile()); iter.more(); ++iter) {
+ Register reg = *iter;
+ if (!unused.has(reg))
+ result.add(reg);
+ }
+
+ // Some platforms require the link register to be saved, if calls can be made.
+#if defined(JS_CODEGEN_ARM)
+ result.add(Register::FromCode(Registers::lr));
+#elif defined(JS_CODEGEN_ARM64)
+ result.add(Register::FromCode(Registers::lr));
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ result.add(Register::FromCode(Registers::ra));
+#endif
+
+ return result;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RegisterSets_h */
diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h
new file mode 100644
index 000000000..4ecf3ae9c
--- /dev/null
+++ b/js/src/jit/Registers.h
@@ -0,0 +1,250 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Registers_h
+#define jit_Registers_h
+
+#include "mozilla/Array.h"
+
+#include "jit/IonTypes.h"
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/Architecture-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Architecture-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Architecture-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Architecture-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/Architecture-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+struct Register {
+ typedef Registers Codes;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::Code Code;
+ typedef Codes::SetType SetType;
+
+ Codes::Encoding reg_;
+ static Register FromCode(Code i) {
+ MOZ_ASSERT(i < Registers::Total);
+ Register r = { Encoding(i) };
+ return r;
+ }
+ static Register FromName(const char* name) {
+ Code code = Registers::FromName(name);
+ Register r = { Encoding(code) };
+ return r;
+ }
+ static Register Invalid() {
+ Register r = { Encoding(Codes::Invalid) };
+ return r;
+ }
+ constexpr Code code() const {
+ return Code(reg_);
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(Code(reg_) < Registers::Total);
+ return reg_;
+ }
+ const char* name() const {
+ return Registers::GetName(code());
+ }
+ bool operator ==(Register other) const {
+ return reg_ == other.reg_;
+ }
+ bool operator !=(Register other) const {
+ return reg_ != other.reg_;
+ }
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & Registers::VolatileMask);
+ }
+ bool aliases(const Register& other) const {
+ return reg_ == other.reg_;
+ }
+ uint32_t numAliased() const {
+ return 1;
+ }
+
+ // N.B. FloatRegister is an explicit outparam here because msvc-2010
+ // miscompiled it on win64 when the value was simply returned. This
+    // now has an explicit outparam for compatibility.
+ void aliased(uint32_t aliasIdx, Register* ret) const {
+ MOZ_ASSERT(aliasIdx == 0);
+ *ret = *this;
+ }
+
+ SetType alignedOrDominatedAliasedSet() const {
+ return SetType(1) << code();
+ }
+
+ static uint32_t SetSize(SetType x) {
+ return Codes::SetSize(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return Codes::FirstBit(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return Codes::LastBit(x);
+ }
+};
+
+#if defined(JS_NUNBOX32)
+static const uint32_t INT64LOW_OFFSET = 0 * sizeof(int32_t);
+static const uint32_t INT64HIGH_OFFSET = 1 * sizeof(int32_t);
+#endif
+
+struct Register64
+{
+#ifdef JS_PUNBOX64
+ Register reg;
+#else
+ Register high;
+ Register low;
+#endif
+
+#ifdef JS_PUNBOX64
+ explicit constexpr Register64(Register r)
+ : reg(r)
+ {}
+ bool operator ==(Register64 other) const {
+ return reg == other.reg;
+ }
+ bool operator !=(Register64 other) const {
+ return reg != other.reg;
+ }
+ static Register64 Invalid() {
+ return Register64(Register::Invalid());
+ }
+#else
+ constexpr Register64(Register h, Register l)
+ : high(h), low(l)
+ {}
+ bool operator ==(Register64 other) const {
+ return high == other.high && low == other.low;
+ }
+ bool operator !=(Register64 other) const {
+ return high != other.high || low != other.low;
+ }
+ static Register64 Invalid() {
+ return Register64(Register::Invalid(), Register::Invalid());
+ }
+#endif
+};
+
+class RegisterDump
+{
+ public:
+ typedef mozilla::Array<Registers::RegisterContent, Registers::Total> GPRArray;
+ typedef mozilla::Array<FloatRegisters::RegisterContent, FloatRegisters::TotalPhys> FPUArray;
+
+ protected: // Silence Clang warning.
+ GPRArray regs_;
+ FPUArray fpregs_;
+
+ public:
+ static size_t offsetOfRegister(Register reg) {
+ return offsetof(RegisterDump, regs_) + reg.code() * sizeof(uintptr_t);
+ }
+ static size_t offsetOfRegister(FloatRegister reg) {
+ return offsetof(RegisterDump, fpregs_) + reg.getRegisterDumpOffsetInBytes();
+ }
+};
+
+// Information needed to recover machine register state. This records the
+// locations of spilled registers and not the contents of the spilled
+// registers. Thus we can safely assume that this structure is unchanged, even
+// if the GC pointers mapped by this structure are relocated.
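+//
+// For example (an illustrative sketch; |reg| and |spillAddress| are stand-ins
+// for a real register and the stack address holding its spilled value):
+//
+//   MachineState machine;
+//   machine.setRegisterLocation(reg, spillAddress);
+//   uintptr_t value = machine.read(reg);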
+class MachineState
+{
+ mozilla::Array<Registers::RegisterContent*, Registers::Total> regs_;
+ mozilla::Array<FloatRegisters::RegisterContent*, FloatRegisters::Total> fpregs_;
+
+ public:
+ MachineState() {
+#ifndef JS_CODEGEN_NONE
+ for (uintptr_t i = 0; i < Registers::Total; i++)
+ regs_[i] = reinterpret_cast<Registers::RegisterContent*>(i + 0x100);
+ for (uintptr_t i = 0; i < FloatRegisters::Total; i++)
+ fpregs_[i] = reinterpret_cast<FloatRegisters::RegisterContent*>(i + 0x200);
+#endif
+ }
+
+ static MachineState FromBailout(RegisterDump::GPRArray& regs, RegisterDump::FPUArray& fpregs);
+
+ void setRegisterLocation(Register reg, uintptr_t* up) {
+ regs_[reg.code()] = (Registers::RegisterContent*) up;
+ }
+ void setRegisterLocation(FloatRegister reg, float* fp) {
+ MOZ_ASSERT(reg.isSingle());
+ fpregs_[reg.code()] = (FloatRegisters::RegisterContent*) fp;
+ }
+ void setRegisterLocation(FloatRegister reg, double* dp) {
+ fpregs_[reg.code()] = (FloatRegisters::RegisterContent*) dp;
+ }
+ void setRegisterLocation(FloatRegister reg, FloatRegisters::RegisterContent* rp) {
+ fpregs_[reg.code()] = rp;
+ }
+
+ bool has(Register reg) const {
+ return regs_[reg.code()] != nullptr;
+ }
+ bool has(FloatRegister reg) const {
+ return fpregs_[reg.code()] != nullptr;
+ }
+ uintptr_t read(Register reg) const {
+ return regs_[reg.code()]->r;
+ }
+ double read(FloatRegister reg) const {
+ return fpregs_[reg.code()]->d;
+ }
+ void write(Register reg, uintptr_t value) const {
+ regs_[reg.code()]->r = value;
+ }
+ const FloatRegisters::RegisterContent* address(FloatRegister reg) const {
+ return fpregs_[reg.code()];
+ }
+};
+
+class MacroAssembler;
+
+// Declares a register as owned within the scope of the object.
+// In debug mode, owned register state is tracked within the MacroAssembler,
+// and an assert will fire if ownership is conflicting.
+// In contrast to ARM64's UseScratchRegisterScope, this class has no overhead
+// in non-debug builds.
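+//
+// Typical use (an illustrative sketch; |masm| and |someReg| stand for the
+// active MacroAssembler and whichever register the caller wants to claim):
+//
+//   {
+//       AutoRegisterScope scratch(masm, someReg);
+//       // ... use |scratch| wherever a Register is expected ...
+//   }   // Ownership is released when the scope ends (in debug builds).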
+template <class RegisterType>
+struct AutoGenericRegisterScope : public RegisterType
+{
+ // Prevent MacroAssembler templates from creating copies,
+ // which causes the destructor to fire more than once.
+ AutoGenericRegisterScope(const AutoGenericRegisterScope& other) = delete;
+
+#ifdef DEBUG
+ MacroAssembler& masm_;
+ explicit AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg);
+ ~AutoGenericRegisterScope();
+#else
+ constexpr explicit AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
+ : RegisterType(reg)
+ { }
+#endif
+};
+
+typedef AutoGenericRegisterScope<Register> AutoRegisterScope;
+typedef AutoGenericRegisterScope<FloatRegister> AutoFloatRegisterScope;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Registers_h */
diff --git a/js/src/jit/RematerializedFrame.cpp b/js/src/jit/RematerializedFrame.cpp
new file mode 100644
index 000000000..cb324220c
--- /dev/null
+++ b/js/src/jit/RematerializedFrame.cpp
@@ -0,0 +1,222 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RematerializedFrame.h"
+
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/JitFrames.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/Debugger.h"
+
+#include "jsscriptinlines.h"
+#include "jit/JitFrames-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+
+using namespace js;
+using namespace jit;
+
+struct CopyValueToRematerializedFrame
+{
+ Value* slots;
+
+ explicit CopyValueToRematerializedFrame(Value* slots)
+ : slots(slots)
+ { }
+
+ void operator()(const Value& v) {
+ *slots++ = v;
+ }
+};
+
+RematerializedFrame::RematerializedFrame(JSContext* cx, uint8_t* top, unsigned numActualArgs,
+ InlineFrameIterator& iter, MaybeReadFallback& fallback)
+ : prevUpToDate_(false),
+ isDebuggee_(iter.script()->isDebuggee()),
+ isConstructing_(iter.isConstructing()),
+ hasCachedSavedFrame_(false),
+ top_(top),
+ pc_(iter.pc()),
+ frameNo_(iter.frameNo()),
+ numActualArgs_(numActualArgs),
+ script_(iter.script())
+{
+ if (iter.isFunctionFrame())
+ callee_ = iter.callee(fallback);
+ else
+ callee_ = nullptr;
+
+ CopyValueToRematerializedFrame op(slots_);
+ iter.readFrameArgsAndLocals(cx, op, op, &envChain_, &hasInitialEnv_, &returnValue_,
+ &argsObj_, &thisArgument_, &newTarget_, ReadFrame_Actuals,
+ fallback);
+}
+
+/* static */ RematerializedFrame*
+RematerializedFrame::New(JSContext* cx, uint8_t* top, InlineFrameIterator& iter,
+ MaybeReadFallback& fallback)
+{
+ unsigned numFormals = iter.isFunctionFrame() ? iter.calleeTemplate()->nargs() : 0;
+ unsigned argSlots = Max(numFormals, iter.numActualArgs());
+ size_t numBytes = sizeof(RematerializedFrame) +
+ (argSlots + iter.script()->nfixed()) * sizeof(Value) -
+ sizeof(Value); // 1 Value included in sizeof(RematerializedFrame)
+
+ void* buf = cx->pod_calloc<uint8_t>(numBytes);
+ if (!buf)
+ return nullptr;
+
+ return new (buf) RematerializedFrame(cx, top, iter.numActualArgs(), iter, fallback);
+}
+
+/* static */ bool
+RematerializedFrame::RematerializeInlineFrames(JSContext* cx, uint8_t* top,
+ InlineFrameIterator& iter,
+ MaybeReadFallback& fallback,
+ GCVector<RematerializedFrame*>& frames)
+{
+ Rooted<GCVector<RematerializedFrame*>> tempFrames(cx, GCVector<RematerializedFrame*>(cx));
+ if (!tempFrames.resize(iter.frameCount()))
+ return false;
+
+ while (true) {
+ size_t frameNo = iter.frameNo();
+ tempFrames[frameNo].set(RematerializedFrame::New(cx, top, iter, fallback));
+ if (!tempFrames[frameNo])
+ return false;
+ if (tempFrames[frameNo]->environmentChain()) {
+ if (!EnsureHasEnvironmentObjects(cx, tempFrames[frameNo].get()))
+ return false;
+ }
+
+ if (!iter.more())
+ break;
+ ++iter;
+ }
+
+ frames = Move(tempFrames.get());
+ return true;
+}
+
+/* static */ void
+RematerializedFrame::FreeInVector(GCVector<RematerializedFrame*>& frames)
+{
+ for (size_t i = 0; i < frames.length(); i++) {
+ RematerializedFrame* f = frames[i];
+ MOZ_ASSERT(!Debugger::inFrameMaps(f));
+ f->RematerializedFrame::~RematerializedFrame();
+ js_free(f);
+ }
+ frames.clear();
+}
+
+CallObject&
+RematerializedFrame::callObj() const
+{
+ MOZ_ASSERT(hasInitialEnvironment());
+
+ JSObject* env = environmentChain();
+ while (!env->is<CallObject>())
+ env = env->enclosingEnvironment();
+ return env->as<CallObject>();
+}
+
+bool
+RematerializedFrame::initFunctionEnvironmentObjects(JSContext* cx)
+{
+ return js::InitFunctionEnvironmentObjects(cx, this);
+}
+
+bool
+RematerializedFrame::pushVarEnvironment(JSContext* cx, HandleScope scope)
+{
+ return js::PushVarEnvironmentObject(cx, scope, this);
+}
+
+void
+RematerializedFrame::trace(JSTracer* trc)
+{
+ TraceRoot(trc, &script_, "remat ion frame script");
+ TraceRoot(trc, &envChain_, "remat ion frame env chain");
+ if (callee_)
+ TraceRoot(trc, &callee_, "remat ion frame callee");
+ if (argsObj_)
+ TraceRoot(trc, &argsObj_, "remat ion frame argsobj");
+ TraceRoot(trc, &returnValue_, "remat ion frame return value");
+ TraceRoot(trc, &thisArgument_, "remat ion frame this");
+ TraceRoot(trc, &newTarget_, "remat ion frame newTarget");
+ TraceRootRange(trc, numArgSlots() + script_->nfixed(), slots_, "remat ion frame stack");
+}
+
+void
+RematerializedFrame::dump()
+{
+ fprintf(stderr, " Rematerialized Ion Frame%s\n", inlined() ? " (inlined)" : "");
+ if (isFunctionFrame()) {
+ fprintf(stderr, " callee fun: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(*callee()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %" PRIuSIZE " offset %" PRIuSIZE "\n",
+ script()->filename(), script()->lineno(),
+ script()->pcToOffset(pc()));
+
+ fprintf(stderr, " script = %p\n", (void*) script());
+
+ if (isFunctionFrame()) {
+ fprintf(stderr, " env chain: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(*environmentChain()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+
+ if (hasArgsObj()) {
+ fprintf(stderr, " args obj: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(argsObj()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ fprintf(stderr, " this: ");
+#ifdef DEBUG
+ DumpValue(thisArgument());
+#else
+ fprintf(stderr, "?\n");
+#endif
+
+ for (unsigned i = 0; i < numActualArgs(); i++) {
+ if (i < numFormalArgs())
+ fprintf(stderr, " formal (arg %d): ", i);
+ else
+ fprintf(stderr, " overflown (arg %d): ", i);
+#ifdef DEBUG
+ DumpValue(argv()[i]);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ for (unsigned i = 0; i < script()->nfixed(); i++) {
+ fprintf(stderr, " local %d: ", i);
+#ifdef DEBUG
+ DumpValue(locals()[i]);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+ }
+
+ fputc('\n', stderr);
+}
diff --git a/js/src/jit/RematerializedFrame.h b/js/src/jit/RematerializedFrame.h
new file mode 100644
index 000000000..80bbca34a
--- /dev/null
+++ b/js/src/jit/RematerializedFrame.h
@@ -0,0 +1,275 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RematerializedFrame_h
+#define jit_RematerializedFrame_h
+
+#include <algorithm>
+
+#include "jsfun.h"
+
+#include "jit/JitFrameIterator.h"
+#include "jit/JitFrames.h"
+
+#include "vm/EnvironmentObject.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+//
+// An optimized frame that has been rematerialized with values read out of
+// Snapshots.
+//
+class RematerializedFrame
+{
+ // See DebugScopes::updateLiveScopes.
+ bool prevUpToDate_;
+
+ // Propagated to the Baseline frame once this is popped.
+ bool isDebuggee_;
+
+    // Has an initial environment been pushed on the environment chain for
+ // function frames that need a CallObject or eval frames that need a
+ // VarEnvironmentObject?
+ bool hasInitialEnv_;
+
+ // Is this frame constructing?
+ bool isConstructing_;
+
+ // If true, this frame has been on the stack when
+ // |js::SavedStacks::saveCurrentStack| was called, and so there is a
+ // |js::SavedFrame| object cached for this frame.
+ bool hasCachedSavedFrame_;
+
+ // The fp of the top frame associated with this possibly inlined frame.
+ uint8_t* top_;
+
+ // The bytecode at the time of rematerialization.
+ jsbytecode* pc_;
+
+ size_t frameNo_;
+ unsigned numActualArgs_;
+
+ JSScript* script_;
+ JSObject* envChain_;
+ JSFunction* callee_;
+ ArgumentsObject* argsObj_;
+
+ Value returnValue_;
+ Value thisArgument_;
+ Value newTarget_;
+ Value slots_[1];
+
+ RematerializedFrame(JSContext* cx, uint8_t* top, unsigned numActualArgs,
+ InlineFrameIterator& iter, MaybeReadFallback& fallback);
+
+ public:
+ static RematerializedFrame* New(JSContext* cx, uint8_t* top, InlineFrameIterator& iter,
+ MaybeReadFallback& fallback);
+
+ // Rematerialize all remaining frames pointed to by |iter| into |frames|
+ // in older-to-younger order, e.g., frames[0] is the oldest frame.
+ static MOZ_MUST_USE bool RematerializeInlineFrames(JSContext* cx, uint8_t* top,
+ InlineFrameIterator& iter,
+ MaybeReadFallback& fallback,
+ GCVector<RematerializedFrame*>& frames);
+
+ // Free a vector of RematerializedFrames; takes care to call the
+ // destructor. Also clears the vector.
+ static void FreeInVector(GCVector<RematerializedFrame*>& frames);
+
+ bool prevUpToDate() const {
+ return prevUpToDate_;
+ }
+ void setPrevUpToDate() {
+ prevUpToDate_ = true;
+ }
+ void unsetPrevUpToDate() {
+ prevUpToDate_ = false;
+ }
+
+ bool isDebuggee() const {
+ return isDebuggee_;
+ }
+ void setIsDebuggee() {
+ isDebuggee_ = true;
+ }
+ void unsetIsDebuggee() {
+ MOZ_ASSERT(!script()->isDebuggee());
+ isDebuggee_ = false;
+ }
+
+ uint8_t* top() const {
+ return top_;
+ }
+ JSScript* outerScript() const {
+ JitFrameLayout* jsFrame = (JitFrameLayout*)top_;
+ return ScriptFromCalleeToken(jsFrame->calleeToken());
+ }
+ jsbytecode* pc() const {
+ return pc_;
+ }
+ size_t frameNo() const {
+ return frameNo_;
+ }
+ bool inlined() const {
+ return frameNo_ > 0;
+ }
+
+ JSObject* environmentChain() const {
+ return envChain_;
+ }
+
+ template <typename SpecificEnvironment>
+ void pushOnEnvironmentChain(SpecificEnvironment& env) {
+ MOZ_ASSERT(*environmentChain() == env.enclosingEnvironment());
+ envChain_ = &env;
+ if (IsFrameInitialEnvironment(this, env))
+ hasInitialEnv_ = true;
+ }
+
+ template <typename SpecificEnvironment>
+ void popOffEnvironmentChain() {
+ MOZ_ASSERT(envChain_->is<SpecificEnvironment>());
+ envChain_ = &envChain_->as<SpecificEnvironment>().enclosingEnvironment();
+ }
+
+ MOZ_MUST_USE bool initFunctionEnvironmentObjects(JSContext* cx);
+ MOZ_MUST_USE bool pushVarEnvironment(JSContext* cx, HandleScope scope);
+
+ bool hasInitialEnvironment() const {
+ return hasInitialEnv_;
+ }
+ CallObject& callObj() const;
+
+ bool hasArgsObj() const {
+ return !!argsObj_;
+ }
+ ArgumentsObject& argsObj() const {
+ MOZ_ASSERT(hasArgsObj());
+ MOZ_ASSERT(script()->needsArgsObj());
+ return *argsObj_;
+ }
+
+ bool isFunctionFrame() const {
+ return !!script_->functionNonDelazifying();
+ }
+ bool isGlobalFrame() const {
+ return script_->isGlobalCode();
+ }
+ bool isModuleFrame() const {
+ return script_->module();
+ }
+
+ JSScript* script() const {
+ return script_;
+ }
+ JSFunction* callee() const {
+ MOZ_ASSERT(isFunctionFrame());
+ MOZ_ASSERT(callee_);
+ return callee_;
+ }
+ Value calleev() const {
+ return ObjectValue(*callee());
+ }
+ Value& thisArgument() {
+ return thisArgument_;
+ }
+
+ bool isConstructing() const {
+ return isConstructing_;
+ }
+
+ bool hasCachedSavedFrame() const {
+ return hasCachedSavedFrame_;
+ }
+
+ void setHasCachedSavedFrame() {
+ hasCachedSavedFrame_ = true;
+ }
+
+ unsigned numFormalArgs() const {
+ return isFunctionFrame() ? callee()->nargs() : 0;
+ }
+ unsigned numActualArgs() const {
+ return numActualArgs_;
+ }
+ unsigned numArgSlots() const {
+ return (std::max)(numFormalArgs(), numActualArgs());
+ }
+
+ Value* argv() {
+ return slots_;
+ }
+ Value* locals() {
+ return slots_ + numArgSlots();
+ }
+
+ Value& unaliasedLocal(unsigned i) {
+ MOZ_ASSERT(i < script()->nfixed());
+ return locals()[i];
+ }
+ Value& unaliasedFormal(unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) {
+ MOZ_ASSERT(i < numFormalArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals() &&
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+ Value& unaliasedActual(unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) {
+ MOZ_ASSERT(i < numActualArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing && i < numFormalArgs(), !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ Value newTarget() {
+ MOZ_ASSERT(isFunctionFrame());
+ if (callee()->isArrow())
+ return callee()->getExtendedSlot(FunctionExtended::ARROW_NEWTARGET_SLOT);
+ MOZ_ASSERT_IF(!isConstructing(), newTarget_.isUndefined());
+ return newTarget_;
+ }
+
+ void setReturnValue(const Value& value) {
+ returnValue_ = value;
+ }
+
+ Value& returnValue() {
+ return returnValue_;
+ }
+
+ void trace(JSTracer* trc);
+ void dump();
+};
+
+} // namespace jit
+} // namespace js
+
+namespace JS {
+
+template <>
+struct MapTypeToRootKind<js::jit::RematerializedFrame*>
+{
+ static const RootKind kind = RootKind::Traceable;
+};
+
+template <>
+struct GCPolicy<js::jit::RematerializedFrame*>
+{
+ static js::jit::RematerializedFrame* initial() {
+ return nullptr;
+ }
+
+ static void trace(JSTracer* trc, js::jit::RematerializedFrame** frame, const char* name) {
+ if (*frame)
+ (*frame)->trace(trc);
+ }
+};
+
+} // namespace JS
+
+#endif // jit_RematerializedFrame_h
diff --git a/js/src/jit/Safepoints.cpp b/js/src/jit/Safepoints.cpp
new file mode 100644
index 000000000..b0bb530f9
--- /dev/null
+++ b/js/src/jit/Safepoints.cpp
@@ -0,0 +1,562 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Safepoints.h"
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/BitSet.h"
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::FloorLog2;
+
+SafepointWriter::SafepointWriter(uint32_t slotCount, uint32_t argumentCount)
+ : frameSlots_((slotCount / sizeof(intptr_t)) + 1), // Stack slot counts are inclusive.
+ argumentSlots_(argumentCount / sizeof(intptr_t))
+{ }
+
+bool
+SafepointWriter::init(TempAllocator& alloc)
+{
+ return frameSlots_.init(alloc) && argumentSlots_.init(alloc);
+}
+
+uint32_t
+SafepointWriter::startEntry()
+{
+ JitSpew(JitSpew_Safepoints, "Encoding safepoint (position %" PRIuSIZE "):", stream_.length());
+ return uint32_t(stream_.length());
+}
+
+void
+SafepointWriter::writeOsiCallPointOffset(uint32_t osiCallPointOffset)
+{
+ stream_.writeUnsigned(osiCallPointOffset);
+}
+
+static void
+WriteRegisterMask(CompactBufferWriter& stream, uint32_t bits)
+{
+ if (sizeof(PackedRegisterMask) == 1)
+ stream.writeByte(bits);
+ else
+ stream.writeUnsigned(bits);
+}
+
+static int32_t
+ReadRegisterMask(CompactBufferReader& stream)
+{
+ if (sizeof(PackedRegisterMask) == 1)
+ return stream.readByte();
+ return stream.readUnsigned();
+}
+
+static void
+WriteFloatRegisterMask(CompactBufferWriter& stream, uint64_t bits)
+{
+ if (sizeof(FloatRegisters::SetType) == 1) {
+ stream.writeByte(bits);
+ } else if (sizeof(FloatRegisters::SetType) == 4) {
+ stream.writeUnsigned(bits);
+ } else {
+ MOZ_ASSERT(sizeof(FloatRegisters::SetType) == 8);
+ stream.writeUnsigned(bits & 0xffffffff);
+ stream.writeUnsigned(bits >> 32);
+ }
+}
+
+static int64_t
+ReadFloatRegisterMask(CompactBufferReader& stream)
+{
+ if (sizeof(FloatRegisters::SetType) == 1)
+ return stream.readByte();
+ if (sizeof(FloatRegisters::SetType) <= 4)
+ return stream.readUnsigned();
+ MOZ_ASSERT(sizeof(FloatRegisters::SetType) == 8);
+ uint64_t ret = stream.readUnsigned();
+ ret |= uint64_t(stream.readUnsigned()) << 32;
+ return ret;
+}
+
+void
+SafepointWriter::writeGcRegs(LSafepoint* safepoint)
+{
+ LiveGeneralRegisterSet gc(safepoint->gcRegs());
+ LiveGeneralRegisterSet spilledGpr(safepoint->liveRegs().gprs());
+ LiveFloatRegisterSet spilledFloat(safepoint->liveRegs().fpus());
+ LiveGeneralRegisterSet slots(safepoint->slotsOrElementsRegs());
+ LiveGeneralRegisterSet valueRegs;
+
+ WriteRegisterMask(stream_, spilledGpr.bits());
+ if (!spilledGpr.empty()) {
+ WriteRegisterMask(stream_, gc.bits());
+ WriteRegisterMask(stream_, slots.bits());
+
+#ifdef JS_PUNBOX64
+ valueRegs = safepoint->valueRegs();
+ WriteRegisterMask(stream_, valueRegs.bits());
+#endif
+ }
+
+ // GC registers are a subset of the spilled registers.
+ MOZ_ASSERT((valueRegs.bits() & ~spilledGpr.bits()) == 0);
+ MOZ_ASSERT((gc.bits() & ~spilledGpr.bits()) == 0);
+
+ WriteFloatRegisterMask(stream_, spilledFloat.bits());
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Safepoints)) {
+ for (GeneralRegisterForwardIterator iter(spilledGpr); iter.more(); ++iter) {
+ const char* type = gc.has(*iter)
+ ? "gc"
+ : slots.has(*iter)
+ ? "slots"
+ : valueRegs.has(*iter)
+ ? "value"
+ : "any";
+ JitSpew(JitSpew_Safepoints, " %s reg: %s", type, (*iter).name());
+ }
+ for (FloatRegisterForwardIterator iter(spilledFloat); iter.more(); ++iter)
+ JitSpew(JitSpew_Safepoints, " float reg: %s", (*iter).name());
+ }
+#endif
+}
+
+static void
+WriteBitset(const BitSet& set, CompactBufferWriter& stream)
+{
+ size_t count = set.rawLength();
+ const uint32_t* words = set.raw();
+ for (size_t i = 0; i < count; i++)
+ stream.writeUnsigned(words[i]);
+}
+
+static void
+MapSlotsToBitset(BitSet& stackSet, BitSet& argumentSet,
+ CompactBufferWriter& stream, const LSafepoint::SlotList& slots)
+{
+ stackSet.clear();
+ argumentSet.clear();
+
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ // Slots are represented at a distance from |fp|. We divide by the
+ // pointer size, since we only care about pointer-sized/aligned slots
+ // here.
+ MOZ_ASSERT(slots[i].slot % sizeof(intptr_t) == 0);
+ size_t index = slots[i].slot / sizeof(intptr_t);
+ (slots[i].stack ? stackSet : argumentSet).insert(index);
+ }
+
+ WriteBitset(stackSet, stream);
+ WriteBitset(argumentSet, stream);
+}
+
+void
+SafepointWriter::writeGcSlots(LSafepoint* safepoint)
+{
+ LSafepoint::SlotList& slots = safepoint->gcSlots();
+
+#ifdef JS_JITSPEW
+ for (uint32_t i = 0; i < slots.length(); i++)
+ JitSpew(JitSpew_Safepoints, " gc slot: %u", slots[i].slot);
+#endif
+
+ MapSlotsToBitset(frameSlots_, argumentSlots_, stream_, slots);
+}
+
+void
+SafepointWriter::writeSlotsOrElementsSlots(LSafepoint* safepoint)
+{
+ LSafepoint::SlotList& slots = safepoint->slotsOrElementsSlots();
+
+ stream_.writeUnsigned(slots.length());
+
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ if (!slots[i].stack)
+ MOZ_CRASH();
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_Safepoints, " slots/elements slot: %d", slots[i].slot);
+#endif
+ stream_.writeUnsigned(slots[i].slot);
+ }
+}
+
+void
+SafepointWriter::writeValueSlots(LSafepoint* safepoint)
+{
+ LSafepoint::SlotList& slots = safepoint->valueSlots();
+
+#ifdef JS_JITSPEW
+ for (uint32_t i = 0; i < slots.length(); i++)
+ JitSpew(JitSpew_Safepoints, " gc value: %u", slots[i].slot);
+#endif
+
+ MapSlotsToBitset(frameSlots_, argumentSlots_, stream_, slots);
+}
+
+#if defined(JS_JITSPEW) && defined(JS_NUNBOX32)
+static void
+DumpNunboxPart(const LAllocation& a)
+{
+ Fprinter& out = JitSpewPrinter();
+ if (a.isStackSlot()) {
+ out.printf("stack %d", a.toStackSlot()->slot());
+ } else if (a.isArgument()) {
+ out.printf("arg %d", a.toArgument()->index());
+ } else {
+ out.printf("reg %s", a.toGeneralReg()->reg().name());
+ }
+}
+#endif // JS_JITSPEW && JS_NUNBOX32
+
+// Nunbox part encoding:
+//
+// Reg = 000
+// Stack = 001
+// Arg = 010
+//
+// [vwu] nentries:
+// uint16_t: tttp ppXX XXXY YYYY
+//
+// If ttt = Reg, type is reg XXXXX
+// If ppp = Reg, payload is reg YYYYY
+//
+// If ttt != Reg, type is:
+// XXXXX if not 11111, otherwise followed by [vwu]
+// If ppp != Reg, payload is:
+// YYYYY if not 11111, otherwise followed by [vwu]
+//
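+// As a worked example (for illustration only): a type held in the general
+// register with code 2 and a payload held in stack slot 12 both fit in 5 bits,
+// so the 16-bit header is
+//
+//   (Part_Reg << TYPE_KIND_SHIFT) | (Part_Stack << PAYLOAD_KIND_SHIFT) |
+//   (2 << TYPE_INFO_SHIFT) | (12 << PAYLOAD_INFO_SHIFT)  ==  0x044c
+//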
+enum NunboxPartKind {
+ Part_Reg,
+ Part_Stack,
+ Part_Arg
+};
+
+static const uint32_t PART_KIND_BITS = 3;
+static const uint32_t PART_KIND_MASK = (1 << PART_KIND_BITS) - 1;
+static const uint32_t PART_INFO_BITS = 5;
+static const uint32_t PART_INFO_MASK = (1 << PART_INFO_BITS) - 1;
+
+static const uint32_t MAX_INFO_VALUE = (1 << PART_INFO_BITS) - 1;
+static const uint32_t TYPE_KIND_SHIFT = 16 - PART_KIND_BITS;
+static const uint32_t PAYLOAD_KIND_SHIFT = TYPE_KIND_SHIFT - PART_KIND_BITS;
+static const uint32_t TYPE_INFO_SHIFT = PAYLOAD_KIND_SHIFT - PART_INFO_BITS;
+static const uint32_t PAYLOAD_INFO_SHIFT = TYPE_INFO_SHIFT - PART_INFO_BITS;
+
+JS_STATIC_ASSERT(PAYLOAD_INFO_SHIFT == 0);
+
+#ifdef JS_NUNBOX32
+static inline NunboxPartKind
+AllocationToPartKind(const LAllocation& a)
+{
+ if (a.isRegister())
+ return Part_Reg;
+ if (a.isStackSlot())
+ return Part_Stack;
+ MOZ_ASSERT(a.isArgument());
+ return Part_Arg;
+}
+
+// gcc 4.5 doesn't actually inline CanEncodeInfoInHeader when only
+// using the "inline" keyword, and miscompiles the function as well
+// when doing block reordering with branch prediction information.
+// See bug 799295 comment 71.
+static MOZ_ALWAYS_INLINE bool
+CanEncodeInfoInHeader(const LAllocation& a, uint32_t* out)
+{
+ if (a.isGeneralReg()) {
+ *out = a.toGeneralReg()->reg().code();
+ return true;
+ }
+
+ if (a.isStackSlot())
+ *out = a.toStackSlot()->slot();
+ else
+ *out = a.toArgument()->index();
+
+ return *out < MAX_INFO_VALUE;
+}
+
+void
+SafepointWriter::writeNunboxParts(LSafepoint* safepoint)
+{
+ LSafepoint::NunboxList& entries = safepoint->nunboxParts();
+
+# ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Safepoints)) {
+ for (uint32_t i = 0; i < entries.length(); i++) {
+ SafepointNunboxEntry& entry = entries[i];
+ if (entry.type.isUse() || entry.payload.isUse())
+ continue;
+ JitSpewHeader(JitSpew_Safepoints);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" nunbox (type in ");
+ DumpNunboxPart(entry.type);
+ out.printf(", payload in ");
+ DumpNunboxPart(entry.payload);
+ out.printf(")\n");
+ }
+ }
+# endif
+
+ // Safepoints are permitted to have partially filled in entries for nunboxes,
+ // provided that only the type is live and not the payload. Omit these from
+ // the written safepoint.
+
+ size_t pos = stream_.length();
+ stream_.writeUnsigned(entries.length());
+
+ size_t count = 0;
+ for (size_t i = 0; i < entries.length(); i++) {
+ SafepointNunboxEntry& entry = entries[i];
+
+ if (entry.payload.isUse()) {
+ // No allocation associated with the payload.
+ continue;
+ }
+
+ if (entry.type.isUse()) {
+ // No allocation associated with the type. Look for another
+ // safepoint entry with an allocation for the type.
+ entry.type = safepoint->findTypeAllocation(entry.typeVreg);
+ if (entry.type.isUse())
+ continue;
+ }
+
+ count++;
+
+ uint16_t header = 0;
+
+ header |= (AllocationToPartKind(entry.type) << TYPE_KIND_SHIFT);
+ header |= (AllocationToPartKind(entry.payload) << PAYLOAD_KIND_SHIFT);
+
+ uint32_t typeVal;
+ bool typeExtra = !CanEncodeInfoInHeader(entry.type, &typeVal);
+ if (!typeExtra)
+ header |= (typeVal << TYPE_INFO_SHIFT);
+ else
+ header |= (MAX_INFO_VALUE << TYPE_INFO_SHIFT);
+
+ uint32_t payloadVal;
+ bool payloadExtra = !CanEncodeInfoInHeader(entry.payload, &payloadVal);
+ if (!payloadExtra)
+ header |= (payloadVal << PAYLOAD_INFO_SHIFT);
+ else
+ header |= (MAX_INFO_VALUE << PAYLOAD_INFO_SHIFT);
+
+ stream_.writeFixedUint16_t(header);
+ if (typeExtra)
+ stream_.writeUnsigned(typeVal);
+ if (payloadExtra)
+ stream_.writeUnsigned(payloadVal);
+ }
+
+ // Update the stream with the actual number of safepoint entries written.
+ stream_.writeUnsignedAt(pos, count, entries.length());
+}
+#endif
+
+void
+SafepointWriter::encode(LSafepoint* safepoint)
+{
+ uint32_t safepointOffset = startEntry();
+
+ MOZ_ASSERT(safepoint->osiCallPointOffset());
+
+ writeOsiCallPointOffset(safepoint->osiCallPointOffset());
+ writeGcRegs(safepoint);
+ writeGcSlots(safepoint);
+ writeValueSlots(safepoint);
+
+#ifdef JS_NUNBOX32
+ writeNunboxParts(safepoint);
+#endif
+
+ writeSlotsOrElementsSlots(safepoint);
+
+ endEntry();
+ safepoint->setOffset(safepointOffset);
+}
+
+void
+SafepointWriter::endEntry()
+{
+ JitSpew(JitSpew_Safepoints, " -- entry ended at %d", uint32_t(stream_.length()));
+}
+
+SafepointReader::SafepointReader(IonScript* script, const SafepointIndex* si)
+ : stream_(script->safepoints() + si->safepointOffset(),
+ script->safepoints() + script->safepointsSize()),
+ frameSlots_((script->frameSlots() / sizeof(intptr_t)) + 1), // Stack slot counts are inclusive.
+ argumentSlots_(script->argumentSlots() / sizeof(intptr_t))
+{
+ osiCallPointOffset_ = stream_.readUnsigned();
+
+ // gcSpills is a subset of allGprSpills.
+ allGprSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+ if (allGprSpills_.empty()) {
+ gcSpills_ = allGprSpills_;
+ valueSpills_ = allGprSpills_;
+ slotsOrElementsSpills_ = allGprSpills_;
+ } else {
+ gcSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+ slotsOrElementsSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+#ifdef JS_PUNBOX64
+ valueSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+#endif
+ }
+ allFloatSpills_ = FloatRegisterSet(ReadFloatRegisterMask(stream_));
+
+ advanceFromGcRegs();
+}
+
+uint32_t
+SafepointReader::osiReturnPointOffset() const
+{
+ return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
+}
+
+CodeLocationLabel
+SafepointReader::InvalidationPatchPoint(IonScript* script, const SafepointIndex* si)
+{
+ SafepointReader reader(script, si);
+
+ return CodeLocationLabel(script->method(), CodeOffset(reader.osiCallPointOffset()));
+}
+
+void
+SafepointReader::advanceFromGcRegs()
+{
+ currentSlotChunk_ = 0;
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = true;
+}
+
+bool
+SafepointReader::getSlotFromBitmap(SafepointSlotEntry* entry)
+{
+ while (currentSlotChunk_ == 0) {
+ // Are there any more chunks to read?
+ if (currentSlotsAreStack_) {
+ if (nextSlotChunkNumber_ == BitSet::RawLengthForBits(frameSlots_)) {
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = false;
+ continue;
+ }
+ } else if (nextSlotChunkNumber_ == BitSet::RawLengthForBits(argumentSlots_)) {
+ return false;
+ }
+
+ // Yes, read the next chunk.
+ currentSlotChunk_ = stream_.readUnsigned();
+ nextSlotChunkNumber_++;
+ }
+
+ // The current chunk still has bits in it, so get the next bit, then mask
+ // it out of the slot chunk.
+ uint32_t bit = FloorLog2(currentSlotChunk_);
+ currentSlotChunk_ &= ~(1 << bit);
+
+ // Return the slot, and re-scale it by the pointer size, reversing the
+ // transformation in MapSlotsToBitset.
+ entry->stack = currentSlotsAreStack_;
+ entry->slot = (((nextSlotChunkNumber_ - 1) * BitSet::BitsPerWord) + bit) * sizeof(intptr_t);
+ return true;
+}
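+
+// Illustrative note (exposition only): if the first stack chunk read by
+// getSlotFromBitmap is 0b101, FloorLog2 extracts the highest set bit first,
+// so successive calls report stack slot offset 2 * sizeof(intptr_t) and then
+// offset 0.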
+
+bool
+SafepointReader::getGcSlot(SafepointSlotEntry* entry)
+{
+ if (getSlotFromBitmap(entry))
+ return true;
+ advanceFromGcSlots();
+ return false;
+}
+
+void
+SafepointReader::advanceFromGcSlots()
+{
+    // Reset the chunk counters before reading the value slots.
+ currentSlotChunk_ = 0;
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = true;
+}
+
+bool
+SafepointReader::getValueSlot(SafepointSlotEntry* entry)
+{
+ if (getSlotFromBitmap(entry))
+ return true;
+ advanceFromValueSlots();
+ return false;
+}
+
+void
+SafepointReader::advanceFromValueSlots()
+{
+#ifdef JS_NUNBOX32
+ nunboxSlotsRemaining_ = stream_.readUnsigned();
+#else
+ nunboxSlotsRemaining_ = 0;
+ advanceFromNunboxSlots();
+#endif
+}
+
+static inline LAllocation
+PartFromStream(CompactBufferReader& stream, NunboxPartKind kind, uint32_t info)
+{
+ if (kind == Part_Reg)
+ return LGeneralReg(Register::FromCode(info));
+
+ if (info == MAX_INFO_VALUE)
+ info = stream.readUnsigned();
+
+ if (kind == Part_Stack)
+ return LStackSlot(info);
+
+ MOZ_ASSERT(kind == Part_Arg);
+ return LArgument(info);
+}
+
+bool
+SafepointReader::getNunboxSlot(LAllocation* type, LAllocation* payload)
+{
+ if (!nunboxSlotsRemaining_--) {
+ advanceFromNunboxSlots();
+ return false;
+ }
+
+ uint16_t header = stream_.readFixedUint16_t();
+ NunboxPartKind typeKind = (NunboxPartKind)((header >> TYPE_KIND_SHIFT) & PART_KIND_MASK);
+ NunboxPartKind payloadKind = (NunboxPartKind)((header >> PAYLOAD_KIND_SHIFT) & PART_KIND_MASK);
+ uint32_t typeInfo = (header >> TYPE_INFO_SHIFT) & PART_INFO_MASK;
+ uint32_t payloadInfo = (header >> PAYLOAD_INFO_SHIFT) & PART_INFO_MASK;
+
+ *type = PartFromStream(stream_, typeKind, typeInfo);
+ *payload = PartFromStream(stream_, payloadKind, payloadInfo);
+ return true;
+}
+
+void
+SafepointReader::advanceFromNunboxSlots()
+{
+ slotsOrElementsSlotsRemaining_ = stream_.readUnsigned();
+}
+
+bool
+SafepointReader::getSlotsOrElementsSlot(SafepointSlotEntry* entry)
+{
+ if (!slotsOrElementsSlotsRemaining_--)
+ return false;
+ entry->stack = true;
+ entry->slot = stream_.readUnsigned();
+ return true;
+}
diff --git a/js/src/jit/Safepoints.h b/js/src/jit/Safepoints.h
new file mode 100644
index 000000000..c79a68059
--- /dev/null
+++ b/js/src/jit/Safepoints.h
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Safepoints_h
+#define jit_Safepoints_h
+
+#include "jit/BitSet.h"
+#include "jit/CompactBuffer.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+struct SafepointSlotEntry;
+
+class LAllocation;
+class LSafepoint;
+
+static const uint32_t INVALID_SAFEPOINT_OFFSET = uint32_t(-1);
+
+class SafepointWriter
+{
+ CompactBufferWriter stream_;
+ BitSet frameSlots_;
+ BitSet argumentSlots_;
+
+ public:
+ explicit SafepointWriter(uint32_t slotCount, uint32_t argumentCount);
+ MOZ_MUST_USE bool init(TempAllocator& alloc);
+
+ private:
+ // A safepoint entry is written in the order these functions appear.
+ uint32_t startEntry();
+
+ void writeOsiCallPointOffset(uint32_t osiPointOffset);
+ void writeGcRegs(LSafepoint* safepoint);
+ void writeGcSlots(LSafepoint* safepoint);
+ void writeValueSlots(LSafepoint* safepoint);
+
+ void writeSlotsOrElementsSlots(LSafepoint* safepoint);
+
+#ifdef JS_NUNBOX32
+ void writeNunboxParts(LSafepoint* safepoint);
+#endif
+
+ void endEntry();
+
+ public:
+ void encode(LSafepoint* safepoint);
+
+ size_t size() const {
+ return stream_.length();
+ }
+ const uint8_t* buffer() const {
+ return stream_.buffer();
+ }
+ bool oom() const {
+ return stream_.oom();
+ }
+};
+
+class SafepointReader
+{
+ CompactBufferReader stream_;
+ uint32_t frameSlots_;
+ uint32_t argumentSlots_;
+ uint32_t currentSlotChunk_;
+ bool currentSlotsAreStack_;
+ uint32_t nextSlotChunkNumber_;
+ uint32_t osiCallPointOffset_;
+ GeneralRegisterSet gcSpills_;
+ GeneralRegisterSet valueSpills_;
+ GeneralRegisterSet slotsOrElementsSpills_;
+ GeneralRegisterSet allGprSpills_;
+ FloatRegisterSet allFloatSpills_;
+ uint32_t nunboxSlotsRemaining_;
+ uint32_t slotsOrElementsSlotsRemaining_;
+
+ private:
+ void advanceFromGcRegs();
+ void advanceFromGcSlots();
+ void advanceFromValueSlots();
+ void advanceFromNunboxSlots();
+ MOZ_MUST_USE bool getSlotFromBitmap(SafepointSlotEntry* entry);
+
+ public:
+ SafepointReader(IonScript* script, const SafepointIndex* si);
+
+ static CodeLocationLabel InvalidationPatchPoint(IonScript* script, const SafepointIndex* si);
+
+ uint32_t osiCallPointOffset() const {
+ return osiCallPointOffset_;
+ }
+ LiveGeneralRegisterSet gcSpills() const {
+ return LiveGeneralRegisterSet(gcSpills_);
+ }
+ LiveGeneralRegisterSet slotsOrElementsSpills() const {
+ return LiveGeneralRegisterSet(slotsOrElementsSpills_);
+ }
+ LiveGeneralRegisterSet valueSpills() const {
+ return LiveGeneralRegisterSet(valueSpills_);
+ }
+ LiveGeneralRegisterSet allGprSpills() const {
+ return LiveGeneralRegisterSet(allGprSpills_);
+ }
+ LiveFloatRegisterSet allFloatSpills() const {
+ return LiveFloatRegisterSet(allFloatSpills_);
+ }
+ uint32_t osiReturnPointOffset() const;
+
+ // Returns true if a slot was read, false if there are no more slots.
+ MOZ_MUST_USE bool getGcSlot(SafepointSlotEntry* entry);
+
+ // Returns true if a slot was read, false if there are no more value slots.
+ MOZ_MUST_USE bool getValueSlot(SafepointSlotEntry* entry);
+
+ // Returns true if a nunbox slot was read, false if there are no more
+ // nunbox slots.
+ MOZ_MUST_USE bool getNunboxSlot(LAllocation* type, LAllocation* payload);
+
+ // Returns true if a slot was read, false if there are no more slots.
+ MOZ_MUST_USE bool getSlotsOrElementsSlot(SafepointSlotEntry* entry);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Safepoints_h */
diff --git a/js/src/jit/ScalarReplacement.cpp b/js/src/jit/ScalarReplacement.cpp
new file mode 100644
index 000000000..4614b2162
--- /dev/null
+++ b/js/src/jit/ScalarReplacement.cpp
@@ -0,0 +1,1350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ScalarReplacement.h"
+
+#include "mozilla/Vector.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "vm/UnboxedObject.h"
+
+#include "jsobjinlines.h"
+
+namespace js {
+namespace jit {
+
+template <typename MemoryView>
+class EmulateStateOf
+{
+ private:
+ typedef typename MemoryView::BlockState BlockState;
+
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+
+ // Block state at the entrance of all basic blocks.
+ Vector<BlockState*, 8, SystemAllocPolicy> states_;
+
+ public:
+ EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir),
+ graph_(graph)
+ {
+ }
+
+ bool run(MemoryView& view);
+};
+
+template <typename MemoryView>
+bool
+EmulateStateOf<MemoryView>::run(MemoryView& view)
+{
+ // Initialize the current block state of each block to an unknown state.
+ if (!states_.appendN(nullptr, graph_.numBlocks()))
+ return false;
+
+ // Initialize the first block which needs to be traversed in RPO.
+ MBasicBlock* startBlock = view.startingBlock();
+ if (!view.initStartingState(&states_[startBlock->id()]))
+ return false;
+
+    // Iterate over each basic block which has a valid entry state, and merge
+    // the state into the successor blocks.
+ for (ReversePostorderIterator block = graph_.rpoBegin(startBlock); block != graph_.rpoEnd(); block++) {
+ if (mir_->shouldCancel(MemoryView::phaseName))
+ return false;
+
+ // Get the block state as the result of the merge of all predecessors
+ // which have already been visited in RPO. This means that backedges
+ // are not yet merged into the loop.
+ BlockState* state = states_[block->id()];
+ if (!state)
+ continue;
+ view.setEntryBlockState(state);
+
+        // Iterate over resume points, phis, and instructions.
+ for (MNodeIterator iter(*block); iter; ) {
+ // Increment the iterator before visiting the instruction, as the
+ // visit function might discard itself from the basic block.
+ MNode* ins = *iter++;
+ if (ins->isDefinition())
+ ins->toDefinition()->accept(&view);
+ else
+ view.visitResumePoint(ins->toResumePoint());
+ if (view.oom())
+ return false;
+ }
+
+ // For each successor, merge the current state into the state of the
+ // successors.
+ for (size_t s = 0; s < block->numSuccessors(); s++) {
+ MBasicBlock* succ = block->getSuccessor(s);
+ if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()]))
+ return false;
+ }
+ }
+
+ states_.clear();
+ return true;
+}
+
+static bool
+IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr);
+
+// Returns false if the lambda is not escaped and is optimizable by
+// ScalarReplacementOfObject.
+static bool
+IsLambdaEscaped(MLambda* lambda, JSObject* obj)
+{
+ JitSpewDef(JitSpew_Escape, "Check lambda\n", lambda);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ // The scope chain is not escaped if none of the Lambdas which are
+ // capturing it are escaped.
+ for (MUseIterator i(lambda->usesBegin()); i != lambda->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ if (!def->isFunctionEnvironment()) {
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+
+ if (IsObjectEscaped(def->toInstruction(), obj)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ }
+ JitSpew(JitSpew_Escape, "Lambda is not escaped");
+ return false;
+}
+
+// Returns false if the object is not escaped and is optimizable by
+// ScalarReplacementOfObject.
+//
+// For the moment, this code only supports objects whose shape does not change
+// and which are known by TI at object creation.
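+//
+// For example (illustrative only), an object allocated in the compiled script
+// and only accessed through fixed or dynamic slots at known offsets remains
+// replaceable, while passing it as an argument to an arbitrary call makes it
+// escape.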
+static bool
+IsObjectEscaped(MInstruction* ins, JSObject* objDefault)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->isNewObject() || ins->isGuardShape() || ins->isCreateThisWithTemplate() ||
+ ins->isNewCallObject() || ins->isFunctionEnvironment());
+
+ JitSpewDef(JitSpew_Escape, "Check object\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ JSObject* obj = objDefault;
+ if (!obj)
+ obj = MObjectState::templateObjectOf(ins);
+
+ if (!obj) {
+ JitSpew(JitSpew_Escape, "No template object defined.");
+ return true;
+ }
+
+ // Check if the object is escaped. If the object is not the first argument
+ // of either a known Store / Load, then we consider it as escaped. This is a
+ // cheap and conservative escape analysis.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Op_StoreFixedSlot:
+ case MDefinition::Op_LoadFixedSlot:
+ // Not escaped if it is the first argument.
+ if (def->indexOf(*i) == 0)
+ break;
+
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+
+ case MDefinition::Op_LoadUnboxedScalar:
+ case MDefinition::Op_StoreUnboxedScalar:
+ case MDefinition::Op_LoadUnboxedObjectOrNull:
+ case MDefinition::Op_StoreUnboxedObjectOrNull:
+ case MDefinition::Op_LoadUnboxedString:
+ case MDefinition::Op_StoreUnboxedString:
+ // Not escaped if it is the first argument.
+ if (def->indexOf(*i) != 0) {
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+
+ if (!def->getOperand(1)->isConstant()) {
+ JitSpewDef(JitSpew_Escape, "is addressed with unknown index\n", def);
+ return true;
+ }
+
+ break;
+
+ case MDefinition::Op_PostWriteBarrier:
+ break;
+
+ case MDefinition::Op_Slots: {
+#ifdef DEBUG
+ // Assert that MSlots are only used by MStoreSlot and MLoadSlot.
+ MSlots* ins = def->toSlots();
+ MOZ_ASSERT(ins->object() != 0);
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ // toDefinition should normally never fail, since they don't get
+ // captured by resume points.
+ MDefinition* def = (*i)->consumer()->toDefinition();
+ MOZ_ASSERT(def->op() == MDefinition::Op_StoreSlot ||
+ def->op() == MDefinition::Op_LoadSlot);
+ }
+#endif
+ break;
+ }
+
+ case MDefinition::Op_GuardShape: {
+ MGuardShape* guard = def->toGuardShape();
+ MOZ_ASSERT(!ins->isGuardShape());
+ if (obj->maybeShape() != guard->shape()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
+ return true;
+ }
+ if (IsObjectEscaped(def->toInstruction(), obj)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Op_Lambda: {
+ MLambda* lambda = def->toLambda();
+ if (IsLambdaEscaped(lambda, obj)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", lambda);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is a no-op used to verify that scalar replacement
+ // is working as expected in jit-test.
+ case MDefinition::Op_AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Object is not escaped");
+ return false;
+}
+
+class ObjectMemoryView : public MDefinitionVisitorDefaultNoop
+{
+ public:
+ typedef MObjectState BlockState;
+ static const char* phaseName;
+
+ private:
+ TempAllocator& alloc_;
+ MConstant* undefinedVal_;
+ MInstruction* obj_;
+ MBasicBlock* startBlock_;
+ BlockState* state_;
+
+    // Used to improve memory usage by sharing common modifications.
+ const MResumePoint* lastResumePoint_;
+
+ bool oom_;
+
+ public:
+ ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);
+
+ MBasicBlock* startingBlock();
+ bool initStartingState(BlockState** pState);
+
+ void setEntryBlockState(BlockState* state);
+ bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ, BlockState** pSuccState);
+
+#ifdef DEBUG
+ void assertSuccess();
+#else
+ void assertSuccess() {}
+#endif
+
+ bool oom() const { return oom_; }
+
+ public:
+ void visitResumePoint(MResumePoint* rp);
+ void visitObjectState(MObjectState* ins);
+ void visitStoreFixedSlot(MStoreFixedSlot* ins);
+ void visitLoadFixedSlot(MLoadFixedSlot* ins);
+ void visitPostWriteBarrier(MPostWriteBarrier* ins);
+ void visitStoreSlot(MStoreSlot* ins);
+ void visitLoadSlot(MLoadSlot* ins);
+ void visitGuardShape(MGuardShape* ins);
+ void visitFunctionEnvironment(MFunctionEnvironment* ins);
+ void visitLambda(MLambda* ins);
+ void visitStoreUnboxedScalar(MStoreUnboxedScalar* ins);
+ void visitLoadUnboxedScalar(MLoadUnboxedScalar* ins);
+ void visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins);
+ void visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins);
+ void visitStoreUnboxedString(MStoreUnboxedString* ins);
+ void visitLoadUnboxedString(MLoadUnboxedString* ins);
+
+ private:
+ void storeOffset(MInstruction* ins, size_t offset, MDefinition* value);
+ void loadOffset(MInstruction* ins, size_t offset);
+};
+
+const char* ObjectMemoryView::phaseName = "Scalar Replacement of Object";
+
+ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
+ : alloc_(alloc),
+ obj_(obj),
+ startBlock_(obj->block()),
+ state_(nullptr),
+ lastResumePoint_(nullptr),
+ oom_(false)
+{
+    // Annotate the snapshots' RValue such that we recover the stores first.
+ obj_->setIncompleteObject();
+
+ // Annotate the instruction such that we do not replace it by a
+ // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
+ obj_->setImplicitlyUsedUnchecked();
+}
+
+MBasicBlock*
+ObjectMemoryView::startingBlock()
+{
+ return startBlock_;
+}
+
+bool
+ObjectMemoryView::initStartingState(BlockState** pState)
+{
+ // Uninitialized slots have an "undefined" value.
+ undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
+ startBlock_->insertBefore(obj_, undefinedVal_);
+
+    // Create a new block state and insert it at the location of the new object.
+ BlockState* state = BlockState::New(alloc_, obj_);
+ if (!state)
+ return false;
+
+ startBlock_->insertAfter(obj_, state);
+
+ // Initialize the properties of the object state.
+ if (!state->initFromTemplateObject(alloc_, undefinedVal_))
+ return false;
+
+    // Keep the state out of resume points until it is visited.
+ state->setInWorklist();
+
+ *pState = state;
+ return true;
+}
+
+void
+ObjectMemoryView::setEntryBlockState(BlockState* state)
+{
+ state_ = state;
+}
+
+bool
+ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
+ BlockState** pSuccState)
+{
+ BlockState* succState = *pSuccState;
+
+ // When a block has no state yet, create an empty one for the
+ // successor.
+ if (!succState) {
+        // If the successor is not dominated then the object cannot flow
+        // into this basic block without a Phi. We know that no Phi exists
+        // in non-dominated successors, as the conservative escape
+        // analysis would fail otherwise. This can happen when the
+        // successor is a join at the end of an if-block and the object
+        // only exists within the branch.
+ if (!startBlock_->dominates(succ))
+ return true;
+
+ // If there is only one predecessor, carry over the last state of the
+ // block to the successor. As the block state is immutable, if the
+ // current block has multiple successors, they will share the same entry
+ // state.
+ if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
+ *pSuccState = state_;
+ return true;
+ }
+
+ // If we have multiple predecessors, then we allocate one Phi node for
+ // each predecessor, and create a new block state which only has phi
+ // nodes. These would later be removed by the removal of redundant phi
+ // nodes.
+ succState = BlockState::Copy(alloc_, state_);
+ if (!succState)
+ return false;
+
+ size_t numPreds = succ->numPredecessors();
+ for (size_t slot = 0; slot < state_->numSlots(); slot++) {
+ MPhi* phi = MPhi::New(alloc_);
+ if (!phi->reserveLength(numPreds))
+ return false;
+
+            // Fill the inputs of the successor's Phi with undefined
+            // values; each predecessor block later fills in its input.
+ for (size_t p = 0; p < numPreds; p++)
+ phi->addInput(undefinedVal_);
+
+ // Add Phi in the list of Phis of the basic block.
+ succ->addPhi(phi);
+ succState->setSlot(slot, phi);
+ }
+
+ // Insert the newly created block state instruction at the beginning
+ // of the successor block, after all the phi nodes. Note that it
+ // would be captured by the entry resume point of the successor
+ // block.
+ succ->insertBefore(succ->safeInsertTop(), succState);
+ *pSuccState = succState;
+ }
+
+ MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
+ if (succ->numPredecessors() > 1 && succState->numSlots() && succ != startBlock_) {
+ // We need to re-compute successorWithPhis as the previous EliminatePhis
+ // phase might have removed all the Phis from the successor block.
+ size_t currIndex;
+ MOZ_ASSERT(!succ->phisEmpty());
+ if (curr->successorWithPhis()) {
+ MOZ_ASSERT(curr->successorWithPhis() == succ);
+ currIndex = curr->positionInPhiSuccessor();
+ } else {
+ currIndex = succ->indexForPredecessor(curr);
+ curr->setSuccessorWithPhis(succ, currIndex);
+ }
+ MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
+
+ // Copy the current slot states to the index of current block in all the
+ // Phi created during the first visit of the successor.
+ for (size_t slot = 0; slot < state_->numSlots(); slot++) {
+ MPhi* phi = succState->getSlot(slot)->toPhi();
+ phi->replaceOperand(currIndex, state_->getSlot(slot));
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+void
+ObjectMemoryView::assertSuccess()
+{
+ for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
+ MNode* ins = (*i)->consumer();
+ MDefinition* def = nullptr;
+
+ // Resume points have been replaced by the object state.
+ if (ins->isResumePoint() || (def = ins->toDefinition())->isRecoveredOnBailout()) {
+ MOZ_ASSERT(obj_->isIncompleteObject());
+ continue;
+ }
+
+ // The only remaining uses would be removed by DCE, which will also
+ // recover the object on bailouts.
+ MOZ_ASSERT(def->isSlots() || def->isLambda());
+ MOZ_ASSERT(!def->hasDefUses());
+ }
+}
+#endif
+
+void
+ObjectMemoryView::visitResumePoint(MResumePoint* rp)
+{
+ // As long as the MObjectState is not yet seen next to the allocation, we do
+ // not patch the resume point to recover the side effects.
+ if (!state_->isInWorklist()) {
+ rp->addStore(alloc_, state_, lastResumePoint_);
+ lastResumePoint_ = rp;
+ }
+}
+
+void
+ObjectMemoryView::visitObjectState(MObjectState* ins)
+{
+ if (ins->isInWorklist())
+ ins->setNotInWorklist();
+}
+
+void
+ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins)
+{
+ // Skip stores made on other objects.
+ if (ins->object() != obj_)
+ return;
+
+ // Clone the state and update the slot value.
+ if (state_->hasFixedSlot(ins->slot())) {
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setFixedSlot(ins->slot(), ins->value());
+ ins->block()->insertBefore(ins->toInstruction(), state_);
+ } else {
+        // UnsafeSetReservedSlot can access baked-in slots which are guarded by
+        // conditions, which are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins)
+{
+ // Skip loads made on other objects.
+ if (ins->object() != obj_)
+ return;
+
+ // Replace load by the slot value.
+ if (state_->hasFixedSlot(ins->slot())) {
+ ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
+ } else {
+        // UnsafeGetReservedSlot can access baked-in slots which are guarded by
+ // conditions, which are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ ins->replaceAllUsesWith(undefinedVal_);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins)
+{
+    // Skip barriers made on other objects.
+ if (ins->object() != obj_)
+ return;
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitStoreSlot(MStoreSlot* ins)
+{
+ // Skip stores made on other objects.
+ MSlots* slots = ins->slots()->toSlots();
+ if (slots->object() != obj_) {
+ // Guard objects are replaced when they are visited.
+ MOZ_ASSERT(!slots->object()->isGuardShape() || slots->object()->toGuardShape()->object() != obj_);
+ return;
+ }
+
+ // Clone the state and update the slot value.
+ if (state_->hasDynamicSlot(ins->slot())) {
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setDynamicSlot(ins->slot(), ins->value());
+ ins->block()->insertBefore(ins->toInstruction(), state_);
+ } else {
+        // UnsafeSetReservedSlot can access baked-in slots which are guarded by
+ // conditions, which are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitLoadSlot(MLoadSlot* ins)
+{
+ // Skip loads made on other objects.
+ MSlots* slots = ins->slots()->toSlots();
+ if (slots->object() != obj_) {
+ // Guard objects are replaced when they are visited.
+ MOZ_ASSERT(!slots->object()->isGuardShape() || slots->object()->toGuardShape()->object() != obj_);
+ return;
+ }
+
+ // Replace load by the slot value.
+ if (state_->hasDynamicSlot(ins->slot())) {
+ ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
+ } else {
+        // UnsafeGetReservedSlot can access baked-in slots which are guarded by
+ // conditions, which are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ ins->replaceAllUsesWith(undefinedVal_);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitGuardShape(MGuardShape* ins)
+{
+    // Skip guards made on other objects.
+ if (ins->object() != obj_)
+ return;
+
+ // Replace the shape guard by its object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins)
+{
+    // Skip function environments which are not aliases of the NewCallObject.
+ MDefinition* input = ins->input();
+ if (!input->isLambda() || input->toLambda()->environmentChain() != obj_)
+ return;
+
+ // Replace the function environment by the scope chain of the lambda.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitLambda(MLambda* ins)
+{
+ if (ins->environmentChain() != obj_)
+ return;
+
+ // In order to recover the lambda we need to recover the scope chain, as the
+ // lambda is holding it.
+ ins->setIncompleteObject();
+}
+
+static size_t
+GetOffsetOf(MDefinition* index, size_t width, int32_t baseOffset)
+{
+ int32_t idx = index->toConstant()->toInt32();
+ MOZ_ASSERT(idx >= 0);
+ MOZ_ASSERT(baseOffset >= 0 && size_t(baseOffset) >= UnboxedPlainObject::offsetOfData());
+ return idx * width + baseOffset - UnboxedPlainObject::offsetOfData();
+}
+
+static size_t
+GetOffsetOf(MDefinition* index, Scalar::Type type, int32_t baseOffset)
+{
+ return GetOffsetOf(index, Scalar::byteSize(type), baseOffset);
+}
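+
+// For illustration only: with a baseOffset equal to
+// UnboxedPlainObject::offsetOfData() (i.e. no extra adjustment), a constant
+// index of 2 over 4-byte elements maps to offset 2 * 4 = 8 within the
+// unboxed data.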
+
+void
+ObjectMemoryView::storeOffset(MInstruction* ins, size_t offset, MDefinition* value)
+{
+ // Clone the state and update the slot value.
+ MOZ_ASSERT(state_->hasOffset(offset));
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setOffset(offset, value);
+ ins->block()->insertBefore(ins, state_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::loadOffset(MInstruction* ins, size_t offset)
+{
+ // Replace load by the slot value.
+ MOZ_ASSERT(state_->hasOffset(offset));
+ ins->replaceAllUsesWith(state_->getOffset(offset));
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void
+ObjectMemoryView::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
+{
+ // Skip stores made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ size_t offset = GetOffsetOf(ins->index(), ins->storageType(), ins->offsetAdjustment());
+ storeOffset(ins, offset, ins->value());
+}
+
+void
+ObjectMemoryView::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
+{
+ // Skip loads made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ // Replace load by the slot value.
+ size_t offset = GetOffsetOf(ins->index(), ins->storageType(), ins->offsetAdjustment());
+ loadOffset(ins, offset);
+}
+
+void
+ObjectMemoryView::visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins)
+{
+ // Skip stores made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ // Clone the state and update the slot value.
+ size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment());
+ storeOffset(ins, offset, ins->value());
+}
+
+void
+ObjectMemoryView::visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins)
+{
+ // Skip loads made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ // Replace load by the slot value.
+ size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment());
+ loadOffset(ins, offset);
+}
+
+void
+ObjectMemoryView::visitStoreUnboxedString(MStoreUnboxedString* ins)
+{
+ // Skip stores made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ // Clone the state and update the slot value.
+ size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment());
+ storeOffset(ins, offset, ins->value());
+}
+
+void
+ObjectMemoryView::visitLoadUnboxedString(MLoadUnboxedString* ins)
+{
+ // Skip loads made on other objects.
+ if (ins->elements() != obj_)
+ return;
+
+ // Replace load by the slot value.
+ size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment());
+ loadOffset(ins, offset);
+}
+
+static bool
+IndexOf(MDefinition* ins, int32_t* res)
+{
+ MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
+ MDefinition* indexDef = ins->getOperand(1); // ins->index();
+ if (indexDef->isBoundsCheck())
+ indexDef = indexDef->toBoundsCheck()->index();
+ if (indexDef->isToInt32())
+ indexDef = indexDef->toToInt32()->getOperand(0);
+ MConstant* indexDefConst = indexDef->maybeConstantValue();
+ if (!indexDefConst || indexDefConst->type() != MIRType::Int32)
+ return false;
+ *res = indexDefConst->toInt32();
+ return true;
+}
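+
+// Note (exposition only): IndexOf looks through MBoundsCheck and MToInt32
+// wrappers, so an access such as a[3] whose index flows through a bounds
+// check still resolves to the constant 3, while any non-constant or
+// non-Int32 index makes it return false.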
+
+// Returns false if the elements object is not escaped and is optimizable by
+// ScalarReplacementOfArray.
+static bool
+IsElementEscaped(MElements* def, uint32_t arraySize)
+{
+ JitSpewDef(JitSpew_Escape, "Check elements\n", def);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
+ // The MIRType::Elements cannot be captured in a resume point as
+ // it does not represent a value allocation.
+ MDefinition* access = (*i)->consumer()->toDefinition();
+
+ switch (access->op()) {
+ case MDefinition::Op_LoadElement: {
+ MOZ_ASSERT(access->toLoadElement()->elements() == def);
+
+            // If we need hole checks, then the array cannot be escaped,
+            // as the load might refer to the prototype chain to look
+            // for properties, and thus it might do additional side effects
+            // which are not reflected by the alias set, if we are
+            // bailing on holes.
+ if (access->toLoadElement()->needsHoleCheck()) {
+ JitSpewDef(JitSpew_Escape,
+ "has a load element with a hole check\n", access);
+ return true;
+ }
+
+ // If the index is not a constant then this index can alias
+ // all others. We do not handle this case.
+ int32_t index;
+ if (!IndexOf(access, &index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a load element with a non-trivial index\n", access);
+ return true;
+ }
+ if (index < 0 || arraySize <= uint32_t(index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a load element with an out-of-bound index\n", access);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Op_StoreElement: {
+ MOZ_ASSERT(access->toStoreElement()->elements() == def);
+
+            // If we need hole checks, then the array cannot be escaped,
+            // as the store might refer to the prototype chain to look
+            // for properties, and thus it might do additional side effects
+            // which are not reflected by the alias set, if we are
+            // bailing on holes.
+ if (access->toStoreElement()->needsHoleCheck()) {
+ JitSpewDef(JitSpew_Escape,
+ "has a store element with a hole check\n", access);
+ return true;
+ }
+
+ // If the index is not a constant then this index can alias
+ // all others. We do not handle this case.
+ int32_t index;
+ if (!IndexOf(access, &index)) {
+ JitSpewDef(JitSpew_Escape, "has a store element with a non-trivial index\n", access);
+ return true;
+ }
+ if (index < 0 || arraySize <= uint32_t(index)) {
+ JitSpewDef(JitSpew_Escape, "has a store element with an out-of-bound index\n", access);
+ return true;
+ }
+
+ // We are not yet encoding magic hole constants in resume points.
+ if (access->toStoreElement()->value()->type() == MIRType::MagicHole) {
+                JitSpewDef(JitSpew_Escape, "has a store element with a magic-hole constant\n", access);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Op_SetInitializedLength:
+ MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
+ break;
+
+ case MDefinition::Op_InitializedLength:
+ MOZ_ASSERT(access->toInitializedLength()->elements() == def);
+ break;
+
+ case MDefinition::Op_ArrayLength:
+ MOZ_ASSERT(access->toArrayLength()->elements() == def);
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
+ return true;
+ }
+ }
+ JitSpew(JitSpew_Escape, "Elements is not escaped");
+ return false;
+}
+
+// Returns false if the array is not escaped and is optimizable by
+// ScalarReplacementOfArray.
+//
+// For the moment, this code only supports arrays whose length does not change
+// and whose elements are only accessed with known constant indexes.
+static bool
+IsArrayEscaped(MInstruction* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->isNewArray());
+ uint32_t length = ins->toNewArray()->length();
+
+ JitSpewDef(JitSpew_Escape, "Check array\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ JSObject* obj = ins->toNewArray()->templateObject();
+ if (!obj) {
+ JitSpew(JitSpew_Escape, "No template object defined.");
+ return true;
+ }
+
+ if (obj->is<UnboxedArrayObject>()) {
+        JitSpew(JitSpew_Escape, "Template object is an unboxed array object.");
+ return true;
+ }
+
+ if (length >= 16) {
+ JitSpew(JitSpew_Escape, "Array has too many elements");
+ return true;
+ }
+
+ // Check if the object is escaped. If the object is not the first argument
+ // of either a known Store / Load, then we consider it as escaped. This is a
+ // cheap and conservative escape analysis.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Op_Elements: {
+ MElements *elem = def->toElements();
+ MOZ_ASSERT(elem->object() == ins);
+ if (IsElementEscaped(elem, length)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
+ return true;
+ }
+
+ break;
+ }
+
+ // This instruction is a no-op used to verify that scalar replacement
+ // is working as expected in jit-test.
+ case MDefinition::Op_AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Array is not escaped");
+ return false;
+}
+
+// This class replaces every MStoreElement and MSetInitializedLength by an
+// MArrayState which emulates the content of the array. All MLoadElement,
+// MInitializedLength and MArrayLength are replaced by the corresponding value.
+//
+// In order to restore the value of the array correctly in case of bailouts, we
+// replace all references to the allocation by the MArrayState definition.
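+//
+// For example (illustrative only), in a function where an array such as
+//   var a = [0, 0]; a[0] = x; a[1] = y; return a[0] + a[1];
+// never escapes, the element stores become MArrayState updates and the loads
+// read the tracked values directly, so the allocation itself only needs to be
+// recovered on bailout.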
+class ArrayMemoryView : public MDefinitionVisitorDefaultNoop
+{
+ public:
+ typedef MArrayState BlockState;
+ static const char* phaseName;
+
+ private:
+ TempAllocator& alloc_;
+ MConstant* undefinedVal_;
+ MConstant* length_;
+ MInstruction* arr_;
+ MBasicBlock* startBlock_;
+ BlockState* state_;
+
+    // Used to improve memory usage by sharing common modifications.
+ const MResumePoint* lastResumePoint_;
+
+ bool oom_;
+
+ public:
+ ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);
+
+ MBasicBlock* startingBlock();
+ bool initStartingState(BlockState** pState);
+
+ void setEntryBlockState(BlockState* state);
+ bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ, BlockState** pSuccState);
+
+#ifdef DEBUG
+ void assertSuccess();
+#else
+ void assertSuccess() {}
+#endif
+
+ bool oom() const { return oom_; }
+
+ private:
+ bool isArrayStateElements(MDefinition* elements);
+ void discardInstruction(MInstruction* ins, MDefinition* elements);
+
+ public:
+ void visitResumePoint(MResumePoint* rp);
+ void visitArrayState(MArrayState* ins);
+ void visitStoreElement(MStoreElement* ins);
+ void visitLoadElement(MLoadElement* ins);
+ void visitSetInitializedLength(MSetInitializedLength* ins);
+ void visitInitializedLength(MInitializedLength* ins);
+ void visitArrayLength(MArrayLength* ins);
+};
+
+const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
+
+ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
+ : alloc_(alloc),
+ undefinedVal_(nullptr),
+ length_(nullptr),
+ arr_(arr),
+ startBlock_(arr->block()),
+ state_(nullptr),
+ lastResumePoint_(nullptr),
+ oom_(false)
+{
+    // Annotate the snapshots' RValue such that we recover the stores first.
+ arr_->setIncompleteObject();
+
+ // Annotate the instruction such that we do not replace it by a
+ // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
+ arr_->setImplicitlyUsedUnchecked();
+}
+
+MBasicBlock*
+ArrayMemoryView::startingBlock()
+{
+ return startBlock_;
+}
+
+bool
+ArrayMemoryView::initStartingState(BlockState** pState)
+{
+ // Uninitialized elements have an "undefined" value.
+ undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
+ MConstant* initLength = MConstant::New(alloc_, Int32Value(0));
+ arr_->block()->insertBefore(arr_, undefinedVal_);
+ arr_->block()->insertBefore(arr_, initLength);
+
+    // Create a new block state and insert it at the location of the new array.
+ BlockState* state = BlockState::New(alloc_, arr_, undefinedVal_, initLength);
+ if (!state)
+ return false;
+
+ startBlock_->insertAfter(arr_, state);
+
+    // Keep the state out of resume points until it is visited.
+ state->setInWorklist();
+
+ *pState = state;
+ return true;
+}
+
+void
+ArrayMemoryView::setEntryBlockState(BlockState* state)
+{
+ state_ = state;
+}
+
+bool
+ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
+ BlockState** pSuccState)
+{
+ BlockState* succState = *pSuccState;
+
+ // When a block has no state yet, create an empty one for the
+ // successor.
+ if (!succState) {
+        // If the successor is not dominated then the array cannot flow
+        // into this basic block without a Phi. We know that no Phi exists
+        // in non-dominated successors, as the conservative escape
+        // analysis would fail otherwise. This can happen when the
+        // successor is a join at the end of an if-block and the array
+        // only exists within the branch.
+ if (!startBlock_->dominates(succ))
+ return true;
+
+ // If there is only one predecessor, carry over the last state of the
+ // block to the successor. As the block state is immutable, if the
+ // current block has multiple successors, they will share the same entry
+ // state.
+ if (succ->numPredecessors() <= 1 || !state_->numElements()) {
+ *pSuccState = state_;
+ return true;
+ }
+
+ // If we have multiple predecessors, then we allocate one Phi node for
+ // each predecessor, and create a new block state which only has phi
+ // nodes. These would later be removed by the removal of redundant phi
+ // nodes.
+ succState = BlockState::Copy(alloc_, state_);
+ if (!succState)
+ return false;
+
+ size_t numPreds = succ->numPredecessors();
+ for (size_t index = 0; index < state_->numElements(); index++) {
+ MPhi* phi = MPhi::New(alloc_);
+ if (!phi->reserveLength(numPreds))
+ return false;
+
+            // Fill the inputs of the successor's Phi with undefined
+            // values; each predecessor block later fills in its input.
+ for (size_t p = 0; p < numPreds; p++)
+ phi->addInput(undefinedVal_);
+
+ // Add Phi in the list of Phis of the basic block.
+ succ->addPhi(phi);
+ succState->setElement(index, phi);
+ }
+
+ // Insert the newly created block state instruction at the beginning
+ // of the successor block, after all the phi nodes. Note that it
+ // would be captured by the entry resume point of the successor
+ // block.
+ succ->insertBefore(succ->safeInsertTop(), succState);
+ *pSuccState = succState;
+ }
+
+ MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
+ if (succ->numPredecessors() > 1 && succState->numElements() && succ != startBlock_) {
+ // We need to re-compute successorWithPhis as the previous EliminatePhis
+ // phase might have removed all the Phis from the successor block.
+ size_t currIndex;
+ MOZ_ASSERT(!succ->phisEmpty());
+ if (curr->successorWithPhis()) {
+ MOZ_ASSERT(curr->successorWithPhis() == succ);
+ currIndex = curr->positionInPhiSuccessor();
+ } else {
+ currIndex = succ->indexForPredecessor(curr);
+ curr->setSuccessorWithPhis(succ, currIndex);
+ }
+ MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
+
+ // Copy the current element states to the index of current block in all
+ // the Phi created during the first visit of the successor.
+ for (size_t index = 0; index < state_->numElements(); index++) {
+ MPhi* phi = succState->getElement(index)->toPhi();
+ phi->replaceOperand(currIndex, state_->getElement(index));
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+void
+ArrayMemoryView::assertSuccess()
+{
+ MOZ_ASSERT(!arr_->hasLiveDefUses());
+}
+#endif
+
+void
+ArrayMemoryView::visitResumePoint(MResumePoint* rp)
+{
+ // As long as the MArrayState is not yet seen next to the allocation, we do
+ // not patch the resume point to recover the side effects.
+ if (!state_->isInWorklist()) {
+ rp->addStore(alloc_, state_, lastResumePoint_);
+ lastResumePoint_ = rp;
+ }
+}
+
+void
+ArrayMemoryView::visitArrayState(MArrayState* ins)
+{
+ if (ins->isInWorklist())
+ ins->setNotInWorklist();
+}
+
+bool
+ArrayMemoryView::isArrayStateElements(MDefinition* elements)
+{
+ return elements->isElements() && elements->toElements()->object() == arr_;
+}
+
+void
+ArrayMemoryView::discardInstruction(MInstruction* ins, MDefinition* elements)
+{
+ MOZ_ASSERT(elements->isElements());
+ ins->block()->discard(ins);
+ if (!elements->hasLiveDefUses())
+ elements->block()->discard(elements->toInstruction());
+}
+
+void
+ArrayMemoryView::visitStoreElement(MStoreElement* ins)
+{
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements))
+ return;
+
+ // Register value of the setter in the state.
+ int32_t index;
+ MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setElement(index, ins->value());
+ ins->block()->insertBefore(ins, state_);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void
+ArrayMemoryView::visitLoadElement(MLoadElement* ins)
+{
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements))
+ return;
+
+ // Replace by the value contained at the index.
+ int32_t index;
+ MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
+ ins->replaceAllUsesWith(state_->getElement(index));
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void
+ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins)
+{
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements))
+ return;
+
+ // Replace by the new initialized length. Note that the argument of
+    // MSetInitializedLength is the last index and not the initialized length.
+ // To obtain the length, we need to add 1 to it, and thus we need to create
+ // a new constant that we register in the ArrayState.
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
+ MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
+ ins->block()->insertBefore(ins, initLength);
+ ins->block()->insertBefore(ins, state_);
+ state_->setInitializedLength(initLength);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void
+ArrayMemoryView::visitInitializedLength(MInitializedLength* ins)
+{
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements))
+ return;
+
+ // Replace by the value of the length.
+ ins->replaceAllUsesWith(state_->initializedLength());
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void
+ArrayMemoryView::visitArrayLength(MArrayLength* ins)
+{
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements))
+ return;
+
+ // Replace by the value of the length.
+ if (!length_) {
+ length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
+ arr_->block()->insertBefore(arr_, length_);
+ }
+ ins->replaceAllUsesWith(length_);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+bool
+ScalarReplacement(MIRGenerator* mir, MIRGraph& graph)
+{
+ EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
+ EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
+ bool addedPhi = false;
+
+ for (ReversePostorderIterator block = graph.rpoBegin(); block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Scalar Replacement (main loop)"))
+ return false;
+
+ for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+ if ((ins->isNewObject() || ins->isCreateThisWithTemplate() || ins->isNewCallObject()) &&
+ !IsObjectEscaped(*ins))
+ {
+ ObjectMemoryView view(graph.alloc(), *ins);
+ if (!replaceObject.run(view))
+ return false;
+ view.assertSuccess();
+ addedPhi = true;
+ continue;
+ }
+
+ if (ins->isNewArray() && !IsArrayEscaped(*ins)) {
+ ArrayMemoryView view(graph.alloc(), *ins);
+ if (!replaceArray.run(view))
+ return false;
+ view.assertSuccess();
+ addedPhi = true;
+ continue;
+ }
+ }
+ }
+
+ if (addedPhi) {
+ // Phis added by Scalar Replacement are only redundant Phis which are
+ // not directly captured by any resume point but only by the MDefinition
+ // state. The conservative observability only focuses on Phis which are
+        // not used as resume point operands.
+ AssertExtendedGraphCoherency(graph);
+ if (!EliminatePhis(mir, graph, ConservativeObservability))
+ return false;
+ }
+
+ return true;
+}
+
+} /* namespace jit */
+} /* namespace js */
diff --git a/js/src/jit/ScalarReplacement.h b/js/src/jit/ScalarReplacement.h
new file mode 100644
index 000000000..98654bd58
--- /dev/null
+++ b/js/src/jit/ScalarReplacement.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file declares scalar replacement of objects transformation.
+#ifndef jit_ScalarReplacement_h
+#define jit_ScalarReplacement_h
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+MOZ_MUST_USE bool
+ScalarReplacement(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ScalarReplacement_h */
diff --git a/js/src/jit/SharedIC.cpp b/js/src/jit/SharedIC.cpp
new file mode 100644
index 000000000..767cff661
--- /dev/null
+++ b/js/src/jit/SharedIC.cpp
@@ -0,0 +1,4306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/SharedIC.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/SizePrintfMacros.h"
+#include "mozilla/Sprintf.h"
+
+#include "jslibmath.h"
+#include "jstypes.h"
+
+#include "gc/Policy.h"
+#include "jit/BaselineCacheIR.h"
+#include "jit/BaselineDebugModeOSR.h"
+#include "jit/BaselineIC.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "vm/Interpreter.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Interpreter-inl.h"
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+#ifdef JS_JITSPEW
+void
+FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...)
+{
+ if (JitSpewEnabled(JitSpew_BaselineICFallback)) {
+ RootedScript script(cx, GetTopJitJSScript(cx));
+ jsbytecode* pc = stub->icEntry()->pc(script);
+
+ char fmtbuf[100];
+ va_list args;
+ va_start(args, fmt);
+ (void) VsprintfLiteral(fmtbuf, fmt, args);
+ va_end(args);
+
+ JitSpew(JitSpew_BaselineICFallback,
+ "Fallback hit for (%s:%" PRIuSIZE ") (pc=%" PRIuSIZE ",line=%d,uses=%d,stubs=%" PRIuSIZE "): %s",
+ script->filename(),
+ script->lineno(),
+ script->pcToOffset(pc),
+ PCToLineNumber(script, pc),
+ script->getWarmUpCount(),
+ stub->numOptimizedStubs(),
+ fmtbuf);
+ }
+}
+
+void
+TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...)
+{
+ if (JitSpewEnabled(JitSpew_BaselineICFallback)) {
+ RootedScript script(cx, GetTopJitJSScript(cx));
+ jsbytecode* pc = stub->icEntry()->pc(script);
+
+ char fmtbuf[100];
+ va_list args;
+ va_start(args, fmt);
+ (void) VsprintfLiteral(fmtbuf, fmt, args);
+ va_end(args);
+
+ JitSpew(JitSpew_BaselineICFallback,
+ "Type monitor fallback hit for (%s:%" PRIuSIZE ") (pc=%" PRIuSIZE ",line=%d,uses=%d,stubs=%d): %s",
+ script->filename(),
+ script->lineno(),
+ script->pcToOffset(pc),
+ PCToLineNumber(script, pc),
+ script->getWarmUpCount(),
+ (int) stub->numOptimizedMonitorStubs(),
+ fmtbuf);
+ }
+}
+#endif // JS_JITSPEW
+
+ICFallbackStub*
+ICEntry::fallbackStub() const
+{
+ return firstStub()->getChainFallback();
+}
+
+void
+IonICEntry::trace(JSTracer* trc)
+{
+ TraceManuallyBarrieredEdge(trc, &script_, "IonICEntry::script_");
+ traceEntry(trc);
+}
+
+void
+BaselineICEntry::trace(JSTracer* trc)
+{
+ traceEntry(trc);
+}
+
+void
+ICEntry::traceEntry(JSTracer* trc)
+{
+ if (!hasStub())
+ return;
+ for (ICStub* stub = firstStub(); stub; stub = stub->next())
+ stub->trace(trc);
+}
+
+ICStubConstIterator&
+ICStubConstIterator::operator++()
+{
+ MOZ_ASSERT(currentStub_ != nullptr);
+ currentStub_ = currentStub_->next();
+ return *this;
+}
+
+
+ICStubIterator::ICStubIterator(ICFallbackStub* fallbackStub, bool end)
+ : icEntry_(fallbackStub->icEntry()),
+ fallbackStub_(fallbackStub),
+ previousStub_(nullptr),
+ currentStub_(end ? fallbackStub : icEntry_->firstStub()),
+ unlinked_(false)
+{ }
+
+ICStubIterator&
+ICStubIterator::operator++()
+{
+ MOZ_ASSERT(currentStub_->next() != nullptr);
+ if (!unlinked_)
+ previousStub_ = currentStub_;
+ currentStub_ = currentStub_->next();
+ unlinked_ = false;
+ return *this;
+}
+
+void
+ICStubIterator::unlink(JSContext* cx)
+{
+ MOZ_ASSERT(currentStub_->next() != nullptr);
+ MOZ_ASSERT(currentStub_ != fallbackStub_);
+ MOZ_ASSERT(!unlinked_);
+
+ fallbackStub_->unlinkStub(cx->zone(), previousStub_, currentStub_);
+
+ // Mark the current iterator position as unlinked, so operator++ works properly.
+ unlinked_ = true;
+}
+
+
+void
+ICStub::markCode(JSTracer* trc, const char* name)
+{
+ JitCode* stubJitCode = jitCode();
+ TraceManuallyBarrieredEdge(trc, &stubJitCode, name);
+}
+
+void
+ICStub::updateCode(JitCode* code)
+{
+ // Write barrier on the old code.
+ JitCode::writeBarrierPre(jitCode());
+ stubCode_ = code->raw();
+}
+
+/* static */ void
+ICStub::trace(JSTracer* trc)
+{
+ markCode(trc, "shared-stub-jitcode");
+
+ // If the stub is a monitored fallback stub, then mark the monitor ICs hanging
+ // off of that stub. We don't need to worry about the regular monitored stubs,
+ // because the regular monitored stubs will always have a monitored fallback stub
+ // that references the same stub chain.
+ if (isMonitoredFallback()) {
+ ICTypeMonitor_Fallback* lastMonStub = toMonitoredFallbackStub()->fallbackMonitorStub();
+ for (ICStubConstIterator iter(lastMonStub->firstMonitorStub()); !iter.atEnd(); iter++) {
+ MOZ_ASSERT_IF(iter->next() == nullptr, *iter == lastMonStub);
+ iter->trace(trc);
+ }
+ }
+
+ if (isUpdated()) {
+ for (ICStubConstIterator iter(toUpdatedStub()->firstUpdateStub()); !iter.atEnd(); iter++) {
+ MOZ_ASSERT_IF(iter->next() == nullptr, iter->isTypeUpdate_Fallback());
+ iter->trace(trc);
+ }
+ }
+
+ switch (kind()) {
+ case ICStub::Call_Scripted: {
+ ICCall_Scripted* callStub = toCall_Scripted();
+ TraceEdge(trc, &callStub->callee(), "baseline-callscripted-callee");
+ TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callscripted-template");
+ break;
+ }
+ case ICStub::Call_Native: {
+ ICCall_Native* callStub = toCall_Native();
+ TraceEdge(trc, &callStub->callee(), "baseline-callnative-callee");
+ TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callnative-template");
+ break;
+ }
+ case ICStub::Call_ClassHook: {
+ ICCall_ClassHook* callStub = toCall_ClassHook();
+ TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callclasshook-template");
+ break;
+ }
+ case ICStub::Call_StringSplit: {
+ ICCall_StringSplit* callStub = toCall_StringSplit();
+ TraceEdge(trc, &callStub->templateObject(), "baseline-callstringsplit-template");
+ TraceEdge(trc, &callStub->expectedSep(), "baseline-callstringsplit-sep");
+ TraceEdge(trc, &callStub->expectedStr(), "baseline-callstringsplit-str");
+ break;
+ }
+ case ICStub::GetElem_NativeSlotName:
+ case ICStub::GetElem_NativeSlotSymbol:
+ case ICStub::GetElem_UnboxedPropertyName: {
+ ICGetElemNativeStub* getElemStub = static_cast<ICGetElemNativeStub*>(this);
+ getElemStub->receiverGuard().trace(trc);
+ if (getElemStub->isSymbol()) {
+ ICGetElem_NativeSlot<JS::Symbol*>* typedGetElemStub = toGetElem_NativeSlotSymbol();
+ TraceEdge(trc, &typedGetElemStub->key(), "baseline-getelem-native-key");
+ } else {
+ ICGetElemNativeSlotStub<PropertyName*>* typedGetElemStub =
+ reinterpret_cast<ICGetElemNativeSlotStub<PropertyName*>*>(this);
+ TraceEdge(trc, &typedGetElemStub->key(), "baseline-getelem-native-key");
+ }
+ break;
+ }
+ case ICStub::GetElem_NativePrototypeSlotName:
+ case ICStub::GetElem_NativePrototypeSlotSymbol: {
+ ICGetElemNativeStub* getElemStub = static_cast<ICGetElemNativeStub*>(this);
+ getElemStub->receiverGuard().trace(trc);
+ if (getElemStub->isSymbol()) {
+ ICGetElem_NativePrototypeSlot<JS::Symbol*>* typedGetElemStub
+ = toGetElem_NativePrototypeSlotSymbol();
+ TraceEdge(trc, &typedGetElemStub->key(), "baseline-getelem-nativeproto-key");
+ TraceEdge(trc, &typedGetElemStub->holder(), "baseline-getelem-nativeproto-holder");
+ TraceEdge(trc, &typedGetElemStub->holderShape(), "baseline-getelem-nativeproto-holdershape");
+ } else {
+ ICGetElem_NativePrototypeSlot<PropertyName*>* typedGetElemStub
+ = toGetElem_NativePrototypeSlotName();
+ TraceEdge(trc, &typedGetElemStub->key(), "baseline-getelem-nativeproto-key");
+ TraceEdge(trc, &typedGetElemStub->holder(), "baseline-getelem-nativeproto-holder");
+ TraceEdge(trc, &typedGetElemStub->holderShape(), "baseline-getelem-nativeproto-holdershape");
+ }
+ break;
+ }
+ case ICStub::GetElem_NativePrototypeCallNativeName:
+ case ICStub::GetElem_NativePrototypeCallNativeSymbol:
+ case ICStub::GetElem_NativePrototypeCallScriptedName:
+ case ICStub::GetElem_NativePrototypeCallScriptedSymbol: {
+ ICGetElemNativeStub* getElemStub = static_cast<ICGetElemNativeStub*>(this);
+ getElemStub->receiverGuard().trace(trc);
+ if (getElemStub->isSymbol()) {
+ ICGetElemNativePrototypeCallStub<JS::Symbol*>* callStub =
+ reinterpret_cast<ICGetElemNativePrototypeCallStub<JS::Symbol*>*>(this);
+ TraceEdge(trc, &callStub->key(), "baseline-getelem-nativeprotocall-key");
+ TraceEdge(trc, &callStub->getter(), "baseline-getelem-nativeprotocall-getter");
+ TraceEdge(trc, &callStub->holder(), "baseline-getelem-nativeprotocall-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-getelem-nativeprotocall-holdershape");
+ } else {
+ ICGetElemNativePrototypeCallStub<PropertyName*>* callStub =
+ reinterpret_cast<ICGetElemNativePrototypeCallStub<PropertyName*>*>(this);
+ TraceEdge(trc, &callStub->key(), "baseline-getelem-nativeprotocall-key");
+ TraceEdge(trc, &callStub->getter(), "baseline-getelem-nativeprotocall-getter");
+ TraceEdge(trc, &callStub->holder(), "baseline-getelem-nativeprotocall-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-getelem-nativeprotocall-holdershape");
+ }
+ break;
+ }
+ case ICStub::GetElem_Dense: {
+ ICGetElem_Dense* getElemStub = toGetElem_Dense();
+ TraceEdge(trc, &getElemStub->shape(), "baseline-getelem-dense-shape");
+ break;
+ }
+ case ICStub::GetElem_UnboxedArray: {
+ ICGetElem_UnboxedArray* getElemStub = toGetElem_UnboxedArray();
+ TraceEdge(trc, &getElemStub->group(), "baseline-getelem-unboxed-array-group");
+ break;
+ }
+ case ICStub::GetElem_TypedArray: {
+ ICGetElem_TypedArray* getElemStub = toGetElem_TypedArray();
+ TraceEdge(trc, &getElemStub->shape(), "baseline-getelem-typedarray-shape");
+ break;
+ }
+ case ICStub::SetElem_DenseOrUnboxedArray: {
+ ICSetElem_DenseOrUnboxedArray* setElemStub = toSetElem_DenseOrUnboxedArray();
+ TraceNullableEdge(trc, &setElemStub->shape(), "baseline-getelem-dense-shape");
+ TraceEdge(trc, &setElemStub->group(), "baseline-setelem-dense-group");
+ break;
+ }
+ case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
+ ICSetElem_DenseOrUnboxedArrayAdd* setElemStub = toSetElem_DenseOrUnboxedArrayAdd();
+ TraceEdge(trc, &setElemStub->group(), "baseline-setelem-denseadd-group");
+
+ JS_STATIC_ASSERT(ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+
+ switch (setElemStub->protoChainDepth()) {
+ case 0: setElemStub->toImpl<0>()->traceShapes(trc); break;
+ case 1: setElemStub->toImpl<1>()->traceShapes(trc); break;
+ case 2: setElemStub->toImpl<2>()->traceShapes(trc); break;
+ case 3: setElemStub->toImpl<3>()->traceShapes(trc); break;
+ case 4: setElemStub->toImpl<4>()->traceShapes(trc); break;
+ default: MOZ_CRASH("Invalid proto stub.");
+ }
+ break;
+ }
+ case ICStub::SetElem_TypedArray: {
+ ICSetElem_TypedArray* setElemStub = toSetElem_TypedArray();
+ TraceEdge(trc, &setElemStub->shape(), "baseline-setelem-typedarray-shape");
+ break;
+ }
+ case ICStub::TypeMonitor_SingleObject: {
+ ICTypeMonitor_SingleObject* monitorStub = toTypeMonitor_SingleObject();
+ TraceEdge(trc, &monitorStub->object(), "baseline-monitor-singleton");
+ break;
+ }
+ case ICStub::TypeMonitor_ObjectGroup: {
+ ICTypeMonitor_ObjectGroup* monitorStub = toTypeMonitor_ObjectGroup();
+ TraceEdge(trc, &monitorStub->group(), "baseline-monitor-group");
+ break;
+ }
+ case ICStub::TypeUpdate_SingleObject: {
+ ICTypeUpdate_SingleObject* updateStub = toTypeUpdate_SingleObject();
+ TraceEdge(trc, &updateStub->object(), "baseline-update-singleton");
+ break;
+ }
+ case ICStub::TypeUpdate_ObjectGroup: {
+ ICTypeUpdate_ObjectGroup* updateStub = toTypeUpdate_ObjectGroup();
+ TraceEdge(trc, &updateStub->group(), "baseline-update-group");
+ break;
+ }
+ case ICStub::In_Native: {
+ ICIn_Native* inStub = toIn_Native();
+ TraceEdge(trc, &inStub->shape(), "baseline-innative-stub-shape");
+ TraceEdge(trc, &inStub->name(), "baseline-innative-stub-name");
+ break;
+ }
+ case ICStub::In_NativePrototype: {
+ ICIn_NativePrototype* inStub = toIn_NativePrototype();
+ TraceEdge(trc, &inStub->shape(), "baseline-innativeproto-stub-shape");
+ TraceEdge(trc, &inStub->name(), "baseline-innativeproto-stub-name");
+ TraceEdge(trc, &inStub->holder(), "baseline-innativeproto-stub-holder");
+ TraceEdge(trc, &inStub->holderShape(), "baseline-innativeproto-stub-holdershape");
+ break;
+ }
+ case ICStub::In_NativeDoesNotExist: {
+ ICIn_NativeDoesNotExist* inStub = toIn_NativeDoesNotExist();
+ TraceEdge(trc, &inStub->name(), "baseline-innativedoesnotexist-stub-name");
+ JS_STATIC_ASSERT(ICIn_NativeDoesNotExist::MAX_PROTO_CHAIN_DEPTH == 8);
+ switch (inStub->protoChainDepth()) {
+ case 0: inStub->toImpl<0>()->traceShapes(trc); break;
+ case 1: inStub->toImpl<1>()->traceShapes(trc); break;
+ case 2: inStub->toImpl<2>()->traceShapes(trc); break;
+ case 3: inStub->toImpl<3>()->traceShapes(trc); break;
+ case 4: inStub->toImpl<4>()->traceShapes(trc); break;
+ case 5: inStub->toImpl<5>()->traceShapes(trc); break;
+ case 6: inStub->toImpl<6>()->traceShapes(trc); break;
+ case 7: inStub->toImpl<7>()->traceShapes(trc); break;
+ case 8: inStub->toImpl<8>()->traceShapes(trc); break;
+ default: MOZ_CRASH("Invalid proto stub.");
+ }
+ break;
+ }
+ case ICStub::In_Dense: {
+ ICIn_Dense* inStub = toIn_Dense();
+ TraceEdge(trc, &inStub->shape(), "baseline-in-dense-shape");
+ break;
+ }
+ case ICStub::GetName_Global: {
+ ICGetName_Global* globalStub = toGetName_Global();
+ globalStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &globalStub->holder(), "baseline-global-stub-holder");
+ TraceEdge(trc, &globalStub->holderShape(), "baseline-global-stub-holdershape");
+ TraceEdge(trc, &globalStub->globalShape(), "baseline-global-stub-globalshape");
+ break;
+ }
+ case ICStub::GetName_Env0:
+ static_cast<ICGetName_Env<0>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env1:
+ static_cast<ICGetName_Env<1>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env2:
+ static_cast<ICGetName_Env<2>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env3:
+ static_cast<ICGetName_Env<3>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env4:
+ static_cast<ICGetName_Env<4>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env5:
+ static_cast<ICGetName_Env<5>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetName_Env6:
+ static_cast<ICGetName_Env<6>*>(this)->traceEnvironments(trc);
+ break;
+ case ICStub::GetIntrinsic_Constant: {
+ ICGetIntrinsic_Constant* constantStub = toGetIntrinsic_Constant();
+ TraceEdge(trc, &constantStub->value(), "baseline-getintrinsic-constant-value");
+ break;
+ }
+ case ICStub::GetProp_CallDOMProxyNative:
+ case ICStub::GetProp_CallDOMProxyWithGenerationNative: {
+ ICGetPropCallDOMProxyNativeStub* propStub;
+ if (kind() == ICStub::GetProp_CallDOMProxyNative)
+ propStub = toGetProp_CallDOMProxyNative();
+ else
+ propStub = toGetProp_CallDOMProxyWithGenerationNative();
+ propStub->receiverGuard().trace(trc);
+ TraceNullableEdge(trc, &propStub->expandoShape(),
+ "baseline-getproplistbasenative-stub-expandoshape");
+ TraceEdge(trc, &propStub->holder(), "baseline-getproplistbasenative-stub-holder");
+ TraceEdge(trc, &propStub->holderShape(), "baseline-getproplistbasenative-stub-holdershape");
+ TraceEdge(trc, &propStub->getter(), "baseline-getproplistbasenative-stub-getter");
+ break;
+ }
+ case ICStub::GetProp_DOMProxyShadowed: {
+ ICGetProp_DOMProxyShadowed* propStub = toGetProp_DOMProxyShadowed();
+ TraceEdge(trc, &propStub->shape(), "baseline-getproplistbaseshadowed-stub-shape");
+ TraceEdge(trc, &propStub->name(), "baseline-getproplistbaseshadowed-stub-name");
+ break;
+ }
+ case ICStub::GetProp_CallScripted: {
+ ICGetProp_CallScripted* callStub = toGetProp_CallScripted();
+ callStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &callStub->holder(), "baseline-getpropcallscripted-stub-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallscripted-stub-holdershape");
+ TraceEdge(trc, &callStub->getter(), "baseline-getpropcallscripted-stub-getter");
+ break;
+ }
+ case ICStub::GetProp_CallNative: {
+ ICGetProp_CallNative* callStub = toGetProp_CallNative();
+ callStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &callStub->holder(), "baseline-getpropcallnative-stub-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallnative-stub-holdershape");
+ TraceEdge(trc, &callStub->getter(), "baseline-getpropcallnative-stub-getter");
+ break;
+ }
+ case ICStub::GetProp_CallNativeGlobal: {
+ ICGetProp_CallNativeGlobal* callStub = toGetProp_CallNativeGlobal();
+ callStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &callStub->holder(), "baseline-getpropcallnativeglobal-stub-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallnativeglobal-stub-holdershape");
+ TraceEdge(trc, &callStub->globalShape(), "baseline-getpropcallnativeglobal-stub-globalshape");
+ TraceEdge(trc, &callStub->getter(), "baseline-getpropcallnativeglobal-stub-getter");
+ break;
+ }
+ case ICStub::SetProp_Native: {
+ ICSetProp_Native* propStub = toSetProp_Native();
+ TraceEdge(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
+ TraceEdge(trc, &propStub->group(), "baseline-setpropnative-stub-group");
+ break;
+ }
+ case ICStub::SetProp_NativeAdd: {
+ ICSetProp_NativeAdd* propStub = toSetProp_NativeAdd();
+ TraceEdge(trc, &propStub->group(), "baseline-setpropnativeadd-stub-group");
+ TraceEdge(trc, &propStub->newShape(), "baseline-setpropnativeadd-stub-newshape");
+ TraceNullableEdge(trc, &propStub->newGroup(), "baseline-setpropnativeadd-stub-new-group");
+ JS_STATIC_ASSERT(ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH == 4);
+ switch (propStub->protoChainDepth()) {
+ case 0: propStub->toImpl<0>()->traceShapes(trc); break;
+ case 1: propStub->toImpl<1>()->traceShapes(trc); break;
+ case 2: propStub->toImpl<2>()->traceShapes(trc); break;
+ case 3: propStub->toImpl<3>()->traceShapes(trc); break;
+ case 4: propStub->toImpl<4>()->traceShapes(trc); break;
+ default: MOZ_CRASH("Invalid proto stub.");
+ }
+ break;
+ }
+ case ICStub::SetProp_Unboxed: {
+ ICSetProp_Unboxed* propStub = toSetProp_Unboxed();
+ TraceEdge(trc, &propStub->group(), "baseline-setprop-unboxed-stub-group");
+ break;
+ }
+ case ICStub::SetProp_TypedObject: {
+ ICSetProp_TypedObject* propStub = toSetProp_TypedObject();
+ TraceEdge(trc, &propStub->shape(), "baseline-setprop-typedobject-stub-shape");
+ TraceEdge(trc, &propStub->group(), "baseline-setprop-typedobject-stub-group");
+ break;
+ }
+ case ICStub::SetProp_CallScripted: {
+ ICSetProp_CallScripted* callStub = toSetProp_CallScripted();
+ callStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &callStub->holder(), "baseline-setpropcallscripted-stub-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-setpropcallscripted-stub-holdershape");
+ TraceEdge(trc, &callStub->setter(), "baseline-setpropcallscripted-stub-setter");
+ break;
+ }
+ case ICStub::SetProp_CallNative: {
+ ICSetProp_CallNative* callStub = toSetProp_CallNative();
+ callStub->receiverGuard().trace(trc);
+ TraceEdge(trc, &callStub->holder(), "baseline-setpropcallnative-stub-holder");
+ TraceEdge(trc, &callStub->holderShape(), "baseline-setpropcallnative-stub-holdershape");
+ TraceEdge(trc, &callStub->setter(), "baseline-setpropcallnative-stub-setter");
+ break;
+ }
+ case ICStub::InstanceOf_Function: {
+ ICInstanceOf_Function* instanceofStub = toInstanceOf_Function();
+ TraceEdge(trc, &instanceofStub->shape(), "baseline-instanceof-fun-shape");
+ TraceEdge(trc, &instanceofStub->prototypeObject(), "baseline-instanceof-fun-prototype");
+ break;
+ }
+ case ICStub::NewArray_Fallback: {
+ ICNewArray_Fallback* stub = toNewArray_Fallback();
+ TraceNullableEdge(trc, &stub->templateObject(), "baseline-newarray-template");
+ TraceEdge(trc, &stub->templateGroup(), "baseline-newarray-template-group");
+ break;
+ }
+ case ICStub::NewObject_Fallback: {
+ ICNewObject_Fallback* stub = toNewObject_Fallback();
+ TraceNullableEdge(trc, &stub->templateObject(), "baseline-newobject-template");
+ break;
+ }
+ case ICStub::Rest_Fallback: {
+ ICRest_Fallback* stub = toRest_Fallback();
+ TraceEdge(trc, &stub->templateObject(), "baseline-rest-template");
+ break;
+ }
+ case ICStub::CacheIR_Monitored:
+ TraceBaselineCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo());
+ break;
+ default:
+ break;
+ }
+}
+
+void
+ICFallbackStub::unlinkStub(Zone* zone, ICStub* prev, ICStub* stub)
+{
+ MOZ_ASSERT(stub->next());
+
+ // If stub is the last optimized stub, update lastStubPtrAddr.
+ if (stub->next() == this) {
+ MOZ_ASSERT(lastStubPtrAddr_ == stub->addressOfNext());
+ if (prev)
+ lastStubPtrAddr_ = prev->addressOfNext();
+ else
+ lastStubPtrAddr_ = icEntry()->addressOfFirstStub();
+ *lastStubPtrAddr_ = this;
+ } else {
+ if (prev) {
+ MOZ_ASSERT(prev->next() == stub);
+ prev->setNext(stub->next());
+ } else {
+ MOZ_ASSERT(icEntry()->firstStub() == stub);
+ icEntry()->setFirstStub(stub->next());
+ }
+ }
+
+ MOZ_ASSERT(numOptimizedStubs_ > 0);
+ numOptimizedStubs_--;
+
+ if (zone->needsIncrementalBarrier()) {
+ // We are removing edges from ICStub to gcthings. Perform one final trace
+ // of the stub for incremental GC, as it must know about those edges.
+ stub->trace(zone->barrierTracer());
+ }
+
+ if (ICStub::CanMakeCalls(stub->kind()) && stub->isMonitored()) {
+ // This stub can make calls so we can return to it if it's on the stack.
+ // We just have to reset its firstMonitorStub_ field to avoid a stale
+ // pointer when purgeOptimizedStubs destroys all optimized monitor
+ // stubs (unlinked stubs won't be updated).
+ ICTypeMonitor_Fallback* monitorFallback = toMonitoredFallbackStub()->fallbackMonitorStub();
+ stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback);
+ }
+
+#ifdef DEBUG
+ // Poison stub code to ensure we don't call this stub again. However, if this
+ // stub can make calls, a pointer to it may be stored in a stub frame on the
+ // stack, so we can't touch the stubCode_ or GC will crash when marking this
+ // pointer.
+ if (!ICStub::CanMakeCalls(stub->kind()))
+ stub->stubCode_ = (uint8_t*)0xbad;
+#endif
+}
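+
+// Worked example for the bookkeeping above (illustrative): with a chain
+//
+//   entry -> A -> B -> fallback        (lastStubPtrAddr_ is B's next slot)
+//
+// unlinking B takes the first branch: lastStubPtrAddr_ moves back to A's next
+// slot (or to the entry's first-stub slot if A does not exist) and is
+// re-pointed at the fallback, yielding  entry -> A -> fallback.  Unlinking A
+// instead takes the second branch and simply splices A out of the list.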
+
+void
+ICFallbackStub::unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind)
+{
+ for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) {
+ if (iter->kind() == kind)
+ iter.unlink(cx);
+ }
+}
+
+void
+ICTypeMonitor_Fallback::resetMonitorStubChain(Zone* zone)
+{
+ if (zone->needsIncrementalBarrier()) {
+ // We are removing edges from monitored stubs to gcthings (JitCode).
+ // Perform one final trace of all monitor stubs for incremental GC,
+ // as it must know about those edges.
+ for (ICStub* s = firstMonitorStub_; !s->isTypeMonitor_Fallback(); s = s->next())
+ s->trace(zone->barrierTracer());
+ }
+
+ firstMonitorStub_ = this;
+ numOptimizedMonitorStubs_ = 0;
+
+ if (hasFallbackStub_) {
+ lastMonitorStubPtrAddr_ = nullptr;
+
+ // Reset firstMonitorStub_ field of all monitored stubs.
+ for (ICStubConstIterator iter = mainFallbackStub_->beginChainConst();
+ !iter.atEnd(); iter++)
+ {
+ if (!iter->isMonitored())
+ continue;
+ iter->toMonitoredStub()->resetFirstMonitorStub(this);
+ }
+ } else {
+ icEntry_->setFirstStub(this);
+ lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub();
+ }
+}
+
+ICMonitoredStub::ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub)
+ : ICStub(kind, ICStub::Monitored, stubCode),
+ firstMonitorStub_(firstMonitorStub)
+{
+ // Silence Coverity's null-pointer-dereference checker.
+ MOZ_ASSERT(firstMonitorStub_);
+ // If the first monitored stub is a ICTypeMonitor_Fallback stub, then
+ // double check that _its_ firstMonitorStub is the same as this one.
+ MOZ_ASSERT_IF(firstMonitorStub_->isTypeMonitor_Fallback(),
+ firstMonitorStub_->toTypeMonitor_Fallback()->firstMonitorStub() ==
+ firstMonitorStub_);
+}
+
+bool
+ICMonitoredFallbackStub::initMonitoringChain(JSContext* cx, ICStubSpace* space,
+ ICStubCompiler::Engine engine)
+{
+ MOZ_ASSERT(fallbackMonitorStub_ == nullptr);
+
+ ICTypeMonitor_Fallback::Compiler compiler(cx, engine, this);
+ ICTypeMonitor_Fallback* stub = compiler.getStub(space);
+ if (!stub)
+ return false;
+ fallbackMonitorStub_ = stub;
+ return true;
+}
+
+bool
+ICMonitoredFallbackStub::addMonitorStubForValue(JSContext* cx, SharedStubInfo* stub,
+ HandleValue val)
+{
+ return fallbackMonitorStub_->addMonitorStubForValue(cx, stub, val);
+}
+
+bool
+ICUpdatedStub::initUpdatingChain(JSContext* cx, ICStubSpace* space)
+{
+ MOZ_ASSERT(firstUpdateStub_ == nullptr);
+
+ ICTypeUpdate_Fallback::Compiler compiler(cx);
+ ICTypeUpdate_Fallback* stub = compiler.getStub(space);
+ if (!stub)
+ return false;
+
+ firstUpdateStub_ = stub;
+ return true;
+}
+
+JitCode*
+ICStubCompiler::getStubCode()
+{
+ JitCompartment* comp = cx->compartment()->jitCompartment();
+
+ // Check for existing cached stubcode.
+ uint32_t stubKey = getKey();
+ JitCode* stubCode = comp->getStubCode(stubKey);
+ if (stubCode)
+ return stubCode;
+
+ // Compile new stubcode.
+ JitContext jctx(cx, nullptr);
+ MacroAssembler masm;
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.adjustFrame(sizeof(intptr_t));
+#endif
+#ifdef JS_CODEGEN_ARM
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+
+ if (!generateStubCode(masm))
+ return nullptr;
+ Linker linker(masm);
+ AutoFlushICache afc("getStubCode");
+ Rooted<JitCode*> newStubCode(cx, linker.newCode<CanGC>(cx, BASELINE_CODE));
+ if (!newStubCode)
+ return nullptr;
+
+ // All barriers are emitted toggled off by default; enable them if needed.
+ if (cx->zone()->needsIncrementalBarrier())
+ newStubCode->togglePreBarriers(true, DontReprotect);
+
+ // Cache newly compiled stubcode.
+ if (!comp->putStubCode(cx, stubKey, newStubCode))
+ return nullptr;
+
+ // After generating code, run postGenerateStubCode(). We must not fail
+ // after this point.
+ postGenerateStubCode(masm, newStubCode);
+
+ MOZ_ASSERT(entersStubFrame_ == ICStub::CanMakeCalls(kind));
+ MOZ_ASSERT(!inStubFrame_);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(newStubCode, "BaselineIC");
+#endif
+
+ return newStubCode;
+}
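+
+// Note on the cache above (descriptive): stubs whose generated code would be
+// identical share a single JitCode per JitCompartment, keyed by getKey().
+// The key is expected to fold together the stub kind and whatever compiler
+// flags affect codegen (the exact encoding is per-compiler), so repeated
+// attaches of the same flavour of stub reuse the cached code and differ only
+// in their ICStub data.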
+
+bool
+ICStubCompiler::tailCallVM(const VMFunction& fun, MacroAssembler& masm)
+{
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+ if (!code)
+ return false;
+
+ MOZ_ASSERT(fun.expectTailCall == TailCall);
+ uint32_t argSize = fun.explicitStackSlots() * sizeof(void*);
+ if (engine_ == Engine::Baseline) {
+ EmitBaselineTailCallVM(code, masm, argSize);
+ } else {
+ uint32_t stackSize = argSize + fun.extraValuesToPop * sizeof(Value);
+ EmitIonTailCallVM(code, masm, stackSize);
+ }
+ return true;
+}
+
+bool
+ICStubCompiler::callVM(const VMFunction& fun, MacroAssembler& masm)
+{
+ MOZ_ASSERT(inStubFrame_);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+ if (!code)
+ return false;
+
+ MOZ_ASSERT(fun.expectTailCall == NonTailCall);
+ if (engine_ == Engine::Baseline)
+ EmitBaselineCallVM(code, masm);
+ else
+ EmitIonCallVM(code, fun.explicitStackSlots(), masm);
+ return true;
+}
+
+bool
+ICStubCompiler::callTypeUpdateIC(MacroAssembler& masm, uint32_t objectOffset)
+{
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(DoTypeUpdateFallbackInfo);
+ if (!code)
+ return false;
+
+ EmitCallTypeUpdateIC(masm, code, objectOffset);
+ return true;
+}
+
+void
+ICStubCompiler::enterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ if (engine_ == Engine::Baseline) {
+ EmitBaselineEnterStubFrame(masm, scratch);
+#ifdef DEBUG
+ framePushedAtEnterStubFrame_ = masm.framePushed();
+#endif
+ } else {
+ EmitIonEnterStubFrame(masm, scratch);
+ }
+
+ MOZ_ASSERT(!inStubFrame_);
+ inStubFrame_ = true;
+
+#ifdef DEBUG
+ entersStubFrame_ = true;
+#endif
+}
+
+void
+ICStubCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon)
+{
+ MOZ_ASSERT(entersStubFrame_ && inStubFrame_);
+ inStubFrame_ = false;
+
+ if (engine_ == Engine::Baseline) {
+#ifdef DEBUG
+ masm.setFramePushed(framePushedAtEnterStubFrame_);
+ if (calledIntoIon)
+ masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
+#endif
+
+ EmitBaselineLeaveStubFrame(masm, calledIntoIon);
+ } else {
+ EmitIonLeaveStubFrame(masm);
+ }
+}
+
+void
+ICStubCompiler::pushStubPayload(MacroAssembler& masm, Register scratch)
+{
+ if (engine_ == Engine::IonMonkey) {
+ masm.push(Imm32(0));
+ return;
+ }
+
+ if (inStubFrame_) {
+ masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
+ masm.pushBaselineFramePtr(scratch, scratch);
+ } else {
+ masm.pushBaselineFramePtr(BaselineFrameReg, scratch);
+ }
+}
+
+void
+ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch)
+{
+ pushStubPayload(masm, scratch);
+ masm.adjustFrame(sizeof(intptr_t));
+}
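+
+// The "payload" pushed above is what the DoXFallback VM functions receive as
+// their void* payload argument: under Baseline it is the BaselineFrame
+// pointer, under Ion it is 0, and SharedStubInfo (below) uses that distinction
+// to recover the script either from the frame or from the IonICEntry.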
+
+void
+ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
+ Register scratch, LiveGeneralRegisterSet saveRegs)
+{
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
+ masm.branchValueIsNurseryObject(Assembler::NotEqual, val, scratch, &skipBarrier);
+
+ // void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ saveRegs.add(ICTailCallReg);
+#endif
+ saveRegs.set() = GeneralRegisterSet::Intersect(saveRegs.set(), GeneralRegisterSet::Volatile());
+ masm.PushRegsInMask(saveRegs);
+ masm.setupUnalignedABICall(scratch);
+ masm.movePtr(ImmPtr(cx->runtime()), scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
+ masm.PopRegsInMask(saveRegs);
+
+ masm.bind(&skipBarrier);
+}
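+
+// The barrier above is the usual generational-GC filter; roughly:
+//
+//   if (!IsInsideNursery(obj) && IsInsideNursery(val as object))
+//       PostWriteBarrier(rt, obj);   // record obj in the store buffer
+//
+// i.e. only a tenured object that has just been made to hold a nursery pointer
+// needs to be remembered so a minor GC can later find that edge.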
+
+SharedStubInfo::SharedStubInfo(JSContext* cx, void* payload, ICEntry* icEntry)
+ : maybeFrame_(nullptr),
+ outerScript_(cx),
+ innerScript_(cx),
+ icEntry_(icEntry)
+{
+ if (payload) {
+ maybeFrame_ = (BaselineFrame*) payload;
+ outerScript_ = maybeFrame_->script();
+ innerScript_ = maybeFrame_->script();
+ } else {
+ IonICEntry* entry = (IonICEntry*) icEntry;
+ innerScript_ = entry->script();
+ // outerScript_ is initialized lazily.
+ }
+}
+
+HandleScript
+SharedStubInfo::outerScript(JSContext* cx)
+{
+ if (!outerScript_) {
+ js::jit::JitActivationIterator iter(cx->runtime());
+ JitFrameIterator it(iter);
+ MOZ_ASSERT(it.isExitFrame());
+ ++it;
+ MOZ_ASSERT(it.isIonJS());
+ outerScript_ = it.script();
+ MOZ_ASSERT(!it.ionScript()->invalidated());
+ }
+ return outerScript_;
+}
+
+//
+// BinaryArith_Fallback
+//
+
+static bool
+DoBinaryArithFallback(JSContext* cx, void* payload, ICBinaryArith_Fallback* stub_,
+ HandleValue lhs, HandleValue rhs, MutableHandleValue ret)
+{
+ SharedStubInfo info(cx, payload, stub_->icEntry());
+ ICStubCompiler::Engine engine = info.engine();
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICBinaryArith_Fallback*> stub(engine, info.maybeFrame(), stub_);
+
+ jsbytecode* pc = info.pc();
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "BinaryArith(%s,%d,%d)", CodeName[op],
+ int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
+ int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
+
+ // Don't pass lhs/rhs directly; we need the original values when
+ // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+ // Perform the arithmetic operation.
+ switch(op) {
+ case JSOP_ADD:
+ // Do an add.
+ if (!AddValues(cx, &lhsCopy, &rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_SUB:
+ if (!SubValues(cx, &lhsCopy, &rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_MUL:
+ if (!MulValues(cx, &lhsCopy, &rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_DIV:
+ if (!DivValues(cx, &lhsCopy, &rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_MOD:
+ if (!ModValues(cx, &lhsCopy, &rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_POW:
+ if (!math_pow_handle(cx, lhsCopy, rhsCopy, ret))
+ return false;
+ break;
+ case JSOP_BITOR: {
+ int32_t result;
+ if (!BitOr(cx, lhs, rhs, &result))
+ return false;
+ ret.setInt32(result);
+ break;
+ }
+ case JSOP_BITXOR: {
+ int32_t result;
+ if (!BitXor(cx, lhs, rhs, &result))
+ return false;
+ ret.setInt32(result);
+ break;
+ }
+ case JSOP_BITAND: {
+ int32_t result;
+ if (!BitAnd(cx, lhs, rhs, &result))
+ return false;
+ ret.setInt32(result);
+ break;
+ }
+ case JSOP_LSH: {
+ int32_t result;
+ if (!BitLsh(cx, lhs, rhs, &result))
+ return false;
+ ret.setInt32(result);
+ break;
+ }
+ case JSOP_RSH: {
+ int32_t result;
+ if (!BitRsh(cx, lhs, rhs, &result))
+ return false;
+ ret.setInt32(result);
+ break;
+ }
+ case JSOP_URSH: {
+ if (!UrshOperation(cx, lhs, rhs, ret))
+ return false;
+ break;
+ }
+ default:
+ MOZ_CRASH("Unhandled baseline arith op");
+ }
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ if (ret.isDouble())
+ stub->setSawDoubleResult();
+
+ // Check to see if a new stub should be generated.
+ if (stub->numOptimizedStubs() >= ICBinaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
+ stub->noteUnoptimizableOperands();
+ return true;
+ }
+
+ // Handle string concat.
+ if (op == JSOP_ADD) {
+ if (lhs.isString() && rhs.isString()) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(String, String) stub", CodeName[op]);
+ MOZ_ASSERT(ret.isString());
+ ICBinaryArith_StringConcat::Compiler compiler(cx, engine);
+ ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!strcatStub)
+ return false;
+ stub->addNewStub(strcatStub);
+ return true;
+ }
+
+ if ((lhs.isString() && rhs.isObject()) || (lhs.isObject() && rhs.isString())) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", CodeName[op],
+ lhs.isString() ? "String" : "Object",
+ lhs.isString() ? "Object" : "String");
+ MOZ_ASSERT(ret.isString());
+ ICBinaryArith_StringObjectConcat::Compiler compiler(cx, engine, lhs.isString());
+ ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!strcatStub)
+ return false;
+ stub->addNewStub(strcatStub);
+ return true;
+ }
+ }
+
+ if (((lhs.isBoolean() && (rhs.isBoolean() || rhs.isInt32())) ||
+ (rhs.isBoolean() && (lhs.isBoolean() || lhs.isInt32()))) &&
+ (op == JSOP_ADD || op == JSOP_SUB || op == JSOP_BITOR || op == JSOP_BITAND ||
+ op == JSOP_BITXOR))
+ {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", CodeName[op],
+ lhs.isBoolean() ? "Boolean" : "Int32", rhs.isBoolean() ? "Boolean" : "Int32");
+ ICBinaryArith_BooleanWithInt32::Compiler compiler(cx, op, engine,
+ lhs.isBoolean(), rhs.isBoolean());
+ ICStub* arithStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!arithStub)
+ return false;
+ stub->addNewStub(arithStub);
+ return true;
+ }
+
+ // Handle only int32 or double.
+ if (!lhs.isNumber() || !rhs.isNumber()) {
+ stub->noteUnoptimizableOperands();
+ return true;
+ }
+
+ MOZ_ASSERT(ret.isNumber());
+
+ if (lhs.isDouble() || rhs.isDouble() || ret.isDouble()) {
+ if (!cx->runtime()->jitSupportsFloatingPoint)
+ return true;
+
+ switch (op) {
+ case JSOP_ADD:
+ case JSOP_SUB:
+ case JSOP_MUL:
+ case JSOP_DIV:
+ case JSOP_MOD: {
+ // Unlink int32 stubs; it's faster to always use the double stub.
+ stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Double, Double) stub", CodeName[op]);
+
+ ICBinaryArith_Double::Compiler compiler(cx, op, engine);
+ ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!doubleStub)
+ return false;
+ stub->addNewStub(doubleStub);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (lhs.isInt32() && rhs.isInt32() && op != JSOP_POW) {
+ bool allowDouble = ret.isDouble();
+ if (allowDouble)
+ stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Int32, Int32%s) stub", CodeName[op],
+ allowDouble ? " => Double" : "");
+ ICBinaryArith_Int32::Compiler compilerInt32(cx, op, engine, allowDouble);
+ ICStub* int32Stub = compilerInt32.getStub(compilerInt32.getStubSpace(info.outerScript(cx)));
+ if (!int32Stub)
+ return false;
+ stub->addNewStub(int32Stub);
+ return true;
+ }
+
+ // Handle Double <BITOP> Int32 or Int32 <BITOP> Double case.
+ if (((lhs.isDouble() && rhs.isInt32()) || (lhs.isInt32() && rhs.isDouble())) &&
+ ret.isInt32())
+ {
+ switch(op) {
+ case JSOP_BITOR:
+ case JSOP_BITXOR:
+ case JSOP_BITAND: {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", CodeName[op],
+ lhs.isDouble() ? "Double" : "Int32",
+ lhs.isDouble() ? "Int32" : "Double");
+ ICBinaryArith_DoubleWithInt32::Compiler compiler(cx, op, engine, lhs.isDouble());
+ ICStub* optStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!optStub)
+ return false;
+ stub->addNewStub(optStub);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+
+ stub->noteUnoptimizableOperands();
+ return true;
+}
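+
+// Example of how the fallback above specializes a site (illustrative): the
+// first  a + b  executed with two int32 operands attaches an
+// ICBinaryArith_Int32 stub ahead of this fallback. If a later execution sees a
+// double operand, the int32 stubs are unlinked and a single
+// ICBinaryArith_Double stub is attached instead (its ensureDouble guards also
+// accept int32 inputs); an int32 overflow that produces a double result
+// instead re-attaches the int32 stub in its allowDouble form.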
+
+typedef bool (*DoBinaryArithFallbackFn)(JSContext*, void*, ICBinaryArith_Fallback*,
+ HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoBinaryArithFallbackInfo =
+ FunctionInfo<DoBinaryArithFallbackFn>(DoBinaryArithFallback, "DoBinaryArithFallback",
+ TailCall, PopValues(2));
+
+bool
+ICBinaryArith_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoBinaryArithFallbackInfo, masm);
+}
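+
+// Stack discipline for the tail call above (a sketch): the first two
+// pushValue()s only re-sync the operands for the expression decompiler and are
+// accounted for by PopValues(2) on DoBinaryArithFallbackInfo; the explicit
+// arguments of DoBinaryArithFallback are then pushed in reverse order (rhs,
+// lhs, stub, then the stub payload), while cx is supplied by the VM wrapper
+// and the result comes back in the return operand.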
+
+static bool
+DoConcatStrings(JSContext* cx, HandleString lhs, HandleString rhs, MutableHandleValue res)
+{
+ JSString* result = ConcatStrings<CanGC>(cx, lhs, rhs);
+ if (!result)
+ return false;
+
+ res.setString(result);
+ return true;
+}
+
+typedef bool (*DoConcatStringsFn)(JSContext*, HandleString, HandleString, MutableHandleValue);
+static const VMFunction DoConcatStringsInfo =
+ FunctionInfo<DoConcatStringsFn>(DoConcatStrings, "DoConcatStrings", TailCall);
+
+bool
+ICBinaryArith_StringConcat::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestString(Assembler::NotEqual, R1, &failure);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ masm.unboxString(R0, R0.scratchReg());
+ masm.unboxString(R1, R1.scratchReg());
+
+ masm.push(R1.scratchReg());
+ masm.push(R0.scratchReg());
+ if (!tailCallVM(DoConcatStringsInfo, masm))
+ return false;
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+static JSString*
+ConvertObjectToStringForConcat(JSContext* cx, HandleValue obj)
+{
+ MOZ_ASSERT(obj.isObject());
+ RootedValue rootedObj(cx, obj);
+ if (!ToPrimitive(cx, &rootedObj))
+ return nullptr;
+ return ToString<CanGC>(cx, rootedObj);
+}
+
+static bool
+DoConcatStringObject(JSContext* cx, bool lhsIsString, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res)
+{
+ JSString* lstr = nullptr;
+ JSString* rstr = nullptr;
+ if (lhsIsString) {
+ // Convert rhs first.
+ MOZ_ASSERT(lhs.isString() && rhs.isObject());
+ rstr = ConvertObjectToStringForConcat(cx, rhs);
+ if (!rstr)
+ return false;
+
+ // lhs is already string.
+ lstr = lhs.toString();
+ } else {
+ MOZ_ASSERT(rhs.isString() && lhs.isObject());
+ // Convert lhs first.
+ lstr = ConvertObjectToStringForConcat(cx, lhs);
+ if (!lstr)
+ return false;
+
+ // rhs is already string.
+ rstr = rhs.toString();
+ }
+
+ JSString* str = ConcatStrings<NoGC>(cx, lstr, rstr);
+ if (!str) {
+ RootedString nlstr(cx, lstr), nrstr(cx, rstr);
+ str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
+ if (!str)
+ return false;
+ }
+
+ // Technically we need to call TypeScript::MonitorString for this PC; however,
+ // it was already called when this stub was attached, so this is OK.
+
+ res.setString(str);
+ return true;
+}
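+
+// The NoGC-then-CanGC pattern above is the usual fallible fast path: the
+// <NoGC> attempt may return null without reporting an error when it cannot
+// allocate without GC'ing, in which case the operands are rooted and the
+// concatenation is retried with <CanGC>, which is allowed to GC and reports
+// on genuine failure.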
+
+typedef bool (*DoConcatStringObjectFn)(JSContext*, bool lhsIsString, HandleValue, HandleValue,
+ MutableHandleValue);
+static const VMFunction DoConcatStringObjectInfo =
+ FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject, "DoConcatStringObject", TailCall,
+ PopValues(2));
+
+bool
+ICBinaryArith_StringObjectConcat::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ if (lhsIsString_) {
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+ } else {
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestString(Assembler::NotEqual, R1, &failure);
+ }
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Sync for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(Imm32(lhsIsString_));
+ if (!tailCallVM(DoConcatStringObjectInfo, masm))
+ return false;
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICBinaryArith_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ switch (op) {
+ case JSOP_ADD:
+ masm.addDouble(FloatReg1, FloatReg0);
+ break;
+ case JSOP_SUB:
+ masm.subDouble(FloatReg1, FloatReg0);
+ break;
+ case JSOP_MUL:
+ masm.mulDouble(FloatReg1, FloatReg0);
+ break;
+ case JSOP_DIV:
+ masm.divDouble(FloatReg1, FloatReg0);
+ break;
+ case JSOP_MOD:
+ masm.setupUnalignedABICall(R0.scratchReg());
+ masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
+ masm.passABIArg(FloatReg1, MoveOp::DOUBLE);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
+ MOZ_ASSERT(ReturnDoubleReg == FloatReg0);
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ masm.boxDouble(FloatReg0, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICBinaryArith_BooleanWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ if (lhsIsBool_)
+ masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
+ else
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ if (rhsIsBool_)
+ masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
+ else
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ Register lhsReg = lhsIsBool_ ? masm.extractBoolean(R0, ExtractTemp0)
+ : masm.extractInt32(R0, ExtractTemp0);
+ Register rhsReg = rhsIsBool_ ? masm.extractBoolean(R1, ExtractTemp1)
+ : masm.extractInt32(R1, ExtractTemp1);
+
+ MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB ||
+ op_ == JSOP_BITOR || op_ == JSOP_BITXOR || op_ == JSOP_BITAND);
+
+ switch(op_) {
+ case JSOP_ADD: {
+ Label fixOverflow;
+
+ masm.branchAdd32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
+ masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&fixOverflow);
+ masm.sub32(rhsReg, lhsReg);
+ // Proceed to failure below.
+ break;
+ }
+ case JSOP_SUB: {
+ Label fixOverflow;
+
+ masm.branchSub32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
+ masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&fixOverflow);
+ masm.add32(rhsReg, lhsReg);
+ // Proceed to failure below.
+ break;
+ }
+ case JSOP_BITOR: {
+ masm.or32(rhsReg, lhsReg);
+ masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+ EmitReturnFromIC(masm);
+ break;
+ }
+ case JSOP_BITXOR: {
+ masm.xor32(rhsReg, lhsReg);
+ masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+ EmitReturnFromIC(masm);
+ break;
+ }
+ case JSOP_BITAND: {
+ masm.and32(rhsReg, lhsReg);
+ masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
+ EmitReturnFromIC(masm);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_BooleanWithInt32.");
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICBinaryArith_DoubleWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(op == JSOP_BITOR || op == JSOP_BITAND || op == JSOP_BITXOR);
+
+ Label failure;
+ Register intReg;
+ Register scratchReg;
+ if (lhsIsDouble_) {
+ masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+ intReg = masm.extractInt32(R1, ExtractTemp0);
+ masm.unboxDouble(R0, FloatReg0);
+ scratchReg = R0.scratchReg();
+ } else {
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
+ intReg = masm.extractInt32(R0, ExtractTemp0);
+ masm.unboxDouble(R1, FloatReg0);
+ scratchReg = R1.scratchReg();
+ }
+
+ // Truncate the double to an int32.
+ {
+ Label doneTruncate;
+ Label truncateABICall;
+ masm.branchTruncateDoubleMaybeModUint32(FloatReg0, scratchReg, &truncateABICall);
+ masm.jump(&doneTruncate);
+
+ masm.bind(&truncateABICall);
+ masm.push(intReg);
+ masm.setupUnalignedABICall(scratchReg);
+ masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
+ masm.callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+ masm.storeCallInt32Result(scratchReg);
+ masm.pop(intReg);
+
+ masm.bind(&doneTruncate);
+ }
+
+ Register intReg2 = scratchReg;
+ // All handled ops commute, so no need to worry about ordering.
+ switch(op) {
+ case JSOP_BITOR:
+ masm.or32(intReg, intReg2);
+ break;
+ case JSOP_BITXOR:
+ masm.xor32(intReg, intReg2);
+ break;
+ case JSOP_BITAND:
+ masm.and32(intReg, intReg2);
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_DoubleWithInt32.");
+ }
+ masm.tagValue(JSVAL_TYPE_INT32, intReg2, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
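+
+// Semantics implemented above, for reference: bitwise ops apply ToInt32, i.e.
+// truncation toward zero followed by reduction modulo 2^32 into the signed
+// range, so for example  4294967301 | 0  (2^32 + 5) evaluates to 5 and
+// -1.9 | 0  evaluates to -1. branchTruncateDoubleMaybeModUint32 covers the
+// common cases inline and the JS::ToInt32 ABI call handles the rest.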
+
+//
+// UnaryArith_Fallback
+//
+
+static bool
+DoUnaryArithFallback(JSContext* cx, void* payload, ICUnaryArith_Fallback* stub_,
+ HandleValue val, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, payload, stub_->icEntry());
+ ICStubCompiler::Engine engine = info.engine();
+ HandleScript script = info.innerScript();
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICUnaryArith_Fallback*> stub(engine, info.maybeFrame(), stub_);
+
+ jsbytecode* pc = info.pc();
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "UnaryArith(%s)", CodeName[op]);
+
+ switch (op) {
+ case JSOP_BITNOT: {
+ int32_t result;
+ if (!BitNot(cx, val, &result))
+ return false;
+ res.setInt32(result);
+ break;
+ }
+ case JSOP_NEG:
+ if (!NegOperation(cx, script, pc, val, res))
+ return false;
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ if (res.isDouble())
+ stub->setSawDoubleResult();
+
+ if (stub->numOptimizedStubs() >= ICUnaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard/replace stubs.
+ return true;
+ }
+
+ if (val.isInt32() && res.isInt32()) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Int32 => Int32) stub", CodeName[op]);
+ ICUnaryArith_Int32::Compiler compiler(cx, op, engine);
+ ICStub* int32Stub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!int32Stub)
+ return false;
+ stub->addNewStub(int32Stub);
+ return true;
+ }
+
+ if (val.isNumber() && res.isNumber() && cx->runtime()->jitSupportsFloatingPoint) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Number => Number) stub", CodeName[op]);
+
+ // Unlink int32 stubs; the double stub handles both cases and TI specializes for both.
+ stub->unlinkStubsWithKind(cx, ICStub::UnaryArith_Int32);
+
+ ICUnaryArith_Double::Compiler compiler(cx, op, engine);
+ ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!doubleStub)
+ return false;
+ stub->addNewStub(doubleStub);
+ return true;
+ }
+
+ return true;
+}
+
+typedef bool (*DoUnaryArithFallbackFn)(JSContext*, void*, ICUnaryArith_Fallback*,
+ HandleValue, MutableHandleValue);
+static const VMFunction DoUnaryArithFallbackInfo =
+ FunctionInfo<DoUnaryArithFallbackFn>(DoUnaryArithFallback, "DoUnaryArithFallback", TailCall,
+ PopValues(1));
+
+bool
+ICUnaryArith_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoUnaryArithFallbackInfo, masm);
+}
+
+bool
+ICUnaryArith_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+
+ MOZ_ASSERT(op == JSOP_NEG || op == JSOP_BITNOT);
+
+ if (op == JSOP_NEG) {
+ masm.negateDouble(FloatReg0);
+ masm.boxDouble(FloatReg0, R0);
+ } else {
+ // Truncate the double to an int32.
+ Register scratchReg = R1.scratchReg();
+
+ Label doneTruncate;
+ Label truncateABICall;
+ masm.branchTruncateDoubleMaybeModUint32(FloatReg0, scratchReg, &truncateABICall);
+ masm.jump(&doneTruncate);
+
+ masm.bind(&truncateABICall);
+ masm.setupUnalignedABICall(scratchReg);
+ masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
+ masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+ masm.storeCallInt32Result(scratchReg);
+
+ masm.bind(&doneTruncate);
+ masm.not32(scratchReg);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// Compare_Fallback
+//
+
+static bool
+DoCompareFallback(JSContext* cx, void* payload, ICCompare_Fallback* stub_, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue ret)
+{
+ SharedStubInfo info(cx, payload, stub_->icEntry());
+ ICStubCompiler::Engine engine = info.engine();
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICCompare_Fallback*> stub(engine, info.maybeFrame(), stub_);
+
+ jsbytecode* pc = info.pc();
+ JSOp op = JSOp(*pc);
+
+ FallbackICSpew(cx, stub, "Compare(%s)", CodeName[op]);
+
+ // Case operations in a CONDSWITCH are performing strict equality.
+ if (op == JSOP_CASE)
+ op = JSOP_STRICTEQ;
+
+ // Don't pass lhs/rhs directly; we need the original values when
+ // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+ // Perform the compare operation.
+ bool out;
+ switch(op) {
+ case JSOP_LT:
+ if (!LessThan(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_LE:
+ if (!LessThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_GT:
+ if (!GreaterThan(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_GE:
+ if (!GreaterThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_EQ:
+ if (!LooselyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_NE:
+ if (!LooselyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_STRICTEQ:
+ if (!StrictlyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ case JSOP_STRICTNE:
+ if (!StrictlyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
+ return false;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unhandled baseline compare op");
+ return false;
+ }
+
+ ret.setBoolean(out);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Check to see if a new stub should be generated.
+ if (stub->numOptimizedStubs() >= ICCompare_Fallback::MAX_OPTIMIZED_STUBS) {
+ // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
+ // But for now we just bail.
+ return true;
+ }
+
+ // Try to generate new stubs.
+ if (lhs.isInt32() && rhs.isInt32()) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Int32, Int32) stub", CodeName[op]);
+ ICCompare_Int32::Compiler compiler(cx, op, engine);
+ ICStub* int32Stub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!int32Stub)
+ return false;
+
+ stub->addNewStub(int32Stub);
+ return true;
+ }
+
+ if (!cx->runtime()->jitSupportsFloatingPoint && (lhs.isNumber() || rhs.isNumber()))
+ return true;
+
+ if (lhs.isNumber() && rhs.isNumber()) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Number, Number) stub", CodeName[op]);
+
+ // Unlink int32 stubs; it's faster to always use the double stub.
+ stub->unlinkStubsWithKind(cx, ICStub::Compare_Int32);
+
+ ICCompare_Double::Compiler compiler(cx, op, engine);
+ ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!doubleStub)
+ return false;
+
+ stub->addNewStub(doubleStub);
+ return true;
+ }
+
+ if ((lhs.isNumber() && rhs.isUndefined()) ||
+ (lhs.isUndefined() && rhs.isNumber()))
+ {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", CodeName[op],
+ rhs.isUndefined() ? "Number" : "Undefined",
+ rhs.isUndefined() ? "Undefined" : "Number");
+ ICCompare_NumberWithUndefined::Compiler compiler(cx, op, engine, lhs.isUndefined());
+ ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!doubleStub)
+ return false;
+
+ stub->addNewStub(doubleStub);
+ return true;
+ }
+
+ if (lhs.isBoolean() && rhs.isBoolean()) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Boolean, Boolean) stub", CodeName[op]);
+ ICCompare_Boolean::Compiler compiler(cx, op, engine);
+ ICStub* booleanStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!booleanStub)
+ return false;
+
+ stub->addNewStub(booleanStub);
+ return true;
+ }
+
+ if ((lhs.isBoolean() && rhs.isInt32()) || (lhs.isInt32() && rhs.isBoolean())) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", CodeName[op],
+ rhs.isInt32() ? "Boolean" : "Int32",
+ rhs.isInt32() ? "Int32" : "Boolean");
+ ICCompare_Int32WithBoolean::Compiler compiler(cx, op, engine, lhs.isInt32());
+ ICStub* optStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!optStub)
+ return false;
+
+ stub->addNewStub(optStub);
+ return true;
+ }
+
+ if (IsEqualityOp(op)) {
+ if (lhs.isString() && rhs.isString() && !stub->hasStub(ICStub::Compare_String)) {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(String, String) stub", CodeName[op]);
+ ICCompare_String::Compiler compiler(cx, op, engine);
+ ICStub* stringStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!stringStub)
+ return false;
+
+ stub->addNewStub(stringStub);
+ return true;
+ }
+
+ if (lhs.isObject() && rhs.isObject()) {
+ MOZ_ASSERT(!stub->hasStub(ICStub::Compare_Object));
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Object, Object) stub", CodeName[op]);
+ ICCompare_Object::Compiler compiler(cx, op, engine);
+ ICStub* objectStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!objectStub)
+ return false;
+
+ stub->addNewStub(objectStub);
+ return true;
+ }
+
+ if ((lhs.isObject() || lhs.isNull() || lhs.isUndefined()) &&
+ (rhs.isObject() || rhs.isNull() || rhs.isUndefined()) &&
+ !stub->hasStub(ICStub::Compare_ObjectWithUndefined))
+ {
+ JitSpew(JitSpew_BaselineIC, " Generating %s(Obj/Null/Undef, Obj/Null/Undef) stub",
+ CodeName[op]);
+ bool lhsIsUndefined = lhs.isNull() || lhs.isUndefined();
+ bool compareWithNull = lhs.isNull() || rhs.isNull();
+ ICCompare_ObjectWithUndefined::Compiler compiler(cx, op, engine,
+ lhsIsUndefined, compareWithNull);
+ ICStub* objectStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!objectStub)
+ return false;
+
+ stub->addNewStub(objectStub);
+ return true;
+ }
+ }
+
+ stub->noteUnoptimizableAccess();
+
+ return true;
+}
+
+typedef bool (*DoCompareFallbackFn)(JSContext*, void*, ICCompare_Fallback*,
+ HandleValue, HandleValue, MutableHandleValue);
+static const VMFunction DoCompareFallbackInfo =
+ FunctionInfo<DoCompareFallbackFn>(DoCompareFallback, "DoCompareFallback", TailCall,
+ PopValues(2));
+
+bool
+ICCompare_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+ return tailCallVM(DoCompareFallbackInfo, masm);
+}
+
+//
+// Compare_String
+//
+
+bool
+ICCompare_String::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+ masm.branchTestString(Assembler::NotEqual, R1, &failure);
+
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ Register left = masm.extractString(R0, ExtractTemp0);
+ Register right = masm.extractString(R1, ExtractTemp1);
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+ Register scratchReg = regs.takeAny();
+
+ masm.compareStrings(op, left, right, scratchReg, &failure);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratchReg, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// Compare_Boolean
+//
+
+bool
+ICCompare_Boolean::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
+ masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
+
+ Register left = masm.extractInt32(R0, ExtractTemp0);
+ Register right = masm.extractInt32(R1, ExtractTemp1);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.cmp32Set(cond, left, right, left);
+
+ // Box the result and return
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, left, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// Compare_NumberWithUndefined
+//
+
+bool
+ICCompare_NumberWithUndefined::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ ValueOperand numberOperand, undefinedOperand;
+ if (lhsIsUndefined) {
+ numberOperand = R1;
+ undefinedOperand = R0;
+ } else {
+ numberOperand = R0;
+ undefinedOperand = R1;
+ }
+
+ Label failure;
+ masm.branchTestNumber(Assembler::NotEqual, numberOperand, &failure);
+ masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
+
+ // Comparing a number with undefined will always be true for NE/STRICTNE,
+ // and always be false for other compare ops.
+ masm.moveValue(BooleanValue(op == JSOP_NE || op == JSOP_STRICTNE), R0);
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
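+
+// Concrete behavior encoded above:  5 < undefined,  5 <= undefined,
+// 5 == undefined  and  5 === undefined  are all false (undefined becomes NaN
+// for the relational ops), while  5 != undefined  and  5 !== undefined  are
+// true, hence the single BooleanValue(op is NE or STRICTNE) result.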
+
+//
+// Compare_Object
+//
+
+bool
+ICCompare_Object::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ masm.branchTestObject(Assembler::NotEqual, R1, &failure);
+
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ Register left = masm.extractObject(R0, ExtractTemp0);
+ Register right = masm.extractObject(R1, ExtractTemp1);
+
+ Label ifTrue;
+ masm.branchPtr(JSOpToCondition(op, /* signed = */true), left, right, &ifTrue);
+
+ masm.moveValue(BooleanValue(false), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&ifTrue);
+ masm.moveValue(BooleanValue(true), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// Compare_ObjectWithUndefined
+//
+
+bool
+ICCompare_ObjectWithUndefined::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ ValueOperand objectOperand, undefinedOperand;
+ if (lhsIsUndefined) {
+ objectOperand = R1;
+ undefinedOperand = R0;
+ } else {
+ objectOperand = R0;
+ undefinedOperand = R1;
+ }
+
+ Label failure;
+ if (compareWithNull)
+ masm.branchTestNull(Assembler::NotEqual, undefinedOperand, &failure);
+ else
+ masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
+
+ Label notObject;
+ masm.branchTestObject(Assembler::NotEqual, objectOperand, &notObject);
+
+ if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
+ // obj !== undefined for all objects.
+ masm.moveValue(BooleanValue(op == JSOP_STRICTNE), R0);
+ EmitReturnFromIC(masm);
+ } else {
+ // obj != undefined only where !obj->getClass()->emulatesUndefined()
+ Label emulatesUndefined;
+ Register obj = masm.extractObject(objectOperand, ExtractTemp0);
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), obj);
+ masm.loadPtr(Address(obj, ObjectGroup::offsetOfClasp()), obj);
+ masm.branchTest32(Assembler::NonZero,
+ Address(obj, Class::offsetOfFlags()),
+ Imm32(JSCLASS_EMULATES_UNDEFINED),
+ &emulatesUndefined);
+ masm.moveValue(BooleanValue(op == JSOP_NE), R0);
+ EmitReturnFromIC(masm);
+ masm.bind(&emulatesUndefined);
+ masm.moveValue(BooleanValue(op == JSOP_EQ), R0);
+ EmitReturnFromIC(masm);
+ }
+
+ masm.bind(&notObject);
+
+ // Also support null == null or undefined == undefined comparisons.
+ if (compareWithNull)
+ masm.branchTestNull(Assembler::NotEqual, objectOperand, &failure);
+ else
+ masm.branchTestUndefined(Assembler::NotEqual, objectOperand, &failure);
+
+ masm.moveValue(BooleanValue(op == JSOP_STRICTEQ || op == JSOP_EQ), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
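+
+// Example of the emulatesUndefined case above: an object whose class sets
+// JSCLASS_EMULATES_UNDEFINED (document.all being the classic instance)
+// compares loosely equal to undefined and null, so  document.all == undefined
+// is true, while for any ordinary object  obj == undefined  is false and the
+// strict forms never match an object at all.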
+
+//
+// Compare_Int32WithBoolean
+//
+
+bool
+ICCompare_Int32WithBoolean::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ ValueOperand int32Val;
+ ValueOperand boolVal;
+ if (lhsIsInt32_) {
+ int32Val = R0;
+ boolVal = R1;
+ } else {
+ boolVal = R0;
+ int32Val = R1;
+ }
+ masm.branchTestBoolean(Assembler::NotEqual, boolVal, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, int32Val, &failure);
+
+ if (op_ == JSOP_STRICTEQ || op_ == JSOP_STRICTNE) {
+ // Ints and booleans are never strictly equal, always strictly not equal.
+ masm.moveValue(BooleanValue(op_ == JSOP_STRICTNE), R0);
+ EmitReturnFromIC(masm);
+ } else {
+ Register boolReg = masm.extractBoolean(boolVal, ExtractTemp0);
+ Register int32Reg = masm.extractInt32(int32Val, ExtractTemp1);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op_, /* signed = */true);
+ masm.cmp32Set(cond, (lhsIsInt32_ ? int32Reg : boolReg),
+ (lhsIsInt32_ ? boolReg : int32Reg), R0.scratchReg());
+
+ // Box the result and return
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.scratchReg(), R0);
+ EmitReturnFromIC(masm);
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// GetProp_Fallback
+//
+
+static bool
+TryAttachMagicArgumentsGetPropStub(JSContext* cx, SharedStubInfo* info,
+ ICGetProp_Fallback* stub, HandlePropertyName name,
+ HandleValue val, HandleValue res, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (!val.isMagic(JS_OPTIMIZED_ARGUMENTS))
+ return true;
+
+ // Try handling arguments.callee on optimized arguments.
+ if (name == cx->names().callee) {
+ MOZ_ASSERT(info->script()->hasMappedArgsObj());
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(MagicArgs.callee) stub");
+
+ // Unlike ICGetProp_ArgumentsLength, only magic argument stubs are
+ // supported at the moment.
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+ ICGetProp_ArgumentsCallee::Compiler compiler(cx, info->engine(), monitorStub);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!newStub)
+ return false;
+ stub->addNewStub(newStub);
+
+ *attached = true;
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+TryAttachLengthStub(JSContext* cx, SharedStubInfo* info,
+ ICGetProp_Fallback* stub, HandleValue val,
+ HandleValue res, bool* attached)
+{
+ MOZ_ASSERT(!*attached);
+
+ if (val.isString()) {
+ MOZ_ASSERT(res.isInt32());
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(String.length) stub");
+ ICGetProp_StringLength::Compiler compiler(cx, info->engine());
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!newStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(newStub);
+ return true;
+ }
+
+ if (val.isMagic(JS_OPTIMIZED_ARGUMENTS) && res.isInt32()) {
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(MagicArgs.length) stub");
+ ICGetProp_ArgumentsLength::Compiler compiler(cx, info->engine(), ICGetProp_ArgumentsLength::Magic);
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!newStub)
+ return false;
+
+ *attached = true;
+ stub->addNewStub(newStub);
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+UpdateExistingGenerationalDOMProxyStub(ICGetProp_Fallback* stub,
+ HandleObject obj)
+{
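+ // For a generational DOM proxy, the expando slot holds a private pointer to
+ // an ExpandoAndGeneration rather than an object or undefined; the assert
+ // below checks exactly that.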
+ Value expandoSlot = GetProxyExtra(obj, GetDOMProxyExpandoSlot());
+ MOZ_ASSERT(!expandoSlot.isObject() && !expandoSlot.isUndefined());
+ ExpandoAndGeneration* expandoAndGeneration = (ExpandoAndGeneration*)expandoSlot.toPrivate();
+ for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->isGetProp_CallDOMProxyWithGenerationNative()) {
+ ICGetProp_CallDOMProxyWithGenerationNative* updateStub =
+ iter->toGetProp_CallDOMProxyWithGenerationNative();
+ if (updateStub->expandoAndGeneration() == expandoAndGeneration) {
+ // Update generation
+ uint64_t generation = expandoAndGeneration->generation;
+ JitSpew(JitSpew_BaselineIC,
+ " Updating existing stub with generation, old value: %" PRIu64 ", "
+ "new value: %" PRIu64 "", updateStub->generation(),
+ generation);
+ updateStub->setGeneration(generation);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// Return whether obj is in some PreliminaryObjectArray and has a structure
+// that might change in the future.
+bool
+IsPreliminaryObject(JSObject* obj)
+{
+ if (obj->isSingleton())
+ return false;
+
+ TypeNewScript* newScript = obj->group()->newScript();
+ if (newScript && !newScript->analyzed())
+ return true;
+
+ if (obj->group()->maybePreliminaryObjects())
+ return true;
+
+ return false;
+}
+
+void
+StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub)
+{
+ // Before the new script properties analysis has been performed on a type,
+ // all instances of that type have the maximum number of fixed slots.
+ // Afterwards, the objects (even the preliminary ones) might be changed
+ // to reduce the number of fixed slots they have. If we generate stubs for
+ // both the old and new number of fixed slots, the stub will look
+ // polymorphic to IonBuilder when it is actually monomorphic. To avoid
+ // this, strip out any stubs for preliminary objects before attaching a new
+ // stub which isn't on a preliminary object.
+
+ for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
+ if (iter->isCacheIR_Monitored() && iter->toCacheIR_Monitored()->hasPreliminaryObject())
+ iter.unlink(cx);
+ else if (iter->isSetProp_Native() && iter->toSetProp_Native()->hasPreliminaryObject())
+ iter.unlink(cx);
+ }
+}
+
+JSObject*
+GetDOMProxyProto(JSObject* obj)
+{
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+ return obj->staticPrototype();
+}
+
+// Look up a property's shape on an object, being careful never to perform any
+// effectful operations. If this procedure does not yield a shape, that must not
+// be taken to mean the property does not exist on the object.
+bool
+EffectlesslyLookupProperty(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleObject holder, MutableHandleShape shape,
+ bool* checkDOMProxy,
+ DOMProxyShadowsResult* shadowsResult,
+ bool* domProxyHasGeneration)
+{
+ shape.set(nullptr);
+ holder.set(nullptr);
+
+ if (checkDOMProxy) {
+ *checkDOMProxy = false;
+ *shadowsResult = ShadowCheckFailed;
+ }
+
+ // Check for a cacheable DOM proxy if asked to.
+ RootedObject checkObj(cx, obj);
+ if (checkDOMProxy && IsCacheableDOMProxy(obj)) {
+ MOZ_ASSERT(domProxyHasGeneration);
+ MOZ_ASSERT(shadowsResult);
+
+ *checkDOMProxy = true;
+ if (obj->hasUncacheableProto())
+ return true;
+
+ *shadowsResult = GetDOMProxyShadowsCheck()(cx, obj, id);
+ if (*shadowsResult == ShadowCheckFailed)
+ return false;
+
+ if (DOMProxyIsShadowing(*shadowsResult)) {
+ holder.set(obj);
+ return true;
+ }
+
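+ // DoesntShadowUnique means the proxy tracks its expando through an
+ // ExpandoAndGeneration, so stubs must additionally guard on the generation.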
+ *domProxyHasGeneration = (*shadowsResult == DoesntShadowUnique);
+
+ checkObj = GetDOMProxyProto(obj);
+ if (!checkObj)
+ return true;
+ }
+
+ if (LookupPropertyPure(cx, checkObj, id, holder.address(), shape.address()))
+ return true;
+
+ holder.set(nullptr);
+ shape.set(nullptr);
+ return true;
+}
+
+bool
+IsCacheableProtoChain(JSObject* obj, JSObject* holder, bool isDOMProxy)
+{
+ MOZ_ASSERT_IF(isDOMProxy, IsCacheableDOMProxy(obj));
+
+ if (!isDOMProxy && !obj->isNative()) {
+ if (obj == holder)
+ return false;
+ if (!obj->is<UnboxedPlainObject>() &&
+ !obj->is<UnboxedArrayObject>() &&
+ !obj->is<TypedObject>())
+ {
+ return false;
+ }
+ }
+
+ JSObject* cur = obj;
+ while (cur != holder) {
+ // We cannot assume that we find the holder object on the prototype
+ // chain and must check for null proto. The prototype chain can be
+ // altered during the lookupProperty call.
+ MOZ_ASSERT(!cur->hasDynamicPrototype());
+
+ // Don't handle objects which require a prototype guard. This should
+ // be uncommon so handling it is likely not worth the complexity.
+ if (cur->hasUncacheableProto())
+ return false;
+
+ JSObject* proto = cur->staticPrototype();
+ if (!proto || !proto->isNative())
+ return false;
+
+ cur = proto;
+ }
+
+ return true;
+}
+
+bool
+IsCacheableGetPropReadSlot(JSObject* obj, JSObject* holder, Shape* shape, bool isDOMProxy)
+{
+ if (!shape || !IsCacheableProtoChain(obj, holder, isDOMProxy))
+ return false;
+
+ if (!shape->hasSlot() || !shape->hasDefaultGetter())
+ return false;
+
+ return true;
+}
+
+void
+GetFixedOrDynamicSlotOffset(Shape* shape, bool* isFixed, uint32_t* offset)
+{
+ MOZ_ASSERT(isFixed);
+ MOZ_ASSERT(offset);
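+ // Fixed slots live inline in the object, so return their byte offset from
+ // the object; dynamic slots live in the separate slots array, so the offset
+ // is relative to that array.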
+ *isFixed = shape->slot() < shape->numFixedSlots();
+ *offset = *isFixed ? NativeObject::getFixedSlotOffset(shape->slot())
+ : (shape->slot() - shape->numFixedSlots()) * sizeof(Value);
+}
+
+bool
+IsCacheableGetPropCall(JSContext* cx, JSObject* obj, JSObject* holder, Shape* shape,
+ bool* isScripted, bool* isTemporarilyUnoptimizable, bool isDOMProxy)
+{
+ MOZ_ASSERT(isScripted);
+
+ if (!shape || !IsCacheableProtoChain(obj, holder, isDOMProxy))
+ return false;
+
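+ // Plain data properties (a slot with the default getter) are handled by the
+ // ReadSlot path; this predicate only accepts accessor properties whose
+ // getter is a JSFunction.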
+ if (shape->hasSlot() || shape->hasDefaultGetter())
+ return false;
+
+ if (!shape->hasGetterValue())
+ return false;
+
+ if (!shape->getterValue().isObject() || !shape->getterObject()->is<JSFunction>())
+ return false;
+
+ JSFunction* func = &shape->getterObject()->as<JSFunction>();
+ if (IsWindow(obj)) {
+ if (!func->isNative())
+ return false;
+
+ if (!func->jitInfo() || func->jitInfo()->needsOuterizedThisObject())
+ return false;
+ }
+
+ if (func->isNative()) {
+ *isScripted = false;
+ return true;
+ }
+
+ if (!func->hasJITCode()) {
+ *isTemporarilyUnoptimizable = true;
+ return false;
+ }
+
+ *isScripted = true;
+ return true;
+}
+
+// Try to update all existing GetProp/GetName getter call stubs that match the
+// given holder in place with a new shape and getter. fallbackStub can be
+// either an ICGetProp_Fallback or an ICGetName_Fallback.
+//
+// If 'getter' is an own property, holder == receiver must be true.
+bool
+UpdateExistingGetPropCallStubs(ICFallbackStub* fallbackStub,
+ ICStub::Kind kind,
+ HandleNativeObject holder,
+ HandleObject receiver,
+ HandleFunction getter)
+{
+ MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
+ kind == ICStub::GetProp_CallNative ||
+ kind == ICStub::GetProp_CallNativeGlobal);
+ MOZ_ASSERT(fallbackStub->isGetName_Fallback() ||
+ fallbackStub->isGetProp_Fallback());
+ MOZ_ASSERT(holder);
+ MOZ_ASSERT(receiver);
+
+ bool isOwnGetter = (holder == receiver);
+ bool foundMatchingStub = false;
+ ReceiverGuard receiverGuard(receiver);
+ for (ICStubConstIterator iter = fallbackStub->beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->kind() == kind) {
+ ICGetPropCallGetter* getPropStub = static_cast<ICGetPropCallGetter*>(*iter);
+ if (getPropStub->holder() == holder && getPropStub->isOwnGetter() == isOwnGetter) {
+ // If this is an own getter, update the receiver guard as well,
+ // since that's the shape we'll be guarding on. Furthermore,
+ // isOwnGetter() relies on holderShape_ and receiverGuard_ being
+ // the same shape.
+ if (isOwnGetter)
+ getPropStub->receiverGuard().update(receiverGuard);
+
+ MOZ_ASSERT(getPropStub->holderShape() != holder->lastProperty() ||
+ !getPropStub->receiverGuard().matches(receiverGuard) ||
+ getPropStub->toGetProp_CallNativeGlobal()->globalShape() !=
+ receiver->as<LexicalEnvironmentObject>().global().lastProperty(),
+ "Why didn't we end up using this stub?");
+
+ // We want to update the holder shape to match the new one no
+ // matter what, even if the receiver shape is different.
+ getPropStub->holderShape() = holder->lastProperty();
+
+ // Make sure to update the getter, since a shape change might
+ // have changed which getter we want to use.
+ getPropStub->getter() = getter;
+
+ if (getPropStub->isGetProp_CallNativeGlobal()) {
+ ICGetProp_CallNativeGlobal* globalStub =
+ getPropStub->toGetProp_CallNativeGlobal();
+ globalStub->globalShape() =
+ receiver->as<LexicalEnvironmentObject>().global().lastProperty();
+ }
+
+ if (getPropStub->receiverGuard().matches(receiverGuard))
+ foundMatchingStub = true;
+ }
+ }
+ }
+
+ return foundMatchingStub;
+}
+
+static bool
+TryAttachNativeGetAccessorPropStub(JSContext* cx, SharedStubInfo* info,
+ ICGetProp_Fallback* stub, HandlePropertyName name,
+ HandleValue val, HandleValue res, bool* attached,
+ bool* isTemporarilyUnoptimizable)
+{
+ MOZ_ASSERT(!*attached);
+ MOZ_ASSERT(!*isTemporarilyUnoptimizable);
+
+ if (!val.isObject())
+ return true;
+
+ RootedObject obj(cx, &val.toObject());
+
+ bool isDOMProxy;
+ bool domProxyHasGeneration;
+ DOMProxyShadowsResult domProxyShadowsResult;
+ RootedShape shape(cx);
+ RootedObject holder(cx);
+ RootedId id(cx, NameToId(name));
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape, &isDOMProxy,
+ &domProxyShadowsResult, &domProxyHasGeneration))
+ {
+ return false;
+ }
+
+ ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
+
+ bool isScripted = false;
+ bool cacheableCall = IsCacheableGetPropCall(cx, obj, holder, shape, &isScripted,
+ isTemporarilyUnoptimizable,
+ isDOMProxy);
+
+ // Try handling scripted getters.
+ if (cacheableCall && isScripted && !isDOMProxy &&
+ info->engine() == ICStubCompiler::Engine::Baseline)
+ {
+ RootedFunction callee(cx, &shape->getterObject()->as<JSFunction>());
+ MOZ_ASSERT(callee->hasScript());
+
+ if (UpdateExistingGetPropCallStubs(stub, ICStub::GetProp_CallScripted,
+ holder.as<NativeObject>(), obj, callee)) {
+ *attached = true;
+ return true;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(NativeObj/ScriptedGetter %s:%" PRIuSIZE ") stub",
+ callee->nonLazyScript()->filename(), callee->nonLazyScript()->lineno());
+
+ ICGetProp_CallScripted::Compiler compiler(cx, monitorStub, obj, holder, callee,
+ info->pcOffset());
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!newStub)
+ return false;
+
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
+ // If it's a shadowed DOM proxy property, attach a stub that calls Proxy::get instead.
+ if (isDOMProxy && DOMProxyIsShadowing(domProxyShadowsResult)) {
+ MOZ_ASSERT(obj == holder);
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(DOMProxyProxy) stub");
+ Rooted<ProxyObject*> proxy(cx, &obj->as<ProxyObject>());
+ ICGetProp_DOMProxyShadowed::Compiler compiler(cx, info->engine(), monitorStub, proxy, name,
+ info->pcOffset());
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!newStub)
+ return false;
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+ }
+
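+ // Handle non-native receivers that are WindowProxies: remember the outer
+ // class so the stub can guard on it, then redo the lookup on the current
+ // global, where the property is actually found.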
+ const Class* outerClass = nullptr;
+ if (!isDOMProxy && !obj->isNative()) {
+ outerClass = obj->getClass();
+ if (!IsWindowProxy(obj))
+ return true;
+
+ // This must be a WindowProxy for the current Window/global. Else it'd
+ // be a cross-compartment wrapper and IsWindowProxy returns false for
+ // those.
+ MOZ_ASSERT(ToWindowIfWindowProxy(obj) == cx->global());
+ obj = cx->global();
+
+ if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape, &isDOMProxy,
+ &domProxyShadowsResult, &domProxyHasGeneration))
+ {
+ return false;
+ }
+ cacheableCall = IsCacheableGetPropCall(cx, obj, holder, shape, &isScripted,
+ isTemporarilyUnoptimizable, isDOMProxy);
+ }
+
+ // Try handling JSNative getters.
+ if (!cacheableCall || isScripted)
+ return true;
+
+ if (!shape || !shape->hasGetterValue() || !shape->getterValue().isObject() ||
+ !shape->getterObject()->is<JSFunction>())
+ {
+ return true;
+ }
+
+ RootedFunction callee(cx, &shape->getterObject()->as<JSFunction>());
+ MOZ_ASSERT(callee->isNative());
+
+ if (outerClass && (!callee->jitInfo() || callee->jitInfo()->needsOuterizedThisObject()))
+ return true;
+
+ JitSpew(JitSpew_BaselineIC, " Generating GetProp(%s%s/NativeGetter %p) stub",
+ isDOMProxy ? "DOMProxyObj" : "NativeObj",
+ isDOMProxy && domProxyHasGeneration ? "WithGeneration" : "",
+ callee->native());
+
+ ICStub* newStub = nullptr;
+ if (isDOMProxy) {
+ MOZ_ASSERT(obj != holder);
+ ICStub::Kind kind;
+ if (domProxyHasGeneration) {
+ if (UpdateExistingGenerationalDOMProxyStub(stub, obj)) {
+ *attached = true;
+ return true;
+ }
+ kind = ICStub::GetProp_CallDOMProxyWithGenerationNative;
+ } else {
+ kind = ICStub::GetProp_CallDOMProxyNative;
+ }
+ Rooted<ProxyObject*> proxy(cx, &obj->as<ProxyObject>());
+ ICGetPropCallDOMProxyNativeCompiler compiler(cx, kind, info->engine(), monitorStub, proxy, holder,
+ callee, info->pcOffset());
+ newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ } else {
+ if (UpdateExistingGetPropCallStubs(stub, ICStub::GetProp_CallNative,
+ holder.as<NativeObject>(), obj, callee))
+ {
+ *attached = true;
+ return true;
+ }
+
+ ICGetPropCallNativeCompiler compiler(cx, ICStub::GetProp_CallNative, info->engine(),
+ monitorStub, obj, holder, callee,
+ info->pcOffset(), outerClass);
+ newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ }
+ if (!newStub)
+ return false;
+ stub->addNewStub(newStub);
+ *attached = true;
+ return true;
+}
+
+bool
+CheckHasNoSuchProperty(JSContext* cx, JSObject* obj, PropertyName* name,
+ JSObject** lastProto, size_t* protoChainDepthOut)
+{
+ size_t depth = 0;
+ JSObject* curObj = obj;
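+ // Walk the prototype chain, checking at each step that the property cannot
+ // appear without us noticing: for native objects there must be no resolve
+ // hook, no existing property, and no class getProperty hook.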
+ while (curObj) {
+ if (curObj->isNative()) {
+ // Don't handle proto chains with resolve hooks.
+ if (ClassMayResolveId(cx->names(), curObj->getClass(), NameToId(name), curObj))
+ return false;
+ if (curObj->as<NativeObject>().contains(cx, NameToId(name)))
+ return false;
+ if (curObj->getClass()->getGetProperty())
+ return false;
+ } else if (curObj != obj) {
+ // Non-native objects are only handled as the original receiver.
+ return false;
+ } else if (curObj->is<UnboxedPlainObject>()) {
+ if (curObj->as<UnboxedPlainObject>().containsUnboxedOrExpandoProperty(cx, NameToId(name)))
+ return false;
+ } else if (curObj->is<UnboxedArrayObject>()) {
+ if (name == cx->names().length)
+ return false;
+ } else if (curObj->is<TypedObject>()) {
+ if (curObj->as<TypedObject>().typeDescr().hasProperty(cx->names(), NameToId(name)))
+ return false;
+ } else {
+ return false;
+ }
+
+ JSObject* proto = curObj->staticPrototype();
+ if (!proto)
+ break;
+
+ curObj = proto;
+ depth++;
+ }
+
+ if (lastProto)
+ *lastProto = curObj;
+ if (protoChainDepthOut)
+ *protoChainDepthOut = depth;
+ return true;
+}
+
+static bool
+ComputeGetPropResult(JSContext* cx, BaselineFrame* frame, JSOp op, HandlePropertyName name,
+ MutableHandleValue val, MutableHandleValue res)
+{
+ // Handle arguments.length and arguments.callee on optimized arguments, since
+ // the optimized-arguments value is not an object.
+ if (frame && val.isMagic(JS_OPTIMIZED_ARGUMENTS) && IsOptimizedArguments(frame, val)) {
+ if (op == JSOP_LENGTH) {
+ res.setInt32(frame->numActualArgs());
+ } else {
+ MOZ_ASSERT(name == cx->names().callee);
+ MOZ_ASSERT(frame->script()->hasMappedArgsObj());
+ res.setObject(*frame->callee());
+ }
+ } else {
+ if (op == JSOP_GETXPROP) {
+ RootedObject obj(cx, &val.toObject());
+ RootedId id(cx, NameToId(name));
+ if (!GetPropertyForNameLookup(cx, obj, id, res))
+ return false;
+ } else {
+ MOZ_ASSERT(op == JSOP_GETPROP || op == JSOP_CALLPROP || op == JSOP_LENGTH);
+ if (!GetProperty(cx, val, name, res))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+DoGetPropFallback(JSContext* cx, void* payload, ICGetProp_Fallback* stub_,
+ MutableHandleValue val, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, payload, stub_->icEntry());
+ ICStubCompiler::Engine engine = info.engine();
+ HandleScript script = info.innerScript();
+
+ // This fallback stub may trigger debug mode toggling.
+ DebugModeOSRVolatileStub<ICGetProp_Fallback*> stub(engine, info.maybeFrame(), stub_);
+
+ jsbytecode* pc = info.pc();
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetProp(%s)", CodeName[op]);
+
+ MOZ_ASSERT(op == JSOP_GETPROP || op == JSOP_CALLPROP || op == JSOP_LENGTH || op == JSOP_GETXPROP);
+
+ // Grab our old shape before it goes away.
+ RootedShape oldShape(cx);
+ if (val.isObject())
+ oldShape = val.toObject().maybeShape();
+
+ bool attached = false;
+ // There are some reasons we can fail to attach a stub that are temporary.
+ // We want to avoid calling noteUnoptimizableAccess() if the reason we
+ // failed to attach a stub is one of those temporary reasons, since we might
+ // end up attaching a stub for the exact same access later.
+ bool isTemporarilyUnoptimizable = false;
+
+ RootedPropertyName name(cx, script->getName(pc));
+
+ // Once the generic stub has been added, we should never reach the fallback stub again.
+ MOZ_ASSERT(!stub->hasStub(ICStub::GetProp_Generic));
+
+ if (stub->numOptimizedStubs() >= ICGetProp_Fallback::MAX_OPTIMIZED_STUBS && !stub.invalid()) {
+ // Discard all stubs in this IC and replace with generic getprop stub.
+ for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++)
+ iter.unlink(cx);
+ ICGetProp_Generic::Compiler compiler(cx, engine,
+ stub->fallbackMonitorStub()->firstMonitorStub());
+ ICStub* newStub = compiler.getStub(compiler.getStubSpace(info.outerScript(cx)));
+ if (!newStub)
+ return false;
+ stub->addNewStub(newStub);
+ attached = true;
+ }
+
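+ // Try the CacheIR path first; the hand-written accessor stubs below are
+ // only attempted if no CacheIR stub was attached.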
+ if (!attached && !JitOptions.disableCacheIR) {
+ mozilla::Maybe<CacheIRWriter> writer;
+ GetPropIRGenerator gen(cx, pc, val, name, res);
+ if (!gen.tryAttachStub(writer))
+ return false;
+ if (gen.emitted()) {
+ ICStub* newStub = AttachBaselineCacheIRStub(cx, writer.ref(), CacheKind::GetProp, stub);
+ if (newStub) {
+ JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
+ attached = true;
+ if (gen.shouldNotePreliminaryObjectStub())
+ newStub->toCacheIR_Monitored()->notePreliminaryObject();
+ else if (gen.shouldUnlinkPreliminaryObjectStubs())
+ StripPreliminaryObjectStubs(cx, stub);
+ }
+ }
+ }
+
+ if (!attached && !stub.invalid() &&
+ !TryAttachNativeGetAccessorPropStub(cx, &info, stub, name, val, res, &attached,
+ &isTemporarilyUnoptimizable))
+ {
+ return false;
+ }
+
+ if (!ComputeGetPropResult(cx, info.maybeFrame(), op, name, val, res))
+ return false;
+
+ TypeScript::Monitor(cx, script, pc, res);
+
+ // Check if debug mode toggling made the stub invalid.
+ if (stub.invalid())
+ return true;
+
+ // Add a type monitor stub for the resulting value.
+ if (!stub->addMonitorStubForValue(cx, &info, res))
+ return false;
+
+ if (attached)
+ return true;
+
+ if (op == JSOP_LENGTH) {
+ if (!TryAttachLengthStub(cx, &info, stub, val, res, &attached))
+ return false;
+ if (attached)
+ return true;
+ }
+
+ if (!TryAttachMagicArgumentsGetPropStub(cx, &info, stub, name, val,
+ res, &attached))
+ return false;
+ if (attached)
+ return true;
+
+ MOZ_ASSERT(!attached);
+ if (!isTemporarilyUnoptimizable)
+ stub->noteUnoptimizableAccess();
+
+ return true;
+}
+
+typedef bool (*DoGetPropFallbackFn)(JSContext*, void*, ICGetProp_Fallback*,
+ MutableHandleValue, MutableHandleValue);
+static const VMFunction DoGetPropFallbackInfo =
+ FunctionInfo<DoGetPropFallbackFn>(DoGetPropFallback, "DoGetPropFallback", TailCall,
+ PopValues(1));
+
+bool
+ICGetProp_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ if (!tailCallVM(DoGetPropFallbackInfo, masm))
+ return false;
+
+ // Even though the fallback frame doesn't enter a stub frame, the CallScripted
+ // frame that we are emulating does. Again, we lie.
+#ifdef DEBUG
+ EmitRepushTailCallReg(masm);
+ enterStubFrame(masm, R0.scratchReg());
+#else
+ inStubFrame_ = true;
+#endif
+
+ // What follows is the bailout path for inlined scripted getters.
+ // The return address stored on the baseline stack points here.
+ returnOffset_ = masm.currentOffset();
+
+ leaveStubFrame(masm, true);
+
+ // When we get here, ICStubReg contains the ICGetProp_Fallback stub,
+ // which we can't use to enter the TypeMonitor IC, because it's a MonitoredFallbackStub
+ // instead of a MonitoredStub. So, we cheat.
+ masm.loadPtr(Address(ICStubReg, ICMonitoredFallbackStub::offsetOfFallbackMonitorStub()),
+ ICStubReg);
+ EmitEnterTypeMonitorIC(masm, ICTypeMonitor_Fallback::offsetOfFirstMonitorStub());
+
+ return true;
+}
+
+void
+ICGetProp_Fallback::Compiler::postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code)
+{
+ if (engine_ == Engine::Baseline) {
+ void* address = code->raw() + returnOffset_;
+ cx->compartment()->jitCompartment()->initBaselineGetPropReturnAddr(address);
+ }
+}
+
+bool
+ICGetProp_StringLength::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestString(Assembler::NotEqual, R0, &failure);
+
+ // Unbox string and load its length.
+ Register string = masm.extractString(R0, ExtractTemp0);
+ masm.loadStringLength(string, string);
+
+ masm.tagValue(JSVAL_TYPE_INT32, string, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICGetPropNativeStub*
+ICGetPropNativeCompiler::getStub(ICStubSpace* space)
+{
+ ReceiverGuard guard(obj_);
+
+ switch (kind) {
+ case ICStub::GetName_Global: {
+ MOZ_ASSERT(obj_ != holder_);
+ Shape* holderShape = holder_->as<NativeObject>().lastProperty();
+ Shape* globalShape = obj_->as<LexicalEnvironmentObject>().global().lastProperty();
+ return newStub<ICGetName_Global>(space, getStubCode(), firstMonitorStub_, guard,
+ offset_, holder_, holderShape, globalShape);
+ }
+
+ default:
+ MOZ_CRASH("Bad stub kind");
+ }
+}
+
+void
+GuardReceiverObject(MacroAssembler& masm, ReceiverGuard guard,
+ Register object, Register scratch,
+ size_t receiverGuardOffset, Label* failure)
+{
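+ // A receiver guard may check the object's group, its shape, or both. For
+ // unboxed plain objects the group is guarded and, when a shape is present,
+ // it is matched against the expando object's shape rather than the
+ // receiver's own shape.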
+ Address groupAddress(ICStubReg, receiverGuardOffset + HeapReceiverGuard::offsetOfGroup());
+ Address shapeAddress(ICStubReg, receiverGuardOffset + HeapReceiverGuard::offsetOfShape());
+ Address expandoAddress(object, UnboxedPlainObject::offsetOfExpando());
+
+ if (guard.group) {
+ masm.loadPtr(groupAddress, scratch);
+ masm.branchTestObjGroup(Assembler::NotEqual, object, scratch, failure);
+
+ if (guard.group->clasp() == &UnboxedPlainObject::class_ && !guard.shape) {
+ // Guard that the unboxed object has no expando object.
+ masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), failure);
+ }
+ }
+
+ if (guard.shape) {
+ masm.loadPtr(shapeAddress, scratch);
+ if (guard.group && guard.group->clasp() == &UnboxedPlainObject::class_) {
+ // Guard that the unboxed object has a matching expando object.
+ masm.branchPtr(Assembler::Equal, expandoAddress, ImmWord(0), failure);
+ Label done;
+ masm.push(object);
+ masm.loadPtr(expandoAddress, object);
+ masm.branchTestObjShape(Assembler::Equal, object, scratch, &done);
+ masm.pop(object);
+ masm.jump(failure);
+ masm.bind(&done);
+ masm.pop(object);
+ } else {
+ masm.branchTestObjShape(Assembler::NotEqual, object, scratch, failure);
+ }
+ }
+}
+
+static void
+GuardGlobalObject(MacroAssembler& masm, HandleObject holder, Register globalLexicalReg,
+ Register holderReg, Register scratch, size_t globalShapeOffset, Label* failure)
+{
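+ // If the holder is the global itself, the holder shape guard performed by
+ // the caller already covers it. Otherwise unwrap the global from the
+ // lexical environment's enclosing scope and guard its shape explicitly.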
+ if (holder->is<GlobalObject>())
+ return;
+ masm.extractObject(Address(globalLexicalReg, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg, globalShapeOffset), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, failure);
+}
+
+bool
+ICGetPropNativeCompiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
+ Register objReg = InvalidReg;
+
+ if (inputDefinitelyObject_) {
+ objReg = R0.scratchReg();
+ } else {
+ regs.take(R0);
+ // Guard input is an object and unbox.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ objReg = masm.extractObject(R0, ExtractTemp0);
+ }
+ regs.takeUnchecked(objReg);
+
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Shape/group guard.
+ GuardReceiverObject(masm, ReceiverGuard(obj_), objReg, scratch,
+ ICGetPropNativeStub::offsetOfReceiverGuard(), &failure);
+
+ MOZ_ASSERT(obj_ != holder_);
+ MOZ_ASSERT(kind == ICStub::GetName_Global);
+
+ Register holderReg = regs.takeAny();
+
+ // If we are generating a non-lexical GETGNAME stub, we must also
+ // guard on the shape of the GlobalObject.
+ MOZ_ASSERT(obj_->is<LexicalEnvironmentObject>() &&
+ obj_->as<LexicalEnvironmentObject>().isGlobal());
+ GuardGlobalObject(masm, holder_, objReg, holderReg, scratch,
+ ICGetName_Global::offsetOfGlobalShape(), &failure);
+
+ // Shape guard holder.
+ masm.loadPtr(Address(ICStubReg, ICGetName_Global::offsetOfHolder()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg, ICGetName_Global::offsetOfHolderShape()),
+ scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
+
+ if (!isFixedSlot_) {
+ // Don't overwrite actual holderReg if we need to load a dynamic slots object.
+ // May need to preserve object for noSuchMethod check later.
+ Register nextHolder = regs.takeAny();
+ masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), nextHolder);
+ holderReg = nextHolder;
+ }
+
+ masm.load32(Address(ICStubReg, ICGetPropNativeStub::offsetOfOffset()), scratch);
+ BaseIndex result(holderReg, scratch, TimesOne);
+
+ masm.loadValue(result, R0);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+GetProtoShapes(JSObject* obj, size_t protoChainDepth, MutableHandle<ShapeVector> shapes)
+{
+ JSObject* curProto = obj->staticPrototype();
+ for (size_t i = 0; i < protoChainDepth; i++) {
+ if (!shapes.append(curProto->as<NativeObject>().lastProperty()))
+ return false;
+ curProto = curProto->staticPrototype();
+ }
+
+ MOZ_ASSERT(!curProto,
+ "longer prototype chain encountered than this stub permits!");
+ return true;
+}
+
+bool
+ICGetProp_CallScripted::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(engine_ == Engine::Baseline);
+
+ Label failure;
+ Label failureLeaveStubFrame;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Unbox and shape guard.
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+ GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
+ ICGetProp_CallScripted::offsetOfReceiverGuard(), &failure);
+
+ if (receiver_ != holder_) {
+ Register holderReg = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolder()), holderReg);
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolderShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
+ regs.add(holderReg);
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Load callee function and code. To ensure that |code| doesn't end up being
+ // ArgumentsRectifierReg, if it's available we assign it to |callee| instead.
+ Register callee;
+ if (regs.has(ArgumentsRectifierReg)) {
+ callee = ArgumentsRectifierReg;
+ regs.take(callee);
+ } else {
+ callee = regs.takeAny();
+ }
+ Register code = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfGetter()), callee);
+ masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
+ masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
+
+ // Align the stack such that the JitFrameLayout is aligned on
+ // JitStackAlignment.
+ masm.alignJitStackBasedOnNArgs(0);
+
+ // Getter is called with 0 arguments, just |obj| as thisv.
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.Push(R0);
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+ masm.Push(Imm32(0)); // ActualArgc is 0
+ masm.Push(callee);
+ masm.Push(scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
+ masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ MOZ_ASSERT(ArgumentsRectifierReg != code);
+
+ JitCode* argumentsRectifier =
+ cx->runtime()->jitRuntime()->getArgumentsRectifier();
+
+ masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+ masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+ masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ leaveStubFrame(masm, true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Leave stub frame and go to next stub.
+ masm.bind(&failureLeaveStubFrame);
+ inStubFrame_ = true;
+ leaveStubFrame(masm, false);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+//
+// VM function to help call native getters.
+//
+
+bool
+DoCallNativeGetter(JSContext* cx, HandleFunction callee, HandleObject obj,
+ MutableHandleValue result)
+{
+ MOZ_ASSERT(callee->isNative());
+ JSNative natfun = callee->native();
+
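+ // Follow the JSNative calling convention: vp[0] is the callee, vp[1] is
+ // |this|, argc is 0 (getters take no arguments), and the return value is
+ // written back into vp[0].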
+ JS::AutoValueArray<2> vp(cx);
+ vp[0].setObject(*callee.get());
+ vp[1].setObject(*obj.get());
+
+ if (!natfun(cx, 0, vp.begin()))
+ return false;
+
+ result.set(vp[0]);
+ return true;
+}
+
+typedef bool (*DoCallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
+static const VMFunction DoCallNativeGetterInfo =
+ FunctionInfo<DoCallNativeGetterFn>(DoCallNativeGetter, "DoCallNativeGetter");
+
+bool
+ICGetPropCallNativeCompiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register objReg = InvalidReg;
+
+ MOZ_ASSERT(!(inputDefinitelyObject_ && outerClass_));
+ if (inputDefinitelyObject_) {
+ objReg = R0.scratchReg();
+ } else {
+ // Guard input is an object and unbox.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ objReg = masm.extractObject(R0, ExtractTemp0);
+ if (outerClass_) {
+ Register tmp = regs.takeAny();
+ masm.branchTestObjClass(Assembler::NotEqual, objReg, tmp, outerClass_, &failure);
+ masm.movePtr(ImmGCPtr(cx->global()), objReg);
+ regs.add(tmp);
+ }
+ }
+
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Shape guard.
+ GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
+ ICGetPropCallGetter::offsetOfReceiverGuard(), &failure);
+
+ if (receiver_ != holder_) {
+ Register holderReg = regs.takeAny();
+
+ // If we are generating a non-lexical GETGNAME stub, we must also
+ // guard on the shape of the GlobalObject.
+ if (kind == ICStub::GetProp_CallNativeGlobal) {
+ MOZ_ASSERT(receiver_->is<LexicalEnvironmentObject>() &&
+ receiver_->as<LexicalEnvironmentObject>().isGlobal());
+ GuardGlobalObject(masm, holder_, objReg, holderReg, scratch,
+ ICGetProp_CallNativeGlobal::offsetOfGlobalShape(), &failure);
+ }
+
+ masm.loadPtr(Address(ICStubReg, ICGetPropCallGetter::offsetOfHolder()), holderReg);
+ masm.loadPtr(Address(ICStubReg, ICGetPropCallGetter::offsetOfHolderShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
+ regs.add(holderReg);
+ }
+
+ // Box and push obj onto the baseline frame stack for the decompiler.
+ if (engine_ == Engine::Baseline) {
+ if (inputDefinitelyObject_)
+ masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
+ EmitStowICValues(masm, 1);
+ if (inputDefinitelyObject_)
+ objReg = masm.extractObject(R0, ExtractTemp0);
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Load callee function.
+ Register callee = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetPropCallGetter::offsetOfGetter()), callee);
+
+ // If we're calling a getter on the global, inline the logic for the
+ // 'this' hook on the global lexical scope and manually push the global.
+ if (kind == ICStub::GetProp_CallNativeGlobal)
+ masm.extractObject(Address(objReg, EnvironmentObject::offsetOfEnclosingEnvironment()),
+ objReg);
+
+ // Push args for vm call.
+ masm.Push(objReg);
+ masm.Push(callee);
+
+ regs.add(R0);
+
+ if (!callVM(DoCallNativeGetterInfo, masm))
+ return false;
+ leaveStubFrame(masm);
+
+ if (engine_ == Engine::Baseline)
+ EmitUnstowICValues(masm, 1, /* discard = */true);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICStub*
+ICGetPropCallNativeCompiler::getStub(ICStubSpace* space)
+{
+ ReceiverGuard guard(receiver_);
+ Shape* holderShape = holder_->as<NativeObject>().lastProperty();
+
+ switch (kind) {
+ case ICStub::GetProp_CallNative:
+ return newStub<ICGetProp_CallNative>(space, getStubCode(), firstMonitorStub_,
+ guard, holder_, holderShape,
+ getter_, pcOffset_);
+
+ case ICStub::GetProp_CallNativeGlobal: {
+ Shape* globalShape = receiver_->as<LexicalEnvironmentObject>().global().lastProperty();
+ return newStub<ICGetProp_CallNativeGlobal>(space, getStubCode(), firstMonitorStub_,
+ guard, holder_, holderShape, globalShape,
+ getter_, pcOffset_);
+ }
+
+ default:
+ MOZ_CRASH("Bad stub kind");
+ }
+}
+
+// Callers are expected to have already guarded on the shape of the
+// object, which guarantees the object is a DOM proxy.
+void
+CheckDOMProxyExpandoDoesNotShadow(JSContext* cx, MacroAssembler& masm, Register object,
+ const Address& checkExpandoShapeAddr,
+ Address* expandoAndGenerationAddr,
+ Address* generationAddr,
+ Register scratch,
+ AllocatableGeneralRegisterSet& domProxyRegSet,
+ Label* checkFailed)
+{
+ // Guard that the object does not have expando properties, or has an expando
+ // which is known to not have the desired property.
+
+ // For the remaining code, we need to reserve some registers to load a value.
+ // This is ugly, but unavoidable.
+ ValueOperand tempVal = domProxyRegSet.takeAnyValue();
+ masm.pushValue(tempVal);
+
+ Label failDOMProxyCheck;
+ Label domProxyOk;
+
+ masm.loadPtr(Address(object, ProxyObject::offsetOfValues()), scratch);
+ Address expandoAddr(scratch, ProxyObject::offsetOfExtraSlotInValues(GetDOMProxyExpandoSlot()));
+
+ if (expandoAndGenerationAddr) {
+ MOZ_ASSERT(generationAddr);
+
+ masm.loadPtr(*expandoAndGenerationAddr, tempVal.scratchReg());
+ masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, tempVal.scratchReg(),
+ &failDOMProxyCheck);
+
+ masm.branch64(Assembler::NotEqual,
+ Address(tempVal.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
+ *generationAddr,
+ scratch, &failDOMProxyCheck);
+
+ masm.loadValue(Address(tempVal.scratchReg(), 0), tempVal);
+ } else {
+ masm.loadValue(expandoAddr, tempVal);
+ }
+
+ // If the incoming object does not have an expando object then we're sure we're not
+ // shadowing.
+ masm.branchTestUndefined(Assembler::Equal, tempVal, &domProxyOk);
+
+ // The reference object used to generate this check may not have had an
+ // expando object at all, in which case the presence of a non-undefined
+ // expando value in the incoming object is automatically a failure.
+ masm.loadPtr(checkExpandoShapeAddr, scratch);
+ masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), &failDOMProxyCheck);
+
+ // Otherwise, ensure that the incoming object has an object for its expando value and that
+ // the shape matches.
+ masm.branchTestObject(Assembler::NotEqual, tempVal, &failDOMProxyCheck);
+ Register objReg = masm.extractObject(tempVal, tempVal.scratchReg());
+ masm.branchTestObjShape(Assembler::Equal, objReg, scratch, &domProxyOk);
+
+ // Failure case: restore tempVal and jump to the caller's failure label.
+ masm.bind(&failDOMProxyCheck);
+ masm.popValue(tempVal);
+ masm.jump(checkFailed);
+
+ // Success case: restore tempVal and proceed.
+ masm.bind(&domProxyOk);
+ masm.popValue(tempVal);
+}
+
+bool
+ICGetPropCallDOMProxyNativeCompiler::generateStubCode(MacroAssembler& masm,
+ Address* expandoAndGenerationAddr,
+ Address* generationAddr)
+{
+ Label failure;
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Unbox.
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+ // Shape guard.
+ static const size_t receiverShapeOffset =
+ ICGetProp_CallDOMProxyNative::offsetOfReceiverGuard() +
+ HeapReceiverGuard::offsetOfShape();
+ masm.loadPtr(Address(ICStubReg, receiverShapeOffset), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
+
+ // Guard that our expando object hasn't started shadowing this property.
+ {
+ AllocatableGeneralRegisterSet domProxyRegSet(GeneralRegisterSet::All());
+ domProxyRegSet.take(ICStubReg);
+ domProxyRegSet.take(objReg);
+ domProxyRegSet.take(scratch);
+ Address expandoShapeAddr(ICStubReg, ICGetProp_CallDOMProxyNative::offsetOfExpandoShape());
+ CheckDOMProxyExpandoDoesNotShadow(
+ cx, masm, objReg,
+ expandoShapeAddr, expandoAndGenerationAddr, generationAddr,
+ scratch,
+ domProxyRegSet,
+ &failure);
+ }
+
+ Register holderReg = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallDOMProxyNative::offsetOfHolder()),
+ holderReg);
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallDOMProxyNative::offsetOfHolderShape()),
+ scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
+ regs.add(holderReg);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Load callee function.
+ Register callee = regs.takeAny();
+ masm.loadPtr(Address(ICStubReg, ICGetProp_CallDOMProxyNative::offsetOfGetter()), callee);
+
+ // Push args for vm call.
+ masm.Push(objReg);
+ masm.Push(callee);
+
+ // Don't have to preserve R0 anymore.
+ regs.add(R0);
+
+ if (!callVM(DoCallNativeGetterInfo, masm))
+ return false;
+ leaveStubFrame(masm);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICGetPropCallDOMProxyNativeCompiler::generateStubCode(MacroAssembler& masm)
+{
+ if (kind == ICStub::GetProp_CallDOMProxyNative)
+ return generateStubCode(masm, nullptr, nullptr);
+
+ Address internalStructAddress(ICStubReg,
+ ICGetProp_CallDOMProxyWithGenerationNative::offsetOfInternalStruct());
+ Address generationAddress(ICStubReg,
+ ICGetProp_CallDOMProxyWithGenerationNative::offsetOfGeneration());
+ return generateStubCode(masm, &internalStructAddress, &generationAddress);
+}
+
+ICStub*
+ICGetPropCallDOMProxyNativeCompiler::getStub(ICStubSpace* space)
+{
+ RootedShape shape(cx, proxy_->maybeShape());
+ RootedShape holderShape(cx, holder_->as<NativeObject>().lastProperty());
+
+ Value expandoSlot = GetProxyExtra(proxy_, GetDOMProxyExpandoSlot());
+ RootedShape expandoShape(cx, nullptr);
+ ExpandoAndGeneration* expandoAndGeneration;
+ uint64_t generation;
+ Value expandoVal;
+ if (kind == ICStub::GetProp_CallDOMProxyNative) {
+ expandoVal = expandoSlot;
+ expandoAndGeneration = nullptr; // initialize to silence GCC warning
+ generation = 0; // initialize to silence GCC warning
+ } else {
+ MOZ_ASSERT(kind == ICStub::GetProp_CallDOMProxyWithGenerationNative);
+ MOZ_ASSERT(!expandoSlot.isObject() && !expandoSlot.isUndefined());
+ expandoAndGeneration = (ExpandoAndGeneration*)expandoSlot.toPrivate();
+ expandoVal = expandoAndGeneration->expando;
+ generation = expandoAndGeneration->generation;
+ }
+
+ if (expandoVal.isObject())
+ expandoShape = expandoVal.toObject().as<NativeObject>().lastProperty();
+
+ if (kind == ICStub::GetProp_CallDOMProxyNative) {
+ return newStub<ICGetProp_CallDOMProxyNative>(
+ space, getStubCode(), firstMonitorStub_, shape,
+ expandoShape, holder_, holderShape, getter_, pcOffset_);
+ }
+
+ return newStub<ICGetProp_CallDOMProxyWithGenerationNative>(
+ space, getStubCode(), firstMonitorStub_, shape,
+ expandoAndGeneration, generation, expandoShape, holder_, holderShape, getter_,
+ pcOffset_);
+}
+
+ICStub*
+ICGetProp_DOMProxyShadowed::Compiler::getStub(ICStubSpace* space)
+{
+ RootedShape shape(cx, proxy_->maybeShape());
+ return New<ICGetProp_DOMProxyShadowed>(cx, space, getStubCode(), firstMonitorStub_, shape,
+ proxy_->handler(), name_, pcOffset_);
+}
+
+static bool
+ProxyGet(JSContext* cx, HandleObject proxy, HandlePropertyName name, MutableHandleValue vp)
+{
+ RootedValue receiver(cx, ObjectValue(*proxy));
+ RootedId id(cx, NameToId(name));
+ return Proxy::get(cx, proxy, receiver, id, vp);
+}
+
+typedef bool (*ProxyGetFn)(JSContext* cx, HandleObject proxy, HandlePropertyName name,
+ MutableHandleValue vp);
+static const VMFunction ProxyGetInfo = FunctionInfo<ProxyGetFn>(ProxyGet, "ProxyGet");
+
+bool
+ICGetProp_DOMProxyShadowed::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+ // Reserve a scratch register for |enterStubFrame|, which requires a
+ // scratch register other than ICTailCallReg.
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Guard input is an object.
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+
+ // Unbox.
+ Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+ // Shape guard.
+ masm.loadPtr(Address(ICStubReg, ICGetProp_DOMProxyShadowed::offsetOfShape()), scratch);
+ masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
+
+ // No need to do any more guards; it's safe to call ProxyGet even
+ // if we've since stopped shadowing.
+
+ // Call ProxyGet(JSContext* cx, HandleObject proxy, HandlePropertyName name, MutableHandleValue vp);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, scratch);
+
+ // Push property name and proxy object.
+ masm.loadPtr(Address(ICStubReg, ICGetProp_DOMProxyShadowed::offsetOfName()), scratch);
+ masm.Push(scratch);
+ masm.Push(objReg);
+
+ // Don't have to preserve R0 anymore.
+ regs.add(R0);
+
+ if (!callVM(ProxyGetInfo, masm))
+ return false;
+ leaveStubFrame(masm);
+
+ // Enter type monitor IC to type-check result.
+ EmitEnterTypeMonitorIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICGetProp_ArgumentsLength::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(which_ == ICGetProp_ArgumentsLength::Magic);
+
+ Label failure;
+
+ // Ensure that this is lazy arguments.
+ masm.branchTestMagicValue(Assembler::NotEqual, R0, JS_OPTIMIZED_ARGUMENTS, &failure);
+
+ // Ensure that the frame has not since materialized an arguments object.
+ masm.branchTest32(Assembler::NonZero,
+ Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ),
+ &failure);
+
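+ // For lazy (unmaterialized) arguments, .length is simply the frame's actual
+ // argument count.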
+ Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
+ masm.loadPtr(actualArgs, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+ICGetProp_ArgumentsCallee::ICGetProp_ArgumentsCallee(JitCode* stubCode, ICStub* firstMonitorStub)
+ : ICMonitoredStub(GetProp_ArgumentsCallee, stubCode, firstMonitorStub)
+{ }
+
+bool
+ICGetProp_ArgumentsCallee::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+
+ // Ensure that this is lazy arguments.
+ masm.branchTestMagicValue(Assembler::NotEqual, R0, JS_OPTIMIZED_ARGUMENTS, &failure);
+
+ // Ensure that the frame has not since materialized an arguments object.
+ masm.branchTest32(Assembler::NonZero,
+ Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ),
+ &failure);
+
+ Address callee(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
+ masm.loadFunctionFromCalleeToken(callee, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_OBJECT, R0.scratchReg(), R0);
+
+ EmitEnterTypeMonitorIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+/* static */ ICGetProp_Generic*
+ICGetProp_Generic::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_Generic& other)
+{
+ return New<ICGetProp_Generic>(cx, space, other.jitCode(), firstMonitorStub);
+}
+
+static bool
+DoGetPropGeneric(JSContext* cx, void* payload, ICGetProp_Generic* stub,
+ MutableHandleValue val, MutableHandleValue res)
+{
+ ICFallbackStub* fallback = stub->getChainFallback();
+ SharedStubInfo info(cx, payload, fallback->icEntry());
+ HandleScript script = info.innerScript();
+ jsbytecode* pc = info.pc();
+ JSOp op = JSOp(*pc);
+ RootedPropertyName name(cx, script->getName(pc));
+ return ComputeGetPropResult(cx, info.maybeFrame(), op, name, val, res);
+}
+
+typedef bool (*DoGetPropGenericFn)(JSContext*, void*, ICGetProp_Generic*, MutableHandleValue, MutableHandleValue);
+static const VMFunction DoGetPropGenericInfo =
+ FunctionInfo<DoGetPropGenericFn>(DoGetPropGeneric, "DoGetPropGeneric");
+
+bool
+ICGetProp_Generic::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
+
+ Register scratch = regs.takeAnyExcluding(ICTailCallReg);
+
+ // Sync for the decompiler.
+ if (engine_ == Engine::Baseline)
+ EmitStowICValues(masm, 1);
+
+ enterStubFrame(masm, scratch);
+
+ // Push arguments.
+ masm.Push(R0);
+ masm.Push(ICStubReg);
+ PushStubPayload(masm, R0.scratchReg());
+
+ if (!callVM(DoGetPropGenericInfo, masm))
+ return false;
+
+ leaveStubFrame(masm);
+
+ if (engine_ == Engine::Baseline)
+ EmitUnstowICValues(masm, 1, /* discard = */ true);
+
+ EmitEnterTypeMonitorIC(masm);
+ return true;
+}
+
+void
+CheckForTypedObjectWithDetachedStorage(JSContext* cx, MacroAssembler& masm, Label* failure)
+{
+ // All stubs manipulating typed objects must check the compartment-wide
+ // flag indicating whether their underlying storage might be detached, to
+ // bail out if needed.
+ int32_t* address = &cx->compartment()->detachedTypedObjects;
+ masm.branch32(Assembler::NotEqual, AbsoluteAddress(address), Imm32(0), failure);
+}
+
+void
+LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result)
+{
+ switch (layout) {
+ case Layout_TypedArray:
+ masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), result);
+ break;
+ case Layout_OutlineTypedObject:
+ masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), result);
+ break;
+ case Layout_InlineTypedObject:
+ masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), result);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void
+BaselineScript::noteAccessedGetter(uint32_t pcOffset)
+{
+ ICEntry& entry = icEntryFromPCOffset(pcOffset);
+ ICFallbackStub* stub = entry.fallbackStub();
+
+ if (stub->isGetProp_Fallback())
+ stub->toGetProp_Fallback()->noteAccessedGetter();
+}
+
+ICGetPropNativeStub::ICGetPropNativeStub(ICStub::Kind kind, JitCode* stubCode,
+ ICStub* firstMonitorStub,
+ ReceiverGuard guard, uint32_t offset)
+ : ICMonitoredStub(kind, stubCode, firstMonitorStub),
+ receiverGuard_(guard),
+ offset_(offset)
+{ }
+
+ICGetPropNativePrototypeStub::ICGetPropNativePrototypeStub(ICStub::Kind kind, JitCode* stubCode,
+ ICStub* firstMonitorStub,
+ ReceiverGuard guard, uint32_t offset,
+ JSObject* holder, Shape* holderShape)
+ : ICGetPropNativeStub(kind, stubCode, firstMonitorStub, guard, offset),
+ holder_(holder),
+ holderShape_(holderShape)
+{ }
+
+ICGetPropCallGetter::ICGetPropCallGetter(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard receiverGuard, JSObject* holder,
+ Shape* holderShape, JSFunction* getter,
+ uint32_t pcOffset)
+ : ICMonitoredStub(kind, stubCode, firstMonitorStub),
+ receiverGuard_(receiverGuard),
+ holder_(holder),
+ holderShape_(holderShape),
+ getter_(getter),
+ pcOffset_(pcOffset)
+{
+ MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
+ kind == ICStub::GetProp_CallNative ||
+ kind == ICStub::GetProp_CallNativeGlobal ||
+ kind == ICStub::GetProp_CallDOMProxyNative ||
+ kind == ICStub::GetProp_CallDOMProxyWithGenerationNative);
+}
+
+/* static */ ICGetProp_CallScripted*
+ICGetProp_CallScripted::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallScripted& other)
+{
+ return New<ICGetProp_CallScripted>(cx, space, other.jitCode(), firstMonitorStub,
+ other.receiverGuard(),
+ other.holder_, other.holderShape_,
+ other.getter_, other.pcOffset_);
+}
+
+/* static */ ICGetProp_CallNative*
+ICGetProp_CallNative::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallNative& other)
+{
+ return New<ICGetProp_CallNative>(cx, space, other.jitCode(), firstMonitorStub,
+ other.receiverGuard(), other.holder_,
+ other.holderShape_, other.getter_, other.pcOffset_);
+}
+
+/* static */ ICGetProp_CallNativeGlobal*
+ICGetProp_CallNativeGlobal::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallNativeGlobal& other)
+{
+ return New<ICGetProp_CallNativeGlobal>(cx, space, other.jitCode(), firstMonitorStub,
+ other.receiverGuard(), other.holder_,
+ other.holderShape_, other.globalShape_,
+ other.getter_, other.pcOffset_);
+}
+
+ICGetPropCallDOMProxyNativeStub::ICGetPropCallDOMProxyNativeStub(Kind kind, JitCode* stubCode,
+ ICStub* firstMonitorStub,
+ Shape* shape,
+ Shape* expandoShape,
+ JSObject* holder,
+ Shape* holderShape,
+ JSFunction* getter,
+ uint32_t pcOffset)
+ : ICGetPropCallGetter(kind, stubCode, firstMonitorStub, ReceiverGuard(nullptr, shape),
+ holder, holderShape, getter, pcOffset),
+ expandoShape_(expandoShape)
+{ }
+
+ICGetPropCallDOMProxyNativeCompiler::ICGetPropCallDOMProxyNativeCompiler(JSContext* cx,
+ ICStub::Kind kind,
+ ICStubCompiler::Engine engine,
+ ICStub* firstMonitorStub,
+ Handle<ProxyObject*> proxy,
+ HandleObject holder,
+ HandleFunction getter,
+ uint32_t pcOffset)
+ : ICStubCompiler(cx, kind, engine),
+ firstMonitorStub_(firstMonitorStub),
+ proxy_(cx, proxy),
+ holder_(cx, holder),
+ getter_(cx, getter),
+ pcOffset_(pcOffset)
+{
+ MOZ_ASSERT(kind == ICStub::GetProp_CallDOMProxyNative ||
+ kind == ICStub::GetProp_CallDOMProxyWithGenerationNative);
+ MOZ_ASSERT(proxy_->handler()->family() == GetDOMProxyHandlerFamily());
+}
+
+/* static */ ICGetProp_CallDOMProxyNative*
+ICGetProp_CallDOMProxyNative::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallDOMProxyNative& other)
+{
+ return New<ICGetProp_CallDOMProxyNative>(cx, space, other.jitCode(), firstMonitorStub,
+ other.receiverGuard_.shape(), other.expandoShape_,
+ other.holder_, other.holderShape_, other.getter_,
+ other.pcOffset_);
+}
+
+/* static */ ICGetProp_CallDOMProxyWithGenerationNative*
+ICGetProp_CallDOMProxyWithGenerationNative::Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetProp_CallDOMProxyWithGenerationNative& other)
+{
+ return New<ICGetProp_CallDOMProxyWithGenerationNative>(cx, space, other.jitCode(),
+ firstMonitorStub,
+ other.receiverGuard_.shape(),
+ other.expandoAndGeneration_,
+ other.generation_,
+ other.expandoShape_, other.holder_,
+ other.holderShape_, other.getter_,
+ other.pcOffset_);
+}
+
+ICGetProp_DOMProxyShadowed::ICGetProp_DOMProxyShadowed(JitCode* stubCode,
+ ICStub* firstMonitorStub,
+ Shape* shape,
+ const BaseProxyHandler* proxyHandler,
+ PropertyName* name,
+ uint32_t pcOffset)
+ : ICMonitoredStub(ICStub::GetProp_DOMProxyShadowed, stubCode, firstMonitorStub),
+ shape_(shape),
+ proxyHandler_(proxyHandler),
+ name_(name),
+ pcOffset_(pcOffset)
+{ }
+
+/* static */ ICGetProp_DOMProxyShadowed*
+ICGetProp_DOMProxyShadowed::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_DOMProxyShadowed& other)
+{
+ return New<ICGetProp_DOMProxyShadowed>(cx, space, other.jitCode(), firstMonitorStub,
+ other.shape_, other.proxyHandler_, other.name_,
+ other.pcOffset_);
+}
+
+//
+// TypeMonitor_Fallback
+//
+
+bool
+ICTypeMonitor_Fallback::addMonitorStubForValue(JSContext* cx, SharedStubInfo* info, HandleValue val)
+{
+ bool wasDetachedMonitorChain = lastMonitorStubPtrAddr_ == nullptr;
+ MOZ_ASSERT_IF(wasDetachedMonitorChain, numOptimizedMonitorStubs_ == 0);
+
+ if (numOptimizedMonitorStubs_ >= MAX_OPTIMIZED_STUBS) {
+ // TODO: if the TypeSet becomes unknown or has the AnyObject type,
+ // replace stubs with a single stub to handle these.
+ return true;
+ }
+
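+ // Three cases follow: primitives share a single PrimitiveSet stub that
+ // accumulates the observed primitive types, singleton objects each get a
+ // SingleObject stub, and all other objects are monitored by their
+ // ObjectGroup.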
+ if (val.isPrimitive()) {
+ if (val.isMagic(JS_UNINITIALIZED_LEXICAL))
+ return true;
+ MOZ_ASSERT(!val.isMagic());
+ JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
+
+ // Check for existing TypeMonitor stub.
+ ICTypeMonitor_PrimitiveSet* existingStub = nullptr;
+ for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
+ if (iter->isTypeMonitor_PrimitiveSet()) {
+ existingStub = iter->toTypeMonitor_PrimitiveSet();
+ if (existingStub->containsType(type))
+ return true;
+ }
+ }
+
+ ICTypeMonitor_PrimitiveSet::Compiler compiler(cx, info->engine(), existingStub, type);
+ ICStub* stub = existingStub ? compiler.updateStub()
+ : compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!stub) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " %s TypeMonitor stub %p for primitive type %d",
+ existingStub ? "Modified existing" : "Created new", stub, type);
+
+ if (!existingStub) {
+ MOZ_ASSERT(!hasStub(TypeMonitor_PrimitiveSet));
+ addOptimizedMonitorStub(stub);
+ }
+
+ } else if (val.toObject().isSingleton()) {
+ RootedObject obj(cx, &val.toObject());
+
+ // Check for existing TypeMonitor stub.
+ for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
+ if (iter->isTypeMonitor_SingleObject() &&
+ iter->toTypeMonitor_SingleObject()->object() == obj)
+ {
+ return true;
+ }
+ }
+
+ ICTypeMonitor_SingleObject::Compiler compiler(cx, obj);
+ ICStub* stub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!stub) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for singleton %p",
+ stub, obj.get());
+
+ addOptimizedMonitorStub(stub);
+
+ } else {
+ RootedObjectGroup group(cx, val.toObject().group());
+
+ // Check for existing TypeMonitor stub.
+ for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
+ if (iter->isTypeMonitor_ObjectGroup() &&
+ iter->toTypeMonitor_ObjectGroup()->group() == group)
+ {
+ return true;
+ }
+ }
+
+ ICTypeMonitor_ObjectGroup::Compiler compiler(cx, group);
+ ICStub* stub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+ if (!stub) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for ObjectGroup %p",
+ stub, group.get());
+
+ addOptimizedMonitorStub(stub);
+ }
+
+ bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0);
+
+ if (firstMonitorStubAdded) {
+        // The monitor chain was empty before, but a new stub was just added. This is
+        // the only time that any main stub's firstMonitorStub field needs to be
+        // updated to refer to the newly added monitor stub.
+ ICStub* firstStub = mainFallbackStub_->icEntry()->firstStub();
+ for (ICStubConstIterator iter(firstStub); !iter.atEnd(); iter++) {
+            // Non-monitored stubs are used if the result always has the same type,
+            // e.g. a StringLength stub will always return an int32.
+ if (!iter->isMonitored())
+ continue;
+
+ // Since we just added the first optimized monitoring stub, any
+ // existing main stub's |firstMonitorStub| MUST be pointing to the fallback
+ // monitor stub (i.e. this stub).
+ MOZ_ASSERT(iter->toMonitoredStub()->firstMonitorStub() == this);
+ iter->toMonitoredStub()->updateFirstMonitorStub(firstMonitorStub_);
+ }
+ }
+
+ return true;
+}
+
+static bool
+DoTypeMonitorFallback(JSContext* cx, void* payload, ICTypeMonitor_Fallback* stub,
+ HandleValue value, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, payload, stub->icEntry());
+ HandleScript script = info.innerScript();
+ jsbytecode* pc = stub->icEntry()->pc(script);
+ TypeFallbackICSpew(cx, stub, "TypeMonitor");
+
+ if (value.isMagic()) {
+ // It's possible that we arrived here from bailing out of Ion, and that
+ // Ion proved that the value is dead and optimized out. In such cases,
+ // do nothing. However, it's also possible that we have an uninitialized
+ // this, in which case we should not look for other magic values.
+
+ if (value.whyMagic() == JS_OPTIMIZED_OUT) {
+ MOZ_ASSERT(!stub->monitorsThis());
+ res.set(value);
+ return true;
+ }
+
+ // In derived class constructors (including nested arrows/eval), the
+ // |this| argument or GETALIASEDVAR can return the magic TDZ value.
+ MOZ_ASSERT(value.isMagic(JS_UNINITIALIZED_LEXICAL));
+ MOZ_ASSERT(info.frame()->isFunctionFrame() || info.frame()->isEvalFrame());
+ MOZ_ASSERT(stub->monitorsThis() ||
+ *GetNextPc(pc) == JSOP_CHECKTHIS ||
+ *GetNextPc(pc) == JSOP_CHECKRETURN);
+ }
+
+ uint32_t argument;
+ if (stub->monitorsThis()) {
+ MOZ_ASSERT(pc == script->code());
+ if (value.isMagic(JS_UNINITIALIZED_LEXICAL))
+ TypeScript::SetThis(cx, script, TypeSet::UnknownType());
+ else
+ TypeScript::SetThis(cx, script, value);
+ } else if (stub->monitorsArgument(&argument)) {
+ MOZ_ASSERT(pc == script->code());
+ MOZ_ASSERT(!value.isMagic(JS_UNINITIALIZED_LEXICAL));
+ TypeScript::SetArgument(cx, script, argument, value);
+ } else {
+ if (value.isMagic(JS_UNINITIALIZED_LEXICAL))
+ TypeScript::Monitor(cx, script, pc, TypeSet::UnknownType());
+ else
+ TypeScript::Monitor(cx, script, pc, value);
+ }
+
+ if (!stub->invalid() && !stub->addMonitorStubForValue(cx, &info, value))
+ return false;
+
+ // Copy input value to res.
+ res.set(value);
+ return true;
+}
+
+typedef bool (*DoTypeMonitorFallbackFn)(JSContext*, void*, ICTypeMonitor_Fallback*,
+ HandleValue, MutableHandleValue);
+static const VMFunction DoTypeMonitorFallbackInfo =
+ FunctionInfo<DoTypeMonitorFallbackFn>(DoTypeMonitorFallback, "DoTypeMonitorFallback",
+ TailCall);
+
+bool
+ICTypeMonitor_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoTypeMonitorFallbackInfo, masm);
+}
+
+bool
+ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label success;
+ if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
+ masm.branchTestInt32(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
+ masm.branchTestNumber(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
+ masm.branchTestUndefined(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
+ masm.branchTestBoolean(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
+ masm.branchTestString(Assembler::Equal, R0, &success);
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL))
+ masm.branchTestSymbol(Assembler::Equal, R0, &success);
+
+    // Currently, we will never generate primitive stub checks for object. However,
+    // when we do get to the point where we want to collapse our monitor chains of
+    // objects and singletons down (when they get too long) to a generic "any object"
+    // check, in coordination with the typeset doing the same thing, this will need
+    // to be re-enabled.
+ /*
+ if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
+ masm.branchTestObject(Assembler::Equal, R0, &success);
+ */
+ MOZ_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
+
+ if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
+ masm.branchTestNull(Assembler::Equal, R0, &success);
+
+ EmitStubGuardFailure(masm);
+
+ masm.bind(&success);
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+static void
+MaybeWorkAroundAmdBug(MacroAssembler& masm)
+{
+    // Attempt to work around an AMD bug (see bug 1034706 and bug 1281759) by
+    // inserting 32 bytes of NOPs.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (CPUInfo::NeedAmdBugWorkaround()) {
+ masm.nop(9);
+ masm.nop(9);
+ masm.nop(9);
+ masm.nop(5);
+ }
+#endif
+}
+
+bool
+ICTypeMonitor_SingleObject::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ MaybeWorkAroundAmdBug(masm);
+
+ // Guard on the object's identity.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ Address expectedObject(ICStubReg, ICTypeMonitor_SingleObject::offsetOfObject());
+ masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
+ MaybeWorkAroundAmdBug(masm);
+
+ EmitReturnFromIC(masm);
+ MaybeWorkAroundAmdBug(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICTypeMonitor_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+ MaybeWorkAroundAmdBug(masm);
+
+ // Guard on the object's ObjectGroup.
+ Register obj = masm.extractObject(R0, ExtractTemp0);
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), R1.scratchReg());
+
+ Address expectedGroup(ICStubReg, ICTypeMonitor_ObjectGroup::offsetOfGroup());
+ masm.branchPtr(Assembler::NotEqual, expectedGroup, R1.scratchReg(), &failure);
+ MaybeWorkAroundAmdBug(masm);
+
+ EmitReturnFromIC(masm);
+ MaybeWorkAroundAmdBug(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+bool
+ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, HandleObject obj,
+ HandleId id, HandleValue val)
+{
+ if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS) {
+ // TODO: if the TypeSet becomes unknown or has the AnyObject type,
+ // replace stubs with a single stub to handle these.
+ return true;
+ }
+
+ EnsureTrackPropertyTypes(cx, obj, id);
+
+ // Make sure that undefined values are explicitly included in the property
+ // types for an object if generating a stub to write an undefined value.
+ if (val.isUndefined() && CanHaveEmptyPropertyTypesForOwnProperty(obj))
+ AddTypePropertyId(cx, obj, id, val);
+
+ if (val.isPrimitive()) {
+ JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
+
+ // Check for existing TypeUpdate stub.
+ ICTypeUpdate_PrimitiveSet* existingStub = nullptr;
+ for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
+ if (iter->isTypeUpdate_PrimitiveSet()) {
+ existingStub = iter->toTypeUpdate_PrimitiveSet();
+ if (existingStub->containsType(type))
+ return true;
+ }
+ }
+
+ ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type);
+ ICStub* stub = existingStub ? compiler.updateStub()
+ : compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!stub)
+ return false;
+ if (!existingStub) {
+ MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet));
+ addOptimizedUpdateStub(stub);
+ }
+
+ JitSpew(JitSpew_BaselineIC, " %s TypeUpdate stub %p for primitive type %d",
+ existingStub ? "Modified existing" : "Created new", stub, type);
+
+ } else if (val.toObject().isSingleton()) {
+ RootedObject obj(cx, &val.toObject());
+
+ // Check for existing TypeUpdate stub.
+ for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
+ if (iter->isTypeUpdate_SingleObject() &&
+ iter->toTypeUpdate_SingleObject()->object() == obj)
+ {
+ return true;
+ }
+ }
+
+ ICTypeUpdate_SingleObject::Compiler compiler(cx, obj);
+ ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!stub)
+ return false;
+
+ JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for singleton %p", stub, obj.get());
+
+ addOptimizedUpdateStub(stub);
+
+ } else {
+ RootedObjectGroup group(cx, val.toObject().group());
+
+ // Check for existing TypeUpdate stub.
+ for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
+ if (iter->isTypeUpdate_ObjectGroup() &&
+ iter->toTypeUpdate_ObjectGroup()->group() == group)
+ {
+ return true;
+ }
+ }
+
+ ICTypeUpdate_ObjectGroup::Compiler compiler(cx, group);
+ ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript));
+ if (!stub)
+ return false;
+
+ JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for ObjectGroup %p",
+ stub, group.get());
+
+ addOptimizedUpdateStub(stub);
+ }
+
+ return true;
+}
+
+//
+// NewArray_Fallback
+//
+
+static bool
+DoNewArray(JSContext* cx, void* payload, ICNewArray_Fallback* stub, uint32_t length,
+ MutableHandleValue res)
+{
+ SharedStubInfo info(cx, payload, stub->icEntry());
+
+ FallbackICSpew(cx, stub, "NewArray");
+
+ RootedObject obj(cx);
+ if (stub->templateObject()) {
+ RootedObject templateObject(cx, stub->templateObject());
+ obj = NewArrayOperationWithTemplate(cx, templateObject);
+ if (!obj)
+ return false;
+ } else {
+ HandleScript script = info.script();
+ jsbytecode* pc = info.pc();
+ obj = NewArrayOperation(cx, script, pc, length);
+ if (!obj)
+ return false;
+
+ if (obj && !obj->isSingleton() && !obj->group()->maybePreliminaryObjects()) {
+ JSObject* templateObject = NewArrayOperation(cx, script, pc, length, TenuredObject);
+ if (!templateObject)
+ return false;
+ stub->setTemplateObject(templateObject);
+ }
+ }
+
+ res.setObject(*obj);
+ return true;
+}
+
+typedef bool(*DoNewArrayFn)(JSContext*, void*, ICNewArray_Fallback*, uint32_t,
+ MutableHandleValue);
+static const VMFunction DoNewArrayInfo =
+ FunctionInfo<DoNewArrayFn>(DoNewArray, "DoNewArray", TailCall);
+
+bool
+ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg()); // length
+ masm.push(ICStubReg); // stub.
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoNewArrayInfo, masm);
+}
+
+//
+// NewObject_Fallback
+//
+
+// Unlike typical baseline IC stubs, the code for NewObject_WithTemplate is
+// specialized for the template object being allocated.
+static JitCode*
+GenerateNewObjectWithTemplateCode(JSContext* cx, JSObject* templateObject)
+{
+ JitContext jctx(cx, nullptr);
+ MacroAssembler masm;
+#ifdef JS_CODEGEN_ARM
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+
+ Label failure;
+ Register objReg = R0.scratchReg();
+ Register tempReg = R1.scratchReg();
+ masm.movePtr(ImmGCPtr(templateObject->group()), tempReg);
+ masm.branchTest32(Assembler::NonZero, Address(tempReg, ObjectGroup::offsetOfFlags()),
+ Imm32(OBJECT_FLAG_PRE_TENURE), &failure);
+ masm.branchPtr(Assembler::NotEqual, AbsoluteAddress(cx->compartment()->addressOfMetadataBuilder()),
+ ImmWord(0), &failure);
+ masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, &failure);
+ masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
+
+ EmitReturnFromIC(masm);
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ Linker linker(masm);
+ AutoFlushICache afc("GenerateNewObjectWithTemplateCode");
+ return linker.newCode<CanGC>(cx, BASELINE_CODE);
+}
+
+static bool
+DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHandleValue res)
+{
+ SharedStubInfo info(cx, payload, stub->icEntry());
+
+ FallbackICSpew(cx, stub, "NewObject");
+
+ RootedObject obj(cx);
+
+ RootedObject templateObject(cx, stub->templateObject());
+ if (templateObject) {
+ MOZ_ASSERT(!templateObject->group()->maybePreliminaryObjects());
+ obj = NewObjectOperationWithTemplate(cx, templateObject);
+ } else {
+ HandleScript script = info.script();
+ jsbytecode* pc = info.pc();
+ obj = NewObjectOperation(cx, script, pc);
+
+ if (obj && !obj->isSingleton() && !obj->group()->maybePreliminaryObjects()) {
+ JSObject* templateObject = NewObjectOperation(cx, script, pc, TenuredObject);
+ if (!templateObject)
+ return false;
+
+ if (!stub->invalid() &&
+ (templateObject->is<UnboxedPlainObject>() ||
+ !templateObject->as<PlainObject>().hasDynamicSlots()))
+ {
+ JitCode* code = GenerateNewObjectWithTemplateCode(cx, templateObject);
+ if (!code)
+ return false;
+
+ ICStubSpace* space =
+ ICStubCompiler::StubSpaceForKind(ICStub::NewObject_WithTemplate, script,
+ ICStubCompiler::Engine::Baseline);
+ ICStub* templateStub = ICStub::New<ICNewObject_WithTemplate>(cx, space, code);
+ if (!templateStub)
+ return false;
+
+ stub->addNewStub(templateStub);
+ }
+
+ stub->setTemplateObject(templateObject);
+ }
+ }
+
+ if (!obj)
+ return false;
+
+ res.setObject(*obj);
+ return true;
+}
+
+typedef bool(*DoNewObjectFn)(JSContext*, void*, ICNewObject_Fallback*, MutableHandleValue);
+static const VMFunction DoNewObjectInfo =
+ FunctionInfo<DoNewObjectFn>(DoNewObject, "DoNewObject", TailCall);
+
+bool
+ICNewObject_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg); // stub.
+ pushStubPayload(masm, R0.scratchReg());
+
+ return tailCallVM(DoNewObjectInfo, masm);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/SharedIC.h b/js/src/jit/SharedIC.h
new file mode 100644
index 000000000..42198c890
--- /dev/null
+++ b/js/src/jit/SharedIC.h
@@ -0,0 +1,3120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedIC_h
+#define jit_SharedIC_h
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsgc.h"
+
+#include "jit/BaselineICList.h"
+#include "jit/BaselineJIT.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICList.h"
+#include "jit/SharedICRegisters.h"
+#include "vm/ReceiverGuard.h"
+#include "vm/TypedArrayCommon.h"
+
+namespace js {
+namespace jit {
+
+class AutoShapeVector;
+
+//
+// Baseline Inline Caches are polymorphic caches that aggressively
+// share their stub code.
+//
+// Every polymorphic site contains a linked list of stubs which are
+// specific to that site. These stubs are composed of a |StubData|
+// structure that stores parametrization information (e.g.
+// the shape pointer for a shape-check-and-property-get stub), any
+// dynamic information (e.g. warm-up counters), a pointer to the stub code,
+// and a pointer to the next stub state in the linked list.
+//
+// Every BaselineScript keeps a table of |CacheDescriptor| data
+// structures, which store the following:
+// A pointer to the first StubData in the cache.
+// The bytecode PC of the relevant IC.
+// The machine-code PC where the call to the stubcode returns.
+//
+// A diagram:
+//
+// Control flow Pointers
+// =======# ----. .---->
+// # | |
+// #======> \-----/
+//
+//
+// .---------------------------------------.
+// | .-------------------------. |
+// | | .----. | |
+// Baseline | | | | | |
+// JIT Code 0 ^ 1 ^ 2 ^ | | |
+// +--------------+ .-->+-----+ +-----+ +-----+ | | |
+// | | #=|==>| |==>| |==>| FB | | | |
+// | | # | +-----+ +-----+ +-----+ | | |
+// | | # | # # # | | |
+// |==============|==# | # # # | | |
+// |=== IC =======| | # # # | | |
+// .->|==============|<===|======#=========#=========# | | |
+// | | | | | | |
+// | | | | | | |
+// | | | | | | |
+// | | | | v | |
+// | | | | +---------+ | |
+// | | | | | Fallback| | |
+// | | | | | Stub | | |
+// | | | | | Code | | |
+// | | | | +---------+ | |
+// | +--------------+ | | |
+// | |_______ | +---------+ | |
+// | | | | Stub |<---/ |
+// | IC | \--. | Code | |
+// | Descriptor | | +---------+ |
+// | Table v | |
+// | +-----------------+ | +---------+ |
+// \--| Ins | PC | Stub |----/ | Stub |<-------/
+// +-----------------+ | Code |
+// | ... | +---------+
+// +-----------------+
+// Shared
+// Stub Code
+//
+//
+// Type ICs
+// ========
+//
+// Type ICs are otherwise regular ICs that are actually nested within
+// other IC chains. They serve to optimize locations in the code where the
+// baseline compiler would have otherwise had to perform a type Monitor operation
+// (e.g. the result of GetProp, GetElem, etc.), or locations where the baseline
+// compiler would have had to modify a heap typeset using the type of an input
+// value (e.g. SetProp, SetElem, etc.)
+//
+// There are two kinds of Type ICs: Monitor and Update.
+//
+// Note that type stub bodies are no-ops. The stubs only exist for their
+// guards, and their existence simply signifies that the typeset (implicit)
+// that is being checked already contains that type.
+//
+// TypeMonitor ICs
+// ---------------
+// Monitor ICs are shared between stubs in the general IC, and monitor the resulting
+// types of getter operations (call returns, getprop outputs, etc.)
+//
+// +-----------+ +-----------+ +-----------+ +-----------+
+// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub |
+// +-----------+ +-----------+ +-----------+ +-----------+
+// | | | |
+// |------------------/-----------------/ |
+// v |
+// +-----------+ +-----------+ +-----------+ |
+// | Type 1 |---->| Type 2 |---->| Type FB | |
+// +-----------+ +-----------+ +-----------+ |
+// | | | |
+// <----------/-----------------/------------------/------------------/
+// r e t u r n p a t h
+//
+// After an optimized IC stub successfully executes, it passes control to the type stub
+// chain to check the resulting type. If no type stub succeeds, and the monitor fallback
+// stub is reached, the monitor fallback stub performs a manual monitor, and also adds the
+// appropriate type stub to the chain.
+//
+// The IC's main fallback, in addition to generating new mainline stubs, also generates
+// type stubs as reflected by its returned value.
+//
+// NOTE: The type IC chain returns directly to the mainline code, not back to the
+// stub it was entered from. Thus, entering a type IC is a matter of a |jump|, not
+// a |call|. This allows us to safely call a VM Monitor function from within the monitor IC's
+// fallback chain, since the return address (needed for stack inspection) is preserved.
+//
+//
+// TypeUpdate ICs
+// --------------
+// Update ICs update heap typesets and monitor the input types of setter operations
+// (setelem, setprop inputs, etc.). Unlike monitor ICs, they are not shared
+// between stubs on an IC, but are instead tracked on a per-stub basis.
+//
+// This is because the main stubs for the operation will each identify a potentially
+// different ObjectGroup to update. New input types must be tracked on a group-to-
+// group basis.
+//
+// Type-update ICs cannot be called in tail position (they must return to the
+// stub that called them so that the stub may continue to perform its original
+// purpose). This means that any VMCall to perform a manual type update from C++ must be
+// done from within the main IC stub. This necessitates that the stub enter a
+// "BaselineStub" frame before making the call.
+//
+// If the type-update IC chain could itself make the VMCall, then the BaselineStub frame
+// must be entered before calling the type-update chain, and exited afterward. This
+// is very expensive for a common case where we expect the type-update fallback to not
+// be called. To avoid the cost of entering and exiting a BaselineStub frame when
+// using the type-update IC chain, we design the chain to not perform any VM-calls
+// in its fallback.
+//
+// Instead, the type-update IC chain is responsible for returning 1 or 0, depending
+// on whether a type is represented in the chain or not. The fallback stub simply
+// returns 0, and all other optimized stubs return 1.
+// If the chain returns 1, then the IC stub goes ahead and performs its operation.
+// If the chain returns 0, then the IC stub performs a call to the fallback function
+// inline (doing the requisite BaselineStub frame enter/exit).
+// This allows us to avoid the expensive subframe enter/exit in the common case.
+//
+// r e t u r n p a t h
+// <--------------.-----------------.-----------------.-----------------.
+// | | | |
+// +-----------+ +-----------+ +-----------+ +-----------+
+// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub |
+// +-----------+ +-----------+ +-----------+ +-----------+
+// | ^ | ^ | ^
+// | | | | | |
+// | | | | | |----------------.
+// | | | | v |1 |0
+// | | | | +-----------+ +-----------+
+// | | | | | Type 3.1 |--->| FB 3 |
+// | | | | +-----------+ +-----------+
+// | | | |
+// | | | \-------------.-----------------.
+// | | | | | |
+// | | v |1 |1 |0
+// | | +-----------+ +-----------+ +-----------+
+// | | | Type 2.1 |---->| Type 2.2 |---->| FB 2 |
+// | | +-----------+ +-----------+ +-----------+
+// | |
+// | \-------------.-----------------.
+// | | | |
+// v |1 |1 |0
+// +-----------+ +-----------+ +-----------+
+// | Type 1.1 |---->| Type 1.2 |---->| FB 1 |
+// +-----------+ +-----------+ +-----------+
+//
+
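+// The "guards only, no-op body" nature of type stubs described above can be seen
+// in the code generated for a single-object monitor stub (cf.
+// ICTypeMonitor_SingleObject::Compiler::generateStubCode in SharedIC.cpp); the
+// snippet below is an abbreviated sketch of that code, with the AMD workaround
+// NOPs omitted:
+//
+//     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+//     Register obj = masm.extractObject(R0, ExtractTemp0);
+//     Address expectedObject(ICStubReg, ICTypeMonitor_SingleObject::offsetOfObject());
+//     masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
+//     EmitReturnFromIC(masm);        // type already known; nothing else to do
+//     masm.bind(&failure);
+//     EmitStubGuardFailure(masm);    // try the next stub in the monitor chain
+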
+class ICStub;
+class ICFallbackStub;
+
+#define FORWARD_DECLARE_STUBS(kindName) class IC##kindName;
+ IC_BASELINE_STUB_KIND_LIST(FORWARD_DECLARE_STUBS)
+ IC_SHARED_STUB_KIND_LIST(FORWARD_DECLARE_STUBS)
+#undef FORWARD_DECLARE_STUBS
+
+#ifdef JS_JITSPEW
+void FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(3, 4);
+void TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(3, 4);
+#else
+#define FallbackICSpew(...)
+#define TypeFallbackICSpew(...)
+#endif
+
+//
+// An entry in the JIT IC descriptor table.
+//
+class ICEntry
+{
+ private:
+ // A pointer to the shared IC stub for this instruction.
+ ICStub* firstStub_;
+
+ // Offset from the start of the JIT code where the IC
+ // load and call instructions are.
+ uint32_t returnOffset_;
+
+ // The PC of this IC's bytecode op within the JSScript.
+ uint32_t pcOffset_ : 28;
+
+ public:
+ enum Kind {
+ // A for-op IC entry.
+ Kind_Op = 0,
+
+ // A non-op IC entry.
+ Kind_NonOp,
+
+ // A fake IC entry for returning from a callVM for an op.
+ Kind_CallVM,
+
+ // A fake IC entry for returning from a callVM not for an op (e.g., in
+ // the prologue).
+ Kind_NonOpCallVM,
+
+ // A fake IC entry for returning from a callVM to after the
+ // warmup counter.
+ Kind_WarmupCounter,
+
+ // A fake IC entry for returning from a callVM to the interrupt
+ // handler via the over-recursion check on function entry.
+ Kind_StackCheck,
+
+ // As above, but for the early check. See emitStackCheck.
+ Kind_EarlyStackCheck,
+
+ // A fake IC entry for returning from DebugTrapHandler.
+ Kind_DebugTrap,
+
+ // A fake IC entry for returning from a callVM to
+ // Debug{Prologue,Epilogue}.
+ Kind_DebugPrologue,
+ Kind_DebugEpilogue,
+
+ Kind_Invalid
+ };
+
+ private:
+ // What this IC is for.
+ Kind kind_ : 4;
+
+    // Sets the kind and asserts that it's sane.
+ void setKind(Kind kind) {
+ MOZ_ASSERT(kind < Kind_Invalid);
+ kind_ = kind;
+ MOZ_ASSERT(this->kind() == kind);
+ }
+
+ public:
+ ICEntry(uint32_t pcOffset, Kind kind)
+ : firstStub_(nullptr), returnOffset_(), pcOffset_(pcOffset)
+ {
+ // The offset must fit in at least 28 bits, since we shave off 4 for
+ // the Kind enum.
+ MOZ_ASSERT(pcOffset_ == pcOffset);
+ JS_STATIC_ASSERT(BaselineScript::MAX_JSSCRIPT_LENGTH <= (1u << 28) - 1);
+ MOZ_ASSERT(pcOffset <= BaselineScript::MAX_JSSCRIPT_LENGTH);
+ setKind(kind);
+ }
+
+ CodeOffset returnOffset() const {
+ return CodeOffset(returnOffset_);
+ }
+
+ void setReturnOffset(CodeOffset offset) {
+ MOZ_ASSERT(offset.offset() <= (size_t) UINT32_MAX);
+ returnOffset_ = (uint32_t) offset.offset();
+ }
+
+ uint32_t pcOffset() const {
+ return pcOffset_;
+ }
+
+ jsbytecode* pc(JSScript* script) const {
+ return script->offsetToPC(pcOffset_);
+ }
+
+ Kind kind() const {
+ // MSVC compiles enums as signed.
+ return Kind(kind_ & 0xf);
+ }
+ bool isForOp() const {
+ return kind() == Kind_Op;
+ }
+
+ void setFakeKind(Kind kind) {
+ MOZ_ASSERT(kind != Kind_Op && kind != Kind_NonOp);
+ setKind(kind);
+ }
+
+ bool hasStub() const {
+ return firstStub_ != nullptr;
+ }
+ ICStub* firstStub() const {
+ MOZ_ASSERT(hasStub());
+ return firstStub_;
+ }
+
+ ICFallbackStub* fallbackStub() const;
+
+ void setFirstStub(ICStub* stub) {
+ firstStub_ = stub;
+ }
+
+ static inline size_t offsetOfFirstStub() {
+ return offsetof(ICEntry, firstStub_);
+ }
+
+ inline ICStub** addressOfFirstStub() {
+ return &firstStub_;
+ }
+
+ protected:
+ void traceEntry(JSTracer* trc);
+};
+
+class BaselineICEntry : public ICEntry
+{
+ public:
+ BaselineICEntry(uint32_t pcOffset, Kind kind)
+ : ICEntry(pcOffset, kind)
+ { }
+
+ void trace(JSTracer* trc);
+};
+
+class IonICEntry : public ICEntry
+{
+ JSScript* script_;
+
+ public:
+ IonICEntry(uint32_t pcOffset, Kind kind, JSScript* script)
+ : ICEntry(pcOffset, kind),
+ script_(script)
+ { }
+
+ JSScript* script() {
+ return script_;
+ }
+
+ void trace(JSTracer* trc);
+};
+
+class ICMonitoredStub;
+class ICMonitoredFallbackStub;
+class ICUpdatedStub;
+
+// Constant iterator that traverses arbitrary chains of ICStubs.
+// No requirements are made of the ICStub used to construct this
+// iterator, other than that the stub must be part of a nullptr-terminated
+// chain.
+// The iterator is considered to be at its end once it has been
+// incremented _past_ the last stub. Thus, if 'atEnd()' returns
+// true, the '*' and '->' operations are not valid.
+class ICStubConstIterator
+{
+ friend class ICStub;
+ friend class ICFallbackStub;
+
+ private:
+ ICStub* currentStub_;
+
+ public:
+ explicit ICStubConstIterator(ICStub* currentStub) : currentStub_(currentStub) {}
+
+ static ICStubConstIterator StartingAt(ICStub* stub) {
+ return ICStubConstIterator(stub);
+ }
+ static ICStubConstIterator End(ICStub* stub) {
+ return ICStubConstIterator(nullptr);
+ }
+
+ bool operator ==(const ICStubConstIterator& other) const {
+ return currentStub_ == other.currentStub_;
+ }
+ bool operator !=(const ICStubConstIterator& other) const {
+ return !(*this == other);
+ }
+
+ ICStubConstIterator& operator++();
+
+ ICStubConstIterator operator++(int) {
+ ICStubConstIterator oldThis(*this);
+ ++(*this);
+ return oldThis;
+ }
+
+ ICStub* operator*() const {
+ MOZ_ASSERT(currentStub_);
+ return currentStub_;
+ }
+
+ ICStub* operator ->() const {
+ MOZ_ASSERT(currentStub_);
+ return currentStub_;
+ }
+
+ bool atEnd() const {
+ return currentStub_ == nullptr;
+ }
+};
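+
+// A minimal usage sketch, mirroring how addMonitorStubForValue in SharedIC.cpp
+// scans a monitor chain for an already-attached stub of a given kind:
+//
+//     for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
+//         if (iter->isTypeMonitor_PrimitiveSet())
+//             existingStub = iter->toTypeMonitor_PrimitiveSet();
+//     }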
+
+// Iterator that traverses "regular" IC chains that start at an ICEntry
+// and are terminated with an ICFallbackStub.
+//
+// The iterator is considered to be at its end once it is _at_ the
+// fallback stub. Thus, unlike the ICStubConstIterator, operators
+// '*' and '->' are valid even if 'atEnd()' returns true - they
+// will act on the fallback stub.
+//
+// This iterator also allows unlinking of stubs being traversed.
+// Note that 'unlink' does not implicitly advance the iterator -
+// it must be advanced explicitly using '++'.
+class ICStubIterator
+{
+ friend class ICFallbackStub;
+
+ private:
+ ICEntry* icEntry_;
+ ICFallbackStub* fallbackStub_;
+ ICStub* previousStub_;
+ ICStub* currentStub_;
+ bool unlinked_;
+
+ explicit ICStubIterator(ICFallbackStub* fallbackStub, bool end=false);
+ public:
+
+ bool operator ==(const ICStubIterator& other) const {
+ // == should only ever be called on stubs from the same chain.
+ MOZ_ASSERT(icEntry_ == other.icEntry_);
+ MOZ_ASSERT(fallbackStub_ == other.fallbackStub_);
+ return currentStub_ == other.currentStub_;
+ }
+ bool operator !=(const ICStubIterator& other) const {
+ return !(*this == other);
+ }
+
+ ICStubIterator& operator++();
+
+ ICStubIterator operator++(int) {
+ ICStubIterator oldThis(*this);
+ ++(*this);
+ return oldThis;
+ }
+
+ ICStub* operator*() const {
+ return currentStub_;
+ }
+
+ ICStub* operator ->() const {
+ return currentStub_;
+ }
+
+ bool atEnd() const {
+ return currentStub_ == (ICStub*) fallbackStub_;
+ }
+
+ void unlink(JSContext* cx);
+};
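+
+// A typical unlinking loop over a main IC chain (sketch; |fallback| stands for
+// some ICFallbackStub* and |unwantedKind| for the kind being purged). Per the
+// comment above, unlink() does not advance the iterator; the loop's ++ does:
+//
+//     for (ICStubIterator iter = fallback->beginChain(); !iter.atEnd(); iter++) {
+//         if (iter->kind() == unwantedKind)
+//             iter.unlink(cx);
+//     }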
+
+//
+// Base class for all IC stubs.
+//
+class ICStub
+{
+ friend class ICFallbackStub;
+
+ public:
+ enum Kind {
+ INVALID = 0,
+#define DEF_ENUM_KIND(kindName) kindName,
+ IC_BASELINE_STUB_KIND_LIST(DEF_ENUM_KIND)
+ IC_SHARED_STUB_KIND_LIST(DEF_ENUM_KIND)
+#undef DEF_ENUM_KIND
+ LIMIT
+ };
+
+ static inline bool IsValidKind(Kind k) {
+ return (k > INVALID) && (k < LIMIT);
+ }
+
+ static const char* KindString(Kind k) {
+ switch(k) {
+#define DEF_KIND_STR(kindName) case kindName: return #kindName;
+ IC_BASELINE_STUB_KIND_LIST(DEF_KIND_STR)
+ IC_SHARED_STUB_KIND_LIST(DEF_KIND_STR)
+#undef DEF_KIND_STR
+ default:
+ MOZ_CRASH("Invalid kind.");
+ }
+ }
+
+ enum Trait {
+ Regular = 0x0,
+ Fallback = 0x1,
+ Monitored = 0x2,
+ MonitoredFallback = 0x3,
+ Updated = 0x4
+ };
+
+ void markCode(JSTracer* trc, const char* name);
+ void updateCode(JitCode* stubCode);
+ void trace(JSTracer* trc);
+
+ template <typename T, typename... Args>
+ static T* New(JSContext* cx, ICStubSpace* space, JitCode* code, Args&&... args) {
+ if (!code)
+ return nullptr;
+ T* result = space->allocate<T>(code, mozilla::Forward<Args>(args)...);
+ if (!result)
+ ReportOutOfMemory(cx);
+ return result;
+ }
+
+ protected:
+ // The raw jitcode to call for this stub.
+ uint8_t* stubCode_;
+
+    // Pointer to the next IC stub. This is null for the last IC stub, which should
+    // be either a fallback or an inert IC stub.
+ ICStub* next_;
+
+    // A 16-bit field usable by subtypes of ICStub for subtype-specific data.
+ uint16_t extra_;
+
+    // The trait and kind of the stub. The trait_ bits record whether this is a
+    // regular, fallback, monitored, monitored-fallback, or updated stub.
+ Trait trait_ : 3;
+ Kind kind_ : 13;
+
+ inline ICStub(Kind kind, JitCode* stubCode)
+ : stubCode_(stubCode->raw()),
+ next_(nullptr),
+ extra_(0),
+ trait_(Regular),
+ kind_(kind)
+ {
+ MOZ_ASSERT(stubCode != nullptr);
+ }
+
+ inline ICStub(Kind kind, Trait trait, JitCode* stubCode)
+ : stubCode_(stubCode->raw()),
+ next_(nullptr),
+ extra_(0),
+ trait_(trait),
+ kind_(kind)
+ {
+ MOZ_ASSERT(stubCode != nullptr);
+ }
+
+ inline Trait trait() const {
+        // Workaround for MSVC reading trait_ as a signed value.
+ return (Trait)(trait_ & 0x7);
+ }
+
+ public:
+
+ inline Kind kind() const {
+ return static_cast<Kind>(kind_);
+ }
+
+ inline bool isFallback() const {
+ return trait() == Fallback || trait() == MonitoredFallback;
+ }
+
+ inline bool isMonitored() const {
+ return trait() == Monitored;
+ }
+
+ inline bool isUpdated() const {
+ return trait() == Updated;
+ }
+
+ inline bool isMonitoredFallback() const {
+ return trait() == MonitoredFallback;
+ }
+
+ inline const ICFallbackStub* toFallbackStub() const {
+ MOZ_ASSERT(isFallback());
+ return reinterpret_cast<const ICFallbackStub*>(this);
+ }
+
+ inline ICFallbackStub* toFallbackStub() {
+ MOZ_ASSERT(isFallback());
+ return reinterpret_cast<ICFallbackStub*>(this);
+ }
+
+ inline const ICMonitoredStub* toMonitoredStub() const {
+ MOZ_ASSERT(isMonitored());
+ return reinterpret_cast<const ICMonitoredStub*>(this);
+ }
+
+ inline ICMonitoredStub* toMonitoredStub() {
+ MOZ_ASSERT(isMonitored());
+ return reinterpret_cast<ICMonitoredStub*>(this);
+ }
+
+ inline const ICMonitoredFallbackStub* toMonitoredFallbackStub() const {
+ MOZ_ASSERT(isMonitoredFallback());
+ return reinterpret_cast<const ICMonitoredFallbackStub*>(this);
+ }
+
+ inline ICMonitoredFallbackStub* toMonitoredFallbackStub() {
+ MOZ_ASSERT(isMonitoredFallback());
+ return reinterpret_cast<ICMonitoredFallbackStub*>(this);
+ }
+
+ inline const ICUpdatedStub* toUpdatedStub() const {
+ MOZ_ASSERT(isUpdated());
+ return reinterpret_cast<const ICUpdatedStub*>(this);
+ }
+
+ inline ICUpdatedStub* toUpdatedStub() {
+ MOZ_ASSERT(isUpdated());
+ return reinterpret_cast<ICUpdatedStub*>(this);
+ }
+
+#define KIND_METHODS(kindName) \
+ inline bool is##kindName() const { return kind() == kindName; } \
+ inline const IC##kindName* to##kindName() const { \
+ MOZ_ASSERT(is##kindName()); \
+ return reinterpret_cast<const IC##kindName*>(this); \
+ } \
+ inline IC##kindName* to##kindName() { \
+ MOZ_ASSERT(is##kindName()); \
+ return reinterpret_cast<IC##kindName*>(this); \
+ }
+ IC_BASELINE_STUB_KIND_LIST(KIND_METHODS)
+ IC_SHARED_STUB_KIND_LIST(KIND_METHODS)
+#undef KIND_METHODS
+
+ inline ICStub* next() const {
+ return next_;
+ }
+
+ inline bool hasNext() const {
+ return next_ != nullptr;
+ }
+
+ inline void setNext(ICStub* stub) {
+ // Note: next_ only needs to be changed under the compilation lock for
+ // non-type-monitor/update ICs.
+ next_ = stub;
+ }
+
+ inline ICStub** addressOfNext() {
+ return &next_;
+ }
+
+ inline JitCode* jitCode() {
+ return JitCode::FromExecutable(stubCode_);
+ }
+
+ inline uint8_t* rawStubCode() const {
+ return stubCode_;
+ }
+
+ // This method is not valid on TypeUpdate stub chains!
+ inline ICFallbackStub* getChainFallback() {
+ ICStub* lastStub = this;
+ while (lastStub->next_)
+ lastStub = lastStub->next_;
+ MOZ_ASSERT(lastStub->isFallback());
+ return lastStub->toFallbackStub();
+ }
+
+ inline ICStubConstIterator beginHere() {
+ return ICStubConstIterator::StartingAt(this);
+ }
+
+ static inline size_t offsetOfNext() {
+ return offsetof(ICStub, next_);
+ }
+
+ static inline size_t offsetOfStubCode() {
+ return offsetof(ICStub, stubCode_);
+ }
+
+ static inline size_t offsetOfExtra() {
+ return offsetof(ICStub, extra_);
+ }
+
+ static bool CanMakeCalls(ICStub::Kind kind) {
+ MOZ_ASSERT(IsValidKind(kind));
+ switch (kind) {
+ case Call_Fallback:
+ case Call_Scripted:
+ case Call_AnyScripted:
+ case Call_Native:
+ case Call_ClassHook:
+ case Call_ScriptedApplyArray:
+ case Call_ScriptedApplyArguments:
+ case Call_ScriptedFunCall:
+ case Call_StringSplit:
+ case WarmUpCounter_Fallback:
+ case GetElem_NativeSlotName:
+ case GetElem_NativeSlotSymbol:
+ case GetElem_NativePrototypeSlotName:
+ case GetElem_NativePrototypeSlotSymbol:
+ case GetElem_NativePrototypeCallNativeName:
+ case GetElem_NativePrototypeCallNativeSymbol:
+ case GetElem_NativePrototypeCallScriptedName:
+ case GetElem_NativePrototypeCallScriptedSymbol:
+ case GetElem_UnboxedPropertyName:
+ case GetProp_CallScripted:
+ case GetProp_CallNative:
+ case GetProp_CallNativeGlobal:
+ case GetProp_CallDOMProxyNative:
+ case GetProp_CallDOMProxyWithGenerationNative:
+ case GetProp_DOMProxyShadowed:
+ case GetProp_Generic:
+ case SetProp_CallScripted:
+ case SetProp_CallNative:
+ case RetSub_Fallback:
+ // These two fallback stubs don't actually make non-tail calls,
+ // but the fallback code for the bailout path needs to pop the stub frame
+ // pushed during the bailout.
+ case GetProp_Fallback:
+ case SetProp_Fallback:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Optimized stubs get purged on GC. But some stubs can be active on the
+ // stack during GC - specifically the ones that can make calls. To ensure
+ // that these do not get purged, all stubs that can make calls are allocated
+ // in the fallback stub space.
+ bool allocatedInFallbackSpace() const {
+ MOZ_ASSERT(next());
+ return CanMakeCalls(kind());
+ }
+};
+
+class ICFallbackStub : public ICStub
+{
+ friend class ICStubConstIterator;
+ protected:
+ // Fallback stubs need these fields to easily add new stubs to
+ // the linked list of stubs for an IC.
+
+ // The IC entry for this linked list of stubs.
+ ICEntry* icEntry_;
+
+ // The number of stubs kept in the IC entry.
+ uint32_t numOptimizedStubs_ : 31;
+ uint32_t invalid_ : 1;
+
+    // A pointer to the stub-pointer slot that needs to be changed to add a new
+    // "last" stub immediately before the fallback stub. This starts out pointing
+    // to the icEntry's "firstStub_" field; as new stubs are added, it points to
+    // the current last stub's "next_" field.
+ ICStub** lastStubPtrAddr_;
+
+ ICFallbackStub(Kind kind, JitCode* stubCode)
+ : ICStub(kind, ICStub::Fallback, stubCode),
+ icEntry_(nullptr),
+ numOptimizedStubs_(0),
+ invalid_(false),
+ lastStubPtrAddr_(nullptr) {}
+
+ ICFallbackStub(Kind kind, Trait trait, JitCode* stubCode)
+ : ICStub(kind, trait, stubCode),
+ icEntry_(nullptr),
+ numOptimizedStubs_(0),
+ invalid_(false),
+ lastStubPtrAddr_(nullptr)
+ {
+ MOZ_ASSERT(trait == ICStub::Fallback ||
+ trait == ICStub::MonitoredFallback);
+ }
+
+ public:
+ inline ICEntry* icEntry() const {
+ return icEntry_;
+ }
+
+ inline size_t numOptimizedStubs() const {
+ return (size_t) numOptimizedStubs_;
+ }
+
+ void setInvalid() {
+ invalid_ = 1;
+ }
+
+ bool invalid() const {
+ return invalid_;
+ }
+
+    // The icEntry_ and lastStubPtrAddr_ fields can't be initialized when the stub is
+    // created, since the stub is created at compile time and we won't know the IC
+    // entry address until after compilation, when the JitScript is created. This
+    // method allows these fields to be fixed up at that point.
+ void fixupICEntry(ICEntry* icEntry) {
+ MOZ_ASSERT(icEntry_ == nullptr);
+ MOZ_ASSERT(lastStubPtrAddr_ == nullptr);
+ icEntry_ = icEntry;
+ lastStubPtrAddr_ = icEntry_->addressOfFirstStub();
+ }
+
+ // Add a new stub to the IC chain terminated by this fallback stub.
+ void addNewStub(ICStub* stub) {
+ MOZ_ASSERT(!invalid());
+ MOZ_ASSERT(*lastStubPtrAddr_ == this);
+ MOZ_ASSERT(stub->next() == nullptr);
+ stub->setNext(this);
+ *lastStubPtrAddr_ = stub;
+ lastStubPtrAddr_ = stub->addressOfNext();
+ numOptimizedStubs_++;
+ }
+
+ ICStubConstIterator beginChainConst() const {
+ return ICStubConstIterator(icEntry_->firstStub());
+ }
+
+ ICStubIterator beginChain() {
+ return ICStubIterator(this);
+ }
+
+ bool hasStub(ICStub::Kind kind) const {
+ for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->kind() == kind)
+ return true;
+ }
+ return false;
+ }
+
+ unsigned numStubsWithKind(ICStub::Kind kind) const {
+ unsigned count = 0;
+ for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) {
+ if (iter->kind() == kind)
+ count++;
+ }
+ return count;
+ }
+
+ void unlinkStub(Zone* zone, ICStub* prev, ICStub* stub);
+ void unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind);
+};
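+
+// The usual pattern in a Do*Fallback handler (see DoNewObject in SharedIC.cpp for
+// a concrete instance) is to compile an optimized stub and attach it in front of
+// the fallback stub; sketched below, with the compiler type and its arguments
+// varying per IC:
+//
+//     SomeIC::Compiler compiler(cx, /* IC-specific arguments */);
+//     ICStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
+//     if (!newStub)
+//         return false;
+//     fallbackStub->addNewStub(newStub);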
+
+// Monitored stubs are IC stubs that feed a single resulting value out to a
+// type monitor operation.
+class ICMonitoredStub : public ICStub
+{
+ protected:
+ // Pointer to the start of the type monitoring stub chain.
+ ICStub* firstMonitorStub_;
+
+ ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub);
+
+ public:
+ inline void updateFirstMonitorStub(ICStub* monitorStub) {
+ // This should only be called once: when the first optimized monitor stub
+ // is added to the type monitor IC chain.
+ MOZ_ASSERT(firstMonitorStub_ && firstMonitorStub_->isTypeMonitor_Fallback());
+ firstMonitorStub_ = monitorStub;
+ }
+ inline void resetFirstMonitorStub(ICStub* monitorFallback) {
+ MOZ_ASSERT(monitorFallback->isTypeMonitor_Fallback());
+ firstMonitorStub_ = monitorFallback;
+ }
+ inline ICStub* firstMonitorStub() const {
+ return firstMonitorStub_;
+ }
+
+ static inline size_t offsetOfFirstMonitorStub() {
+ return offsetof(ICMonitoredStub, firstMonitorStub_);
+ }
+};
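+
+// Usage sketch (cf. addMonitorStubForValue in SharedIC.cpp): when the first
+// optimized monitor stub is added to a previously empty monitor chain, every
+// monitored main stub in the IC is repointed at it. |firstStub| and
+// |newFirstMonitorStub| are placeholders for the IC entry's first stub and the
+// newly added monitor stub:
+//
+//     for (ICStubConstIterator iter(firstStub); !iter.atEnd(); iter++) {
+//         if (iter->isMonitored())
+//             iter->toMonitoredStub()->updateFirstMonitorStub(newFirstMonitorStub);
+//     }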
+
+class ICCacheIR_Monitored : public ICMonitoredStub
+{
+ CacheIRStubInfo* stubInfo_;
+
+ public:
+ ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub, CacheIRStubInfo* stubInfo)
+ : ICMonitoredStub(ICStub::CacheIR_Monitored, stubCode, firstMonitorStub),
+ stubInfo_(stubInfo)
+ {}
+
+ void notePreliminaryObject() {
+ extra_ = 1;
+ }
+ bool hasPreliminaryObject() const {
+ return extra_;
+ }
+
+ const CacheIRStubInfo* stubInfo() const {
+ return stubInfo_;
+ }
+};
+
+// Updated stubs are IC stubs that use a TypeUpdate IC to track
+// the status of heap typesets that need to be updated.
+class ICUpdatedStub : public ICStub
+{
+ protected:
+ // Pointer to the start of the type updating stub chain.
+ ICStub* firstUpdateStub_;
+
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+ uint32_t numOptimizedStubs_;
+
+ ICUpdatedStub(Kind kind, JitCode* stubCode)
+ : ICStub(kind, ICStub::Updated, stubCode),
+ firstUpdateStub_(nullptr),
+ numOptimizedStubs_(0)
+ {}
+
+ public:
+ MOZ_MUST_USE bool initUpdatingChain(JSContext* cx, ICStubSpace* space);
+
+ MOZ_MUST_USE bool addUpdateStubForValue(JSContext* cx, HandleScript script, HandleObject obj,
+ HandleId id, HandleValue val);
+
+ void addOptimizedUpdateStub(ICStub* stub) {
+ if (firstUpdateStub_->isTypeUpdate_Fallback()) {
+ stub->setNext(firstUpdateStub_);
+ firstUpdateStub_ = stub;
+ } else {
+ ICStub* iter = firstUpdateStub_;
+ MOZ_ASSERT(iter->next() != nullptr);
+ while (!iter->next()->isTypeUpdate_Fallback())
+ iter = iter->next();
+ MOZ_ASSERT(iter->next()->next() == nullptr);
+ stub->setNext(iter->next());
+ iter->setNext(stub);
+ }
+
+ numOptimizedStubs_++;
+ }
+
+ inline ICStub* firstUpdateStub() const {
+ return firstUpdateStub_;
+ }
+
+ bool hasTypeUpdateStub(ICStub::Kind kind) {
+ ICStub* stub = firstUpdateStub_;
+ do {
+ if (stub->kind() == kind)
+ return true;
+
+ stub = stub->next();
+ } while (stub);
+
+ return false;
+ }
+
+ inline uint32_t numOptimizedStubs() const {
+ return numOptimizedStubs_;
+ }
+
+ static inline size_t offsetOfFirstUpdateStub() {
+ return offsetof(ICUpdatedStub, firstUpdateStub_);
+ }
+};
+
+// Base class for stubcode compilers.
+class ICStubCompiler
+{
+ // Prevent GC in the middle of stub compilation.
+ js::gc::AutoSuppressGC suppressGC;
+
+ public:
+ enum class Engine {
+ Baseline = 0,
+ IonMonkey
+ };
+
+ protected:
+ JSContext* cx;
+ ICStub::Kind kind;
+ Engine engine_;
+ bool inStubFrame_;
+
+#ifdef DEBUG
+ bool entersStubFrame_;
+ uint32_t framePushedAtEnterStubFrame_;
+#endif
+
+    // By default the stubcode key combines the engine and the kind.
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1);
+ }
+
+ virtual MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) = 0;
+ virtual void postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> genCode) {}
+
+ JitCode* getStubCode();
+
+ ICStubCompiler(JSContext* cx, ICStub::Kind kind, Engine engine)
+ : suppressGC(cx), cx(cx), kind(kind), engine_(engine), inStubFrame_(false)
+#ifdef DEBUG
+ , entersStubFrame_(false), framePushedAtEnterStubFrame_(0)
+#endif
+ {}
+
+    // Push the engine-specific payload needed to execute stubs.
+ void PushStubPayload(MacroAssembler& masm, Register scratch);
+ void pushStubPayload(MacroAssembler& masm, Register scratch);
+
+ // Emits a tail call to a VMFunction wrapper.
+ MOZ_MUST_USE bool tailCallVM(const VMFunction& fun, MacroAssembler& masm);
+
+ // Emits a normal (non-tail) call to a VMFunction wrapper.
+ MOZ_MUST_USE bool callVM(const VMFunction& fun, MacroAssembler& masm);
+
+ // Emits a call to a type-update IC, assuming that the value to be
+ // checked is already in R0.
+ MOZ_MUST_USE bool callTypeUpdateIC(MacroAssembler& masm, uint32_t objectOffset);
+
+ // A stub frame is used when a stub wants to call into the VM without
+ // performing a tail call. This is required for the return address
+ // to pc mapping to work.
+ void enterStubFrame(MacroAssembler& masm, Register scratch);
+ void leaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false);
+
+ // Some stubs need to emit SPS profiler updates. This emits the guarding
+ // jitcode for those stubs. If profiling is not enabled, jumps to the
+ // given label.
+ void guardProfilingEnabled(MacroAssembler& masm, Register scratch, Label* skip);
+
+ public:
+ static inline AllocatableGeneralRegisterSet availableGeneralRegs(size_t numInputs) {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+#if defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(!regs.has(BaselineStackReg));
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+ regs.take(BaselineSecondScratchReg);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(!regs.has(BaselineStackReg));
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+ MOZ_ASSERT(!regs.has(BaselineSecondScratchReg));
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(!regs.has(PseudoStackPointer));
+ MOZ_ASSERT(!regs.has(RealStackPointer));
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+#else
+ MOZ_ASSERT(!regs.has(BaselineStackReg));
+#endif
+ regs.take(BaselineFrameReg);
+ regs.take(ICStubReg);
+#ifdef JS_CODEGEN_X64
+ regs.take(ExtractTemp0);
+ regs.take(ExtractTemp1);
+#endif
+
+ switch (numInputs) {
+ case 0:
+ break;
+ case 1:
+ regs.take(R0);
+ break;
+ case 2:
+ regs.take(R0);
+ regs.take(R1);
+ break;
+ default:
+ MOZ_CRASH("Invalid numInputs");
+ }
+
+ return regs;
+ }
+
+ protected:
+ void emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
+ Register scratch, LiveGeneralRegisterSet saveRegs);
+
+ template <typename T, typename... Args>
+ T* newStub(Args&&... args) {
+ return ICStub::New<T>(cx, mozilla::Forward<Args>(args)...);
+ }
+
+ public:
+ virtual ICStub* getStub(ICStubSpace* space) = 0;
+
+ static ICStubSpace* StubSpaceForKind(ICStub::Kind kind, JSScript* outerScript, Engine engine) {
+ if (ICStub::CanMakeCalls(kind)) {
+ if (engine == ICStubCompiler::Engine::Baseline)
+ return outerScript->baselineScript()->fallbackStubSpace();
+ return outerScript->ionScript()->fallbackStubSpace();
+ }
+ return outerScript->zone()->jitZone()->optimizedStubSpace();
+ }
+
+ ICStubSpace* getStubSpace(JSScript* outerScript) {
+ return StubSpaceForKind(kind, outerScript, engine_);
+ }
+};
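+
+// A concrete instance of the compiler pattern, taken from addMonitorStubForValue
+// in SharedIC.cpp (the ObjectGroup case):
+//
+//     ICTypeMonitor_ObjectGroup::Compiler compiler(cx, group);
+//     ICStub* stub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
+//     if (!stub) {
+//         ReportOutOfMemory(cx);
+//         return false;
+//     }
+//
+// getStubSpace() picks the fallback stub space for kinds that can make calls (so
+// the stub survives GC while active on the stack) and the zone's optimized stub
+// space otherwise; see StubSpaceForKind above.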
+
+class SharedStubInfo
+{
+ BaselineFrame* maybeFrame_;
+ RootedScript outerScript_;
+ RootedScript innerScript_;
+ ICEntry* icEntry_;
+
+ public:
+ SharedStubInfo(JSContext* cx, void* payload, ICEntry* entry);
+
+ ICStubCompiler::Engine engine() const {
+ return maybeFrame_ ? ICStubCompiler::Engine::Baseline : ICStubCompiler::Engine::IonMonkey;
+ }
+
+ HandleScript script() const {
+ MOZ_ASSERT(innerScript_);
+ return innerScript_;
+ }
+
+ HandleScript innerScript() const {
+ MOZ_ASSERT(innerScript_);
+ return innerScript_;
+ }
+
+ HandleScript outerScript(JSContext* cx);
+
+ jsbytecode* pc() const {
+ return icEntry()->pc(innerScript());
+ }
+
+ uint32_t pcOffset() const {
+ return script()->pcToOffset(pc());
+ }
+
+ BaselineFrame* frame() const {
+ MOZ_ASSERT(maybeFrame_);
+ return maybeFrame_;
+ }
+
+ BaselineFrame* maybeFrame() const {
+ return maybeFrame_;
+ }
+
+ ICEntry* icEntry() const {
+ return icEntry_;
+ }
+};
+
+// Monitored fallback stubs: fallback stubs that also feed the values they produce
+// into a type monitor chain (via fallbackMonitorStub_).
+class ICMonitoredFallbackStub : public ICFallbackStub
+{
+ protected:
+ // Pointer to the fallback monitor stub.
+ ICTypeMonitor_Fallback* fallbackMonitorStub_;
+
+ ICMonitoredFallbackStub(Kind kind, JitCode* stubCode)
+ : ICFallbackStub(kind, ICStub::MonitoredFallback, stubCode),
+ fallbackMonitorStub_(nullptr) {}
+
+ public:
+ MOZ_MUST_USE bool initMonitoringChain(JSContext* cx, ICStubSpace* space,
+ ICStubCompiler::Engine engine);
+ MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, SharedStubInfo* info, HandleValue val);
+
+ inline ICTypeMonitor_Fallback* fallbackMonitorStub() const {
+ return fallbackMonitorStub_;
+ }
+
+ static inline size_t offsetOfFallbackMonitorStub() {
+ return offsetof(ICMonitoredFallbackStub, fallbackMonitorStub_);
+ }
+};
+
+
+// Base class for stub compilers that can generate multiple stubcodes.
+// These compilers need access to the JSOp they are compiling for.
+class ICMultiStubCompiler : public ICStubCompiler
+{
+ protected:
+ JSOp op;
+
+    // Stub keys for multi-stub kinds are composed of the engine, the kind,
+    // and the op they are compiled for.
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op) << 17);
+ }
+
+ ICMultiStubCompiler(JSContext* cx, ICStub::Kind kind, JSOp op, Engine engine)
+ : ICStubCompiler(cx, kind, engine), op(op) {}
+};
+
+// TypeCheckPrimitiveSetStub
+// Base class for IC stubs (TypeUpdate or TypeMonitor) that check that a given
+// value's type falls within a set of primitive types.
+
+class TypeCheckPrimitiveSetStub : public ICStub
+{
+ friend class ICStubSpace;
+ protected:
+ inline static uint16_t TypeToFlag(JSValueType type) {
+ return 1u << static_cast<unsigned>(type);
+ }
+
+ inline static uint16_t ValidFlags() {
+ return ((TypeToFlag(JSVAL_TYPE_OBJECT) << 1) - 1) & ~TypeToFlag(JSVAL_TYPE_MAGIC);
+ }
+
+ TypeCheckPrimitiveSetStub(Kind kind, JitCode* stubCode, uint16_t flags)
+ : ICStub(kind, stubCode)
+ {
+ MOZ_ASSERT(kind == TypeMonitor_PrimitiveSet || kind == TypeUpdate_PrimitiveSet);
+ MOZ_ASSERT(flags && !(flags & ~ValidFlags()));
+ extra_ = flags;
+ }
+
+ TypeCheckPrimitiveSetStub* updateTypesAndCode(uint16_t flags, JitCode* code) {
+ MOZ_ASSERT(flags && !(flags & ~ValidFlags()));
+ if (!code)
+ return nullptr;
+ extra_ = flags;
+ updateCode(code);
+ return this;
+ }
+
+ public:
+ uint16_t typeFlags() const {
+ return extra_;
+ }
+
+ bool containsType(JSValueType type) const {
+ MOZ_ASSERT(type <= JSVAL_TYPE_OBJECT);
+ MOZ_ASSERT(type != JSVAL_TYPE_MAGIC);
+ return extra_ & TypeToFlag(type);
+ }
+
+ ICTypeMonitor_PrimitiveSet* toMonitorStub() {
+ return toTypeMonitor_PrimitiveSet();
+ }
+
+ ICTypeUpdate_PrimitiveSet* toUpdateStub() {
+ return toTypeUpdate_PrimitiveSet();
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ TypeCheckPrimitiveSetStub* existingStub_;
+ uint16_t flags_;
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(flags_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, Kind kind, Engine engine_, TypeCheckPrimitiveSetStub* existingStub,
+ JSValueType type)
+ : ICStubCompiler(cx, kind, engine_),
+ existingStub_(existingStub),
+ flags_((existingStub ? existingStub->typeFlags() : 0) | TypeToFlag(type))
+ {
+ MOZ_ASSERT_IF(existingStub_, flags_ != existingStub_->typeFlags());
+ }
+
+ TypeCheckPrimitiveSetStub* updateStub() {
+ MOZ_ASSERT(existingStub_);
+ return existingStub_->updateTypesAndCode(flags_, getStubCode());
+ }
+ };
+};
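+
+// The primitive set is just a bitmask over JSValueType. As a sketch, a stub that
+// has seen int32 and boolean values carries:
+//
+//     uint16_t flags = TypeToFlag(JSVAL_TYPE_INT32) | TypeToFlag(JSVAL_TYPE_BOOLEAN);
+//     // i.e. (1u << JSVAL_TYPE_INT32) | (1u << JSVAL_TYPE_BOOLEAN)
+//
+// containsType(JSVAL_TYPE_INT32) then reports true, and the generated stubcode
+// (see ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode in SharedIC.cpp)
+// emits one branch test per flag that is set.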
+
+// TypeMonitor
+
+// The TypeMonitor fallback stub is not always a regular fallback stub. When
+// used for monitoring the values pushed by a bytecode it doesn't hold a
+// pointer to the IC entry, but rather back to the main fallback stub for the
+// IC (from which a pointer to the IC entry can be retrieved). When monitoring
+// the types of 'this', arguments or other values with no associated IC, there
+// is no main fallback stub, and the IC entry is referenced directly.
+class ICTypeMonitor_Fallback : public ICStub
+{
+ friend class ICStubSpace;
+
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ // Pointer to the main fallback stub for the IC or to the main IC entry,
+ // depending on hasFallbackStub.
+ union {
+ ICMonitoredFallbackStub* mainFallbackStub_;
+ ICEntry* icEntry_;
+ };
+
+ // Pointer to the first monitor stub.
+ ICStub* firstMonitorStub_;
+
+ // Address of the last monitor stub's field pointing to this
+ // fallback monitor stub. This will get updated when new
+ // monitor stubs are created and added.
+ ICStub** lastMonitorStubPtrAddr_;
+
+ // Count of optimized type monitor stubs in this chain.
+ uint32_t numOptimizedMonitorStubs_ : 7;
+
+ uint32_t invalid_ : 1;
+
+ // Whether this has a fallback stub referring to the IC entry.
+ bool hasFallbackStub_ : 1;
+
+ // Index of 'this' or argument which is being monitored, or BYTECODE_INDEX
+ // if this is monitoring the types of values pushed at some bytecode.
+ uint32_t argumentIndex_ : 23;
+
+ static const uint32_t BYTECODE_INDEX = (1 << 23) - 1;
+
+ ICTypeMonitor_Fallback(JitCode* stubCode, ICMonitoredFallbackStub* mainFallbackStub,
+ uint32_t argumentIndex)
+ : ICStub(ICStub::TypeMonitor_Fallback, stubCode),
+ mainFallbackStub_(mainFallbackStub),
+ firstMonitorStub_(thisFromCtor()),
+ lastMonitorStubPtrAddr_(nullptr),
+ numOptimizedMonitorStubs_(0),
+ invalid_(false),
+ hasFallbackStub_(mainFallbackStub != nullptr),
+ argumentIndex_(argumentIndex)
+ { }
+
+ ICTypeMonitor_Fallback* thisFromCtor() {
+ return this;
+ }
+
+ void addOptimizedMonitorStub(ICStub* stub) {
+ MOZ_ASSERT(!invalid());
+ stub->setNext(this);
+
+ MOZ_ASSERT((lastMonitorStubPtrAddr_ != nullptr) ==
+ (numOptimizedMonitorStubs_ || !hasFallbackStub_));
+
+ if (lastMonitorStubPtrAddr_)
+ *lastMonitorStubPtrAddr_ = stub;
+
+ if (numOptimizedMonitorStubs_ == 0) {
+ MOZ_ASSERT(firstMonitorStub_ == this);
+ firstMonitorStub_ = stub;
+ } else {
+ MOZ_ASSERT(firstMonitorStub_ != nullptr);
+ }
+
+ lastMonitorStubPtrAddr_ = stub->addressOfNext();
+ numOptimizedMonitorStubs_++;
+ }
+
+ public:
+ bool hasStub(ICStub::Kind kind) {
+ ICStub* stub = firstMonitorStub_;
+ do {
+ if (stub->kind() == kind)
+ return true;
+
+ stub = stub->next();
+ } while (stub);
+
+ return false;
+ }
+
+ inline ICFallbackStub* mainFallbackStub() const {
+ MOZ_ASSERT(hasFallbackStub_);
+ return mainFallbackStub_;
+ }
+
+ inline ICEntry* icEntry() const {
+ return hasFallbackStub_ ? mainFallbackStub()->icEntry() : icEntry_;
+ }
+
+ inline ICStub* firstMonitorStub() const {
+ return firstMonitorStub_;
+ }
+
+ static inline size_t offsetOfFirstMonitorStub() {
+ return offsetof(ICTypeMonitor_Fallback, firstMonitorStub_);
+ }
+
+ inline uint32_t numOptimizedMonitorStubs() const {
+ return numOptimizedMonitorStubs_;
+ }
+
+ void setInvalid() {
+ invalid_ = 1;
+ }
+
+ bool invalid() const {
+ return invalid_;
+ }
+
+ inline bool monitorsThis() const {
+ return argumentIndex_ == 0;
+ }
+
+ inline bool monitorsArgument(uint32_t* pargument) const {
+ if (argumentIndex_ > 0 && argumentIndex_ < BYTECODE_INDEX) {
+ *pargument = argumentIndex_ - 1;
+ return true;
+ }
+ return false;
+ }
+
+ inline bool monitorsBytecode() const {
+ return argumentIndex_ == BYTECODE_INDEX;
+ }
+
+ // Fixup the IC entry as for a normal fallback stub, for this/arguments.
+ void fixupICEntry(ICEntry* icEntry) {
+ MOZ_ASSERT(!hasFallbackStub_);
+ MOZ_ASSERT(icEntry_ == nullptr);
+ MOZ_ASSERT(lastMonitorStubPtrAddr_ == nullptr);
+ icEntry_ = icEntry;
+ lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub();
+ }
+
+ // Create a new monitor stub for the type of the given value, and
+ // add it to this chain.
+ MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, SharedStubInfo* info, HandleValue val);
+
+ void resetMonitorStubChain(Zone* zone);
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ ICMonitoredFallbackStub* mainFallbackStub_;
+ uint32_t argumentIndex_;
+
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Engine engine, ICMonitoredFallbackStub* mainFallbackStub)
+ : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback, engine),
+ mainFallbackStub_(mainFallbackStub),
+ argumentIndex_(BYTECODE_INDEX)
+ { }
+
+ Compiler(JSContext* cx, Engine engine, uint32_t argumentIndex)
+ : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback, engine),
+ mainFallbackStub_(nullptr),
+ argumentIndex_(argumentIndex)
+ { }
+
+ ICTypeMonitor_Fallback* getStub(ICStubSpace* space) {
+ return newStub<ICTypeMonitor_Fallback>(space, getStubCode(), mainFallbackStub_,
+ argumentIndex_);
+ }
+ };
+};
+
+class ICTypeMonitor_PrimitiveSet : public TypeCheckPrimitiveSetStub
+{
+ friend class ICStubSpace;
+
+ ICTypeMonitor_PrimitiveSet(JitCode* stubCode, uint16_t flags)
+ : TypeCheckPrimitiveSetStub(TypeMonitor_PrimitiveSet, stubCode, flags)
+ {}
+
+ public:
+ class Compiler : public TypeCheckPrimitiveSetStub::Compiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Engine engine, ICTypeMonitor_PrimitiveSet* existingStub,
+ JSValueType type)
+ : TypeCheckPrimitiveSetStub::Compiler(cx, TypeMonitor_PrimitiveSet, engine, existingStub,
+ type)
+ {}
+
+ ICTypeMonitor_PrimitiveSet* updateStub() {
+ TypeCheckPrimitiveSetStub* stub =
+ this->TypeCheckPrimitiveSetStub::Compiler::updateStub();
+ if (!stub)
+ return nullptr;
+ return stub->toMonitorStub();
+ }
+
+ ICTypeMonitor_PrimitiveSet* getStub(ICStubSpace* space) {
+ MOZ_ASSERT(!existingStub_);
+ return newStub<ICTypeMonitor_PrimitiveSet>(space, getStubCode(), flags_);
+ }
+ };
+};
+
+class ICTypeMonitor_SingleObject : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObject obj_;
+
+ ICTypeMonitor_SingleObject(JitCode* stubCode, JSObject* obj);
+
+ public:
+ GCPtrObject& object() {
+ return obj_;
+ }
+
+ static size_t offsetOfObject() {
+ return offsetof(ICTypeMonitor_SingleObject, obj_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ HandleObject obj_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObject obj)
+ : ICStubCompiler(cx, TypeMonitor_SingleObject, Engine::Baseline),
+ obj_(obj)
+ { }
+
+ ICTypeMonitor_SingleObject* getStub(ICStubSpace* space) {
+ return newStub<ICTypeMonitor_SingleObject>(space, getStubCode(), obj_);
+ }
+ };
+};
+
+class ICTypeMonitor_ObjectGroup : public ICStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObjectGroup group_;
+
+ ICTypeMonitor_ObjectGroup(JitCode* stubCode, ObjectGroup* group);
+
+ public:
+ GCPtrObjectGroup& group() {
+ return group_;
+ }
+
+ static size_t offsetOfGroup() {
+ return offsetof(ICTypeMonitor_ObjectGroup, group_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ HandleObjectGroup group_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, HandleObjectGroup group)
+ : ICStubCompiler(cx, TypeMonitor_ObjectGroup, Engine::Baseline),
+ group_(group)
+ { }
+
+ ICTypeMonitor_ObjectGroup* getStub(ICStubSpace* space) {
+ return newStub<ICTypeMonitor_ObjectGroup>(space, getStubCode(), group_);
+ }
+ };
+};
+
+
+// BinaryArith
+// JSOP_ADD, JSOP_SUB, JSOP_MUL, JSOP_DIV, JSOP_MOD
+// JSOP_BITAND, JSOP_BITXOR, JSOP_BITOR
+// JSOP_LSH, JSOP_RSH, JSOP_URSH
+
+class ICBinaryArith_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICBinaryArith_Fallback(JitCode* stubCode)
+ : ICFallbackStub(BinaryArith_Fallback, stubCode)
+ {
+ extra_ = 0;
+ }
+
+ static const uint16_t SAW_DOUBLE_RESULT_BIT = 0x1;
+ static const uint16_t UNOPTIMIZABLE_OPERANDS_BIT = 0x2;
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ bool sawDoubleResult() const {
+ return extra_ & SAW_DOUBLE_RESULT_BIT;
+ }
+ void setSawDoubleResult() {
+ extra_ |= SAW_DOUBLE_RESULT_BIT;
+ }
+ bool hadUnoptimizableOperands() const {
+ return extra_ & UNOPTIMIZABLE_OPERANDS_BIT;
+ }
+ void noteUnoptimizableOperands() {
+ extra_ |= UNOPTIMIZABLE_OPERANDS_BIT;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::BinaryArith_Fallback, engine) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICBinaryArith_Int32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICBinaryArith_Int32(JitCode* stubCode, bool allowDouble)
+ : ICStub(BinaryArith_Int32, stubCode)
+ {
+ extra_ = allowDouble;
+ }
+
+ public:
+ bool allowDouble() const {
+ return extra_;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ JSOp op_;
+ bool allowDouble_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+        // Stub keys for shift stubs need to encode the kind, the JSOp, and whether doubles are allowed.
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op_) << 17) |
+ (static_cast<int32_t>(allowDouble_) << 25);
+ }
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool allowDouble)
+ : ICStubCompiler(cx, ICStub::BinaryArith_Int32, engine),
+ op_(op), allowDouble_(allowDouble) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_Int32>(space, getStubCode(), allowDouble_);
+ }
+ };
+};
+
+class ICBinaryArith_StringConcat : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICBinaryArith_StringConcat(JitCode* stubCode)
+ : ICStub(BinaryArith_StringConcat, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::BinaryArith_StringConcat, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_StringConcat>(space, getStubCode());
+ }
+ };
+};
+
+class ICBinaryArith_StringObjectConcat : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICBinaryArith_StringObjectConcat(JitCode* stubCode, bool lhsIsString)
+ : ICStub(BinaryArith_StringObjectConcat, stubCode)
+ {
+ extra_ = lhsIsString;
+ }
+
+ public:
+ bool lhsIsString() const {
+ return extra_;
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ bool lhsIsString_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(lhsIsString_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, Engine engine, bool lhsIsString)
+ : ICStubCompiler(cx, ICStub::BinaryArith_StringObjectConcat, engine),
+ lhsIsString_(lhsIsString)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_StringObjectConcat>(space, getStubCode(),
+ lhsIsString_);
+ }
+ };
+};
+
+class ICBinaryArith_Double : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICBinaryArith_Double(JitCode* stubCode)
+ : ICStub(BinaryArith_Double, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::BinaryArith_Double, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_Double>(space, getStubCode());
+ }
+ };
+};
+
+class ICBinaryArith_BooleanWithInt32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICBinaryArith_BooleanWithInt32(JitCode* stubCode, bool lhsIsBool, bool rhsIsBool)
+ : ICStub(BinaryArith_BooleanWithInt32, stubCode)
+ {
+ MOZ_ASSERT(lhsIsBool || rhsIsBool);
+ extra_ = 0;
+ if (lhsIsBool)
+ extra_ |= 1;
+ if (rhsIsBool)
+ extra_ |= 2;
+ }
+
+ public:
+ bool lhsIsBoolean() const {
+ return extra_ & 1;
+ }
+
+ bool rhsIsBoolean() const {
+ return extra_ & 2;
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ JSOp op_;
+ bool lhsIsBool_;
+ bool rhsIsBool_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op_) << 17) |
+ (static_cast<int32_t>(lhsIsBool_) << 25) |
+ (static_cast<int32_t>(rhsIsBool_) << 26);
+ }
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsBool, bool rhsIsBool)
+ : ICStubCompiler(cx, ICStub::BinaryArith_BooleanWithInt32, engine),
+ op_(op), lhsIsBool_(lhsIsBool), rhsIsBool_(rhsIsBool)
+ {
+ MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB || op_ == JSOP_BITOR ||
+ op_ == JSOP_BITAND || op_ == JSOP_BITXOR);
+ MOZ_ASSERT(lhsIsBool_ || rhsIsBool_);
+ }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_BooleanWithInt32>(space, getStubCode(),
+ lhsIsBool_, rhsIsBool_);
+ }
+ };
+};
+
+class ICBinaryArith_DoubleWithInt32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICBinaryArith_DoubleWithInt32(JitCode* stubCode, bool lhsIsDouble)
+ : ICStub(BinaryArith_DoubleWithInt32, stubCode)
+ {
+ extra_ = lhsIsDouble;
+ }
+
+ public:
+ bool lhsIsDouble() const {
+ return extra_;
+ }
+
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ bool lhsIsDouble_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op) << 17) |
+ (static_cast<int32_t>(lhsIsDouble_) << 25);
+ }
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsDouble)
+ : ICMultiStubCompiler(cx, ICStub::BinaryArith_DoubleWithInt32, op, engine),
+ lhsIsDouble_(lhsIsDouble)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICBinaryArith_DoubleWithInt32>(space, getStubCode(),
+ lhsIsDouble_);
+ }
+ };
+};
+
+// UnaryArith
+// JSOP_BITNOT
+// JSOP_NEG
+
+class ICUnaryArith_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICUnaryArith_Fallback(JitCode* stubCode)
+ : ICFallbackStub(UnaryArith_Fallback, stubCode)
+ {
+ extra_ = 0;
+ }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ bool sawDoubleResult() {
+ return extra_;
+ }
+ void setSawDoubleResult() {
+ extra_ = 1;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::UnaryArith_Fallback, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICUnaryArith_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICUnaryArith_Int32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICUnaryArith_Int32(JitCode* stubCode)
+ : ICStub(UnaryArith_Int32, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::UnaryArith_Int32, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICUnaryArith_Int32>(space, getStubCode());
+ }
+ };
+};
+
+class ICUnaryArith_Double : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICUnaryArith_Double(JitCode* stubCode)
+ : ICStub(UnaryArith_Double, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::UnaryArith_Double, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICUnaryArith_Double>(space, getStubCode());
+ }
+ };
+};
+
+// Compare
+// JSOP_LT
+// JSOP_LE
+// JSOP_GT
+// JSOP_GE
+// JSOP_EQ
+// JSOP_NE
+// JSOP_STRICTEQ
+// JSOP_STRICTNE
+
+class ICCompare_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::Compare_Fallback, stubCode) {}
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 8;
+
+ static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0;
+ void noteUnoptimizableAccess() {
+ extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::Compare_Fallback, engine) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Fallback>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_Int32 : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_Int32(JitCode* stubCode)
+ : ICStub(ICStub::Compare_Int32, stubCode) {}
+
+ public:
+ // Compiler for this stub kind.
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::Compare_Int32, op, engine) {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Int32>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_Double : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_Double(JitCode* stubCode)
+ : ICStub(ICStub::Compare_Double, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::Compare_Double, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Double>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_NumberWithUndefined : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICCompare_NumberWithUndefined(JitCode* stubCode, bool lhsIsUndefined)
+ : ICStub(ICStub::Compare_NumberWithUndefined, stubCode)
+ {
+ extra_ = lhsIsUndefined;
+ }
+
+ public:
+ bool lhsIsUndefined() {
+ return extra_;
+ }
+
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ bool lhsIsUndefined;
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsUndefined)
+ : ICMultiStubCompiler(cx, ICStub::Compare_NumberWithUndefined, op, engine),
+ lhsIsUndefined(lhsIsUndefined)
+ {}
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op) << 17) |
+ (static_cast<int32_t>(lhsIsUndefined) << 25);
+ }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_NumberWithUndefined>(space, getStubCode(),
+ lhsIsUndefined);
+ }
+ };
+};
+
+class ICCompare_String : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_String(JitCode* stubCode)
+ : ICStub(ICStub::Compare_String, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::Compare_String, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_String>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_Boolean : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_Boolean(JitCode* stubCode)
+ : ICStub(ICStub::Compare_Boolean, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::Compare_Boolean, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Boolean>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_Object : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_Object(JitCode* stubCode)
+ : ICStub(ICStub::Compare_Object, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine)
+ : ICMultiStubCompiler(cx, ICStub::Compare_Object, op, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Object>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_ObjectWithUndefined : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICCompare_ObjectWithUndefined(JitCode* stubCode)
+ : ICStub(ICStub::Compare_ObjectWithUndefined, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICMultiStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ bool lhsIsUndefined;
+ bool compareWithNull;
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsUndefined, bool compareWithNull)
+ : ICMultiStubCompiler(cx, ICStub::Compare_ObjectWithUndefined, op, engine),
+ lhsIsUndefined(lhsIsUndefined),
+ compareWithNull(compareWithNull)
+ {}
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op) << 17) |
+ (static_cast<int32_t>(lhsIsUndefined) << 25) |
+ (static_cast<int32_t>(compareWithNull) << 26);
+ }
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_ObjectWithUndefined>(space, getStubCode());
+ }
+ };
+};
+
+class ICCompare_Int32WithBoolean : public ICStub
+{
+ friend class ICStubSpace;
+
+ ICCompare_Int32WithBoolean(JitCode* stubCode, bool lhsIsInt32)
+ : ICStub(ICStub::Compare_Int32WithBoolean, stubCode)
+ {
+ extra_ = lhsIsInt32;
+ }
+
+ public:
+ bool lhsIsInt32() const {
+ return extra_;
+ }
+
+ // Compiler for this stub kind.
+ class Compiler : public ICStubCompiler {
+ protected:
+ JSOp op_;
+ bool lhsIsInt32_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(op_) << 17) |
+ (static_cast<int32_t>(lhsIsInt32_) << 25);
+ }
+
+ public:
+ Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsInt32)
+ : ICStubCompiler(cx, ICStub::Compare_Int32WithBoolean, engine),
+ op_(op),
+ lhsIsInt32_(lhsIsInt32)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICCompare_Int32WithBoolean>(space, getStubCode(), lhsIsInt32_);
+ }
+ };
+};
+
+// Enum for stubs handling a combination of typed arrays and typed objects.
+enum TypedThingLayout {
+ Layout_TypedArray,
+ Layout_OutlineTypedObject,
+ Layout_InlineTypedObject
+};
+
+static inline TypedThingLayout
+GetTypedThingLayout(const Class* clasp)
+{
+ if (IsTypedArrayClass(clasp))
+ return Layout_TypedArray;
+ if (IsOutlineTypedObjectClass(clasp))
+ return Layout_OutlineTypedObject;
+ if (IsInlineTypedObjectClass(clasp))
+ return Layout_InlineTypedObject;
+ MOZ_CRASH("Bad object class");
+}
+
+bool
+IsPreliminaryObject(JSObject* obj);
+
+void
+StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub);
+
+MOZ_MUST_USE bool
+EffectlesslyLookupProperty(JSContext* cx, HandleObject obj, HandleId name,
+ MutableHandleObject holder, MutableHandleShape shape,
+ bool* checkDOMProxy=nullptr,
+ DOMProxyShadowsResult* shadowsResult=nullptr,
+ bool* domProxyHasGeneration=nullptr);
+
+JSObject*
+GetDOMProxyProto(JSObject* obj);
+
+bool
+IsCacheableProtoChain(JSObject* obj, JSObject* holder, bool isDOMProxy=false);
+
+bool
+IsCacheableGetPropReadSlot(JSObject* obj, JSObject* holder, Shape* shape, bool isDOMProxy=false);
+
+void
+GetFixedOrDynamicSlotOffset(Shape* shape, bool* isFixed, uint32_t* offset);
+
+MOZ_MUST_USE bool
+IsCacheableGetPropCall(JSContext* cx, JSObject* obj, JSObject* holder, Shape* shape,
+ bool* isScripted, bool* isTemporarilyUnoptimizable, bool isDOMProxy=false);
+
+MOZ_MUST_USE bool
+UpdateExistingGetPropCallStubs(ICFallbackStub* fallbackStub,
+ ICStub::Kind kind,
+ HandleNativeObject holder,
+ HandleObject receiver,
+ HandleFunction getter);
+MOZ_MUST_USE bool
+CheckHasNoSuchProperty(JSContext* cx, JSObject* obj, PropertyName* name,
+ JSObject** lastProto = nullptr, size_t* protoChainDepthOut = nullptr);
+
+void
+GuardReceiverObject(MacroAssembler& masm, ReceiverGuard guard,
+ Register object, Register scratch,
+ size_t receiverGuardOffset, Label* failure);
+
+MOZ_MUST_USE bool
+GetProtoShapes(JSObject* obj, size_t protoChainDepth, MutableHandle<ShapeVector> shapes);
+
+void
+CheckDOMProxyExpandoDoesNotShadow(JSContext* cx, MacroAssembler& masm, Register object,
+ const Address& checkExpandoShapeAddr,
+ Address* expandoAndGenerationAddr,
+ Address* generationAddr,
+ Register scratch,
+ AllocatableGeneralRegisterSet& domProxyRegSet,
+ Label* checkFailed);
+
+void
+CheckForTypedObjectWithDetachedStorage(JSContext* cx, MacroAssembler& masm, Label* failure);
+
+MOZ_MUST_USE bool
+DoCallNativeGetter(JSContext* cx, HandleFunction callee, HandleObject obj,
+ MutableHandleValue result);
+
+void
+LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result);
+
+class ICGetProp_Fallback : public ICMonitoredFallbackStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetProp_Fallback(JitCode* stubCode)
+ : ICMonitoredFallbackStub(ICStub::GetProp_Fallback, stubCode)
+ { }
+
+ public:
+ static const uint32_t MAX_OPTIMIZED_STUBS = 16;
+ static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0;
+ static const size_t ACCESSED_GETTER_BIT = 1;
+
+ void noteUnoptimizableAccess() {
+ extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+ bool hadUnoptimizableAccess() const {
+ return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT);
+ }
+
+ void noteAccessedGetter() {
+ extra_ |= (1u << ACCESSED_GETTER_BIT);
+ }
+ bool hasAccessedGetter() const {
+ return extra_ & (1u << ACCESSED_GETTER_BIT);
+ }
+
+ class Compiler : public ICStubCompiler {
+ public:
+ static const int32_t BASELINE_KEY =
+ (static_cast<int32_t>(Engine::Baseline)) |
+ (static_cast<int32_t>(ICStub::GetProp_Fallback) << 1);
+
+ protected:
+ uint32_t returnOffset_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+ void postGenerateStubCode(MacroAssembler& masm, Handle<JitCode*> code);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::GetProp_Fallback, engine)
+ { }
+
+ ICStub* getStub(ICStubSpace* space) {
+ ICGetProp_Fallback* stub = newStub<ICGetProp_Fallback>(space, getStubCode());
+ if (!stub || !stub->initMonitoringChain(cx, space, engine_))
+ return nullptr;
+ return stub;
+ }
+ };
+};
+
+// Stub for sites that are too polymorphic (i.e. MAX_OPTIMIZED_STUBS was reached).
+class ICGetProp_Generic : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ explicit ICGetProp_Generic(JitCode* stubCode, ICStub* firstMonitorStub)
+ : ICMonitoredStub(ICStub::GetProp_Generic, stubCode, firstMonitorStub) {}
+
+ public:
+ static ICGetProp_Generic* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_Generic& other);
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+ ICStub* firstMonitorStub_;
+ public:
+ explicit Compiler(JSContext* cx, Engine engine, ICStub* firstMonitorStub)
+ : ICStubCompiler(cx, ICStub::GetProp_Generic, engine),
+ firstMonitorStub_(firstMonitorStub)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetProp_Generic>(space, getStubCode(), firstMonitorStub_);
+ }
+ };
+};
+
+// Stub for accessing a string's length.
+class ICGetProp_StringLength : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICGetProp_StringLength(JitCode* stubCode)
+ : ICStub(GetProp_StringLength, stubCode)
+ {}
+
+ public:
+ class Compiler : public ICStubCompiler {
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::GetProp_StringLength, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetProp_StringLength>(space, getStubCode());
+ }
+ };
+};
+
+// Base class for native GetProp stubs.
+class ICGetPropNativeStub : public ICMonitoredStub
+{
+ // Object shape/group.
+ HeapReceiverGuard receiverGuard_;
+
+ // Fixed or dynamic slot offset.
+ uint32_t offset_;
+
+ protected:
+ ICGetPropNativeStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, uint32_t offset);
+
+ public:
+ HeapReceiverGuard& receiverGuard() {
+ return receiverGuard_;
+ }
+ uint32_t offset() const {
+ return offset_;
+ }
+
+ void notePreliminaryObject() {
+ extra_ = 1;
+ }
+ bool hasPreliminaryObject() const {
+ return extra_;
+ }
+
+ static size_t offsetOfReceiverGuard() {
+ return offsetof(ICGetPropNativeStub, receiverGuard_);
+ }
+ static size_t offsetOfOffset() {
+ return offsetof(ICGetPropNativeStub, offset_);
+ }
+};
+
+class ICGetPropNativePrototypeStub : public ICGetPropNativeStub
+{
+ // Holder and its shape.
+ GCPtrObject holder_;
+ GCPtrShape holderShape_;
+
+ protected:
+ ICGetPropNativePrototypeStub(ICStub::Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard guard, uint32_t offset, JSObject* holder,
+ Shape* holderShape);
+
+ public:
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ static size_t offsetOfHolder() {
+ return offsetof(ICGetPropNativePrototypeStub, holder_);
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICGetPropNativePrototypeStub, holderShape_);
+ }
+};
+
+// Stub for accessing a non-lexical global name. Semantically, it is really a
+// getprop: the name is either on the GlobalObject or its prototype chain. We
+// teleport to the object that has the name, but we also need to guard on the
+// shape of the global object.
+//
+// The receiver object is the global lexical scope.
+class ICGetName_Global : public ICGetPropNativePrototypeStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ GCPtrShape globalShape_;
+
+ ICGetName_Global(JitCode* stubCode, ICStub* firstMonitorStub, ReceiverGuard guard,
+ uint32_t slot, JSObject* holder, Shape* holderShape, Shape* globalShape);
+
+ public:
+ static ICGetName_Global* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetName_Global& other);
+
+ GCPtrShape& globalShape() {
+ return globalShape_;
+ }
+ static size_t offsetOfGlobalShape() {
+ return offsetof(ICGetName_Global, globalShape_);
+ }
+};
+
+// Compiler for native GetProp stubs.
+class ICGetPropNativeCompiler : public ICStubCompiler
+{
+ ICStub* firstMonitorStub_;
+ HandleObject obj_;
+ HandleObject holder_;
+ HandlePropertyName propName_;
+ bool isFixedSlot_;
+ uint32_t offset_;
+ bool inputDefinitelyObject_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ protected:
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(isFixedSlot_) << 17) |
+ (static_cast<int32_t>(inputDefinitelyObject_) << 18) |
+ (HeapReceiverGuard::keyBits(obj_) << 19);
+ }
+
+ public:
+ ICGetPropNativeCompiler(JSContext* cx, ICStub::Kind kind, ICStubCompiler::Engine engine,
+ ICStub* firstMonitorStub, HandleObject obj, HandleObject holder,
+ HandlePropertyName propName, bool isFixedSlot, uint32_t offset,
+ bool inputDefinitelyObject = false)
+ : ICStubCompiler(cx, kind, engine),
+ firstMonitorStub_(firstMonitorStub),
+ obj_(obj),
+ holder_(holder),
+ propName_(propName),
+ isFixedSlot_(isFixedSlot),
+ offset_(offset),
+ inputDefinitelyObject_(inputDefinitelyObject)
+ {}
+
+ ICGetPropNativeStub* getStub(ICStubSpace* space);
+};
+
+static uint32_t
+SimpleTypeDescrKey(SimpleTypeDescr* descr)
+{
+ if (descr->is<ScalarTypeDescr>())
+ return uint32_t(descr->as<ScalarTypeDescr>().type()) << 1;
+ return (uint32_t(descr->as<ReferenceTypeDescr>().type()) << 1) | 1;
+}
+
+inline bool
+SimpleTypeDescrKeyIsScalar(uint32_t key)
+{
+ return !(key & 1);
+}
+
+inline ScalarTypeDescr::Type
+ScalarTypeFromSimpleTypeDescrKey(uint32_t key)
+{
+ MOZ_ASSERT(SimpleTypeDescrKeyIsScalar(key));
+ return ScalarTypeDescr::Type(key >> 1);
+}
+
+inline ReferenceTypeDescr::Type
+ReferenceTypeFromSimpleTypeDescrKey(uint32_t key)
+{
+ MOZ_ASSERT(!SimpleTypeDescrKeyIsScalar(key));
+ return ReferenceTypeDescr::Type(key >> 1);
+}
+
+class ICGetPropCallGetter : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ // Shape/group of receiver object. Used for both own and proto getters.
+ // In the GetPropCallDOMProxyNative case, the receiver guard enforces
+ // the proxy handler, because Shape implies Class.
+ HeapReceiverGuard receiverGuard_;
+
+ // Holder and holder shape. For own getters, guarding on receiverGuard_ is
+ // sufficient, although Ion may use holder_ and holderShape_ even for own
+ // getters. In this case holderShape_ == receiverGuard_.shape_ (isOwnGetter
+ // below relies on this).
+ GCPtrObject holder_;
+
+ GCPtrShape holderShape_;
+
+ // Function to call.
+ GCPtrFunction getter_;
+
+ // PC offset of call
+ uint32_t pcOffset_;
+
+ ICGetPropCallGetter(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard receiverGuard, JSObject* holder,
+ Shape* holderShape, JSFunction* getter, uint32_t pcOffset);
+
+ public:
+ GCPtrObject& holder() {
+ return holder_;
+ }
+ GCPtrShape& holderShape() {
+ return holderShape_;
+ }
+ GCPtrFunction& getter() {
+ return getter_;
+ }
+ HeapReceiverGuard& receiverGuard() {
+ return receiverGuard_;
+ }
+
+ bool isOwnGetter() const {
+ MOZ_ASSERT(holder_->isNative());
+ MOZ_ASSERT(holderShape_);
+ return receiverGuard_.shape() == holderShape_;
+ }
+
+ static size_t offsetOfHolder() {
+ return offsetof(ICGetPropCallGetter, holder_);
+ }
+ static size_t offsetOfHolderShape() {
+ return offsetof(ICGetPropCallGetter, holderShape_);
+ }
+ static size_t offsetOfGetter() {
+ return offsetof(ICGetPropCallGetter, getter_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICGetPropCallGetter, pcOffset_);
+ }
+ static size_t offsetOfReceiverGuard() {
+ return offsetof(ICGetPropCallGetter, receiverGuard_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ RootedObject receiver_;
+ RootedObject holder_;
+ RootedFunction getter_;
+ uint32_t pcOffset_;
+ const Class* outerClass_;
+
+ virtual int32_t getKey() const {
+ // ICGetPropCallNativeCompiler::getKey adds more bits to our
+ // return value, so be careful when making changes here.
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (HeapReceiverGuard::keyBits(receiver_) << 17) |
+ (static_cast<int32_t>(!!outerClass_) << 19) |
+ (static_cast<int32_t>(receiver_ != holder_) << 20);
+ }
+
+ public:
+ Compiler(JSContext* cx, ICStub::Kind kind, Engine engine, ICStub* firstMonitorStub,
+ HandleObject receiver, HandleObject holder, HandleFunction getter,
+ uint32_t pcOffset, const Class* outerClass)
+ : ICStubCompiler(cx, kind, engine),
+ firstMonitorStub_(firstMonitorStub),
+ receiver_(cx, receiver),
+ holder_(cx, holder),
+ getter_(cx, getter),
+ pcOffset_(pcOffset),
+ outerClass_(outerClass)
+ {
+ MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
+ kind == ICStub::GetProp_CallNative ||
+ kind == ICStub::GetProp_CallNativeGlobal);
+ }
+ };
+};
+
+// Stub for calling a scripted getter on a native object when the getter is kept on the
+// proto-chain.
+class ICGetProp_CallScripted : public ICGetPropCallGetter
+{
+ friend class ICStubSpace;
+
+ protected:
+ ICGetProp_CallScripted(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard receiverGuard,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetPropCallGetter(GetProp_CallScripted, stubCode, firstMonitorStub,
+ receiverGuard, holder, holderShape, getter, pcOffset)
+ {}
+
+ public:
+ static ICGetProp_CallScripted* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub, ICGetProp_CallScripted& other);
+
+ class Compiler : public ICGetPropCallGetter::Compiler {
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, ICStub* firstMonitorStub, HandleObject obj,
+ HandleObject holder, HandleFunction getter, uint32_t pcOffset)
+ : ICGetPropCallGetter::Compiler(cx, ICStub::GetProp_CallScripted, Engine::Baseline,
+ firstMonitorStub, obj, holder,
+ getter, pcOffset, /* outerClass = */ nullptr)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ ReceiverGuard guard(receiver_);
+ Shape* holderShape = holder_->as<NativeObject>().lastProperty();
+ return newStub<ICGetProp_CallScripted>(space, getStubCode(), firstMonitorStub_,
+ guard, holder_, holderShape, getter_,
+ pcOffset_);
+ }
+ };
+};
+
+// Stub for calling a native getter on a native object.
+class ICGetProp_CallNative : public ICGetPropCallGetter
+{
+ friend class ICStubSpace;
+
+ protected:
+
+ ICGetProp_CallNative(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard receiverGuard,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetPropCallGetter(GetProp_CallNative, stubCode, firstMonitorStub,
+ receiverGuard, holder, holderShape, getter, pcOffset)
+ {}
+
+ public:
+ static ICGetProp_CallNative* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallNative& other);
+
+};
+
+// Stub for calling a native getter on the GlobalObject.
+class ICGetProp_CallNativeGlobal : public ICGetPropCallGetter
+{
+ friend class ICStubSpace;
+
+ protected:
+ GCPtrShape globalShape_;
+
+ ICGetProp_CallNativeGlobal(JitCode* stubCode, ICStub* firstMonitorStub,
+ ReceiverGuard receiverGuard,
+ JSObject* holder, Shape* holderShape, Shape* globalShape,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetPropCallGetter(GetProp_CallNativeGlobal, stubCode, firstMonitorStub,
+ receiverGuard, holder, holderShape, getter, pcOffset),
+ globalShape_(globalShape)
+ { }
+
+ public:
+ static ICGetProp_CallNativeGlobal* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetProp_CallNativeGlobal& other);
+
+ GCPtrShape& globalShape() {
+ return globalShape_;
+ }
+ static size_t offsetOfGlobalShape() {
+ return offsetof(ICGetProp_CallNativeGlobal, globalShape_);
+ }
+};
+
+class ICGetPropCallNativeCompiler : public ICGetPropCallGetter::Compiler
+{
+ bool inputDefinitelyObject_;
+ protected:
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ int32_t baseKey = ICGetPropCallGetter::Compiler::getKey();
+ MOZ_ASSERT((baseKey >> 21) == 0);
+ return baseKey | (static_cast<int32_t>(inputDefinitelyObject_) << 21);
+ }
+
+ public:
+ ICGetPropCallNativeCompiler(JSContext* cx, ICStub::Kind kind, ICStubCompiler::Engine engine,
+ ICStub* firstMonitorStub, HandleObject receiver,
+ HandleObject holder, HandleFunction getter, uint32_t pcOffset,
+ const Class* outerClass, bool inputDefinitelyObject = false)
+ : ICGetPropCallGetter::Compiler(cx, kind, engine, firstMonitorStub, receiver, holder,
+ getter, pcOffset, outerClass),
+ inputDefinitelyObject_(inputDefinitelyObject)
+ {}
+
+ ICStub* getStub(ICStubSpace* space);
+};
+
+class ICGetPropCallDOMProxyNativeStub : public ICGetPropCallGetter
+{
+ friend class ICStubSpace;
+ protected:
+ // Object shape of expected expando object. (nullptr if no expando object should be there)
+ GCPtrShape expandoShape_;
+
+ ICGetPropCallDOMProxyNativeStub(ICStub::Kind kind, JitCode* stubCode,
+ ICStub* firstMonitorStub, Shape* shape,
+ Shape* expandoShape,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* getter, uint32_t pcOffset);
+
+ public:
+ GCPtrShape& expandoShape() {
+ return expandoShape_;
+ }
+ static size_t offsetOfExpandoShape() {
+ return offsetof(ICGetPropCallDOMProxyNativeStub, expandoShape_);
+ }
+};
+
+class ICGetProp_CallDOMProxyNative : public ICGetPropCallDOMProxyNativeStub
+{
+ friend class ICStubSpace;
+ ICGetProp_CallDOMProxyNative(JitCode* stubCode, ICStub* firstMonitorStub, Shape* shape,
+ Shape* expandoShape,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetPropCallDOMProxyNativeStub(ICStub::GetProp_CallDOMProxyNative, stubCode,
+ firstMonitorStub, shape, expandoShape,
+ holder, holderShape, getter, pcOffset)
+ {}
+
+ public:
+ static ICGetProp_CallDOMProxyNative* Clone(JSContext* cx,
+ ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetProp_CallDOMProxyNative& other);
+};
+
+class ICGetProp_CallDOMProxyWithGenerationNative : public ICGetPropCallDOMProxyNativeStub
+{
+ protected:
+ ExpandoAndGeneration* expandoAndGeneration_;
+ uint64_t generation_;
+
+ public:
+ ICGetProp_CallDOMProxyWithGenerationNative(JitCode* stubCode, ICStub* firstMonitorStub,
+ Shape* shape,
+ ExpandoAndGeneration* expandoAndGeneration,
+ uint64_t generation, Shape* expandoShape,
+ JSObject* holder, Shape* holderShape,
+ JSFunction* getter, uint32_t pcOffset)
+ : ICGetPropCallDOMProxyNativeStub(ICStub::GetProp_CallDOMProxyWithGenerationNative,
+ stubCode, firstMonitorStub, shape,
+ expandoShape, holder, holderShape, getter, pcOffset),
+ expandoAndGeneration_(expandoAndGeneration),
+ generation_(generation)
+ {
+ }
+
+ static ICGetProp_CallDOMProxyWithGenerationNative*
+ Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+ ICGetProp_CallDOMProxyWithGenerationNative& other);
+
+ void* expandoAndGeneration() const {
+ return expandoAndGeneration_;
+ }
+ uint64_t generation() const {
+ return generation_;
+ }
+
+ void setGeneration(uint64_t value) {
+ generation_ = value;
+ }
+
+ static size_t offsetOfInternalStruct() {
+ return offsetof(ICGetProp_CallDOMProxyWithGenerationNative, expandoAndGeneration_);
+ }
+ static size_t offsetOfGeneration() {
+ return offsetof(ICGetProp_CallDOMProxyWithGenerationNative, generation_);
+ }
+};
+
+class ICGetPropCallDOMProxyNativeCompiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ Rooted<ProxyObject*> proxy_;
+ RootedObject holder_;
+ RootedFunction getter_;
+ uint32_t pcOffset_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm, Address* internalStructAddr,
+ Address* generationAddr);
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ ICGetPropCallDOMProxyNativeCompiler(JSContext* cx, ICStub::Kind kind,
+ ICStubCompiler::Engine engine,
+ ICStub* firstMonitorStub, Handle<ProxyObject*> proxy,
+ HandleObject holder, HandleFunction getter,
+ uint32_t pcOffset);
+
+ ICStub* getStub(ICStubSpace* space);
+};
+
+class ICGetProp_DOMProxyShadowed : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+ protected:
+ GCPtrShape shape_;
+ const BaseProxyHandler* proxyHandler_;
+ GCPtrPropertyName name_;
+ uint32_t pcOffset_;
+
+ ICGetProp_DOMProxyShadowed(JitCode* stubCode, ICStub* firstMonitorStub, Shape* shape,
+ const BaseProxyHandler* proxyHandler, PropertyName* name,
+ uint32_t pcOffset);
+
+ public:
+ static ICGetProp_DOMProxyShadowed* Clone(JSContext* cx, ICStubSpace* space,
+ ICStub* firstMonitorStub,
+ ICGetProp_DOMProxyShadowed& other);
+
+ GCPtrShape& shape() {
+ return shape_;
+ }
+ GCPtrPropertyName& name() {
+ return name_;
+ }
+
+ static size_t offsetOfShape() {
+ return offsetof(ICGetProp_DOMProxyShadowed, shape_);
+ }
+ static size_t offsetOfProxyHandler() {
+ return offsetof(ICGetProp_DOMProxyShadowed, proxyHandler_);
+ }
+ static size_t offsetOfName() {
+ return offsetof(ICGetProp_DOMProxyShadowed, name_);
+ }
+ static size_t offsetOfPCOffset() {
+ return offsetof(ICGetProp_DOMProxyShadowed, pcOffset_);
+ }
+
+ class Compiler : public ICStubCompiler {
+ ICStub* firstMonitorStub_;
+ Rooted<ProxyObject*> proxy_;
+ RootedPropertyName name_;
+ uint32_t pcOffset_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Engine engine, ICStub* firstMonitorStub, Handle<ProxyObject*> proxy,
+ HandlePropertyName name, uint32_t pcOffset)
+ : ICStubCompiler(cx, ICStub::GetProp_CallNative, engine),
+ firstMonitorStub_(firstMonitorStub),
+ proxy_(cx, proxy),
+ name_(cx, name),
+ pcOffset_(pcOffset)
+ {}
+
+ ICStub* getStub(ICStubSpace* space);
+ };
+};
+
+class ICGetProp_ArgumentsLength : public ICStub
+{
+ friend class ICStubSpace;
+ public:
+ enum Which { Magic };
+
+ protected:
+ explicit ICGetProp_ArgumentsLength(JitCode* stubCode)
+ : ICStub(ICStub::GetProp_ArgumentsLength, stubCode)
+ { }
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ Which which_;
+
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ virtual int32_t getKey() const {
+ return static_cast<int32_t>(engine_) |
+ (static_cast<int32_t>(kind) << 1) |
+ (static_cast<int32_t>(which_) << 17);
+ }
+
+ public:
+ Compiler(JSContext* cx, Engine engine, Which which)
+ : ICStubCompiler(cx, ICStub::GetProp_ArgumentsLength, engine),
+ which_(which)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetProp_ArgumentsLength>(space, getStubCode());
+ }
+ };
+};
+
+class ICGetProp_ArgumentsCallee : public ICMonitoredStub
+{
+ friend class ICStubSpace;
+
+ protected:
+ ICGetProp_ArgumentsCallee(JitCode* stubCode, ICStub* firstMonitorStub);
+
+ public:
+ class Compiler : public ICStubCompiler {
+ protected:
+ ICStub* firstMonitorStub_;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, Engine engine, ICStub* firstMonitorStub)
+ : ICStubCompiler(cx, ICStub::GetProp_ArgumentsCallee, engine),
+ firstMonitorStub_(firstMonitorStub)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICGetProp_ArgumentsCallee>(space, getStubCode(), firstMonitorStub_);
+ }
+ };
+};
+
+// JSOP_NEWARRAY
+// JSOP_NEWINIT
+
+class ICNewArray_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObject templateObject_;
+
+ // The group used for objects created here is always available, even if the
+ // template object itself is not.
+ GCPtrObjectGroup templateGroup_;
+
+ ICNewArray_Fallback(JitCode* stubCode, ObjectGroup* templateGroup)
+ : ICFallbackStub(ICStub::NewArray_Fallback, stubCode),
+ templateObject_(nullptr), templateGroup_(templateGroup)
+ {}
+
+ public:
+ class Compiler : public ICStubCompiler {
+ RootedObjectGroup templateGroup;
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ Compiler(JSContext* cx, ObjectGroup* templateGroup, Engine engine)
+ : ICStubCompiler(cx, ICStub::NewArray_Fallback, engine),
+ templateGroup(cx, templateGroup)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICNewArray_Fallback>(space, getStubCode(), templateGroup);
+ }
+ };
+
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ void setTemplateObject(JSObject* obj) {
+ MOZ_ASSERT(obj->group() == templateGroup());
+ templateObject_ = obj;
+ }
+
+ GCPtrObjectGroup& templateGroup() {
+ return templateGroup_;
+ }
+
+ void setTemplateGroup(ObjectGroup* group) {
+ templateObject_ = nullptr;
+ templateGroup_ = group;
+ }
+};
+
+// JSOP_NEWOBJECT
+
+class ICNewObject_Fallback : public ICFallbackStub
+{
+ friend class ICStubSpace;
+
+ GCPtrObject templateObject_;
+
+ explicit ICNewObject_Fallback(JitCode* stubCode)
+ : ICFallbackStub(ICStub::NewObject_Fallback, stubCode), templateObject_(nullptr)
+ {}
+
+ public:
+ class Compiler : public ICStubCompiler {
+ MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+ public:
+ explicit Compiler(JSContext* cx, Engine engine)
+ : ICStubCompiler(cx, ICStub::NewObject_Fallback, engine)
+ {}
+
+ ICStub* getStub(ICStubSpace* space) {
+ return newStub<ICNewObject_Fallback>(space, getStubCode());
+ }
+ };
+
+ GCPtrObject& templateObject() {
+ return templateObject_;
+ }
+
+ void setTemplateObject(JSObject* obj) {
+ templateObject_ = obj;
+ }
+};
+
+class ICNewObject_WithTemplate : public ICStub
+{
+ friend class ICStubSpace;
+
+ explicit ICNewObject_WithTemplate(JitCode* stubCode)
+ : ICStub(ICStub::NewObject_WithTemplate, stubCode)
+ {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedIC_h */
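The Compiler::getKey() overrides scattered through this header all pack their discriminating state into a single int32_t so that generated stub code can be cached per (engine, kind, op, flags) combination. The sketch below is illustrative only: the bit positions mirror the overrides above (engine in bit 0, stub kind from bit 1, JSOp from bit 17, extra boolean flags at bits 25 and 26), but the function name, the plain integer arguments, and the main() driver are hypothetical, not SpiderMonkey API.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for the getKey() pattern used by the stub compilers.
static int32_t MakeStubKey(int32_t engine, int32_t kind, int32_t op,
                           bool flagA, bool flagB)
{
    return engine |
           (kind << 1) |
           (op << 17) |
           (static_cast<int32_t>(flagA) << 25) |
           (static_cast<int32_t>(flagB) << 26);
}

int main()
{
    // Two compilers that differ in only one flag must produce different keys,
    // so each combination gets its own cached stub code.
    int32_t withDouble    = MakeStubKey(0, 12, 53, /* allowDouble */ true, false);
    int32_t withoutDouble = MakeStubKey(0, 12, 53, /* allowDouble */ false, false);
    std::cout << std::hex << withDouble << " vs " << withoutDouble << '\n';
    return 0;
}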
diff --git a/js/src/jit/SharedICHelpers.h b/js/src/jit/SharedICHelpers.h
new file mode 100644
index 000000000..d569f4948
--- /dev/null
+++ b/js/src/jit/SharedICHelpers.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICHelpers_h
+#define jit_SharedICHelpers_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/SharedICHelpers-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/SharedICHelpers-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/SharedICHelpers-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/SharedICHelpers-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/SharedICHelpers-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICHelpers_h */
diff --git a/js/src/jit/SharedICList.h b/js/src/jit/SharedICList.h
new file mode 100644
index 000000000..08e4e2705
--- /dev/null
+++ b/js/src/jit/SharedICList.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICList_h
+#define jit_SharedICList_h
+
+namespace js {
+namespace jit {
+
+// List of IC stub kinds that can run in Baseline and in IonMonkey
+#define IC_SHARED_STUB_KIND_LIST(_) \
+ _(BinaryArith_Fallback) \
+ _(BinaryArith_Int32) \
+ _(BinaryArith_Double) \
+ _(BinaryArith_StringConcat) \
+ _(BinaryArith_StringObjectConcat) \
+ _(BinaryArith_BooleanWithInt32) \
+ _(BinaryArith_DoubleWithInt32) \
+ \
+ _(UnaryArith_Fallback) \
+ _(UnaryArith_Int32) \
+ _(UnaryArith_Double) \
+ \
+ _(Compare_Fallback) \
+ _(Compare_Int32) \
+ _(Compare_Double) \
+ _(Compare_NumberWithUndefined) \
+ _(Compare_String) \
+ _(Compare_Boolean) \
+ _(Compare_Object) \
+ _(Compare_ObjectWithUndefined) \
+ _(Compare_Int32WithBoolean) \
+ \
+ _(GetProp_Fallback) \
+ _(GetProp_StringLength) \
+ _(GetProp_CallScripted) \
+ _(GetProp_CallNative) \
+ _(GetProp_CallNativeGlobal) \
+ _(GetProp_CallDOMProxyNative) \
+ _(GetProp_CallDOMProxyWithGenerationNative) \
+ _(GetProp_DOMProxyShadowed) \
+ _(GetProp_ArgumentsLength) \
+ _(GetProp_ArgumentsCallee) \
+ _(GetProp_Generic) \
+ \
+ _(CacheIR_Monitored) \
+ \
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICList_h */
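IC_SHARED_STUB_KIND_LIST above is an X-macro: a consumer supplies the `_` parameter to stamp out one line per shared stub kind (in the real tree the list is expanded inside ICStub::Kind and related switch statements). A minimal sketch of that consumption pattern follows, assuming the SpiderMonkey include path is available to the compiler; the SharedStubKind enum and the DEFINE_KIND macro are hypothetical names used only for illustration.

#include "jit/SharedICList.h"  // defines IC_SHARED_STUB_KIND_LIST

// Expand the list once to build an enum with one enumerator per shared stub
// kind; the same trick works for switch cases or name tables.
enum class SharedStubKind {
#define DEFINE_KIND(kindName) kindName,
    IC_SHARED_STUB_KIND_LIST(DEFINE_KIND)
#undef DEFINE_KIND
    Limit  // number of shared stub kinds
};

static_assert(static_cast<int>(SharedStubKind::Limit) > 0,
              "the list must define at least one shared stub kind");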
diff --git a/js/src/jit/SharedICRegisters.h b/js/src/jit/SharedICRegisters.h
new file mode 100644
index 000000000..9923385ff
--- /dev/null
+++ b/js/src/jit/SharedICRegisters.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICRegisters_h
+#define jit_SharedICRegisters_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/SharedICRegisters-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/SharedICRegisters-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/SharedICRegisters-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/SharedICRegisters-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/SharedICRegisters-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/SharedICRegisters-mips64.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/SharedICRegisters-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICRegisters_h */
diff --git a/js/src/jit/Sink.cpp b/js/src/jit/Sink.cpp
new file mode 100644
index 000000000..b2c36fae5
--- /dev/null
+++ b/js/src/jit/Sink.cpp
@@ -0,0 +1,232 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Sink.h"
+
+#include "mozilla/Vector.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+// Given the last found common dominator and a new definition to dominate, the
+// CommonDominator function returns the basic block which dominates both the
+// last common dominator and the definition. If no such block exists, this
+// function returns null.
+static MBasicBlock*
+CommonDominator(MBasicBlock* commonDominator, MBasicBlock* defBlock)
+{
+    // This is the first instruction visited; record its basic block as the
+    // only interesting one.
+ if (!commonDominator)
+ return defBlock;
+
+ // Iterate on immediate dominators of the known common dominator to find a
+ // block which dominates all previous uses as well as this instruction.
+ while (!commonDominator->dominates(defBlock)) {
+ MBasicBlock* nextBlock = commonDominator->immediateDominator();
+        // All uses are dominated, so this cannot happen unless graph
+        // coherency is violated.
+ MOZ_ASSERT(commonDominator != nextBlock);
+ commonDominator = nextBlock;
+ }
+
+ return commonDominator;
+}
+
+bool
+Sink(MIRGenerator* mir, MIRGraph& graph)
+{
+ TempAllocator& alloc = graph.alloc();
+ bool sinkEnabled = mir->optimizationInfo().sinkEnabled();
+
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+ if (mir->shouldCancel("Sink"))
+ return false;
+
+ for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
+ MInstruction* ins = *iter++;
+
+ // Only instructions which can be recovered on bailout can be moved
+ // into the bailout paths.
+ if (ins->isGuard() || ins->isGuardRangeBailouts() ||
+ ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
+ {
+ continue;
+ }
+
+ // Compute a common dominator for all uses of the current
+ // instruction.
+ bool hasLiveUses = false;
+ bool hasUses = false;
+ MBasicBlock* usesDominator = nullptr;
+ for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
+ hasUses = true;
+ MNode* consumerNode = (*i)->consumer();
+ if (consumerNode->isResumePoint())
+ continue;
+
+ MDefinition* consumer = consumerNode->toDefinition();
+ if (consumer->isRecoveredOnBailout())
+ continue;
+
+ hasLiveUses = true;
+
+                // If the consumer is a Phi, then we should dominate the
+                // predecessor from which the value comes.
+ MBasicBlock* consumerBlock = consumer->block();
+ if (consumer->isPhi())
+ consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));
+
+ usesDominator = CommonDominator(usesDominator, consumerBlock);
+ if (usesDominator == *block)
+ break;
+ }
+
+ // Leave this instruction for DCE.
+ if (!hasUses)
+ continue;
+
+            // We have no live uses, so sink this instruction into all the
+            // bailout paths.
+ if (!hasLiveUses) {
+ MOZ_ASSERT(!usesDominator);
+ ins->setRecoveredOnBailout();
+ JitSpewDef(JitSpew_Sink, " No live uses, recover the instruction on bailout\n", ins);
+ continue;
+ }
+
+            // This guard is temporarily moved here, as the above code deals
+            // with Dead Code elimination, which got moved into this Sink
+            // phase; Dead Code elimination used to move instructions with no
+            // live uses to the bailout path.
+ if (!sinkEnabled)
+ continue;
+
+            // To move an effectful instruction, we would have to verify that
+            // the side-effect is not observed. In the meantime, we just
+            // inhibit this optimization on effectful instructions.
+ if (ins->isEffectful())
+ continue;
+
+            // If all the uses are under a loop, we might not want to work
+            // against LICM by moving everything back into the loop, but if the
+            // loop is itself inside an if, then we still want to move the
+            // computation under this if statement.
+ while (block->loopDepth() < usesDominator->loopDepth()) {
+ MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
+ usesDominator = usesDominator->immediateDominator();
+ }
+
+            // Only move instructions if there is a branch between the dominator
+            // of the uses and the original instruction. This prevents moving the
+            // computation of the arguments into an inline function if there is
+            // no major win.
+ MBasicBlock* lastJoin = usesDominator;
+ while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
+ MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
+ MBasicBlock* next = lastJoin->immediateDominator();
+ if (next->numSuccessors() > 1)
+ break;
+ lastJoin = next;
+ }
+ if (*block == lastJoin)
+ continue;
+
+            // Skip to the next instruction if we cannot find a common dominator
+            // for all the uses of this instruction, or if the common dominator
+            // corresponds to the block of the current instruction.
+ if (!usesDominator || usesDominator == *block)
+ continue;
+
+            // Only instructions which can be recovered on bailout and which
+            // are sinkable can be moved into blocks below, while filling the
+            // resume points with a clone which is recovered on bailout.
+
+            // If the instruction has live uses and it is clonable, then we
+            // can clone the instruction for all non-dominated uses and move
+            // the instruction into the block which dominates all live uses.
+ if (!ins->canClone())
+ continue;
+
+            // If the block is a split-edge block, which is created for folding
+            // test conditions, then the block has no resume point and has
+            // multiple predecessors. In such a case, we cannot safely move
+            // bailing instructions to these blocks as we have no way to bail out.
+ if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
+ continue;
+
+ JitSpewDef(JitSpew_Sink, " Can Clone & Recover, sink instruction\n", ins);
+ JitSpew(JitSpew_Sink, " into Block %u", usesDominator->id());
+
+ // Copy the arguments and clone the instruction.
+ MDefinitionVector operands(alloc);
+ for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
+ if (!operands.append(ins->getOperand(i)))
+ return false;
+ }
+
+ MInstruction* clone = ins->clone(alloc, operands);
+ ins->block()->insertBefore(ins, clone);
+ clone->setRecoveredOnBailout();
+
+            // We should not update the producer of the entry resume point, as
+            // it cannot refer to any instruction within the basic block except
+            // for Phi nodes.
+ MResumePoint* entry = usesDominator->entryResumePoint();
+
+ // Replace the instruction by its clone in all the resume points /
+ // recovered-on-bailout instructions which are not in blocks which
+ // are dominated by the usesDominator block.
+ for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
+ MUse* use = *i++;
+ MNode* consumer = use->consumer();
+
+ // If the consumer is a Phi, then we look for the index of the
+ // use to find the corresponding predecessor block, which is
+ // then used as the consumer block.
+ MBasicBlock* consumerBlock = consumer->block();
+ if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
+ consumerBlock = consumerBlock->getPredecessor(
+ consumer->toDefinition()->toPhi()->indexOf(use));
+ }
+
+                // Keep the current instruction for all dominated uses, except
+                // for the entry resume point of the block into which the
+                // instruction is being moved.
+ if (usesDominator->dominates(consumerBlock) &&
+ (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
+ {
+ continue;
+ }
+
+ use->replaceProducer(clone);
+ }
+
+            // As we move this instruction to a different block, we should
+            // verify that we do not carry over a resume point which would refer
+            // to an outdated state of the control flow.
+ if (ins->resumePoint())
+ ins->clearResumePoint();
+
+            // Now that all uses which are not dominated by usesDominator are
+            // using the cloned instruction, we can safely move the instruction
+            // into the usesDominator block.
+ MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
+ block->moveBefore(at, ins);
+ }
+ }
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
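Sink() above relies on CommonDominator() to find the lowest block that dominates every live use before moving an instruction there. The following compilable toy model sketches that walk; the Block struct, its dominates() loop, and the small diamond graph in main() are hypothetical stand-ins for MBasicBlock and the real dominator tree, not SpiderMonkey API.

#include <cassert>
#include <cstdio>

// Toy stand-in for MBasicBlock: each block records its immediate dominator,
// and dominates() walks that chain. The entry block is its own idom.
struct Block {
    const char* name;
    Block* idom;

    bool dominates(const Block* other) const {
        for (const Block* b = other; ; b = b->idom) {
            if (b == this)
                return true;
            if (b == b->idom)  // reached the entry block without meeting |this|
                return false;
        }
    }
};

// Mirrors the CommonDominator() walk above: hoist the running dominator along
// the dominator tree until it also dominates the new use block.
static Block* CommonDominator(Block* common, Block* useBlock)
{
    if (!common)
        return useBlock;
    while (!common->dominates(useBlock)) {
        assert(common != common->idom);
        common = common->idom;
    }
    return common;
}

int main()
{
    // Diamond graph: entry -> cond -> {then, else}; both arms are dominated by cond.
    Block entry{"entry", nullptr};
    entry.idom = &entry;
    Block cond{"cond", &entry};
    Block thenBlock{"then", &cond};
    Block elseBlock{"else", &cond};

    Block* dom = nullptr;
    dom = CommonDominator(dom, &thenBlock);  // first use seen in "then"
    dom = CommonDominator(dom, &elseBlock);  // second use seen in "else"
    std::printf("common dominator of the uses: %s\n", dom->name);  // prints "cond"
    return 0;
}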
diff --git a/js/src/jit/Sink.h b/js/src/jit/Sink.h
new file mode 100644
index 000000000..ff32d617c
--- /dev/null
+++ b/js/src/jit/Sink.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file declares the Sink transformation.
+#ifndef jit_Sink_h
+#define jit_Sink_h
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+MOZ_MUST_USE bool
+Sink(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Sink_h */
diff --git a/js/src/jit/Snapshots.cpp b/js/src/jit/Snapshots.cpp
new file mode 100644
index 000000000..9923e41fc
--- /dev/null
+++ b/js/src/jit/Snapshots.cpp
@@ -0,0 +1,731 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Snapshots.h"
+
+#include "jsscript.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/JitSpewer.h"
+#ifdef TRACK_SNAPSHOTS
+# include "jit/LIR.h"
+#endif
+#include "jit/MIR.h"
+#include "jit/Recover.h"
+
+#include "vm/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Encodings:
+// [ptr] A fixed-size pointer.
+// [vwu] A variable-width unsigned integer.
+// [vws] A variable-width signed integer.
+// [u8] An 8-bit unsigned integer.
+// [u8'] An 8-bit unsigned integer which is potentially extended with packed
+// data.
+//   [u8"]  Packed data which is stored in the previous [u8'].
+// [vwu*] A list of variable-width unsigned integers.
+// [pld] Payload of Recover Value Allocation:
+// PAYLOAD_NONE:
+// There is no payload.
+//
+// PAYLOAD_INDEX:
+// [vwu] Index, such as the constant pool index.
+//
+// PAYLOAD_STACK_OFFSET:
+// [vws] Stack offset based on the base of the Ion frame.
+//
+// PAYLOAD_GPR:
+// [u8] Code of the general register.
+//
+// PAYLOAD_FPU:
+// [u8] Code of the FPU register.
+//
+// PAYLOAD_PACKED_TAG:
+// [u8"] Bits 5-7: JSValueType is encoded on the low bits of the Mode
+// of the RValueAllocation.
+//
+// Snapshot header:
+//
+//   [vwu] bits [n,31]: recover instruction offset
+//         bits [0,n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
+//
+// Snapshot body, repeated "frame count" times, from oldest frame to newest frame.
+// Note that the first frame doesn't have the "parent PC" field.
+//
+// [ptr] Debug only: JSScript*
+// [vwu] pc offset
+//   [vwu] number of RVA indexes, including nargs
+// [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
+// nargs + nfixed + stackDepth items.
+//
+// Recover value allocations are encoded at the end of the Snapshot buffer, and
+// they are padded to ALLOCATION_TABLE_ALIGNMENT. The encoding of each
+// allocation is determined by the RValueAllocation::Layout, which can be
+// obtained from the RValueAllocation::Mode with the layoutFromMode function.
+// The layout structure lists the types of payload which are used to serialize,
+// deserialize, and dump the content of the allocations.
+//
+// R(ecover)ValueAllocation items:
+// [u8'] Mode, which defines the type of the payload as well as the
+// interpretation.
+// [pld] first payload (packed tag, index, stack offset, register, ...)
+// [pld] second payload (register, stack offset, none)
+//
+// Modes:
+// CONSTANT [INDEX]
+// Index into the constant pool.
+//
+// CST_UNDEFINED []
+//      Constant value which corresponds to the "undefined" JS value.
+//
+// CST_NULL []
+//      Constant value which corresponds to the "null" JS value.
+//
+// DOUBLE_REG [FPU_REG]
+// Double value stored in a FPU register.
+//
+// ANY_FLOAT_REG [FPU_REG]
+// Any Float value (float32, simd) stored in a FPU register.
+//
+// ANY_FLOAT_STACK [STACK_OFFSET]
+// Any Float value (float32, simd) stored on the stack.
+//
+// UNTYPED_REG [GPR_REG]
+// UNTYPED_STACK [STACK_OFFSET]
+// UNTYPED_REG_REG [GPR_REG, GPR_REG]
+// UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
+// UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
+// UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
+//      Value with a dynamically known type. On 32-bit architectures, the
+//      first register/stack-offset corresponds to the holder of the type,
+//      and the second corresponds to the payload of the JS Value.
+//
+// RECOVER_INSTRUCTION [INDEX]
+// Index into the list of recovered instruction results.
+//
+// RI_WITH_DEFAULT_CST [INDEX] [INDEX]
+// The first payload is the index into the list of recovered
+// instruction results. The second payload is the index in the
+// constant pool.
+//
+// TYPED_REG [PACKED_TAG, GPR_REG]:
+//      Value with a statically known type, whose payload is stored in a
+//      register.
+//
+// TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
+//      Value with a statically known type, whose payload is stored at an
+//      offset on the stack.
+//
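+// As a rough example of the resulting byte layout (illustrative only; the
+// actual byte values depend on the platform's register codes, and someGpr
+// below is a placeholder for any general-purpose register):
+//
+//   RValueAllocation::Typed(JSVAL_TYPE_INT32, someGpr) is serialized as
+//     [u8']  TYPED_REG_MIN | (JSVAL_TYPE_INT32 & PACKED_TAG_MASK)
+//     [u8]   someGpr.code()
+//   which is already 2 bytes, a multiple of ALLOCATION_TABLE_ALIGNMENT, so no
+//   padding byte is appended.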
+
+const RValueAllocation::Layout&
+RValueAllocation::layoutFromMode(Mode mode)
+{
+ switch (mode) {
+ case CONSTANT: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_INDEX,
+ PAYLOAD_NONE,
+ "constant"
+ };
+ return layout;
+ }
+
+ case CST_UNDEFINED: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_NONE,
+ PAYLOAD_NONE,
+ "undefined"
+ };
+ return layout;
+ }
+
+ case CST_NULL: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_NONE,
+ PAYLOAD_NONE,
+ "null"
+ };
+ return layout;
+ }
+
+ case DOUBLE_REG: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_FPU,
+ PAYLOAD_NONE,
+ "double"
+ };
+ return layout;
+ }
+ case ANY_FLOAT_REG: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_FPU,
+ PAYLOAD_NONE,
+ "float register content"
+ };
+ return layout;
+ }
+ case ANY_FLOAT_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_NONE,
+ "float register content"
+ };
+ return layout;
+ }
+#if defined(JS_NUNBOX32)
+ case UNTYPED_REG_REG: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_GPR,
+ PAYLOAD_GPR,
+ "value"
+ };
+ return layout;
+ }
+ case UNTYPED_REG_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_GPR,
+ PAYLOAD_STACK_OFFSET,
+ "value"
+ };
+ return layout;
+ }
+ case UNTYPED_STACK_REG: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_GPR,
+ "value"
+ };
+ return layout;
+ }
+ case UNTYPED_STACK_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_STACK_OFFSET,
+ "value"
+ };
+ return layout;
+ }
+#elif defined(JS_PUNBOX64)
+ case UNTYPED_REG: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_GPR,
+ PAYLOAD_NONE,
+ "value"
+ };
+ return layout;
+ }
+ case UNTYPED_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_NONE,
+ "value"
+ };
+ return layout;
+ }
+#endif
+ case RECOVER_INSTRUCTION: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_INDEX,
+ PAYLOAD_NONE,
+ "instruction"
+ };
+ return layout;
+ }
+ case RI_WITH_DEFAULT_CST: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_INDEX,
+ PAYLOAD_INDEX,
+ "instruction with default"
+ };
+ return layout;
+ }
+
+ default: {
+ static const RValueAllocation::Layout regLayout = {
+ PAYLOAD_PACKED_TAG,
+ PAYLOAD_GPR,
+ "typed value"
+ };
+
+ static const RValueAllocation::Layout stackLayout = {
+ PAYLOAD_PACKED_TAG,
+ PAYLOAD_STACK_OFFSET,
+ "typed value"
+ };
+
+ if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX)
+ return regLayout;
+ if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX)
+ return stackLayout;
+ }
+ }
+
+ MOZ_CRASH("Wrong mode type?");
+}
+
+// Pad serialized RValueAllocations to a multiple of X bytes in the allocation
+// buffer. By padding serialized value allocations, we are building an
+// indexable table of elements of X bytes, and thus we can safely divide any
+// offset within the buffer by X to obtain an index.
+//
+// By padding, we are losing space within the allocation buffer, but we
+// multiply by X the number of indexes that we can store in one byte in each
+// snapshot.
+//
+// Some value allocations take more than X bytes to encode, in which case we
+// pad to a multiple of X and waste indexes. The choice of X should balance the
+// padding wasted on serialized value allocations against the savings made on
+// snapshot indexes.
+static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
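+
+// For example, with ALLOCATION_TABLE_ALIGNMENT == 2, an allocation serialized
+// at offset 6 of the allocation buffer is referenced from the snapshot body as
+// index 3 (offset / ALLOCATION_TABLE_ALIGNMENT, see SnapshotWriter::add), and
+// the reader multiplies that index back into offset 6 before decoding it (see
+// SnapshotReader::readAllocation).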
+
+void
+RValueAllocation::readPayload(CompactBufferReader& reader, PayloadType type,
+ uint8_t* mode, Payload* p)
+{
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ p->index = reader.readUnsigned();
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ p->stackOffset = reader.readSigned();
+ break;
+ case PAYLOAD_GPR:
+ p->gpr = Register::FromCode(reader.readByte());
+ break;
+ case PAYLOAD_FPU:
+ p->fpu.data = reader.readByte();
+ break;
+ case PAYLOAD_PACKED_TAG:
+ p->type = JSValueType(*mode & PACKED_TAG_MASK);
+ *mode = *mode & ~PACKED_TAG_MASK;
+ break;
+ }
+}
+
+RValueAllocation
+RValueAllocation::read(CompactBufferReader& reader)
+{
+ uint8_t mode = reader.readByte();
+ const Layout& layout = layoutFromMode(Mode(mode & MODE_BITS_MASK));
+ Payload arg1, arg2;
+
+ readPayload(reader, layout.type1, &mode, &arg1);
+ readPayload(reader, layout.type2, &mode, &arg2);
+ return RValueAllocation(Mode(mode), arg1, arg2);
+}
+
+void
+RValueAllocation::writePayload(CompactBufferWriter& writer, PayloadType type, Payload p)
+{
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ writer.writeUnsigned(p.index);
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ writer.writeSigned(p.stackOffset);
+ break;
+ case PAYLOAD_GPR:
+ static_assert(Registers::Total <= 0x100,
+ "Not enough bytes to encode all registers.");
+ writer.writeByte(p.gpr.code());
+ break;
+ case PAYLOAD_FPU:
+ static_assert(FloatRegisters::Total <= 0x100,
+ "Not enough bytes to encode all float registers.");
+ writer.writeByte(p.fpu.code());
+ break;
+ case PAYLOAD_PACKED_TAG: {
+        // This code assumes that the PACKED_TAG payload immediately follows
+        // the writeByte of the mode.
+ if (!writer.oom()) {
+ MOZ_ASSERT(writer.length());
+ uint8_t* mode = writer.buffer() + (writer.length() - 1);
+ MOZ_ASSERT((*mode & PACKED_TAG_MASK) == 0 && (p.type & ~PACKED_TAG_MASK) == 0);
+ *mode = *mode | p.type;
+ }
+ break;
+ }
+ }
+}
+
+void
+RValueAllocation::writePadding(CompactBufferWriter& writer)
+{
+ // Write 0x7f in all padding bytes.
+ while (writer.length() % ALLOCATION_TABLE_ALIGNMENT)
+ writer.writeByte(0x7f);
+}
+
+void
+RValueAllocation::write(CompactBufferWriter& writer) const
+{
+ const Layout& layout = layoutFromMode(mode());
+ MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
+ MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);
+
+ writer.writeByte(mode_);
+ writePayload(writer, layout.type1, arg1_);
+ writePayload(writer, layout.type2, arg2_);
+ writePadding(writer);
+}
+
+HashNumber
+RValueAllocation::hash() const {
+ CompactBufferWriter writer;
+ write(writer);
+
+    // We should never OOM because the compact buffer writer has 32 inlined
+    // bytes, and in the worst-case scenario we only encode 12 bytes
+    // (12 == mode + signed + signed + pad).
+ MOZ_ASSERT(!writer.oom());
+ MOZ_ASSERT(writer.length() <= 12);
+
+ HashNumber res = 0;
+ for (size_t i = 0; i < writer.length(); i++) {
+ res = ((res << 8) | (res >> (sizeof(res) - 1)));
+ res ^= writer.buffer()[i];
+ }
+ return res;
+}
+
+static const char*
+ValTypeToString(JSValueType type)
+{
+ switch (type) {
+ case JSVAL_TYPE_INT32:
+ return "int32_t";
+ case JSVAL_TYPE_DOUBLE:
+ return "double";
+ case JSVAL_TYPE_STRING:
+ return "string";
+ case JSVAL_TYPE_SYMBOL:
+ return "symbol";
+ case JSVAL_TYPE_BOOLEAN:
+ return "boolean";
+ case JSVAL_TYPE_OBJECT:
+ return "object";
+ case JSVAL_TYPE_MAGIC:
+ return "magic";
+ default:
+ MOZ_CRASH("no payload");
+ }
+}
+
+void
+RValueAllocation::dumpPayload(GenericPrinter& out, PayloadType type, Payload p)
+{
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ out.printf("index %u", p.index);
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ out.printf("stack %d", p.stackOffset);
+ break;
+ case PAYLOAD_GPR:
+ out.printf("reg %s", p.gpr.name());
+ break;
+ case PAYLOAD_FPU:
+ out.printf("reg %s", p.fpu.name());
+ break;
+ case PAYLOAD_PACKED_TAG:
+ out.printf("%s", ValTypeToString(p.type));
+ break;
+ }
+}
+
+void
+RValueAllocation::dump(GenericPrinter& out) const
+{
+ const Layout& layout = layoutFromMode(mode());
+ out.printf("%s", layout.name);
+
+ if (layout.type1 != PAYLOAD_NONE)
+ out.printf(" (");
+ dumpPayload(out, layout.type1, arg1_);
+ if (layout.type2 != PAYLOAD_NONE)
+ out.printf(", ");
+ dumpPayload(out, layout.type2, arg2_);
+ if (layout.type1 != PAYLOAD_NONE)
+ out.printf(")");
+}
+
+bool
+RValueAllocation::equalPayloads(PayloadType type, Payload lhs, Payload rhs)
+{
+ switch (type) {
+ case PAYLOAD_NONE:
+ return true;
+ case PAYLOAD_INDEX:
+ return lhs.index == rhs.index;
+ case PAYLOAD_STACK_OFFSET:
+ return lhs.stackOffset == rhs.stackOffset;
+ case PAYLOAD_GPR:
+ return lhs.gpr == rhs.gpr;
+ case PAYLOAD_FPU:
+ return lhs.fpu == rhs.fpu;
+ case PAYLOAD_PACKED_TAG:
+ return lhs.type == rhs.type;
+ }
+
+ return false;
+}
+
+SnapshotReader::SnapshotReader(const uint8_t* snapshots, uint32_t offset,
+ uint32_t RVATableSize, uint32_t listSize)
+ : reader_(snapshots + offset, snapshots + listSize),
+ allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
+ allocTable_(snapshots + listSize),
+ allocRead_(0)
+{
+ if (!snapshots)
+ return;
+ JitSpew(JitSpew_IonSnapshots, "Creating snapshot reader");
+ readSnapshotHeader();
+}
+
+#define COMPUTE_SHIFT_AFTER_(name) (name ## _BITS + name ##_SHIFT)
+#define COMPUTE_MASK_(name) ((uint32_t(1 << name ## _BITS) - 1) << name ##_SHIFT)
+
+// Details of snapshot header packing.
+static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
+static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 6;
+static const uint32_t SNAPSHOT_BAILOUTKIND_MASK = COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);
+
+static const uint32_t SNAPSHOT_ROFFSET_SHIFT = COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
+static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
+static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);
+
+// Details of recover header packing.
+static const uint32_t RECOVER_RESUMEAFTER_SHIFT = 0;
+static const uint32_t RECOVER_RESUMEAFTER_BITS = 1;
+static const uint32_t RECOVER_RESUMEAFTER_MASK = COMPUTE_MASK_(RECOVER_RESUMEAFTER);
+
+static const uint32_t RECOVER_RINSCOUNT_SHIFT = COMPUTE_SHIFT_AFTER_(RECOVER_RESUMEAFTER);
+static const uint32_t RECOVER_RINSCOUNT_BITS = 32 - RECOVER_RINSCOUNT_SHIFT;
+static const uint32_t RECOVER_RINSCOUNT_MASK = COMPUTE_MASK_(RECOVER_RINSCOUNT);
+
+#undef COMPUTE_MASK_
+#undef COMPUTE_SHIFT_AFTER_
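+
+// As a concrete sanity check of the packing above: the snapshot header stores
+// the bailout kind in bits [0,6) (mask 0x0000003f) and the recover offset in
+// bits [6,31] (mask 0xffffffc0), while the recover header stores the
+// resume-after flag in bit 0 (mask 0x00000001) and the instruction count in
+// bits [1,31] (mask 0xfffffffe).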
+
+void
+SnapshotReader::readSnapshotHeader()
+{
+ uint32_t bits = reader_.readUnsigned();
+
+ bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >> SNAPSHOT_BAILOUTKIND_SHIFT);
+ recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;
+
+ JitSpew(JitSpew_IonSnapshots, "Read snapshot header with bailout kind %u",
+ bailoutKind_);
+
+#ifdef TRACK_SNAPSHOTS
+ readTrackSnapshot();
+#endif
+}
+
+#ifdef TRACK_SNAPSHOTS
+void
+SnapshotReader::readTrackSnapshot()
+{
+ pcOpcode_ = reader_.readUnsigned();
+ mirOpcode_ = reader_.readUnsigned();
+ mirId_ = reader_.readUnsigned();
+ lirOpcode_ = reader_.readUnsigned();
+ lirId_ = reader_.readUnsigned();
+}
+
+void
+SnapshotReader::spewBailingFrom() const
+{
+ if (JitSpewEnabled(JitSpew_IonBailouts)) {
+ JitSpewHeader(JitSpew_IonBailouts);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" bailing from bytecode: %s, MIR: ", CodeName[pcOpcode_]);
+ MDefinition::PrintOpcodeName(out, MDefinition::Opcode(mirOpcode_));
+ out.printf(" [%u], LIR: ", mirId_);
+ LInstruction::printName(out, LInstruction::Opcode(lirOpcode_));
+ out.printf(" [%u]", lirId_);
+ out.printf("\n");
+ }
+}
+#endif
+
+uint32_t
+SnapshotReader::readAllocationIndex()
+{
+ allocRead_++;
+ return reader_.readUnsigned();
+}
+
+RValueAllocation
+SnapshotReader::readAllocation()
+{
+ JitSpew(JitSpew_IonSnapshots, "Reading slot %u", allocRead_);
+ uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT;
+ allocReader_.seek(allocTable_, offset);
+ return RValueAllocation::read(allocReader_);
+}
+
+bool
+SnapshotWriter::init()
+{
+ // Based on the measurements made in Bug 962555 comment 20, this should be
+ // enough to prevent the reallocation of the hash table for at least half of
+ // the compilations.
+ return allocMap_.init(32);
+}
+
+RecoverReader::RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers, uint32_t size)
+ : reader_(nullptr, nullptr),
+ numInstructions_(0),
+ numInstructionsRead_(0)
+{
+ if (!recovers)
+ return;
+ reader_ = CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size);
+ readRecoverHeader();
+ readInstruction();
+}
+
+void
+RecoverReader::readRecoverHeader()
+{
+ uint32_t bits = reader_.readUnsigned();
+
+ numInstructions_ = (bits & RECOVER_RINSCOUNT_MASK) >> RECOVER_RINSCOUNT_SHIFT;
+ resumeAfter_ = (bits & RECOVER_RESUMEAFTER_MASK) >> RECOVER_RESUMEAFTER_SHIFT;
+ MOZ_ASSERT(numInstructions_);
+
+ JitSpew(JitSpew_IonSnapshots, "Read recover header with instructionCount %u (ra: %d)",
+ numInstructions_, resumeAfter_);
+}
+
+void
+RecoverReader::readInstruction()
+{
+ MOZ_ASSERT(moreInstructions());
+ RInstruction::readRecoverData(reader_, &rawData_);
+ numInstructionsRead_++;
+}
+
+SnapshotOffset
+SnapshotWriter::startSnapshot(RecoverOffset recoverOffset, BailoutKind kind)
+{
+ lastStart_ = writer_.length();
+ allocWritten_ = 0;
+
+ JitSpew(JitSpew_IonSnapshots, "starting snapshot with recover offset %u, bailout kind %u",
+ recoverOffset, kind);
+
+ MOZ_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS));
+ MOZ_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS));
+ uint32_t bits =
+ (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) |
+ (recoverOffset << SNAPSHOT_ROFFSET_SHIFT);
+
+ writer_.writeUnsigned(bits);
+ return lastStart_;
+}
+
+#ifdef TRACK_SNAPSHOTS
+void
+SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId,
+ uint32_t lirOpcode, uint32_t lirId)
+{
+ writer_.writeUnsigned(pcOpcode);
+ writer_.writeUnsigned(mirOpcode);
+ writer_.writeUnsigned(mirId);
+ writer_.writeUnsigned(lirOpcode);
+ writer_.writeUnsigned(lirId);
+}
+#endif
+
+bool
+SnapshotWriter::add(const RValueAllocation& alloc)
+{
+ MOZ_ASSERT(allocMap_.initialized());
+
+ uint32_t offset;
+ RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
+ if (!p) {
+ offset = allocWriter_.length();
+ alloc.write(allocWriter_);
+ if (!allocMap_.add(p, alloc, offset)) {
+ allocWriter_.setOOM();
+ return false;
+ }
+ } else {
+ offset = p->value();
+ }
+
+ if (JitSpewEnabled(JitSpew_IonSnapshots)) {
+ JitSpewHeader(JitSpew_IonSnapshots);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" slot %u (%d): ", allocWritten_, offset);
+ alloc.dump(out);
+ out.printf("\n");
+ }
+
+ allocWritten_++;
+ writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
+ return true;
+}
+
+void
+SnapshotWriter::endSnapshot()
+{
+ // Place a sentinel for asserting on the other end.
+#ifdef DEBUG
+ writer_.writeSigned(-1);
+#endif
+
+ JitSpew(JitSpew_IonSnapshots, "ending snapshot total size: %u bytes (start %u)",
+ uint32_t(writer_.length() - lastStart_), lastStart_);
+}
+
+RecoverOffset
+RecoverWriter::startRecover(uint32_t instructionCount, bool resumeAfter)
+{
+ MOZ_ASSERT(instructionCount);
+ instructionCount_ = instructionCount;
+ instructionsWritten_ = 0;
+
+ JitSpew(JitSpew_IonSnapshots, "starting recover with %u instruction(s)",
+ instructionCount);
+
+ MOZ_ASSERT(!(uint32_t(resumeAfter) &~ RECOVER_RESUMEAFTER_MASK));
+ MOZ_ASSERT(instructionCount < uint32_t(1 << RECOVER_RINSCOUNT_BITS));
+ uint32_t bits =
+ (uint32_t(resumeAfter) << RECOVER_RESUMEAFTER_SHIFT) |
+ (instructionCount << RECOVER_RINSCOUNT_SHIFT);
+
+ RecoverOffset recoverOffset = writer_.length();
+ writer_.writeUnsigned(bits);
+ return recoverOffset;
+}
+
+void
+RecoverWriter::writeInstruction(const MNode* rp)
+{
+ if (!rp->writeRecoverData(writer_))
+ writer_.setOOM();
+ instructionsWritten_++;
+}
+
+void
+RecoverWriter::endRecover()
+{
+ MOZ_ASSERT(instructionCount_ == instructionsWritten_);
+}
diff --git a/js/src/jit/Snapshots.h b/js/src/jit/Snapshots.h
new file mode 100644
index 000000000..7aac3ccf6
--- /dev/null
+++ b/js/src/jit/Snapshots.h
@@ -0,0 +1,579 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Snapshot_h
+#define jit_Snapshot_h
+
+#include "mozilla/Alignment.h"
+
+#include "jsalloc.h"
+#include "jsbytecode.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonTypes.h"
+#include "jit/Registers.h"
+
+#include "js/HashTable.h"
+
+namespace js {
+class GenericPrinter;
+
+namespace jit {
+
+class RValueAllocation;
+
+// A Recover Value Allocation mirrors what is known at compile time as the
+// MIRType and the LAllocation. It is read out of the snapshot to recover the
+// value which would be there if this frame were an interpreter frame instead
+// of an Ion frame.
+//
+// It is used with the SnapshotIterator to recover a Value from the stack, from
+// spilled registers, or from the list of constants of the compiled script.
+//
+// Unit tests are located in jsapi-tests/testJitRValueAlloc.cpp.
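+//
+// A minimal round trip, sketched after the pattern used in those tests (it
+// assumes, as the tests do, that a CompactBufferReader can be constructed
+// from a CompactBufferWriter):
+//
+//   CompactBufferWriter writer;
+//   RValueAllocation::Undefined().write(writer);
+//   CompactBufferReader reader(writer);
+//   RValueAllocation alloc = RValueAllocation::read(reader);
+//   MOZ_ASSERT(alloc == RValueAllocation::Undefined());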
+class RValueAllocation
+{
+ public:
+
+ // See RValueAllocation encoding in Snapshots.cpp
+ enum Mode
+ {
+ CONSTANT = 0x00,
+ CST_UNDEFINED = 0x01,
+ CST_NULL = 0x02,
+ DOUBLE_REG = 0x03,
+ ANY_FLOAT_REG = 0x04,
+ ANY_FLOAT_STACK = 0x05,
+#if defined(JS_NUNBOX32)
+ UNTYPED_REG_REG = 0x06,
+ UNTYPED_REG_STACK = 0x07,
+ UNTYPED_STACK_REG = 0x08,
+ UNTYPED_STACK_STACK = 0x09,
+#elif defined(JS_PUNBOX64)
+ UNTYPED_REG = 0x06,
+ UNTYPED_STACK = 0x07,
+#endif
+
+ // Recover instructions.
+ RECOVER_INSTRUCTION = 0x0a,
+ RI_WITH_DEFAULT_CST = 0x0b,
+
+ // The JSValueType is packed in the Mode.
+ TYPED_REG_MIN = 0x10,
+ TYPED_REG_MAX = 0x1f,
+ TYPED_REG = TYPED_REG_MIN,
+
+ // The JSValueType is packed in the Mode.
+ TYPED_STACK_MIN = 0x20,
+ TYPED_STACK_MAX = 0x2f,
+ TYPED_STACK = TYPED_STACK_MIN,
+
+        // This mask can be used with any other valid mode. When this flag is
+        // set on the mode, it informs the snapshot iterator that even if the
+        // allocation is readable, its content might be incomplete unless all
+        // side-effects are executed.
+ RECOVER_SIDE_EFFECT_MASK = 0x80,
+
+ // This mask represents the set of bits which can be used to encode a
+ // value in a snapshot. The mode is used to determine how to interpret
+ // the union of values and how to pack the value in memory.
+ MODE_BITS_MASK = 0x17f,
+
+ INVALID = 0x100,
+ };
+
+ enum { PACKED_TAG_MASK = 0x0f };
+
+ // See Payload encoding in Snapshots.cpp
+ enum PayloadType {
+ PAYLOAD_NONE,
+ PAYLOAD_INDEX,
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_GPR,
+ PAYLOAD_FPU,
+ PAYLOAD_PACKED_TAG
+ };
+
+ struct Layout {
+ PayloadType type1;
+ PayloadType type2;
+ const char* name;
+ };
+
+ private:
+ Mode mode_;
+
+ // Additional information to recover the content of the allocation.
+ struct FloatRegisterBits {
+ uint32_t data;
+ bool operator == (const FloatRegisterBits& other) const {
+ return data == other.data;
+ }
+ uint32_t code() const {
+ return data;
+ }
+ const char* name() const {
+ FloatRegister tmp = FloatRegister::FromCode(data);
+ return tmp.name();
+ }
+ };
+
+ union Payload {
+ uint32_t index;
+ int32_t stackOffset;
+ Register gpr;
+ FloatRegisterBits fpu;
+ JSValueType type;
+ };
+
+ Payload arg1_;
+ Payload arg2_;
+
+ static Payload payloadOfIndex(uint32_t index) {
+ Payload p;
+ p.index = index;
+ return p;
+ }
+ static Payload payloadOfStackOffset(int32_t offset) {
+ Payload p;
+ p.stackOffset = offset;
+ return p;
+ }
+ static Payload payloadOfRegister(Register reg) {
+ Payload p;
+ p.gpr = reg;
+ return p;
+ }
+ static Payload payloadOfFloatRegister(FloatRegister reg) {
+ Payload p;
+ FloatRegisterBits b;
+ b.data = reg.code();
+ p.fpu = b;
+ return p;
+ }
+ static Payload payloadOfValueType(JSValueType type) {
+ Payload p;
+ p.type = type;
+ return p;
+ }
+
+ static const Layout& layoutFromMode(Mode mode);
+
+ static void readPayload(CompactBufferReader& reader, PayloadType t,
+ uint8_t* mode, Payload* p);
+ static void writePayload(CompactBufferWriter& writer, PayloadType t,
+ Payload p);
+ static void writePadding(CompactBufferWriter& writer);
+ static void dumpPayload(GenericPrinter& out, PayloadType t, Payload p);
+ static bool equalPayloads(PayloadType t, Payload lhs, Payload rhs);
+
+ RValueAllocation(Mode mode, Payload a1, Payload a2)
+ : mode_(mode),
+ arg1_(a1),
+ arg2_(a2)
+ {
+ }
+
+ RValueAllocation(Mode mode, Payload a1)
+ : mode_(mode),
+ arg1_(a1)
+ {
+ }
+
+ explicit RValueAllocation(Mode mode)
+ : mode_(mode)
+ {
+ }
+
+ public:
+ RValueAllocation()
+ : mode_(INVALID)
+ { }
+
+ // DOUBLE_REG
+ static RValueAllocation Double(FloatRegister reg) {
+ return RValueAllocation(DOUBLE_REG, payloadOfFloatRegister(reg));
+ }
+
+ // ANY_FLOAT_REG or ANY_FLOAT_STACK
+ static RValueAllocation AnyFloat(FloatRegister reg) {
+ return RValueAllocation(ANY_FLOAT_REG, payloadOfFloatRegister(reg));
+ }
+ static RValueAllocation AnyFloat(int32_t offset) {
+ return RValueAllocation(ANY_FLOAT_STACK, payloadOfStackOffset(offset));
+ }
+
+ // TYPED_REG or TYPED_STACK
+ static RValueAllocation Typed(JSValueType type, Register reg) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE &&
+ type != JSVAL_TYPE_MAGIC &&
+ type != JSVAL_TYPE_NULL &&
+ type != JSVAL_TYPE_UNDEFINED);
+ return RValueAllocation(TYPED_REG, payloadOfValueType(type),
+ payloadOfRegister(reg));
+ }
+ static RValueAllocation Typed(JSValueType type, int32_t offset) {
+ MOZ_ASSERT(type != JSVAL_TYPE_MAGIC &&
+ type != JSVAL_TYPE_NULL &&
+ type != JSVAL_TYPE_UNDEFINED);
+ return RValueAllocation(TYPED_STACK, payloadOfValueType(type),
+ payloadOfStackOffset(offset));
+ }
+
+ // UNTYPED
+#if defined(JS_NUNBOX32)
+ static RValueAllocation Untyped(Register type, Register payload) {
+ return RValueAllocation(UNTYPED_REG_REG,
+ payloadOfRegister(type),
+ payloadOfRegister(payload));
+ }
+
+ static RValueAllocation Untyped(Register type, int32_t payloadStackOffset) {
+ return RValueAllocation(UNTYPED_REG_STACK,
+ payloadOfRegister(type),
+ payloadOfStackOffset(payloadStackOffset));
+ }
+
+ static RValueAllocation Untyped(int32_t typeStackOffset, Register payload) {
+ return RValueAllocation(UNTYPED_STACK_REG,
+ payloadOfStackOffset(typeStackOffset),
+ payloadOfRegister(payload));
+ }
+
+ static RValueAllocation Untyped(int32_t typeStackOffset, int32_t payloadStackOffset) {
+ return RValueAllocation(UNTYPED_STACK_STACK,
+ payloadOfStackOffset(typeStackOffset),
+ payloadOfStackOffset(payloadStackOffset));
+ }
+
+#elif defined(JS_PUNBOX64)
+ static RValueAllocation Untyped(Register reg) {
+ return RValueAllocation(UNTYPED_REG, payloadOfRegister(reg));
+ }
+
+ static RValueAllocation Untyped(int32_t stackOffset) {
+ return RValueAllocation(UNTYPED_STACK, payloadOfStackOffset(stackOffset));
+ }
+#endif
+
+ // common constants.
+ static RValueAllocation Undefined() {
+ return RValueAllocation(CST_UNDEFINED);
+ }
+ static RValueAllocation Null() {
+ return RValueAllocation(CST_NULL);
+ }
+
+ // CONSTANT's index
+ static RValueAllocation ConstantPool(uint32_t index) {
+ return RValueAllocation(CONSTANT, payloadOfIndex(index));
+ }
+
+ // Recover instruction's index
+ static RValueAllocation RecoverInstruction(uint32_t index) {
+ return RValueAllocation(RECOVER_INSTRUCTION, payloadOfIndex(index));
+ }
+ static RValueAllocation RecoverInstruction(uint32_t riIndex, uint32_t cstIndex) {
+ return RValueAllocation(RI_WITH_DEFAULT_CST,
+ payloadOfIndex(riIndex),
+ payloadOfIndex(cstIndex));
+ }
+
+ void setNeedSideEffect() {
+ MOZ_ASSERT(!needSideEffect() && mode_ != INVALID);
+ mode_ = Mode(mode_ | RECOVER_SIDE_EFFECT_MASK);
+ }
+
+ void writeHeader(CompactBufferWriter& writer, JSValueType type, uint32_t regCode) const;
+ public:
+ static RValueAllocation read(CompactBufferReader& reader);
+ void write(CompactBufferWriter& writer) const;
+
+ public:
+ Mode mode() const {
+ return Mode(mode_ & MODE_BITS_MASK);
+ }
+ bool needSideEffect() const {
+ return mode_ & RECOVER_SIDE_EFFECT_MASK;
+ }
+
+ uint32_t index() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_INDEX);
+ return arg1_.index;
+ }
+ int32_t stackOffset() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_STACK_OFFSET);
+ return arg1_.stackOffset;
+ }
+ Register reg() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_GPR);
+ return arg1_.gpr;
+ }
+ FloatRegister fpuReg() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_FPU);
+ FloatRegisterBits b = arg1_.fpu;
+ return FloatRegister::FromCode(b.data);
+ }
+ JSValueType knownType() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_PACKED_TAG);
+ return arg1_.type;
+ }
+
+ uint32_t index2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_INDEX);
+ return arg2_.index;
+ }
+ int32_t stackOffset2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_STACK_OFFSET);
+ return arg2_.stackOffset;
+ }
+ Register reg2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_GPR);
+ return arg2_.gpr;
+ }
+
+ public:
+ void dump(GenericPrinter& out) const;
+
+ public:
+ bool operator==(const RValueAllocation& rhs) const {
+ if (mode_ != rhs.mode_)
+ return false;
+
+ const Layout& layout = layoutFromMode(mode());
+ return equalPayloads(layout.type1, arg1_, rhs.arg1_) &&
+ equalPayloads(layout.type2, arg2_, rhs.arg2_);
+ }
+
+ HashNumber hash() const;
+
+ struct Hasher
+ {
+ typedef RValueAllocation Key;
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup& v) {
+ return v.hash();
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return k == l;
+ }
+ };
+};
+
+class RecoverWriter;
+
+// Collects snapshots in a contiguous buffer, which is copied into IonScript
+// memory after code generation.
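+//
+// A rough sketch of the intended calling protocol (the real driver is the
+// code generator; `kind` and `recoverOffset` stand in for whatever BailoutKind
+// and RecoverOffset the caller has at hand):
+//
+//   SnapshotWriter snapshots;
+//   if (!snapshots.init())
+//       return false;
+//   snapshots.startSnapshot(recoverOffset, kind);
+//   for (/* each RValueAllocation alloc of the frame */) {
+//       if (!snapshots.add(alloc))
+//           return false;
+//   }
+//   snapshots.endSnapshot();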
+class SnapshotWriter
+{
+ CompactBufferWriter writer_;
+ CompactBufferWriter allocWriter_;
+
+ // Map RValueAllocations to an offset in the allocWriter_ buffer. This is
+ // useful as value allocations are repeated frequently.
+ typedef RValueAllocation RVA;
+ typedef HashMap<RVA, uint32_t, RVA::Hasher, SystemAllocPolicy> RValueAllocMap;
+ RValueAllocMap allocMap_;
+
+ // This is only used to assert sanity.
+ uint32_t allocWritten_;
+
+ // Used to report size of the snapshot in the spew messages.
+ SnapshotOffset lastStart_;
+
+ public:
+ MOZ_MUST_USE bool init();
+
+ SnapshotOffset startSnapshot(RecoverOffset recoverOffset, BailoutKind kind);
+#ifdef TRACK_SNAPSHOTS
+ void trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId,
+ uint32_t lirOpcode, uint32_t lirId);
+#endif
+ MOZ_MUST_USE bool add(const RValueAllocation& slot);
+
+ uint32_t allocWritten() const {
+ return allocWritten_;
+ }
+ void endSnapshot();
+
+ bool oom() const {
+ return writer_.oom() || writer_.length() >= MAX_BUFFER_SIZE ||
+ allocWriter_.oom() || allocWriter_.length() >= MAX_BUFFER_SIZE;
+ }
+
+ size_t listSize() const {
+ return writer_.length();
+ }
+ const uint8_t* listBuffer() const {
+ return writer_.buffer();
+ }
+
+ size_t RVATableSize() const {
+ return allocWriter_.length();
+ }
+ const uint8_t* RVATableBuffer() const {
+ return allocWriter_.buffer();
+ }
+};
+
+class MNode;
+
+class RecoverWriter
+{
+ CompactBufferWriter writer_;
+
+ uint32_t instructionCount_;
+ uint32_t instructionsWritten_;
+
+ public:
+    RecoverOffset startRecover(uint32_t instructionCount, bool resumeAfter);
+
+ void writeInstruction(const MNode* rp);
+
+ void endRecover();
+
+ size_t size() const {
+ return writer_.length();
+ }
+ const uint8_t* buffer() const {
+ return writer_.buffer();
+ }
+
+ bool oom() const {
+ return writer_.oom() || writer_.length() >= MAX_BUFFER_SIZE;
+ }
+};
+
+class RecoverReader;
+
+// A snapshot reader reads the entries out of the compressed snapshot buffer in
+// a script. These entries describe the equivalent interpreter frames at a given
+// position in JIT code. Each entry is one of Ion's value allocations, used to
+// recover the corresponding Value from an Ion frame.
+class SnapshotReader
+{
+ CompactBufferReader reader_;
+ CompactBufferReader allocReader_;
+ const uint8_t* allocTable_;
+
+ BailoutKind bailoutKind_;
+ uint32_t allocRead_; // Number of slots that have been read.
+ RecoverOffset recoverOffset_; // Offset of the recover instructions.
+
+#ifdef TRACK_SNAPSHOTS
+ private:
+ uint32_t pcOpcode_;
+ uint32_t mirOpcode_;
+ uint32_t mirId_;
+ uint32_t lirOpcode_;
+ uint32_t lirId_;
+
+ public:
+ void readTrackSnapshot();
+ void spewBailingFrom() const;
+#endif
+
+ private:
+ void readSnapshotHeader();
+ uint32_t readAllocationIndex();
+
+ public:
+ SnapshotReader(const uint8_t* snapshots, uint32_t offset,
+ uint32_t RVATableSize, uint32_t listSize);
+
+ RValueAllocation readAllocation();
+ void skipAllocation() {
+ readAllocationIndex();
+ }
+
+ BailoutKind bailoutKind() const {
+ return bailoutKind_;
+ }
+ RecoverOffset recoverOffset() const {
+ return recoverOffset_;
+ }
+
+ uint32_t numAllocationsRead() const {
+ return allocRead_;
+ }
+ void resetNumAllocationsRead() {
+ allocRead_ = 0;
+ }
+};
+
+class RInstructionStorage
+{
+ static const size_t Size = 4 * sizeof(uint32_t);
+ mozilla::AlignedStorage<Size> mem;
+
+ public:
+ const void* addr() const { return mem.addr(); }
+ void* addr() { return mem.addr(); }
+
+ RInstructionStorage() = default;
+
+ RInstructionStorage(const RInstructionStorage& other) {
+ memcpy(addr(), other.addr(), Size);
+ }
+ void operator=(const RInstructionStorage& other) {
+ memcpy(addr(), other.addr(), Size);
+ }
+};
+
+class RInstruction;
+
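+// Reads the recover instructions referenced by a snapshot. A rough sketch of
+// the intended iteration pattern (processRecoverInstruction is a placeholder
+// for whatever the caller does with each instruction):
+//
+//   RecoverReader recover(snapshotReader, recoverBuffer, recoverSize);
+//   while (true) {
+//       processRecoverInstruction(recover.instruction());
+//       if (!recover.moreInstructions())
+//           break;
+//       recover.nextInstruction();
+//   }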
+class RecoverReader
+{
+ CompactBufferReader reader_;
+
+ // Number of encoded instructions.
+ uint32_t numInstructions_;
+
+    // Number of instructions read.
+ uint32_t numInstructionsRead_;
+
+ // True if we need to resume after the Resume Point instruction of the
+ // innermost frame.
+ bool resumeAfter_;
+
+    // Space is reserved as part of the RecoverReader to avoid allocating the
+    // data needed to decode the current instruction.
+ RInstructionStorage rawData_;
+
+ private:
+ void readRecoverHeader();
+ void readInstruction();
+
+ public:
+ RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers, uint32_t size);
+
+ uint32_t numInstructions() const {
+ return numInstructions_;
+ }
+ uint32_t numInstructionsRead() const {
+ return numInstructionsRead_;
+ }
+
+ bool moreInstructions() const {
+ return numInstructionsRead_ < numInstructions_;
+ }
+ void nextInstruction() {
+ readInstruction();
+ }
+
+ const RInstruction* instruction() const {
+ return reinterpret_cast<const RInstruction*>(rawData_.addr());
+ }
+
+ bool resumeAfter() const {
+ return resumeAfter_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Snapshot_h */
diff --git a/js/src/jit/StackSlotAllocator.h b/js/src/jit/StackSlotAllocator.h
new file mode 100644
index 000000000..07c9ee763
--- /dev/null
+++ b/js/src/jit/StackSlotAllocator.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_StackSlotAllocator_h
+#define jit_StackSlotAllocator_h
+
+#include "mozilla/Unused.h"
+
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+class StackSlotAllocator
+{
+ js::Vector<uint32_t, 4, SystemAllocPolicy> normalSlots;
+ js::Vector<uint32_t, 4, SystemAllocPolicy> doubleSlots;
+ uint32_t height_;
+
+ void addAvailableSlot(uint32_t index) {
+ // Ignoring OOM here (and below) is fine; it just means the stack slot
+ // will be unused.
+ mozilla::Unused << normalSlots.append(index);
+ }
+ void addAvailableDoubleSlot(uint32_t index) {
+ mozilla::Unused << doubleSlots.append(index);
+ }
+
+ uint32_t allocateQuadSlot() {
+ MOZ_ASSERT(SupportsSimd);
+        // This relies on the fact that any architecture-specific
+        // alignment of the stack pointer is done a priori.
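+        // For example, if height_ is currently 4, the two checks below record
+        // 8 as a reusable normal slot and 16 as a reusable double slot, and
+        // the quad slot returned is 32, leaving height_ == 32.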
+ if (height_ % 8 != 0)
+ addAvailableSlot(height_ += 4);
+ if (height_ % 16 != 0)
+ addAvailableDoubleSlot(height_ += 8);
+ return height_ += 16;
+ }
+ uint32_t allocateDoubleSlot() {
+ if (!doubleSlots.empty())
+ return doubleSlots.popCopy();
+ if (height_ % 8 != 0)
+ addAvailableSlot(height_ += 4);
+ return height_ += 8;
+ }
+ uint32_t allocateSlot() {
+ if (!normalSlots.empty())
+ return normalSlots.popCopy();
+ if (!doubleSlots.empty()) {
+ uint32_t index = doubleSlots.popCopy();
+ addAvailableSlot(index - 4);
+ return index;
+ }
+ return height_ += 4;
+ }
+
+ public:
+ StackSlotAllocator() : height_(0)
+ { }
+
+ static uint32_t width(LDefinition::Type type) {
+ switch (type) {
+#if JS_BITS_PER_WORD == 32
+ case LDefinition::GENERAL:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#endif
+ case LDefinition::INT32:
+ case LDefinition::FLOAT32: return 4;
+#if JS_BITS_PER_WORD == 64
+ case LDefinition::GENERAL:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#endif
+#ifdef JS_PUNBOX64
+ case LDefinition::BOX:
+#endif
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ case LDefinition::PAYLOAD:
+#endif
+ case LDefinition::DOUBLE: return 8;
+ case LDefinition::SINCOS:
+ case LDefinition::SIMD128INT:
+ case LDefinition::SIMD128FLOAT: return 16;
+ }
+ MOZ_CRASH("Unknown slot type");
+ }
+
+ uint32_t allocateSlot(LDefinition::Type type) {
+ switch (width(type)) {
+ case 4: return allocateSlot();
+ case 8: return allocateDoubleSlot();
+ case 16: return allocateQuadSlot();
+ }
+ MOZ_CRASH("Unknown slot width");
+ }
+
+ uint32_t stackHeight() const {
+ return height_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_StackSlotAllocator_h */
diff --git a/js/src/jit/StupidAllocator.cpp b/js/src/jit/StupidAllocator.cpp
new file mode 100644
index 000000000..8e3ea6286
--- /dev/null
+++ b/js/src/jit/StupidAllocator.cpp
@@ -0,0 +1,434 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/StupidAllocator.h"
+
+#include "jstypes.h"
+
+using namespace js;
+using namespace js::jit;
+
+static inline uint32_t
+DefaultStackSlot(uint32_t vreg)
+{
+ // On x86/x64, we have to keep the stack aligned on 16 bytes for spilling
+ // SIMD registers. To avoid complexity in this stupid allocator, we just
+    // allocate a 16-byte stack slot for every vreg.
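+    // For example, with sizeof(Value) == 8, vreg N maps to stack slot 16 * N,
+    // so consecutive vregs are 16 bytes apart and every slot stays 16-byte
+    // aligned.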
+ return vreg * 2 * sizeof(Value);
+}
+
+LAllocation*
+StupidAllocator::stackLocation(uint32_t vreg)
+{
+ LDefinition* def = virtualRegisters[vreg];
+ if (def->policy() == LDefinition::FIXED && def->output()->isArgument())
+ return def->output();
+
+ return new(alloc()) LStackSlot(DefaultStackSlot(vreg));
+}
+
+StupidAllocator::RegisterIndex
+StupidAllocator::registerIndex(AnyRegister reg)
+{
+ for (size_t i = 0; i < registerCount; i++) {
+ if (reg == registers[i].reg)
+ return i;
+ }
+ MOZ_CRASH("Bad register");
+}
+
+bool
+StupidAllocator::init()
+{
+ if (!RegisterAllocator::init())
+ return false;
+
+ if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
+ return false;
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+ for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+ for (size_t j = 0; j < ins->numDefs(); j++) {
+ LDefinition* def = ins->getDef(j);
+ virtualRegisters[def->virtualRegister()] = def;
+ }
+
+ for (size_t j = 0; j < ins->numTemps(); j++) {
+ LDefinition* def = ins->getTemp(j);
+ if (def->isBogusTemp())
+ continue;
+ virtualRegisters[def->virtualRegister()] = def;
+ }
+ }
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ LDefinition* def = phi->getDef(0);
+ uint32_t vreg = def->virtualRegister();
+
+ virtualRegisters[vreg] = def;
+ }
+ }
+
+ // Assign physical registers to the tracked allocation.
+ {
+ registerCount = 0;
+ LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
+ while (!remainingRegisters.emptyGeneral())
+ registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyGeneral());
+
+ while (!remainingRegisters.emptyFloat())
+ registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyFloat());
+
+ MOZ_ASSERT(registerCount <= MAX_REGISTERS);
+ }
+
+ return true;
+}
+
+bool
+StupidAllocator::allocationRequiresRegister(const LAllocation* alloc, AnyRegister reg)
+{
+ if (alloc->isRegister() && alloc->toRegister() == reg)
+ return true;
+ if (alloc->isUse()) {
+ const LUse* use = alloc->toUse();
+ if (use->policy() == LUse::FIXED) {
+ AnyRegister usedReg = GetFixedRegister(virtualRegisters[use->virtualRegister()], use);
+ if (usedReg.aliases(reg))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+StupidAllocator::registerIsReserved(LInstruction* ins, AnyRegister reg)
+{
+ // Whether reg is already reserved for an input or output of ins.
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ if (allocationRequiresRegister(*alloc, reg))
+ return true;
+ }
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (allocationRequiresRegister(ins->getTemp(i)->output(), reg))
+ return true;
+ }
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (allocationRequiresRegister(ins->getDef(i)->output(), reg))
+ return true;
+ }
+ return false;
+}
+
+AnyRegister
+StupidAllocator::ensureHasRegister(LInstruction* ins, uint32_t vreg)
+{
+ // Ensure that vreg is held in a register before ins.
+
+ // Check if the virtual register is already held in a physical register.
+ RegisterIndex existing = findExistingRegister(vreg);
+ if (existing != UINT32_MAX) {
+ if (registerIsReserved(ins, registers[existing].reg)) {
+ evictAliasedRegister(ins, existing);
+ } else {
+ registers[existing].age = ins->id();
+ return registers[existing].reg;
+ }
+ }
+
+ RegisterIndex best = allocateRegister(ins, vreg);
+ loadRegister(ins, vreg, best, virtualRegisters[vreg]->type());
+
+ return registers[best].reg;
+}
+
+StupidAllocator::RegisterIndex
+StupidAllocator::allocateRegister(LInstruction* ins, uint32_t vreg)
+{
+ // Pick a register for vreg, evicting an existing register if necessary.
+ // Spill code will be placed before ins, and no existing allocated input
+ // for ins will be touched.
+ MOZ_ASSERT(ins);
+
+ LDefinition* def = virtualRegisters[vreg];
+ MOZ_ASSERT(def);
+
+ RegisterIndex best = UINT32_MAX;
+
+ for (size_t i = 0; i < registerCount; i++) {
+ AnyRegister reg = registers[i].reg;
+
+ if (!def->isCompatibleReg(reg))
+ continue;
+
+ // Skip the register if it is in use for an allocated input or output.
+ if (registerIsReserved(ins, reg))
+ continue;
+
+ if (registers[i].vreg == MISSING_ALLOCATION ||
+ best == UINT32_MAX ||
+ registers[best].age > registers[i].age)
+ {
+ best = i;
+ }
+ }
+
+ evictAliasedRegister(ins, best);
+ return best;
+}
+
+void
+StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index)
+{
+ if (registers[index].dirty) {
+ LMoveGroup* input = getInputMoveGroup(ins);
+ LAllocation source(registers[index].reg);
+
+ uint32_t existing = registers[index].vreg;
+ LAllocation* dest = stackLocation(existing);
+ input->addAfter(source, *dest, registers[index].type);
+
+ registers[index].dirty = false;
+ }
+}
+
+void
+StupidAllocator::evictRegister(LInstruction* ins, RegisterIndex index)
+{
+ syncRegister(ins, index);
+ registers[index].set(MISSING_ALLOCATION);
+}
+
+void
+StupidAllocator::evictAliasedRegister(LInstruction* ins, RegisterIndex index)
+{
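+    // A physical register may alias others (e.g. on 32-bit ARM a double
+    // register aliases two single-precision registers), so sync and free every
+    // alias, not just the evicted register itself.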
+ for (size_t i = 0; i < registers[index].reg.numAliased(); i++) {
+ uint32_t aindex = registerIndex(registers[index].reg.aliased(i));
+ syncRegister(ins, aindex);
+ registers[aindex].set(MISSING_ALLOCATION);
+ }
+}
+
+void
+StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex index, LDefinition::Type type)
+{
+ // Load a vreg from its stack location to a register.
+ LMoveGroup* input = getInputMoveGroup(ins);
+ LAllocation* source = stackLocation(vreg);
+ LAllocation dest(registers[index].reg);
+ input->addAfter(*source, dest, type);
+ registers[index].set(vreg, ins);
+ registers[index].type = type;
+}
+
+StupidAllocator::RegisterIndex
+StupidAllocator::findExistingRegister(uint32_t vreg)
+{
+ for (size_t i = 0; i < registerCount; i++) {
+ if (registers[i].vreg == vreg)
+ return i;
+ }
+ return UINT32_MAX;
+}
+
+bool
+StupidAllocator::go()
+{
+ // This register allocator is intended to be as simple as possible, while
+ // still being complicated enough to share properties with more complicated
+ // allocators. Namely, physical registers may be used to carry virtual
+ // registers across LIR instructions, but not across basic blocks.
+ //
+ // This algorithm does not pay any attention to liveness. It is performed
+ // as a single forward pass through the basic blocks in the program. As
+ // virtual registers and temporaries are defined they are assigned physical
+ // registers, evicting existing allocations in an LRU fashion.
+
+ // For virtual registers not carried in a register, a canonical spill
+ // location is used. Each vreg has a different spill location; since we do
+ // not track liveness we cannot determine that two vregs have disjoint
+ // lifetimes. Thus, the maximum stack height is the number of vregs (scaled
+ // by two on 32 bit platforms to allow storing double values).
+ graph.setLocalSlotCount(DefaultStackSlot(graph.numVirtualRegisters()));
+
+ if (!init())
+ return false;
+
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ MOZ_ASSERT(block->mir()->id() == blockIndex);
+
+ for (size_t i = 0; i < registerCount; i++)
+ registers[i].set(MISSING_ALLOCATION);
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+ LInstruction* ins = *iter;
+
+ if (ins == *block->rbegin())
+ syncForBlockEnd(block, ins);
+
+ allocateForInstruction(ins);
+ }
+ }
+
+ return true;
+}
+
+void
+StupidAllocator::syncForBlockEnd(LBlock* block, LInstruction* ins)
+{
+ // Sync any dirty registers, and update the synced state for phi nodes at
+ // each successor of a block. We cannot conflate the storage for phis with
+ // that of their inputs, as we cannot prove the live ranges of the phi and
+ // its input do not overlap. The values for the two may additionally be
+ // different, as the phi could be for the value of the input in a previous
+ // loop iteration.
+
+ for (size_t i = 0; i < registerCount; i++)
+ syncRegister(ins, i);
+
+ LMoveGroup* group = nullptr;
+
+ MBasicBlock* successor = block->mir()->successorWithPhis();
+ if (successor) {
+ uint32_t position = block->mir()->positionInPhiSuccessor();
+ LBlock* lirsuccessor = successor->lir();
+ for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
+ LPhi* phi = lirsuccessor->getPhi(i);
+
+ uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
+ uint32_t destvreg = phi->getDef(0)->virtualRegister();
+
+ if (sourcevreg == destvreg)
+ continue;
+
+ LAllocation* source = stackLocation(sourcevreg);
+ LAllocation* dest = stackLocation(destvreg);
+
+ if (!group) {
+ // The moves we insert here need to happen simultaneously with
+ // each other, yet after any existing moves before the instruction.
+ LMoveGroup* input = getInputMoveGroup(ins);
+ if (input->numMoves() == 0) {
+ group = input;
+ } else {
+ group = LMoveGroup::New(alloc());
+ block->insertAfter(input, group);
+ }
+ }
+
+ group->add(*source, *dest, phi->getDef(0)->type());
+ }
+ }
+}
+
+void
+StupidAllocator::allocateForInstruction(LInstruction* ins)
+{
+ // Sync all registers before making a call.
+ if (ins->isCall()) {
+ for (size_t i = 0; i < registerCount; i++)
+ syncRegister(ins, i);
+ }
+
+ // Allocate for inputs which are required to be in registers.
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ if (!alloc->isUse())
+ continue;
+ LUse* use = alloc->toUse();
+ uint32_t vreg = use->virtualRegister();
+ if (use->policy() == LUse::REGISTER) {
+ AnyRegister reg = ensureHasRegister(ins, vreg);
+ alloc.replace(LAllocation(reg));
+ } else if (use->policy() == LUse::FIXED) {
+ AnyRegister reg = GetFixedRegister(virtualRegisters[vreg], use);
+ RegisterIndex index = registerIndex(reg);
+ if (registers[index].vreg != vreg) {
+                // The fixed register may alias several registers: evict them
+                // all before reloading.
+                evictAliasedRegister(ins, registerIndex(reg));
+                // If this vreg is already held in a different register, evict
+                // it there too so its stack slot is up to date before the
+                // reload below.
+ RegisterIndex existing = findExistingRegister(vreg);
+ if (existing != UINT32_MAX)
+ evictRegister(ins, existing);
+ loadRegister(ins, vreg, index, virtualRegisters[vreg]->type());
+ }
+ alloc.replace(LAllocation(reg));
+ } else {
+ // Inputs which are not required to be in a register are not
+ // allocated until after temps/definitions, as the latter may need
+ // to evict registers which hold these inputs.
+ }
+ }
+
+ // Find registers to hold all temporaries and outputs of the instruction.
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* def = ins->getTemp(i);
+ if (!def->isBogusTemp())
+ allocateForDefinition(ins, def);
+ }
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ allocateForDefinition(ins, def);
+ }
+
+ // Allocate for remaining inputs which do not need to be in registers.
+ for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
+ if (!alloc->isUse())
+ continue;
+ LUse* use = alloc->toUse();
+ uint32_t vreg = use->virtualRegister();
+ MOZ_ASSERT(use->policy() != LUse::REGISTER && use->policy() != LUse::FIXED);
+
+ RegisterIndex index = findExistingRegister(vreg);
+ if (index == UINT32_MAX) {
+ LAllocation* stack = stackLocation(use->virtualRegister());
+ alloc.replace(*stack);
+ } else {
+ registers[index].age = ins->id();
+ alloc.replace(LAllocation(registers[index].reg));
+ }
+ }
+
+ // If this is a call, evict all registers except for those holding outputs.
+ if (ins->isCall()) {
+ for (size_t i = 0; i < registerCount; i++) {
+ if (!registers[i].dirty)
+ registers[i].set(MISSING_ALLOCATION);
+ }
+ }
+}
+
+void
+StupidAllocator::allocateForDefinition(LInstruction* ins, LDefinition* def)
+{
+ uint32_t vreg = def->virtualRegister();
+
+ CodePosition from;
+ if ((def->output()->isRegister() && def->policy() == LDefinition::FIXED) ||
+ def->policy() == LDefinition::MUST_REUSE_INPUT)
+ {
+ // Result will be in a specific register, spill any vreg held in
+ // that register before the instruction.
+ RegisterIndex index =
+ registerIndex(def->policy() == LDefinition::FIXED
+ ? def->output()->toRegister()
+ : ins->getOperand(def->getReusedInput())->toRegister());
+ evictRegister(ins, index);
+ registers[index].set(vreg, ins, true);
+ registers[index].type = virtualRegisters[vreg]->type();
+ def->setOutput(LAllocation(registers[index].reg));
+ } else if (def->policy() == LDefinition::FIXED) {
+ // The result must be a stack location.
+ def->setOutput(*stackLocation(vreg));
+ } else {
+ // Find a register to hold the result of the instruction.
+ RegisterIndex best = allocateRegister(ins, vreg);
+ registers[best].set(vreg, ins, true);
+ registers[best].type = virtualRegisters[vreg]->type();
+ def->setOutput(LAllocation(registers[best].reg));
+ }
+}
diff --git a/js/src/jit/StupidAllocator.h b/js/src/jit/StupidAllocator.h
new file mode 100644
index 000000000..053aa0595
--- /dev/null
+++ b/js/src/jit/StupidAllocator.h
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_StupidAllocator_h
+#define jit_StupidAllocator_h
+
+#include "jit/RegisterAllocator.h"
+
+// Simple register allocator that only carries registers within basic blocks.
+
+namespace js {
+namespace jit {
+
+class StupidAllocator : public RegisterAllocator
+{
+ static const uint32_t MAX_REGISTERS = AnyRegister::Total;
+ static const uint32_t MISSING_ALLOCATION = UINT32_MAX;
+
+ struct AllocatedRegister {
+ AnyRegister reg;
+
+ // The type of the value in the register.
+ LDefinition::Type type;
+
+ // Virtual register this physical reg backs, or MISSING_ALLOCATION.
+ uint32_t vreg;
+
+ // id of the instruction which most recently used this register.
+ uint32_t age;
+
+ // Whether the physical register is not synced with the backing stack slot.
+ bool dirty;
+
+ void set(uint32_t vreg, LInstruction* ins = nullptr, bool dirty = false) {
+ this->vreg = vreg;
+ this->age = ins ? ins->id() : 0;
+ this->dirty = dirty;
+ }
+ };
+
+ // Active allocation for the current code position.
+ mozilla::Array<AllocatedRegister, MAX_REGISTERS> registers;
+ uint32_t registerCount;
+
+ // Type indicating an index into registers.
+ typedef uint32_t RegisterIndex;
+
+ // Information about each virtual register.
+ Vector<LDefinition*, 0, SystemAllocPolicy> virtualRegisters;
+
+ public:
+ StupidAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
+ : RegisterAllocator(mir, lir, graph)
+ {
+ }
+
+ MOZ_MUST_USE bool go();
+
+ private:
+ MOZ_MUST_USE bool init();
+
+ void syncForBlockEnd(LBlock* block, LInstruction* ins);
+ void allocateForInstruction(LInstruction* ins);
+ void allocateForDefinition(LInstruction* ins, LDefinition* def);
+
+ LAllocation* stackLocation(uint32_t vreg);
+
+ RegisterIndex registerIndex(AnyRegister reg);
+
+ AnyRegister ensureHasRegister(LInstruction* ins, uint32_t vreg);
+ RegisterIndex allocateRegister(LInstruction* ins, uint32_t vreg);
+
+ void syncRegister(LInstruction* ins, RegisterIndex index);
+ void evictRegister(LInstruction* ins, RegisterIndex index);
+ void evictAliasedRegister(LInstruction* ins, RegisterIndex index);
+ void loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex index, LDefinition::Type type);
+
+ RegisterIndex findExistingRegister(uint32_t vreg);
+
+ bool allocationRequiresRegister(const LAllocation* alloc, AnyRegister reg);
+ bool registerIsReserved(LInstruction* ins, AnyRegister reg);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_StupidAllocator_h */
diff --git a/js/src/jit/TypePolicy.cpp b/js/src/jit/TypePolicy.cpp
new file mode 100644
index 000000000..2a7480c39
--- /dev/null
+++ b/js/src/jit/TypePolicy.cpp
@@ -0,0 +1,1330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TypePolicy.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::DoubleNaNValue;
+
+static void
+EnsureOperandNotFloat32(TempAllocator& alloc, MInstruction* def, unsigned op)
+{
+ MDefinition* in = def->getOperand(op);
+ if (in->type() == MIRType::Float32) {
+ MToDouble* replace = MToDouble::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ if (def->isRecoveredOnBailout())
+ replace->setRecoveredOnBailout();
+ def->replaceOperand(op, replace);
+ }
+}
+
+MDefinition*
+js::jit::AlwaysBoxAt(TempAllocator& alloc, MInstruction* at, MDefinition* operand)
+{
+ MDefinition* boxedOperand = operand;
+ // Replace Float32 by double
+ if (operand->type() == MIRType::Float32) {
+ MInstruction* replace = MToDouble::New(alloc, operand);
+ at->block()->insertBefore(at, replace);
+ boxedOperand = replace;
+ }
+ MBox* box = MBox::New(alloc, boxedOperand);
+ at->block()->insertBefore(at, box);
+ return box;
+}
+
+static MDefinition*
+BoxAt(TempAllocator& alloc, MInstruction* at, MDefinition* operand)
+{
+ if (operand->isUnbox())
+ return operand->toUnbox()->input();
+ return AlwaysBoxAt(alloc, at, operand);
+}
+
+bool
+BoxInputsPolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Value)
+ continue;
+ ins->replaceOperand(i, BoxAt(alloc, ins, in));
+ }
+ return true;
+}
+
+bool
+ArithPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MIRType specialization = ins->typePolicySpecialization();
+ if (specialization == MIRType::None)
+ return BoxInputsPolicy::staticAdjustInputs(alloc, ins);
+
+ MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Int32 || ins->type() == MIRType::Float32);
+
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == ins->type())
+ continue;
+
+ MInstruction* replace;
+
+ if (ins->type() == MIRType::Double)
+ replace = MToDouble::New(alloc, in);
+ else if (ins->type() == MIRType::Float32)
+ replace = MToFloat32::New(alloc, in);
+ else
+ replace = MToInt32::New(alloc, in);
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+AllDoublePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Double)
+ continue;
+
+ if (!alloc.ensureBallast())
+ return false;
+ MInstruction* replace = MToDouble::New(alloc, in);
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MOZ_ASSERT(def->isCompare());
+ MCompare* compare = def->toCompare();
+
+ // Convert Float32 operands to doubles
+ for (size_t i = 0; i < 2; i++) {
+ MDefinition* in = def->getOperand(i);
+ if (in->type() == MIRType::Float32) {
+ MInstruction* replace = MToDouble::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(i, replace);
+ }
+ }
+
+    // Box the inputs to get a Value.
+ if (compare->compareType() == MCompare::Compare_Unknown ||
+ compare->compareType() == MCompare::Compare_Bitwise)
+ {
+ return BoxInputsPolicy::staticAdjustInputs(alloc, def);
+ }
+
+    // The Compare_Boolean specialization is used for "Anything === Bool".
+    // If the LHS is also a Boolean, switch the specialization to an Int32
+    // comparison (Compare_Int32MaybeCoerceBoth): this matches other
+    // comparisons of the form "bool === bool", and the code generated for
+    // Compare_Int32 is more efficient.
+ if (compare->compareType() == MCompare::Compare_Boolean &&
+ def->getOperand(0)->type() == MIRType::Boolean)
+ {
+ compare->setCompareType(MCompare::Compare_Int32MaybeCoerceBoth);
+ }
+
+    // The Compare_Boolean specialization is used for "Anything === Bool".
+    // After the check above, the LHS can no longer be a Boolean here.
+ if (compare->compareType() == MCompare::Compare_Boolean) {
+        // Unbox the RHS, which is definitely a Boolean.
+ MDefinition* rhs = def->getOperand(1);
+ if (rhs->type() != MIRType::Boolean) {
+ MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType::Boolean, MUnbox::Infallible);
+ def->block()->insertBefore(def, unbox);
+ def->replaceOperand(1, unbox);
+ if (!unbox->typePolicy()->adjustInputs(alloc, unbox))
+ return false;
+ }
+
+ MOZ_ASSERT(def->getOperand(0)->type() != MIRType::Boolean);
+ MOZ_ASSERT(def->getOperand(1)->type() == MIRType::Boolean);
+ return true;
+ }
+
+    // The Compare_StrictString specialization is used for "Anything === String".
+    // If the LHS is a String, switch the specialization to Compare_String.
+ if (compare->compareType() == MCompare::Compare_StrictString &&
+ def->getOperand(0)->type() == MIRType::String)
+ {
+ compare->setCompareType(MCompare::Compare_String);
+ }
+
+    // The Compare_StrictString specialization is used for "Anything === String".
+    // After the check above, the LHS can no longer be a String here.
+ if (compare->compareType() == MCompare::Compare_StrictString) {
+        // Unbox the RHS, which is definitely a String.
+ MDefinition* rhs = def->getOperand(1);
+ if (rhs->type() != MIRType::String) {
+ MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType::String, MUnbox::Infallible);
+ def->block()->insertBefore(def, unbox);
+ def->replaceOperand(1, unbox);
+ if (!unbox->typePolicy()->adjustInputs(alloc, unbox))
+ return false;
+ }
+
+ MOZ_ASSERT(def->getOperand(0)->type() != MIRType::String);
+ MOZ_ASSERT(def->getOperand(1)->type() == MIRType::String);
+ return true;
+ }
+
+ if (compare->compareType() == MCompare::Compare_Undefined ||
+ compare->compareType() == MCompare::Compare_Null)
+ {
+        // Nothing to do for undefined and null; lowering handles all types.
+ return true;
+ }
+
+ // Convert all inputs to the right input type
+ MIRType type = compare->inputType();
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Double ||
+ type == MIRType::Object || type == MIRType::String || type == MIRType::Float32);
+ for (size_t i = 0; i < 2; i++) {
+ MDefinition* in = def->getOperand(i);
+ if (in->type() == type)
+ continue;
+
+ MInstruction* replace;
+
+ switch (type) {
+ case MIRType::Double: {
+ MToFPInstruction::ConversionKind convert = MToFPInstruction::NumbersOnly;
+ if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceLHS && i == 0)
+ convert = MToFPInstruction::NonNullNonStringPrimitives;
+ else if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceRHS && i == 1)
+ convert = MToFPInstruction::NonNullNonStringPrimitives;
+ replace = MToDouble::New(alloc, in, convert);
+ break;
+ }
+ case MIRType::Float32: {
+ MToFPInstruction::ConversionKind convert = MToFPInstruction::NumbersOnly;
+ if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceLHS && i == 0)
+ convert = MToFPInstruction::NonNullNonStringPrimitives;
+ else if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceRHS && i == 1)
+ convert = MToFPInstruction::NonNullNonStringPrimitives;
+ replace = MToFloat32::New(alloc, in, convert);
+ break;
+ }
+ case MIRType::Int32: {
+ MacroAssembler::IntConversionInputKind convert = MacroAssembler::IntConversion_NumbersOnly;
+ if (compare->compareType() == MCompare::Compare_Int32MaybeCoerceBoth ||
+ (compare->compareType() == MCompare::Compare_Int32MaybeCoerceLHS && i == 0) ||
+ (compare->compareType() == MCompare::Compare_Int32MaybeCoerceRHS && i == 1))
+ {
+ convert = MacroAssembler::IntConversion_NumbersOrBoolsOnly;
+ }
+ replace = MToInt32::New(alloc, in, convert);
+ break;
+ }
+ case MIRType::Object:
+ replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Infallible);
+ break;
+ case MIRType::String:
+ replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
+ break;
+ default:
+ MOZ_CRASH("Unknown compare specialization");
+ }
+
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+TypeBarrierPolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MTypeBarrier* ins = def->toTypeBarrier();
+ MIRType inputType = ins->getOperand(0)->type();
+ MIRType outputType = ins->type();
+
+ // Input and output type are already in accordance.
+ if (inputType == outputType)
+ return true;
+
+    // The output is a Value; for now, just box the input.
+ if (outputType == MIRType::Value) {
+ // XXX: Possible optimization: decrease resultTypeSet to only include
+ // the inputType. This will remove the need for boxing.
+ MOZ_ASSERT(inputType != MIRType::Value);
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ return true;
+ }
+
+ // Box input if needed.
+ if (inputType != MIRType::Value) {
+ MOZ_ASSERT(ins->alwaysBails());
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ }
+
+    // We can't unbox a value to null/undefined/lazyargs, so keep the output
+    // a Value as well.
+    // Note: using setResultType should not normally be done in TypePolicies;
+    // here it is fine, since the type barrier has no uses.
+ if (IsNullOrUndefined(outputType) || outputType == MIRType::MagicOptimizedArguments) {
+ MOZ_ASSERT(!ins->hasDefUses());
+ ins->setResultType(MIRType::Value);
+ return true;
+ }
+
+ // Unbox / propagate the right type.
+ MUnbox::Mode mode = MUnbox::TypeBarrier;
+ MInstruction* replace = MUnbox::New(alloc, ins->getOperand(0), ins->type(), mode);
+ if (!ins->isMovable())
+ replace->setNotMovable();
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(0, replace);
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+
+ // The TypeBarrier is equivalent to removing branches with unexpected
+ // types. The unexpected types would have changed Range Analysis
+ // predictions. As such, we need to prevent destructive optimizations.
+ ins->block()->flagOperandsOfPrunedBranches(replace);
+
+ return true;
+}
+
+bool
+TestPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* op = ins->getOperand(0);
+ switch (op->type()) {
+ case MIRType::Value:
+ case MIRType::Null:
+ case MIRType::Undefined:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ break;
+
+ case MIRType::String:
+ {
+ MStringLength* length = MStringLength::New(alloc, op);
+ ins->block()->insertBefore(ins, length);
+ ins->replaceOperand(0, length);
+ break;
+ }
+
+ default:
+ ins->replaceOperand(0, BoxAt(alloc, ins, op));
+ break;
+ }
+ return true;
+}
+
+bool
+BitwisePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MIRType specialization = ins->typePolicySpecialization();
+ if (specialization == MIRType::None)
+ return BoxInputsPolicy::staticAdjustInputs(alloc, ins);
+
+ MOZ_ASSERT(ins->type() == specialization);
+ MOZ_ASSERT(specialization == MIRType::Int32 || specialization == MIRType::Double);
+
+ // This policy works for both unary and binary bitwise operations.
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Int32)
+ continue;
+
+ MInstruction* replace = MTruncateToInt32::New(alloc, in);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+PowPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MIRType specialization = ins->typePolicySpecialization();
+ MOZ_ASSERT(specialization == MIRType::Int32 || specialization == MIRType::Double);
+
+    // The first operand must be a double.
+ if (!DoublePolicy<0>::staticAdjustInputs(alloc, ins))
+ return false;
+
+    // The exponent may be an int32 or a double; integers take a faster path.
+ if (specialization == MIRType::Double)
+ return DoublePolicy<1>::staticAdjustInputs(alloc, ins);
+ return IntPolicy<1>::staticAdjustInputs(alloc, ins);
+}
+
+template <unsigned Op>
+bool
+StringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::String)
+ return true;
+
+ MUnbox* replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Fallible);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool StringPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool StringPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool StringPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+template <unsigned Op>
+bool
+ConvertToStringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::String)
+ return true;
+
+ MToString* replace = MToString::New(alloc, in);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(Op, replace);
+
+ if (!ToStringPolicy::staticAdjustInputs(alloc, replace))
+ return false;
+
+ return true;
+}
+
+template bool ConvertToStringPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool ConvertToStringPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool ConvertToStringPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+template <unsigned Op>
+bool
+BooleanPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Boolean)
+ return true;
+
+ MUnbox* replace = MUnbox::New(alloc, in, MIRType::Boolean, MUnbox::Fallible);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool BooleanPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+IntPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Int32)
+ return true;
+
+ MUnbox* replace = MUnbox::New(alloc, in, MIRType::Int32, MUnbox::Fallible);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool IntPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool IntPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool IntPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool IntPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+ConvertToInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Int32)
+ return true;
+
+ MToInt32* replace = MToInt32::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool ConvertToInt32Policy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+TruncateToInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Int32)
+ return true;
+
+ MTruncateToInt32* replace = MTruncateToInt32::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool TruncateToInt32Policy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool TruncateToInt32Policy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+DoublePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Double || in->type() == MIRType::SinCosDouble)
+ return true;
+
+ MToDouble* replace = MToDouble::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool DoublePolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool DoublePolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+Float32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::Float32)
+ return true;
+
+ MToFloat32* replace = MToFloat32::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool Float32Policy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool Float32Policy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool Float32Policy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+FloatingPointPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ MIRType policyType = def->typePolicySpecialization();
+ if (policyType == MIRType::Double)
+ return DoublePolicy<Op>::staticAdjustInputs(alloc, def);
+ return Float32Policy<Op>::staticAdjustInputs(alloc, def);
+}
+
+template bool FloatingPointPolicy<0>::adjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+NoFloatPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ EnsureOperandNotFloat32(alloc, def, Op);
+ return true;
+}
+
+template bool NoFloatPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool NoFloatPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool NoFloatPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool NoFloatPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned FirstOp>
+bool
+NoFloatPolicyAfter<FirstOp>::adjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+ for (size_t op = FirstOp, e = def->numOperands(); op < e; op++)
+ EnsureOperandNotFloat32(alloc, def, op);
+ return true;
+}
+
+template bool NoFloatPolicyAfter<1>::adjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool NoFloatPolicyAfter<2>::adjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+ MIRType laneType = SimdTypeToLaneType(ins->type());
+
+ MDefinition* in = ins->getOperand(Op);
+
+ // A vector with boolean lanes requires Int32 inputs that have already been
+ // converted to 0/-1.
+    // We can't insert a MIRType::Boolean lane directly; it requires conversion.
+ if (laneType == MIRType::Boolean) {
+ MOZ_ASSERT(in->type() == MIRType::Int32, "Boolean SIMD vector requires Int32 lanes.");
+ return true;
+ }
+
+ if (in->type() == laneType)
+ return true;
+
+ MInstruction* replace;
+ if (laneType == MIRType::Int32) {
+ replace = MTruncateToInt32::New(alloc, in);
+ } else {
+ MOZ_ASSERT(laneType == MIRType::Float32);
+ replace = MToFloat32::New(alloc, in);
+ }
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool SimdScalarPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool SimdScalarPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool SimdScalarPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+template bool SimdScalarPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool
+BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::Value)
+ return true;
+
+ ins->replaceOperand(Op, BoxAt(alloc, ins, in));
+ return true;
+}
+
+template bool BoxPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool BoxPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool BoxPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+template <unsigned Op, MIRType Type>
+bool
+BoxExceptPolicy<Op, Type>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == Type)
+ return true;
+ return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
+}
+
+template bool BoxExceptPolicy<0, MIRType::Object>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool
+CacheIdPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ return true;
+ default:
+ return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
+ }
+}
+
+template bool CacheIdPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+bool
+ToDoublePolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->isToDouble() || ins->isToFloat32());
+
+ MDefinition* in = ins->getOperand(0);
+ MToFPInstruction::ConversionKind conversion;
+ if (ins->isToDouble())
+ conversion = ins->toToDouble()->conversion();
+ else
+ conversion = ins->toToFloat32()->conversion();
+
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Double:
+ case MIRType::Value:
+ // No need for boxing for these types.
+ return true;
+ case MIRType::Null:
+        // No need for boxing when the conversion will handle this type.
+ if (conversion == MToFPInstruction::NonStringPrimitives)
+ return true;
+ break;
+ case MIRType::Undefined:
+ case MIRType::Boolean:
+        // No need for boxing when the conversion will handle this type.
+ if (conversion == MToFPInstruction::NonStringPrimitives)
+ return true;
+ if (conversion == MToFPInstruction::NonNullNonStringPrimitives)
+ return true;
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+        // Objects might be effectful; symbols throw a TypeError.
+ break;
+ default:
+ break;
+ }
+
+ in = BoxAt(alloc, ins, in);
+ ins->replaceOperand(0, in);
+ return true;
+}
+
+bool
+ToInt32Policy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->isToInt32() || ins->isTruncateToInt32());
+
+ MacroAssembler::IntConversionInputKind conversion = MacroAssembler::IntConversion_Any;
+ if (ins->isToInt32())
+ conversion = ins->toToInt32()->conversion();
+
+ MDefinition* in = ins->getOperand(0);
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Double:
+ case MIRType::Value:
+ // No need for boxing for these types.
+ return true;
+ case MIRType::Undefined:
+ // No need for boxing when truncating.
+ if (ins->isTruncateToInt32())
+ return true;
+ break;
+ case MIRType::Null:
+        // No need for boxing when the conversion will handle this type.
+ if (conversion == MacroAssembler::IntConversion_Any)
+ return true;
+ break;
+ case MIRType::Boolean:
+        // No need for boxing when the conversion will handle this type.
+ if (conversion == MacroAssembler::IntConversion_Any)
+ return true;
+ if (conversion == MacroAssembler::IntConversion_NumbersOrBoolsOnly)
+ return true;
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+        // Objects might be effectful; symbols throw a TypeError.
+ break;
+ default:
+ break;
+ }
+
+ in = BoxAt(alloc, ins, in);
+ ins->replaceOperand(0, in);
+ return true;
+}
+
+bool
+ToStringPolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->isToString());
+
+ MIRType type = ins->getOperand(0)->type();
+ if (type == MIRType::Object || type == MIRType::Symbol) {
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ return true;
+ }
+
+    // TODO: remove the following line once bug 966957 has landed.
+ EnsureOperandNotFloat32(alloc, ins, 0);
+
+ return true;
+}
+
+template <unsigned Op>
+bool
+ObjectPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::Object || in->type() == MIRType::Slots ||
+ in->type() == MIRType::Elements)
+ {
+ return true;
+ }
+
+ MUnbox* replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Fallible);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(Op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+template bool ObjectPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool ObjectPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool ObjectPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool ObjectPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+template <unsigned Op>
+bool
+SimdSameAsReturnedTypePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->type() == ins->getOperand(Op)->type());
+ return true;
+}
+
+template bool
+SimdSameAsReturnedTypePolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+template bool
+SimdSameAsReturnedTypePolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+bool
+SimdAllPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ for (unsigned i = 0, e = ins->numOperands(); i < e; i++)
+ MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+ return true;
+}
+
+template <unsigned Op>
+bool
+SimdPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->typePolicySpecialization() == ins->getOperand(Op)->type());
+ return true;
+}
+
+template bool
+SimdPolicy<0>::adjustInputs(TempAllocator& alloc, MInstruction* ins);
+
+bool
+SimdShufflePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MSimdGeneralShuffle* s = ins->toSimdGeneralShuffle();
+
+ for (unsigned i = 0; i < s->numVectors(); i++)
+ MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+
+    // The remaining inputs are the lanes, which need to be Int32.
+ for (unsigned i = 0; i < s->numLanes(); i++) {
+ MDefinition* in = ins->getOperand(s->numVectors() + i);
+ if (in->type() == MIRType::Int32)
+ continue;
+
+ MInstruction* replace = MToInt32::New(alloc, in, MacroAssembler::IntConversion_NumbersOnly);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(s->numVectors() + i, replace);
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+SimdSelectPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ // First input is the mask, which has to be a boolean.
+ MOZ_ASSERT(IsBooleanSimdType(ins->getOperand(0)->type()));
+
+    // The remaining inputs are the two vectors of the specialized type.
+ for (unsigned i = 1; i < 3; i++)
+ MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+
+ return true;
+}
+
+bool
+CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MCall* call = ins->toCall();
+
+ MDefinition* func = call->getFunction();
+ if (func->type() != MIRType::Object) {
+ MInstruction* unbox = MUnbox::New(alloc, func, MIRType::Object, MUnbox::Fallible);
+ call->block()->insertBefore(call, unbox);
+ call->replaceFunction(unbox);
+
+ if (!unbox->typePolicy()->adjustInputs(alloc, unbox))
+ return false;
+ }
+
+ for (uint32_t i = 0; i < call->numStackArgs(); i++) {
+ if (!alloc.ensureBallast())
+ return false;
+ EnsureOperandNotFloat32(alloc, call, MCall::IndexOfStackArg(i));
+ }
+
+ return true;
+}
+
+bool
+CallSetElementPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ // The first operand should be an object.
+ if (!SingleObjectPolicy::staticAdjustInputs(alloc, ins))
+ return false;
+
+ // Box the index and value operands.
+ for (size_t i = 1, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Value)
+ continue;
+ ins->replaceOperand(i, BoxAt(alloc, ins, in));
+ }
+ return true;
+}
+
+bool
+InstanceOfPolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
+{
+    // Box the first operand if it isn't an object.
+ if (def->getOperand(0)->type() != MIRType::Object)
+ if (!BoxPolicy<0>::staticAdjustInputs(alloc, def))
+ return false;
+
+ return true;
+}
+
+bool
+StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* ins,
+ Scalar::Type writeType, MDefinition* value,
+ int valueOperand)
+{
+    // Storing a SIMD value requires a valueOperand that has already been
+    // SimdUnboxed. See IonBuilder::inlineSimdStore().
+ if (Scalar::isSimdType(writeType)) {
+ MOZ_ASSERT(IsSimdType(value->type()));
+ return true;
+ }
+
+ MDefinition* curValue = value;
+ // First, ensure the value is int32, boolean, double or Value.
+ // The conversion is based on TypedArrayObjectTemplate::setElementTail.
+ switch (value->type()) {
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Boolean:
+ case MIRType::Value:
+ break;
+ case MIRType::Null:
+ value->setImplicitlyUsedUnchecked();
+ value = MConstant::New(alloc, Int32Value(0));
+ ins->block()->insertBefore(ins, value->toInstruction());
+ break;
+ case MIRType::Undefined:
+ value->setImplicitlyUsedUnchecked();
+ value = MConstant::New(alloc, DoubleNaNValue());
+ ins->block()->insertBefore(ins, value->toInstruction());
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ value = BoxAt(alloc, ins, value);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ if (value != curValue) {
+ ins->replaceOperand(valueOperand, value);
+ curValue = value;
+ }
+
+ MOZ_ASSERT(value->type() == MIRType::Int32 ||
+ value->type() == MIRType::Boolean ||
+ value->type() == MIRType::Double ||
+ value->type() == MIRType::Float32 ||
+ value->type() == MIRType::Value);
+
+ switch (writeType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (value->type() != MIRType::Int32) {
+ value = MTruncateToInt32::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ case Scalar::Uint8Clamped:
+ // IonBuilder should have inserted ClampToUint8.
+ MOZ_ASSERT(value->type() == MIRType::Int32);
+ break;
+ case Scalar::Float32:
+ if (value->type() != MIRType::Float32) {
+ value = MToFloat32::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ case Scalar::Float64:
+ if (value->type() != MIRType::Double) {
+ value = MToDouble::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid array type");
+ }
+
+ if (value != curValue)
+ ins->replaceOperand(valueOperand, value);
+
+ return true;
+}
+
+bool
+StoreUnboxedScalarPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ if (!SingleObjectPolicy::staticAdjustInputs(alloc, ins))
+ return false;
+
+ MStoreUnboxedScalar* store = ins->toStoreUnboxedScalar();
+ MOZ_ASSERT(IsValidElementsType(store->elements(), store->offsetAdjustment()));
+ MOZ_ASSERT(store->index()->type() == MIRType::Int32);
+
+ return adjustValueInput(alloc, store, store->writeType(), store->value(), 2);
+}
+
+bool
+StoreTypedArrayHolePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MStoreTypedArrayElementHole* store = ins->toStoreTypedArrayElementHole();
+ MOZ_ASSERT(store->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(store->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(store->length()->type() == MIRType::Int32);
+
+ return StoreUnboxedScalarPolicy::adjustValueInput(alloc, ins, store->arrayType(), store->value(), 3);
+}
+
+bool
+StoreTypedArrayElementStaticPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MStoreTypedArrayElementStatic* store = ins->toStoreTypedArrayElementStatic();
+
+ return ConvertToInt32Policy<0>::staticAdjustInputs(alloc, ins) &&
+ StoreUnboxedScalarPolicy::adjustValueInput(alloc, ins, store->accessType(), store->value(), 1);
+}
+
+bool
+StoreUnboxedObjectOrNullPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ if (!ObjectPolicy<0>::staticAdjustInputs(alloc, ins))
+ return false;
+
+ if (!ObjectPolicy<3>::staticAdjustInputs(alloc, ins))
+ return false;
+
+ // Change the value input to a ToObjectOrNull instruction if it might be
+ // a non-null primitive. Insert a post barrier for the instruction's object
+ // and whatever its new value is, unless the value is definitely null.
+ MStoreUnboxedObjectOrNull* store = ins->toStoreUnboxedObjectOrNull();
+
+ MOZ_ASSERT(store->typedObj()->type() == MIRType::Object);
+
+ MDefinition* value = store->value();
+ if (value->type() == MIRType::Object ||
+ value->type() == MIRType::Null ||
+ value->type() == MIRType::ObjectOrNull)
+ {
+ if (value->type() != MIRType::Null) {
+ MInstruction* barrier = MPostWriteBarrier::New(alloc, store->typedObj(), value);
+ store->block()->insertBefore(store, barrier);
+ }
+ return true;
+ }
+
+ MToObjectOrNull* replace = MToObjectOrNull::New(alloc, value);
+ store->block()->insertBefore(store, replace);
+ store->setValue(replace);
+
+ if (!BoxPolicy<0>::staticAdjustInputs(alloc, replace))
+ return false;
+
+ MInstruction* barrier = MPostWriteBarrier::New(alloc, store->typedObj(), replace);
+ store->block()->insertBefore(store, barrier);
+
+ return true;
+}
+
+bool
+ClampPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MDefinition* in = ins->toClampToUint8()->input();
+
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Value:
+ break;
+ default:
+ ins->replaceOperand(0, BoxAt(alloc, ins, in));
+ break;
+ }
+
+ return true;
+}
+
+bool
+FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
+{
+ MOZ_ASSERT(ins->numOperands() == 1);
+ MIRType inputType = ins->getOperand(0)->type();
+ MIRType outputType = ins->type();
+
+    // Special case when the output is a Float32 but the input isn't.
+    if (outputType == MIRType::Float32 && inputType != MIRType::Float32) {
+        // Create an MToFloat32 to insert between the MFilterTypeSet and
+        // its uses.
+ MInstruction* replace = MToFloat32::New(alloc, ins);
+ ins->justReplaceAllUsesWithExcept(replace);
+ ins->block()->insertAfter(ins, replace);
+
+        // Reset the result type so it is no longer MIRType::Float32.
+        // Note: setResultType should not normally happen in TypePolicies;
+        // here it is fine, since the only use is the MToFloat32 we just
+        // added ourselves, and the resulting type after MToFloat32 equals
+        // the original type.
+ ins->setResultType(ins->resultTypeSet()->getKnownMIRType());
+ outputType = ins->type();
+
+ // Do the type analysis
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+
+ // Fall through to let the MFilterTypeSet adjust its input based
+ // on its new type.
+ }
+
+ // Input and output type are already in accordance.
+ if (inputType == outputType)
+ return true;
+
+ // Output is a value, box the input.
+ if (outputType == MIRType::Value) {
+ MOZ_ASSERT(inputType != MIRType::Value);
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ return true;
+ }
+
+    // The outputType should be a subset of the inputType; otherwise we are in
+    // code that has never executed yet. Bail so we can observe the new type
+    // (if that hasn't happened yet).
+ if (inputType != MIRType::Value) {
+ MBail* bail = MBail::New(alloc);
+ ins->block()->insertBefore(ins, bail);
+ bail->setDependency(ins->dependency());
+ ins->setDependency(bail);
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ }
+
+    // We can't unbox a value to null/undefined/lazyargs, so keep the output
+    // a Value as well.
+    // Note: using setResultType should not normally be done in TypePolicies;
+    // here it is fine, since the instruction has no def uses.
+ if (IsNullOrUndefined(outputType) || outputType == MIRType::MagicOptimizedArguments) {
+ MOZ_ASSERT(!ins->hasDefUses());
+ ins->setResultType(MIRType::Value);
+ return true;
+ }
+
+ // Unbox / propagate the right type.
+ MUnbox::Mode mode = MUnbox::Infallible;
+ MInstruction* replace = MUnbox::New(alloc, ins->getOperand(0), ins->type(), mode);
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(0, replace);
+ if (!replace->typePolicy()->adjustInputs(alloc, replace))
+ return false;
+
+ // Carry over the dependency the MFilterTypeSet had.
+ replace->setDependency(ins->dependency());
+
+ return true;
+}
+
+// Lists of all TypePolicy specializations which are used by MIR Instructions.
+#define TYPE_POLICY_LIST(_) \
+ _(ArithPolicy) \
+ _(BitwisePolicy) \
+ _(BoxInputsPolicy) \
+ _(CallPolicy) \
+ _(CallSetElementPolicy) \
+ _(ClampPolicy) \
+ _(ComparePolicy) \
+ _(FilterTypeSetPolicy) \
+ _(InstanceOfPolicy) \
+ _(PowPolicy) \
+ _(SimdAllPolicy) \
+ _(SimdSelectPolicy) \
+ _(SimdShufflePolicy) \
+ _(StoreTypedArrayElementStaticPolicy) \
+ _(StoreTypedArrayHolePolicy) \
+ _(StoreUnboxedScalarPolicy) \
+ _(StoreUnboxedObjectOrNullPolicy) \
+ _(TestPolicy) \
+ _(AllDoublePolicy) \
+ _(ToDoublePolicy) \
+ _(ToInt32Policy) \
+ _(ToStringPolicy) \
+ _(TypeBarrierPolicy)
+
+#define TEMPLATE_TYPE_POLICY_LIST(_) \
+ _(BoxExceptPolicy<0, MIRType::Object>) \
+ _(BoxPolicy<0>) \
+ _(ConvertToInt32Policy<0>) \
+ _(ConvertToStringPolicy<0>) \
+ _(ConvertToStringPolicy<2>) \
+ _(DoublePolicy<0>) \
+ _(FloatingPointPolicy<0>) \
+ _(IntPolicy<0>) \
+ _(IntPolicy<1>) \
+ _(Mix3Policy<ObjectPolicy<0>, StringPolicy<1>, BoxPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, BoxPolicy<1>, BoxPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, ObjectPolicy<1>, IntPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2> >) \
+ _(Mix3Policy<StringPolicy<0>, IntPolicy<1>, IntPolicy<2>>) \
+ _(Mix3Policy<StringPolicy<0>, ObjectPolicy<1>, StringPolicy<2> >) \
+ _(Mix3Policy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2> >) \
+ _(Mix3Policy<ObjectPolicy<0>, StringPolicy<1>, IntPolicy<2>>) \
+ _(Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>, IntPolicy<3>>) \
+ _(Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3> >) \
+ _(Mix3Policy<ObjectPolicy<0>, CacheIdPolicy<1>, NoFloatPolicy<2>>) \
+ _(Mix4Policy<SimdScalarPolicy<0>, SimdScalarPolicy<1>, SimdScalarPolicy<2>, SimdScalarPolicy<3> >) \
+ _(MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >) \
+ _(MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >) \
+ _(MixPolicy<ConvertToStringPolicy<0>, ObjectPolicy<1> >) \
+ _(MixPolicy<DoublePolicy<0>, DoublePolicy<1> >) \
+ _(MixPolicy<IntPolicy<0>, IntPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, CacheIdPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, IntPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, IntPolicy<2> >) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<2> >) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<3> >) \
+ _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, StringPolicy<1> >) \
+ _(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<2> >) \
+ _(MixPolicy<ObjectPolicy<1>, ConvertToStringPolicy<0> >) \
+ _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >) \
+ _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >) \
+ _(MixPolicy<StringPolicy<0>, IntPolicy<1> >) \
+ _(MixPolicy<StringPolicy<0>, StringPolicy<1> >) \
+ _(MixPolicy<BoxPolicy<0>, BoxPolicy<1> >) \
+ _(NoFloatPolicy<0>) \
+ _(NoFloatPolicyAfter<1>) \
+ _(NoFloatPolicyAfter<2>) \
+ _(ObjectPolicy<0>) \
+ _(ObjectPolicy<1>) \
+ _(ObjectPolicy<3>) \
+ _(SimdPolicy<0>) \
+ _(SimdSameAsReturnedTypePolicy<0>) \
+ _(SimdScalarPolicy<0>) \
+ _(StringPolicy<0>)
+
+
+namespace js {
+namespace jit {
+
+// For each TypePolicy specialization that is used, define
+// |TypePolicy::Data::thisTypePolicy|. This function returns one constant
+// instance of the TypePolicy, which is shared among all MIR instructions of
+// the same type.
+//
+// This macro uses __VA_ARGS__ to account for commas in template parameters.
+#define DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_(...) \
+ TypePolicy * \
+ __VA_ARGS__::Data::thisTypePolicy() \
+ { \
+ static __VA_ARGS__ singletonType; \
+ return &singletonType; \
+ }
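+
+// For illustration, for a policy such as ArithPolicy from TYPE_POLICY_LIST,
+// the macro above expands to roughly:
+//
+//     TypePolicy *
+//     ArithPolicy::Data::thisTypePolicy()
+//     {
+//         static ArithPolicy singletonType;
+//         return &singletonType;
+//     }
+//
+// so every MIR instruction using a given policy shares one constant instance.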
+
+ TYPE_POLICY_LIST(DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_)
+ TEMPLATE_TYPE_POLICY_LIST(template<> DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_)
+#undef DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_
+
+} // namespace jit
+} // namespace js
+
+namespace {
+
+// For extra-good measure in case an unqualified use is ever introduced. (The
+// main use in the macro below is explicitly qualified so as not to consult
+// this scope and find this function.)
+inline TypePolicy*
+thisTypePolicy() = delete;
+
+static MIRType
+thisTypeSpecialization()
+{
+ MOZ_CRASH("TypeSpecialization lacks definition of thisTypeSpecialization.");
+}
+
+} // namespace
+
+// For each MIR instruction, this macro defines the |typePolicy| method, which
+// uses the |thisTypePolicy| method. The |thisTypePolicy| method is either a
+// member of the MIR instruction itself (as with MGetElementCache), a member
+// inherited from the TypePolicy::Data structure, or a member inherited from
+// NoTypePolicy if the MIR instruction has no type policy.
+#define DEFINE_MIR_TYPEPOLICY_MEMBERS_(op) \
+ TypePolicy * \
+ js::jit::M##op::typePolicy() \
+ { \
+ return M##op::thisTypePolicy(); \
+ } \
+ \
+ MIRType \
+ js::jit::M##op::typePolicySpecialization() \
+ { \
+ return thisTypeSpecialization(); \
+ }
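+
+// For illustration, for an opcode such as Add the macro above expands to
+// roughly:
+//
+//     TypePolicy *
+//     js::jit::MAdd::typePolicy()
+//     {
+//         return MAdd::thisTypePolicy();
+//     }
+//
+//     MIRType
+//     js::jit::MAdd::typePolicySpecialization()
+//     {
+//         return thisTypeSpecialization();
+//     }
+//
+// where thisTypePolicy() and thisTypeSpecialization() resolve against the
+// members described above (the instruction's own, its policy's Data, or
+// NoTypePolicy).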
+
+ MIR_OPCODE_LIST(DEFINE_MIR_TYPEPOLICY_MEMBERS_)
+#undef DEFINE_MIR_TYPEPOLICY_MEMBERS_
diff --git a/js/src/jit/TypePolicy.h b/js/src/jit/TypePolicy.h
new file mode 100644
index 000000000..1c7160220
--- /dev/null
+++ b/js/src/jit/TypePolicy.h
@@ -0,0 +1,536 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TypePolicy_h
+#define jit_TypePolicy_h
+
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+
+namespace js {
+namespace jit {
+
+class MInstruction;
+class MDefinition;
+
+extern MDefinition*
+AlwaysBoxAt(TempAllocator& alloc, MInstruction* at, MDefinition* operand);
+
+// A type policy directs the type analysis phases, which insert conversion,
+// boxing, unboxing, and type changes as necessary.
+class TypePolicy
+{
+ public:
+ // Analyze the inputs of the instruction and perform one of the following
+ // actions for each input:
+ // * Nothing; the input already type-checks.
+ // * If untyped, optionally ask the input to try and specialize its value.
+ // * Replace the operand with a conversion instruction.
+ // * Insert an unconditional deoptimization (no conversion possible).
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) = 0;
+};
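+
+// A MIR instruction typically opts into a policy by inheriting that policy's
+// nested Data struct (defined via the EMPTY_DATA_ / SPECIALIZATION_DATA_
+// macros below); TypePolicy.cpp then wires up its typePolicy() accessor. As a
+// rough sketch, a hypothetical instruction
+//
+//     class MMyOp
+//       : public MBinaryInstruction,
+//         public MixPolicy<ObjectPolicy<0>, IntPolicy<1> >::Data
+//     {
+//         // ...
+//     };
+//
+// would have the type-analysis phase call MMyOp::typePolicy()->adjustInputs(),
+// which unboxes operand 0 to an Object and operand 1 to an Int32 as needed.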
+
+struct TypeSpecializationData
+{
+ protected:
+    // Specifies the specialization level:
+    //  - < Value: this input type is expected and required.
+    //  - == None: this op should not be specialized.
+ MIRType specialization_;
+
+ MIRType thisTypeSpecialization() {
+ return specialization_;
+ }
+
+ public:
+ MIRType specialization() const {
+ return specialization_;
+ }
+};
+
+#define EMPTY_DATA_ \
+ struct Data \
+ { \
+ static TypePolicy* thisTypePolicy(); \
+ }
+
+#define INHERIT_DATA_(DATA_TYPE) \
+ struct Data : public DATA_TYPE \
+ { \
+ static TypePolicy* thisTypePolicy(); \
+ }
+
+#define SPECIALIZATION_DATA_ INHERIT_DATA_(TypeSpecializationData)
+
+class NoTypePolicy
+{
+ public:
+ struct Data
+ {
+ static TypePolicy* thisTypePolicy() {
+ return nullptr;
+ }
+ };
+};
+
+class BoxInputsPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+class ArithPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+class AllDoublePolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def);
+};
+
+class BitwisePolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+class ComparePolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+// Policy for MTest instructions.
+class TestPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class TypeBarrierPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class CallPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+// Policy for MPow. First operand Double; second Double or Int32.
+class PowPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+// Expect a string for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class StringPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a string for operand Op. Else a ToString instruction is inserted.
+template <unsigned Op>
+class ConvertToStringPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a Boolean for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class BooleanPolicy final : private TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect an Int for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class IntPolicy final : private TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect an Int for operand Op. Else a ToInt32 instruction is inserted.
+template <unsigned Op>
+class ConvertToInt32Policy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect an Int for operand Op. Else a TruncateToInt32 instruction is inserted.
+template <unsigned Op>
+class TruncateToInt32Policy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a double for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class DoublePolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a float32 for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class Float32Policy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a Float32 or a Double for operand Op; Float32 is preferred when the
+// result type is set as such. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class FloatingPointPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+template <unsigned Op>
+class NoFloatPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Policy for guarding variadic instructions such as object / array state
+// instructions.
+template <unsigned FirstOp>
+class NoFloatPolicyAfter final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+// Box objects or strings as an input to a ToDouble instruction.
+class ToDoublePolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box objects, strings and undefined as input to a ToInt32 instruction.
+class ToInt32Policy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box objects as input to a ToString instruction.
+class ToStringPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+template <unsigned Op>
+class ObjectPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Single-object input. If the input is a Value, it is unboxed. If it is
+// a primitive, we use ValueToNonNullObject.
+typedef ObjectPolicy<0> SingleObjectPolicy;
+
+// Convert an operand so that its type matches the scalar (lane) type of the
+// SIMD type returned by the instruction.
+template <unsigned Op>
+class SimdScalarPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+class SimdAllPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+template <unsigned Op>
+class SimdPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class SimdSelectPolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class SimdShufflePolicy final : public TypePolicy
+{
+ public:
+ SPECIALIZATION_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+// SIMD value-type policy: use the instruction's return type to determine how
+// to unbox its operand.
+template <unsigned Op>
+class SimdSameAsReturnedTypePolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+template <unsigned Op>
+class BoxPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Boxes everything except inputs of type Type.
+template <unsigned Op, MIRType Type>
+class BoxExceptPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Box if not a typical property id (string, symbol, int32).
+template <unsigned Op>
+class CacheIdPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Combine multiple policies.
+template <class Lhs, class Rhs>
+class MixPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins) {
+ return Lhs::staticAdjustInputs(alloc, ins) && Rhs::staticAdjustInputs(alloc, ins);
+ }
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Combine three policies.
+template <class Policy1, class Policy2, class Policy3>
+class Mix3Policy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins) {
+ return Policy1::staticAdjustInputs(alloc, ins) &&
+ Policy2::staticAdjustInputs(alloc, ins) &&
+ Policy3::staticAdjustInputs(alloc, ins);
+ }
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Combine four policies. (A variadic MixPolicy would make Mix3/Mix4 unnecessary.)
+template <class Policy1, class Policy2, class Policy3, class Policy4>
+class Mix4Policy : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins) {
+ return Policy1::staticAdjustInputs(alloc, ins) &&
+ Policy2::staticAdjustInputs(alloc, ins) &&
+ Policy3::staticAdjustInputs(alloc, ins) &&
+ Policy4::staticAdjustInputs(alloc, ins);
+ }
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+class CallSetElementPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+// The first operand is boxed to a Value (unless it is already an object).
+// The second operand (if specified) is forcefully unboxed to an object.
+class InstanceOfPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+class StoreTypedArrayHolePolicy;
+class StoreTypedArrayElementStaticPolicy;
+
+class StoreUnboxedScalarPolicy : public TypePolicy
+{
+ private:
+ static MOZ_MUST_USE bool adjustValueInput(TempAllocator& alloc, MInstruction* ins,
+ Scalar::Type arrayType, MDefinition* value,
+ int valueOperand);
+
+ friend class StoreTypedArrayHolePolicy;
+ friend class StoreTypedArrayElementStaticPolicy;
+
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class StoreTypedArrayHolePolicy final : public StoreUnboxedScalarPolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class StoreTypedArrayElementStaticPolicy final : public StoreUnboxedScalarPolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class StoreUnboxedObjectOrNullPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override;
+};
+
+// Accepts integers and doubles. Everything else is boxed.
+class ClampPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+class FilterTypeSetPolicy final : public TypePolicy
+{
+ public:
+ EMPTY_DATA_;
+ virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
+};
+
+#undef SPECIALIZATION_DATA_
+#undef INHERIT_DATA_
+#undef EMPTY_DATA_
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TypePolicy_h */
diff --git a/js/src/jit/TypedObjectPrediction.cpp b/js/src/jit/TypedObjectPrediction.cpp
new file mode 100644
index 000000000..c01ad5eda
--- /dev/null
+++ b/js/src/jit/TypedObjectPrediction.cpp
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TypedObjectPrediction.h"
+
+using namespace js;
+using namespace jit;
+
+static const size_t ALL_FIELDS = SIZE_MAX;
+
+// Sets the prediction to be the common prefix of descrA and descrB,
+// considering at most the first max fields.
+//
+// In the case where the current prediction is a specific struct,
+// and we are now seeing a second struct, then descrA and descrB will be
+// the current and new struct and max will be ALL_FIELDS.
+//
+// In the case where the current prediction is already a prefix, and
+// we are now seeing an additional struct, then descrA will be the
+// current struct and max will be the current prefix length, and
+// descrB will be the new struct.
+//
+// (Note that in general it is not important which struct is passed as
+// descrA and which struct is passed as descrB, as the operation is
+// symmetric.)
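+//
+// For example (illustrative only): given two structs {x: int32, y: float64,
+// z: string} and {x: int32, y: float64, w: uint8}, the first two fields match,
+// so the prediction becomes a two-field Prefix of descrA.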
+void
+TypedObjectPrediction::markAsCommonPrefix(const StructTypeDescr& descrA,
+ const StructTypeDescr& descrB,
+ size_t max)
+{
+ // Clamp |max| to the smaller of the two field counts. The loop below
+ // walks the fields in order and stops at the first mismatch, so |i|
+ // ends up as the length of the common prefix.
+ if (max > descrA.fieldCount())
+ max = descrA.fieldCount();
+ if (max > descrB.fieldCount())
+ max = descrB.fieldCount();
+
+ size_t i = 0;
+ for (; i < max; i++) {
+ if (&descrA.fieldName(i) != &descrB.fieldName(i))
+ break;
+ if (&descrA.fieldDescr(i) != &descrB.fieldDescr(i))
+ break;
+ MOZ_ASSERT(descrA.fieldOffset(i) == descrB.fieldOffset(i));
+ }
+
+ if (i == 0) {
+ // An empty prefix is not particularly useful.
+ markInconsistent();
+ } else {
+ setPrefix(descrA, i);
+ }
+}
+
+void
+TypedObjectPrediction::addDescr(const TypeDescr& descr)
+{
+ switch (predictionKind()) {
+ case Empty:
+ return setDescr(descr);
+
+ case Inconsistent:
+ return; // keep same state
+
+ case Descr: {
+ if (&descr == data_.descr)
+ return; // keep same state
+
+ if (descr.kind() != data_.descr->kind())
+ return markInconsistent();
+
+ if (descr.kind() != type::Struct)
+ return markInconsistent();
+
+ const StructTypeDescr& structDescr = descr.as<StructTypeDescr>();
+ const StructTypeDescr& currentDescr = data_.descr->as<StructTypeDescr>();
+ markAsCommonPrefix(structDescr, currentDescr, ALL_FIELDS);
+ return;
+ }
+
+ case Prefix:
+ if (descr.kind() != type::Struct)
+ return markInconsistent();
+
+ markAsCommonPrefix(*data_.prefix.descr,
+ descr.as<StructTypeDescr>(),
+ data_.prefix.fields);
+ return;
+ }
+
+ MOZ_CRASH("Bad predictionKind");
+}
+
+type::Kind
+TypedObjectPrediction::kind() const
+{
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ break;
+
+ case TypedObjectPrediction::Descr:
+ return descr().kind();
+
+ case TypedObjectPrediction::Prefix:
+ return prefix().descr->kind();
+ }
+
+ MOZ_CRASH("Bad prediction kind");
+}
+
+bool
+TypedObjectPrediction::ofArrayKind() const
+{
+ switch (kind()) {
+ case type::Scalar:
+ case type::Reference:
+ case type::Simd:
+ case type::Struct:
+ return false;
+
+ case type::Array:
+ return true;
+ }
+
+ MOZ_CRASH("Bad kind");
+}
+
+bool
+TypedObjectPrediction::hasKnownSize(uint32_t* out) const
+{
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ return false;
+
+ case TypedObjectPrediction::Descr:
+ *out = descr().size();
+ return true;
+
+ case TypedObjectPrediction::Prefix:
+ // We only know a prefix of the struct fields, hence we do not
+ // know its complete size.
+ return false;
+
+ default:
+ MOZ_CRASH("Bad prediction kind");
+ }
+}
+
+const TypedProto*
+TypedObjectPrediction::getKnownPrototype() const
+{
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ return nullptr;
+
+ case TypedObjectPrediction::Descr:
+ if (descr().is<ComplexTypeDescr>())
+ return &descr().as<ComplexTypeDescr>().instancePrototype();
+ return nullptr;
+
+ case TypedObjectPrediction::Prefix:
+ // We only know a prefix of the struct fields, hence we cannot
+ // say for certain what its prototype will be.
+ return nullptr;
+
+ default:
+ MOZ_CRASH("Bad prediction kind");
+ }
+}
+
+template<typename T>
+typename T::Type
+TypedObjectPrediction::extractType() const
+{
+ MOZ_ASSERT(kind() == T::Kind);
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ break;
+
+ case TypedObjectPrediction::Descr:
+ return descr().as<T>().type();
+
+ case TypedObjectPrediction::Prefix:
+ break; // Prefixes are always structs, never scalars, etc.
+ }
+
+ MOZ_CRASH("Bad prediction kind");
+}
+
+ScalarTypeDescr::Type
+TypedObjectPrediction::scalarType() const
+{
+ return extractType<ScalarTypeDescr>();
+}
+
+ReferenceTypeDescr::Type
+TypedObjectPrediction::referenceType() const
+{
+ return extractType<ReferenceTypeDescr>();
+}
+
+SimdType
+TypedObjectPrediction::simdType() const
+{
+ return descr().as<SimdTypeDescr>().type();
+}
+
+bool
+TypedObjectPrediction::hasKnownArrayLength(int32_t* length) const
+{
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ return false;
+
+ case TypedObjectPrediction::Descr:
+ // In later patches, this condition will always be true
+ // so long as this represents an array.
+ if (descr().is<ArrayTypeDescr>()) {
+ *length = descr().as<ArrayTypeDescr>().length();
+ return true;
+ }
+ return false;
+
+ case TypedObjectPrediction::Prefix:
+ // Prefixes are always structs, never arrays
+ return false;
+
+ default:
+ MOZ_CRASH("Bad prediction kind");
+ }
+}
+
+TypedObjectPrediction
+TypedObjectPrediction::arrayElementType() const
+{
+ MOZ_ASSERT(ofArrayKind());
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ break;
+
+ case TypedObjectPrediction::Descr:
+ return TypedObjectPrediction(descr().as<ArrayTypeDescr>().elementType());
+
+ case TypedObjectPrediction::Prefix:
+ break; // Prefixes are always structs, never arrays
+ }
+ MOZ_CRASH("Bad prediction kind");
+}
+
+bool
+TypedObjectPrediction::hasFieldNamedPrefix(const StructTypeDescr& descr,
+ size_t fieldCount,
+ jsid id,
+ size_t* fieldOffset,
+ TypedObjectPrediction* out,
+ size_t* index) const
+{
+ // Find the index of the field |id| if any.
+ if (!descr.fieldIndex(id, index))
+ return false;
+
+ // Check whether the index falls within our known safe prefix.
+ if (*index >= fieldCount)
+ return false;
+
+ // Load the offset and type.
+ *fieldOffset = descr.fieldOffset(*index);
+ *out = TypedObjectPrediction(descr.fieldDescr(*index));
+ return true;
+}
+
+bool
+TypedObjectPrediction::hasFieldNamed(jsid id,
+ size_t* fieldOffset,
+ TypedObjectPrediction* fieldType,
+ size_t* fieldIndex) const
+{
+ MOZ_ASSERT(kind() == type::Struct);
+
+ switch (predictionKind()) {
+ case TypedObjectPrediction::Empty:
+ case TypedObjectPrediction::Inconsistent:
+ return false;
+
+ case TypedObjectPrediction::Descr:
+ return hasFieldNamedPrefix(
+ descr().as<StructTypeDescr>(), ALL_FIELDS,
+ id, fieldOffset, fieldType, fieldIndex);
+
+ case TypedObjectPrediction::Prefix:
+ return hasFieldNamedPrefix(
+ *prefix().descr, prefix().fields,
+ id, fieldOffset, fieldType, fieldIndex);
+
+ default:
+ MOZ_CRASH("Bad prediction kind");
+ }
+}
diff --git a/js/src/jit/TypedObjectPrediction.h b/js/src/jit/TypedObjectPrediction.h
new file mode 100644
index 000000000..2e3caf2cf
--- /dev/null
+++ b/js/src/jit/TypedObjectPrediction.h
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TypedObjectPrediction_h
+#define jit_TypedObjectPrediction_h
+
+#include "builtin/TypedObject.h"
+#include "jit/JitAllocPolicy.h"
+
+namespace js {
+namespace jit {
+
+// A TypedObjectPrediction summarizes what we know about the type of a
+// typed object at a given point (if anything). The prediction will
+// begin as precise as possible and degrade to less precise as more
+// typed object types are merged using |addDescr()|.
+//
+// To create a TypedObjectPrediction from TI, one initially creates an
+// empty prediction using the |TypedObjectPrediction()| constructor,
+// and then invokes |addDescr()| with the prototype of each typed
+// object. The prediction will automatically downgrade to less and
+// less specific settings as needed. Note that creating a prediction
+// in this way can never yield precise array dimensions, since TI only
+// tracks the prototype.
+//
+// TypedObjectPredictions can also result from other predictions using
+// the query methods (e.g., |arrayElementType()|). In those cases, the
+// precise array dimensions may be known.
+//
+// To query a prediction, you must first check whether it is "useless"
+// using |isUseless()|. If this is true, there is no usable
+// information to be extracted. Otherwise, you can inquire after the
+// |kind()| of the data (struct, array, etc) and from there make more
+// specific queries.
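+//
+// A minimal usage sketch (hypothetical |descrA|, |descrB|, and |id|, not part
+// of this patch):
+//
+//   TypedObjectPrediction pred;
+//   pred.addDescr(descrA);
+//   pred.addDescr(descrB);
+//   if (!pred.isUseless() && pred.kind() == type::Struct) {
+//       size_t offset, index;
+//       TypedObjectPrediction fieldPred;
+//       if (pred.hasFieldNamed(id, &offset, &fieldPred, &index)) {
+//           // offset/fieldPred/index describe the predicted field.
+//       }
+//   }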
+class TypedObjectPrediction {
+ public:
+ enum PredictionKind {
+ // No data.
+ Empty,
+
+ // Inconsistent data.
+ Inconsistent,
+
+ // Multiple different struct types flow into the same location,
+ // but they share fields in common. Prefix indicates that the first
+ // N fields of some struct type are known to be valid. This occurs
+ // in a subtyping scenario.
+ Prefix,
+
+ // The TypeDescr of the value is known. This is the most specific
+ // possible value and includes precise array bounds.
+ Descr
+ };
+
+ struct PrefixData {
+ const StructTypeDescr* descr;
+ size_t fields;
+ };
+
+ union Data {
+ const TypeDescr* descr;
+ PrefixData prefix;
+ };
+
+ private:
+ PredictionKind kind_;
+ Data data_;
+
+ PredictionKind predictionKind() const {
+ return kind_;
+ }
+
+ void markInconsistent() {
+ kind_ = Inconsistent;
+ }
+
+ const TypeDescr& descr() const {
+ MOZ_ASSERT(predictionKind() == Descr);
+ return *data_.descr;
+ }
+
+ const PrefixData& prefix() const {
+ MOZ_ASSERT(predictionKind() == Prefix);
+ return data_.prefix;
+ }
+
+ void setDescr(const TypeDescr& descr) {
+ kind_ = Descr;
+ data_.descr = &descr;
+ }
+
+ void setPrefix(const StructTypeDescr& descr, size_t fields) {
+ kind_ = Prefix;
+ data_.prefix.descr = &descr;
+ data_.prefix.fields = fields;
+ }
+
+ void markAsCommonPrefix(const StructTypeDescr& descrA,
+ const StructTypeDescr& descrB,
+ size_t max);
+
+ template<typename T>
+ typename T::Type extractType() const;
+
+ bool hasFieldNamedPrefix(const StructTypeDescr& descr,
+ size_t fieldCount,
+ jsid id,
+ size_t* fieldOffset,
+ TypedObjectPrediction* out,
+ size_t* index) const;
+
+ public:
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Constructing a prediction. Generally, you start with an empty
+ // prediction and invoke addDescr() repeatedly.
+
+ TypedObjectPrediction() {
+ kind_ = Empty;
+ }
+
+ explicit TypedObjectPrediction(const TypeDescr& descr) {
+ setDescr(descr);
+ }
+
+ TypedObjectPrediction(const StructTypeDescr& descr, size_t fields) {
+ setPrefix(descr, fields);
+ }
+
+ void addDescr(const TypeDescr& descr);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Queries that are always valid.
+
+ bool isUseless() const {
+ return predictionKind() == Empty || predictionKind() == Inconsistent;
+ }
+
+ // Determines whether we can predict the prototype for the typed
+ // object instance. Returns null if we cannot or if the typed
+ // object is of scalar/reference kind, in which case instances are
+ // not objects and hence do not have a (publicly available)
+ // prototype.
+ const TypedProto* getKnownPrototype() const;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Queries that are valid if not useless.
+
+ type::Kind kind() const;
+
+ bool ofArrayKind() const;
+
+ // Returns true if the size of this typed object is statically
+ // known and sets |*out| to that size. Otherwise returns false.
+ //
+ // The size may not be statically known if (1) the object is
+ // an array whose dimensions are unknown or (2) only a prefix
+ // of its type is known.
+ bool hasKnownSize(uint32_t* out) const;
+
+ //////////////////////////////////////////////////////////////////////
+ // Simple operations
+ //
+ // Only valid when |kind()| is Scalar, Reference, or Simd (as appropriate).
+
+ ScalarTypeDescr::Type scalarType() const;
+ ReferenceTypeDescr::Type referenceType() const;
+ SimdType simdType() const;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Queries valid only for arrays.
+
+ // Returns true if the length of the array is statically known,
+ // and sets |*length| appropriately. Otherwise returns false.
+ bool hasKnownArrayLength(int32_t* length) const;
+
+ // Returns a prediction for the array element type, if any.
+ TypedObjectPrediction arrayElementType() const;
+
+ //////////////////////////////////////////////////////////////////////
+ // Struct operations
+ //
+ // Only valid when |kind() == TypeDescr::Struct|
+
+ // Returns true if the predicted type includes a field named |id|
+ // and sets |*fieldOffset|, |*fieldType|, and |*fieldIndex| with
+ // the offset (in bytes), type, and index of the field
+ // respectively. Otherwise returns false.
+ bool hasFieldNamed(jsid id,
+ size_t* fieldOffset,
+ TypedObjectPrediction* fieldType,
+ size_t* fieldIndex) const;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
new file mode 100644
index 000000000..628b31fae
--- /dev/null
+++ b/js/src/jit/VMFunctions.cpp
@@ -0,0 +1,1361 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/VMFunctions.h"
+
+#include "jsgc.h"
+
+#include "builtin/TypedObject.h"
+#include "frontend/BytecodeCompiler.h"
+#include "jit/arm/Simulator-arm.h"
+#include "jit/BaselineIC.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "vm/ArrayObject.h"
+#include "vm/Debugger.h"
+#include "vm/Interpreter.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/BaselineFrame-inl.h"
+#include "jit/JitFrames-inl.h"
+#include "vm/Debugger-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/StringObject-inl.h"
+#include "vm/TypeInference-inl.h"
+#include "vm/UnboxedObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// Statics are initialized to null.
+/* static */ VMFunction* VMFunction::functions;
+
+AutoDetectInvalidation::AutoDetectInvalidation(JSContext* cx, MutableHandleValue rval)
+ : cx_(cx),
+ ionScript_(GetTopJitJSScript(cx)->ionScript()),
+ rval_(rval),
+ disabled_(false)
+{ }
+
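+// Prepend |this| to the global linked list rooted at |VMFunction::functions|.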
+void
+VMFunction::addToFunctions()
+{
+ this->next = functions;
+ functions = this;
+}
+
+bool
+InvokeFunction(JSContext* cx, HandleObject obj, bool constructing, uint32_t argc, Value* argv,
+ MutableHandleValue rval)
+{
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogStartEvent(logger, TraceLogger_Call);
+
+ AutoArrayRooter argvRoot(cx, argc + 1 + constructing, argv);
+
+ // Data in the argument vector is arranged for a JIT -> JIT call.
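+ // Layout (as consumed below): argv[0] is |this|, argv[1..argc] are the
+ // arguments and, when constructing, argv[1 + argc] is |new.target|.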
+ RootedValue thisv(cx, argv[0]);
+ Value* argvWithoutThis = argv + 1;
+
+ RootedValue fval(cx, ObjectValue(*obj));
+ if (constructing) {
+ if (!IsConstructor(fval)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval, nullptr);
+ return false;
+ }
+
+ ConstructArgs cargs(cx);
+ if (!cargs.init(cx, argc))
+ return false;
+
+ for (uint32_t i = 0; i < argc; i++)
+ cargs[i].set(argvWithoutThis[i]);
+
+ RootedValue newTarget(cx, argvWithoutThis[argc]);
+
+ // If |this| hasn't been created, or is JS_UNINITIALIZED_LEXICAL,
+ // we can use normal construction code without creating an extraneous
+ // object.
+ if (thisv.isMagic()) {
+ MOZ_ASSERT(thisv.whyMagic() == JS_IS_CONSTRUCTING ||
+ thisv.whyMagic() == JS_UNINITIALIZED_LEXICAL);
+
+ RootedObject obj(cx);
+ if (!Construct(cx, fval, cargs, newTarget, &obj))
+ return false;
+
+ rval.setObject(*obj);
+ return true;
+ }
+
+ // Otherwise the default |this| has already been created. We could
+ // almost perform a *call* at this point, but we'd break |new.target|
+ // in the function. So in this one weird case we call a one-off
+ // construction path that *won't* set |this| to JS_IS_CONSTRUCTING.
+ return InternalConstructWithProvidedThis(cx, fval, thisv, cargs, newTarget, rval);
+ }
+
+ InvokeArgs args(cx);
+ if (!args.init(cx, argc))
+ return false;
+
+ for (size_t i = 0; i < argc; i++)
+ args[i].set(argvWithoutThis[i]);
+
+ return Call(cx, fval, thisv, args, rval);
+}
+
+bool
+InvokeFunctionShuffleNewTarget(JSContext* cx, HandleObject obj, uint32_t numActualArgs,
+ uint32_t numFormalArgs, Value* argv, MutableHandleValue rval)
+{
+ MOZ_ASSERT(numFormalArgs > numActualArgs);
+ argv[1 + numActualArgs] = argv[1 + numFormalArgs];
+ return InvokeFunction(cx, obj, true, numActualArgs, argv, rval);
+}
+
+bool
+CheckOverRecursed(JSContext* cx)
+{
+ // We just failed the jitStackLimit check. There are two possible reasons:
+ // - jitStackLimit was the real stack limit and we're over-recursed
+ // - jitStackLimit was set to UINTPTR_MAX by JSRuntime::requestInterrupt
+ // and we need to call JSRuntime::handleInterrupt.
+#ifdef JS_SIMULATOR
+ JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, 0, return false);
+#else
+ JS_CHECK_RECURSION(cx, return false);
+#endif
+ gc::MaybeVerifyBarriers(cx);
+ return cx->runtime()->handleInterrupt(cx);
+}
+
+// This function can get called in two contexts. In the usual context, it's
+// called with earlyCheck=false, after the env chain has been initialized on
+// a baseline frame. In this case, it's ok to throw an exception, so a failed
+// stack check returns false, and a successful stack check prompts a check for
+// an interrupt from the runtime, which may also cause a false return.
+//
+// In the second case, it's called with earlyCheck=true, prior to frame
+// initialization. An exception cannot be thrown in this instance, so instead
+// an error flag is set on the frame and true returned.
+bool
+CheckOverRecursedWithExtra(JSContext* cx, BaselineFrame* frame,
+ uint32_t extra, uint32_t earlyCheck)
+{
+ MOZ_ASSERT_IF(earlyCheck, !frame->overRecursed());
+
+ // See |CheckOverRecursed| above. This is a variant of that function which
+ // accepts an argument holding the extra stack space needed for the Baseline
+ // frame that's about to be pushed.
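+ // Approximate the current stack pointer with the address of a local and
+ // subtract |extra| to simulate the frame that will be pushed.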
+ uint8_t spDummy;
+ uint8_t* checkSp = (&spDummy) - extra;
+ if (earlyCheck) {
+#ifdef JS_SIMULATOR
+ (void)checkSp;
+ JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, frame->setOverRecursed());
+#else
+ JS_CHECK_RECURSION_WITH_SP(cx, checkSp, frame->setOverRecursed());
+#endif
+ return true;
+ }
+
+ // The OVERRECURSED flag may have already been set on the frame by an
+ // early over-recursed check. If so, throw immediately.
+ if (frame->overRecursed())
+ return false;
+
+#ifdef JS_SIMULATOR
+ JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, return false);
+#else
+ JS_CHECK_RECURSION_WITH_SP(cx, checkSp, return false);
+#endif
+
+ gc::MaybeVerifyBarriers(cx);
+ return cx->runtime()->handleInterrupt(cx);
+}
+
+JSObject*
+BindVar(JSContext* cx, HandleObject envChain)
+{
+ JSObject* obj = envChain;
+ while (!obj->isQualifiedVarObj())
+ obj = obj->enclosingEnvironment();
+ MOZ_ASSERT(obj);
+ return obj;
+}
+
+bool
+DefVar(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject envChain)
+{
+ // Given the ScopeChain, extract the VarObj.
+ RootedObject obj(cx, BindVar(cx, envChain));
+ return DefVarOperation(cx, obj, dn, attrs);
+}
+
+bool
+DefLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject envChain)
+{
+ // Find the extensible lexical scope.
+ Rooted<LexicalEnvironmentObject*> lexicalEnv(cx,
+ &NearestEnclosingExtensibleLexicalEnvironment(envChain));
+
+ // Find the variables object.
+ RootedObject varObj(cx, BindVar(cx, envChain));
+ return DefLexicalOperation(cx, lexicalEnv, varObj, dn, attrs);
+}
+
+bool
+DefGlobalLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs)
+{
+ Rooted<LexicalEnvironmentObject*> globalLexical(cx, &cx->global()->lexicalEnvironment());
+ return DefLexicalOperation(cx, globalLexical, cx->global(), dn, attrs);
+}
+
+bool
+MutatePrototype(JSContext* cx, HandlePlainObject obj, HandleValue value)
+{
+ if (!value.isObjectOrNull())
+ return true;
+
+ RootedObject newProto(cx, value.toObjectOrNull());
+ return SetPrototype(cx, obj, newProto);
+}
+
+bool
+InitProp(JSContext* cx, HandleObject obj, HandlePropertyName name, HandleValue value,
+ jsbytecode* pc)
+{
+ RootedId id(cx, NameToId(name));
+ return InitPropertyOperation(cx, JSOp(*pc), obj, id, value);
+}
+
+template<bool Equal>
+bool
+LooselyEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ if (!js::LooselyEqual(cx, lhs, rhs, res))
+ return false;
+ if (!Equal)
+ *res = !*res;
+ return true;
+}
+
+template bool LooselyEqual<true>(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+template bool LooselyEqual<false>(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+
+template<bool Equal>
+bool
+StrictlyEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ if (!js::StrictlyEqual(cx, lhs, rhs, res))
+ return false;
+ if (!Equal)
+ *res = !*res;
+ return true;
+}
+
+template bool StrictlyEqual<true>(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+template bool StrictlyEqual<false>(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+
+bool
+LessThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ return LessThanOperation(cx, lhs, rhs, res);
+}
+
+bool
+LessThanOrEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ return LessThanOrEqualOperation(cx, lhs, rhs, res);
+}
+
+bool
+GreaterThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ return GreaterThanOperation(cx, lhs, rhs, res);
+}
+
+bool
+GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res)
+{
+ return GreaterThanOrEqualOperation(cx, lhs, rhs, res);
+}
+
+template<bool Equal>
+bool
+StringsEqual(JSContext* cx, HandleString lhs, HandleString rhs, bool* res)
+{
+ if (!js::EqualStrings(cx, lhs, rhs, res))
+ return false;
+ if (!Equal)
+ *res = !*res;
+ return true;
+}
+
+template bool StringsEqual<true>(JSContext* cx, HandleString lhs, HandleString rhs, bool* res);
+template bool StringsEqual<false>(JSContext* cx, HandleString lhs, HandleString rhs, bool* res);
+
+bool
+ArraySpliceDense(JSContext* cx, HandleObject obj, uint32_t start, uint32_t deleteCount)
+{
+ JS::AutoValueArray<4> argv(cx);
+ argv[0].setUndefined();
+ argv[1].setObject(*obj);
+ argv[2].set(Int32Value(start));
+ argv[3].set(Int32Value(deleteCount));
+
+ return js::array_splice_impl(cx, 2, argv.begin(), false);
+}
+
+bool
+ArrayPopDense(JSContext* cx, HandleObject obj, MutableHandleValue rval)
+{
+ MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<UnboxedArrayObject>());
+
+ AutoDetectInvalidation adi(cx, rval);
+
+ JS::AutoValueArray<2> argv(cx);
+ argv[0].setUndefined();
+ argv[1].setObject(*obj);
+ if (!js::array_pop(cx, 0, argv.begin()))
+ return false;
+
+ // If the result is |undefined|, the array was probably empty and we
+ // have to monitor the return value.
+ rval.set(argv[0]);
+ if (rval.isUndefined())
+ TypeScript::Monitor(cx, rval);
+ return true;
+}
+
+bool
+ArrayPushDense(JSContext* cx, HandleObject obj, HandleValue v, uint32_t* length)
+{
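+ // Fast path: try to append directly to the dense or unboxed elements
+ // without updating type information; fall back to js::array_push below
+ // if that does not complete.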
+ *length = GetAnyBoxedOrUnboxedArrayLength(obj);
+ DenseElementResult result =
+ SetOrExtendAnyBoxedOrUnboxedDenseElements(cx, obj, *length, v.address(), 1,
+ ShouldUpdateTypes::DontUpdate);
+ if (result != DenseElementResult::Incomplete) {
+ (*length)++;
+ return result == DenseElementResult::Success;
+ }
+
+ JS::AutoValueArray<3> argv(cx);
+ argv[0].setUndefined();
+ argv[1].setObject(*obj);
+ argv[2].set(v);
+ if (!js::array_push(cx, 1, argv.begin()))
+ return false;
+
+ *length = argv[0].toInt32();
+ return true;
+}
+
+bool
+ArrayShiftDense(JSContext* cx, HandleObject obj, MutableHandleValue rval)
+{
+ MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<UnboxedArrayObject>());
+
+ AutoDetectInvalidation adi(cx, rval);
+
+ JS::AutoValueArray<2> argv(cx);
+ argv[0].setUndefined();
+ argv[1].setObject(*obj);
+ if (!js::array_shift(cx, 0, argv.begin()))
+ return false;
+
+ // If the result is |undefined|, the array was probably empty and we
+ // have to monitor the return value.
+ rval.set(argv[0]);
+ if (rval.isUndefined())
+ TypeScript::Monitor(cx, rval);
+ return true;
+}
+
+JSString*
+ArrayJoin(JSContext* cx, HandleObject array, HandleString sep)
+{
+ JS::AutoValueArray<3> argv(cx);
+ argv[0].setUndefined();
+ argv[1].setObject(*array);
+ argv[2].setString(sep);
+ if (!js::array_join(cx, 1, argv.begin()))
+ return nullptr;
+ return argv[0].toString();
+}
+
+bool
+CharCodeAt(JSContext* cx, HandleString str, int32_t index, uint32_t* code)
+{
+ char16_t c;
+ if (!str->getChar(cx, index, &c))
+ return false;
+ *code = c;
+ return true;
+}
+
+JSFlatString*
+StringFromCharCode(JSContext* cx, int32_t code)
+{
+ char16_t c = char16_t(code);
+
+ if (StaticStrings::hasUnit(c))
+ return cx->staticStrings().getUnit(c);
+
+ return NewStringCopyN<CanGC>(cx, &c, 1);
+}
+
+JSString*
+StringFromCodePoint(JSContext* cx, int32_t codePoint)
+{
+ RootedValue rval(cx, Int32Value(codePoint));
+ if (!str_fromCodePoint_one_arg(cx, rval, &rval))
+ return nullptr;
+
+ return rval.toString();
+}
+
+bool
+SetProperty(JSContext* cx, HandleObject obj, HandlePropertyName name, HandleValue value,
+ bool strict, jsbytecode* pc)
+{
+ RootedId id(cx, NameToId(name));
+
+ JSOp op = JSOp(*pc);
+
+ if (op == JSOP_SETALIASEDVAR || op == JSOP_INITALIASEDLEXICAL) {
+ // Aliased var assigns ignore readonly attributes on the property, as
+ // required for initializing 'const' closure variables.
+ Shape* shape = obj->as<NativeObject>().lookup(cx, name);
+ MOZ_ASSERT(shape && shape->hasSlot());
+ obj->as<NativeObject>().setSlotWithType(cx, shape, value);
+ return true;
+ }
+
+ RootedValue receiver(cx, ObjectValue(*obj));
+ ObjectOpResult result;
+ if (MOZ_LIKELY(!obj->getOpsSetProperty())) {
+ if (!NativeSetProperty(
+ cx, obj.as<NativeObject>(), id, value, receiver,
+ (op == JSOP_SETNAME || op == JSOP_STRICTSETNAME ||
+ op == JSOP_SETGNAME || op == JSOP_STRICTSETGNAME)
+ ? Unqualified
+ : Qualified,
+ result))
+ {
+ return false;
+ }
+ } else {
+ if (!SetProperty(cx, obj, id, value, receiver, result))
+ return false;
+ }
+ return result.checkStrictErrorOrWarning(cx, obj, id, strict);
+}
+
+bool
+InterruptCheck(JSContext* cx)
+{
+ gc::MaybeVerifyBarriers(cx);
+
+ {
+ JSRuntime* rt = cx->runtime();
+ JitRuntime::AutoPreventBackedgePatching apbp(rt);
+ rt->jitRuntime()->patchIonBackedges(rt, JitRuntime::BackedgeLoopHeader);
+ }
+
+ return CheckForInterrupt(cx);
+}
+
+void*
+MallocWrapper(JSRuntime* rt, size_t nbytes)
+{
+ return rt->pod_malloc<uint8_t>(nbytes);
+}
+
+JSObject*
+NewCallObject(JSContext* cx, HandleShape shape, HandleObjectGroup group)
+{
+ JSObject* obj = CallObject::create(cx, shape, group);
+ if (!obj)
+ return nullptr;
+
+ // The JIT creates call objects in the nursery, so it elides barriers for
+ // the initializing writes. The interpreter, however, may have allocated
+ // the call object tenured, so add a barrier as needed before re-entering.
+ if (!IsInsideNursery(obj))
+ cx->runtime()->gc.storeBuffer.putWholeCell(obj);
+
+ return obj;
+}
+
+JSObject*
+NewSingletonCallObject(JSContext* cx, HandleShape shape)
+{
+ JSObject* obj = CallObject::createSingleton(cx, shape);
+ if (!obj)
+ return nullptr;
+
+ // The JIT creates call objects in the nursery, so it elides barriers for
+ // the initializing writes. The interpreter, however, may have allocated
+ // the call object tenured, so add a barrier as needed before re-entering.
+ MOZ_ASSERT(!IsInsideNursery(obj),
+ "singletons are created in the tenured heap");
+ cx->runtime()->gc.storeBuffer.putWholeCell(obj);
+
+ return obj;
+}
+
+JSObject*
+NewStringObject(JSContext* cx, HandleString str)
+{
+ return StringObject::create(cx, str);
+}
+
+bool
+OperatorIn(JSContext* cx, HandleValue key, HandleObject obj, bool* out)
+{
+ RootedId id(cx);
+ return ToPropertyKey(cx, key, &id) &&
+ HasProperty(cx, obj, id, out);
+}
+
+bool
+OperatorInI(JSContext* cx, uint32_t index, HandleObject obj, bool* out)
+{
+ RootedValue key(cx, Int32Value(index));
+ return OperatorIn(cx, key, obj, out);
+}
+
+bool
+GetIntrinsicValue(JSContext* cx, HandlePropertyName name, MutableHandleValue rval)
+{
+ if (!GlobalObject::getIntrinsicValue(cx, cx->global(), name, rval))
+ return false;
+
+ // This function is called when we try to compile a cold getintrinsic
+ // op. MCallGetIntrinsicValue has an AliasSet of None for optimization
+ // purposes, as its side effect is not observable from JS. We are
+ // guaranteed to bail out after this function, but because of its AliasSet,
+ // type info will not be reflowed. Manually monitor here.
+ TypeScript::Monitor(cx, rval);
+
+ return true;
+}
+
+bool
+CreateThis(JSContext* cx, HandleObject callee, HandleObject newTarget, MutableHandleValue rval)
+{
+ rval.set(MagicValue(JS_IS_CONSTRUCTING));
+
+ if (callee->is<JSFunction>()) {
+ RootedFunction fun(cx, &callee->as<JSFunction>());
+ if (fun->isInterpreted() && fun->isConstructor()) {
+ JSScript* script = fun->getOrCreateScript(cx);
+ if (!script || !script->ensureHasTypes(cx))
+ return false;
+ if (fun->isBoundFunction() || script->isDerivedClassConstructor()) {
+ rval.set(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ } else {
+ JSObject* thisObj = CreateThisForFunction(cx, callee, newTarget, GenericObject);
+ if (!thisObj)
+ return false;
+ rval.set(ObjectValue(*thisObj));
+ }
+ }
+ }
+
+ return true;
+}
+
+void
+GetDynamicName(JSContext* cx, JSObject* envChain, JSString* str, Value* vp)
+{
+ // Look up a string on the env chain, returning either the value found or
+ // undefined through |vp|. This function is infallible, and cannot GC or
+ // invalidate.
+
+ JSAtom* atom;
+ if (str->isAtom()) {
+ atom = &str->asAtom();
+ } else {
+ atom = AtomizeString(cx, str);
+ if (!atom) {
+ vp->setUndefined();
+ return;
+ }
+ }
+
+ if (!frontend::IsIdentifier(atom) || frontend::IsKeyword(atom)) {
+ vp->setUndefined();
+ return;
+ }
+
+ Shape* shape = nullptr;
+ JSObject* scope = nullptr;
+ JSObject* pobj = nullptr;
+ if (LookupNameNoGC(cx, atom->asPropertyName(), envChain, &scope, &pobj, &shape)) {
+ if (FetchNameNoGC(pobj, shape, MutableHandleValue::fromMarkedLocation(vp)))
+ return;
+ }
+
+ vp->setUndefined();
+}
+
+void
+PostWriteBarrier(JSRuntime* rt, JSObject* obj)
+{
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ rt->gc.storeBuffer.putWholeCell(obj);
+}
+
+static const size_t MAX_WHOLE_CELL_BUFFER_SIZE = 4096;
+
+void
+PostWriteElementBarrier(JSRuntime* rt, JSObject* obj, int32_t index)
+{
+ MOZ_ASSERT(!IsInsideNursery(obj));
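+
+ // For big dense-element objects that are not already in the whole-cell
+ // buffer, record only the single element slot; otherwise buffer the
+ // whole cell below.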
+ if (obj->is<NativeObject>() &&
+ !obj->as<NativeObject>().isInWholeCellBuffer() &&
+ uint32_t(index) < obj->as<NativeObject>().getDenseInitializedLength() &&
+ (obj->as<NativeObject>().getDenseInitializedLength() > MAX_WHOLE_CELL_BUFFER_SIZE
+#ifdef JS_GC_ZEAL
+ || rt->hasZealMode(gc::ZealMode::ElementsBarrier)
+#endif
+ ))
+ {
+ rt->gc.storeBuffer.putSlot(&obj->as<NativeObject>(), HeapSlot::Element, index, 1);
+ return;
+ }
+
+ rt->gc.storeBuffer.putWholeCell(obj);
+}
+
+void
+PostGlobalWriteBarrier(JSRuntime* rt, JSObject* obj)
+{
+ MOZ_ASSERT(obj->is<GlobalObject>());
+ if (!obj->compartment()->globalWriteBarriered) {
+ PostWriteBarrier(rt, obj);
+ obj->compartment()->globalWriteBarriered = 1;
+ }
+}
+
+uint32_t
+GetIndexFromString(JSString* str)
+{
+ // Returns UINT32_MAX to signal failure to get the index; i.e., it is
+ // impossible to distinguish between failing to get the index and the
+ // actual index UINT32_MAX.
+
+ if (!str->isAtom())
+ return UINT32_MAX;
+
+ uint32_t index;
+ JSAtom* atom = &str->asAtom();
+ if (!atom->isIndex(&index))
+ return UINT32_MAX;
+
+ return index;
+}
+
+bool
+DebugPrologue(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool* mustReturn)
+{
+ *mustReturn = false;
+
+ switch (Debugger::onEnterFrame(cx, frame)) {
+ case JSTRAP_CONTINUE:
+ return true;
+
+ case JSTRAP_RETURN:
+ // The script is going to return immediately, so we have to call the
+ // debug epilogue handler as well.
+ MOZ_ASSERT(frame->hasReturnValue());
+ *mustReturn = true;
+ return jit::DebugEpilogue(cx, frame, pc, true);
+
+ case JSTRAP_THROW:
+ case JSTRAP_ERROR:
+ return false;
+
+ default:
+ MOZ_CRASH("bad Debugger::onEnterFrame status");
+ }
+}
+
+bool
+DebugEpilogueOnBaselineReturn(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ if (!DebugEpilogue(cx, frame, pc, true)) {
+ // DebugEpilogue popped the frame by updating jitTop, so run the stop event
+ // here before we enter the exception handler.
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogStopEvent(logger, TraceLogger_Baseline);
+ TraceLogStopEvent(logger, TraceLogger_Scripts);
+ return false;
+ }
+
+ return true;
+}
+
+bool
+DebugEpilogue(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool ok)
+{
+ // If Debugger::onLeaveFrame returns |true| we have to return the frame's
+ // return value. If it returns |false|, the debugger threw an exception.
+ // In both cases we have to pop debug scopes.
+ ok = Debugger::onLeaveFrame(cx, frame, pc, ok);
+
+ // Unwind to the outermost environment and set pc to the end of the
+ // script, regardless of error.
+ EnvironmentIter ei(cx, frame, pc);
+ UnwindAllEnvironmentsInFrame(cx, ei);
+ JSScript* script = frame->script();
+ frame->setOverridePc(script->lastPC());
+
+ if (!ok) {
+ // Pop this frame by updating jitTop, so that the exception handling
+ // code will start at the previous frame.
+
+ JitFrameLayout* prefix = frame->framePrefix();
+ EnsureBareExitFrame(cx, prefix);
+ return false;
+ }
+
+ // Clear the override pc. This is not necessary for correctness: the frame
+ // will return immediately, but this simplifies the check we emit in debug
+ // builds after each callVM, to ensure this flag is not set.
+ frame->clearOverridePc();
+ return true;
+}
+
+void
+FrameIsDebuggeeCheck(BaselineFrame* frame)
+{
+ if (frame->script()->isDebuggee())
+ frame->setIsDebuggee();
+}
+
+JSObject*
+CreateGenerator(JSContext* cx, BaselineFrame* frame)
+{
+ return GeneratorObject::create(cx, frame);
+}
+
+bool
+NormalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame, jsbytecode* pc,
+ uint32_t stackDepth)
+{
+ MOZ_ASSERT(*pc == JSOP_YIELD);
+
+ // Return value is still on the stack.
+ MOZ_ASSERT(stackDepth >= 1);
+
+ // The expression stack slots are stored on the stack in reverse order, so
+ // we copy them to a Vector and pass a pointer to that instead. We use
+ // stackDepth - 1 because we don't want to include the return value.
+ AutoValueVector exprStack(cx);
+ if (!exprStack.reserve(stackDepth - 1))
+ return false;
+
+ size_t firstSlot = frame->numValueSlots() - stackDepth;
+ for (size_t i = 0; i < stackDepth - 1; i++)
+ exprStack.infallibleAppend(*frame->valueSlot(firstSlot + i));
+
+ MOZ_ASSERT(exprStack.length() == stackDepth - 1);
+
+ return GeneratorObject::normalSuspend(cx, obj, frame, pc, exprStack.begin(), stackDepth - 1);
+}
+
+bool
+FinalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame, jsbytecode* pc)
+{
+ MOZ_ASSERT(*pc == JSOP_FINALYIELDRVAL);
+
+ if (!GeneratorObject::finalSuspend(cx, obj)) {
+
+ TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+ TraceLogStopEvent(logger, TraceLogger_Engine);
+ TraceLogStopEvent(logger, TraceLogger_Scripts);
+
+ // Leave this frame and propagate the exception to the caller.
+ return DebugEpilogue(cx, frame, pc, /* ok = */ false);
+ }
+
+ return true;
+}
+
+bool
+InterpretResume(JSContext* cx, HandleObject obj, HandleValue val, HandlePropertyName kind,
+ MutableHandleValue rval)
+{
+ MOZ_ASSERT(obj->is<GeneratorObject>());
+
+ RootedValue selfHostedFun(cx);
+ if (!GlobalObject::getIntrinsicValue(cx, cx->global(), cx->names().InterpretGeneratorResume,
+ &selfHostedFun))
+ {
+ return false;
+ }
+
+ MOZ_ASSERT(selfHostedFun.toObject().is<JSFunction>());
+
+ FixedInvokeArgs<3> args(cx);
+
+ args[0].setObject(*obj);
+ args[1].set(val);
+ args[2].setString(kind);
+
+ return Call(cx, selfHostedFun, UndefinedHandleValue, args, rval);
+}
+
+bool
+DebugAfterYield(JSContext* cx, BaselineFrame* frame)
+{
+ // The BaselineFrame has just been constructed by JSOP_RESUME in the
+ // caller. We need to set its debuggee flag as necessary.
+ if (frame->script()->isDebuggee())
+ frame->setIsDebuggee();
+ return true;
+}
+
+bool
+GeneratorThrowOrClose(JSContext* cx, BaselineFrame* frame, Handle<GeneratorObject*> genObj,
+ HandleValue arg, uint32_t resumeKind)
+{
+ // Set the frame's pc to the current resume pc, so that frame iterators
+ // work. This function always returns false, so we're guaranteed to enter
+ // the exception handler where we will clear the pc.
+ JSScript* script = frame->script();
+ uint32_t offset = script->yieldOffsets()[genObj->yieldIndex()];
+ frame->setOverridePc(script->offsetToPC(offset));
+
+ MOZ_ALWAYS_TRUE(DebugAfterYield(cx, frame));
+ MOZ_ALWAYS_FALSE(js::GeneratorThrowOrClose(cx, frame, genObj, arg, resumeKind));
+ return false;
+}
+
+bool
+CheckGlobalOrEvalDeclarationConflicts(JSContext* cx, BaselineFrame* frame)
+{
+ RootedScript script(cx, frame->script());
+ RootedObject envChain(cx, frame->environmentChain());
+ RootedObject varObj(cx, BindVar(cx, envChain));
+
+ if (script->isForEval()) {
+ // Strict eval and eval in parameter default expressions have their
+ // own call objects.
+ //
+ // Non-strict eval may introduce 'var' bindings that conflict with
+ // lexical bindings in an enclosing lexical scope.
+ if (!script->bodyScope()->hasEnvironment()) {
+ MOZ_ASSERT(!script->strict() &&
+ (!script->enclosingScope()->is<FunctionScope>() ||
+ !script->enclosingScope()->as<FunctionScope>().hasParameterExprs()));
+ if (!CheckEvalDeclarationConflicts(cx, script, envChain, varObj))
+ return false;
+ }
+ } else {
+ Rooted<LexicalEnvironmentObject*> lexicalEnv(cx,
+ &NearestEnclosingExtensibleLexicalEnvironment(envChain));
+ if (!CheckGlobalDeclarationConflicts(cx, script, lexicalEnv, varObj))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+GlobalNameConflictsCheckFromIon(JSContext* cx, HandleScript script)
+{
+ Rooted<LexicalEnvironmentObject*> globalLexical(cx, &cx->global()->lexicalEnvironment());
+ return CheckGlobalDeclarationConflicts(cx, script, globalLexical, cx->global());
+}
+
+bool
+InitFunctionEnvironmentObjects(JSContext* cx, BaselineFrame* frame)
+{
+ return frame->initFunctionEnvironmentObjects(cx);
+}
+
+bool
+NewArgumentsObject(JSContext* cx, BaselineFrame* frame, MutableHandleValue res)
+{
+ ArgumentsObject* obj = ArgumentsObject::createExpected(cx, frame);
+ if (!obj)
+ return false;
+ res.setObject(*obj);
+ return true;
+}
+
+JSObject*
+InitRestParameter(JSContext* cx, uint32_t length, Value* rest, HandleObject templateObj,
+ HandleObject objRes)
+{
+ if (objRes) {
+ Rooted<ArrayObject*> arrRes(cx, &objRes->as<ArrayObject>());
+
+ MOZ_ASSERT(!arrRes->getDenseInitializedLength());
+ MOZ_ASSERT(arrRes->group() == templateObj->group());
+
+ // Fast path: we managed to allocate the array inline; initialize the
+ // slots.
+ if (length > 0) {
+ if (!arrRes->ensureElements(cx, length))
+ return nullptr;
+ arrRes->setDenseInitializedLength(length);
+ arrRes->initDenseElements(0, rest, length);
+ arrRes->setLengthInt32(length);
+ }
+ return arrRes;
+ }
+
+ NewObjectKind newKind = templateObj->group()->shouldPreTenure()
+ ? TenuredObject
+ : GenericObject;
+ ArrayObject* arrRes = NewDenseCopiedArray(cx, length, rest, nullptr, newKind);
+ if (arrRes)
+ arrRes->setGroup(templateObj->group());
+ return arrRes;
+}
+
+bool
+HandleDebugTrap(JSContext* cx, BaselineFrame* frame, uint8_t* retAddr, bool* mustReturn)
+{
+ *mustReturn = false;
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = script->baselineScript()->icEntryFromReturnAddress(retAddr).pc(script);
+
+ MOZ_ASSERT(frame->isDebuggee());
+ MOZ_ASSERT(script->stepModeEnabled() || script->hasBreakpointsAt(pc));
+
+ RootedValue rval(cx);
+ JSTrapStatus status = JSTRAP_CONTINUE;
+
+ if (script->stepModeEnabled())
+ status = Debugger::onSingleStep(cx, &rval);
+
+ if (status == JSTRAP_CONTINUE && script->hasBreakpointsAt(pc))
+ status = Debugger::onTrap(cx, &rval);
+
+ switch (status) {
+ case JSTRAP_CONTINUE:
+ break;
+
+ case JSTRAP_ERROR:
+ return false;
+
+ case JSTRAP_RETURN:
+ *mustReturn = true;
+ frame->setReturnValue(rval);
+ return jit::DebugEpilogue(cx, frame, pc, true);
+
+ case JSTRAP_THROW:
+ cx->setPendingException(rval);
+ return false;
+
+ default:
+ MOZ_CRASH("Invalid trap status");
+ }
+
+ return true;
+}
+
+bool
+OnDebuggerStatement(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool* mustReturn)
+{
+ *mustReturn = false;
+
+ switch (Debugger::onDebuggerStatement(cx, frame)) {
+ case JSTRAP_ERROR:
+ return false;
+
+ case JSTRAP_CONTINUE:
+ return true;
+
+ case JSTRAP_RETURN:
+ *mustReturn = true;
+ return jit::DebugEpilogue(cx, frame, pc, true);
+
+ case JSTRAP_THROW:
+ return false;
+
+ default:
+ MOZ_CRASH("Invalid trap status");
+ }
+}
+
+bool
+GlobalHasLiveOnDebuggerStatement(JSContext* cx)
+{
+ return cx->compartment()->isDebuggee() &&
+ Debugger::hasLiveHook(cx->global(), Debugger::OnDebuggerStatement);
+}
+
+bool
+PushLexicalEnv(JSContext* cx, BaselineFrame* frame, Handle<LexicalScope*> scope)
+{
+ return frame->pushLexicalEnvironment(cx, scope);
+}
+
+bool
+PopLexicalEnv(JSContext* cx, BaselineFrame* frame)
+{
+ frame->popOffEnvironmentChain<LexicalEnvironmentObject>();
+ return true;
+}
+
+bool
+DebugLeaveThenPopLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ frame->popOffEnvironmentChain<LexicalEnvironmentObject>();
+ return true;
+}
+
+bool
+FreshenLexicalEnv(JSContext* cx, BaselineFrame* frame)
+{
+ return frame->freshenLexicalEnvironment(cx);
+}
+
+bool
+DebugLeaveThenFreshenLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ return frame->freshenLexicalEnvironment(cx);
+}
+
+bool
+RecreateLexicalEnv(JSContext* cx, BaselineFrame* frame)
+{
+ return frame->recreateLexicalEnvironment(cx);
+}
+
+bool
+DebugLeaveThenRecreateLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ return frame->recreateLexicalEnvironment(cx);
+}
+
+bool
+DebugLeaveLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc)
+{
+ MOZ_ASSERT(frame->script()->baselineScript()->hasDebugInstrumentation());
+ if (cx->compartment()->isDebuggee())
+ DebugEnvironments::onPopLexical(cx, frame, pc);
+ return true;
+}
+
+bool
+PushVarEnv(JSContext* cx, BaselineFrame* frame, HandleScope scope)
+{
+ return frame->pushVarEnvironment(cx, scope);
+}
+
+bool
+PopVarEnv(JSContext* cx, BaselineFrame* frame)
+{
+ frame->popOffEnvironmentChain<VarEnvironmentObject>();
+ return true;
+}
+
+bool
+EnterWith(JSContext* cx, BaselineFrame* frame, HandleValue val, Handle<WithScope*> templ)
+{
+ return EnterWithOperation(cx, frame, val, templ);
+}
+
+bool
+LeaveWith(JSContext* cx, BaselineFrame* frame)
+{
+ if (MOZ_UNLIKELY(frame->isDebuggee()))
+ DebugEnvironments::onPopWith(frame);
+ frame->popOffEnvironmentChain<WithEnvironmentObject>();
+ return true;
+}
+
+bool
+InitBaselineFrameForOsr(BaselineFrame* frame, InterpreterFrame* interpFrame,
+ uint32_t numStackValues)
+{
+ return frame->initForOsr(interpFrame, numStackValues);
+}
+
+JSObject*
+CreateDerivedTypedObj(JSContext* cx, HandleObject descr,
+ HandleObject owner, int32_t offset)
+{
+ MOZ_ASSERT(descr->is<TypeDescr>());
+ MOZ_ASSERT(owner->is<TypedObject>());
+ Rooted<TypeDescr*> descr1(cx, &descr->as<TypeDescr>());
+ Rooted<TypedObject*> owner1(cx, &owner->as<TypedObject>());
+ return OutlineTypedObject::createDerived(cx, descr1, owner1, offset);
+}
+
+JSString*
+StringReplace(JSContext* cx, HandleString string, HandleString pattern, HandleString repl)
+{
+ MOZ_ASSERT(string);
+ MOZ_ASSERT(pattern);
+ MOZ_ASSERT(repl);
+
+ return str_replace_string_raw(cx, string, pattern, repl);
+}
+
+bool
+RecompileImpl(JSContext* cx, bool force)
+{
+ MOZ_ASSERT(cx->currentlyRunningInJit());
+ JitActivationIterator activations(cx->runtime());
+ JitFrameIterator iter(activations);
+
+ MOZ_ASSERT(iter.type() == JitFrame_Exit);
+ ++iter;
+
+ RootedScript script(cx, iter.script());
+ MOZ_ASSERT(script->hasIonScript());
+
+ if (!IsIonEnabled(cx))
+ return true;
+
+ MethodStatus status = Recompile(cx, script, nullptr, nullptr, force);
+ if (status == Method_Error)
+ return false;
+
+ return true;
+}
+
+bool
+ForcedRecompile(JSContext* cx)
+{
+ return RecompileImpl(cx, /* force = */ true);
+}
+
+bool
+Recompile(JSContext* cx)
+{
+ return RecompileImpl(cx, /* force = */ false);
+}
+
+bool
+SetDenseOrUnboxedArrayElement(JSContext* cx, HandleObject obj, int32_t index,
+ HandleValue value, bool strict)
+{
+ // This function is called from Ion code for StoreElementHole's OOL path.
+ // In this case we know the object is native or an unboxed array and that
+ // no type changes are needed.
+
+ DenseElementResult result =
+ SetOrExtendAnyBoxedOrUnboxedDenseElements(cx, obj, index, value.address(), 1,
+ ShouldUpdateTypes::DontUpdate);
+ if (result != DenseElementResult::Incomplete)
+ return result == DenseElementResult::Success;
+
+ RootedValue indexVal(cx, Int32Value(index));
+ return SetObjectElement(cx, obj, indexVal, value, strict);
+}
+
+void
+AutoDetectInvalidation::setReturnOverride()
+{
+ cx_->runtime()->jitRuntime()->setIonReturnOverride(rval_.get());
+}
+
+void
+AssertValidObjectPtr(JSContext* cx, JSObject* obj)
+{
+#ifdef DEBUG
+ // Check what we can, so that we'll hopefully assert/crash if we get a
+ // bogus object (pointer).
+ MOZ_ASSERT(obj->compartment() == cx->compartment());
+ MOZ_ASSERT(obj->runtimeFromMainThread() == cx->runtime());
+
+ MOZ_ASSERT_IF(!obj->hasLazyGroup() && obj->maybeShape(),
+ obj->group()->clasp() == obj->maybeShape()->getObjectClass());
+
+ if (obj->isTenured()) {
+ MOZ_ASSERT(obj->isAligned());
+ gc::AllocKind kind = obj->asTenured().getAllocKind();
+ MOZ_ASSERT(gc::IsObjectAllocKind(kind));
+ MOZ_ASSERT(obj->asTenured().zone() == cx->zone());
+ }
+#endif
+}
+
+void
+AssertValidObjectOrNullPtr(JSContext* cx, JSObject* obj)
+{
+ if (obj)
+ AssertValidObjectPtr(cx, obj);
+}
+
+void
+AssertValidStringPtr(JSContext* cx, JSString* str)
+{
+#ifdef DEBUG
+ // We can't closely inspect strings from another runtime.
+ if (str->runtimeFromAnyThread() != cx->runtime()) {
+ MOZ_ASSERT(str->isPermanentAtom());
+ return;
+ }
+
+ if (str->isAtom())
+ MOZ_ASSERT(str->zone()->isAtomsZone());
+ else
+ MOZ_ASSERT(str->zone() == cx->zone());
+
+ MOZ_ASSERT(str->isAligned());
+ MOZ_ASSERT(str->length() <= JSString::MAX_LENGTH);
+
+ gc::AllocKind kind = str->getAllocKind();
+ if (str->isFatInline()) {
+ MOZ_ASSERT(kind == gc::AllocKind::FAT_INLINE_STRING ||
+ kind == gc::AllocKind::FAT_INLINE_ATOM);
+ } else if (str->isExternal()) {
+ MOZ_ASSERT(kind == gc::AllocKind::EXTERNAL_STRING);
+ } else if (str->isAtom()) {
+ MOZ_ASSERT(kind == gc::AllocKind::ATOM);
+ } else if (str->isFlat()) {
+ MOZ_ASSERT(kind == gc::AllocKind::STRING ||
+ kind == gc::AllocKind::FAT_INLINE_STRING ||
+ kind == gc::AllocKind::EXTERNAL_STRING);
+ } else {
+ MOZ_ASSERT(kind == gc::AllocKind::STRING);
+ }
+#endif
+}
+
+void
+AssertValidSymbolPtr(JSContext* cx, JS::Symbol* sym)
+{
+ // We can't closely inspect symbols from another runtime.
+ if (sym->runtimeFromAnyThread() != cx->runtime()) {
+ MOZ_ASSERT(sym->isWellKnownSymbol());
+ return;
+ }
+
+ MOZ_ASSERT(sym->zone()->isAtomsZone());
+ MOZ_ASSERT(sym->isAligned());
+ if (JSString* desc = sym->description()) {
+ MOZ_ASSERT(desc->isAtom());
+ AssertValidStringPtr(cx, desc);
+ }
+
+ MOZ_ASSERT(sym->getAllocKind() == gc::AllocKind::SYMBOL);
+}
+
+void
+AssertValidValue(JSContext* cx, Value* v)
+{
+ if (v->isObject())
+ AssertValidObjectPtr(cx, &v->toObject());
+ else if (v->isString())
+ AssertValidStringPtr(cx, v->toString());
+ else if (v->isSymbol())
+ AssertValidSymbolPtr(cx, v->toSymbol());
+}
+
+bool
+ObjectIsCallable(JSObject* obj)
+{
+ return obj->isCallable();
+}
+
+bool
+ObjectIsConstructor(JSObject* obj)
+{
+ return obj->isConstructor();
+}
+
+void
+MarkValueFromIon(JSRuntime* rt, Value* vp)
+{
+ TraceManuallyBarrieredEdge(&rt->gc.marker, vp, "write barrier");
+}
+
+void
+MarkStringFromIon(JSRuntime* rt, JSString** stringp)
+{
+ if (*stringp)
+ TraceManuallyBarrieredEdge(&rt->gc.marker, stringp, "write barrier");
+}
+
+void
+MarkObjectFromIon(JSRuntime* rt, JSObject** objp)
+{
+ if (*objp)
+ TraceManuallyBarrieredEdge(&rt->gc.marker, objp, "write barrier");
+}
+
+void
+MarkShapeFromIon(JSRuntime* rt, Shape** shapep)
+{
+ TraceManuallyBarrieredEdge(&rt->gc.marker, shapep, "write barrier");
+}
+
+void
+MarkObjectGroupFromIon(JSRuntime* rt, ObjectGroup** groupp)
+{
+ TraceManuallyBarrieredEdge(&rt->gc.marker, groupp, "write barrier");
+}
+
+bool
+ThrowRuntimeLexicalError(JSContext* cx, unsigned errorNumber)
+{
+ ScriptFrameIter iter(cx);
+ RootedScript script(cx, iter.script());
+ ReportRuntimeLexicalError(cx, errorNumber, script, iter.pc());
+ return false;
+}
+
+bool
+ThrowReadOnlyError(JSContext* cx, int32_t index)
+{
+ RootedValue val(cx, Int32Value(index));
+ ReportValueError(cx, JSMSG_READ_ONLY, JSDVG_IGNORE_STACK, val, nullptr);
+ return false;
+}
+
+bool
+ThrowBadDerivedReturn(JSContext* cx, HandleValue v)
+{
+ ReportValueError(cx, JSMSG_BAD_DERIVED_RETURN, JSDVG_IGNORE_STACK, v, nullptr);
+ return false;
+}
+
+bool
+BaselineThrowUninitializedThis(JSContext* cx, BaselineFrame* frame)
+{
+ return ThrowUninitializedThis(cx, frame);
+}
+
+
+bool
+ThrowObjectCoercible(JSContext* cx, HandleValue v)
+{
+ MOZ_ASSERT(v.isUndefined() || v.isNull());
+ MOZ_ALWAYS_FALSE(ToObjectSlow(cx, v, true));
+ return false;
+}
+
+bool
+BaselineGetFunctionThis(JSContext* cx, BaselineFrame* frame, MutableHandleValue res)
+{
+ return GetFunctionThis(cx, frame, res);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
new file mode 100644
index 000000000..f754d58c7
--- /dev/null
+++ b/js/src/jit/VMFunctions.h
@@ -0,0 +1,808 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_VMFunctions_h
+#define jit_VMFunctions_h
+
+#include "mozilla/Attributes.h"
+
+#include "jspubtd.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/JitFrames.h"
+
+namespace js {
+
+class NamedLambdaObject;
+class WithScope;
+class InlineTypedObject;
+class GeneratorObject;
+class TypedArrayObject;
+
+namespace jit {
+
+enum DataType {
+ Type_Void,
+ Type_Bool,
+ Type_Int32,
+ Type_Double,
+ Type_Pointer,
+ Type_Object,
+ Type_Value,
+ Type_Handle
+};
+
+struct PopValues
+{
+ uint32_t numValues;
+
+ explicit PopValues(uint32_t numValues)
+ : numValues(numValues)
+ { }
+};
+
+enum MaybeTailCall {
+ TailCall,
+ NonTailCall
+};
+
+// Contains information about a virtual machine function that can be called
+// from JIT code. Functions described in this manner must conform to a simple
+// protocol: the return type must have a special "failure" value (for example,
+// false for bool, or nullptr for Objects). If the function is designed to
+// return a value that does not meet this requirement - such as
+// object-or-nullptr, or an integer, an optional, final outParam can be
+// specified. In this case, the return type must be boolean to indicate
+// failure.
+//
+// All functions described by VMFunction take a JSContext * as a first
+// argument, and are treated as re-entrant into the VM and therefore fallible.
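+//
+// For example (hypothetical function, not part of this patch), a VM call such
+// as
+//
+//   bool DoThing(JSContext* cx, HandleObject obj, MutableHandleValue rval);
+//
+// reports failure by returning false and delivers its result through the
+// trailing outparam, so it would be described with outParam == Type_Handle and
+// returnType == Type_Bool.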
+struct VMFunction
+{
+ // Global linked list of all VMFunctions.
+ static VMFunction* functions;
+ VMFunction* next;
+
+ // Address of the C function.
+ void* wrapped;
+
+ const char* name_;
+
+ // Number of arguments expected, excluding JSContext * as an implicit
+ // first argument and an outparam as a possible implicit final argument.
+ uint32_t explicitArgs;
+
+ enum ArgProperties {
+ WordByValue = 0,
+ DoubleByValue = 1,
+ WordByRef = 2,
+ DoubleByRef = 3,
+ // BitMask version.
+ Word = 0,
+ Double = 1,
+ ByRef = 2
+ };
+
+ // Contains properties about the first 16 arguments.
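+ // Each explicit argument takes two bits (see argProperties() below); e.g.
+ // a hypothetical (word, double-by-value, double-by-ref) argument list
+ // would be encoded as 0b110100.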
+ uint32_t argumentProperties;
+
+ // Which arguments should be passed in float register on platforms that
+ // have them.
+ uint32_t argumentPassedInFloatRegs;
+
+ // The outparam may be any Type_*, and must be the final argument to the
+ // function, if not Void. outParam != Void implies that the return type
+ // has a boolean failure mode.
+ DataType outParam;
+
+ // Type returned by the C function and used by the VMFunction wrapper to
+ // check for failures of the C function. Valid failure/return types are
+ // boolean and object pointers which are asserted inside the VMFunction
+ // constructor. If the C function uses an outparam (!= Type_Void), then
+ // the only valid failure/return type is boolean -- an object pointer
+ // would be pointless because the wrapper would only compare it against
+ // nullptr before discarding its value.
+ DataType returnType;
+
+ // Note: a maximum of seven root types is supported.
+ enum RootType {
+ RootNone = 0,
+ RootObject,
+ RootString,
+ RootPropertyName,
+ RootFunction,
+ RootValue,
+ RootCell
+ };
+
+ // Contains a combination of enumerated types used by the GC for marking
+ // arguments of the VM wrapper.
+ uint64_t argumentRootTypes;
+
+ // The root type of the out param if outParam == Type_Handle.
+ RootType outParamRootType;
+
+ // Number of Values the VM wrapper should pop from the stack when it returns.
+ // Used by baseline IC stubs so that they can use tail calls to call the VM
+ // wrapper.
+ uint32_t extraValuesToPop;
+
+ // On some architectures, called functions need to explicitly push their
+ // return address. For a tail call there is nothing to push, so tail-callness
+ // needs to be known at compile time.
+ MaybeTailCall expectTailCall;
+
+ uint32_t argc() const {
+ // JSContext * + args + (OutParam? *)
+ return 1 + explicitArgc() + ((outParam == Type_Void) ? 0 : 1);
+ }
+
+ DataType failType() const {
+ return returnType;
+ }
+
+ ArgProperties argProperties(uint32_t explicitArg) const {
+ return ArgProperties((argumentProperties >> (2 * explicitArg)) & 3);
+ }
+
+ RootType argRootType(uint32_t explicitArg) const {
+ return RootType((argumentRootTypes >> (3 * explicitArg)) & 7);
+ }
+
+ bool argPassedInFloatReg(uint32_t explicitArg) const {
+ return ((argumentPassedInFloatRegs >> explicitArg) & 1) == 1;
+ }
+
+ const char* name() const {
+ return name_;
+ }
+
+ // Return the stack size consumed by explicit arguments.
+ size_t explicitStackSlots() const {
+ size_t stackSlots = explicitArgs;
+
+ // Fetch all double-word flags of explicit arguments.
+ uint32_t n =
+ ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & 0x55555555 // = Mask double-size args.
+ & argumentProperties;
+
+ // Add the number of double-word flags. (Expect a few loop
+ // iterations.)
+ while (n) {
+ stackSlots++;
+ n &= n - 1;
+ }
+ return stackSlots;
+ }
+
+    // Double-size arguments which are passed by value take the space of
+    // 2 C arguments. This function computes the number of arguments
+    // expected by the C function. This is not the same as
+    // explicitStackSlots because a reference to a stack slot may take one
+    // fewer register in the total count.
+ size_t explicitArgc() const {
+ size_t stackSlots = explicitArgs;
+
+ // Fetch all explicit arguments.
+ uint32_t n =
+ ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & argumentProperties;
+
+ // Filter double-size arguments (0x5 = 0b0101) and remove (& ~)
+ // arguments passed by reference (0b1010 >> 1 == 0b0101).
+ n = (n & 0x55555555) & ~(n >> 1);
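+
+        // Worked example (illustrative only): if argumentProperties were
+        // 0b1101 (arg0 = DoubleByValue, arg1 = DoubleByRef), this leaves only
+        // bit 0 set, so the loop below adds 1 for arg0 and explicitArgc()
+        // returns 2 + 1 = 3 C arguments.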
+
+        // Add the number of double-words transferred by value. (expect a few
+        // loop iterations)
+ while (n) {
+ stackSlots++;
+ n &= n - 1;
+ }
+ return stackSlots;
+ }
+
+ size_t doubleByRefArgs() const {
+ size_t count = 0;
+
+ // Fetch all explicit arguments.
+ uint32_t n =
+ ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & argumentProperties;
+
+ // Filter double-size arguments (0x5 = 0b0101) and take (&) only
+ // arguments passed by reference (0b1010 >> 1 == 0b0101).
+ n = (n & 0x55555555) & (n >> 1);
+
+        // Add the number of double-words transferred by reference. (expect a
+        // few loop iterations)
+ while (n) {
+ count++;
+ n &= n - 1;
+ }
+ return count;
+ }
+
+ VMFunction(void* wrapped, const char* name, uint32_t explicitArgs, uint32_t argumentProperties,
+ uint32_t argumentPassedInFloatRegs, uint64_t argRootTypes,
+ DataType outParam, RootType outParamRootType, DataType returnType,
+ uint32_t extraValuesToPop = 0, MaybeTailCall expectTailCall = NonTailCall)
+ : wrapped(wrapped),
+ name_(name),
+ explicitArgs(explicitArgs),
+ argumentProperties(argumentProperties),
+ argumentPassedInFloatRegs(argumentPassedInFloatRegs),
+ outParam(outParam),
+ returnType(returnType),
+ argumentRootTypes(argRootTypes),
+ outParamRootType(outParamRootType),
+ extraValuesToPop(extraValuesToPop),
+ expectTailCall(expectTailCall)
+ {
+ // Check for valid failure/return type.
+ MOZ_ASSERT_IF(outParam != Type_Void, returnType == Type_Bool);
+ MOZ_ASSERT(returnType == Type_Bool ||
+ returnType == Type_Object);
+ }
+
+ VMFunction(const VMFunction& o) {
+ *this = o;
+ addToFunctions();
+ }
+
+ private:
+ // Add this to the global list of VMFunctions.
+ void addToFunctions();
+};
+
+template <class> struct TypeToDataType { /* Unexpected return type for a VMFunction. */ };
+template <> struct TypeToDataType<bool> { static const DataType result = Type_Bool; };
+template <> struct TypeToDataType<JSObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<NativeObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<PlainObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<InlineTypedObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<NamedLambdaObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<ArrayObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<TypedArrayObject*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<JSString*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<JSFlatString*> { static const DataType result = Type_Object; };
+template <> struct TypeToDataType<HandleObject> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<HandleString> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<HandlePropertyName> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<HandleFunction> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<NativeObject*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<InlineTypedObject*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<ArrayObject*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<GeneratorObject*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<PlainObject*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<WithScope*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<LexicalScope*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<Handle<Scope*> > { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<HandleScript> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<HandleValue> { static const DataType result = Type_Handle; };
+template <> struct TypeToDataType<MutableHandleValue> { static const DataType result = Type_Handle; };
+
+// Convert argument types to properties of the argument known by the jit.
+template <class T> struct TypeToArgProperties {
+ static const uint32_t result =
+ (sizeof(T) <= sizeof(void*) ? VMFunction::Word : VMFunction::Double);
+};
+template <> struct TypeToArgProperties<const Value&> {
+ static const uint32_t result = TypeToArgProperties<Value>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleObject> {
+ static const uint32_t result = TypeToArgProperties<JSObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleString> {
+ static const uint32_t result = TypeToArgProperties<JSString*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandlePropertyName> {
+ static const uint32_t result = TypeToArgProperties<PropertyName*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleFunction> {
+ static const uint32_t result = TypeToArgProperties<JSFunction*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<NativeObject*> > {
+ static const uint32_t result = TypeToArgProperties<NativeObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<InlineTypedObject*> > {
+ static const uint32_t result = TypeToArgProperties<InlineTypedObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<ArrayObject*> > {
+ static const uint32_t result = TypeToArgProperties<ArrayObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<GeneratorObject*> > {
+ static const uint32_t result = TypeToArgProperties<GeneratorObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<PlainObject*> > {
+ static const uint32_t result = TypeToArgProperties<PlainObject*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<WithScope*> > {
+ static const uint32_t result = TypeToArgProperties<WithScope*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<LexicalScope*> > {
+ static const uint32_t result = TypeToArgProperties<LexicalScope*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<Handle<Scope*> > {
+ static const uint32_t result = TypeToArgProperties<Scope*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleScript> {
+ static const uint32_t result = TypeToArgProperties<JSScript*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleValue> {
+ static const uint32_t result = TypeToArgProperties<Value>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<MutableHandleValue> {
+ static const uint32_t result = TypeToArgProperties<Value>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleShape> {
+ static const uint32_t result = TypeToArgProperties<Shape*>::result | VMFunction::ByRef;
+};
+template <> struct TypeToArgProperties<HandleObjectGroup> {
+ static const uint32_t result = TypeToArgProperties<ObjectGroup*>::result | VMFunction::ByRef;
+};
+
+// Convert argument type to whether or not it should be passed in a float
+// register on platforms that have them, like x64.
+template <class T> struct TypeToPassInFloatReg {
+ static const uint32_t result = 0;
+};
+template <> struct TypeToPassInFloatReg<double> {
+ static const uint32_t result = 1;
+};
+
+// Convert argument types to root types used by the GC; see MarkJitExitFrame.
+template <class T> struct TypeToRootType {
+ static const uint32_t result = VMFunction::RootNone;
+};
+template <> struct TypeToRootType<HandleObject> {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<HandleString> {
+ static const uint32_t result = VMFunction::RootString;
+};
+template <> struct TypeToRootType<HandlePropertyName> {
+ static const uint32_t result = VMFunction::RootPropertyName;
+};
+template <> struct TypeToRootType<HandleFunction> {
+ static const uint32_t result = VMFunction::RootFunction;
+};
+template <> struct TypeToRootType<HandleValue> {
+ static const uint32_t result = VMFunction::RootValue;
+};
+template <> struct TypeToRootType<MutableHandleValue> {
+ static const uint32_t result = VMFunction::RootValue;
+};
+template <> struct TypeToRootType<HandleShape> {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <> struct TypeToRootType<HandleObjectGroup> {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <> struct TypeToRootType<HandleScript> {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <> struct TypeToRootType<Handle<NativeObject*> > {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<Handle<InlineTypedObject*> > {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<Handle<ArrayObject*> > {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<Handle<GeneratorObject*> > {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<Handle<PlainObject*> > {
+ static const uint32_t result = VMFunction::RootObject;
+};
+template <> struct TypeToRootType<Handle<LexicalScope*> > {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <> struct TypeToRootType<Handle<WithScope*> > {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <> struct TypeToRootType<Handle<Scope*> > {
+ static const uint32_t result = VMFunction::RootCell;
+};
+template <class T> struct TypeToRootType<Handle<T> > {
+ // Fail for Handle types that aren't specialized above.
+};
+
+template <class> struct OutParamToDataType { static const DataType result = Type_Void; };
+template <> struct OutParamToDataType<Value*> { static const DataType result = Type_Value; };
+template <> struct OutParamToDataType<int*> { static const DataType result = Type_Int32; };
+template <> struct OutParamToDataType<uint32_t*> { static const DataType result = Type_Int32; };
+template <> struct OutParamToDataType<uint8_t**> { static const DataType result = Type_Pointer; };
+template <> struct OutParamToDataType<bool*> { static const DataType result = Type_Bool; };
+template <> struct OutParamToDataType<double*> { static const DataType result = Type_Double; };
+template <> struct OutParamToDataType<MutableHandleValue> { static const DataType result = Type_Handle; };
+template <> struct OutParamToDataType<MutableHandleObject> { static const DataType result = Type_Handle; };
+template <> struct OutParamToDataType<MutableHandleString> { static const DataType result = Type_Handle; };
+
+template <class> struct OutParamToRootType {
+ static const VMFunction::RootType result = VMFunction::RootNone;
+};
+template <> struct OutParamToRootType<MutableHandleValue> {
+ static const VMFunction::RootType result = VMFunction::RootValue;
+};
+template <> struct OutParamToRootType<MutableHandleObject> {
+ static const VMFunction::RootType result = VMFunction::RootObject;
+};
+template <> struct OutParamToRootType<MutableHandleString> {
+ static const VMFunction::RootType result = VMFunction::RootString;
+};
+
+template <class> struct MatchContext { };
+template <> struct MatchContext<JSContext*> {
+ static const bool valid = true;
+};
+template <> struct MatchContext<ExclusiveContext*> {
+ static const bool valid = true;
+};
+
+// Extract the last element of a list of types.
+template <typename... ArgTypes>
+struct LastArg;
+
+template <>
+struct LastArg<>
+{
+ typedef void Type;
+ static constexpr size_t nbArgs = 0;
+};
+
+template <typename HeadType>
+struct LastArg<HeadType>
+{
+ typedef HeadType Type;
+ static constexpr size_t nbArgs = 1;
+};
+
+template <typename HeadType, typename... TailTypes>
+struct LastArg<HeadType, TailTypes...>
+{
+ typedef typename LastArg<TailTypes...>::Type Type;
+ static constexpr size_t nbArgs = LastArg<TailTypes...>::nbArgs + 1;
+};
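+
+// Illustrative note (not part of the original header): for example,
+// LastArg<int32_t, double, bool*>::Type is bool* and
+// LastArg<int32_t, double, bool*>::nbArgs is 3; FunctionInfo below uses this
+// to inspect a possible outparam at the end of a signature.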
+
+// Construct a bit mask from a list of types. The mask is constructed as an OR
+// of the mask produced for each argument. The result of each argument is
+// shifted by its index, such that the result of the first argument is on the
+// low bits of the mask, and the result of the last argument is in the high
+// bits of the mask.
+template <template<typename> class Each, typename ResultType, size_t Shift,
+ typename... Args>
+struct BitMask;
+
+template <template<typename> class Each, typename ResultType, size_t Shift>
+struct BitMask<Each, ResultType, Shift>
+{
+ static constexpr ResultType result = ResultType();
+};
+
+template <template<typename> class Each, typename ResultType, size_t Shift,
+ typename HeadType, typename... TailTypes>
+struct BitMask<Each, ResultType, Shift, HeadType, TailTypes...>
+{
+ static_assert(ResultType(Each<HeadType>::result) < (1 << Shift),
+ "not enough bits reserved by the shift for individual results");
+ static_assert(LastArg<TailTypes...>::nbArgs < (8 * sizeof(ResultType) / Shift),
+ "not enough bits in the result type to store all bit masks");
+
+ static constexpr ResultType result =
+ ResultType(Each<HeadType>::result) |
+ (BitMask<Each, ResultType, Shift, TailTypes...>::result << Shift);
+};
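+
+// Illustrative example (not in the original header): for instance,
+// BitMask<TypeToRootType, uint64_t, 3, HandleObject, HandleValue>::result is
+// RootObject | (RootValue << 3), i.e. 0b101001, placing each argument's root
+// type in its own 3-bit field.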
+
+// Extract VMFunction properties based on the signature of the function. The
+// properties are used to generate the logic for calling the VM function, and
+// also for marking the stack during GCs.
+template <typename... Args>
+struct FunctionInfo;
+
+template <class R, class Context, typename... Args>
+struct FunctionInfo<R (*)(Context, Args...)> : public VMFunction
+{
+ typedef R (*pf)(Context, Args...);
+
+ static DataType returnType() {
+ return TypeToDataType<R>::result;
+ }
+ static DataType outParam() {
+ return OutParamToDataType<typename LastArg<Args...>::Type>::result;
+ }
+ static RootType outParamRootType() {
+ return OutParamToRootType<typename LastArg<Args...>::Type>::result;
+ }
+ static size_t NbArgs() {
+ return LastArg<Args...>::nbArgs;
+ }
+ static size_t explicitArgs() {
+ return NbArgs() - (outParam() != Type_Void ? 1 : 0);
+ }
+ static uint32_t argumentProperties() {
+ return BitMask<TypeToArgProperties, uint32_t, 2, Args...>::result;
+ }
+ static uint32_t argumentPassedInFloatRegs() {
+ return BitMask<TypeToPassInFloatReg, uint32_t, 2, Args...>::result;
+ }
+ static uint64_t argumentRootTypes() {
+ return BitMask<TypeToRootType, uint64_t, 3, Args...>::result;
+ }
+ explicit FunctionInfo(pf fun, const char* name, PopValues extraValuesToPop = PopValues(0))
+ : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), name, explicitArgs(),
+ argumentProperties(), argumentPassedInFloatRegs(),
+ argumentRootTypes(), outParam(), outParamRootType(),
+ returnType(), extraValuesToPop.numValues, NonTailCall)
+ {
+ static_assert(MatchContext<Context>::valid, "Invalid cx type in VMFunction");
+ }
+ explicit FunctionInfo(pf fun, const char* name, MaybeTailCall expectTailCall,
+ PopValues extraValuesToPop = PopValues(0))
+ : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), name, explicitArgs(),
+ argumentProperties(), argumentPassedInFloatRegs(),
+ argumentRootTypes(), outParam(), outParamRootType(),
+ returnType(), extraValuesToPop.numValues, expectTailCall)
+ {
+ static_assert(MatchContext<Context>::valid, "Invalid cx type in VMFunction");
+ }
+};
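+
+// Illustrative sketch of how this header is typically used (the function and
+// names below are hypothetical, not part of this patch): a VM function is
+// exposed to the JIT by pairing its C++ entry point with a FunctionInfo
+// instance, e.g.
+//
+//   bool DoThingVM(JSContext* cx, HandleObject obj, MutableHandleValue rval);
+//   typedef bool (*DoThingVMFn)(JSContext*, HandleObject, MutableHandleValue);
+//   static const VMFunction DoThingVMInfo =
+//       FunctionInfo<DoThingVMFn>(DoThingVM, "DoThingVM");
+//
+// FunctionInfo then deduces explicitArgs, argumentProperties, the root types,
+// and the outparam from the signature.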
+
+class AutoDetectInvalidation
+{
+ JSContext* cx_;
+ IonScript* ionScript_;
+ MutableHandleValue rval_;
+ bool disabled_;
+
+ void setReturnOverride();
+
+ public:
+ AutoDetectInvalidation(JSContext* cx, MutableHandleValue rval, IonScript* ionScript)
+ : cx_(cx), ionScript_(ionScript), rval_(rval), disabled_(false)
+ {
+ MOZ_ASSERT(ionScript);
+ }
+
+ AutoDetectInvalidation(JSContext* cx, MutableHandleValue rval);
+
+ void disable() {
+ MOZ_ASSERT(!disabled_);
+ disabled_ = true;
+ }
+
+ ~AutoDetectInvalidation() {
+ if (!disabled_ && ionScript_->invalidated())
+ setReturnOverride();
+ }
+};
+
+MOZ_MUST_USE bool
+InvokeFunction(JSContext* cx, HandleObject obj0, bool constructing, uint32_t argc, Value* argv,
+ MutableHandleValue rval);
+MOZ_MUST_USE bool
+InvokeFunctionShuffleNewTarget(JSContext* cx, HandleObject obj, uint32_t numActualArgs,
+ uint32_t numFormalArgs, Value* argv, MutableHandleValue rval);
+
+bool CheckOverRecursed(JSContext* cx);
+bool CheckOverRecursedWithExtra(JSContext* cx, BaselineFrame* frame,
+ uint32_t extra, uint32_t earlyCheck);
+
+JSObject* BindVar(JSContext* cx, HandleObject scopeChain);
+MOZ_MUST_USE bool
+DefVar(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject scopeChain);
+MOZ_MUST_USE bool
+DefLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs, HandleObject scopeChain);
+MOZ_MUST_USE bool
+DefGlobalLexical(JSContext* cx, HandlePropertyName dn, unsigned attrs);
+MOZ_MUST_USE bool
+MutatePrototype(JSContext* cx, HandlePlainObject obj, HandleValue value);
+MOZ_MUST_USE bool
+InitProp(JSContext* cx, HandleObject obj, HandlePropertyName name, HandleValue value,
+ jsbytecode* pc);
+
+template<bool Equal>
+bool LooselyEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+
+template<bool Equal>
+bool StrictlyEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+
+bool LessThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+bool LessThanOrEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+bool GreaterThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+bool GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res);
+
+template<bool Equal>
+bool StringsEqual(JSContext* cx, HandleString left, HandleString right, bool* res);
+
+MOZ_MUST_USE bool ArrayPopDense(JSContext* cx, HandleObject obj, MutableHandleValue rval);
+MOZ_MUST_USE bool ArrayPushDense(JSContext* cx, HandleObject obj, HandleValue v, uint32_t* length);
+MOZ_MUST_USE bool ArrayShiftDense(JSContext* cx, HandleObject obj, MutableHandleValue rval);
+JSString* ArrayJoin(JSContext* cx, HandleObject array, HandleString sep);
+
+MOZ_MUST_USE bool
+CharCodeAt(JSContext* cx, HandleString str, int32_t index, uint32_t* code);
+JSFlatString* StringFromCharCode(JSContext* cx, int32_t code);
+JSString* StringFromCodePoint(JSContext* cx, int32_t codePoint);
+
+MOZ_MUST_USE bool
+SetProperty(JSContext* cx, HandleObject obj, HandlePropertyName name, HandleValue value,
+ bool strict, jsbytecode* pc);
+
+MOZ_MUST_USE bool
+InterruptCheck(JSContext* cx);
+
+void* MallocWrapper(JSRuntime* rt, size_t nbytes);
+JSObject* NewCallObject(JSContext* cx, HandleShape shape, HandleObjectGroup group);
+JSObject* NewSingletonCallObject(JSContext* cx, HandleShape shape);
+JSObject* NewStringObject(JSContext* cx, HandleString str);
+
+bool OperatorIn(JSContext* cx, HandleValue key, HandleObject obj, bool* out);
+bool OperatorInI(JSContext* cx, uint32_t index, HandleObject obj, bool* out);
+
+MOZ_MUST_USE bool
+GetIntrinsicValue(JSContext* cx, HandlePropertyName name, MutableHandleValue rval);
+
+MOZ_MUST_USE bool
+CreateThis(JSContext* cx, HandleObject callee, HandleObject newTarget, MutableHandleValue rval);
+
+void GetDynamicName(JSContext* cx, JSObject* scopeChain, JSString* str, Value* vp);
+
+void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
+void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj, int32_t index);
+void PostGlobalWriteBarrier(JSRuntime* rt, JSObject* obj);
+
+uint32_t GetIndexFromString(JSString* str);
+
+MOZ_MUST_USE bool
+DebugPrologue(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool* mustReturn);
+MOZ_MUST_USE bool
+DebugEpilogue(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool ok);
+MOZ_MUST_USE bool
+DebugEpilogueOnBaselineReturn(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+void FrameIsDebuggeeCheck(BaselineFrame* frame);
+
+JSObject* CreateGenerator(JSContext* cx, BaselineFrame* frame);
+
+MOZ_MUST_USE bool
+NormalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame, jsbytecode* pc,
+ uint32_t stackDepth);
+MOZ_MUST_USE bool
+FinalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame, jsbytecode* pc);
+MOZ_MUST_USE bool
+InterpretResume(JSContext* cx, HandleObject obj, HandleValue val, HandlePropertyName kind,
+ MutableHandleValue rval);
+MOZ_MUST_USE bool
+DebugAfterYield(JSContext* cx, BaselineFrame* frame);
+MOZ_MUST_USE bool
+GeneratorThrowOrClose(JSContext* cx, BaselineFrame* frame, Handle<GeneratorObject*> genObj,
+ HandleValue arg, uint32_t resumeKind);
+
+MOZ_MUST_USE bool
+GlobalNameConflictsCheckFromIon(JSContext* cx, HandleScript script);
+MOZ_MUST_USE bool
+CheckGlobalOrEvalDeclarationConflicts(JSContext* cx, BaselineFrame* frame);
+MOZ_MUST_USE bool
+InitFunctionEnvironmentObjects(JSContext* cx, BaselineFrame* frame);
+
+MOZ_MUST_USE bool
+NewArgumentsObject(JSContext* cx, BaselineFrame* frame, MutableHandleValue res);
+
+JSObject* InitRestParameter(JSContext* cx, uint32_t length, Value* rest, HandleObject templateObj,
+ HandleObject res);
+
+MOZ_MUST_USE bool
+HandleDebugTrap(JSContext* cx, BaselineFrame* frame, uint8_t* retAddr, bool* mustReturn);
+MOZ_MUST_USE bool
+OnDebuggerStatement(JSContext* cx, BaselineFrame* frame, jsbytecode* pc, bool* mustReturn);
+MOZ_MUST_USE bool
+GlobalHasLiveOnDebuggerStatement(JSContext* cx);
+
+MOZ_MUST_USE bool
+EnterWith(JSContext* cx, BaselineFrame* frame, HandleValue val, Handle<WithScope*> templ);
+MOZ_MUST_USE bool
+LeaveWith(JSContext* cx, BaselineFrame* frame);
+
+MOZ_MUST_USE bool
+PushLexicalEnv(JSContext* cx, BaselineFrame* frame, Handle<LexicalScope*> scope);
+MOZ_MUST_USE bool
+PopLexicalEnv(JSContext* cx, BaselineFrame* frame);
+MOZ_MUST_USE bool
+DebugLeaveThenPopLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+MOZ_MUST_USE bool
+FreshenLexicalEnv(JSContext* cx, BaselineFrame* frame);
+MOZ_MUST_USE bool
+DebugLeaveThenFreshenLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+MOZ_MUST_USE bool
+RecreateLexicalEnv(JSContext* cx, BaselineFrame* frame);
+MOZ_MUST_USE bool
+DebugLeaveThenRecreateLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+MOZ_MUST_USE bool
+DebugLeaveLexicalEnv(JSContext* cx, BaselineFrame* frame, jsbytecode* pc);
+
+MOZ_MUST_USE bool
+PushVarEnv(JSContext* cx, BaselineFrame* frame, HandleScope scope);
+MOZ_MUST_USE bool
+PopVarEnv(JSContext* cx, BaselineFrame* frame);
+
+MOZ_MUST_USE bool
+InitBaselineFrameForOsr(BaselineFrame* frame, InterpreterFrame* interpFrame,
+ uint32_t numStackValues);
+
+JSObject* CreateDerivedTypedObj(JSContext* cx, HandleObject descr,
+ HandleObject owner, int32_t offset);
+
+MOZ_MUST_USE bool
+ArraySpliceDense(JSContext* cx, HandleObject obj, uint32_t start, uint32_t deleteCount);
+
+MOZ_MUST_USE bool
+Recompile(JSContext* cx);
+MOZ_MUST_USE bool
+ForcedRecompile(JSContext* cx);
+JSString* StringReplace(JSContext* cx, HandleString string, HandleString pattern,
+ HandleString repl);
+
+MOZ_MUST_USE bool SetDenseOrUnboxedArrayElement(JSContext* cx, HandleObject obj, int32_t index,
+ HandleValue value, bool strict);
+
+void AssertValidObjectPtr(JSContext* cx, JSObject* obj);
+void AssertValidObjectOrNullPtr(JSContext* cx, JSObject* obj);
+void AssertValidStringPtr(JSContext* cx, JSString* str);
+void AssertValidSymbolPtr(JSContext* cx, JS::Symbol* sym);
+void AssertValidValue(JSContext* cx, Value* v);
+
+void MarkValueFromIon(JSRuntime* rt, Value* vp);
+void MarkStringFromIon(JSRuntime* rt, JSString** stringp);
+void MarkObjectFromIon(JSRuntime* rt, JSObject** objp);
+void MarkShapeFromIon(JSRuntime* rt, Shape** shapep);
+void MarkObjectGroupFromIon(JSRuntime* rt, ObjectGroup** groupp);
+
+// Helper for generatePreBarrier.
+inline void*
+IonMarkFunction(MIRType type)
+{
+ switch (type) {
+ case MIRType::Value:
+ return JS_FUNC_TO_DATA_PTR(void*, MarkValueFromIon);
+ case MIRType::String:
+ return JS_FUNC_TO_DATA_PTR(void*, MarkStringFromIon);
+ case MIRType::Object:
+ return JS_FUNC_TO_DATA_PTR(void*, MarkObjectFromIon);
+ case MIRType::Shape:
+ return JS_FUNC_TO_DATA_PTR(void*, MarkShapeFromIon);
+ case MIRType::ObjectGroup:
+ return JS_FUNC_TO_DATA_PTR(void*, MarkObjectGroupFromIon);
+ default: MOZ_CRASH();
+ }
+}
+
+bool ObjectIsCallable(JSObject* obj);
+bool ObjectIsConstructor(JSObject* obj);
+
+MOZ_MUST_USE bool
+ThrowRuntimeLexicalError(JSContext* cx, unsigned errorNumber);
+
+MOZ_MUST_USE bool
+ThrowReadOnlyError(JSContext* cx, int32_t index);
+
+MOZ_MUST_USE bool
+BaselineThrowUninitializedThis(JSContext* cx, BaselineFrame* frame);
+
+MOZ_MUST_USE bool
+ThrowBadDerivedReturn(JSContext* cx, HandleValue v);
+
+MOZ_MUST_USE bool
+ThrowObjectCoercible(JSContext* cx, HandleValue v);
+
+MOZ_MUST_USE bool
+BaselineGetFunctionThis(JSContext* cx, BaselineFrame* frame, MutableHandleValue res);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_VMFunctions_h */
diff --git a/js/src/jit/ValueNumbering.cpp b/js/src/jit/ValueNumbering.cpp
new file mode 100644
index 000000000..cb65c323c
--- /dev/null
+++ b/js/src/jit/ValueNumbering.cpp
@@ -0,0 +1,1306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ValueNumbering.h"
+
+#include "jit/AliasAnalysis.h"
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+
+using namespace js;
+using namespace js::jit;
+
+/*
+ * Some notes on the main algorithm here:
+ * - The SSA identifier id() is the value number. We do replaceAllUsesWith as
+ * we go, so there's always at most one visible value with a given number.
+ *
+ * - Consequently, the GVN algorithm is effectively pessimistic. This means it
+ * is not as powerful as an optimistic GVN would be, but it is simpler and
+ * faster.
+ *
+ * - We iterate in RPO, so that when visiting a block, we've already optimized
+ * and hashed all values in dominating blocks. With occasional exceptions,
+ * this allows us to do everything in a single pass.
+ *
+ * - When we do use multiple passes, we just re-run the algorithm on the whole
+ * graph instead of doing sparse propagation. This is a tradeoff to keep the
+ * algorithm simpler and lighter on inputs that don't have a lot of
+ * interesting unreachable blocks or degenerate loop induction variables, at
+ * the expense of being slower on inputs that do. The loop for this always
+ * terminates, because it only iterates when code is or will be removed, so
+ * eventually it must stop iterating.
+ *
+ * - Values are not immediately removed from the hash set when they go out of
+ * scope. Instead, we check for dominance after a lookup. If the dominance
+ * check fails, the value is removed.
+ */
+
+HashNumber
+ValueNumberer::VisibleValues::ValueHasher::hash(Lookup ins)
+{
+ return ins->valueHash();
+}
+
+// Test whether two MDefinitions are congruent.
+bool
+ValueNumberer::VisibleValues::ValueHasher::match(Key k, Lookup l)
+{
+ // If one of the instructions depends on a store, and the other instruction
+ // does not depend on the same store, the instructions are not congruent.
+ if (k->dependency() != l->dependency())
+ return false;
+
+ bool congruent = k->congruentTo(l); // Ask the values themselves what they think.
+#ifdef JS_JITSPEW
+ if (congruent != l->congruentTo(k)) {
+ JitSpew(JitSpew_GVN, " congruentTo relation is not symmetric between %s%u and %s%u!!",
+ k->opName(), k->id(),
+ l->opName(), l->id());
+ }
+#endif
+ return congruent;
+}
+
+void
+ValueNumberer::VisibleValues::ValueHasher::rekey(Key& k, Key newKey)
+{
+ k = newKey;
+}
+
+ValueNumberer::VisibleValues::VisibleValues(TempAllocator& alloc)
+ : set_(alloc)
+{}
+
+// Initialize the set.
+bool
+ValueNumberer::VisibleValues::init()
+{
+ return set_.init();
+}
+
+// Look up the first entry for |def|.
+ValueNumberer::VisibleValues::Ptr
+ValueNumberer::VisibleValues::findLeader(const MDefinition* def) const
+{
+ return set_.lookup(def);
+}
+
+// Look up the first entry for |def|, returning an AddPtr so the value can be
+// added if no entry is present.
+ValueNumberer::VisibleValues::AddPtr
+ValueNumberer::VisibleValues::findLeaderForAdd(MDefinition* def)
+{
+ return set_.lookupForAdd(def);
+}
+
+// Insert a value into the set.
+bool
+ValueNumberer::VisibleValues::add(AddPtr p, MDefinition* def)
+{
+ return set_.add(p, def);
+}
+
+// Insert a value into the set, overwriting any existing entry.
+void
+ValueNumberer::VisibleValues::overwrite(AddPtr p, MDefinition* def)
+{
+ set_.replaceKey(p, def);
+}
+
+// |def| will be discarded, so remove it from any sets.
+void
+ValueNumberer::VisibleValues::forget(const MDefinition* def)
+{
+ Ptr p = set_.lookup(def);
+ if (p && *p == def)
+ set_.remove(p);
+}
+
+// Clear all state.
+void
+ValueNumberer::VisibleValues::clear()
+{
+ set_.clear();
+}
+
+#ifdef DEBUG
+// Test whether |def| is in the set.
+bool
+ValueNumberer::VisibleValues::has(const MDefinition* def) const
+{
+ Ptr p = set_.lookup(def);
+ return p && *p == def;
+}
+#endif
+
+// Call MDefinition::justReplaceAllUsesWith, and add some GVN-specific asserts.
+static void
+ReplaceAllUsesWith(MDefinition* from, MDefinition* to)
+{
+ MOZ_ASSERT(from != to, "GVN shouldn't try to replace a value with itself");
+ MOZ_ASSERT(from->type() == to->type(), "Def replacement has different type");
+ MOZ_ASSERT(!to->isDiscarded(), "GVN replaces an instruction by a removed instruction");
+
+ // We don't need the extra setting of UseRemoved flags that the regular
+ // replaceAllUsesWith does because we do it ourselves.
+ from->justReplaceAllUsesWith(to);
+}
+
+// Test whether |succ| is a successor of |block|.
+static bool
+HasSuccessor(const MControlInstruction* block, const MBasicBlock* succ)
+{
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+ if (block->getSuccessor(i) == succ)
+ return true;
+ }
+ return false;
+}
+
+// Given a block which has had predecessors removed but is still reachable,
+// compute the block's new immediate dominator, without modifying the
+// dominator tree itself.
+static MBasicBlock*
+ComputeNewDominator(MBasicBlock* block, MBasicBlock* old)
+{
+ MBasicBlock* now = block->getPredecessor(0);
+ for (size_t i = 1, e = block->numPredecessors(); i < e; ++i) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ // Note that dominators haven't been recomputed yet, so we have to check
+ // whether now dominates pred, not block.
+ while (!now->dominates(pred)) {
+ MBasicBlock* next = now->immediateDominator();
+ if (next == old)
+ return old;
+ if (next == now) {
+ MOZ_ASSERT(block == old, "Non-self-dominating block became self-dominating");
+ return block;
+ }
+ now = next;
+ }
+ }
+ MOZ_ASSERT(old != block || old != now, "Missed self-dominating block staying self-dominating");
+ return now;
+}
+
+// Test for any defs which look potentially interesting to GVN.
+static bool
+BlockHasInterestingDefs(MBasicBlock* block)
+{
+ return !block->phisEmpty() || *block->begin() != block->lastIns();
+}
+
+// Walk up the dominator tree from |block| to the root and test for any defs
+// which look potentially interesting to GVN.
+static bool
+ScanDominatorsForDefs(MBasicBlock* block)
+{
+ for (MBasicBlock* i = block;;) {
+        if (BlockHasInterestingDefs(i))
+ return true;
+
+ MBasicBlock* immediateDominator = i->immediateDominator();
+ if (immediateDominator == i)
+ break;
+ i = immediateDominator;
+ }
+ return false;
+}
+
+// Walk up the dominator tree from |now| to |old| and test for any defs which
+// look potentially interesting to GVN.
+static bool
+ScanDominatorsForDefs(MBasicBlock* now, MBasicBlock* old)
+{
+ MOZ_ASSERT(old->dominates(now), "Refined dominator not dominated by old dominator");
+
+ for (MBasicBlock* i = now; i != old; i = i->immediateDominator()) {
+ if (BlockHasInterestingDefs(i))
+ return true;
+ }
+ return false;
+}
+
+// Given a block which has had predecessors removed but is still reachable, test
+// whether the block's new dominator will be closer than its old one and whether
+// it will expose potential optimization opportunities.
+static bool
+IsDominatorRefined(MBasicBlock* block)
+{
+ MBasicBlock* old = block->immediateDominator();
+ MBasicBlock* now = ComputeNewDominator(block, old);
+
+ // If this block is just a goto and it doesn't dominate its destination,
+ // removing its predecessors won't refine the dominators of anything
+ // interesting.
+ MControlInstruction* control = block->lastIns();
+ if (*block->begin() == control && block->phisEmpty() && control->isGoto() &&
+ !block->dominates(control->toGoto()->target()))
+ {
+ return false;
+ }
+
+ // We've computed block's new dominator. Test whether there are any
+ // newly-dominating definitions which look interesting.
+ if (block == old)
+ return block != now && ScanDominatorsForDefs(now);
+ MOZ_ASSERT(block != now, "Non-self-dominating block became self-dominating");
+ return ScanDominatorsForDefs(now, old);
+}
+
+// |def| has just had one of its users release it. If it's now dead, enqueue it
+// for discarding; otherwise, just make note of it.
+bool
+ValueNumberer::handleUseReleased(MDefinition* def, UseRemovedOption useRemovedOption)
+{
+ if (IsDiscardable(def)) {
+ values_.forget(def);
+ if (!deadDefs_.append(def))
+ return false;
+ } else {
+ if (useRemovedOption == SetUseRemoved)
+ def->setUseRemovedUnchecked();
+ }
+ return true;
+}
+
+// Discard |def| and anything in its use-def subtree which is no longer needed.
+bool
+ValueNumberer::discardDefsRecursively(MDefinition* def)
+{
+ MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
+
+ return discardDef(def) && processDeadDefs();
+}
+
+// Assuming |resume| is unreachable, release its operands.
+// It might be nice to integrate this code with prepareForDiscard; however, GVN
+// needs it to call handleUseReleased so that it can observe when a definition
+// becomes unused, so it isn't trivial to do.
+bool
+ValueNumberer::releaseResumePointOperands(MResumePoint* resume)
+{
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ if (!resume->hasOperand(i))
+ continue;
+ MDefinition* op = resume->getOperand(i);
+ resume->releaseOperand(i);
+
+ // We set the UseRemoved flag when removing resume point operands,
+ // because even though we may think we're certain that a particular
+ // branch might not be taken, the type information might be incomplete.
+ if (!handleUseReleased(op, SetUseRemoved))
+ return false;
+ }
+ return true;
+}
+
+// Assuming |phi| is dead, release and remove its operands. If an operand
+// becomes dead, push it to the discard worklist.
+bool
+ValueNumberer::releaseAndRemovePhiOperands(MPhi* phi)
+{
+ // MPhi saves operands in a vector so we iterate in reverse.
+ for (int o = phi->numOperands() - 1; o >= 0; --o) {
+ MDefinition* op = phi->getOperand(o);
+ phi->removeOperand(o);
+ if (!handleUseReleased(op, DontSetUseRemoved))
+ return false;
+ }
+ return true;
+}
+
+// Assuming |def| is dead, release its operands. If an operand becomes dead,
+// push it to the discard worklist.
+bool
+ValueNumberer::releaseOperands(MDefinition* def)
+{
+ for (size_t o = 0, e = def->numOperands(); o < e; ++o) {
+ MDefinition* op = def->getOperand(o);
+ def->releaseOperand(o);
+ if (!handleUseReleased(op, DontSetUseRemoved))
+ return false;
+ }
+ return true;
+}
+
+// Discard |def| and mine its operands for any subsequently dead defs.
+bool
+ValueNumberer::discardDef(MDefinition* def)
+{
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Discarding %s %s%u",
+ def->block()->isMarked() ? "unreachable" : "dead",
+ def->opName(), def->id());
+#endif
+#ifdef DEBUG
+ MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
+ if (def->block()->isMarked()) {
+ MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
+ } else {
+ MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition");
+ MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
+ }
+#endif
+
+ MBasicBlock* block = def->block();
+ if (def->isPhi()) {
+ MPhi* phi = def->toPhi();
+ if (!releaseAndRemovePhiOperands(phi))
+ return false;
+ block->discardPhi(phi);
+ } else {
+ MInstruction* ins = def->toInstruction();
+ if (MResumePoint* resume = ins->resumePoint()) {
+ if (!releaseResumePointOperands(resume))
+ return false;
+ }
+ if (!releaseOperands(ins))
+ return false;
+ block->discardIgnoreOperands(ins);
+ }
+
+ // If that was the last definition in the block, it can be safely removed
+ // from the graph.
+ if (block->phisEmpty() && block->begin() == block->end()) {
+ MOZ_ASSERT(block->isMarked(), "Reachable block lacks at least a control instruction");
+
+ // As a special case, don't remove a block which is a dominator tree
+ // root so that we don't invalidate the iterator in visitGraph. We'll
+ // check for this and remove it later.
+ if (block->immediateDominator() != block) {
+ JitSpew(JitSpew_GVN, " Block block%u is now empty; discarding", block->id());
+ graph_.removeBlock(block);
+ blocksRemoved_ = true;
+ } else {
+ JitSpew(JitSpew_GVN, " Dominator root block%u is now empty; will discard later",
+ block->id());
+ }
+ }
+
+ return true;
+}
+
+// Recursively discard all the defs on the deadDefs_ worklist.
+bool
+ValueNumberer::processDeadDefs()
+{
+ MDefinition* nextDef = nextDef_;
+ while (!deadDefs_.empty()) {
+ MDefinition* def = deadDefs_.popCopy();
+
+ // Don't invalidate the MDefinition iterator. This is what we're going
+ // to visit next, so we won't miss anything.
+ if (def == nextDef)
+ continue;
+
+ if (!discardDef(def))
+ return false;
+ }
+ return true;
+}
+
+// Test whether |block|, which is a loop header, has any predecessors other
+// than |loopPred|, the loop predecessor, that it doesn't dominate.
+static bool
+hasNonDominatingPredecessor(MBasicBlock* block, MBasicBlock* loopPred)
+{
+ MOZ_ASSERT(block->isLoopHeader());
+ MOZ_ASSERT(block->loopPredecessor() == loopPred);
+
+ for (uint32_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (pred != loopPred && !block->dominates(pred))
+ return true;
+ }
+ return false;
+}
+
+// A loop is about to be made reachable only through an OSR entry into one of
+// its nested loops. Fix everything up.
+bool
+ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block, MBasicBlock* backedge)
+{
+ // Create an empty and unreachable(!) block which jumps to |block|. This
+ // allows |block| to remain marked as a loop header, so we don't have to
+ // worry about moving a different block into place as the new loop header,
+ // which is hard, especially if the OSR is into a nested loop. Doing all
+ // that would produce slightly more optimal code, but this is so
+ // extraordinarily rare that it isn't worth the complexity.
+ MBasicBlock* fake = MBasicBlock::New(graph_, block->info(), nullptr, MBasicBlock::NORMAL);
+ if (fake == nullptr)
+ return false;
+
+ graph_.insertBlockBefore(block, fake);
+ fake->setImmediateDominator(fake);
+ fake->addNumDominated(1);
+ fake->setDomIndex(fake->id());
+ fake->setUnreachable();
+
+ // Create zero-input phis to use as inputs for any phis in |block|.
+ // Again, this is a little odd, but it's the least-odd thing we can do
+ // without significant complexity.
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
+ MPhi* phi = *iter;
+ MPhi* fakePhi = MPhi::New(graph_.alloc(), phi->type());
+ fake->addPhi(fakePhi);
+ if (!phi->addInputSlow(fakePhi))
+ return false;
+ }
+
+ fake->end(MGoto::New(graph_.alloc(), block));
+
+ if (!block->addPredecessorWithoutPhis(fake))
+ return false;
+
+ // Restore |backedge| as |block|'s loop backedge.
+ block->clearLoopHeader();
+ block->setLoopHeader(backedge);
+
+ JitSpew(JitSpew_GVN, " Created fake block%u", fake->id());
+ hasOSRFixups_ = true;
+ return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, after releasing the phi
+// operands on that edge and discarding any definitions consequently made dead.
+bool
+ValueNumberer::removePredecessorAndDoDCE(MBasicBlock* block, MBasicBlock* pred, size_t predIndex)
+{
+ MOZ_ASSERT(!block->isMarked(),
+ "Block marked unreachable should have predecessors removed already");
+
+ // Before removing the predecessor edge, scan the phi operands for that edge
+ // for dead code before they get removed.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) {
+ MPhi* phi = *iter++;
+ MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed");
+ MOZ_ASSERT(!phi->isGuard());
+
+ MDefinition* op = phi->getOperand(predIndex);
+ phi->removeOperand(predIndex);
+
+ nextDef_ = iter != end ? *iter : nullptr;
+ if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs())
+ return false;
+
+ // If |nextDef_| became dead while we had it pinned, advance the
+ // iterator and discard it now.
+ while (nextDef_ && !nextDef_->hasUses() && !nextDef_->isGuardRangeBailouts()) {
+ phi = nextDef_->toPhi();
+ iter++;
+ nextDef_ = iter != end ? *iter : nullptr;
+ if (!discardDefsRecursively(phi))
+ return false;
+ }
+ }
+ nextDef_ = nullptr;
+
+ block->removePredecessorWithoutPhiOperands(pred, predIndex);
+ return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, and if this makes |block|
+// unreachable, mark it so, and remove the rest of its incoming edges too. And
+// discard any instructions made dead by the entailed release of any phi
+// operands.
+bool
+ValueNumberer::removePredecessorAndCleanUp(MBasicBlock* block, MBasicBlock* pred)
+{
+ MOZ_ASSERT(!block->isMarked(), "Removing predecessor on block already marked unreachable");
+
+ // We'll be removing a predecessor, so anything we know about phis in this
+ // block will be wrong.
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter)
+ values_.forget(*iter);
+
+ // If this is a loop header, test whether it will become an unreachable
+ // loop, or whether it needs special OSR-related fixups.
+ bool isUnreachableLoop = false;
+ if (block->isLoopHeader()) {
+ if (block->loopPredecessor() == pred) {
+ if (MOZ_UNLIKELY(hasNonDominatingPredecessor(block, pred))) {
+ JitSpew(JitSpew_GVN, " "
+ "Loop with header block%u is now only reachable through an "
+ "OSR entry into the middle of the loop!!", block->id());
+ } else {
+ // Deleting the entry into the loop makes the loop unreachable.
+ isUnreachableLoop = true;
+ JitSpew(JitSpew_GVN, " "
+ "Loop with header block%u is no longer reachable",
+ block->id());
+ }
+#ifdef JS_JITSPEW
+ } else if (block->hasUniqueBackedge() && block->backedge() == pred) {
+ JitSpew(JitSpew_GVN, " Loop with header block%u is no longer a loop",
+ block->id());
+#endif
+ }
+ }
+
+ // Actually remove the CFG edge.
+ if (!removePredecessorAndDoDCE(block, pred, block->getPredecessorIndex(pred)))
+ return false;
+
+ // We've now edited the CFG; check to see if |block| became unreachable.
+ if (block->numPredecessors() == 0 || isUnreachableLoop) {
+ JitSpew(JitSpew_GVN, " Disconnecting block%u", block->id());
+
+ // Remove |block| from its dominator parent's subtree. This is the only
+ // immediately-dominated-block information we need to update, because
+ // everything dominated by this block is about to be swept away.
+ MBasicBlock* parent = block->immediateDominator();
+ if (parent != block)
+ parent->removeImmediatelyDominatedBlock(block);
+
+ // Completely disconnect it from the CFG. We do this now rather than
+ // just doing it later when we arrive there in visitUnreachableBlock
+ // so that we don't leave a partially broken loop sitting around. This
+ // also lets visitUnreachableBlock assert that numPredecessors() == 0,
+ // which is a nice invariant.
+ if (block->isLoopHeader())
+ block->clearLoopHeader();
+ for (size_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+ if (!removePredecessorAndDoDCE(block, block->getPredecessor(i), i))
+ return false;
+ }
+
+        // Clear out the resume point operands, as they can keep things live
+        // that don't appear to dominate them.
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ if (!releaseResumePointOperands(resume) || !processDeadDefs())
+ return false;
+ if (MResumePoint* outer = block->outerResumePoint()) {
+ if (!releaseResumePointOperands(outer) || !processDeadDefs())
+ return false;
+ }
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ) {
+ MInstruction* ins = *iter++;
+ nextDef_ = *iter;
+ if (MResumePoint* resume = ins->resumePoint()) {
+ if (!releaseResumePointOperands(resume) || !processDeadDefs())
+ return false;
+ }
+ }
+ nextDef_ = nullptr;
+ } else {
+#ifdef DEBUG
+ MOZ_ASSERT(block->outerResumePoint() == nullptr,
+ "Outer resume point in block without an entry resume point");
+ for (MInstructionIterator iter(block->begin()), end(block->end());
+ iter != end;
+ ++iter)
+ {
+ MOZ_ASSERT(iter->resumePoint() == nullptr,
+ "Instruction with resume point in block without entry resume point");
+ }
+#endif
+ }
+
+ // Use the mark to note that we've already removed all its predecessors,
+ // and we know it's unreachable.
+ block->mark();
+ }
+
+ return true;
+}
+
+// Return a simplified form of |def|, if we can.
+MDefinition*
+ValueNumberer::simplified(MDefinition* def) const
+{
+ return def->foldsTo(graph_.alloc());
+}
+
+// If an equivalent and dominating value already exists in the set, return it.
+// Otherwise insert |def| into the set and return it.
+MDefinition*
+ValueNumberer::leader(MDefinition* def)
+{
+ // If the value isn't suitable for eliminating, don't bother hashing it. The
+ // convention is that congruentTo returns false for node kinds that wish to
+    // opt out of redundancy elimination.
+ // TODO: It'd be nice to clean up that convention (bug 1031406).
+ if (!def->isEffectful() && def->congruentTo(def)) {
+ // Look for a match.
+ VisibleValues::AddPtr p = values_.findLeaderForAdd(def);
+ if (p) {
+ MDefinition* rep = *p;
+ if (!rep->isDiscarded() && rep->block()->dominates(def->block())) {
+ // We found a dominating congruent value.
+ return rep;
+ }
+
+ // The congruent value doesn't dominate. It never will again in this
+ // dominator tree, so overwrite it.
+ values_.overwrite(p, def);
+ } else {
+ // No match. Add a new entry.
+ if (!values_.add(p, def))
+ return nullptr;
+ }
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Recording %s%u", def->opName(), def->id());
+#endif
+ }
+
+ return def;
+}
+
+// Test whether |phi| is dominated by a congruent phi.
+bool
+ValueNumberer::hasLeader(const MPhi* phi, const MBasicBlock* phiBlock) const
+{
+ if (VisibleValues::Ptr p = values_.findLeader(phi)) {
+ const MDefinition* rep = *p;
+ return rep != phi && rep->block()->dominates(phiBlock);
+ }
+ return false;
+}
+
+// Test whether there are any phis in |header| which are newly optimizable, as a
+// result of optimizations done inside the loop. This is not a sparse approach,
+// but restarting is rare enough in practice. Termination is ensured by
+// discarding the phi triggering the iteration.
+bool
+ValueNumberer::loopHasOptimizablePhi(MBasicBlock* header) const
+{
+ // If the header is unreachable, don't bother re-optimizing it.
+ if (header->isMarked())
+ return false;
+
+ // Rescan the phis for any that can be simplified, since they may be reading
+ // values from backedges.
+ for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd()); iter != end; ++iter) {
+ MPhi* phi = *iter;
+ MOZ_ASSERT_IF(!phi->hasUses(), !DeadIfUnused(phi));
+
+ if (phi->operandIfRedundant() || hasLeader(phi, header))
+ return true; // Phi can be simplified.
+ }
+ return false;
+}
+
+// Visit |def|.
+bool
+ValueNumberer::visitDefinition(MDefinition* def)
+{
+    // Nop does not fit in any of the previous optimizations, as its only
+    // purpose is to reduce register pressure by keeping an additional resume
+    // point. Still, there is no need for a consecutive list of MNop
+    // instructions, and they would slow down every other iteration over the
+    // graph.
+ if (def->isNop()) {
+ MNop* nop = def->toNop();
+ MBasicBlock* block = nop->block();
+
+        // We look backward to know whether we can remove the previous Nop; we
+        // do not look forward, as we would not benefit from the folding done
+        // by GVN.
+ MInstructionReverseIterator iter = ++block->rbegin(nop);
+
+        // This nop is at the beginning of the basic block; just replace the
+        // entry resume point of the basic block with the nop's resume point.
+ if (iter == block->rend()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
+ nop->moveResumePointAsEntry();
+ block->discard(nop);
+ return true;
+ }
+
+ // The previous instruction is also a Nop, no need to keep it anymore.
+ MInstruction* prev = *iter;
+ if (prev->isNop()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
+ block->discard(prev);
+ return true;
+ }
+
+        // The Nop is introduced to capture the result and make sure the
+        // operands are not live anymore when there are no further uses. However,
+        // when all operands are still needed, the Nop doesn't decrease the
+        // liveness and can be removed.
+ MResumePoint* rp = nop->resumePoint();
+ if (rp && rp->numOperands() > 0 &&
+ rp->getOperand(rp->numOperands() - 1) == prev &&
+ !nop->block()->lastIns()->isThrow() &&
+ !prev->isAssertRecoveredOnBailout())
+ {
+ size_t numOperandsLive = 0;
+ for (size_t j = 0; j < prev->numOperands(); j++) {
+ for (size_t i = 0; i < rp->numOperands(); i++) {
+ if (prev->getOperand(j) == rp->getOperand(i)) {
+ numOperandsLive++;
+ break;
+ }
+ }
+ }
+
+ if (numOperandsLive == prev->numOperands()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
+ block->discard(nop);
+ }
+ }
+
+ return true;
+ }
+
+ // Skip optimizations on instructions which are recovered on bailout, to
+ // avoid mixing instructions which are recovered on bailouts with
+ // instructions which are not.
+ if (def->isRecoveredOnBailout())
+ return true;
+
+ // If this instruction has a dependency() into an unreachable block, we'll
+ // need to update AliasAnalysis.
+ MDefinition* dep = def->dependency();
+ if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
+ JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
+ if (updateAliasAnalysis_ && !dependenciesBroken_) {
+ // TODO: Recomputing alias-analysis could theoretically expose more
+ // GVN opportunities.
+ JitSpew(JitSpew_GVN, " Will recompute!");
+ dependenciesBroken_ = true;
+ }
+ // Temporarily clear its dependency, to protect foldsTo, which may
+ // wish to use the dependency to do store-to-load forwarding.
+ def->setDependency(def->toInstruction());
+ } else {
+ dep = nullptr;
+ }
+
+ // Look for a simplified form of |def|.
+ MDefinition* sim = simplified(def);
+ if (sim != def) {
+ if (sim == nullptr)
+ return false;
+
+ bool isNewInstruction = sim->block() == nullptr;
+
+ // If |sim| doesn't belong to a block, insert it next to |def|.
+ if (isNewInstruction)
+ def->block()->insertAfter(def->toInstruction(), sim->toInstruction());
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
+ def->opName(), def->id(), sim->opName(), sim->id());
+#endif
+ MOZ_ASSERT(!sim->isDiscarded());
+ ReplaceAllUsesWith(def, sim);
+
+        // The node's foldsTo said |def| can be replaced by |sim|. If |def| is
+        // a guard, then either |sim| is also a guard, or a guard isn't
+        // actually needed, so we can clear |def|'s guard flag and let it be
+        // discarded.
+ def->setNotGuardUnchecked();
+
+ if (def->isGuardRangeBailouts())
+ sim->setGuardRangeBailoutsUnchecked();
+
+ if (DeadIfUnused(def)) {
+ if (!discardDefsRecursively(def))
+ return false;
+
+ // If that ended up discarding |sim|, then we're done here.
+ if (sim->isDiscarded())
+ return true;
+ }
+
+ if (!rerun_ && def->isPhi() && !sim->isPhi()) {
+ rerun_ = true;
+ JitSpew(JitSpew_GVN, " Replacing phi%u may have enabled cascading optimisations; "
+ "will re-run", def->id());
+ }
+
+        // Otherwise, proceed to optimize with |sim| in place of |def|.
+ def = sim;
+
+ // If the simplified instruction was already part of the graph, then we
+ // probably already visited and optimized this instruction.
+ if (!isNewInstruction)
+ return true;
+ }
+
+ // Now that foldsTo is done, re-enable the original dependency. Even though
+ // it may be pointing into a discarded block, it's still valid for the
+ // purposes of detecting congruent loads.
+ if (dep != nullptr)
+ def->setDependency(dep);
+
+ // Look for a dominating def which makes |def| redundant.
+ MDefinition* rep = leader(def);
+ if (rep != def) {
+ if (rep == nullptr)
+ return false;
+ if (rep->updateForReplacement(def)) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN,
+ " Replacing %s%u with %s%u",
+ def->opName(), def->id(), rep->opName(), rep->id());
+#endif
+ ReplaceAllUsesWith(def, rep);
+
+ // The node's congruentTo said |def| is congruent to |rep|, and it's
+ // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
+ // so we can clear |def|'s guard flag and let it be discarded.
+ def->setNotGuardUnchecked();
+
+ if (DeadIfUnused(def)) {
+ // discardDef should not add anything to the deadDefs, as the
+ // redundant operation should have the same input operands.
+ mozilla::DebugOnly<bool> r = discardDef(def);
+ MOZ_ASSERT(r, "discardDef shouldn't have tried to add anything to the worklist, "
+ "so it shouldn't have failed");
+ MOZ_ASSERT(deadDefs_.empty(),
+ "discardDef shouldn't have added anything to the worklist");
+ }
+ def = rep;
+ }
+ }
+
+ return true;
+}
+
+// Visit the control instruction at the end of |block|.
+bool
+ValueNumberer::visitControlInstruction(MBasicBlock* block, const MBasicBlock* dominatorRoot)
+{
+ // Look for a simplified form of the control instruction.
+ MControlInstruction* control = block->lastIns();
+ MDefinition* rep = simplified(control);
+ if (rep == control)
+ return true;
+
+ if (rep == nullptr)
+ return false;
+
+ MControlInstruction* newControl = rep->toControlInstruction();
+ MOZ_ASSERT(!newControl->block(),
+ "Control instruction replacement shouldn't already be in a block");
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Folded control instruction %s%u to %s%u",
+ control->opName(), control->id(), newControl->opName(), graph_.getNumInstructionIds());
+#endif
+
+ // If the simplification removes any CFG edges, update the CFG and remove
+ // any blocks that become dead.
+ size_t oldNumSuccs = control->numSuccessors();
+ size_t newNumSuccs = newControl->numSuccessors();
+ if (newNumSuccs != oldNumSuccs) {
+ MOZ_ASSERT(newNumSuccs < oldNumSuccs, "New control instruction has too many successors");
+ for (size_t i = 0; i != oldNumSuccs; ++i) {
+ MBasicBlock* succ = control->getSuccessor(i);
+ if (HasSuccessor(newControl, succ))
+ continue;
+ if (succ->isMarked())
+ continue;
+ if (!removePredecessorAndCleanUp(succ, block))
+ return false;
+ if (succ->isMarked())
+ continue;
+ if (!rerun_) {
+ if (!remainingBlocks_.append(succ))
+ return false;
+ }
+ }
+ }
+
+ if (!releaseOperands(control))
+ return false;
+ block->discardIgnoreOperands(control);
+ block->end(newControl);
+ if (block->entryResumePoint() && newNumSuccs != oldNumSuccs)
+ block->flagOperandsOfPrunedBranches(newControl);
+ return processDeadDefs();
+}
+
+// |block| is unreachable. Mine it for opportunities to delete more dead
+// code, and then discard it.
+bool
+ValueNumberer::visitUnreachableBlock(MBasicBlock* block)
+{
+ JitSpew(JitSpew_GVN, " Visiting unreachable block%u%s%s%s", block->id(),
+ block->isLoopHeader() ? " (loop header)" : "",
+ block->isSplitEdge() ? " (split edge)" : "",
+ block->immediateDominator() == block ? " (dominator root)" : "");
+
+ MOZ_ASSERT(block->isMarked(), "Visiting unmarked (and therefore reachable?) block");
+ MOZ_ASSERT(block->numPredecessors() == 0, "Block marked unreachable still has predecessors");
+ MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block");
+ MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block");
+ MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
+
+ // Disconnect all outgoing CFG edges.
+ for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (succ->isDead() || succ->isMarked())
+ continue;
+ if (!removePredecessorAndCleanUp(succ, block))
+ return false;
+ if (succ->isMarked())
+ continue;
+ // |succ| is still reachable. Make a note of it so that we can scan
+ // it for interesting dominator tree changes later.
+ if (!rerun_) {
+ if (!remainingBlocks_.append(succ))
+ return false;
+ }
+ }
+
+ // Discard any instructions with no uses. The remaining instructions will be
+ // discarded when their last use is discarded.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MDefinitionIterator iter(block); iter; ) {
+ MDefinition* def = *iter++;
+ if (def->hasUses())
+ continue;
+ nextDef_ = *iter;
+ if (!discardDefsRecursively(def))
+ return false;
+ }
+
+ nextDef_ = nullptr;
+ MControlInstruction* control = block->lastIns();
+ return discardDefsRecursively(control);
+}
+
+// Visit all the phis and instructions in |block|.
+bool
+ValueNumberer::visitBlock(MBasicBlock* block, const MBasicBlock* dominatorRoot)
+{
+ MOZ_ASSERT(!block->isMarked(), "Blocks marked unreachable during GVN");
+ MOZ_ASSERT(!block->isDead(), "Block to visit is already dead");
+
+ JitSpew(JitSpew_GVN, " Visiting block%u", block->id());
+
+ // Visit the definitions in the block top-down.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MDefinitionIterator iter(block); iter; ) {
+ if (!graph_.alloc().ensureBallast())
+ return false;
+ MDefinition* def = *iter++;
+
+ // Remember where our iterator is so that we don't invalidate it.
+ nextDef_ = *iter;
+
+ // If the definition is dead, discard it.
+ if (IsDiscardable(def)) {
+ if (!discardDefsRecursively(def))
+ return false;
+ continue;
+ }
+
+ if (!visitDefinition(def))
+ return false;
+ }
+ nextDef_ = nullptr;
+
+ return visitControlInstruction(block, dominatorRoot);
+}
+
+// Visit all the blocks dominated by dominatorRoot.
+bool
+ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot)
+{
+ JitSpew(JitSpew_GVN, " Visiting dominator tree (with %" PRIu64 " blocks) rooted at block%u%s",
+ uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
+ dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
+ dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
+ dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
+ " (merge point from normal entry and OSR entry)");
+ MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
+ "root is not a dominator tree root");
+
+ // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
+ // property that we'll always visit a block before any block it dominates,
+ // so we can make a single pass through the list and see every full
+    // redundancy.
+ size_t numVisited = 0;
+ size_t numDiscarded = 0;
+ for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
+ MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+ MBasicBlock* block = *iter++;
+ // We're only visiting blocks in dominatorRoot's tree right now.
+ if (!dominatorRoot->dominates(block))
+ continue;
+
+ // If this is a loop backedge, remember the header, as we may not be able
+ // to find it after we simplify the block.
+ MBasicBlock* header = block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;
+
+ if (block->isMarked()) {
+ // This block has become unreachable; handle it specially.
+ if (!visitUnreachableBlock(block))
+ return false;
+ ++numDiscarded;
+ } else {
+ // Visit the block!
+ if (!visitBlock(block, dominatorRoot))
+ return false;
+ ++numVisited;
+ }
+
+ // If the block is/was a loop backedge, check to see if the block that
+ // is/was its header has optimizable phis, which would want a re-run.
+ if (!rerun_ && header && loopHasOptimizablePhi(header)) {
+ JitSpew(JitSpew_GVN, " Loop phi in block%u can now be optimized; will re-run GVN!",
+ header->id());
+ rerun_ = true;
+ remainingBlocks_.clear();
+ }
+
+ MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
+ "Visited blocks too many times");
+ if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
+ break;
+ }
+
+ totalNumVisited_ += numVisited;
+ values_.clear();
+ return true;
+}
+
+// Visit all the blocks in the graph.
+bool
+ValueNumberer::visitGraph()
+{
+    // Due to OSR blocks, the set of blocks dominated by a block may not be
+ // contiguous in the RPO. Do a separate traversal for each dominator tree
+ // root. There's always the main entry, and sometimes there's an OSR entry,
+ // and then there are the roots formed where the OSR paths merge with the
+ // main entry paths.
+ for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ) {
+ MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+ MBasicBlock* block = *iter;
+ if (block->immediateDominator() == block) {
+ if (!visitDominatorTree(block))
+ return false;
+
+ // Normally unreachable blocks would be removed by now, but if this
+ // block is a dominator tree root, it has been special-cased and left
+ // in place in order to avoid invalidating our iterator. Now that
+ // we've finished the tree, increment the iterator, and then if it's
+ // marked for removal, remove it.
+ ++iter;
+ if (block->isMarked()) {
+ JitSpew(JitSpew_GVN, " Discarding dominator root block%u",
+ block->id());
+ MOZ_ASSERT(block->begin() == block->end(),
+ "Unreachable dominator tree root has instructions after tree walk");
+ MOZ_ASSERT(block->phisEmpty(),
+ "Unreachable dominator tree root has phis after tree walk");
+ graph_.removeBlock(block);
+ blocksRemoved_ = true;
+ }
+
+ MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(), "Visited blocks too many times");
+ if (totalNumVisited_ >= graph_.numBlocks())
+ break;
+ } else {
+            // This block is not a dominator tree root. Proceed to the next one.
+ ++iter;
+ }
+ }
+ totalNumVisited_ = 0;
+ return true;
+}
+
+bool
+ValueNumberer::insertOSRFixups()
+{
+ ReversePostorderIterator end(graph_.end());
+ for (ReversePostorderIterator iter(graph_.begin()); iter != end; ) {
+ MBasicBlock* block = *iter++;
+
+        // Only add a fixup block above loops which can be reached from OSR.
+ if (!block->isLoopHeader())
+ continue;
+
+ // If the loop header is not self-dominated, then this loop does not
+ // have to deal with a second entry point, so there is no need to add a
+ // second entry point with a fixup block.
+ if (block->immediateDominator() != block)
+ continue;
+
+ if (!fixupOSROnlyLoop(block, block->backedge()))
+ return false;
+ }
+
+ return true;
+}
+
+// OSR fixups serve the purpose of representing the non-OSR entry into a loop
+// when the only real entry is an OSR entry into the middle. However, if the
+// entry into the middle is subsequently folded away, the loop may actually
+// have become unreachable. Mark-and-sweep all blocks to remove all such code.
+bool ValueNumberer::cleanupOSRFixups()
+{
+ // Mark.
+ Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
+ unsigned numMarked = 2;
+ graph_.entryBlock()->mark();
+ graph_.osrBlock()->mark();
+ if (!worklist.append(graph_.entryBlock()) || !worklist.append(graph_.osrBlock()))
+ return false;
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (!succ->isMarked()) {
+ ++numMarked;
+ succ->mark();
+ if (!worklist.append(succ))
+ return false;
+ } else if (succ->isLoopHeader() &&
+ succ->loopPredecessor() == block &&
+ succ->numPredecessors() == 3)
+ {
+ // Unmark fixup blocks if the loop predecessor is marked after
+ // the loop header.
+ succ->getPredecessor(1)->unmarkUnchecked();
+ }
+ }
+
+ // OSR fixup blocks are needed if and only if the loop header is
+ // reachable from its backedge (via the OSR block) and not from its
+ // original loop predecessor.
+ //
+ // Thus OSR fixup blocks are removed if the loop header is not
+ // reachable, or if the loop header is reachable from both its backedge
+ // and its original loop predecessor.
+ if (block->isLoopHeader()) {
+ MBasicBlock* maybeFixupBlock = nullptr;
+ if (block->numPredecessors() == 2) {
+ maybeFixupBlock = block->getPredecessor(0);
+ } else {
+ MOZ_ASSERT(block->numPredecessors() == 3);
+ if (!block->loopPredecessor()->isMarked())
+ maybeFixupBlock = block->getPredecessor(1);
+ }
+
+ if (maybeFixupBlock &&
+ !maybeFixupBlock->isMarked() &&
+ maybeFixupBlock->numPredecessors() == 0)
+ {
+ MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
+ "OSR fixup block should have exactly one successor");
+ MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
+ "OSR fixup block shouldn't be the entry block");
+ MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
+ "OSR fixup block shouldn't be the OSR entry block");
+ maybeFixupBlock->mark();
+ }
+ }
+ }
+
+ // And sweep.
+ return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
+}
+
+ValueNumberer::ValueNumberer(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir), graph_(graph),
+ values_(graph.alloc()),
+ deadDefs_(graph.alloc()),
+ remainingBlocks_(graph.alloc()),
+ nextDef_(nullptr),
+ totalNumVisited_(0),
+ rerun_(false),
+ blocksRemoved_(false),
+ updateAliasAnalysis_(false),
+ dependenciesBroken_(false),
+ hasOSRFixups_(false)
+{}
+
+bool
+ValueNumberer::init()
+{
+ // Initialize the value set. It's tempting to pass in a size here of some
+ // function of graph_.getNumInstructionIds(), however if we start out with a
+ // large capacity, it will be far larger than the actual element count for
+ // most of the pass, so when we remove elements, it would often think it
+ // needs to compact itself. Empirically, just letting the HashTable grow as
+ // needed on its own seems to work pretty well.
+ return values_.init();
+}
+
+bool
+ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis)
+{
+ updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
+
+ JitSpew(JitSpew_GVN, "Running GVN on graph (with %" PRIu64 " blocks)",
+ uint64_t(graph_.numBlocks()));
+
+    // Adding fixup blocks only makes sense if we have a second entry point
+    // into the graph which can no longer be reached from the normal entry point.
+ if (graph_.osrBlock()) {
+ if (!insertOSRFixups())
+ return false;
+ }
+
+ // Top level non-sparse iteration loop. If an iteration performs a
+ // significant change, such as discarding a block which changes the
+ // dominator tree and may enable more optimization, this loop takes another
+ // iteration.
+ int runs = 0;
+ for (;;) {
+ if (!visitGraph())
+ return false;
+
+ // Test whether any block which was not removed but which had at least
+ // one predecessor removed will have a new dominator parent.
+ while (!remainingBlocks_.empty()) {
+ MBasicBlock* block = remainingBlocks_.popCopy();
+ if (!block->isDead() && IsDominatorRefined(block)) {
+ JitSpew(JitSpew_GVN, " Dominator for block%u can now be refined; will re-run GVN!",
+ block->id());
+ rerun_ = true;
+ remainingBlocks_.clear();
+ break;
+ }
+ }
+
+ if (blocksRemoved_) {
+ if (!AccountForCFGChanges(mir_, graph_, dependenciesBroken_, /* underValueNumberer = */ true))
+ return false;
+
+ blocksRemoved_ = false;
+ dependenciesBroken_ = false;
+ }
+
+ if (mir_->shouldCancel("GVN (outer loop)"))
+ return false;
+
+ // If no further opportunities have been discovered, we're done.
+ if (!rerun_)
+ break;
+
+ rerun_ = false;
+
+ // Enforce an arbitrary iteration limit. This is rarely reached, and
+ // isn't even strictly necessary, as the algorithm is guaranteed to
+ // terminate on its own in a finite amount of time (since every time we
+ // re-run we discard the construct which triggered the re-run), but it
+ // does help avoid slow compile times on pathological code.
+ ++runs;
+ if (runs == 6) {
+ JitSpew(JitSpew_GVN, "Re-run cutoff of %d reached. Terminating GVN!", runs);
+ break;
+ }
+
+ JitSpew(JitSpew_GVN, "Re-running GVN on graph (run %d, now with %" PRIu64 " blocks)",
+ runs, uint64_t(graph_.numBlocks()));
+ }
+
+ if (MOZ_UNLIKELY(hasOSRFixups_)) {
+ if (!cleanupOSRFixups())
+ return false;
+ hasOSRFixups_ = false;
+ }
+
+ return true;
+}
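+
+// Illustrative usage sketch (the exact call site lives in Ion's optimization
+// pipeline and may differ in detail):
+//
+//     ValueNumberer gvn(mir, graph);
+//     if (!gvn.init())
+//         return false;
+//     if (!gvn.run(ValueNumberer::UpdateAliasAnalysis))
+//         return false;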
diff --git a/js/src/jit/ValueNumbering.h b/js/src/jit/ValueNumbering.h
new file mode 100644
index 000000000..c55abeb9d
--- /dev/null
+++ b/js/src/jit/ValueNumbering.h
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ValueNumbering_h
+#define jit_ValueNumbering_h
+
+#include "jit/JitAllocPolicy.h"
+#include "js/HashTable.h"
+
+namespace js {
+namespace jit {
+
+class MDefinition;
+class MBasicBlock;
+class MIRGraph;
+class MPhi;
+class MIRGenerator;
+class MResumePoint;
+
+class ValueNumberer
+{
+ // Value numbering data.
+ class VisibleValues
+ {
+ // Hash policy for ValueSet.
+ struct ValueHasher
+ {
+ typedef const MDefinition* Lookup;
+ typedef MDefinition* Key;
+ static HashNumber hash(Lookup ins);
+ static bool match(Key k, Lookup l);
+ static void rekey(Key& k, Key newKey);
+ };
+
+ typedef HashSet<MDefinition*, ValueHasher, JitAllocPolicy> ValueSet;
+
+ ValueSet set_; // Set of visible values
+
+ public:
+ explicit VisibleValues(TempAllocator& alloc);
+ MOZ_MUST_USE bool init();
+
+ typedef ValueSet::Ptr Ptr;
+ typedef ValueSet::AddPtr AddPtr;
+
+ Ptr findLeader(const MDefinition* def) const;
+ AddPtr findLeaderForAdd(MDefinition* def);
+ MOZ_MUST_USE bool add(AddPtr p, MDefinition* def);
+ void overwrite(AddPtr p, MDefinition* def);
+ void forget(const MDefinition* def);
+ void clear();
+#ifdef DEBUG
+ bool has(const MDefinition* def) const;
+#endif
+ };
+
+ typedef Vector<MBasicBlock*, 4, JitAllocPolicy> BlockWorklist;
+ typedef Vector<MDefinition*, 4, JitAllocPolicy> DefWorklist;
+
+ MIRGenerator* const mir_;
+ MIRGraph& graph_;
+ VisibleValues values_; // Numbered values
+ DefWorklist deadDefs_; // Worklist for deleting values
+ BlockWorklist remainingBlocks_; // Blocks remaining with fewer preds
+ MDefinition* nextDef_; // The next definition; don't discard
+ size_t totalNumVisited_; // The number of blocks visited
+ bool rerun_; // Should we run another GVN iteration?
+ bool blocksRemoved_; // Have any blocks been removed?
+ bool updateAliasAnalysis_; // Do we care about AliasAnalysis?
+ bool dependenciesBroken_; // Have we broken AliasAnalysis?
+ bool hasOSRFixups_; // Have we created any OSR fixup blocks?
+
+ enum UseRemovedOption {
+ DontSetUseRemoved,
+ SetUseRemoved
+ };
+
+ MOZ_MUST_USE bool handleUseReleased(MDefinition* def, UseRemovedOption useRemovedOption);
+ MOZ_MUST_USE bool discardDefsRecursively(MDefinition* def);
+ MOZ_MUST_USE bool releaseResumePointOperands(MResumePoint* resume);
+ MOZ_MUST_USE bool releaseAndRemovePhiOperands(MPhi* phi);
+ MOZ_MUST_USE bool releaseOperands(MDefinition* def);
+ MOZ_MUST_USE bool discardDef(MDefinition* def);
+ MOZ_MUST_USE bool processDeadDefs();
+
+ MOZ_MUST_USE bool fixupOSROnlyLoop(MBasicBlock* block, MBasicBlock* backedge);
+ MOZ_MUST_USE bool removePredecessorAndDoDCE(MBasicBlock* block, MBasicBlock* pred,
+ size_t predIndex);
+ MOZ_MUST_USE bool removePredecessorAndCleanUp(MBasicBlock* block, MBasicBlock* pred);
+
+ MDefinition* simplified(MDefinition* def) const;
+ MDefinition* leader(MDefinition* def);
+ bool hasLeader(const MPhi* phi, const MBasicBlock* phiBlock) const;
+ bool loopHasOptimizablePhi(MBasicBlock* header) const;
+
+ MOZ_MUST_USE bool visitDefinition(MDefinition* def);
+ MOZ_MUST_USE bool visitControlInstruction(MBasicBlock* block, const MBasicBlock* root);
+ MOZ_MUST_USE bool visitUnreachableBlock(MBasicBlock* block);
+ MOZ_MUST_USE bool visitBlock(MBasicBlock* block, const MBasicBlock* root);
+ MOZ_MUST_USE bool visitDominatorTree(MBasicBlock* root);
+ MOZ_MUST_USE bool visitGraph();
+
+ MOZ_MUST_USE bool insertOSRFixups();
+ MOZ_MUST_USE bool cleanupOSRFixups();
+
+ public:
+ ValueNumberer(MIRGenerator* mir, MIRGraph& graph);
+ MOZ_MUST_USE bool init();
+
+ enum UpdateAliasAnalysisFlag {
+ DontUpdateAliasAnalysis,
+ UpdateAliasAnalysis
+ };
+
+ // Optimize the graph, performing expression simplification and
+ // canonicalization, eliminating statically fully-redundant expressions,
+ // deleting dead instructions, and removing unreachable blocks.
+ MOZ_MUST_USE bool run(UpdateAliasAnalysisFlag updateAliasAnalysis);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ValueNumbering_h */
diff --git a/js/src/jit/WasmBCE.cpp b/js/src/jit/WasmBCE.cpp
new file mode 100644
index 000000000..aac362738
--- /dev/null
+++ b/js/src/jit/WasmBCE.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "jit/WasmBCE.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace mozilla;
+
+typedef js::HashMap<uint32_t, MDefinition*, DefaultHasher<uint32_t>, SystemAllocPolicy>
+ LastSeenMap;
+
+// The Wasm Bounds Check Elimination (BCE) pass looks for bounds checks on SSA
+// values that have already been checked, either in the same block or in a
+// dominating block. Such bounds checks are redundant and are eliminated.
+//
+// Note: This is safe in the presence of dynamic memory sizes as long as they
+// can ONLY GROW. If we allow SHRINKING the heap, this pass should be
+// RECONSIDERED.
+//
+// TODO (dbounov): Are there a lot of cases where there is no single dominating
+// check, but a set of checks that together dominate a redundant check?
+//
+// TODO (dbounov): Generalize to constant additions relative to one base
+bool
+jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph)
+{
+    // Maps a definition's id to the bounds check (or checked phi) that covers it.
+ LastSeenMap lastSeen;
+ if (!lastSeen.init())
+ return false;
+
+ for (ReversePostorderIterator bIter(graph.rpoBegin()); bIter != graph.rpoEnd(); bIter++) {
+ MBasicBlock* block = *bIter;
+ for (MDefinitionIterator dIter(block); dIter;) {
+ MDefinition* def = *dIter++;
+
+ switch (def->op()) {
+ case MDefinition::Op_WasmBoundsCheck: {
+ MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
+ MDefinition* addr = def->getOperand(0);
+
+ LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
+ if (ptr) {
+ if (ptr->value()->block()->dominates(block))
+ bc->setRedundant(true);
+ } else {
+ if (!lastSeen.add(ptr, addr->id(), def))
+ return false;
+ }
+ break;
+ }
+ case MDefinition::Op_Phi: {
+ MPhi* phi = def->toPhi();
+ bool phiChecked = true;
+
+ MOZ_ASSERT(phi->numOperands() > 0);
+
+ // If all incoming values to a phi node are safe (i.e. have a
+ // check that dominates this block) then we can consider this
+ // phi node checked.
+ //
+ // Note that any phi that is part of a cycle
+ // will not be "safe" since the value coming on the backedge
+ // cannot be in lastSeen because its block hasn't been traversed yet.
+ for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) {
+ MDefinition* src = phi->getOperand(i);
+
+ LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());
+ if (!checkPtr || !checkPtr->value()->block()->dominates(block)) {
+ phiChecked = false;
+ break;
+ }
+ }
+
+ if (phiChecked) {
+ if (!lastSeen.put(def->id(), def))
+ return false;
+ }
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+
+ return true;
+}
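+
+// Worked example (illustrative): given MIR along the lines of
+//
+//     block1:
+//         i = ...
+//         wasmBoundsCheck i      // first check on i; recorded in lastSeen
+//         load [i]
+//     block2 (dominated by block1):
+//         wasmBoundsCheck i      // same SSA value, covered by the dominating
+//                                // check above, so it is marked redundant
+//         store [i]
+//
+// only the first bounds check needs to be emitted.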
diff --git a/js/src/jit/WasmBCE.h b/js/src/jit/WasmBCE.h
new file mode 100644
index 000000000..e525d6735
--- /dev/null
+++ b/js/src/jit/WasmBCE.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef jit_wasmbce_h
+#define jit_wasmbce_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+bool EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_wasmbce_h */
diff --git a/js/src/jit/arm/Architecture-arm.cpp b/js/src/jit/arm/Architecture-arm.cpp
new file mode 100644
index 000000000..3fcdbb2cc
--- /dev/null
+++ b/js/src/jit/arm/Architecture-arm.cpp
@@ -0,0 +1,444 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/Architecture-arm.h"
+
+#if !defined(JS_SIMULATOR_ARM) && !defined(__APPLE__)
+#include <elf.h>
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/RegisterSets.h"
+
+#if !defined(__linux__) || defined(ANDROID) || defined(JS_SIMULATOR_ARM)
+// The Android NDK and B2G do not include the hwcap.h kernel header, and it is not
+// defined when building the simulator, so inline the header defines we need.
+# define HWCAP_VFP (1 << 6)
+# define HWCAP_NEON (1 << 12)
+# define HWCAP_VFPv3 (1 << 13)
+# define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+# define HWCAP_VFPv4 (1 << 16)
+# define HWCAP_IDIVA (1 << 17)
+# define HWCAP_IDIVT (1 << 18)
+# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+# define AT_HWCAP 16
+#else
+# include <asm/hwcap.h>
+# if !defined(HWCAP_IDIVA)
+# define HWCAP_IDIVA (1 << 17)
+# endif
+# if !defined(HWCAP_VFPD32)
+# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+# endif
+#endif
+
+namespace js {
+namespace jit {
+
+// Parse the Linux kernel cpuinfo features. This is also used to parse the
+// override features, which accept some extensions: 'armv7', 'align' and 'hardfp'.
+static uint32_t
+ParseARMCpuFeatures(const char* features, bool override = false)
+{
+ uint32_t flags = 0;
+
+    // For ease of running tests we want fixing up faults to be the default.
+ bool fixupAlignmentFault = true;
+
+ for (;;) {
+ char ch = *features;
+ if (!ch) {
+ // End of string.
+ break;
+ }
+ if (ch == ' ' || ch == ',') {
+ // Skip separator characters.
+ features++;
+ continue;
+ }
+ // Find the end of the token.
+ const char* end = features + 1;
+ for (; ; end++) {
+ ch = *end;
+ if (!ch || ch == ' ' || ch == ',')
+ break;
+ }
+ size_t count = end - features;
+ if (count == 3 && strncmp(features, "vfp", 3) == 0)
+ flags |= HWCAP_VFP;
+ else if (count == 4 && strncmp(features, "neon", 4) == 0)
+ flags |= HWCAP_NEON;
+ else if (count == 5 && strncmp(features, "vfpv3", 5) == 0)
+ flags |= HWCAP_VFPv3;
+ else if (count == 8 && strncmp(features, "vfpv3d16", 8) == 0)
+ flags |= HWCAP_VFPv3D16;
+ else if (count == 5 && strncmp(features, "vfpv4", 5) == 0)
+ flags |= HWCAP_VFPv4;
+ else if (count == 5 && strncmp(features, "idiva", 5) == 0)
+ flags |= HWCAP_IDIVA;
+ else if (count == 5 && strncmp(features, "idivt", 5) == 0)
+ flags |= HWCAP_IDIVT;
+ else if (count == 6 && strncmp(features, "vfpd32", 6) == 0)
+ flags |= HWCAP_VFPD32;
+ else if (count == 5 && strncmp(features, "armv7", 5) == 0)
+ flags |= HWCAP_ARMv7;
+ else if (count == 5 && strncmp(features, "align", 5) == 0)
+ flags |= HWCAP_ALIGNMENT_FAULT | HWCAP_FIXUP_FAULT;
+#if defined(JS_SIMULATOR_ARM)
+ else if (count == 7 && strncmp(features, "nofixup", 7) == 0)
+ fixupAlignmentFault = false;
+ else if (count == 6 && strncmp(features, "hardfp", 6) == 0)
+ flags |= HWCAP_USE_HARDFP_ABI;
+#endif
+ else if (override)
+ fprintf(stderr, "Warning: unexpected ARM feature at: %s\n", features);
+ features = end;
+ }
+
+ if (!fixupAlignmentFault)
+ flags &= ~HWCAP_FIXUP_FAULT;
+
+ return flags;
+}
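+
+// For example (illustrative): ParseARMCpuFeatures("vfpv3 neon idiva") returns
+// HWCAP_VFPv3 | HWCAP_NEON | HWCAP_IDIVA; unrecognized tokens are only
+// reported when |override| is true.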
+
+static uint32_t
+CanonicalizeARMHwCapFlags(uint32_t flags)
+{
+ // Canonicalize the flags. These rules are also applied to the features
+ // supplied for simulation.
+
+ // The VFPv3 feature is expected when the VFPv3D16 is reported, but add it
+ // just in case of a kernel difference in feature reporting.
+ if (flags & HWCAP_VFPv3D16)
+ flags |= HWCAP_VFPv3;
+
+ // If VFPv3 or Neon is supported then this must be an ARMv7.
+ if (flags & (HWCAP_VFPv3 | HWCAP_NEON))
+ flags |= HWCAP_ARMv7;
+
+ // Some old kernels report VFP and not VFPv3, but if ARMv7 then it must be
+ // VFPv3.
+ if (flags & HWCAP_VFP && flags & HWCAP_ARMv7)
+ flags |= HWCAP_VFPv3;
+
+ // Older kernels do not implement the HWCAP_VFPD32 flag.
+ if ((flags & HWCAP_VFPv3) && !(flags & HWCAP_VFPv3D16))
+ flags |= HWCAP_VFPD32;
+
+ return flags;
+}
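+
+// Worked example (illustrative): starting from HWCAP_VFP | HWCAP_NEON, the
+// rules above add HWCAP_ARMv7 (NEON implies ARMv7), then HWCAP_VFPv3 (VFP on
+// an ARMv7), then HWCAP_VFPD32 (VFPv3 without the D16 restriction), yielding
+// HWCAP_VFP | HWCAP_NEON | HWCAP_ARMv7 | HWCAP_VFPv3 | HWCAP_VFPD32.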
+
+volatile bool forceDoubleCacheFlush = false;
+
+bool
+ForceDoubleCacheFlush() {
+ return forceDoubleCacheFlush;
+}
+
+// The override flags parsed from the ARMHWCAP environment variable or from the
+// --arm-hwcap js shell argument.
+volatile uint32_t armHwCapFlags = HWCAP_UNINITIALIZED;
+
+bool
+ParseARMHwCapFlags(const char* armHwCap)
+{
+ uint32_t flags = 0;
+
+ if (!armHwCap)
+ return false;
+
+ if (strstr(armHwCap, "help")) {
+ fflush(NULL);
+ printf(
+ "\n"
+ "usage: ARMHWCAP=option,option,option,... where options can be:\n"
+ "\n"
+ " vfp \n"
+ " neon \n"
+ " vfpv3 \n"
+ " vfpv3d16 \n"
+ " vfpv4 \n"
+ " idiva \n"
+ " idivt \n"
+ " vfpd32 \n"
+ " armv7 \n"
+ " align - unaligned accesses will trap and be emulated\n"
+#ifdef JS_SIMULATOR_ARM
+ " nofixup - disable emulation of unaligned accesses\n"
+ " hardfp \n"
+#endif
+ "\n"
+ );
+ exit(0);
+ /*NOTREACHED*/
+ }
+
+ flags = ParseARMCpuFeatures(armHwCap, /* override = */ true);
+
+#ifdef JS_CODEGEN_ARM_HARDFP
+ flags |= HWCAP_USE_HARDFP_ABI;
+#endif
+
+ armHwCapFlags = CanonicalizeARMHwCapFlags(flags);
+ JitSpew(JitSpew_Codegen, "ARM HWCAP: 0x%x\n", armHwCapFlags);
+ return true;
+}
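+
+// For example (illustrative), the feature set can be overridden when starting
+// the shell via the environment:
+//
+//     ARMHWCAP=armv7,vfpv3,neon,idiva js script.js
+//
+// and ARMHWCAP=help prints the accepted option names shown above.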
+
+void
+InitARMFlags()
+{
+ uint32_t flags = 0;
+
+ if (armHwCapFlags != HWCAP_UNINITIALIZED)
+ return;
+
+ const char* env = getenv("ARMHWCAP");
+ if (ParseARMHwCapFlags(env))
+ return;
+
+#ifdef JS_SIMULATOR_ARM
+ // HWCAP_FIXUP_FAULT is on by default even if HWCAP_ALIGNMENT_FAULT is
+ // not on by default, because some memory access instructions always fault.
+ // Notably, this is true for floating point accesses.
+ flags = HWCAP_ARMv7 | HWCAP_VFP | HWCAP_VFPv3 | HWCAP_VFPv4 | HWCAP_NEON | HWCAP_IDIVA
+ | HWCAP_FIXUP_FAULT;
+#else
+
+#if defined(__linux__) || defined(ANDROID)
+ // This includes Android and B2G.
+ bool readAuxv = false;
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd > 0) {
+ struct { uint32_t a_type; uint32_t a_val; } aux;
+ while (read(fd, &aux, sizeof(aux))) {
+ if (aux.a_type == AT_HWCAP) {
+ flags = aux.a_val;
+ readAuxv = true;
+ break;
+ }
+ }
+ close(fd);
+ }
+
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (fp) {
+ char buf[1024];
+ memset(buf, 0, sizeof(buf));
+ size_t len = fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ buf[len] = '\0';
+
+ // Read the cpuinfo Features if the auxv is not available.
+ if (!readAuxv) {
+ char* featureList = strstr(buf, "Features");
+ if (featureList) {
+ if (char* featuresEnd = strstr(featureList, "\n"))
+ *featuresEnd = '\0';
+ flags = ParseARMCpuFeatures(featureList + 8);
+ }
+ if (strstr(buf, "ARMv7"))
+ flags |= HWCAP_ARMv7;
+ }
+
+        // The Exynos7420 CPU (EU Galaxy S6 (Note)) has a bug where flushing
+        // sometimes fails to invalidate the instruction cache. As a result we
+        // force it by calling cacheFlush twice on different start addresses.
+ char* exynos7420 = strstr(buf, "Exynos7420");
+ if (exynos7420)
+ forceDoubleCacheFlush = true;
+ }
+#endif
+
+    // If compiled to use specialized features then these features can be
+    // assumed to be present, since otherwise the compiled code would fail to run.
+
+#ifdef JS_CODEGEN_ARM_HARDFP
+ // Compiled to use the hardfp ABI.
+ flags |= HWCAP_USE_HARDFP_ABI;
+#endif
+
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+ // Compiled to use VFP instructions so assume VFP support.
+ flags |= HWCAP_VFP;
+#endif
+
+#if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
+ // Compiled to use ARMv7 instructions so assume the ARMv7 arch.
+ flags |= HWCAP_ARMv7;
+#endif
+
+#if defined(__APPLE__)
+ #if defined(__ARM_NEON__)
+ flags |= HWCAP_NEON;
+ #endif
+ #if defined(__ARMVFPV3__)
+  flags |= HWCAP_VFPv3 | HWCAP_VFPD32;
+ #endif
+#endif
+
+#endif // JS_SIMULATOR_ARM
+
+ armHwCapFlags = CanonicalizeARMHwCapFlags(flags);
+
+ JitSpew(JitSpew_Codegen, "ARM HWCAP: 0x%x\n", armHwCapFlags);
+ return;
+}
+
+uint32_t
+GetARMFlags()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags;
+}
+
+bool HasARMv7()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasMOVWT()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasLDSTREXBHD()
+{
+ // These are really available from ARMv6K and later, but why bother?
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasDMBDSBISB()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasVFPv3()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFPv3;
+}
+
+bool HasVFP()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFP;
+}
+
+bool Has32DP()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFPD32;
+}
+
+bool HasIDIV()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_IDIVA;
+}
+
+// This is defined in the header and inlined when not using the simulator.
+#ifdef JS_SIMULATOR_ARM
+bool UseHardFpABI()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_USE_HARDFP_ABI;
+}
+#endif
+
+Registers::Code
+Registers::FromName(const char* name)
+{
+ // Check for some register aliases first.
+ if (strcmp(name, "ip") == 0)
+ return ip;
+ if (strcmp(name, "r13") == 0)
+ return r13;
+ if (strcmp(name, "lr") == 0)
+ return lr;
+ if (strcmp(name, "r15") == 0)
+ return r15;
+
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code
+FloatRegisters::FromName(const char* name)
+{
+ for (size_t i = 0; i < TotalSingle; ++i) {
+ if (strcmp(GetSingleName(Encoding(i)), name) == 0)
+ return VFPRegister(i, VFPRegister::Single).code();
+ }
+ for (size_t i = 0; i < TotalDouble; ++i) {
+ if (strcmp(GetDoubleName(Encoding(i)), name) == 0)
+ return VFPRegister(i, VFPRegister::Double).code();
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet
+VFPRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Add in just this float.
+ mod.addUnchecked(*iter);
+ } else if ((*iter).id() < 16) {
+ // A double with an overlay, add in both floats.
+ mod.addUnchecked((*iter).singleOverlay(0));
+ mod.addUnchecked((*iter).singleOverlay(1));
+ } else {
+ // Add in the lone double in the range 16-31.
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t
+VFPRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ uint32_t ret = mozilla::CountPopulation32(bits&0xffffffff) * sizeof(float);
+ ret += mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
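+
+// Worked example for the two helpers above (illustrative): pushing the set
+// {s2, d1} first reduces it to the singles {s2, s3}, since d1 overlays s2 and
+// s3, so GetPushSizeInBytes reports 2 * sizeof(float) == 8 bytes.
+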
+uint32_t
+VFPRegister::getRegisterDumpOffsetInBytes()
+{
+ if (isSingle())
+ return id() * sizeof(float);
+ if (isDouble())
+ return id() * sizeof(double);
+ MOZ_CRASH("not Single or Double");
+}
+
+uint32_t
+FloatRegisters::ActualTotalPhys()
+{
+ if (Has32DP())
+ return 32;
+ return 16;
+}
+
+
+} // namespace jit
+} // namespace js
+
diff --git a/js/src/jit/arm/Architecture-arm.h b/js/src/jit/arm/Architecture-arm.h
new file mode 100644
index 000000000..5e3db5ae2
--- /dev/null
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -0,0 +1,673 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Architecture_arm_h
+#define jit_arm_Architecture_arm_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "js/Utility.h"
+
+// GCC versions 4.6 and above define __ARM_PCS_VFP to denote a hard-float
+// ABI target. The iOS toolchain doesn't define anything specific here,
+// but iOS always supports VFP.
+#if defined(__ARM_PCS_VFP) || defined(XP_IOS)
+#define JS_CODEGEN_ARM_HARDFP
+#endif
+
+namespace js {
+namespace jit {
+
+// In bytes: slots needed for potential memory->memory move spills.
+// +8 for cycles
+// +4 for gpr spills
+// +8 for double spills
+static const uint32_t ION_FRAME_SLACK_SIZE = 20;
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
+static const uint32_t ShadowStackSpace = 0;
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
+static const uint32_t JumpImmediateRange = 25 * 1024 * 1024;
+
+////
+// These offsets are related to bailouts.
+////
+
+// Size of each bailout table entry. On ARM, this is presently a single call
+// (which is wrong!). The call clobbers lr.
+// For now, I've dealt with this by ensuring that we never allocate to lr. It
+// should probably be 8 bytes, a mov of an immediate into r12 (not allocated
+// presently, or ever) followed by a branch to the appropriate code.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 4;
+
+class Registers
+{
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ S0 = r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ S1 = r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ ip = r12,
+ r13,
+ sp = r13,
+ r14,
+ lr = r14,
+ r15,
+ pc = r15,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static const char* GetName(Code code) {
+ MOZ_ASSERT(code < Total);
+ static const char * const Names[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "r14", "pc"};
+ return Names[code];
+ }
+ static const char* GetName(Encoding i) {
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char* name);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ static const uint32_t Total = 16;
+ static const uint32_t Allocatable = 13;
+
+ typedef uint32_t SetType;
+
+ static const SetType AllMask = (1 << Total) - 1;
+ static const SetType ArgRegMask = (1 << r0) | (1 << r1) | (1 << r2) | (1 << r3);
+
+ static const SetType VolatileMask =
+ (1 << r0) |
+ (1 << r1) |
+ (1 << Registers::r2) |
+ (1 << Registers::r3)
+#if defined(XP_IOS)
+ // per https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html#//apple_ref/doc/uid/TP40009021-SW4
+ | (1 << Registers::r9)
+#endif
+ ;
+
+ static const SetType NonVolatileMask =
+ (1 << Registers::r4) |
+ (1 << Registers::r5) |
+ (1 << Registers::r6) |
+ (1 << Registers::r7) |
+ (1 << Registers::r8) |
+#if !defined(XP_IOS)
+ (1 << Registers::r9) |
+#endif
+ (1 << Registers::r10) |
+ (1 << Registers::r11) |
+ (1 << Registers::r12) |
+ (1 << Registers::r14);
+
+ static const SetType WrapperMask =
+ VolatileMask | // = arguments
+ (1 << Registers::r4) | // = outReg
+ (1 << Registers::r5); // = argBase
+
+ static const SetType SingleByteRegs =
+ VolatileMask | NonVolatileMask;
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::sp) |
+ (1 << Registers::r12) | // r12 = ip = scratch
+ (1 << Registers::lr) |
+ (1 << Registers::pc);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask =
+ (1 << Registers::r2) |
+ (1 << Registers::r3);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask =
+ (1 << Registers::r0) |
+ (1 << Registers::r1); // Used for double-size returns.
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint16_t PackedRegisterMask;
+
+class FloatRegisters
+{
+ public:
+ enum FPRegisterID {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ invalid_freg
+ };
+
+ typedef uint32_t Code;
+ typedef FPRegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ double d;
+ };
+
+ static const char* GetDoubleName(Encoding code) {
+ static const char * const Names[] = { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+ return Names[code];
+ }
+ static const char* GetSingleName(Encoding code) {
+ static const char * const Names[] = { "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const Encoding Invalid = invalid_freg;
+ static const uint32_t Total = 48;
+ static const uint32_t TotalDouble = 16;
+ static const uint32_t TotalSingle = 32;
+ static const uint32_t Allocatable = 45;
+ // There are only 32 places that we can put values.
+ static const uint32_t TotalPhys = 32;
+ static uint32_t ActualTotalPhys();
+
+ typedef uint64_t SetType;
+ static const SetType AllSingleMask = (1ull << TotalSingle) - 1;
+ static const SetType AllDoubleMask = ((1ull << TotalDouble) - 1) << TotalSingle;
+ static const SetType AllMask = AllDoubleMask | AllSingleMask;
+
+ // d15 is the ScratchFloatReg.
+ static const SetType NonVolatileDoubleMask =
+ ((1ULL << d8) |
+ (1ULL << d9) |
+ (1ULL << d10) |
+ (1ULL << d11) |
+ (1ULL << d12) |
+ (1ULL << d13) |
+ (1ULL << d14));
+ // s30 and s31 alias d15.
+ static const SetType NonVolatileMask =
+ (NonVolatileDoubleMask |
+ ((1 << s16) |
+ (1 << s17) |
+ (1 << s18) |
+ (1 << s19) |
+ (1 << s20) |
+ (1 << s21) |
+ (1 << s22) |
+ (1 << s23) |
+ (1 << s24) |
+ (1 << s25) |
+ (1 << s26) |
+ (1 << s27) |
+ (1 << s28) |
+ (1 << s29) |
+ (1 << s30)));
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+ static const SetType VolatileDoubleMask = AllDoubleMask & ~NonVolatileDoubleMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // d15 is the ARM scratch float register.
+ // s30 and s31 alias d15.
+ static const SetType NonAllocatableMask = ((1ULL << d15)) |
+ (1ULL << s30) |
+ (1ULL << s31);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class VFPRegister
+{
+ public:
+ // What type of data is being stored in this register? UInt / Int are
+ // specifically for vcvt, where we need to know how the data is supposed to
+ // be converted.
+ enum RegType {
+ Single = 0x0,
+ Double = 0x1,
+ UInt = 0x2,
+ Int = 0x3
+ };
+
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+
+ protected:
+ RegType kind : 2;
+    // ARM doesn't have more than 32 registers. Don't take more bits than we'll
+    // need. Presently, we don't have plans to address the upper and lower
+    // halves of the double registers separately, so 5 bits should suffice. If we
+    // do decide to address them separately (vmov, I'm looking at you), we will
+    // likely specify it as a separate field.
+ public:
+ uint32_t code_ : 5;
+ protected:
+ bool _isInvalid : 1;
+ bool _isMissing : 1;
+
+ public:
+ constexpr VFPRegister(uint32_t r, RegType k)
+ : kind(k), code_ (Code(r)), _isInvalid(false), _isMissing(false)
+ { }
+ constexpr VFPRegister()
+ : kind(Double), code_(Code(0)), _isInvalid(true), _isMissing(false)
+ { }
+
+ constexpr VFPRegister(RegType k, uint32_t id, bool invalid, bool missing) :
+ kind(k), code_(Code(id)), _isInvalid(invalid), _isMissing(missing) {
+ }
+
+ explicit constexpr VFPRegister(Code id)
+ : kind(Double), code_(id), _isInvalid(false), _isMissing(false)
+ { }
+ bool operator==(const VFPRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind == other.kind && code_ == other.code_;
+ }
+
+ bool isSingle() const { return kind == Single; }
+ bool isDouble() const { return kind == Double; }
+ bool isSimd128() const { return false; }
+ bool isFloat() const { return (kind == Double) || (kind == Single); }
+ bool isInt() const { return (kind == UInt) || (kind == Int); }
+ bool isSInt() const { return kind == Int; }
+ bool isUInt() const { return kind == UInt; }
+ bool equiv(const VFPRegister& other) const { return other.kind == kind; }
+ size_t size() const { return (kind == Double) ? 8 : 4; }
+ bool isInvalid() const;
+ bool isMissing() const;
+
+ VFPRegister doubleOverlay(unsigned int which = 0) const;
+ VFPRegister singleOverlay(unsigned int which = 0) const;
+ VFPRegister sintOverlay(unsigned int which = 0) const;
+ VFPRegister uintOverlay(unsigned int which = 0) const;
+
+ VFPRegister asSingle() const { return singleOverlay(); }
+ VFPRegister asDouble() const { return doubleOverlay(); }
+ VFPRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ struct VFPRegIndexSplit;
+ VFPRegIndexSplit encode();
+
+ // For serializing values.
+ struct VFPRegIndexSplit {
+ const uint32_t block : 4;
+ const uint32_t bit : 1;
+
+ private:
+ friend VFPRegIndexSplit js::jit::VFPRegister::encode();
+
+ VFPRegIndexSplit(uint32_t block_, uint32_t bit_)
+ : block(block_), bit(bit_)
+ {
+ MOZ_ASSERT(block == block_);
+ MOZ_ASSERT(bit == bit_);
+ }
+ };
+
+ Code code() const {
+ MOZ_ASSERT(!_isInvalid && !_isMissing);
+ // This should only be used in areas where we only have doubles and
+ // singles.
+ MOZ_ASSERT(isFloat());
+ return Code(code_ | (kind << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!_isInvalid && !_isMissing);
+ return Encoding(code_);
+ }
+ uint32_t id() const {
+ return code_;
+ }
+ static VFPRegister FromCode(uint32_t i) {
+ uint32_t code = i & 31;
+ uint32_t kind = i >> 5;
+ return VFPRegister(code, RegType(kind));
+ }
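+    // Example of the code()/FromCode() encoding above (illustrative): s3 is
+    // 3 | (Single << 5) == 3, d3 is 3 | (Double << 5) == 35, and FromCode(35)
+    // recovers d3; the low five bits are the register id, the upper bits the kind.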
+ bool volatile_() const {
+ if (isDouble())
+ return !!((1 << (code_ >> 1)) & FloatRegisters::VolatileMask);
+ return !!((1 << code_) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const {
+ if (isDouble())
+ return FloatRegisters::GetDoubleName(Encoding(code_));
+ return FloatRegisters::GetSingleName(Encoding(code_));
+ }
+ bool operator != (const VFPRegister& other) const {
+ return other.kind != kind || code_ != other.code_;
+ }
+ bool aliases(const VFPRegister& other) {
+ if (kind == other.kind)
+ return code_ == other.code_;
+ return doubleOverlay() == other.doubleOverlay();
+ }
+ static const int NumAliasedDoubles = 16;
+ uint32_t numAliased() const {
+ if (isDouble()) {
+ if (code_ < NumAliasedDoubles)
+ return 3;
+ return 1;
+ }
+ return 2;
+ }
+
+    // N.B. FloatRegister is an explicit outparam here because msvc-2010
+    // miscompiled it on win64 when the value was simply returned.
+ void aliased(uint32_t aliasIdx, VFPRegister* ret) {
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ if (isDouble()) {
+ MOZ_ASSERT(code_ < NumAliasedDoubles);
+ MOZ_ASSERT(aliasIdx <= 2);
+ *ret = singleOverlay(aliasIdx - 1);
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = doubleOverlay(aliasIdx - 1);
+ }
+ uint32_t numAlignedAliased() const {
+ if (isDouble()) {
+ if (code_ < NumAliasedDoubles)
+ return 2;
+ return 1;
+ }
+ // s1 has 0 other aligned aliases, 1 total.
+    // s0 has 1 other aligned alias, 2 total.
+ return 2 - (code_ & 1);
+ }
+ // | d0 |
+ // | s0 | s1 |
+ // If we've stored s0 and s1 in memory, we also want to say that d0 is
+ // stored there, but it is only stored at the location where it is aligned
+ // e.g. at s0, not s1.
+ void alignedAliased(uint32_t aliasIdx, VFPRegister* ret) {
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ MOZ_ASSERT(code_ < NumAliasedDoubles);
+ *ret = singleOverlay(aliasIdx - 1);
+ return;
+ }
+ MOZ_ASSERT((code_ & 1) == 0);
+ *ret = doubleOverlay(aliasIdx - 1);
+ return;
+ }
+
+ typedef FloatRegisters::SetType SetType;
+
+    // This function is used to ensure that the register set can take all single
+    // registers, even if we are taking a mix of double and single registers.
+ //
+ // s0.alignedOrDominatedAliasedSet() == s0 | d0.
+ // s1.alignedOrDominatedAliasedSet() == s1.
+ // d0.alignedOrDominatedAliasedSet() == s0 | s1 | d0.
+ //
+    // This way the allocator's register set does not have to do any arithmetic
+ // to know if a register is available or not, as we have the following
+ // relations:
+ //
+ // d0.alignedOrDominatedAliasedSet() ==
+ // s0.alignedOrDominatedAliasedSet() | s1.alignedOrDominatedAliasedSet()
+ //
+ // s0.alignedOrDominatedAliasedSet() & s1.alignedOrDominatedAliasedSet() == 0
+ //
+ SetType alignedOrDominatedAliasedSet() const {
+ if (isSingle()) {
+ if (code_ % 2 != 0)
+ return SetType(1) << code_;
+ return (SetType(1) << code_) | (SetType(1) << (32 + code_ / 2));
+ }
+
+ MOZ_ASSERT(isDouble());
+ return (SetType(0b11) << (code_ * 2)) | (SetType(1) << (32 + code_));
+ }
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<VFPRegister> ReduceSetForPush(const TypedRegisterSet<VFPRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<VFPRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+
+};
+
+// The only floating point register set that we work with are the VFP Registers.
+typedef VFPRegister FloatRegister;
+
+uint32_t GetARMFlags();
+bool HasARMv7();
+bool HasMOVWT();
+bool HasLDSTREXBHD(); // {LD,ST}REX{B,H,D}
+bool HasDMBDSBISB(); // DMB, DSB, and ISB
+bool HasVFPv3();
+bool HasVFP();
+bool Has32DP();
+bool HasIDIV();
+
+extern volatile uint32_t armHwCapFlags;
+
+// Not part of the HWCAP flags, but we need to know these, and these bits are
+// not otherwise used. Define them here so that their use can be inlined by the
+// simulator.
+
+// A bit to flag when signaled alignment faults are to be fixed up.
+#define HWCAP_FIXUP_FAULT (1 << 24)
+
+// A bit to flag when the flags are uninitialized, so they can be atomically set.
+#define HWCAP_UNINITIALIZED (1 << 25)
+
+// A bit to flag when alignment faults are enabled and signal.
+#define HWCAP_ALIGNMENT_FAULT (1 << 26)
+
+// A bit to flag the use of the hardfp ABI.
+#define HWCAP_USE_HARDFP_ABI (1 << 27)
+
+// A bit to flag the use of the ARMv7 arch, otherwise ARMv6.
+#define HWCAP_ARMv7 (1 << 28)
+
+// Top three bits are reserved, do not use them.
+
+// Returns true when cpu alignment faults are enabled and signaled, and thus we
+// should ensure loads and stores are aligned.
+inline bool HasAlignmentFault()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ALIGNMENT_FAULT;
+}
+
+#ifdef JS_SIMULATOR_ARM
+// Returns true when cpu alignment faults will be fixed up by the
+// "operating system", whose functionality we emulate.
+inline bool FixupFault()
+{
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_FIXUP_FAULT;
+}
+#endif
+
+// Arm/D32 has double registers that can NOT be treated as float32 and this
+// requires some dances in lowering.
+inline bool
+hasUnaliasedDouble()
+{
+ return Has32DP();
+}
+
+// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 to
+// a double as a temporary, you need a temporary double register.
+inline bool
+hasMultiAlias()
+{
+ return true;
+}
+
+bool ParseARMHwCapFlags(const char* armHwCap);
+void InitARMFlags();
+uint32_t GetARMFlags();
+
+// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is
+// static and UseHardFpABI is inlined so that unused branches can be optimized
+// away.
+#ifdef JS_SIMULATOR_ARM
+bool UseHardFpABI();
+#else
+static inline bool UseHardFpABI()
+{
+#if defined(JS_CODEGEN_ARM_HARDFP)
+ return true;
+#else
+ return false;
+#endif
+}
+#endif
+
+bool ForceDoubleCacheFlush();
+
+// In order to handle SoftFp ABI calls, we need to be able to express that we
+// have ABIArgs which are represented by a pair of general purpose registers.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Architecture_arm_h */
diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp
new file mode 100644
index 000000000..2830f0695
--- /dev/null
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -0,0 +1,3442 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/Assembler-arm.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+#ifdef JS_DISASM_ARM
+#include "jsprf.h"
+#endif
+#include "jsutil.h"
+
+#include "gc/Marking.h"
+#include "jit/arm/disasm/Disasm-arm.h"
+#include "jit/arm/MacroAssembler-arm.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCompartment.h"
+#include "jit/MacroAssembler.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::CountLeadingZeroes32;
+
+void dbg_break() {}
+
+// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
+// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
+// are always HardFp calls. The initialization defaults to HardFp, and the ABI
+// choice is made before any system ABI calls with the method "setUseHardFp".
+ABIArgGenerator::ABIArgGenerator()
+ : intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+ current_(),
+ useHardFp_(true)
+{ }
+
+// See the "Parameter Passing" section of the "Procedure Call Standard for the
+// ARM Architecture" documentation.
+ABIArg
+ABIArgGenerator::softNext(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Int64:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(uint64_t) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_), Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ case MIRType::Float32:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Double:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(double) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_), Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ return current_;
+}
+
+ABIArg
+ABIArgGenerator::hardNext(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Int64:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(uint64_t) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_), Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ case MIRType::Float32:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
+ floatRegIndex_++;
+ break;
+ case MIRType::Double:
+        // Double registers are composed of 2 float registers, thus we have to
+ // skip any float register which cannot be used in a pair of float
+ // registers in which a double value can be stored.
+ floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ static const uint32_t align = sizeof(double) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
+ floatRegIndex_ += 2;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ return current_;
+}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ if (useHardFp_)
+ return hardNext(type);
+ return softNext(type);
+}
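+
+// Worked example (illustrative): for a signature (Int32, Double, Int32), the
+// soft-FP path assigns r0, the pair r2:r3, and then a stack slot, while the
+// hard-FP path assigns r0, d0, and r1, since doubles go to VFP registers and
+// leave the remaining integer argument registers free.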
+
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. These should never be called with an InvalidReg.
+uint32_t
+js::jit::RT(Register r)
+{
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t
+js::jit::RN(Register r)
+{
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 16;
+}
+
+uint32_t
+js::jit::RD(Register r)
+{
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t
+js::jit::RM(Register r)
+{
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 8;
+}
+
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. For these, an InvalidReg is used to indicate an optional
+// register that has been omitted.
+uint32_t
+js::jit::maybeRT(Register r)
+{
+ if (r == InvalidReg)
+ return 0;
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t
+js::jit::maybeRN(Register r)
+{
+ if (r == InvalidReg)
+ return 0;
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 16;
+}
+
+uint32_t
+js::jit::maybeRD(Register r)
+{
+ if (r == InvalidReg)
+ return 0;
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+Register
+js::jit::toRD(Instruction i)
+{
+ return Register::FromCode((i.encode() >> 12) & 0xf);
+}
+Register
+js::jit::toR(Instruction i)
+{
+ return Register::FromCode(i.encode() & 0xf);
+}
+
+Register
+js::jit::toRM(Instruction i)
+{
+ return Register::FromCode((i.encode() >> 8) & 0xf);
+}
+
+Register
+js::jit::toRN(Instruction i)
+{
+ return Register::FromCode((i.encode() >> 16) & 0xf);
+}
+
+uint32_t
+js::jit::VD(VFPRegister vr)
+{
+ if (vr.isMissing())
+ return 0;
+
+ // Bits 15,14,13,12, 22.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 22 | s.block << 12;
+}
+uint32_t
+js::jit::VN(VFPRegister vr)
+{
+ if (vr.isMissing())
+ return 0;
+
+ // Bits 19,18,17,16, 7.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 7 | s.block << 16;
+}
+uint32_t
+js::jit::VM(VFPRegister vr)
+{
+ if (vr.isMissing())
+ return 0;
+
+ // Bits 5, 3,2,1,0.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 5 | s.block;
+}
+
+VFPRegister::VFPRegIndexSplit
+jit::VFPRegister::encode()
+{
+ MOZ_ASSERT(!_isInvalid);
+
+ switch (kind) {
+ case Double:
+ return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
+ case Single:
+ return VFPRegIndexSplit(code_ >> 1, code_ & 1);
+ default:
+ // VFP register treated as an integer, NOT a gpr.
+ return VFPRegIndexSplit(code_ >> 1, code_ & 1);
+ }
+}
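+
+// Reading encode() together with VD() above: the double register d5
+// (code_ == 5) yields VD(d5) == 5 << 12 == 0x5000, while the single register
+// s3 (code_ == 3) yields VD(s3) == (1 << 22) | (1 << 12) == 0x401000. This
+// matches the ARM VFP convention of splitting a register number into a
+// four-bit field plus one extra bit.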
+
+bool
+InstDTR::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
+}
+
+InstDTR*
+InstDTR::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstDTR*)&i;
+ return nullptr;
+}
+
+bool
+InstLDR::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
+}
+
+InstLDR*
+InstLDR::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstLDR*)&i;
+ return nullptr;
+}
+
+InstNOP*
+InstNOP::AsTHIS(Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstNOP*)&i;
+ return nullptr;
+}
+
+bool
+InstNOP::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & 0x0fffffff) == NopInst;
+}
+
+bool
+InstBranchReg::IsTHIS(const Instruction& i)
+{
+ return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
+}
+
+InstBranchReg*
+InstBranchReg::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBranchReg*)&i;
+ return nullptr;
+}
+void
+InstBranchReg::extractDest(Register* dest)
+{
+ *dest = toR(*this);
+}
+bool
+InstBranchReg::checkDest(Register dest)
+{
+ return dest == toR(*this);
+}
+
+bool
+InstBranchImm::IsTHIS(const Instruction& i)
+{
+ return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
+}
+
+InstBranchImm*
+InstBranchImm::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBranchImm*)&i;
+ return nullptr;
+}
+
+void
+InstBranchImm::extractImm(BOffImm* dest)
+{
+ *dest = BOffImm(*this);
+}
+
+bool
+InstBXReg::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsBRegMask) == IsBX;
+}
+
+InstBXReg*
+InstBXReg::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBXReg*)&i;
+ return nullptr;
+}
+
+bool
+InstBLXReg::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsBRegMask) == IsBLX;
+}
+InstBLXReg*
+InstBLXReg::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBLXReg*)&i;
+ return nullptr;
+}
+
+bool
+InstBImm::IsTHIS(const Instruction& i)
+{
+    return (i.encode() & IsBImmMask) == IsB;
+}
+InstBImm*
+InstBImm::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBImm*)&i;
+ return nullptr;
+}
+
+bool
+InstBLImm::IsTHIS(const Instruction& i)
+{
+    return (i.encode() & IsBImmMask) == IsBL;
+}
+InstBLImm*
+InstBLImm::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstBLImm*)&i;
+ return nullptr;
+}
+
+bool
+InstMovWT::IsTHIS(Instruction& i)
+{
+ return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
+}
+InstMovWT*
+InstMovWT::AsTHIS(Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstMovWT*)&i;
+ return nullptr;
+}
+
+void
+InstMovWT::extractImm(Imm16* imm)
+{
+ *imm = Imm16(*this);
+}
+bool
+InstMovWT::checkImm(Imm16 imm)
+{
+ return imm.decode() == Imm16(*this).decode();
+}
+
+void
+InstMovWT::extractDest(Register* dest)
+{
+ *dest = toRD(*this);
+}
+bool
+InstMovWT::checkDest(Register dest)
+{
+ return dest == toRD(*this);
+}
+
+bool
+InstMovW::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsWTMask) == IsW;
+}
+
+InstMovW*
+InstMovW::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstMovW*)&i;
+ return nullptr;
+}
+InstMovT*
+InstMovT::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstMovT*)&i;
+ return nullptr;
+}
+
+bool
+InstMovT::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & IsWTMask) == IsT;
+}
+
+InstALU*
+InstALU::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstALU*)&i;
+ return nullptr;
+}
+bool
+InstALU::IsTHIS(const Instruction& i)
+{
+ return (i.encode() & ALUMask) == 0;
+}
+void
+InstALU::extractOp(ALUOp* ret)
+{
+ *ret = ALUOp(encode() & (0xf << 21));
+}
+bool
+InstALU::checkOp(ALUOp op)
+{
+ ALUOp mine;
+ extractOp(&mine);
+ return mine == op;
+}
+void
+InstALU::extractDest(Register* ret)
+{
+ *ret = toRD(*this);
+}
+bool
+InstALU::checkDest(Register rd)
+{
+ return rd == toRD(*this);
+}
+void
+InstALU::extractOp1(Register* ret)
+{
+ *ret = toRN(*this);
+}
+bool
+InstALU::checkOp1(Register rn)
+{
+ return rn == toRN(*this);
+}
+Operand2
+InstALU::extractOp2()
+{
+ return Operand2(encode());
+}
+
+InstCMP*
+InstCMP::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstCMP*)&i;
+ return nullptr;
+}
+
+bool
+InstCMP::IsTHIS(const Instruction& i)
+{
+ return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) && InstALU::AsTHIS(i)->checkOp(OpCmp);
+}
+
+InstMOV*
+InstMOV::AsTHIS(const Instruction& i)
+{
+ if (IsTHIS(i))
+ return (InstMOV*)&i;
+ return nullptr;
+}
+
+bool
+InstMOV::IsTHIS(const Instruction& i)
+{
+ return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) && InstALU::AsTHIS(i)->checkOp(OpMov);
+}
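+
+// The r0 checks above are not about r0 as an operand: cmp has no destination,
+// so its Rd field is encoded as zero, and mov has no first source operand, so
+// its Rn field is encoded as zero. Checking against r0 (register code 0)
+// simply verifies that those unused fields are zero.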
+
+Op2Reg
+Operand2::toOp2Reg() const
+{
+ return *(Op2Reg*)this;
+}
+
+Imm16::Imm16(Instruction& inst)
+ : lower_(inst.encode() & 0xfff),
+ upper_(inst.encode() >> 16),
+ invalid_(0xfff)
+{ }
+
+Imm16::Imm16(uint32_t imm)
+ : lower_(imm & 0xfff), pad_(0),
+ upper_((imm >> 12) & 0xf),
+ invalid_(0)
+{
+ MOZ_ASSERT(decode() == imm);
+}
+
+Imm16::Imm16()
+ : invalid_(0xfff)
+{ }
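+
+// The lower_/upper_ split above mirrors the movw/movt encoding, where the
+// 16-bit immediate is stored as imm12 (bits 11:0) and imm4 (bits 19:16); see
+// EncodeMovW() and EncodeMovT() further down.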
+
+void
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+{
+ // We need to determine if this jump can fit into the standard 24+2 bit
+ // address or if we need a larger branch (or just need to use our pool
+ // entry).
+ Instruction* jump = (Instruction*)jump_.raw();
+ // jumpWithPatch() returns the offset of the jump and never a pool or nop.
+ Assembler::Condition c = jump->extractCond();
+ MOZ_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
+
+ int jumpOffset = label.raw() - jump_.raw();
+ if (BOffImm::IsInRange(jumpOffset)) {
+ // This instruction started off as a branch, and will remain one.
+ MaybeAutoWritableJitCode awjc(jump, sizeof(Instruction), reprotect);
+ Assembler::RetargetNearBranch(jump, jumpOffset, c);
+ } else {
+ // This instruction started off as a branch, but now needs to be demoted
+ // to an ldr.
+ uint8_t** slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
+
+ // Ensure both the branch and the slot are writable.
+ MOZ_ASSERT(uintptr_t(slot) > uintptr_t(jump));
+ size_t size = uintptr_t(slot) - uintptr_t(jump) + sizeof(void*);
+ MaybeAutoWritableJitCode awjc(jump, size, reprotect);
+
+ Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
+ }
+}
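+
+// As a point of reference, BOffImm covers a signed 24-bit instruction offset,
+// so a near branch can reach roughly +/- 32 MB of code; anything further away
+// has to take the jump-table path above.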
+
+void
+Assembler::finish()
+{
+ flush();
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool
+Assembler::asmMergeWith(Assembler& other)
+{
+ flush();
+ other.flush();
+ if (other.oom())
+ return false;
+ if (!AssemblerShared::asmMergeWith(size(), other))
+ return false;
+ return m_buffer.appendBuffer(other.m_buffer);
+}
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
+uint32_t
+Assembler::actualIndex(uint32_t idx_) const
+{
+ ARMBuffer::PoolEntry pe(idx_);
+ return m_buffer.poolEntryOffset(pe);
+}
+
+uint8_t*
+Assembler::PatchableJumpAddress(JitCode* code, uint32_t pe_)
+{
+ return code->raw() + pe_;
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ // Offset in bytes.
+ uint32_t offset_;
+
+ public:
+ RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ { }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+};
+
+template<class Iter>
+const uint32_t*
+Assembler::GetCF32Target(Iter* iter)
+{
+ Instruction* inst1 = iter->cur();
+
+ if (inst1->is<InstBranchImm>()) {
+ // See if we have a simple case, b #offset.
+ BOffImm imm;
+ InstBranchImm* jumpB = inst1->as<InstBranchImm>();
+ jumpB->extractImm(&imm);
+ return imm.getDest(inst1)->raw();
+ }
+
+    if (inst1->is<InstMovW>()) {
+ // See if we have the complex case:
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+ // bx r_temp
+ // OR
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+ // str pc, [sp]
+ // bx r_temp
+
+ Imm16 targ_bot;
+ Imm16 targ_top;
+ Register temp;
+
+ // Extract both the temp register and the bottom immediate.
+ InstMovW* bottom = inst1->as<InstMovW>();
+ bottom->extractImm(&targ_bot);
+ bottom->extractDest(&temp);
+
+ // Extract the top part of the immediate.
+ Instruction* inst2 = iter->next();
+ MOZ_ASSERT(inst2->is<InstMovT>());
+ InstMovT* top = inst2->as<InstMovT>();
+ top->extractImm(&targ_top);
+
+ // Make sure they are being loaded into the same register.
+ MOZ_ASSERT(top->checkDest(temp));
+
+ // Make sure we're branching to the same register.
+#ifdef DEBUG
+ // A toggled call sometimes has a NOP instead of a branch for the third
+ // instruction. No way to assert that it's valid in that situation.
+ Instruction* inst3 = iter->next();
+ if (!inst3->is<InstNOP>()) {
+ InstBranchReg* realBranch = nullptr;
+ if (inst3->is<InstBranchReg>()) {
+ realBranch = inst3->as<InstBranchReg>();
+ } else {
+ Instruction* inst4 = iter->next();
+ realBranch = inst4->as<InstBranchReg>();
+ }
+ MOZ_ASSERT(realBranch->checkDest(temp));
+ }
+#endif
+
+ uint32_t* dest = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
+ return dest;
+ }
+
+ if (inst1->is<InstLDR>())
+ return *(uint32_t**) inst1->as<InstLDR>()->dest();
+
+ MOZ_CRASH("unsupported branch relocation");
+}
+
+uintptr_t
+Assembler::GetPointer(uint8_t* instPtr)
+{
+ InstructionIterator iter((Instruction*)instPtr);
+ uintptr_t ret = (uintptr_t)GetPtr32Target(&iter, nullptr, nullptr);
+ return ret;
+}
+
+template<class Iter>
+const uint32_t*
+Assembler::GetPtr32Target(Iter* start, Register* dest, RelocStyle* style)
+{
+ Instruction* load1 = start->cur();
+ Instruction* load2 = start->next();
+
+ if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
+ if (style)
+ *style = L_MOVWT;
+
+ // See if we have the complex case:
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+
+ Imm16 targ_bot;
+ Imm16 targ_top;
+ Register temp;
+
+ // Extract both the temp register and the bottom immediate.
+ InstMovW* bottom = load1->as<InstMovW>();
+ bottom->extractImm(&targ_bot);
+ bottom->extractDest(&temp);
+
+ // Extract the top part of the immediate.
+ InstMovT* top = load2->as<InstMovT>();
+ top->extractImm(&targ_top);
+
+ // Make sure they are being loaded into the same register.
+ MOZ_ASSERT(top->checkDest(temp));
+
+ if (dest)
+ *dest = temp;
+
+ uint32_t* value = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
+ return value;
+ }
+
+ if (load1->is<InstLDR>()) {
+ if (style)
+ *style = L_LDR;
+ if (dest)
+ *dest = toRD(*load1);
+ return *(uint32_t**) load1->as<InstLDR>()->dest();
+ }
+
+ MOZ_CRASH("unsupported relocation");
+}
+
+static JitCode*
+CodeFromJump(InstructionIterator* jump)
+{
+ uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ InstructionIterator institer((Instruction*) (code->raw() + iter.offset()));
+ JitCode* child = CodeFromJump(&institer);
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+template <class Iter>
+static void
+TraceOneDataRelocation(JSTracer* trc, Iter* iter)
+{
+ Instruction* ins = iter->cur();
+ Register dest;
+ Assembler::RelocStyle rs;
+ const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
+ void* ptr = const_cast<void*>(prior);
+
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
+ "ion-masm-ptr");
+
+ if (ptr != prior) {
+ MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest, Assembler::Always, rs, ins);
+
+ // L_LDR won't cause any instructions to be updated.
+ if (rs != Assembler::L_LDR) {
+ AutoFlushICache::flush(uintptr_t(ins), 4);
+ AutoFlushICache::flush(uintptr_t(ins->next()), 4);
+ }
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ InstructionIterator iter((Instruction*)(buffer + offset));
+ TraceOneDataRelocation(trc, &iter);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, ARMBuffer* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ BufferOffset offset(reader.readUnsigned());
+ ARMBuffer::AssemblerBufferInstIterator iter(offset, buffer);
+ TraceOneDataRelocation(trc, &iter);
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+void
+Assembler::copyJumpRelocationTable(uint8_t* dest)
+{
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+}
+
+void
+Assembler::copyDataRelocationTable(uint8_t* dest)
+{
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+}
+
+void
+Assembler::copyPreBarrierTable(uint8_t* dest)
+{
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+}
+
+void
+Assembler::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ if (rp.kind() == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target());
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target()));
+ }
+ }
+
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, &m_buffer, reader);
+ }
+}
+
+void
+Assembler::processCodeLabels(uint8_t* rawCode)
+{
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
+ }
+}
+
+void
+Assembler::writeCodePointer(CodeOffset* label)
+{
+ BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
+ label->bind(off.getOffset());
+}
+
+void
+Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
+{
+ *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
+}
+
+Assembler::Condition
+Assembler::InvertCondition(Condition cond)
+{
+ const uint32_t ConditionInversionBit = 0x10000000;
+ return Condition(ConditionInversionBit ^ cond);
+}
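+
+// Flipping bit 28 toggles the low bit of the 4-bit ARM condition field, which
+// pairs each condition with its logical inverse: for example Equal (0x0 << 28)
+// becomes NotEqual (0x1 << 28), and GreaterThan (0xc << 28) becomes
+// LessThanOrEqual (0xd << 28).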
+
+Assembler::Condition
+Assembler::UnsignedCondition(Condition cond)
+{
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::Condition
+Assembler::ConditionWithoutEqual(Condition cond)
+{
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
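+
+// A hand-checked example of the splitting performed by Imm8::EncodeTwoImms()
+// below: for imm == 0x00ff00ff the greedy pass removes 0x00ff0000 (left == 8),
+// leaving 0x000000ff, so the easy no-wraparound case applies and the result is
+// the pair (0xff rotated right by 16, 0xff rotated right by 0), i.e.
+// 0x00ff0000 and 0x000000ff.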
+Imm8::TwoImm8mData
+Imm8::EncodeTwoImms(uint32_t imm)
+{
+ // In the ideal case, we are looking for a number that (in binary) looks
+ // like:
+ // 0b((00)*)n_1((00)*)n_2((00)*)
+ // left n1 mid n2
+ // where both n_1 and n_2 fit into 8 bits.
+ // Since this is being done with rotates, we also need to handle the case
+ // that one of these numbers is in fact split between the left and right
+ // sides, in which case the constant will look like:
+ // 0bn_1a((00)*)n_2((00)*)n_1b
+ // n1a mid n2 rgh n1b
+ // Also remember, values are rotated by multiples of two, and left, mid or
+ // right can have length zero.
+ uint32_t imm1, imm2;
+ int left = CountLeadingZeroes32(imm) & 0x1E;
+ uint32_t no_n1 = imm & ~(0xff << (24 - left));
+
+ // Not technically needed: this case only happens if we can encode as a
+ // single imm8m. There is a perfectly reasonable encoding in this case, but
+ // we shouldn't encourage people to do things like this.
+ if (no_n1 == 0)
+ return TwoImm8mData();
+
+ int mid = CountLeadingZeroes32(no_n1) & 0x1E;
+ uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
+
+ if (no_n2 == 0) {
+ // We hit the easy case, no wraparound.
+ // Note: a single constant *may* look like this.
+ int imm1shift = left + 8;
+ int imm2shift = mid + 8;
+ imm1 = (imm >> (32 - imm1shift)) & 0xff;
+ if (imm2shift >= 32) {
+ imm2shift = 0;
+            // This assert does not always hold; in fact, enabling it would
+            // lead to some incredibly subtle bugs.
+ // assert((imm & 0xff) == no_n1);
+ imm2 = no_n1;
+ } else {
+ imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
+            MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
+ }
+ MOZ_ASSERT((imm1shift & 0x1) == 0);
+ MOZ_ASSERT((imm2shift & 0x1) == 0);
+ return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
+ datastore::Imm8mData(imm2, imm2shift >> 1));
+ }
+
+ // Either it wraps, or it does not fit. If we initially chopped off more
+ // than 8 bits, then it won't fit.
+ if (left >= 8)
+ return TwoImm8mData();
+
+ int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
+ // All remaining set bits *must* fit into the lower 8 bits.
+ // The right == 8 case should be handled by the previous case.
+ if (right > 8)
+ return TwoImm8mData();
+
+ // Make sure the initial bits that we removed for no_n1 fit into the
+ // 8-(32-right) leftmost bits.
+ if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
+        // BUT we may have removed more bits than we needed to for no_n1.
+        // Take 0x04104001, for example: it can be encoded as 0x00104000 plus
+        // 0x04000001 (the latter wrapping around), but the greedy pass above
+        // first chops off 0x04100000, leaving 0x00004001, which itself needs
+        // two encodings (0x4000 and 0x1), three operations in total.
+ no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
+ mid = CountLeadingZeroes32(no_n1) & 30;
+ no_n2 = no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
+ if (no_n2 != 0)
+ return TwoImm8mData();
+ }
+
+    // Now assemble all of this information into two coherent constants. Each
+    // is encoded as a rotate right of a value held in the lower 8 bits.
+ int imm1shift = 8 - right;
+ imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
+ MOZ_ASSERT((imm1shift & ~0x1e) == 0);
+ // left + 8 + mid is the position of the leftmost bit of n_2.
+ // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
+ // then shift again by the leftmost bit in order to get the constant that we
+ // care about.
+ int imm2shift = mid + 8;
+ imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
+ MOZ_ASSERT((imm1shift & 0x1) == 0);
+ MOZ_ASSERT((imm2shift & 0x1) == 0);
+ return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
+ datastore::Imm8mData(imm2, imm2shift >> 1));
+}
+
+ALUOp
+jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm, Register* negDest)
+{
+ // Find an alternate ALUOp to get the job done, and use a different imm.
+ *negDest = dest;
+ switch (op) {
+ case OpMov:
+ *imm = Imm32(~imm->value);
+ return OpMvn;
+ case OpMvn:
+ *imm = Imm32(~imm->value);
+ return OpMov;
+ case OpAnd:
+ *imm = Imm32(~imm->value);
+ return OpBic;
+ case OpBic:
+ *imm = Imm32(~imm->value);
+ return OpAnd;
+ case OpAdd:
+ *imm = Imm32(-imm->value);
+ return OpSub;
+ case OpSub:
+ *imm = Imm32(-imm->value);
+ return OpAdd;
+ case OpCmp:
+ *imm = Imm32(-imm->value);
+ return OpCmn;
+ case OpCmn:
+ *imm = Imm32(-imm->value);
+ return OpCmp;
+ case OpTst:
+ MOZ_ASSERT(dest == InvalidReg);
+ *imm = Imm32(~imm->value);
+ *negDest = scratch;
+ return OpBic;
+ // orr has orn on thumb2 only.
+ default:
+ return OpInvalid;
+ }
+}
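+
+// For example, ALUNeg(OpAdd, dest, scratch, &imm, &negDest) with imm == -1
+// returns OpSub with imm == 1: "add dest, src, #-1" cannot encode -1 as an
+// imm8m operand, but the equivalent "sub dest, src, #1" can.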
+
+bool
+jit::can_dbl(ALUOp op)
+{
+    // Some instructions can't be split into two separate instructions, such as
+    // and, and possibly add (when we're setting condition codes). There is also
+    // some hilarity with *reading* condition codes. For example,
+    // adc dest, src1, 0xfff (add with carry) can be split up into
+    // adc dest, src1, 0xf00; add dest, dest, 0xff. Since "reading" the
+    // condition code increments the result by one conditionally, that only
+    // needs to be done on one of the two instructions.
+ switch (op) {
+ case OpBic:
+ case OpAdd:
+ case OpSub:
+ case OpEor:
+ case OpOrr:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+jit::condsAreSafe(ALUOp op)
+{
+ // Even when we are setting condition codes, sometimes we can get away with
+ // splitting an operation into two. For example, if our immediate is
+ // 0x00ff00ff, and the operation is eors we can split this in half, since x
+ // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
+ // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
+ // cannot split this in half. If the source on the add is 0xfff00ff0, the
+    // result should be 0xef10ef, but do we set the overflow bit or not?
+ // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
+ // V bit will be set differently, and *not* updating the V bit would be
+ // wrong. Theoretically, the following should work:
+ // adds r0, r1, 0x00ff0000;
+ // addsvs r0, r1, 0x000000ff;
+ // addvc r0, r1, 0x000000ff;
+ // But this is 3 instructions, and at that point, we might as well use
+ // something else.
+    switch (op) {
+ case OpBic:
+ case OpOrr:
+ case OpEor:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ALUOp
+jit::getDestVariant(ALUOp op)
+{
+ // All of the compare operations are dest-less variants of a standard
+ // operation. Given the dest-less variant, return the dest-ful variant.
+ switch (op) {
+ case OpCmp:
+ return OpSub;
+ case OpCmn:
+ return OpAdd;
+ case OpTst:
+ return OpAnd;
+ case OpTeq:
+ return OpEor;
+ default:
+ return op;
+ }
+}
+
+O2RegImmShift
+jit::O2Reg(Register r)
+{
+ return O2RegImmShift(r, LSL, 0);
+}
+
+O2RegImmShift
+jit::lsl(Register r, int amt)
+{
+ MOZ_ASSERT(0 <= amt && amt <= 31);
+ return O2RegImmShift(r, LSL, amt);
+}
+
+O2RegImmShift
+jit::lsr(Register r, int amt)
+{
+ MOZ_ASSERT(1 <= amt && amt <= 32);
+ return O2RegImmShift(r, LSR, amt);
+}
+
+O2RegImmShift
+jit::ror(Register r, int amt)
+{
+ MOZ_ASSERT(1 <= amt && amt <= 31);
+ return O2RegImmShift(r, ROR, amt);
+}
+O2RegImmShift
+jit::rol(Register r, int amt)
+{
+ MOZ_ASSERT(1 <= amt && amt <= 31);
+ return O2RegImmShift(r, ROR, 32 - amt);
+}
+
+O2RegImmShift
+jit::asr(Register r, int amt)
+{
+ MOZ_ASSERT(1 <= amt && amt <= 32);
+ return O2RegImmShift(r, ASR, amt);
+}
+
+O2RegRegShift
+jit::lsl(Register r, Register amt)
+{
+ return O2RegRegShift(r, LSL, amt);
+}
+
+O2RegRegShift
+jit::lsr(Register r, Register amt)
+{
+ return O2RegRegShift(r, LSR, amt);
+}
+
+O2RegRegShift
+jit::ror(Register r, Register amt)
+{
+ return O2RegRegShift(r, ROR, amt);
+}
+
+O2RegRegShift
+jit::asr(Register r, Register amt)
+{
+ return O2RegRegShift(r, ASR, amt);
+}
+
+static js::jit::DoubleEncoder doubleEncoder;
+
+/* static */ const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);
+
+js::jit::VFPImm::VFPImm(uint32_t top)
+{
+ data_ = -1;
+ datastore::Imm8VFPImmData tmp;
+ if (doubleEncoder.lookup(top, &tmp))
+ data_ = tmp.encode();
+}
+
+BOffImm::BOffImm(const Instruction& inst)
+ : data_(inst.encode() & 0x00ffffff)
+{
+}
+
+Instruction*
+BOffImm::getDest(Instruction* src) const
+{
+ // TODO: It is probably worthwhile to verify that src is actually a branch.
+ // NOTE: This does not explicitly shift the offset of the destination left by 2,
+    // since it is indexing into an array of instruction-sized objects.
+ return &src[((int32_t(data_) << 8) >> 8) + 2];
+}
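+
+// For example, a data_ field of 0x00fffffe sign-extends to -2, and -2 + 2
+// lands back on src itself: the +2 accounts for the ARM convention that a
+// branch offset is taken relative to the branch's address plus 8 bytes, i.e.
+// two instructions ahead.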
+
+const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
+#include "jit/arm/DoubleEntryTable.tbl"
+};
+
+// VFPRegister implementation
+VFPRegister
+VFPRegister::doubleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!_isInvalid);
+ MOZ_ASSERT(which == 0);
+ if (kind != Double)
+ return VFPRegister(code_ >> 1, Double);
+ return *this;
+}
+VFPRegister
+VFPRegister::singleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, Single);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, Single);
+}
+
+VFPRegister
+VFPRegister::sintOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, Int);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, Int);
+}
+VFPRegister
+VFPRegister::uintOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, UInt);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, UInt);
+}
+
+bool
+VFPRegister::isInvalid() const
+{
+ return _isInvalid;
+}
+
+bool
+VFPRegister::isMissing() const
+{
+ MOZ_ASSERT(!_isInvalid);
+ return _isMissing;
+}
+
+bool
+Assembler::oom() const
+{
+ return AssemblerShared::oom() ||
+ m_buffer.oom() ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+}
+
+// Size of the instruction stream, in bytes, including pools. This function
+// expects that all pools that need to be placed have been placed. If they
+// haven't been, then we need to go and flush the pools :(
+size_t
+Assembler::size() const
+{
+ return m_buffer.size();
+}
+// Size of the relocation table, in bytes.
+size_t
+Assembler::jumpRelocationTableBytes() const
+{
+ return jumpRelocations_.length();
+}
+size_t
+Assembler::dataRelocationTableBytes() const
+{
+ return dataRelocations_.length();
+}
+
+size_t
+Assembler::preBarrierTableBytes() const
+{
+ return preBarriers_.length();
+}
+
+// Total size of the code plus all relocation and pre-barrier tables, in bytes.
+size_t
+Assembler::bytesNeeded() const
+{
+ return size() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+}
+
+#ifdef JS_DISASM_ARM
+
+void
+Assembler::spewInst(Instruction* i)
+{
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
+ dasm.InstructionDecode(buffer, loc);
+ printf(" %08x %s\n", reinterpret_cast<uint32_t>(loc), buffer.start());
+}
+
+// Labels are named as they are encountered by adding names to a
+// table, using the Label address as the key. This is made tricky by
+// the (memory for) Label objects being reused, but reused label
+// objects are recognizable from being marked as not used or not
+// bound. See spewResolve().
+//
+// In a number of cases there is no information about the target, and
+// we just end up printing "patchable constant load to PC". This is
+// true especially for jumps to bailout handlers (which have no
+// names). See spewData() and its callers. In some cases (loop back
+// edges) some information about the intended target may be propagated
+// from higher levels, and if so it's printed here.
+
+void
+Assembler::spew(Instruction* i)
+{
+ if (spewDisabled() || !i)
+ return;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
+ dasm.InstructionDecode(buffer, loc);
+ spew(" %08x %s", reinterpret_cast<uint32_t>(loc), buffer.start());
+}
+
+void
+Assembler::spewTarget(Label* target)
+{
+ if (spewDisabled())
+ return;
+ spew(" -> %d%s", spewResolve(target), !target->bound() ? "f" : "");
+}
+
+// If a target label is known, always print that and do not attempt to
+// disassemble the branch operands, as they will often be encoding
+// metainformation (pointers for a chain of jump instructions), and
+// not actual branch targets.
+
+void
+Assembler::spewBranch(Instruction* i, Label* target /* may be nullptr */)
+{
+ if (spewDisabled() || !i)
+ return;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
+ dasm.InstructionDecode(buffer, loc);
+ char labelBuf[128];
+ labelBuf[0] = 0;
+ if (!target)
+ snprintf(labelBuf, sizeof(labelBuf), " -> (link-time target)");
+ if (InstBranchImm::IsTHIS(*i)) {
+ InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
+ BOffImm destOff;
+ bimm->extractImm(&destOff);
+ if (destOff.isInvalid() || target) {
+ // The target information in the instruction is likely garbage, so remove it.
+ // The target label will in any case be printed if we have it.
+ //
+ // The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
+ // where the \S+ string is the opcode. Strip everything after the opcode,
+ // and attach the label if we have it.
+ int i;
+ for ( i=8 ; i < buffer.length() && buffer[i] == ' ' ; i++ )
+ ;
+ for ( ; i < buffer.length() && buffer[i] != ' ' ; i++ )
+ ;
+ buffer[i] = 0;
+ if (target) {
+ snprintf(labelBuf, sizeof(labelBuf), " -> %d%s", spewResolve(target),
+ !target->bound() ? "f" : "");
+ target = nullptr;
+ }
+ }
+ }
+ spew(" %08x %s%s", reinterpret_cast<uint32_t>(loc), buffer.start(), labelBuf);
+ if (target)
+ spewTarget(target);
+}
+
+void
+Assembler::spewLabel(Label* l)
+{
+ if (spewDisabled())
+ return;
+ spew(" %d:", spewResolve(l));
+}
+
+void
+Assembler::spewRetarget(Label* label, Label* target)
+{
+ if (spewDisabled())
+ return;
+ spew(" %d: .retarget -> %d%s",
+ spewResolve(label), spewResolve(target), !target->bound() ? "f" : "");
+}
+
+void
+Assembler::spewData(BufferOffset addr, size_t numInstr, bool loadToPC)
+{
+ if (spewDisabled())
+ return;
+ Instruction* inst = m_buffer.getInstOrNull(addr);
+ if (!inst)
+ return;
+ uint32_t *instr = reinterpret_cast<uint32_t*>(inst);
+ for ( size_t k=0 ; k < numInstr ; k++ ) {
+ spew(" %08x %08x (patchable constant load%s)",
+ reinterpret_cast<uint32_t>(instr+k), *(instr+k), loadToPC ? " to PC" : "");
+ }
+}
+
+bool
+Assembler::spewDisabled()
+{
+ return !(JitSpewEnabled(JitSpew_Codegen) || printer_);
+}
+
+void
+Assembler::spew(const char* fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ spew(fmt, args);
+ va_end(args);
+}
+
+void
+Assembler::spew(const char* fmt, va_list va)
+{
+ if (printer_) {
+ printer_->vprintf(fmt, va);
+ printer_->put("\n");
+ }
+ js::jit::JitSpewVA(js::jit::JitSpew_Codegen, fmt, va);
+}
+
+uint32_t
+Assembler::spewResolve(Label* l)
+{
+ // Note, spewResolve will sometimes return 0 when it is triggered
+ // by the profiler and not by a full disassembly, since in that
+ // case a label can be used or bound but not previously have been
+ // defined.
+ return l->used() || l->bound() ? spewProbe(l) : spewDefine(l);
+}
+
+uint32_t
+Assembler::spewProbe(Label* l)
+{
+ uint32_t key = reinterpret_cast<uint32_t>(l);
+ uint32_t value = 0;
+ spewNodes_.lookup(key, &value);
+ return value;
+}
+
+uint32_t
+Assembler::spewDefine(Label* l)
+{
+ uint32_t key = reinterpret_cast<uint32_t>(l);
+ spewNodes_.remove(key);
+ uint32_t value = spewNext_++;
+ if (!spewNodes_.add(key, value))
+ return 0;
+ return value;
+}
+
+Assembler::SpewNodes::~SpewNodes()
+{
+ Node* p = nodes;
+ while (p) {
+ Node* victim = p;
+ p = p->next;
+ js_free(victim);
+ }
+}
+
+bool
+Assembler::SpewNodes::lookup(uint32_t key, uint32_t* value)
+{
+ for ( Node* p = nodes ; p ; p = p->next ) {
+ if (p->key == key) {
+ *value = p->value;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+Assembler::SpewNodes::add(uint32_t key, uint32_t value)
+{
+ Node* node = (Node*)js_malloc(sizeof(Node));
+ if (!node)
+ return false;
+ node->key = key;
+ node->value = value;
+ node->next = nodes;
+ nodes = node;
+ return true;
+}
+
+bool
+Assembler::SpewNodes::remove(uint32_t key)
+{
+ for ( Node* p = nodes, *pp = nullptr ; p ; pp = p, p = p->next ) {
+ if (p->key == key) {
+ if (pp)
+ pp->next = p->next;
+ else
+ nodes = p->next;
+ js_free(p);
+ return true;
+ }
+ }
+ return false;
+}
+
+#endif // JS_DISASM_ARM
+
+// Write a blob of binary into the instruction stream.
+BufferOffset
+Assembler::writeInst(uint32_t x)
+{
+ BufferOffset offs = m_buffer.putInt(x);
+#ifdef JS_DISASM_ARM
+ spew(m_buffer.getInstOrNull(offs));
+#endif
+ return offs;
+}
+
+BufferOffset
+Assembler::writeBranchInst(uint32_t x, Label* documentation)
+{
+ BufferOffset offs = m_buffer.putInt(x, /* markAsBranch = */ true);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(offs), documentation);
+#endif
+ return offs;
+}
+
+// Allocate memory for a branch instruction; it will be overwritten
+// subsequently and should not be disassembled.
+
+BufferOffset
+Assembler::allocBranchInst()
+{
+ return m_buffer.putInt(Always | InstNOP::NopInst, /* markAsBranch = */ true);
+}
+
+void
+Assembler::WriteInstStatic(uint32_t x, uint32_t* dest)
+{
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+void
+Assembler::haltingAlign(int alignment)
+{
+ // TODO: Implement a proper halting align.
+ nopAlign(alignment);
+}
+
+void
+Assembler::nopAlign(int alignment)
+{
+ m_buffer.align(alignment);
+}
+
+BufferOffset
+Assembler::as_nop()
+{
+ return writeInst(0xe320f000);
+}
+
+static uint32_t
+EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s, Assembler::Condition c)
+{
+ return (int)op | (int)s | (int)c | op2.encode() |
+ ((dest == InvalidReg) ? 0 : RD(dest)) |
+ ((src1 == InvalidReg) ? 0 : RN(src1));
+}
+
+BufferOffset
+Assembler::as_alu(Register dest, Register src1, Operand2 op2,
+ ALUOp op, SBit s, Condition c)
+{
+ return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
+}
+
+BufferOffset
+Assembler::as_mov(Register dest, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, InvalidReg, op2, OpMov, s, c);
+}
+
+/* static */ void
+Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s,
+ Condition c, uint32_t* pos)
+{
+ WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
+}
+
+/* static */ void
+Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c, uint32_t* pos)
+{
+ as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
+}
+
+BufferOffset
+Assembler::as_mvn(Register dest, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
+}
+
+// Logical operations.
+BufferOffset
+Assembler::as_and(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpAnd, s, c);
+}
+BufferOffset
+Assembler::as_bic(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpBic, s, c);
+}
+BufferOffset
+Assembler::as_eor(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpEor, s, c);
+}
+BufferOffset
+Assembler::as_orr(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpOrr, s, c);
+}
+
+// Mathematical operations.
+BufferOffset
+Assembler::as_adc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpAdc, s, c);
+}
+BufferOffset
+Assembler::as_add(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpAdd, s, c);
+}
+BufferOffset
+Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpSbc, s, c);
+}
+BufferOffset
+Assembler::as_sub(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpSub, s, c);
+}
+BufferOffset
+Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpRsb, s, c);
+}
+BufferOffset
+Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
+{
+ return as_alu(dest, src1, op2, OpRsc, s, c);
+}
+
+// Test operations.
+BufferOffset
+Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
+{
+ return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
+}
+BufferOffset
+Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
+{
+ return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
+}
+BufferOffset
+Assembler::as_teq(Register src1, Operand2 op2, Condition c)
+{
+ return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
+}
+BufferOffset
+Assembler::as_tst(Register src1, Operand2 op2, Condition c)
+{
+ return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
+}
+
+static constexpr Register NoAddend = { Registers::pc };
+
+static const int SignExtend = 0x06000070;
+
+enum SignExtend {
+ SxSxtb = 10 << 20,
+ SxSxth = 11 << 20,
+ SxUxtb = 14 << 20,
+ SxUxth = 15 << 20
+};
+
+// Sign extension operations.
+BufferOffset
+Assembler::as_sxtb(Register dest, Register src, int rotate, Condition c)
+{
+ return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) | ((rotate & 3) << 10) | src.code());
+}
+BufferOffset
+Assembler::as_sxth(Register dest, Register src, int rotate, Condition c)
+{
+ return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) | ((rotate & 3) << 10) | src.code());
+}
+BufferOffset
+Assembler::as_uxtb(Register dest, Register src, int rotate, Condition c)
+{
+ return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) | ((rotate & 3) << 10) | src.code());
+}
+BufferOffset
+Assembler::as_uxth(Register dest, Register src, int rotate, Condition c)
+{
+ return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) | ((rotate & 3) << 10) | src.code());
+}
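+
+// In the four encodings above the rotate argument is given in byte steps
+// (0..3) and lands in bits 11:10 of the instruction, so e.g. rotate == 2
+// extracts the byte or halfword after rotating the source right by 16 bits.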
+
+static uint32_t
+EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c)
+{
+ MOZ_ASSERT(HasMOVWT());
+ return 0x03000000 | c | imm.encode() | RD(dest);
+}
+
+static uint32_t
+EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c)
+{
+ MOZ_ASSERT(HasMOVWT());
+ return 0x03400000 | c | imm.encode() | RD(dest);
+}
+
+// Not quite ALU worthy, but these are useful nonetheless. These also have the
+// issue of being formatted completely differently from the standard ALU
+// operations.
+BufferOffset
+Assembler::as_movw(Register dest, Imm16 imm, Condition c)
+{
+ return writeInst(EncodeMovW(dest, imm, c));
+}
+
+/* static */ void
+Assembler::as_movw_patch(Register dest, Imm16 imm, Condition c, Instruction* pos)
+{
+ WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
+}
+
+BufferOffset
+Assembler::as_movt(Register dest, Imm16 imm, Condition c)
+{
+ return writeInst(EncodeMovT(dest, imm, c));
+}
+
+/* static */ void
+Assembler::as_movt_patch(Register dest, Imm16 imm, Condition c, Instruction* pos)
+{
+ WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
+}
+
+static const int mull_tag = 0x90;
+
+BufferOffset
+Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
+ MULOp op, SBit s, Condition c)
+{
+ return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c | mull_tag);
+}
+BufferOffset
+Assembler::as_mul(Register dest, Register src1, Register src2, SBit s, Condition c)
+{
+ return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
+}
+BufferOffset
+Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
+ SBit s, Condition c)
+{
+ return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
+}
+BufferOffset
+Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
+{
+ return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
+}
+BufferOffset
+Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
+{
+ return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
+}
+
+BufferOffset
+Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
+ SBit s, Condition c)
+{
+ return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
+}
+
+BufferOffset
+Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
+ SBit s, Condition c)
+{
+ return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
+}
+
+BufferOffset
+Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
+ SBit s, Condition c)
+{
+ return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
+}
+
+BufferOffset
+Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
+ SBit s, Condition c)
+{
+ return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
+}
+
+BufferOffset
+Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
+{
+ return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
+}
+
+BufferOffset
+Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c)
+{
+ return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
+}
+
+BufferOffset
+Assembler::as_clz(Register dest, Register src, Condition c)
+{
+ MOZ_ASSERT(src != pc && dest != pc);
+ return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
+}
+
+// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
+// differentiate between 8 bits and 32 bits is overkill, but meh.
+
+static uint32_t
+EncodeDtr(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
+{
+ MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
+ MOZ_ASSERT(size == 32 || size == 8);
+ return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) | addr.encode();
+}
+
+BufferOffset
+Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Condition c)
+{
+ return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
+}
+
+/* static */ void
+Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Condition c,
+ uint32_t* dest)
+{
+ WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
+}
+
+class PoolHintData
+{
+ public:
+ enum LoadType {
+        // Make 0 the bogus value, since that is the value most likely to be
+        // accidentally left somewhere.
+ PoolBOGUS = 0,
+ PoolDTR = 1,
+ PoolBranch = 2,
+ PoolVDTR = 3
+ };
+
+ private:
+ uint32_t index_ : 16;
+ uint32_t cond_ : 4;
+ LoadType loadType_ : 2;
+ uint32_t destReg_ : 5;
+ uint32_t destType_ : 1;
+ uint32_t ONES : 4;
+
+ static const uint32_t ExpectedOnes = 0xfu;
+
+ public:
+ void init(uint32_t index, Assembler::Condition cond, LoadType lt, Register destReg) {
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ cond_ = cond >> 28;
+ MOZ_ASSERT(cond_ == cond >> 28);
+ loadType_ = lt;
+ ONES = ExpectedOnes;
+ destReg_ = destReg.code();
+ destType_ = 0;
+ }
+ void init(uint32_t index, Assembler::Condition cond, LoadType lt, const VFPRegister& destReg) {
+ MOZ_ASSERT(destReg.isFloat());
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ cond_ = cond >> 28;
+ MOZ_ASSERT(cond_ == cond >> 28);
+ loadType_ = lt;
+ ONES = ExpectedOnes;
+ destReg_ = destReg.id();
+ destType_ = destReg.isDouble();
+ }
+ Assembler::Condition getCond() const {
+ return Assembler::Condition(cond_ << 28);
+ }
+
+ Register getReg() const {
+ return Register::FromCode(destReg_);
+ }
+ VFPRegister getVFPReg() const {
+ VFPRegister r = VFPRegister(destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
+ return r;
+ }
+
+ int32_t getIndex() const {
+ return index_;
+ }
+ void setIndex(uint32_t index) {
+ MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ }
+
+ LoadType getLoadType() const {
+        // If this *was* a PoolBranch, but the branch has already been bound,
+        // then this isn't going to look like a real PoolHintData, but we still
+        // want to lie about it so everyone knows it *used* to be a branch.
+ if (ONES != ExpectedOnes)
+ return PoolHintData::PoolBranch;
+ return loadType_;
+ }
+
+ bool isValidPoolHint() const {
+ // Most instructions cannot have a condition that is 0xf. Notable
+ // exceptions are blx and the entire NEON instruction set. For the
+ // purposes of pool loads, and possibly patched branches, the possible
+ // instructions are ldr and b, neither of which can have a condition
+ // code of 0xf.
+ return ONES == ExpectedOnes;
+ }
+};
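+
+// The bit fields above pack exactly 32 bits (16 + 4 + 2 + 5 + 1 + 4), so a
+// PoolHintData can be punned with one instruction word (see PoolHintPun
+// below); on the layout used here ONES occupies the top four bits, i.e. the
+// instruction's condition-code field, which is what isValidPoolHint() relies
+// on.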
+
+union PoolHintPun
+{
+ PoolHintData phd;
+ uint32_t raw;
+};
+
+// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
+// ldrd, etc. The size is given in bits.
+BufferOffset
+Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
+ Register rt, EDtrAddr addr, Condition c)
+{
+ int extra_bits2 = 0;
+ int extra_bits1 = 0;
+    switch (size) {
+ case 8:
+ MOZ_ASSERT(IsSigned);
+ MOZ_ASSERT(ls != IsStore);
+ extra_bits1 = 0x1;
+ extra_bits2 = 0x2;
+ break;
+ case 16:
+        // 'case 32' doesn't need to be handled; it is handled by the default
+        // ldr/str.
+ extra_bits2 = 0x01;
+ extra_bits1 = (ls == IsStore) ? 0 : 1;
+ if (IsSigned) {
+ MOZ_ASSERT(ls != IsStore);
+ extra_bits2 |= 0x2;
+ }
+ break;
+ case 64:
+ extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
+ extra_bits1 = 0;
+ break;
+ default:
+ MOZ_CRASH("unexpected size in as_extdtr");
+ }
+ return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
+ addr.encode() | RT(rt) | mode | c);
+}
+
+BufferOffset
+Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
+ DTMMode mode, DTMWriteBack wb, Condition c)
+{
+ return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
+}
+
+// Note, it's possible for markAsBranch and loadToPC to disagree,
+// because some loads to the PC are not necessarily encoding
+// instructions that should be marked as branches: only patchable
+// near branch instructions should be marked.
+
+BufferOffset
+Assembler::allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data, ARMBuffer::PoolEntry* pe,
+ bool markAsBranch, bool loadToPC)
+{
+ BufferOffset offs = m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe, markAsBranch);
+ propagateOOM(offs.assigned());
+#ifdef JS_DISASM_ARM
+ spewData(offs, numInst, loadToPC);
+#endif
+ return offs;
+}
+
+// This is also used for instructions that might be resolved into branches,
+// or might not. If dest==pc then it is effectively a branch.
+
+BufferOffset
+Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
+{
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolDTR, dest);
+ BufferOffset offs = allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value, nullptr, false,
+ dest == pc);
+ return offs;
+}
+
+/* static */ void
+Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data)
+{
+ MOZ_ASSERT(addr->is<InstLDR>());
+ *addr->as<InstLDR>()->dest() = data;
+ MOZ_ASSERT(addr->extractCond() == c);
+}
+
+BufferOffset
+Assembler::as_BranchPool(uint32_t value, RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c,
+ Label* documentation)
+{
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolBranch, pc);
+ BufferOffset ret = allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value, pe,
+ /* markAsBranch = */ true, /* loadToPC = */ true);
+ // If this label is already bound, then immediately replace the stub load
+ // with a correct branch.
+ if (label->bound()) {
+ BufferOffset dest(label);
+ BOffImm offset = dest.diffB<BOffImm>(ret);
+ if (offset.isInvalid()) {
+ m_buffer.fail_bail();
+ return ret;
+ }
+ as_b(offset, c, ret);
+ } else if (!oom()) {
+ label->use(ret.getOffset());
+ }
+#ifdef JS_DISASM_ARM
+ if (documentation)
+ spewTarget(documentation);
+#endif
+ return ret;
+}
+
+BufferOffset
+Assembler::as_FImm64Pool(VFPRegister dest, wasm::RawF64 value, Condition c)
+{
+ MOZ_ASSERT(dest.isDouble());
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
+ uint64_t d = value.bits();
+ return allocEntry(1, 2, (uint8_t*)&php.raw, (uint8_t*)&d);
+}
+
+BufferOffset
+Assembler::as_FImm32Pool(VFPRegister dest, wasm::RawF32 value, Condition c)
+{
+    // Insert floats into the double pool as they have the same limitations on
+    // immediate offset. This wastes 4 bytes of padding per float. An
+    // alternative would be to have a separate pool for floats.
+ MOZ_ASSERT(dest.isSingle());
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
+ uint32_t f = value.bits();
+ return allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&f);
+}
+
+// Pool callbacks stuff:
+void
+Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index)
+{
+ uint32_t* load = (uint32_t*)load_;
+ PoolHintPun php;
+ php.raw = *load;
+ php.phd.setIndex(index);
+ *load = php.raw;
+}
+
+// PatchConstantPoolLoad takes the address of the instruction that wants to be
+// patched and the address of the start of the constant pool, and figures
+// things out from there.
+void
+Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ PoolHintData data = *(PoolHintData*)loadAddr;
+ uint32_t* instAddr = (uint32_t*) loadAddr;
+ int offset = (char*)constPoolAddr - (char*)loadAddr;
+    switch (data.getLoadType()) {
+ case PoolHintData::PoolBOGUS:
+ MOZ_CRASH("bogus load type!");
+ case PoolHintData::PoolDTR:
+ Assembler::as_dtr_patch(IsLoad, 32, Offset, data.getReg(),
+ DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
+ data.getCond(), instAddr);
+ break;
+ case PoolHintData::PoolBranch:
+ // Either this used to be a poolBranch, and the label was already bound,
+ // so it was replaced with a real branch, or this may happen in the
+ // future. If this is going to happen in the future, then the actual
+ // bits that are written here don't matter (except the condition code,
+ // since that is always preserved across patchings) but if it does not
+ // get bound later, then we want to make sure this is a load from the
+ // pool entry (and the pool entry should be nullptr so it will crash).
+ if (data.isValidPoolHint()) {
+ Assembler::as_dtr_patch(IsLoad, 32, Offset, pc,
+ DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
+ data.getCond(), instAddr);
+ }
+ break;
+ case PoolHintData::PoolVDTR: {
+ VFPRegister dest = data.getVFPReg();
+ int32_t imm = offset + (data.getIndex() * 4) - 8;
+ MOZ_ASSERT(-1024 < imm && imm < 1024);
+ Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(),
+ instAddr);
+ break;
+ }
+ }
+}
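+
+// The "- 8" in the offsets above is the usual ARM PC bias: a pc-relative load
+// sees the address of the load instruction plus 8, so a pool entry at
+// (constPoolAddr + 4 * index) is reached with the immediate
+// (constPoolAddr - loadAddr) + 4 * index - 8.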
+
+// Atomic instruction stuff:
+
+BufferOffset
+Assembler::as_ldrex(Register rt, Register rn, Condition c)
+{
+ return writeInst(0x01900f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset
+Assembler::as_ldrexh(Register rt, Register rn, Condition c)
+{
+ return writeInst(0x01f00f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset
+Assembler::as_ldrexb(Register rt, Register rn, Condition c)
+{
+ return writeInst(0x01d00f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset
+Assembler::as_strex(Register rd, Register rt, Register rn, Condition c)
+{
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01800f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset
+Assembler::as_strexh(Register rd, Register rt, Register rn, Condition c)
+{
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01e00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset
+Assembler::as_strexb(Register rd, Register rt, Register rn, Condition c)
+{
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01c00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+// Memory barrier stuff:
+
+BufferOffset
+Assembler::as_dmb(BarrierOption option)
+{
+ return writeInst(0xf57ff050U | (int)option);
+}
+BufferOffset
+Assembler::as_dsb(BarrierOption option)
+{
+ return writeInst(0xf57ff040U | (int)option);
+}
+BufferOffset
+Assembler::as_isb()
+{
+ return writeInst(0xf57ff06fU); // option == SY
+}
+BufferOffset
+Assembler::as_dsb_trap()
+{
+ // DSB is "mcr 15, 0, r0, c7, c10, 4".
+ // See eg https://bugs.kde.org/show_bug.cgi?id=228060.
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070f9a);
+}
+BufferOffset
+Assembler::as_dmb_trap()
+{
+ // DMB is "mcr 15, 0, r0, c7, c10, 5".
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070fba);
+}
+BufferOffset
+Assembler::as_isb_trap()
+{
+ // ISB is "mcr 15, 0, r0, c7, c5, 4".
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070f94);
+}
+
+// Control flow stuff:
+
+// bx can *only* branch to a register, never to an immediate.
+BufferOffset
+Assembler::as_bx(Register r, Condition c)
+{
+ BufferOffset ret = writeInst(((int) c) | OpBx | r.code());
+ return ret;
+}
+
+void
+Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest, BufferOffset afterPool)
+{
+ BOffImm off = afterPool.diffB<BOffImm>(branch);
+ if (off.isInvalid())
+ MOZ_CRASH("BOffImm invalid");
+ *dest = InstBImm(off, Always);
+}
+
+// Branch can branch to an immediate *or* to a register.
+// Branches to immediates are pc-relative; branches to registers are absolute.
+BufferOffset
+Assembler::as_b(BOffImm off, Condition c, Label* documentation)
+{
+ BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode(), documentation);
+ return ret;
+}
+
+BufferOffset
+Assembler::as_b(Label* l, Condition c)
+{
+ if (l->bound()) {
+        // Note that only one instruction is emitted here; the NOP is overwritten.
+ BufferOffset ret = allocBranchInst();
+ if (oom())
+ return BufferOffset();
+
+ as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(ret), l);
+#endif
+ return ret;
+ }
+
+ if (oom())
+ return BufferOffset();
+
+ int32_t old;
+ BufferOffset ret;
+ if (l->used()) {
+ old = l->offset();
+        // Bail out if we cannot actually encode the offset of the branch.
+ if (!BOffImm::IsInRange(old)) {
+ m_buffer.fail_bail();
+ return ret;
+ }
+ ret = as_b(BOffImm(old), c, l);
+ } else {
+ old = LabelBase::INVALID_OFFSET;
+ BOffImm inv;
+ ret = as_b(inv, c, l);
+ }
+
+ if (oom())
+ return BufferOffset();
+
+ DebugOnly<int32_t> check = l->use(ret.getOffset());
+ MOZ_ASSERT(check == old);
+ return ret;
+}
+
+BufferOffset
+Assembler::as_b(wasm::TrapDesc target, Condition c)
+{
+ Label l;
+ BufferOffset ret = as_b(&l, c);
+ bindLater(&l, target);
+ return ret;
+}
+
+BufferOffset
+Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
+{
+    // JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
+    // this to patch up old code. Must disassemble in the caller, where it makes
+    // sense. Not many callers.
+ *editSrc(inst) = InstBImm(off, c);
+ return inst;
+}
+
+// blx can go to either an immediate or a register.
+// When blx'ing to a register, we change processor state depending on the low
+// bit of the register; when blx'ing to an immediate, we *always* change
+// processor state.
+
+BufferOffset
+Assembler::as_blx(Register r, Condition c)
+{
+ return writeInst(((int) c) | OpBlx | r.code());
+}
+
+// bl can only branch to a pc-relative immediate offset.
+// It cannot change the processor state.
+BufferOffset
+Assembler::as_bl(BOffImm off, Condition c, Label* documentation)
+{
+ return writeBranchInst(((int)c) | OpBl | off.encode(), documentation);
+}
+
+BufferOffset
+Assembler::as_bl(Label* l, Condition c)
+{
+ if (l->bound()) {
+        // Note that only one instruction is emitted here; the NOP is overwritten.
+ BufferOffset ret = allocBranchInst();
+ if (oom())
+ return BufferOffset();
+
+ BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
+ if (offset.isInvalid()) {
+ m_buffer.fail_bail();
+ return BufferOffset();
+ }
+
+ as_bl(offset, c, ret);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(ret), l);
+#endif
+ return ret;
+ }
+
+ if (oom())
+ return BufferOffset();
+
+ int32_t old;
+ BufferOffset ret;
+ // See if the list was empty :(
+ if (l->used()) {
+        // Bail out on the buffer if we cannot actually encode the offset of
+        // the branch.
+ old = l->offset();
+ if (!BOffImm::IsInRange(old)) {
+ m_buffer.fail_bail();
+ return ret;
+ }
+ ret = as_bl(BOffImm(old), c, l);
+ } else {
+ old = LabelBase::INVALID_OFFSET;
+ BOffImm inv;
+ ret = as_bl(inv, c, l);
+ }
+
+ if (oom())
+ return BufferOffset();
+
+ DebugOnly<int32_t> check = l->use(ret.getOffset());
+ MOZ_ASSERT(check == old);
+ return ret;
+}
+
+BufferOffset
+Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
+{
+ *editSrc(inst) = InstBLImm(off, c);
+ return inst;
+}
+
+BufferOffset
+Assembler::as_mrs(Register r, Condition c)
+{
+ return writeInst(0x010f0000 | int(c) | RD(r));
+}
+
+BufferOffset
+Assembler::as_msr(Register r, Condition c)
+{
+ // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
+ // are the two high bits of the 'c' in this constant.
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return writeInst(0x012cf000 | int(c) | r.code());
+}
+
+// VFP instructions!
+enum vfp_tags {
+ VfpTag = 0x0C000A00,
+ VfpArith = 0x02000000
+};
+
+BufferOffset
+Assembler::writeVFPInst(vfp_size sz, uint32_t blob)
+{
+ MOZ_ASSERT((sz & blob) == 0);
+ MOZ_ASSERT((VfpTag & blob) == 0);
+ return writeInst(VfpTag | sz | blob);
+}
+
+/* static */ void
+Assembler::WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest)
+{
+ MOZ_ASSERT((sz & blob) == 0);
+ MOZ_ASSERT((VfpTag & blob) == 0);
+ WriteInstStatic(VfpTag | sz | blob, dest);
+}
+
+// Unityped variants: all registers hold the same type (IEEE 754 single or
+// double). Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
+BufferOffset
+Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ VFPOp op, Condition c)
+{
+ // Make sure we believe that all of our operands are the same kind.
+ MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
+ MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
+}
+
+BufferOffset
+Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, vn, vm, OpvAdd, c);
+}
+
+BufferOffset
+Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, vn, vm, OpvDiv, c);
+}
+
+BufferOffset
+Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, vn, vm, OpvMul, c);
+}
+
+BufferOffset
+Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, vn, vm, OpvMul, c);
+}
+
+BufferOffset
+Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ MOZ_CRASH("Feature NYI");
+}
+
+BufferOffset
+Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ MOZ_CRASH("Feature NYI");
+}
+
+BufferOffset
+Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
+}
+
+BufferOffset
+Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
+}
+
+BufferOffset
+Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
+}
+
+BufferOffset
+Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, vn, vm, OpvSub, c);
+}
+
+BufferOffset
+Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
+}
+
+BufferOffset
+Assembler::as_vcmpz(VFPRegister vd, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
+}
+
+// Specifically, a move between two same-sized registers.
+BufferOffset
+Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
+{
+ return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
+}
+
+// Transfer between Core and VFP.
+
+// Unlike the next function, moves between the core registers and the vfp
+// registers can't be typed quite as precisely, since we don't want to munge
+// the VFPRegister type to also include core registers. Thus, the core and vfp
+// registers are passed in based on their type, and which is the source and
+// which the destination is determined by the float2core argument.
+
+BufferOffset
+Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
+ Condition c, int idx)
+{
+ vfp_size sz = IsSingle;
+ if (vm.isDouble()) {
+        // Technically, this can be done with a vmov à la the ARM ARM's
+        // description of vmov; however, that requires at least an extra bit
+        // saying whether the operation should be performed on the lower or
+        // upper half of the double. Moving a single to/from 2N/2N+1 isn't
+        // equivalent, since there are 32 single registers but 32 double
+        // registers, so there is no way to encode the last 16 double registers.
+ sz = IsDouble;
+ MOZ_ASSERT(idx == 0 || idx == 1);
+ // If we are transferring a single half of the double then it must be
+ // moving a VFP reg to a core reg.
+ MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
+ idx = idx << 21;
+ } else {
+ MOZ_ASSERT(idx == 0);
+ }
+
+ if (vt2 == InvalidReg)
+ return writeVFPInst(sz, WordTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
+
+ // We are doing a 64 bit transfer.
+ return writeVFPInst(sz, DoubleTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
+}
+
+enum vcvt_destFloatness {
+ VcvtToInteger = 1 << 18,
+ VcvtToFloat = 0 << 18
+};
+enum vcvt_toZero {
+    VcvtToZero = 1 << 7, // Round towards zero (truncate) rather than using the rounding mode in the FPSCR.
+ VcvtToFPSCR = 0 << 7 // Use whatever rounding mode the fpscr specifies.
+};
+enum vcvt_Signedness {
+ VcvtToSigned = 1 << 16,
+ VcvtToUnsigned = 0 << 16,
+ VcvtFromSigned = 1 << 7,
+ VcvtFromUnsigned = 0 << 7
+};
+
+// Our encoding actually allows just the src and the dest (and their types) to
+// uniquely specify the encoding that we are going to use.
+BufferOffset
+Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
+ Condition c)
+{
+ // Unlike other cases, the source and dest types cannot be the same.
+ MOZ_ASSERT(!vd.equiv(vm));
+ vfp_size sz = IsDouble;
+ if (vd.isFloat() && vm.isFloat()) {
+ // Doing a float -> float conversion.
+ if (vm.isSingle())
+ sz = IsSingle;
+ return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
+ }
+
+ // At least one of the registers should be a float.
+ vcvt_destFloatness destFloat;
+ vcvt_Signedness opSign;
+ vcvt_toZero doToZero = VcvtToFPSCR;
+ MOZ_ASSERT(vd.isFloat() || vm.isFloat());
+ if (vd.isSingle() || vm.isSingle())
+ sz = IsSingle;
+
+ if (vd.isFloat()) {
+ destFloat = VcvtToFloat;
+ opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
+ } else {
+ destFloat = VcvtToInteger;
+ opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
+ doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
+ }
+ return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
+}
+
+BufferOffset
+Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c)
+{
+ MOZ_ASSERT(vd.isFloat());
+ uint32_t sx = 0x1;
+ vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
+ int32_t imm5 = fixedPoint;
+ imm5 = (sx ? 32 : 16) - imm5;
+ MOZ_ASSERT(imm5 >= 0);
+ imm5 = imm5 >> 1 | (imm5 & 1) << 5;
+ return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
+ (!isSigned) << 16 | imm5 | c);
+}
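+// For illustration of the imm5 packing in as_vcvtFixed: with the hardcoded
+// sx == 1 the fraction width is taken from a 32-bit field, so fixedPoint == 16
+// gives imm5 = 32 - 16 = 16, which is then split as (16 >> 1) | ((16 & 1) << 5)
+// == 8 before being or'ed into the instruction word.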
+
+// Transfer between VFP and memory.
+static uint32_t
+EncodeVdtr(LoadStore ls, VFPRegister vd, VFPAddr addr, Assembler::Condition c)
+{
+ return ls | 0x01000000 | addr.encode() | VD(vd) | c;
+}
+
+BufferOffset
+Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c /* vfp doesn't have a wb option */)
+{
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ return writeVFPInst(sz, EncodeVdtr(ls, vd, addr, c));
+}
+
+/* static */ void
+Assembler::as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr, Condition c, uint32_t* dest)
+{
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ WriteVFPInstStatic(sz, EncodeVdtr(ls, vd, addr, c), dest);
+}
+
+// VFP's ldm/stm work differently from the standard arm ones. You can only
+// transfer a range.
+
+BufferOffset
+Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
+ /* also has update conditions */ Condition c)
+{
+ MOZ_ASSERT(length <= 16 && length >= 0);
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+
+ if (vd.isDouble())
+ length *= 2;
+
+ return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length |
+ dtmMode | dtmUpdate | dtmCond);
+}
+
+BufferOffset
+Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
+{
+ MOZ_ASSERT(imm.isValid());
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+    return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
+}
+
+BufferOffset
+Assembler::as_vmrs(Register r, Condition c)
+{
+ return writeInst(c | 0x0ef10a10 | RT(r));
+}
+
+BufferOffset
+Assembler::as_vmsr(Register r, Condition c)
+{
+ return writeInst(c | 0x0ee10a10 | RT(r));
+}
+
+bool
+Assembler::nextLink(BufferOffset b, BufferOffset* next)
+{
+ Instruction branch = *editSrc(b);
+ MOZ_ASSERT(branch.is<InstBranchImm>());
+
+ BOffImm destOff;
+ branch.as<InstBranchImm>()->extractImm(&destOff);
+ if (destOff.isInvalid())
+ return false;
+
+ // Propagate the next link back to the caller, by constructing a new
+ // BufferOffset into the space they provided.
+ new (next) BufferOffset(destOff.decode());
+ return true;
+}
+
+void
+Assembler::bind(Label* label, BufferOffset boff)
+{
+#ifdef JS_DISASM_ARM
+ spewLabel(label);
+#endif
+ if (oom()) {
+ // Ensure we always bind the label. This matches what we do on
+ // x86/x64 and silences the assert in ~Label.
+ label->bind(0);
+ return;
+ }
+
+ if (label->used()) {
+ bool more;
+ // If our caller didn't give us an explicit target to bind to then we
+ // want to bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ BufferOffset b(label);
+ do {
+ BufferOffset next;
+ more = nextLink(b, &next);
+ Instruction branch = *editSrc(b);
+ Condition c = branch.extractCond();
+ BOffImm offset = dest.diffB<BOffImm>(b);
+ if (offset.isInvalid()) {
+ m_buffer.fail_bail();
+ return;
+ }
+ if (branch.is<InstBImm>())
+ as_b(offset, c, b);
+ else if (branch.is<InstBLImm>())
+ as_bl(offset, c, b);
+ else
+ MOZ_CRASH("crazy fixup!");
+ b = next;
+ } while (more);
+ }
+ label->bind(nextOffset().getOffset());
+ MOZ_ASSERT(!oom());
+}
+
+void
+Assembler::bindLater(Label* label, wasm::TrapDesc target)
+{
+ if (label->used()) {
+ BufferOffset b(label);
+ do {
+ append(wasm::TrapSite(target, b.getOffset()));
+ } while (nextLink(b, &b));
+ }
+ label->reset();
+}
+
+void
+Assembler::bind(RepatchLabel* label)
+{
+ // It does not seem to be useful to record this label for
+ // disassembly, as the value that is bound to the label is often
+ // effectively garbage and is replaced by something else later.
+ BufferOffset dest = nextOffset();
+ if (label->used() && !oom()) {
+ // If the label has a use, then change this use to refer to the bound
+ // label.
+ BufferOffset branchOff(label->offset());
+ // Since this was created with a RepatchLabel, the value written in the
+ // instruction stream is not branch shaped, it is PoolHintData shaped.
+ Instruction* branch = editSrc(branchOff);
+ PoolHintPun p;
+ p.raw = branch->encode();
+ Condition cond;
+ if (p.phd.isValidPoolHint())
+ cond = p.phd.getCond();
+ else
+ cond = branch->extractCond();
+
+ BOffImm offset = dest.diffB<BOffImm>(branchOff);
+ if (offset.isInvalid()) {
+ m_buffer.fail_bail();
+ return;
+ }
+ as_b(offset, cond, branchOff);
+ }
+ label->bind(dest.getOffset());
+}
+
+void
+Assembler::retarget(Label* label, Label* target)
+{
+#ifdef JS_DISASM_ARM
+ spewRetarget(label, target);
+#endif
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ BufferOffset labelBranchOffset(label);
+ BufferOffset next;
+
+ // Find the head of the use chain for label.
+ while (nextLink(labelBranchOffset, &next))
+ labelBranchOffset = next;
+
+ // Then patch the head of label's use chain to the tail of target's
+ // use chain, prepending the entire use chain of target.
+ Instruction branch = *editSrc(labelBranchOffset);
+ Condition c = branch.extractCond();
+ int32_t prev = target->use(label->offset());
+ if (branch.is<InstBImm>())
+ as_b(BOffImm(prev), c, labelBranchOffset);
+ else if (branch.is<InstBLImm>())
+ as_bl(BOffImm(prev), c, labelBranchOffset);
+ else
+ MOZ_CRASH("crazy fixup!");
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ DebugOnly<uint32_t> prev = target->use(label->offset());
+ MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
+ }
+ }
+    label->reset();
+}
+
+static int stopBKPT = -1;
+void
+Assembler::as_bkpt()
+{
+ // This is a count of how many times a breakpoint instruction has been
+ // generated. It is embedded into the instruction for debugging
+    // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
+ // breakpoint with the number xxx embedded into it. If this breakpoint is
+ // being hit, then you can run (in gdb):
+ // >b dbg_break
+ // >b main
+ // >commands
+ // >set stopBKPT = xxx
+ // >c
+ // >end
+    // which will set a breakpoint on the function dbg_break above, set a
+    // scripted breakpoint on main that sets the (otherwise unmodified) stopBKPT
+    // to the number of the breakpoint so dbg_break will actually be called, and
+    // finally, when you run the executable, execution will halt when that
+    // breakpoint is generated.
+ static int hit = 0;
+ if (stopBKPT == hit)
+ dbg_break();
+ writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
+ hit++;
+}
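+// For illustration: the bkpt immediate is split across the instruction word as
+// imm12:imm4, so for hit == 0x12 the writeInst() above computes
+// 0xe1200070 | (0x12 & 0xf) | ((0x12 & 0xfff0) << 4) == 0xe1200172, which
+// disassembles as "bkpt 0x0012".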
+
+void
+Assembler::flushBuffer()
+{
+ m_buffer.flushPool();
+}
+
+void
+Assembler::enterNoPool(size_t maxInst)
+{
+ m_buffer.enterNoPool(maxInst);
+}
+
+void
+Assembler::leaveNoPool()
+{
+ m_buffer.leaveNoPool();
+}
+
+ptrdiff_t
+Assembler::GetBranchOffset(const Instruction* i_)
+{
+ MOZ_ASSERT(i_->is<InstBranchImm>());
+ InstBranchImm* i = i_->as<InstBranchImm>();
+ BOffImm dest;
+ i->extractImm(&dest);
+ return dest.decode();
+}
+
+void
+Assembler::RetargetNearBranch(Instruction* i, int offset, bool final)
+{
+ Assembler::Condition c = i->extractCond();
+ RetargetNearBranch(i, offset, c, final);
+}
+
+void
+Assembler::RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final)
+{
+ // Retargeting calls is totally unsupported!
+ MOZ_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
+ if (i->is<InstBLImm>())
+ new (i) InstBLImm(BOffImm(offset), cond);
+ else
+ new (i) InstBImm(BOffImm(offset), cond);
+
+ // Flush the cache, since an instruction was overwritten.
+ if (final)
+ AutoFlushICache::flush(uintptr_t(i), 4);
+}
+
+void
+Assembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond)
+{
+ int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
+ if (!i->is<InstLDR>()) {
+ new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
+ AutoFlushICache::flush(uintptr_t(i), 4);
+ }
+ *slot = dest;
+}
+
+struct PoolHeader : Instruction
+{
+ struct Header
+ {
+ // The size should take into account the pool header.
+        // The size is in units of Instruction (4 bytes), not bytes.
+ uint32_t size : 15;
+ bool isNatural : 1;
+ uint32_t ONES : 16;
+
+ Header(int size_, bool isNatural_)
+ : size(size_),
+ isNatural(isNatural_),
+ ONES(0xffff)
+ { }
+
+ Header(const Instruction* i) {
+ JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ memcpy(this, i, sizeof(Header));
+ MOZ_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ uint32_t dest;
+ memcpy(&dest, this, sizeof(Header));
+ return dest;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : Instruction(Header(size_, isNatural_).raw(), true)
+ { }
+
+ uint32_t size() const {
+ Header tmp(this);
+ return tmp.size;
+ }
+ uint32_t isNatural() const {
+ Header tmp(this);
+ return tmp.isNatural;
+ }
+
+ static bool IsTHIS(const Instruction& i) {
+ return (*i.raw() & 0xffff0000) == 0xffff0000;
+ }
+ static const PoolHeader* AsTHIS(const Instruction& i) {
+ if (!IsTHIS(i))
+ return nullptr;
+ return static_cast<const PoolHeader*>(&i);
+ }
+};
+
+void
+Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
+{
+    static_assert(sizeof(PoolHeader) == 4, "PoolHeader must have the correct size.");
+ uint8_t* pool = start + 4;
+ // Go through the usual rigmarole to get the size of the pool.
+ pool += p->getPoolSize();
+ uint32_t size = pool - start;
+ MOZ_ASSERT((size & 3) == 0);
+ size = size >> 2;
+ MOZ_ASSERT(size < (1 << 15));
+ PoolHeader header(size, isNatural);
+ *(PoolHeader*)start = header;
+}
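+// For illustration: a pool holding a single 32-bit entry gets
+// size == (4 + 4) / 4 == 2, so the header word written above is 0xffff0002
+// when the guard branch was inserted automatically (isNatural == 0) and
+// 0xffff8002 for a natural guard, matching the worked examples in the
+// Instruction::next() comment below.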
+
+// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
+// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
+uint32_t
+Assembler::PatchWrite_NearCallSize()
+{
+ return sizeof(uint32_t);
+}
+
+void
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction* inst = (Instruction*) start.raw();
+ // Overwrite whatever instruction used to be here with a call. Since the
+ // destination is in the same function, it will be within range of the
+ // 24 << 2 byte bl instruction.
+ uint8_t* dest = toCall.raw();
+ new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue)
+{
+ Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());
+ InstructionIterator iter(ptr);
+ Register dest;
+ Assembler::RelocStyle rs;
+
+ DebugOnly<const uint32_t*> val = GetPtr32Target(&iter, &dest, &rs);
+ MOZ_ASSERT(uint32_t((const uint32_t*)val) == uint32_t(expectedValue.value));
+
+ MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always, rs, ptr);
+
+ // L_LDR won't cause any instructions to be updated.
+ if (rs != L_LDR) {
+ AutoFlushICache::flush(uintptr_t(ptr), 4);
+ AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
+ }
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+{
+ PatchDataWithValueCheck(label,
+ PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. This is
+// only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void
+Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will end up being
+ // the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t*
+Assembler::NextInstruction(uint8_t* inst_, uint32_t* count)
+{
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr)
+ *count += sizeof(Instruction);
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+static bool
+InstIsGuard(Instruction* inst, const PoolHeader** ph)
+{
+ Assembler::Condition c = inst->extractCond();
+ if (c != Assembler::Always)
+ return false;
+ if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
+ return false;
+ // See if the next instruction is a pool header.
+ *ph = (inst + 1)->as<const PoolHeader>();
+ return *ph != nullptr;
+}
+
+static bool
+InstIsBNop(Instruction* inst)
+{
+    // In some special situations, it is necessary to insert a NOP into the
+    // instruction stream that nobody knows about. Since nobody should know
+    // about it, we make sure it gets skipped when Instruction::next() is
+    // called. This is a very specific nop, namely a branch to the next
+    // instruction.
+ Assembler::Condition c = inst->extractCond();
+ if (c != Assembler::Always)
+ return false;
+ if (!inst->is<InstBImm>())
+ return false;
+ InstBImm* b = inst->as<InstBImm>();
+ BOffImm offset;
+ b->extractImm(&offset);
+ return offset.decode() == 4;
+}
+
+static bool
+InstIsArtificialGuard(Instruction* inst, const PoolHeader** ph)
+{
+ if (!InstIsGuard(inst, ph))
+ return false;
+ return !(*ph)->isNatural();
+}
+
+// If the instruction points to an artificial pool guard, then skip the pool.
+Instruction*
+Instruction::skipPool()
+{
+ const PoolHeader* ph;
+ // If this is a guard, and the next instruction is a header, always work
+ // around the pool. If it isn't a guard, then start looking ahead.
+ if (InstIsGuard(this, &ph)) {
+ // Don't skip a natural guard.
+ if (ph->isNatural())
+ return this;
+ return (this + 1 + ph->size())->skipPool();
+ }
+ if (InstIsBNop(this))
+ return (this + 1)->skipPool();
+ return this;
+}
+
+// Cases to be handled:
+// 1) no pools or branches in sight => return this+1
+// 2) branch to next instruction => return this+2, because a nop needed to be inserted into the stream.
+// 3) this+1 is an artificial guard for a pool => return first instruction after the pool
+// 4) this+1 is a natural guard => return the branch
+// 5) this is a branch, right before a pool => return first instruction after the pool
+// in assembly form:
+// 1) add r0, r0, r0 <= this
+// add r1, r1, r1 <= returned value
+// add r2, r2, r2
+//
+// 2) add r0, r0, r0 <= this
+// b foo
+// foo:
+// add r2, r2, r2 <= returned value
+//
+// 3) add r0, r0, r0 <= this
+// b after_pool;
+// .word 0xffff0002 # bit 15 being 0 indicates that the branch was not requested by the assembler
+// 0xdeadbeef # the 2 indicates that there is 1 pool entry, and the pool header
+// add r4, r4, r4 <= returned value
+// 4) add r0, r0, r0 <= this
+// b after_pool <= returned value
+// .word 0xffff8002 # bit 15 being 1 indicates that the branch was requested by the assembler
+// 0xdeadbeef
+// add r4, r4, r4
+// 5) b after_pool <= this
+// .word 0xffff8002 # bit 15 has no bearing on the returned value
+// 0xdeadbeef
+// add r4, r4, r4 <= returned value
+
+Instruction*
+Instruction::next()
+{
+ Instruction* ret = this+1;
+ const PoolHeader* ph;
+ // If this is a guard, and the next instruction is a header, always work
+ // around the pool. If it isn't a guard, then start looking ahead.
+ if (InstIsGuard(this, &ph))
+ return (ret + ph->size())->skipPool();
+ if (InstIsArtificialGuard(ret, &ph))
+ return (ret + 1 + ph->size())->skipPool();
+ return ret->skipPool();
+}
+
+void
+Assembler::ToggleToJmp(CodeLocationLabel inst_)
+{
+ uint32_t* ptr = (uint32_t*)inst_.raw();
+
+ DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
+ MOZ_ASSERT(inst->is<InstCMP>());
+
+ // Zero bits 20-27, then set 24-27 to be correct for a branch.
+    // 20-23 will be part of the B's immediate, and should be 0.
+ *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
+ AutoFlushICache::flush(uintptr_t(ptr), 4);
+}
+
+void
+Assembler::ToggleToCmp(CodeLocationLabel inst_)
+{
+ uint32_t* ptr = (uint32_t*)inst_.raw();
+
+ DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
+ MOZ_ASSERT(inst->is<InstBImm>());
+
+ // Ensure that this masking operation doesn't affect the offset of the
+ // branch instruction when it gets toggled back.
+ MOZ_ASSERT((*ptr & (0xf << 20)) == 0);
+
+ // Also make sure that the CMP is valid. Part of having a valid CMP is that
+ // all of the bits describing the destination in most ALU instructions are
+ // all unset (looks like it is encoding r0).
+ MOZ_ASSERT(toRD(*inst) == r0);
+
+ // Zero out bits 20-27, then set them to be correct for a compare.
+ *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
+
+ AutoFlushICache::flush(uintptr_t(ptr), 4);
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ Instruction* inst = (Instruction*)inst_.raw();
+ // Skip a pool with an artificial guard.
+ inst = inst->skipPool();
+ MOZ_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
+
+ if (inst->is<InstMovW>()) {
+ // If it looks like the start of a movw/movt sequence, then make sure we
+ // have all of it (and advance the iterator past the full sequence).
+ inst = inst->next();
+ MOZ_ASSERT(inst->is<InstMovT>());
+ }
+
+ inst = inst->next();
+ MOZ_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
+
+ if (enabled == inst->is<InstBLXReg>()) {
+ // Nothing to do.
+ return;
+ }
+
+ if (enabled)
+ *inst = InstBLXReg(ScratchRegister, Always);
+ else
+ *inst = InstNOP();
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+size_t
+Assembler::ToggledCallSize(uint8_t* code)
+{
+ Instruction* inst = (Instruction*)code;
+ // Skip a pool with an artificial guard.
+ inst = inst->skipPool();
+ MOZ_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
+
+ if (inst->is<InstMovW>()) {
+ // If it looks like the start of a movw/movt sequence, then make sure we
+ // have all of it (and advance the iterator past the full sequence).
+ inst = inst->next();
+ MOZ_ASSERT(inst->is<InstMovT>());
+ }
+
+ inst = inst->next();
+ MOZ_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
+ return uintptr_t(inst) + 4 - uintptr_t(code);
+}
+
+uint8_t*
+Assembler::BailoutTableStart(uint8_t* code)
+{
+ Instruction* inst = (Instruction*)code;
+ // Skip a pool with an artificial guard or NOP fill.
+ inst = inst->skipPool();
+ MOZ_ASSERT(inst->is<InstBLImm>());
+ return (uint8_t*) inst;
+}
+
+InstructionIterator::InstructionIterator(Instruction* i_)
+ : i(i_)
+{
+ // Work around pools with an artificial pool guard and around nop-fill.
+ i = i->skipPool();
+}
+
+uint32_t Assembler::NopFill = 0;
+
+uint32_t
+Assembler::GetNopFill()
+{
+ static bool isSet = false;
+ if (!isSet) {
+ char* fillStr = getenv("ARM_ASM_NOP_FILL");
+ uint32_t fill;
+ if (fillStr && sscanf(fillStr, "%u", &fill) == 1)
+ NopFill = fill;
+ if (NopFill > 8)
+ MOZ_CRASH("Nop fill > 8 is not supported");
+ isSet = true;
+ }
+ return NopFill;
+}
+
+uint32_t Assembler::AsmPoolMaxOffset = 1024;
+
+uint32_t
+Assembler::GetPoolMaxOffset()
+{
+ static bool isSet = false;
+ if (!isSet) {
+ char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
+ uint32_t poolMaxOffset;
+ if (poolMaxOffsetStr && sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1)
+ AsmPoolMaxOffset = poolMaxOffset;
+ isSet = true;
+ }
+ return AsmPoolMaxOffset;
+}
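+// Both of the above are debugging knobs read from the environment: setting
+// ARM_ASM_NOP_FILL (e.g. ARM_ASM_NOP_FILL=1) requests that the assembler
+// buffer pad the instruction stream with nop fill, and ASM_POOL_MAX_OFFSET
+// lowers the distance allowed between a constant pool and its uses; both are
+// handy for shaking out pool-placement and offset-range bugs.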
+
+SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler &masm)
+ : AutoRegisterScope(masm, masm.getSecondScratchReg())
+{
+}
diff --git a/js/src/jit/arm/Assembler-arm.h b/js/src/jit/arm/Assembler-arm.h
new file mode 100644
index 000000000..8bb754a50
--- /dev/null
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -0,0 +1,2429 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Assembler_arm_h
+#define jit_arm_Assembler_arm_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm/Architecture-arm.h"
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+
+namespace js {
+namespace jit {
+
+// NOTE: there are duplicates in this list! Sometimes we want to specifically
+// refer to the link register as a link register (bl lr is much clearer than bl
+// r14). HOWEVER, this register can easily be a gpr when it is not busy holding
+// the return address.
+static constexpr Register r0 = { Registers::r0 };
+static constexpr Register r1 = { Registers::r1 };
+static constexpr Register r2 = { Registers::r2 };
+static constexpr Register r3 = { Registers::r3 };
+static constexpr Register r4 = { Registers::r4 };
+static constexpr Register r5 = { Registers::r5 };
+static constexpr Register r6 = { Registers::r6 };
+static constexpr Register r7 = { Registers::r7 };
+static constexpr Register r8 = { Registers::r8 };
+static constexpr Register r9 = { Registers::r9 };
+static constexpr Register r10 = { Registers::r10 };
+static constexpr Register r11 = { Registers::r11 };
+static constexpr Register r12 = { Registers::ip };
+static constexpr Register ip = { Registers::ip };
+static constexpr Register sp = { Registers::sp };
+static constexpr Register r14 = { Registers::lr };
+static constexpr Register lr = { Registers::lr };
+static constexpr Register pc = { Registers::pc };
+
+static constexpr Register ScratchRegister = {Registers::ip};
+
+// Helper class for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of the scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope
+{
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister)
+ { }
+};
+
+struct SecondScratchRegisterScope : public AutoRegisterScope
+{
+ explicit SecondScratchRegisterScope(MacroAssembler& masm);
+};
+
+static constexpr Register OsrFrameReg = r3;
+static constexpr Register ArgumentsRectifierReg = r8;
+static constexpr Register CallTempReg0 = r5;
+static constexpr Register CallTempReg1 = r6;
+static constexpr Register CallTempReg2 = r7;
+static constexpr Register CallTempReg3 = r8;
+static constexpr Register CallTempReg4 = r0;
+static constexpr Register CallTempReg5 = r1;
+
+static constexpr Register IntArgReg0 = r0;
+static constexpr Register IntArgReg1 = r1;
+static constexpr Register IntArgReg2 = r2;
+static constexpr Register IntArgReg3 = r3;
+static constexpr Register GlobalReg = r10;
+static constexpr Register HeapReg = r11;
+static constexpr Register CallTempNonArgRegs[] = { r5, r6, r7, r8 };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+    // ARM can use either the HardFp ABI (float registers for float arguments)
+    // or the SoftFp ABI (general registers for float arguments). We keep this
+    // as a runtime switch because wasm always uses the HardFp back-end, while
+    // calls to native functions have to use the ABI provided by the system.
+ bool useHardFp_;
+
+ ABIArg softNext(MIRType argType);
+ ABIArg hardNext(MIRType argType);
+
+ public:
+ ABIArgGenerator();
+
+ void setUseHardFp(bool useHardFp) {
+ MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
+ useHardFp_ = useHardFp;
+ }
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+};
+
+static constexpr Register ABINonArgReg0 = r4;
+static constexpr Register ABINonArgReg1 = r5;
+static constexpr Register ABINonArgReg2 = r6;
+static constexpr Register ABINonArgReturnReg0 = r4;
+static constexpr Register ABINonArgReturnReg1 = r5;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = r9;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register PreBarrierReg = r1;
+
+static constexpr Register InvalidReg = { Registers::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register JSReturnReg_Type = r3;
+static constexpr Register JSReturnReg_Data = r2;
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = InvalidReg;
+static constexpr Register ReturnReg = r0;
+static constexpr Register64 ReturnReg64(r1, r0);
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::d0, VFPRegister::Single };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0, VFPRegister::Double};
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::d30, VFPRegister::Single };
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::d15, VFPRegister::Double };
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchUIntReg = { FloatRegisters::d15, VFPRegister::UInt };
+static constexpr FloatRegister ScratchIntReg = { FloatRegisters::d15, VFPRegister::Int };
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope
+{
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg)
+ { }
+};
+struct ScratchDoubleScope : public AutoFloatRegisterScope
+{
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg)
+ { }
+};
+
+// A bias applied to the GlobalReg to allow the use of instructions with small
+// negative immediate offsets which doubles the range of global data that can be
+// accessed with a single instruction.
+static const int32_t WasmGlobalRegBias = 1024;
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = r4;
+static constexpr Register WasmIonExitRegE0 = r0;
+static constexpr Register WasmIonExitRegE1 = r1;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (lr).
+static constexpr Register WasmIonExitRegReturnData = r2;
+static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitRegD0 = r0;
+static constexpr Register WasmIonExitRegD1 = r1;
+static constexpr Register WasmIonExitRegD2 = r4;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg1;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
+
+static constexpr FloatRegister d0 = {FloatRegisters::d0, VFPRegister::Double};
+static constexpr FloatRegister d1 = {FloatRegisters::d1, VFPRegister::Double};
+static constexpr FloatRegister d2 = {FloatRegisters::d2, VFPRegister::Double};
+static constexpr FloatRegister d3 = {FloatRegisters::d3, VFPRegister::Double};
+static constexpr FloatRegister d4 = {FloatRegisters::d4, VFPRegister::Double};
+static constexpr FloatRegister d5 = {FloatRegisters::d5, VFPRegister::Double};
+static constexpr FloatRegister d6 = {FloatRegisters::d6, VFPRegister::Double};
+static constexpr FloatRegister d7 = {FloatRegisters::d7, VFPRegister::Double};
+static constexpr FloatRegister d8 = {FloatRegisters::d8, VFPRegister::Double};
+static constexpr FloatRegister d9 = {FloatRegisters::d9, VFPRegister::Double};
+static constexpr FloatRegister d10 = {FloatRegisters::d10, VFPRegister::Double};
+static constexpr FloatRegister d11 = {FloatRegisters::d11, VFPRegister::Double};
+static constexpr FloatRegister d12 = {FloatRegisters::d12, VFPRegister::Double};
+static constexpr FloatRegister d13 = {FloatRegisters::d13, VFPRegister::Double};
+static constexpr FloatRegister d14 = {FloatRegisters::d14, VFPRegister::Double};
+static constexpr FloatRegister d15 = {FloatRegisters::d15, VFPRegister::Double};
+
+
+// For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register
+// load/store) operate in a single cycle when the address they are dealing with
+// is 8-byte aligned. Also, the ARM ABI wants the stack to be 8-byte aligned at
+// function boundaries. I'm trying to make sure this is always true.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t CodeAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than a method in the LIRGenerator, it is
+// here such that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = false;
+static constexpr uint32_t SimdMemoryAlignment = 8;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which are used for "
+ "the constant sections of the code buffer. Thus it should be larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments which are used for "
+ "spilled values. Thus it should be larger than the alignment for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+static const Scale ScalePointer = TimesFour;
+
+class Instruction;
+class InstBranchImm;
+uint32_t RM(Register r);
+uint32_t RS(Register r);
+uint32_t RD(Register r);
+uint32_t RT(Register r);
+uint32_t RN(Register r);
+
+uint32_t maybeRD(Register r);
+uint32_t maybeRT(Register r);
+uint32_t maybeRN(Register r);
+
+Register toRN(Instruction i);
+Register toRM(Instruction i);
+Register toRD(Instruction i);
+Register toR(Instruction i);
+
+class VFPRegister;
+uint32_t VD(VFPRegister vr);
+uint32_t VN(VFPRegister vr);
+uint32_t VM(VFPRegister vr);
+
+// For being passed into the generic vfp instruction generator when there is an
+// instruction that only takes two registers.
+static constexpr VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true);
+
+struct ImmTag : public Imm32
+{
+ explicit ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+struct ImmType : public ImmTag
+{
+ explicit ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+enum Index {
+ Offset = 0 << 21 | 1<<24,
+ PreIndex = 1 << 21 | 1 << 24,
+ PostIndex = 0 << 21 | 0 << 24
+ // The docs were rather unclear on this. It sounds like
+ // 1 << 21 | 0 << 24 encodes dtrt.
+};
+
+enum IsImmOp2_ {
+ IsImmOp2 = 1 << 25,
+ IsNotImmOp2 = 0 << 25
+};
+enum IsImmDTR_ {
+ IsImmDTR = 0 << 25,
+ IsNotImmDTR = 1 << 25
+};
+// For the extra memory operations, ldrd, ldrsb, ldrh.
+enum IsImmEDTR_ {
+ IsImmEDTR = 1 << 22,
+ IsNotImmEDTR = 0 << 22
+};
+
+enum ShiftType {
+ LSL = 0, // << 5
+ LSR = 1, // << 5
+ ASR = 2, // << 5
+ ROR = 3, // << 5
+ RRX = ROR // RRX is encoded as ROR with a 0 offset.
+};
+
+// Modes for STM/LDM. Names are the suffixes applied to the instruction.
+enum DTMMode {
+ A = 0 << 24, // empty / after
+ B = 1 << 24, // full / before
+ D = 0 << 23, // decrement
+ I = 1 << 23, // increment
+ DA = D | A,
+ DB = D | B,
+ IA = I | A,
+ IB = I | B
+};
+
+enum DTMWriteBack {
+ WriteBack = 1 << 21,
+ NoWriteBack = 0 << 21
+};
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+enum LoadStore {
+ IsLoad = 1 << 20,
+ IsStore = 0 << 20
+};
+
+// You almost never want to use this directly. Instead, you want to pass in a
+// signed constant, and let this bit be implicitly set for you. It is, however,
+// necessary if we want a negative index.
+enum IsUp_ {
+ IsUp = 1 << 23,
+ IsDown = 0 << 23
+};
+enum ALUOp {
+ OpMov = 0xd << 21,
+ OpMvn = 0xf << 21,
+ OpAnd = 0x0 << 21,
+ OpBic = 0xe << 21,
+ OpEor = 0x1 << 21,
+ OpOrr = 0xc << 21,
+ OpAdc = 0x5 << 21,
+ OpAdd = 0x4 << 21,
+ OpSbc = 0x6 << 21,
+ OpSub = 0x2 << 21,
+ OpRsb = 0x3 << 21,
+ OpRsc = 0x7 << 21,
+ OpCmn = 0xb << 21,
+ OpCmp = 0xa << 21,
+ OpTeq = 0x9 << 21,
+ OpTst = 0x8 << 21,
+ OpInvalid = -1
+};
+
+
+enum MULOp {
+ OpmMul = 0 << 21,
+ OpmMla = 1 << 21,
+ OpmUmaal = 2 << 21,
+ OpmMls = 3 << 21,
+ OpmUmull = 4 << 21,
+ OpmUmlal = 5 << 21,
+ OpmSmull = 6 << 21,
+ OpmSmlal = 7 << 21
+};
+enum BranchTag {
+ OpB = 0x0a000000,
+ OpBMask = 0x0f000000,
+ OpBDestMask = 0x00ffffff,
+ OpBl = 0x0b000000,
+ OpBlx = 0x012fff30,
+ OpBx = 0x012fff10
+};
+
+// Just like ALUOp, but for the vfp instruction set.
+enum VFPOp {
+ OpvMul = 0x2 << 20,
+ OpvAdd = 0x3 << 20,
+ OpvSub = 0x3 << 20 | 0x1 << 6,
+ OpvDiv = 0x8 << 20,
+ OpvMov = 0xB << 20 | 0x1 << 6,
+ OpvAbs = 0xB << 20 | 0x3 << 6,
+ OpvNeg = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
+ OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
+ OpvCmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
+ OpvCmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16
+};
+
+// Negate the operation, AND negate the immediate that we were passed in.
+ALUOp ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm, Register* negDest);
+bool can_dbl(ALUOp op);
+bool condsAreSafe(ALUOp op);
+
+// If there is a variant of op that has a dest (think cmp/sub) return that
+// variant of it.
+ALUOp getDestVariant(ALUOp op);
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
+
+// All of these classes exist solely to shuffle data into the various operands.
+// For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
+// register-shifted-by-a-register. We represent this in C++ by having a base
+// class Operand2, which just stores the 32 bits of data as they will be encoded
+// in the instruction. You cannot directly create an Operand2 since it is
+// tricky, and not entirely sane to do so. Instead, you create one of its child
+// classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
+// will verify that its argument can be encoded as a 12-bit ARM imm8m, encode it
+// using an Imm8mData, and finally call its parent's (Operand2) constructor with
+// the Imm8mData. The Operand2 constructor will then call the Imm8mData's encode()
+// function to extract the raw bits from it.
+//
+// In the future, we should be able to extract data from the Operand2 by asking
+// it for its component Imm8data structures. The reason this is so horribly
+// round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
+// directly from Operand2 but have all of them take up only a single word of
+// storage. We also wanted to avoid passing around raw integers at all since
+// they are error prone.
+class Op2Reg;
+class O2RegImmShift;
+class O2RegRegShift;
+
+namespace datastore {
+
+class Reg
+{
+ // The "second register".
+ uint32_t rm_ : 4;
+ // Do we get another register for shifting.
+ bool rrs_ : 1;
+ ShiftType type_ : 2;
+ // We'd like this to be a more sensible encoding, but that would need to be
+ // a struct and that would not pack :(
+ uint32_t shiftAmount_ : 5;
+ uint32_t pad_ : 20;
+
+ public:
+ Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftAmount)
+ : rm_(rm), rrs_(rsr), type_(type), shiftAmount_(shiftAmount), pad_(0)
+ { }
+ explicit Reg(const Op2Reg& op) {
+ memcpy(this, &op, sizeof(*this));
+ }
+
+ uint32_t shiftAmount() const {
+ return shiftAmount_;
+ }
+
+ uint32_t encode() const {
+ return rm_ | (rrs_ << 4) | (type_ << 5) | (shiftAmount_ << 7);
+ }
+};
+
+// Op2 has a mode labelled "<imm8m>", which is ARM's magical immediate encoding.
+// Some instructions actually get 8 bits of data, which is called Imm8Data
+// below. The two names should have an edit distance greater than 1, but this
+// is how it is for now.
+class Imm8mData
+{
+ uint32_t data_ : 8;
+ uint32_t rot_ : 4;
+ uint32_t buff_ : 19;
+
+    // Throw in an extra bit that will be 1 if we can't encode this properly.
+    // If we can encode it properly, a simple "|" will still suffice to meld it
+    // into the instruction.
+ bool invalid_ : 1;
+
+ public:
+ // Default constructor makes an invalid immediate.
+ Imm8mData()
+ : data_(0xff), rot_(0xf), buff_(0), invalid_(true)
+ { }
+
+ Imm8mData(uint32_t data, uint32_t rot)
+ : data_(data), rot_(rot), buff_(0), invalid_(false)
+ {
+ MOZ_ASSERT(data == data_);
+ MOZ_ASSERT(rot == rot_);
+ }
+
+ bool invalid() const { return invalid_; }
+
+ uint32_t encode() const {
+ MOZ_ASSERT(!invalid_);
+ return data_ | (rot_ << 8);
+ };
+};
+
+class Imm8Data
+{
+ uint32_t imm4L_ : 4;
+ uint32_t pad_ : 4;
+ uint32_t imm4H_ : 4;
+
+ public:
+ explicit Imm8Data(uint32_t imm)
+ : imm4L_(imm & 0xf), imm4H_(imm >> 4)
+ {
+ MOZ_ASSERT(imm <= 0xff);
+ }
+
+ uint32_t encode() const {
+ return imm4L_ | (imm4H_ << 8);
+ };
+};
+
+// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
+class Imm8VFPOffData
+{
+ uint32_t data_;
+
+ public:
+ explicit Imm8VFPOffData(uint32_t imm)
+ : data_(imm)
+ {
+ MOZ_ASSERT((imm & ~(0xff)) == 0);
+ }
+ uint32_t encode() const {
+ return data_;
+ };
+};
+
+// ARM can magically encode 256 very special immediates to be moved into a
+// register.
+struct Imm8VFPImmData
+{
+ // This structure's members are public and it has no constructor to
+ // initialize them, for a very special reason. Were this structure to
+ // have a constructor, the initialization for DoubleEncoder's internal
+ // table (see below) would require a rather large static constructor on
+ // some of our supported compilers. The known solution to this is to mark
+ // the constructor constexpr, but, again, some of our supported
+ // compilers don't support constexpr! So we are reduced to public
+ // members and eschewing a constructor in hopes that the initialization
+ // of DoubleEncoder's table is correct.
+ uint32_t imm4L : 4;
+ uint32_t imm4H : 4;
+ int32_t isInvalid : 24;
+
+ uint32_t encode() const {
+        // This assert is an attempt at ensuring that we don't create random
+        // instances of this structure and then ask to encode() them.
+ MOZ_ASSERT(isInvalid == 0);
+ return imm4L | (imm4H << 16);
+ };
+};
+
+class Imm12Data
+{
+ uint32_t data_ : 12;
+
+ public:
+ explicit Imm12Data(uint32_t imm)
+ : data_(imm)
+ {
+ MOZ_ASSERT(data_ == imm);
+ }
+
+ uint32_t encode() const {
+ return data_;
+ }
+};
+
+class RIS
+{
+ uint32_t shiftAmount_ : 5;
+
+ public:
+ explicit RIS(uint32_t imm)
+ : shiftAmount_(imm)
+ {
+ MOZ_ASSERT(shiftAmount_ == imm);
+ }
+
+ explicit RIS(Reg r)
+ : shiftAmount_(r.shiftAmount())
+ { }
+
+ uint32_t encode() const {
+ return shiftAmount_;
+ }
+};
+
+class RRS
+{
+ bool mustZero_ : 1;
+ // The register that holds the shift amount.
+ uint32_t rs_ : 4;
+
+ public:
+ explicit RRS(uint32_t rs)
+ : rs_(rs)
+ {
+ MOZ_ASSERT(rs_ == rs);
+ }
+
+ uint32_t encode() const {
+ return rs_ << 1;
+ }
+};
+
+} // namespace datastore
+
+class MacroAssemblerARM;
+class Operand;
+
+class Operand2
+{
+ friend class Operand;
+ friend class MacroAssemblerARM;
+ friend class InstALU;
+
+ uint32_t oper_ : 31;
+ bool invalid_ : 1;
+
+ protected:
+ explicit Operand2(datastore::Imm8mData base)
+ : oper_(base.invalid() ? -1 : (base.encode() | uint32_t(IsImmOp2))),
+ invalid_(base.invalid())
+ { }
+
+ explicit Operand2(datastore::Reg base)
+ : oper_(base.encode() | uint32_t(IsNotImmOp2)),
+ invalid_(false)
+ { }
+
+ private:
+ explicit Operand2(uint32_t blob)
+ : oper_(blob),
+ invalid_(false)
+ { }
+
+ public:
+ bool isO2Reg() const {
+ return !(oper_ & IsImmOp2);
+ }
+
+ Op2Reg toOp2Reg() const;
+
+ bool isImm8() const {
+ return oper_ & IsImmOp2;
+ }
+
+ bool invalid() const {
+ return invalid_;
+ }
+
+ uint32_t encode() const {
+ return oper_;
+ }
+};
+
+class Imm8 : public Operand2
+{
+ public:
+ explicit Imm8(uint32_t imm)
+ : Operand2(EncodeImm(imm))
+ { }
+
+ static datastore::Imm8mData EncodeImm(uint32_t imm) {
+ // RotateLeft below may not be called with a shift of zero.
+ if (imm <= 0xFF)
+ return datastore::Imm8mData(imm, 0);
+
+ // An encodable integer has a maximum of 8 contiguous set bits,
+ // with an optional wrapped left rotation to even bit positions.
+ for (int rot = 1; rot < 16; rot++) {
+ uint32_t rotimm = mozilla::RotateLeft(imm, rot * 2);
+ if (rotimm <= 0xFF)
+ return datastore::Imm8mData(rotimm, rot);
+ }
+ return datastore::Imm8mData();
+ }
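+    // For illustration, a value that doesn't fit in the low byte directly:
+    // EncodeImm(0x0000ff00) finds rot == 12, since
+    // mozilla::RotateLeft(0x0000ff00, 24) == 0xff, and returns
+    // Imm8mData(0xff, 12); the hardware rotates 0xff right by 2 * 12 == 24
+    // bits to recover 0x0000ff00.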
+
+ // Pair template?
+ struct TwoImm8mData
+ {
+ datastore::Imm8mData fst_, snd_;
+
+ TwoImm8mData() = default;
+
+ TwoImm8mData(datastore::Imm8mData fst, datastore::Imm8mData snd)
+ : fst_(fst), snd_(snd)
+ { }
+
+ datastore::Imm8mData fst() const { return fst_; }
+ datastore::Imm8mData snd() const { return snd_; }
+ };
+
+ static TwoImm8mData EncodeTwoImms(uint32_t);
+};
+
+class Op2Reg : public Operand2
+{
+ public:
+ explicit Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
+ : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode()))
+ { }
+
+ explicit Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
+ : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode()))
+ { }
+};
+
+static_assert(sizeof(Op2Reg) == sizeof(datastore::Reg),
+ "datastore::Reg(const Op2Reg&) constructor relies on Reg/Op2Reg having same size");
+
+class O2RegImmShift : public Op2Reg
+{
+ public:
+ explicit O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
+ : Op2Reg(rn, type, datastore::RIS(shift))
+ { }
+};
+
+class O2RegRegShift : public Op2Reg
+{
+ public:
+ explicit O2RegRegShift(Register rn, ShiftType type, Register rs)
+ : Op2Reg(rn, type, datastore::RRS(rs.code()))
+ { }
+};
+
+O2RegImmShift O2Reg(Register r);
+O2RegImmShift lsl(Register r, int amt);
+O2RegImmShift lsr(Register r, int amt);
+O2RegImmShift asr(Register r, int amt);
+O2RegImmShift rol(Register r, int amt);
+O2RegImmShift ror(Register r, int amt);
+
+O2RegRegShift lsl(Register r, Register amt);
+O2RegRegShift lsr(Register r, Register amt);
+O2RegRegShift asr(Register r, Register amt);
+O2RegRegShift ror(Register r, Register amt);
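+// For illustration: lsl(r1, 4) above builds an O2RegImmShift, a
+// register-shifted-by-a-constant Operand2 encoding "r1, lsl #4", while the
+// lsl(r1, r2) overload builds an O2RegRegShift encoding "r1, lsl r2".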
+
+// An offset from a register to be used for ldr/str. This should include the
+// sign bit, since ARM has "signed-magnitude" offsets. That is, it encodes an
+// unsigned offset, and the instruction specifies whether the offset is positive
+// or negative. The +/- bit is necessary if the instruction set wants to be able
+// to have a negative register offset, e.g. ldr pc, [r1,-r2];
+class DtrOff
+{
+ uint32_t data_;
+
+ protected:
+ explicit DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
+ : data_(immdata.encode() | uint32_t(IsImmDTR) | uint32_t(iu))
+ { }
+
+ explicit DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
+ : data_(reg.encode() | uint32_t(IsNotImmDTR) | iu)
+ { }
+
+ public:
+ uint32_t encode() const { return data_; }
+};
+
+class DtrOffImm : public DtrOff
+{
+ public:
+ explicit DtrOffImm(int32_t imm)
+ : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)), imm >= 0 ? IsUp : IsDown)
+ {
+ MOZ_ASSERT(mozilla::Abs(imm) < 4096);
+ }
+};
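+// For illustration: DtrOffImm(-4) stores the magnitude 4 as an Imm12Data and
+// selects IsDown, so the hardware subtracts rather than adds the offset; this
+// is the signed-magnitude scheme described in the DtrOff comment above.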
+
+class DtrOffReg : public DtrOff
+{
+ // These are designed to be called by a constructor of a subclass.
+ // Constructing the necessary RIS/RRS structures is annoying.
+
+ protected:
+ explicit DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
+ : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu)
+ { }
+
+ explicit DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
+ : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu)
+ { }
+};
+
+class DtrRegImmShift : public DtrOffReg
+{
+ public:
+ explicit DtrRegImmShift(Register rn, ShiftType type, uint32_t shift, IsUp_ iu = IsUp)
+ : DtrOffReg(rn, type, datastore::RIS(shift), iu)
+ { }
+};
+
+class DtrRegRegShift : public DtrOffReg
+{
+ public:
+ explicit DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
+ : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu)
+ { }
+};
+
+// We will frequently want to bundle a register with its offset so that we have
+// an "operand" to a load instruction.
+class DTRAddr
+{
+ friend class Operand;
+
+ uint32_t data_;
+
+ public:
+ explicit DTRAddr(Register reg, DtrOff dtr)
+ : data_(dtr.encode() | (reg.code() << 16))
+ { }
+
+ uint32_t encode() const {
+ return data_;
+ }
+
+ Register getBase() const {
+ return Register::FromCode((data_ >> 16) & 0xf);
+ }
+};
+
+// Offsets for the extended data transfer instructions:
+// ldrsh, ldrd, ldrsb, etc.
+class EDtrOff
+{
+ uint32_t data_;
+
+ protected:
+ explicit EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
+ : data_(imm8.encode() | IsImmEDTR | uint32_t(iu))
+ { }
+
+ explicit EDtrOff(Register rm, IsUp_ iu = IsUp)
+ : data_(rm.code() | IsNotImmEDTR | iu)
+ { }
+
+ public:
+ uint32_t encode() const {
+ return data_;
+ }
+};
+
+class EDtrOffImm : public EDtrOff
+{
+ public:
+ explicit EDtrOffImm(int32_t imm)
+ : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? IsUp : IsDown)
+ {
+ MOZ_ASSERT(mozilla::Abs(imm) < 256);
+ }
+};
+
+// This is the most-derived class, since the extended data transfer instructions
+// don't support any sort of modifying the "index" operand.
+class EDtrOffReg : public EDtrOff
+{
+ public:
+ explicit EDtrOffReg(Register rm)
+ : EDtrOff(rm)
+ { }
+};
+
+class EDtrAddr
+{
+ uint32_t data_;
+
+ public:
+ explicit EDtrAddr(Register r, EDtrOff off)
+ : data_(RN(r) | off.encode())
+ { }
+
+ uint32_t encode() const {
+ return data_;
+ }
+#ifdef DEBUG
+ Register maybeOffsetRegister() const {
+ if (data_ & IsImmEDTR)
+ return InvalidReg;
+ return Register::FromCode(data_ & 0xf);
+ }
+#endif
+};
+
+class VFPOff
+{
+ uint32_t data_;
+
+ protected:
+ explicit VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
+ : data_(imm.encode() | uint32_t(isup))
+ { }
+
+ public:
+ uint32_t encode() const {
+ return data_;
+ }
+};
+
+class VFPOffImm : public VFPOff
+{
+ public:
+ explicit VFPOffImm(int32_t imm)
+ : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4), imm < 0 ? IsDown : IsUp)
+ {
+ MOZ_ASSERT(mozilla::Abs(imm) <= 255 * 4);
+ }
+};
+
+class VFPAddr
+{
+ friend class Operand;
+
+ uint32_t data_;
+
+ public:
+ explicit VFPAddr(Register base, VFPOff off)
+ : data_(RN(base) | off.encode())
+ { }
+
+ uint32_t encode() const {
+ return data_;
+ }
+};
+
+class VFPImm
+{
+ uint32_t data_;
+
+ public:
+ explicit VFPImm(uint32_t topWordOfDouble);
+
+ static const VFPImm One;
+
+ uint32_t encode() const {
+ return data_;
+ }
+ bool isValid() const {
+ return data_ != -1U;
+ }
+};
+
+// A BOffImm is an immediate that is used for branches. Namely, it is the offset
+// that will be encoded in the branch instruction. This is the only sane way of
+// constructing a branch.
+class BOffImm
+{
+ friend class InstBranchImm;
+
+ uint32_t data_;
+
+ public:
+ explicit BOffImm(int offset)
+ : data_((offset - 8) >> 2 & 0x00ffffff)
+ {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ if (!IsInRange(offset))
+ MOZ_CRASH("BOffImm offset out of range");
+ }
+
+ explicit BOffImm()
+ : data_(INVALID)
+ { }
+
+ private:
+ explicit BOffImm(const Instruction& inst);
+
+ public:
+ static const uint32_t INVALID = 0x00800000;
+
+ uint32_t encode() const {
+ return data_;
+ }
+ int32_t decode() const {
+ return ((int32_t(data_) << 8) >> 6) + 8;
+ }
+
+ static bool IsInRange(int offset) {
+ if ((offset - 8) < -33554432)
+ return false;
+ if ((offset - 8) > 33554428)
+ return false;
+ return true;
+ }
+
+ bool isInvalid() const {
+ return data_ == INVALID;
+ }
+ Instruction* getDest(Instruction* src) const;
+};
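+
+// Worked example: a branch whose target is 12 bytes past the branch
+// instruction itself is constructed as BOffImm(12), so
+// data_ == ((12 - 8) >> 2) & 0x00ffffff == 1, and decode() recovers 12. The
+// -8/+8 bias reflects the ARM convention that PC reads as the address of the
+// branch plus 8.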
+
+class Imm16
+{
+ uint32_t lower_ : 12;
+ uint32_t pad_ : 4;
+ uint32_t upper_ : 4;
+ uint32_t invalid_ : 12;
+
+ public:
+ explicit Imm16();
+ explicit Imm16(uint32_t imm);
+ explicit Imm16(Instruction& inst);
+
+ uint32_t encode() const {
+ return lower_ | (upper_ << 16);
+ }
+ uint32_t decode() const {
+ return lower_ | (upper_ << 12);
+ }
+
+ bool isInvalid() const {
+ return invalid_;
+ }
+};
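+
+// For example (assuming the uint32_t constructor splits the value the way
+// decode() implies), Imm16(0xabcd) holds lower_ == 0xbcd and upper_ == 0xa:
+// encode() yields 0xa0bcd, matching the movw/movt bit layout (imm4 in bits
+// 16-19, imm12 in bits 0-11), while decode() recovers the plain 16-bit value
+// 0xabcd.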
+
+// It would be preferable for these not to exist, since essentially no
+// instruction ever takes more than one of them; however, the MIR wants to
+// have only one type of argument to functions, so here we are.
+class Operand
+{
+ // The encoding of registers is the same for OP2, DTR and EDTR yet the type
+ // system doesn't let us express this, so choices must be made.
+ public:
+ enum class Tag : uint8_t {
+ OP2,
+ MEM,
+ FOP
+ };
+
+ private:
+ Tag tag_ : 8;
+ uint32_t reg_ : 5;
+ int32_t offset_;
+
+ public:
+ explicit Operand(Register reg)
+ : tag_(Tag::OP2), reg_(reg.code())
+ { }
+
+ explicit Operand(FloatRegister freg)
+ : tag_(Tag::FOP), reg_(freg.code())
+ { }
+
+ explicit Operand(Register base, Imm32 off)
+ : tag_(Tag::MEM), reg_(base.code()), offset_(off.value)
+ { }
+
+ explicit Operand(Register base, int32_t off)
+ : tag_(Tag::MEM), reg_(base.code()), offset_(off)
+ { }
+
+ explicit Operand(const Address& addr)
+ : tag_(Tag::MEM), reg_(addr.base.code()), offset_(addr.offset)
+ { }
+
+ public:
+ Tag tag() const {
+ return tag_;
+ }
+
+ Operand2 toOp2() const {
+ MOZ_ASSERT(tag_ == Tag::OP2);
+ return O2Reg(Register::FromCode(reg_));
+ }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag_ == Tag::OP2);
+ return Register::FromCode(reg_);
+ }
+
+ Address toAddress() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return Address(Register::FromCode(reg_), offset_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return offset_;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return reg_;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return Register::FromCode(reg_);
+ }
+ DTRAddr toDTRAddr() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return DTRAddr(baseReg(), DtrOffImm(offset_));
+ }
+ VFPAddr toVFPAddr() const {
+ MOZ_ASSERT(tag_ == Tag::MEM);
+ return VFPAddr(baseReg(), VFPOffImm(offset_));
+ }
+};
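+
+// For instance (the r1 register name is assumed), Operand(r1, 8) tags a MEM
+// operand with base r1 and displacement 8; toDTRAddr() then rebuilds it as
+// DTRAddr(r1, DtrOffImm(8)) and toVFPAddr() as VFPAddr(r1, VFPOffImm(8)).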
+
+inline Imm32
+Imm64::firstHalf() const
+{
+ return low();
+}
+
+inline Imm32
+Imm64::secondHalf() const
+{
+ return hi();
+}
+
+void
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+ ReprotectCode reprotect = DontReprotect);
+
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+class InstructionIterator;
+class Assembler;
+typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, Assembler> ARMBuffer;
+
+class Assembler : public AssemblerShared
+{
+ public:
+ // ARM conditional constants:
+ enum ARMCondition {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ };
+
+ enum Condition {
+ Equal = EQ,
+ NotEqual = NE,
+ Above = HI,
+ AboveOrEqual = CS,
+ Below = CC,
+ BelowOrEqual = LS,
+ GreaterThan = GT,
+ GreaterThanOrEqual = GE,
+ LessThan = LT,
+ LessThanOrEqual = LE,
+ Overflow = VS,
+ CarrySet = CS,
+ CarryClear = CC,
+ Signed = MI,
+ NotSigned = PL,
+ Zero = EQ,
+ NonZero = NE,
+ Always = AL,
+
+ VFP_NotEqualOrUnordered = NE,
+ VFP_Equal = EQ,
+ VFP_Unordered = VS,
+ VFP_NotUnordered = VC,
+ VFP_GreaterThanOrEqualOrUnordered = CS,
+ VFP_GreaterThanOrEqual = GE,
+ VFP_GreaterThanOrUnordered = HI,
+ VFP_GreaterThan = GT,
+ VFP_LessThanOrEqualOrUnordered = LE,
+ VFP_LessThanOrEqual = LS,
+ VFP_LessThanOrUnordered = LT,
+ VFP_LessThan = CC // MI is valid too.
+ };
+
+ // Bit set when a DoubleCondition does not map to a single ARM condition.
+ // The macro assembler has to special-case these conditions, or else
+ // ConditionFromDoubleCondition will complain.
+ static const int DoubleConditionBitSpecial = 0x1;
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is
+ // ordered - i.e. neither operand is NaN.
+ DoubleOrdered = VFP_NotUnordered,
+ DoubleEqual = VFP_Equal,
+ DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
+ DoubleGreaterThan = VFP_GreaterThan,
+ DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
+ DoubleLessThan = VFP_LessThan,
+ DoubleLessThanOrEqual = VFP_LessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = VFP_Unordered,
+ DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered = VFP_LessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered
+ };
+
+ Condition getCondition(uint32_t inst) {
+ return (Condition) (0xf0000000 & inst);
+ }
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ return static_cast<Condition>(cond);
+ }
+
+ enum BarrierOption {
+ BarrierSY = 15, // Full system barrier
+ BarrierST = 14 // StoreStore barrier
+ };
+
+ // This should be protected, but since CodeGenerator wants to use it, it
+ // needs to go out here :(
+
+ BufferOffset nextOffset() {
+ return m_buffer.nextOffset();
+ }
+
+ protected:
+ // Shim around AssemblerBufferWithConstantPools::allocEntry.
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data, ARMBuffer::PoolEntry* pe = nullptr,
+ bool markAsBranch = false, bool loadToPC = false);
+
+ Instruction* editSrc(BufferOffset bo) {
+ return m_buffer.getInst(bo);
+ }
+
+#ifdef JS_DISASM_ARM
+ static void spewInst(Instruction* i);
+ void spew(Instruction* i);
+ void spewBranch(Instruction* i, Label* target);
+ void spewData(BufferOffset addr, size_t numInstr, bool loadToPC);
+ void spewLabel(Label* label);
+ void spewRetarget(Label* label, Label* target);
+ void spewTarget(Label* l);
+#endif
+
+ public:
+ void resetCounter();
+ uint32_t actualIndex(uint32_t) const;
+ static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index);
+ static uint32_t NopFill;
+ static uint32_t GetNopFill();
+ static uint32_t AsmPoolMaxOffset;
+ static uint32_t GetPoolMaxOffset();
+
+ protected:
+    // Structure for fixing up pc-relative loads/jumps when the machine code
+    // gets moved (executable copy, gc, etc.).
+ struct RelativePatch
+ {
+ void* target_;
+ Relocation::Kind kind_;
+
+ public:
+ RelativePatch(void* target, Relocation::Kind kind)
+ : target_(target), kind_(kind)
+ { }
+ void* target() const { return target_; }
+ Relocation::Kind kind() const { return kind_; }
+ };
+
+ // TODO: this should actually be a pool-like object. It is currently a big
+ // hack, and probably shouldn't exist.
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter preBarriers_;
+
+ ARMBuffer m_buffer;
+
+#ifdef JS_DISASM_ARM
+ private:
+ class SpewNodes {
+ struct Node {
+ uint32_t key;
+ uint32_t value;
+ Node* next;
+ };
+
+ Node* nodes;
+
+ public:
+ SpewNodes() : nodes(nullptr) {}
+ ~SpewNodes();
+
+ bool lookup(uint32_t key, uint32_t* value);
+ bool add(uint32_t key, uint32_t value);
+ bool remove(uint32_t key);
+ };
+
+ SpewNodes spewNodes_;
+ uint32_t spewNext_;
+ Sprinter* printer_;
+
+ bool spewDisabled();
+ uint32_t spewResolve(Label* l);
+ uint32_t spewProbe(Label* l);
+ uint32_t spewDefine(Label* l);
+ void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void spew(const char* fmt, va_list args);
+#endif
+
+ public:
+    // For the alignment fill use NOP: 0xe320f000, i.e. (Always | InstNOP::NopInst).
+    // For the nopFill use a branch to the next instruction: 0xeaffffff.
+ Assembler()
+ : m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff, GetNopFill()),
+#ifdef JS_DISASM_ARM
+ spewNext_(1000),
+ printer_(nullptr),
+#endif
+ isFinished(false),
+ dtmActive(false),
+ dtmCond(Always)
+ { }
+
+    // We need to wait until an AutoJitContextAlloc is created by the
+    // MacroAssembler before allocating any space.
+ void initWithAllocator() {
+ m_buffer.initWithAllocator();
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer* trc);
+ void writeRelocation(BufferOffset src) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+
+    // Unlike the x86/x64 version, the data relocation has to be executed
+    // before recovering the pointer, not after.
+    void writeDataRelocation(ImmGCPtr ptr) {
+        if (ptr.value) {
+            if (gc::IsInsideNursery(ptr.value))
+                embedsNurseryPointers_ = true;
+            dataRelocations_.writeUnsigned(nextOffset().getOffset());
+        }
+    }
+ void writePrebarrierOffset(CodeOffset label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ enum RelocBranchStyle {
+ B_MOVWT,
+ B_LDR_BX,
+ B_LDR,
+ B_MOVW_ADD
+ };
+
+ enum RelocStyle {
+ L_MOVWT,
+ L_LDR
+ };
+
+ public:
+    // Given the start of a control-flow sequence, grab the value that is
+    // finally branched to. Given the start of a function that loads an address
+    // into a register, get the address that ends up in the register.
+ template <class Iter>
+ static const uint32_t* GetCF32Target(Iter* iter);
+
+ static uintptr_t GetPointer(uint8_t*);
+ template <class Iter>
+ static const uint32_t* GetPtr32Target(Iter* iter, Register* dest = nullptr, RelocStyle* rs = nullptr);
+
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_DISASM_ARM
+ printer_ = sp;
+#endif
+ }
+
+ static const Register getStackPointer() {
+ return StackPointer;
+ }
+
+ private:
+ bool isFinished;
+ public:
+ void finish();
+ bool asmMergeWith(Assembler& other);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+ void copyPreBarrierTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes, after pools are flushed.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+ size_t preBarrierTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a blob of binary into the instruction stream *OR* into a
+ // destination address.
+ BufferOffset writeInst(uint32_t x);
+
+ // As above, but also mark the instruction as a branch.
+ BufferOffset writeBranchInst(uint32_t x, Label* documentation = nullptr);
+
+    // Write a placeholder NOP for a branch into the instruction stream
+    // (in order to adjust assembler addresses and mark it as a branch); it
+    // will be overwritten subsequently.
+ BufferOffset allocBranchInst();
+
+ // A static variant for the cases where we don't want to have an assembler
+ // object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ void writeCodePointer(CodeOffset* label);
+
+ void haltingAlign(int alignment);
+ void nopAlign(int alignment);
+ BufferOffset as_nop();
+ BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
+ ALUOp op, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_mov(Register dest,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_mvn(Register dest, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+
+ static void as_alu_patch(Register dest, Register src1, Operand2 op2,
+ ALUOp op, SBit s, Condition c, uint32_t* pos);
+ static void as_mov_patch(Register dest,
+ Operand2 op2, SBit s, Condition c, uint32_t* pos);
+
+ // Logical operations:
+ BufferOffset as_and(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_bic(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_eor(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_orr(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ // Mathematical operations:
+ BufferOffset as_adc(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_add(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_sbc(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_sub(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_rsb(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_rsc(Register dest, Register src1,
+ Operand2 op2, SBit s = LeaveCC, Condition c = Always);
+ // Test operations:
+ BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_cmp(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_teq(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always);
+
+ // Sign extension operations:
+ BufferOffset as_sxtb(Register dest, Register src, int rotate, Condition c = Always);
+ BufferOffset as_sxth(Register dest, Register src, int rotate, Condition c = Always);
+ BufferOffset as_uxtb(Register dest, Register src, int rotate, Condition c = Always);
+ BufferOffset as_uxth(Register dest, Register src, int rotate, Condition c = Always);
+
+    // Not quite ALU worthy, but useful nonetheless. These also have the issue
+    // of being formatted completely differently from the standard ALU operations.
+ BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always);
+ BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always);
+
+ static void as_movw_patch(Register dest, Imm16 imm, Condition c, Instruction* pos);
+ static void as_movt_patch(Register dest, Imm16 imm, Condition c, Instruction* pos);
+
+ BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
+ MULOp op, SBit s, Condition c = Always);
+ BufferOffset as_mul(Register dest, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_umaal(Register dest1, Register dest2, Register src1, Register src2,
+ Condition c = Always);
+ BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
+ Condition c = Always);
+ BufferOffset as_umull(Register dest1, Register dest2, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_umlal(Register dest1, Register dest2, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_smull(Register dest1, Register dest2, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+
+ BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
+ BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
+ BufferOffset as_clz(Register dest, Register src, Condition c = Always);
+
+ // Data transfer instructions: ldr, str, ldrb, strb.
+ // Using an int to differentiate between 8 bits and 32 bits is overkill.
+ BufferOffset as_dtr(LoadStore ls, int size, Index mode,
+ Register rt, DTRAddr addr, Condition c = Always);
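+    // Illustrative use of as_dtr (register names and the Offset index mode
+    // are assumed to be defined elsewhere in this backend): a 32-bit load of
+    // [r1, #4] into r0 could be emitted as
+    //   as_dtr(IsLoad, 32, Offset, r0, DTRAddr(r1, DtrOffImm(4)));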
+
+ static void as_dtr_patch(LoadStore ls, int size, Index mode,
+ Register rt, DTRAddr addr, Condition c, uint32_t* dest);
+
+ // Handles all of the other integral data transferring functions:
+ // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
+ BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
+ Register rt, EDtrAddr addr, Condition c = Always);
+
+ BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
+ DTMMode mode, DTMWriteBack wb, Condition c = Always);
+
+ // Overwrite a pool entry with new data.
+ static void WritePoolEntry(Instruction* addr, Condition c, uint32_t data);
+
+ // Load a 32 bit immediate from a pool into a register.
+ BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
+ // Make a patchable jump that can target the entire 32 bit address space.
+ BufferOffset as_BranchPool(uint32_t value, RepatchLabel* label,
+ ARMBuffer::PoolEntry* pe = nullptr, Condition c = Always,
+ Label* documentation = nullptr);
+
+ // Load a 64 bit floating point immediate from a pool into a register.
+ BufferOffset as_FImm64Pool(VFPRegister dest, wasm::RawF64 value, Condition c = Always);
+ // Load a 32 bit floating point immediate from a pool into a register.
+ BufferOffset as_FImm32Pool(VFPRegister dest, wasm::RawF32 value, Condition c = Always);
+
+ // Atomic instructions: ldrex, ldrexh, ldrexb, strex, strexh, strexb.
+ //
+ // The halfword and byte versions are available from ARMv6K forward.
+ //
+ // The word versions are available from ARMv6 forward and can be used to
+ // implement the halfword and byte versions on older systems.
+
+ // LDREX rt, [rn]
+ BufferOffset as_ldrex(Register rt, Register rn, Condition c = Always);
+ BufferOffset as_ldrexh(Register rt, Register rn, Condition c = Always);
+ BufferOffset as_ldrexb(Register rt, Register rn, Condition c = Always);
+
+ // STREX rd, rt, [rn]. Constraint: rd != rn, rd != rt.
+ BufferOffset as_strex(Register rd, Register rt, Register rn, Condition c = Always);
+ BufferOffset as_strexh(Register rd, Register rt, Register rn, Condition c = Always);
+ BufferOffset as_strexb(Register rd, Register rt, Register rn, Condition c = Always);
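+    // These are normally used as a pair in a retry loop: ldrex marks the
+    // address for exclusive access and loads the old value; strex stores a
+    // new value only if the reservation is still held, writing 0 to rd on
+    // success and 1 on failure, so callers branch back while rd is nonzero.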
+
+ // Memory synchronization.
+ // These are available from ARMv7 forward.
+ BufferOffset as_dmb(BarrierOption option = BarrierSY);
+ BufferOffset as_dsb(BarrierOption option = BarrierSY);
+ BufferOffset as_isb();
+
+ // Memory synchronization for architectures before ARMv7.
+ BufferOffset as_dsb_trap();
+ BufferOffset as_dmb_trap();
+ BufferOffset as_isb_trap();
+
+ // Control flow stuff:
+
+    // bx can *only* branch to a register, never to an immediate.
+ BufferOffset as_bx(Register r, Condition c = Always);
+
+ // Branch can branch to an immediate *or* to a register. Branches to
+ // immediates are pc relative, branches to registers are absolute.
+ BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);
+
+ BufferOffset as_b(Label* l, Condition c = Always);
+ BufferOffset as_b(wasm::TrapDesc target, Condition c = Always);
+ BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
+
+    // blx can go to either an immediate or a register. When blx'ing to a
+    // register, we change processor mode depending on the low bit of the
+    // register; when blx'ing to an immediate, we *always* change processor
+    // state.
+ BufferOffset as_blx(Label* l);
+
+ BufferOffset as_blx(Register r, Condition c = Always);
+ BufferOffset as_bl(BOffImm off, Condition c, Label* documentation = nullptr);
+    // bl can only branch+link to an immediate, never to a register, and it
+    // never changes processor state.
+ BufferOffset as_bl();
+ // bl #imm can have a condition code, blx #imm cannot.
+ // blx reg can be conditional.
+ BufferOffset as_bl(Label* l, Condition c);
+ BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
+
+ BufferOffset as_mrs(Register r, Condition c = Always);
+ BufferOffset as_msr(Register r, Condition c = Always);
+
+ // VFP instructions!
+ private:
+ enum vfp_size {
+ IsDouble = 1 << 8,
+ IsSingle = 0 << 8
+ };
+
+ BufferOffset writeVFPInst(vfp_size sz, uint32_t blob);
+
+ static void WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest);
+
+    // Unityped variants: all registers hold the same type (IEEE 754 single or
+    // double). Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
+ BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ VFPOp op, Condition c = Always);
+
+ public:
+ BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always);
+
+    // Specifically, a move between two same-sized registers.
+ BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
+
+ // Transfer between Core and VFP.
+ enum FloatToCore_ {
+ FloatToCore = 1 << 20,
+ CoreToFloat = 0 << 20
+ };
+
+ private:
+ enum VFPXferSize {
+ WordTransfer = 0x02000010,
+ DoubleTransfer = 0x00400010
+ };
+
+ public:
+    // Unlike the next function, moves between the core registers and the vfp
+    // registers can't be typed quite as precisely, since we don't want to
+    // munge the VFPRegister type to also include core registers. Thus, the core
+    // and vfp registers are passed in based on their type, and src/dest is
+    // determined by the FloatToCore_ argument.
+
+ BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
+ Condition c = Always, int idx = 0);
+
+ // Our encoding actually allows just the src and the dest (and their types)
+ // to uniquely specify the encoding that we are going to use.
+ BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
+ Condition c = Always);
+
+ // Hard coded to a 32 bit fixed width result for now.
+ BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint,
+ bool toFixed, Condition c = Always);
+
+ // Transfer between VFP and memory.
+ BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c = Always /* vfp doesn't have a wb option*/);
+
+ static void as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c /* vfp doesn't have a wb option */, uint32_t* dest);
+
+ // VFP's ldm/stm work differently from the standard arm ones. You can only
+ // transfer a range.
+
+ BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
+ /* also has update conditions */ Condition c = Always);
+
+ BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
+
+ BufferOffset as_vmrs(Register r, Condition c = Always);
+ BufferOffset as_vmsr(Register r, Condition c = Always);
+
+ // Label operations.
+ bool nextLink(BufferOffset b, BufferOffset* next);
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ void bind(RepatchLabel* label);
+ void bindLater(Label* label, wasm::TrapDesc target);
+ uint32_t currentOffset() {
+ return nextOffset().getOffset();
+ }
+ void retarget(Label* label, Label* target);
+ // I'm going to pretend this doesn't exist for now.
+ void retarget(Label* label, void* target, Relocation::Kind reloc);
+
+ void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
+
+ // See Bind
+ size_t labelToPatchOffset(CodeOffset label) {
+ return label.offset();
+ }
+
+ void as_bkpt();
+
+ public:
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ static bool SupportsFloatingPoint() {
+ return HasVFP();
+ }
+ static bool SupportsUnalignedAccesses() {
+ return HasARMv7();
+ }
+ static bool SupportsSimd() {
+ return js::jit::SupportsSimd;
+ }
+
+ protected:
+ void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ public:
+ // The buffer is about to be linked, make sure any constant pools or excess
+ // bookkeeping has been flushed to the instruction stream.
+ void flush() {
+ MOZ_ASSERT(!isFinished);
+ m_buffer.flushPool();
+ return;
+ }
+
+ void comment(const char* msg) {
+#ifdef JS_DISASM_ARM
+ spew("; %s", msg);
+#endif
+ }
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ // Since I can't think of a reasonable default for the mode, I'm going to
+ // leave it as a required argument.
+ void startDataTransferM(LoadStore ls, Register rm,
+ DTMMode mode, DTMWriteBack update = NoWriteBack,
+ Condition c = Always)
+ {
+ MOZ_ASSERT(!dtmActive);
+ dtmUpdate = update;
+ dtmBase = rm;
+ dtmLoadStore = ls;
+ dtmLastReg = -1;
+ dtmRegBitField = 0;
+ dtmActive = 1;
+ dtmCond = c;
+ dtmMode = mode;
+ }
+
+ void transferReg(Register rn) {
+ MOZ_ASSERT(dtmActive);
+ MOZ_ASSERT(rn.code() > dtmLastReg);
+ dtmRegBitField |= 1 << rn.code();
+ if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) {
+ MOZ_CRASH("ARM Spec says this is invalid");
+ }
+ }
+ void finishDataTransfer() {
+ dtmActive = false;
+ as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond);
+ }
+
+ void startFloatTransferM(LoadStore ls, Register rm,
+ DTMMode mode, DTMWriteBack update = NoWriteBack,
+ Condition c = Always)
+ {
+ MOZ_ASSERT(!dtmActive);
+ dtmActive = true;
+ dtmUpdate = update;
+ dtmLoadStore = ls;
+ dtmBase = rm;
+ dtmCond = c;
+ dtmLastReg = -1;
+ dtmMode = mode;
+ dtmDelta = 0;
+ }
+ void transferFloatReg(VFPRegister rn)
+ {
+ if (dtmLastReg == -1) {
+ vdtmFirstReg = rn.code();
+ } else {
+ if (dtmDelta == 0) {
+ dtmDelta = rn.code() - dtmLastReg;
+ MOZ_ASSERT(dtmDelta == 1 || dtmDelta == -1);
+ }
+ MOZ_ASSERT(dtmLastReg >= 0);
+ MOZ_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta);
+ }
+
+ dtmLastReg = rn.code();
+ }
+ void finishFloatTransfer() {
+ MOZ_ASSERT(dtmActive);
+ dtmActive = false;
+ MOZ_ASSERT(dtmLastReg != -1);
+ dtmDelta = dtmDelta ? dtmDelta : 1;
+ // The operand for the vstr/vldr instruction is the lowest register in the range.
+ int low = Min(dtmLastReg, vdtmFirstReg);
+ int high = Max(dtmLastReg, vdtmFirstReg);
+ // Fencepost problem.
+ int len = high - low + 1;
+        // vdtm can only transfer 16 registers at once. If we need to transfer
+        // more, we must either jump through hoops or use write-back to update
+        // the base register.
+ MOZ_ASSERT_IF(len > 16, dtmUpdate == WriteBack);
+
+ int adjustLow = dtmLoadStore == IsStore ? 0 : 1;
+ int adjustHigh = dtmLoadStore == IsStore ? -1 : 0;
+ while (len > 0) {
+ // Limit the instruction to 16 registers.
+ int curLen = Min(len, 16);
+ // If it is a store, we want to start at the high end and move down
+ // (e.g. vpush d16-d31; vpush d0-d15).
+ int curStart = (dtmLoadStore == IsStore) ? high - curLen + 1 : low;
+ as_vdtm(dtmLoadStore, dtmBase,
+ VFPRegister(FloatRegister::FromCode(curStart)),
+ curLen, dtmCond);
+ // Update the bounds.
+ low += adjustLow * curLen;
+ high += adjustHigh * curLen;
+ // Update the length parameter.
+ len -= curLen;
+ }
+ }
+
+ private:
+ int dtmRegBitField;
+ int vdtmFirstReg;
+ int dtmLastReg;
+ int dtmDelta;
+ Register dtmBase;
+ DTMWriteBack dtmUpdate;
+ DTMMode dtmMode;
+ LoadStore dtmLoadStore;
+ bool dtmActive;
+ Condition dtmCond;
+
+ public:
+ enum {
+ PadForAlign8 = (int)0x00,
+ PadForAlign16 = (int)0x0000,
+ PadForAlign32 = (int)0xe12fff7f // 'bkpt 0xffff'
+ };
+
+    // API for speaking with the IonAssemblerBufferWithConstantPools: generate
+    // an initial placeholder instruction that we want to fix up later.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+
+ // Take the stub value that was written in before, and write in an actual
+ // load using the index we'd computed previously as well as the address of
+ // the pool start.
+ static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // We're not tracking short-range branches for ARM for now.
+ static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx, BufferOffset deadline,
+ BufferOffset veneer)
+ {
+ MOZ_CRASH();
+ }
+ // END API
+
+    // Move our entire pool into the instruction stream. This is to force an
+    // opportunistic dump of the pool, preferably when it is more convenient to
+    // do a dump.
+ void flushBuffer();
+ void enterNoPool(size_t maxInst);
+ void leaveNoPool();
+    // This should return a BOffImm, but we didn't want to require every place
+    // that uses the AssemblerBuffer to know about that class.
+ static ptrdiff_t GetBranchOffset(const Instruction* i);
+ static void RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final = true);
+ static void RetargetNearBranch(Instruction* i, int offset, bool final = true);
+ static void RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond);
+
+ static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest);
+
+ static uint32_t PatchWrite_NearCallSize();
+ static uint32_t NopSize() { return 4; }
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) {
+ MOZ_CRASH("Unused.");
+ }
+
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ return (offset + 1) & ~1;
+ }
+ static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
+
+ // Toggle a jmp or cmp emitted by toggledJump().
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ static uint8_t* BailoutTableStart(uint8_t* code);
+
+ static size_t ToggledCallSize(uint8_t* code);
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ bool bailed() {
+ return m_buffer.bail();
+ }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+ {
+ // Implement this if we implement a disassembler.
+ }
+}; // Assembler
+
+// An Instruction is a structure for both encoding and decoding any and all ARM
+// instructions. Many classes have not been implemented thus far.
+class Instruction
+{
+ uint32_t data;
+
+ protected:
+    // This is not for defaulting to Always; it is for instructions that
+    // cannot be made conditional and have the normally invalid 0b1111 cond
+    // field.
+ explicit Instruction(uint32_t data_, bool fake = false)
+ : data(data_ | 0xf0000000)
+ {
+ MOZ_ASSERT(fake || ((data_ & 0xf0000000) == 0));
+ }
+ // Standard constructor.
+ Instruction(uint32_t data_, Assembler::Condition c)
+ : data(data_ | (uint32_t) c)
+ {
+ MOZ_ASSERT((data_ & 0xf0000000) == 0);
+ }
+ // You should never create an instruction directly. You should create a more
+ // specific instruction which will eventually call one of these constructors
+ // for you.
+ public:
+ uint32_t encode() const {
+ return data;
+ }
+ // Check if this instruction is really a particular case.
+ template <class C>
+ bool is() const { return C::IsTHIS(*this); }
+
+ // Safely get a more specific variant of this pointer.
+ template <class C>
+ C* as() const { return C::AsTHIS(*this); }
+
+ const Instruction& operator=(Instruction src) {
+ data = src.data;
+ return *this;
+ }
+ // Since almost all instructions have condition codes, the condition code
+ // extractor resides in the base class.
+ Assembler::Condition extractCond() {
+ MOZ_ASSERT(data >> 28 != 0xf, "The instruction does not have condition code");
+ return (Assembler::Condition)(data & 0xf0000000);
+ }
+ // Get the next instruction in the instruction stream.
+    // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+ // Skipping pools with artificial guards.
+ Instruction* skipPool();
+
+    // Sometimes, an API wants a uint32_t (or a pointer to it) rather than an
+    // instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// Make sure that it is the right size.
+JS_STATIC_ASSERT(sizeof(Instruction) == 4);
+
+// Data Transfer Instructions.
+class InstDTR : public Instruction
+{
+ public:
+ enum IsByte_ {
+ IsByte = 0x00400000,
+ IsWord = 0x00000000
+ };
+ static const int IsDTR = 0x04000000;
+ static const int IsDTRMask = 0x0c000000;
+
+ // TODO: Replace the initialization with something that is safer.
+ InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
+ : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c)
+ { }
+
+ static bool IsTHIS(const Instruction& i);
+ static InstDTR* AsTHIS(const Instruction& i);
+
+};
+JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction));
+
+class InstLDR : public InstDTR
+{
+ public:
+ InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
+ : InstDTR(IsLoad, IsWord, mode, rt, addr, c)
+ { }
+
+ static bool IsTHIS(const Instruction& i);
+ static InstLDR* AsTHIS(const Instruction& i);
+
+ int32_t signedOffset() const {
+ int32_t offset = encode() & 0xfff;
+ if (IsUp_(encode() & IsUp) != IsUp)
+ return -offset;
+ return offset;
+ }
+ uint32_t* dest() const {
+ int32_t offset = signedOffset();
+        // When patching the load in PatchConstantPoolLoad, we ensure that the
+        // offset is a multiple of 4, offset by 8 bytes from the actual
+        // location. Indeed, when the base register is PC, ARM's 3-stage
+        // pipeline design means that PC is 8 bytes (= 2 * sizeof(uint32_t*))
+        // past the instruction actually being executed.
+ MOZ_ASSERT(offset % 4 == 0);
+ offset >>= 2;
+ return (uint32_t*)raw() + offset + 2;
+ }
+};
+JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR));
+
+class InstNOP : public Instruction
+{
+ public:
+ static const uint32_t NopInst = 0x0320f000;
+
+ InstNOP()
+ : Instruction(NopInst, Assembler::Always)
+ { }
+
+ static bool IsTHIS(const Instruction& i);
+ static InstNOP* AsTHIS(Instruction& i);
+};
+
+// Branching to a register, or calling a register
+class InstBranchReg : public Instruction
+{
+ protected:
+    // Don't use BranchTag yourself; use a derived instruction.
+ enum BranchTag {
+ IsBX = 0x012fff10,
+ IsBLX = 0x012fff30
+ };
+
+ static const uint32_t IsBRegMask = 0x0ffffff0;
+
+ InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
+ : Instruction(tag | rm.code(), c)
+ { }
+
+ public:
+ static bool IsTHIS (const Instruction& i);
+ static InstBranchReg* AsTHIS (const Instruction& i);
+
+ // Get the register that is being branched to
+ void extractDest(Register* dest);
+ // Make sure we are branching to a pre-known register
+ bool checkDest(Register dest);
+};
+JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction));
+
+// Branching to an immediate offset, or calling an immediate offset
+class InstBranchImm : public Instruction
+{
+ protected:
+ enum BranchTag {
+ IsB = 0x0a000000,
+ IsBL = 0x0b000000
+ };
+
+ static const uint32_t IsBImmMask = 0x0f000000;
+
+ InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
+ : Instruction(tag | off.encode(), c)
+ { }
+
+ public:
+ static bool IsTHIS (const Instruction& i);
+ static InstBranchImm* AsTHIS (const Instruction& i);
+
+ void extractImm(BOffImm* dest);
+};
+JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction));
+
+// Very specific branching instructions.
+class InstBXReg : public InstBranchReg
+{
+ public:
+ static bool IsTHIS (const Instruction& i);
+ static InstBXReg* AsTHIS (const Instruction& i);
+};
+
+class InstBLXReg : public InstBranchReg
+{
+ public:
+ InstBLXReg(Register reg, Assembler::Condition c)
+ : InstBranchReg(IsBLX, reg, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstBLXReg* AsTHIS (const Instruction& i);
+};
+
+class InstBImm : public InstBranchImm
+{
+ public:
+ InstBImm(BOffImm off, Assembler::Condition c)
+ : InstBranchImm(IsB, off, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstBImm* AsTHIS (const Instruction& i);
+};
+
+class InstBLImm : public InstBranchImm
+{
+ public:
+ InstBLImm(BOffImm off, Assembler::Condition c)
+ : InstBranchImm(IsBL, off, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstBLImm* AsTHIS (const Instruction& i);
+};
+
+// Both movw and movt. The layout of both the immediate and the destination
+// register is the same, so the code is shared.
+class InstMovWT : public Instruction
+{
+ protected:
+ enum WT {
+ IsW = 0x03000000,
+ IsT = 0x03400000
+ };
+ static const uint32_t IsWTMask = 0x0ff00000;
+
+ InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
+ : Instruction (RD(rd) | imm.encode() | wt, c)
+ { }
+
+ public:
+ void extractImm(Imm16* dest);
+ void extractDest(Register* dest);
+ bool checkImm(Imm16 dest);
+ bool checkDest(Register dest);
+
+ static bool IsTHIS (Instruction& i);
+ static InstMovWT* AsTHIS (Instruction& i);
+
+};
+JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));
+
+class InstMovW : public InstMovWT
+{
+ public:
+ InstMovW (Register rd, Imm16 imm, Assembler::Condition c)
+ : InstMovWT(rd, imm, IsW, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstMovW* AsTHIS (const Instruction& i);
+};
+
+class InstMovT : public InstMovWT
+{
+ public:
+ InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
+ : InstMovWT(rd, imm, IsT, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstMovT* AsTHIS (const Instruction& i);
+};
+
+class InstALU : public Instruction
+{
+ static const int32_t ALUMask = 0xc << 24;
+
+ public:
+ InstALU(Register rd, Register rn, Operand2 op2, ALUOp op, SBit s, Assembler::Condition c)
+ : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | s, c)
+ { }
+
+ static bool IsTHIS (const Instruction& i);
+ static InstALU* AsTHIS (const Instruction& i);
+
+ void extractOp(ALUOp* ret);
+ bool checkOp(ALUOp op);
+ void extractDest(Register* ret);
+ bool checkDest(Register rd);
+ void extractOp1(Register* ret);
+ bool checkOp1(Register rn);
+ Operand2 extractOp2();
+};
+
+class InstCMP : public InstALU
+{
+ public:
+ static bool IsTHIS (const Instruction& i);
+ static InstCMP* AsTHIS (const Instruction& i);
+};
+
+class InstMOV : public InstALU
+{
+ public:
+ static bool IsTHIS (const Instruction& i);
+ static InstMOV* AsTHIS (const Instruction& i);
+};
+
+
+class InstructionIterator
+{
+ private:
+ Instruction* i;
+
+ public:
+ explicit InstructionIterator(Instruction* i_);
+
+ Instruction* next() {
+ i = i->next();
+ return cur();
+ }
+ Instruction* cur() const {
+ return i;
+ }
+};
+
+static const uint32_t NumIntArgRegs = 4;
+
+// There are 16 *float* registers available for arguments.
+// If doubles are used, only half that many registers are available.
+static const uint32_t NumFloatArgRegs = 16;
+
+static inline bool
+GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (usedIntArgs >= NumIntArgRegs)
+ return false;
+
+ *out = Register::FromCode(usedIntArgs);
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once we have no
+// more actual argument registers to use, we fall back on whichever
+// CallTempReg* registers don't overlap the argument registers, and only fail
+// once those run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
+ return true;
+
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+
+#if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
+
+static inline uint32_t
+GetArgStackDisp(uint32_t arg)
+{
+ MOZ_ASSERT(!UseHardFpABI());
+ MOZ_ASSERT(arg >= NumIntArgRegs);
+ return (arg - NumIntArgRegs) * sizeof(intptr_t);
+}
+
+#endif
+
+
+#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
+
+static inline bool
+GetFloat32ArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out)
+{
+ MOZ_ASSERT(UseHardFpABI());
+ if (usedFloatArgs >= NumFloatArgRegs)
+ return false;
+ *out = VFPRegister(usedFloatArgs, VFPRegister::Single);
+ return true;
+}
+static inline bool
+GetDoubleArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out)
+{
+ MOZ_ASSERT(UseHardFpABI());
+ MOZ_ASSERT((usedFloatArgs % 2) == 0);
+ if (usedFloatArgs >= NumFloatArgRegs)
+ return false;
+ *out = VFPRegister(usedFloatArgs>>1, VFPRegister::Double);
+ return true;
+}
+
+static inline uint32_t
+GetIntArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t* padding)
+{
+ MOZ_ASSERT(UseHardFpABI());
+ MOZ_ASSERT(usedIntArgs >= NumIntArgRegs);
+ uint32_t doubleSlots = Max(0, (int32_t)usedFloatArgs - (int32_t)NumFloatArgRegs);
+ doubleSlots *= 2;
+ int intSlots = usedIntArgs - NumIntArgRegs;
+ return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
+}
+
+static inline uint32_t
+GetFloat32ArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t* padding)
+{
+ MOZ_ASSERT(UseHardFpABI());
+ MOZ_ASSERT(usedFloatArgs >= NumFloatArgRegs);
+ uint32_t intSlots = 0;
+ if (usedIntArgs > NumIntArgRegs)
+ intSlots = usedIntArgs - NumIntArgRegs;
+ uint32_t float32Slots = usedFloatArgs - NumFloatArgRegs;
+ return (intSlots + float32Slots + *padding) * sizeof(intptr_t);
+}
+
+static inline uint32_t
+GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t* padding)
+{
+ MOZ_ASSERT(UseHardFpABI());
+ MOZ_ASSERT(usedFloatArgs >= NumFloatArgRegs);
+ uint32_t intSlots = 0;
+ if (usedIntArgs > NumIntArgRegs) {
+ intSlots = usedIntArgs - NumIntArgRegs;
+ // Update the amount of padding required.
+ *padding += (*padding + usedIntArgs) % 2;
+ }
+ uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs;
+ doubleSlots *= 2;
+ return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
+}
+
+#endif
+
+class DoubleEncoder
+{
+ struct DoubleEntry
+ {
+ uint32_t dblTop;
+ datastore::Imm8VFPImmData data;
+ };
+
+ static const DoubleEntry table[256];
+
+ public:
+ bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) const {
+ for (int i = 0; i < 256; i++) {
+ if (table[i].dblTop == top) {
+ *ret = table[i].data;
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+class AutoForbidPools
+{
+ Assembler* masm_;
+
+ public:
+    // The maxInst argument is the maximum number of word-sized instructions
+    // that will be allocated within this context. It is used to determine if
+    // the pool needs to be dumped before entering this context. The debug code
+    // checks that no more than maxInst instructions are actually allocated.
+    //
+    // Allocation of pool entries is not supported within this context, so the
+    // code cannot use large integer or float constants, etc.
+ AutoForbidPools(Assembler* masm, size_t maxInst)
+ : masm_(masm)
+ {
+ masm_->enterNoPool(maxInst);
+ }
+
+ ~AutoForbidPools() {
+ masm_->leaveNoPool();
+ }
+};
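+
+// Typical use (illustrative; masm, dest, imm and c are placeholders): keep a
+// constant pool from being dumped in the middle of a fixed-size sequence,
+// e.g. a patchable movw/movt pair.
+//
+//   {
+//       AutoForbidPools afp(&masm, /* maxInst = */ 2);
+//       masm.as_movw(dest, Imm16(imm & 0xffff), c);
+//       masm.as_movt(dest, Imm16(imm >> 16), c);
+//   }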
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Assembler_arm_h */
diff --git a/js/src/jit/arm/AtomicOperations-arm.h b/js/src/jit/arm/AtomicOperations-arm.h
new file mode 100644
index 000000000..7e988ed29
--- /dev/null
+++ b/js/src/jit/arm/AtomicOperations-arm.h
@@ -0,0 +1,247 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_arm_AtomicOperations_arm_h
+#define jit_arm_AtomicOperations_arm_h
+
+#include "jit/arm/Architecture-arm.h"
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
+// compatible option for older compilers: enable this to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+ // The JIT and the C++ compiler must agree on whether to use atomics
+ // for 64-bit accesses. There are two ways to do this: either the
+ // JIT defers to the C++ compiler (so if the C++ code is compiled
+ // for ARMv6, say, and __atomic_always_lock_free(8) is false, then the
+ // JIT ignores the fact that the program is running on ARMv7 or newer);
+ // or the C++ code in this file calls out to run-time generated code
+ // to do whatever the JIT does.
+ //
+ // For now, make the JIT defer to the C++ compiler when we know what
+ // the C++ compiler will do, otherwise assume a lock is needed.
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+ return HasLDSTREXBHD() && __atomic_always_lock_free(sizeof(int64_t), 0);
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ ;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1);
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"
+
+#endif
+
+#endif // jit_arm_AtomicOperations_arm_h
diff --git a/js/src/jit/arm/Bailouts-arm.cpp b/js/src/jit/arm/Bailouts-arm.cpp
new file mode 100644
index 000000000..db7f69d58
--- /dev/null
+++ b/js/src/jit/arm/Bailouts-arm.cpp
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+    // This is pushed in the bailout handler. Both entry points into the handler
+    // insert their own value into lr, which is then placed onto the stack along
+    // with frameClassId_ above. This should be migrated to ip.
+ public:
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+
+ protected: // Silence Clang warning about unused private fields.
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+
+ uintptr_t snapshotOffset_;
+ uintptr_t padding_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t*)this + sizeof(BailoutStack);
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+};
+
+// Make sure the compiler doesn't add extra padding.
+static_assert((sizeof(BailoutStack) % 8) == 0, "BailoutStack should be 8-byte aligned.");
+
+} // namespace jit
+} // namespace js
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(Assembler::BailoutTableStart(code->raw()));
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/arm/BaselineCompiler-arm.cpp b/js/src/jit/arm/BaselineCompiler-arm.cpp
new file mode 100644
index 000000000..502a06e23
--- /dev/null
+++ b/js/src/jit/arm/BaselineCompiler-arm.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/BaselineCompiler-arm.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerARM::BaselineCompilerARM(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/arm/BaselineCompiler-arm.h b/js/src/jit/arm/BaselineCompiler-arm.h
new file mode 100644
index 000000000..1dcc33719
--- /dev/null
+++ b/js/src/jit/arm/BaselineCompiler-arm.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_BaselineCompiler_arm_h
+#define jit_arm_BaselineCompiler_arm_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerARM : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerARM(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerARM BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_BaselineCompiler_arm_h */
diff --git a/js/src/jit/arm/BaselineIC-arm.cpp b/js/src/jit/arm/BaselineIC-arm.cpp
new file mode 100644
index 000000000..853463888
--- /dev/null
+++ b/js/src/jit/arm/BaselineIC-arm.cpp
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.cmp32(R0.payloadReg(), R1.payloadReg());
+ masm.ma_mov(Imm32(1), R0.payloadReg(), cond);
+ masm.ma_mov(Imm32(0), R0.payloadReg(), Assembler::InvertCondition(cond));
+
+ // Tag the 0/1 result as a boolean.
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+ Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond);
+
+ masm.compareDouble(FloatReg0, FloatReg1);
+ masm.ma_mov(Imm32(0), dest);
+ masm.ma_mov(Imm32(1), dest, cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
new file mode 100644
index 000000000..f8f77b7d5
--- /dev/null
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -0,0 +1,3720 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/CodeGenerator-arm.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using JS::GenericNaN;
+using JS::ToInt32;
+
+// shared
+CodeGeneratorARM::CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+Register64
+CodeGeneratorARM::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToRegister64(input);
+}
+
+void
+CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue, MBasicBlock* mirFalse)
+{
+ if (isNextBlock(mirFalse->lir())) {
+ jumpToBlock(mirTrue, cond);
+ } else {
+ jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+}
+
+void
+OutOfLineBailout::accept(CodeGeneratorARM* codegen)
+{
+ codegen->visitOutOfLineBailout(this);
+}
+
+void
+CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch* test)
+{
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ // Test the operand
+ masm.as_cmp(ToRegister(opd), Imm8(0));
+
+ if (isNextBlock(ifFalse->lir())) {
+ jumpToBlock(ifTrue, Assembler::NonZero);
+ } else if (isNextBlock(ifTrue->lir())) {
+ jumpToBlock(ifFalse, Assembler::Zero);
+ } else {
+ jumpToBlock(ifFalse, Assembler::Zero);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorARM::visitCompare(LCompare* comp)
+{
+ Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (right->isConstant()) {
+ masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
+ } else if (right->isRegister()) {
+ masm.ma_cmp(ToRegister(left), ToRegister(right));
+ } else {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
+ }
+ masm.ma_mov(Imm32(0), ToRegister(def));
+ masm.ma_mov(Imm32(1), ToRegister(def), cond);
+}
+
+void
+CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch* comp)
+{
+ Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
+ const LAllocation* left = comp->left();
+ const LAllocation* right = comp->right();
+
+ ScratchRegisterScope scratch(masm);
+
+ if (right->isConstant()) {
+ masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
+ } else if (right->isRegister()) {
+ masm.ma_cmp(ToRegister(left), ToRegister(right));
+ } else {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
+ }
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+bool
+CodeGeneratorARM::generateOutOfLineCode()
+{
+ if (!CodeGeneratorShared::generateOutOfLineCode())
+ return false;
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ masm.ma_mov(Imm32(frameSize()), lr);
+
+ JitCode* handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.branch(handler);
+ }
+
+ return !masm.oom();
+}
+
+void
+CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot)
+{
+ encode(snapshot);
+
+ // Though the assembler doesn't track all frame pushes, at least make sure
+ // the known value makes sense. We can't use bailout tables if the stack
+ // isn't properly aligned to the static frame size.
+ MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+ frameClass_.frameSize() == masm.framePushed());
+
+ if (assignBailoutId(snapshot)) {
+ uint8_t* bailoutTable = Assembler::BailoutTableStart(deoptTable_->raw());
+ uint8_t* code = bailoutTable + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
+ masm.ma_b(code, condition);
+ return;
+ }
+
+ // We could not use a jump table, either because all bailout IDs were
+ // reserved, or because a jump table is not optimal for this frame size or
+ // platform. In either case, generate a lazy bailout.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.ma_b(ool->entry(), condition);
+}
+
+void
+CodeGeneratorARM::bailoutFrom(Label* label, LSnapshot* snapshot)
+{
+ if (masm.bailed())
+ return;
+
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ // Though the assembler doesn't track all frame pushes, at least make sure
+ // the known value makes sense. We can't use bailout tables if the stack
+ // isn't properly aligned to the static frame size.
+ MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+ frameClass_.frameSize() == masm.framePushed());
+
+ // On ARM we don't use a bailout table.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void
+CodeGeneratorARM::bailout(LSnapshot* snapshot)
+{
+ Label label;
+ masm.ma_b(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void
+CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ ScratchRegisterScope scratch(masm);
+ masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), scratch);
+ masm.ma_push(scratch); // BailoutStack::padding_
+ masm.ma_push(scratch); // BailoutStack::snapshotOffset_
+ masm.ma_b(&deoptLabel_);
+}
+
+void
+CodeGeneratorARM::visitMinMaxD(LMinMaxD* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxDouble(second, first, true);
+ else
+ masm.minDouble(second, first, true);
+}
+
+void
+CodeGeneratorARM::visitMinMaxF(LMinMaxF* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxFloat32(second, first, true);
+ else
+ masm.minFloat32(second, first, true);
+}
+
+void
+CodeGeneratorARM::visitAbsD(LAbsD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.ma_vabs(input, input);
+}
+
+void
+CodeGeneratorARM::visitAbsF(LAbsF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.ma_vabs_f32(input, input);
+}
+
+void
+CodeGeneratorARM::visitSqrtD(LSqrtD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.ma_vsqrt(input, output);
+}
+
+void
+CodeGeneratorARM::visitSqrtF(LSqrtF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.ma_vsqrt_f32(input, output);
+}
+
+void
+CodeGeneratorARM::visitAddI(LAddI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (rhs->isConstant())
+ masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, SetCC);
+ else if (rhs->isRegister())
+ masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
+ else
+ masm.ma_add(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest), SetCC);
+
+ if (ins->snapshot())
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorARM::visitAddI64(LAddI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorARM::visitSubI(LSubI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (rhs->isConstant())
+ masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, SetCC);
+ else if (rhs->isRegister())
+ masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
+ else
+ masm.ma_sub(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest), SetCC);
+
+ if (ins->snapshot())
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorARM::visitSubI64(LSubI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorARM::visitMulI(LMulI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+ MMul* mul = ins->mir();
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ // Bailout when this condition is met.
+ Assembler::Condition c = Assembler::Overflow;
+ // Bailout on -0.0
+ int32_t constant = ToInt32(rhs);
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ masm.as_cmp(ToRegister(lhs), Imm8(0));
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+ // TODO: move these to ma_mul.
+ switch (constant) {
+ case -1:
+ masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), SetCC);
+ break;
+ case 0:
+ masm.ma_mov(Imm32(0), ToRegister(dest));
+ return; // Escape overflow check;
+ case 1:
+ // Nop
+ masm.ma_mov(ToRegister(lhs), ToRegister(dest));
+ return; // Escape overflow check;
+ case 2:
+ masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
+ // Overflow is handled later.
+ break;
+ default: {
+ bool handled = false;
+ if (constant > 0) {
+ // Try shift and add sequences for a positive constant.
+ if (!mul->canOverflow()) {
+ // If it cannot overflow, we can do lots of optimizations.
+ Register src = ToRegister(lhs);
+ uint32_t shift = FloorLog2(constant);
+ uint32_t rest = constant - (1 << shift);
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
+ handled = true;
+ } else {
+ // If the constant cannot be encoded as (1 << C1), see
+ // if it can be encoded as (1 << C1) | (1 << C2), which
+ // can be computed using an add and a shift.
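+ // Illustrative example: for constant == 10, shift == 3 and rest == 2,
+ // so shift_rest == 1 and the sequence computes
+ // dest = (src + (src << 2)) << 1 == src * 10.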
+ uint32_t shift_rest = FloorLog2(rest);
+ if ((1u << shift_rest) == rest) {
+ masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
+ if (shift_rest != 0)
+ masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
+ handled = true;
+ }
+ }
+ } else if (ToRegister(lhs) != ToRegister(dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ uint32_t shift = FloorLog2(constant);
+ if ((1 << shift) == constant) {
+ // dest = lhs * pow(2,shift)
+ masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
+ // At runtime, check (lhs == dest >> shift), if this
+ // does not hold, some bits were lost due to overflow,
+ // and the computation should be resumed as a double.
+ masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
+ c = Assembler::NotEqual;
+ handled = true;
+ }
+ }
+ }
+
+ if (!handled) {
+ ScratchRegisterScope scratch(masm);
+ if (mul->canOverflow())
+ c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, c);
+ else
+ masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch);
+ }
+ }
+ }
+ // Bailout on overflow.
+ if (mul->canOverflow())
+ bailoutIf(c, ins->snapshot());
+ } else {
+ Assembler::Condition c = Assembler::Overflow;
+
+ if (mul->canOverflow()) {
+ ScratchRegisterScope scratch(masm);
+ c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), scratch, c);
+ } else {
+ masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
+ }
+
+ // Bailout on overflow.
+ if (mul->canOverflow())
+ bailoutIf(c, ins->snapshot());
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.as_cmp(ToRegister(dest), Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+
+ // Result is -0 if lhs or rhs is negative.
+ masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitMulI64(LMulI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void
+CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register output,
+ LSnapshot* snapshot, Label& done)
+{
+ ScratchRegisterScope scratch(masm);
+
+ if (mir->canBeNegativeOverflow()) {
+ // Handle INT32_MIN / -1;
+ // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
+
+ // Sets EQ if lhs == INT32_MIN.
+ masm.ma_cmp(lhs, Imm32(INT32_MIN), scratch);
+ // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
+ masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
+ if (mir->canTruncateOverflow()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(trap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
+ } else {
+ // (-INT32_MIN)|0 = INT32_MIN
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(INT32_MIN), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.as_cmp(rhs, Imm8(0));
+ if (mir->canTruncateInfinities()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+ } else {
+ // Infinity|0 == 0
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+
+ // Handle negative 0.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.as_cmp(lhs, Imm8(0));
+ masm.ma_b(&nonzero, Assembler::NotEqual);
+ masm.as_cmp(rhs, Imm8(0));
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::LessThan, snapshot);
+ masm.bind(&nonzero);
+ }
+}
+
+void
+CodeGeneratorARM::visitDivI(LDivI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp = ToRegister(ins->getTemp(0));
+ Register output = ToRegister(ins->output());
+ MDiv* mir = ins->mir();
+
+ Label done;
+ divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ if (mir->canTruncateRemainder()) {
+ masm.ma_sdiv(lhs, rhs, output);
+ } else {
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_sdiv(lhs, rhs, temp);
+ masm.ma_mul(temp, rhs, scratch);
+ masm.ma_cmp(lhs, scratch);
+ }
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ masm.ma_mov(temp, output);
+ }
+
+ masm.bind(&done);
+}
+
+extern "C" {
+ extern MOZ_EXPORT int64_t __aeabi_idivmod(int,int);
+ extern MOZ_EXPORT int64_t __aeabi_uidivmod(int,int);
+}
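+
+// These AEABI helpers return the quotient in r0 and the remainder in r1
+// (packed into the int64_t return value), which is why the callers below
+// read the remainder directly from r1.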
+
+void
+CodeGeneratorARM::visitSoftDivI(LSoftDivI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ MDiv* mir = ins->mir();
+
+ Label done;
+ divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
+ else
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
+
+ // idivmod returns the quotient in r0, and the remainder in r1.
+ if (!mir->canTruncateRemainder()) {
+ MOZ_ASSERT(mir->fallible());
+ masm.as_cmp(r1, Imm8(0));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI* ins)
+{
+ MDiv* mir = ins->mir();
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ int32_t shift = ins->shift();
+
+ if (shift == 0) {
+ masm.ma_mov(lhs, output);
+ return;
+ }
+
+ if (!mir->isTruncated()) {
+ // If the remainder is != 0, bailout since this must be a double.
+ {
+ // The bailout code also needs the scratch register.
+ // Here it is only used as a dummy target to set CC flags.
+ ScratchRegisterScope scratch(masm);
+ masm.as_mov(scratch, lsl(lhs, 32 - shift), SetCC);
+ }
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+ // Numerator is unsigned, so needs no adjusting. Do the shift.
+ masm.as_mov(output, asr(lhs, shift));
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+ ScratchRegisterScope scratch(masm);
+
+ if (shift > 1) {
+ masm.as_mov(scratch, asr(lhs, 31));
+ masm.as_add(scratch, lhs, lsr(scratch, 32 - shift));
+ } else {
+ masm.as_add(scratch, lhs, lsr(lhs, 32 - shift));
+ }
+
+ // Do the shift.
+ masm.as_mov(output, asr(scratch, shift));
+}
+
+void
+CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs, Register output,
+ LSnapshot* snapshot, Label& done)
+{
+ // 0/X (with X < 0) is bad because both of these values *should* be doubles,
+ // and the result should be -0.0, which cannot be represented in integers.
+ // X/0 is bad because it will give garbage (or abort), when it should give
+ // either \infty, -\infty or NaN.
+
+ // Prevent 0 / X (with X < 0) and X / 0.
+ // We are testing X / Y here; compare Y with 0.
+ // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
+ // If (Y < 0), then we compare X with 0, and bail if X == 0.
+ // If (Y == 0), then we simply want to bail. Since this does not set the
+ // flags necessary for LT to trigger, we don't test X, and take the bailout
+ // because the EQ flag is set.
+ // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
+ // the bailout.
+ if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
+ if (mir->trapOnError()) {
+ // wasm allows a negative lhs and returns 0 in this case.
+ MOZ_ASSERT(mir->isTruncated());
+ masm.as_cmp(rhs, Imm8(0));
+ masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+ return;
+ }
+
+ masm.as_cmp(rhs, Imm8(0));
+ masm.as_cmp(lhs, Imm8(0), Assembler::LessThan);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitModI(LModI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+
+ // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+ masm.ma_mov(lhs, callTemp);
+
+ Label done;
+ modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_smod(lhs, rhs, output, scratch);
+ }
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.as_cmp(output, Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+ masm.as_cmp(callTemp, Imm8(0));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitSoftModI(LSoftModI* ins)
+{
+ // Extract the registers from this instruction.
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done;
+
+ // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+ MOZ_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
+ masm.ma_mov(lhs, callTemp);
+
+ // Prevent INT_MIN % -1;
+ // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+ if (mir->canBeNegativeDividend()) {
+ {
+ ScratchRegisterScope scratch(masm);
+ // Sets EQ if lhs == INT_MIN
+ masm.ma_cmp(lhs, Imm32(INT_MIN), scratch);
+ // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
+ masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
+ }
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ }
+
+ modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
+ else
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.as_cmp(r1, Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+ masm.as_cmp(callTemp, Imm8(0));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitModPowTwoI(LModPowTwoI* ins)
+{
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label fin;
+ // bug 739870, jbramley has a different sequence that may help with speed
+ // here.
+
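+ // Worked example (illustrative): for in == -7 and shift == 2 the sequence
+ // below negates to 7, masks to 7 & 3 == 3, and negates again because the
+ // input was negative, giving -3, which matches JS (-7 % 4) == -3; the final
+ // SetCC lets the Zero bailout below catch the -0 case.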
+ masm.ma_mov(in, out, SetCC);
+ masm.ma_b(&fin, Assembler::Zero);
+ masm.as_rsb(out, out, Imm8(0), LeaveCC, Assembler::Signed);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(Imm32((1 << ins->shift()) - 1), out, scratch);
+ }
+ masm.as_rsb(out, out, Imm8(0), SetCC, Assembler::Signed);
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&fin);
+}
+
+void
+CodeGeneratorARM::visitModMaskI(LModMaskI* ins)
+{
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp1 = ToRegister(ins->getTemp(0));
+ Register tmp2 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+
+ masm.ma_mod_mask(src, dest, tmp1, tmp2, scratch, scratch2, ins->shift());
+
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitBitNotI(LBitNotI* ins)
+{
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ // This need not actually hold on ARM: we could MVN an imm8m immediate
+ // directly, which would cover a wider range of constants.
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.ma_mvn(ToRegister(input), ToRegister(dest));
+}
+
+void
+CodeGeneratorARM::visitBitOpI(LBitOpI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ // All of these bitops should be either imm32's, or integer registers.
+ switch (ins->bitop()) {
+ case JSOP_BITOR:
+ if (rhs->isConstant())
+ masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
+ else
+ masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ break;
+ case JSOP_BITXOR:
+ if (rhs->isConstant())
+ masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
+ else
+ masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ break;
+ case JSOP_BITAND:
+ if (rhs->isConstant())
+ masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
+ else
+ masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorARM::visitShiftI(LShiftI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.ma_lsl(Imm32(shift), lhs, dest);
+ else
+ masm.ma_mov(lhs, dest);
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.ma_asr(Imm32(shift), lhs, dest);
+ else
+ masm.ma_mov(lhs, dest);
+ break;
+ case JSOP_URSH:
+ if (shift) {
+ masm.ma_lsr(Imm32(shift), lhs, dest);
+ } else {
+ // x >>> 0 can overflow.
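+ // (An unsigned result with the sign bit set does not fit in an int32;
+ // e.g. (0x80000000 >>> 0) is 2147483648, hence the bailout below.)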
+ masm.ma_mov(lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ masm.as_cmp(dest, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ // The shift amounts should be AND'ed into the 0-31 range since arm
+ // shifts by the lower byte of the register (it will attempt to shift by
+ // 250 if you ask it to).
+ masm.as_and(dest, ToRegister(rhs), Imm8(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ masm.ma_lsl(dest, lhs, dest);
+ break;
+ case JSOP_RSH:
+ masm.ma_asr(dest, lhs, dest);
+ break;
+ case JSOP_URSH:
+ masm.ma_lsr(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.as_cmp(dest, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitUrshD(LUrshD* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ if (shift)
+ masm.ma_lsr(Imm32(shift), lhs, temp);
+ else
+ masm.ma_mov(lhs, temp);
+ } else {
+ masm.as_and(temp, ToRegister(rhs), Imm8(0x1F));
+ masm.ma_lsr(temp, lhs, temp);
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void
+CodeGeneratorARM::visitClzI(LClzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.clz32(input, output, /* knownNotZero = */ false);
+}
+
+void
+CodeGeneratorARM::visitCtzI(LCtzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ctz32(input, output, /* knownNotZero = */ false);
+}
+
+void
+CodeGeneratorARM::visitPopcntI(LPopcntI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Register tmp = ToRegister(ins->temp());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void
+CodeGeneratorARM::visitPowHalfD(LPowHalfD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ ScratchDoubleScope scratch(masm);
+
+ Label done;
+
+ // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
+ masm.compareDouble(input, scratch);
+ masm.ma_vneg(scratch, output, Assembler::Equal);
+ masm.ma_b(&done, Assembler::Equal);
+
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_vadd(scratch, input, output);
+ masm.ma_vsqrt(output, output);
+
+ masm.bind(&done);
+}
+
+MoveOperand
+CodeGeneratorARM::toMoveOperand(LAllocation a) const
+{
+ if (a.isGeneralReg())
+ return MoveOperand(ToRegister(a));
+ if (a.isFloatReg())
+ return MoveOperand(ToFloatRegister(a));
+ int32_t offset = ToStackOffset(a);
+ MOZ_ASSERT((offset & 3) == 0);
+ return MoveOperand(StackPointer, offset);
+}
+
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
+{
+ MTableSwitch* mir_;
+ Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;
+
+ void accept(CodeGeneratorARM* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
+ : mir_(mir),
+ codeLabels_(alloc)
+ {}
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ bool addCodeLabel(CodeLabel label) {
+ return codeLabels_.append(label);
+ }
+ CodeLabel codeLabel(unsigned i) {
+ return codeLabels_[i];
+ }
+};
+
+void
+CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MTableSwitch* mir = ool->mir();
+
+ size_t numCases = mir->numCases();
+ for (size_t i = 0; i < numCases; i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl = ool->codeLabel(i);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void
+CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base)
+{
+ // The code generated here is fairly unconventional. The end result looks
+ // something like:
+ // SUBS index, input, #base
+ // RSBSPL index, index, #max
+ // LDRPL pc, pc, index lsl 2
+ // B default
+
+ // If the range of targets is N through M, we first subtract off the lowest
+ // case (N), which both shifts the argument into the range 0 to (M - N)
+ // and sets the Minus flag if the argument was out of range on the low
+ // end.
+
+ // Then we do a reverse subtract with the size of the jump table, which
+ // reverses the range (it becomes size through 0 rather than 0 through
+ // size). The main purpose of this is that the upper bound check sets the
+ // same flag as the lower bound check. Lastly, this is done conditionally
+ // on the previous check succeeding.
+
+ // Then we conditionally load the pc offset by the (reversed) index (times
+ // the address size) into the pc, which branches to the correct case. NOTE:
+ // when we go to read the pc, the value that we get back is the pc of the
+ // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
+ // $pc+8. In other words, there is an empty word after the branch into the
+ // switch table before the table actually starts. Since the only other
+ // unhandled case is the default case (both out of range high and out of
+ // range low), we insert a branch to the default case into that extra slot,
+ // which ensures we don't attempt to execute the address table.
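+ // Illustrative example: with mir->low() == 5 and three cases (5, 6, 7), an
+ // input of 6 becomes 1 after the subtract and 1 again after the reverse
+ // subtract against (cases - 1) == 2, so the conditional load indexes the
+ // table; an input of 4 or 8 ends up with the Minus flag set, the load is
+ // skipped, and control falls through to the branch to the default case.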
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ ScratchRegisterScope scratch(masm);
+
+ int32_t cases = mir->numCases();
+ // Subtract the lowest case value so the index is zero-based.
+ masm.ma_sub(index, Imm32(mir->low()), index, scratch, SetCC);
+ masm.ma_rsb(index, Imm32(cases - 1), index, scratch, SetCC, Assembler::NotSigned);
+ // Inhibit pools within the following sequence because we are indexing into
+ // a pc relative table. The region will have one instruction for ma_ldr, one
+ // for ma_b, and each table case takes one word.
+ AutoForbidPools afp(&masm, 1 + 1 + cases);
+ masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
+ masm.ma_b(defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first generate
+ // the case entries (we don't yet know their offsets in the instruction
+ // stream).
+ OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
+ for (int32_t i = 0; i < cases; i++) {
+ CodeLabel cl;
+ masm.writeCodePointer(cl.patchAt());
+ masm.propagateOOM(ool->addCodeLabel(cl));
+ }
+ addOutOfLineCode(ool, mir);
+}
+
+void
+CodeGeneratorARM::visitMathD(LMathD* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.ma_vadd(src1, src2, output);
+ break;
+ case JSOP_SUB:
+ masm.ma_vsub(src1, src2, output);
+ break;
+ case JSOP_MUL:
+ masm.ma_vmul(src1, src2, output);
+ break;
+ case JSOP_DIV:
+ masm.ma_vdiv(src1, src2, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorARM::visitMathF(LMathF* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.ma_vadd_f32(src1, src2, output);
+ break;
+ case JSOP_SUB:
+ masm.ma_vsub_f32(src1, src2, output);
+ break;
+ case JSOP_MUL:
+ masm.ma_vmul_f32(src1, src2, output);
+ break;
+ case JSOP_DIV:
+ masm.ma_vdiv_f32(src1, src2, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorARM::visitFloor(LFloor* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Label bail;
+ masm.floor(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::visitFloorF(LFloorF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Label bail;
+ masm.floorf(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::visitCeil(LCeil* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Label bail;
+ masm.ceil(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::visitCeilF(LCeilF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Label bail;
+ masm.ceilf(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::visitRound(LRound* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ FloatRegister tmp = ToFloatRegister(lir->temp());
+ Label bail;
+ // Output is either correct, or clamped. All -0 cases have been translated
+ // to a clamped case.
+ masm.round(input, output, &bail, tmp);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::visitRoundF(LRoundF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ FloatRegister tmp = ToFloatRegister(lir->temp());
+ Label bail;
+ // Output is either correct, or clamped. All -0 cases have been translated
+ // to a clamped case.
+ masm.roundf(input, output, &bail, tmp);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void
+CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label* fail)
+{
+ ScratchDoubleScope scratch(masm);
+ ScratchRegisterScope scratchReg(masm);
+
+ masm.ma_vcvt_F64_I32(src, scratch);
+ masm.ma_vxfer(scratch, dest);
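+ // The two compares below treat a converted value of INT32_MAX or INT32_MIN
+ // as a failure, presumably because the VFP conversion saturates
+ // out-of-range inputs to those values.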
+ masm.ma_cmp(dest, Imm32(0x7fffffff), scratchReg);
+ masm.ma_cmp(dest, Imm32(0x80000000), scratchReg, Assembler::NotEqual);
+ masm.ma_b(fail, Assembler::Equal);
+}
+
+void
+CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()), ins->mir());
+}
+
+void
+CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()), ins->mir());
+}
+
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
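+// Illustrative mapping (see FromDepth below): a frame depth of 100 maps to
+// class 0 (128 bytes), 300 maps to class 2 (512 bytes), and depths of 1024
+// or more get FrameSizeClass::None() and fall back to the exact frame size.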
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+ if (frameDepth < FrameSizes[i])
+ return FrameSizeClass(i);
+ }
+
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+ return FrameSizes[class_];
+}
+
+ValueOperand
+CodeGeneratorARM::ToValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorARM::ToOutValue(LInstruction* ins)
+{
+ Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorARM::ToTempValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void
+CodeGeneratorARM::visitValue(LValue* value)
+{
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void
+CodeGeneratorARM::visitBox(LBox* box)
+{
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // On ARM, the input operand and the output payload have the same virtual
+ // register. All that needs to be written is the type tag for the type
+ // definition.
+ masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void
+CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint* box)
+{
+ const LDefinition* payload = box->getDef(PAYLOAD_INDEX);
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+ const LAllocation* in = box->getOperand(0);
+ FloatRegister reg = ToFloatRegister(in);
+
+ if (box->type() == MIRType::Float32) {
+ ScratchFloat32Scope scratch(masm);
+ masm.convertFloat32ToDouble(reg, scratch);
+ masm.ma_vxfer(VFPRegister(scratch), ToRegister(payload), ToRegister(type));
+ } else {
+ masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type));
+ }
+}
+
+void
+CodeGeneratorARM::visitUnbox(LUnbox* unbox)
+{
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (mir->fallible()) {
+ masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())), scratch);
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ }
+}
+
+void
+CodeGeneratorARM::visitDouble(LDouble* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorARM::visitFloat32(LFloat32* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
+}
+
+Register
+CodeGeneratorARM::splitTagForTest(const ValueOperand& value)
+{
+ return value.typeReg();
+}
+
+void
+CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch* test)
+{
+ const LAllocation* opd = test->input();
+ masm.ma_vcmpz(ToFloatRegister(opd));
+ masm.as_vmrs(pc);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+ // If the compare set the zero (Z) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+ // It is also false if one of the operands is NaN, which shows up as
+ // Overflow.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void
+CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch* test)
+{
+ const LAllocation* opd = test->input();
+ masm.ma_vcmpz_f32(ToFloatRegister(opd));
+ masm.as_vmrs(pc);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+ // If the compare set the zero (Z) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+ // It is also false if one of the operands is NaN, which shows up as
+ // Overflow.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void
+CodeGeneratorARM::visitCompareD(LCompareD* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.compareDouble(lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
+}
+
+void
+CodeGeneratorARM::visitCompareF(LCompareF* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.compareFloat(lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
+}
+
+void
+CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ masm.compareDouble(lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
+}
+
+void
+CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ masm.compareFloat(lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
+}
+
+void
+CodeGeneratorARM::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Label notBoolean, done;
+ masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+ {
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&notBoolean);
+ {
+ masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
+ jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);
+
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorARM::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ Label notEqual, done;
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ masm.j(Assembler::NotEqual, &notEqual);
+ {
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ masm.emitSet(cond, output);
+ masm.jump(&done);
+ }
+ masm.bind(&notEqual);
+ {
+ masm.move32(Imm32(cond == Assembler::NotEqual), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ jumpToBlock(notEqual, Assembler::NotEqual);
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ emitBranch(cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch* baab)
+{
+ ScratchRegisterScope scratch(masm);
+ if (baab->right()->isConstant())
+ masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())), scratch);
+ else
+ masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
+ emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
+}
+
+void
+CodeGeneratorARM::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorARM::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorARM::visitNotI(LNotI* ins)
+{
+ // It is hard to optimize !x, so just do it the basic way for now.
+ masm.as_cmp(ToRegister(ins->input()), Imm8(0));
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.ma_orr(input.low, input.high, output);
+ masm.as_cmp(output, Imm8(0));
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void
+CodeGeneratorARM::visitNotD(LNotD* ins)
+{
+ // Since this operation is a logical NOT, we want to set a bit if the double
+ // is falsy, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
+ // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
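+ // Sketch of the flag extraction in the nocond path below: after shifting
+ // the copied status flags right by 28, bit 0 holds V and bit 2 holds Z;
+ // OR-ing the value with itself shifted right by 2 folds Z onto bit 0, and
+ // the final AND with 1 leaves 1 exactly when the input was 0.0, -0.0 or NaN.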
+ FloatRegister opd = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ // Do the compare.
+ masm.ma_vcmpz(opd);
+ // TODO There are three variations here to compare performance-wise.
+ bool nocond = true;
+ if (nocond) {
+ // Load the value into the dest register.
+ masm.as_vmrs(dest);
+ masm.ma_lsr(Imm32(28), dest, dest);
+ // 28 + 2 = 30
+ masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
+ masm.as_and(dest, dest, Imm8(1));
+ } else {
+ masm.as_vmrs(pc);
+ masm.ma_mov(Imm32(0), dest);
+ masm.ma_mov(Imm32(1), dest, Assembler::Equal);
+ masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
+ }
+}
+
+void
+CodeGeneratorARM::visitNotF(LNotF* ins)
+{
+ // Since this operation is a logical NOT, we want to set a bit if the float32
+ // is falsy, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
+ // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
+ FloatRegister opd = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ // Do the compare.
+ masm.ma_vcmpz_f32(opd);
+ // TODO There are three variations here to compare performance-wise.
+ bool nocond = true;
+ if (nocond) {
+ // Load the value into the dest register.
+ masm.as_vmrs(dest);
+ masm.ma_lsr(Imm32(28), dest, dest);
+ // 28 + 2 = 30
+ masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
+ masm.as_and(dest, dest, Imm8(1));
+ } else {
+ masm.as_vmrs(pc);
+ masm.ma_mov(Imm32(0), dest);
+ masm.ma_mov(Imm32(1), dest, Assembler::Equal);
+ masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
+ }
+}
+
+void
+CodeGeneratorARM::visitGuardShape(LGuardShape* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(DTRAddr(obj, DtrOffImm(ShapedObject::offsetOfShape())), tmp);
+ masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()), scratch);
+
+ bailoutIf(Assembler::NotEqual, guard->snapshot());
+}
+
+void
+CodeGeneratorARM::visitGuardObjectGroup(LGuardObjectGroup* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+ MOZ_ASSERT(obj != tmp);
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfGroup())), tmp);
+ masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->group()), scratch);
+
+ Assembler::Condition cond =
+ guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
+ bailoutIf(cond, guard->snapshot());
+}
+
+void
+CodeGeneratorARM::visitGuardClass(LGuardClass* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.loadObjClass(obj, tmp);
+ masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()), scratch);
+ bailoutIf(Assembler::NotEqual, guard->snapshot());
+}
+
+void
+CodeGeneratorARM::generateInvalidateEpilogue()
+{
+ // Ensure that there is enough space in the buffer for the OsiPoint patching
+ // to occur. Otherwise, we could overwrite the invalidation epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+ masm.nop();
+
+ masm.bind(&invalidate_);
+
+ // Push the return address of the point that we bailed out at onto the stack.
+ masm.Push(lr);
+
+ // Push the Ion script onto the stack (when we determine what that pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+ JitCode* thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.branch(thunk);
+
+ // We should never reach this point in JIT code -- the invalidation thunk
+ // should pop the invalidated JS frame and return directly to its caller.
+ masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
+}
+
+void
+CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorARM::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ }
+}
+
+void
+CodeGeneratorARM::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ }
+}
+
+template<typename S, typename T>
+void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem, Register flagTemp,
+ Register outTemp, AnyRegister output)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+ MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16SignExtend(value, mem, flagTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, outTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, outTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, outTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, outTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, outTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ masm.convertUInt32ToDouble(outTemp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd8(value, mem, flagTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub8(value, mem, flagTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd8(value, mem, flagTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr8(value, mem, flagTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor8(value, mem, flagTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd16(value, mem, flagTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub16(value, mem, flagTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd16(value, mem, flagTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr16(value, mem, flagTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor16(value, mem, flagTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd32(value, mem, flagTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub32(value, mem, flagTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd32(value, mem, flagTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr32(value, mem, flagTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor32(value, mem, flagTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp);
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
+ Scalar::Type arrayType, const LAllocation* value, const T& mem,
+ Register flagTemp, Register outTemp, AnyRegister output)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp, output);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp, output);
+}
+
+void
+CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->temp1());
+ Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
+ }
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op, Scalar::Type arrayType,
+ const LAllocation* value, const T& mem, Register flagTemp)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp);
+}
+
+void
+CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->flagTemp());
+ const LAllocation* value = lir->value();
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmSelect(LWasmSelect* ins)
+{
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ masm.as_cmp(cond, Imm8(0));
+
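+    // The conditional moves below only take effect when cond is 0 (Zero), i.e.
+    // when falseExpr should be selected.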
+ if (mirType == MIRType::Int32) {
+ Register falseExpr = ToRegister(ins->falseExpr());
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+ masm.ma_mov(falseExpr, out, LeaveCC, Assembler::Zero);
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+
+ FloatRegister falseExpr = ToFloatRegister(ins->falseExpr());
+
+ if (mirType == MIRType::Double)
+ masm.moveDouble(falseExpr, out, Assembler::Zero);
+ else if (mirType == MIRType::Float32)
+ masm.moveFloat32(falseExpr, out, Assembler::Zero);
+ else
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+}
+
+void
+CodeGeneratorARM::visitWasmReinterpret(LWasmReinterpret* lir)
+{
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.ma_vxfer(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.ma_vxfer(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void
+CodeGeneratorARM::emitWasmCall(LWasmCallBase* ins)
+{
+ MWasmCall* mir = ins->mir();
+
+ if (UseHardFpABI() || mir->callee().which() != wasm::CalleeDesc::Builtin) {
+ emitWasmCallBase(ins);
+ return;
+ }
+
+ // The soft ABI passes floating point arguments in GPRs. Since basically
+ // nothing is set up to handle this, the values are placed in the
+ // corresponding VFP registers, then transferred to GPRs immediately
+ // before the call. The mapping is sN <-> rN, where double registers
+ // can be treated as their two component single registers.
+
+ for (unsigned i = 0, e = ins->numOperands(); i < e; i++) {
+ LAllocation* a = ins->getOperand(i);
+ if (a->isFloatReg()) {
+ FloatRegister fr = ToFloatRegister(a);
+ if (fr.isDouble()) {
+ uint32_t srcId = fr.singleOverlay().id();
+ masm.ma_vxfer(fr, Register::FromCode(srcId), Register::FromCode(srcId + 1));
+ } else {
+ uint32_t srcId = fr.id();
+ masm.ma_vxfer(fr, Register::FromCode(srcId));
+ }
+ }
+ }
+
+ emitWasmCallBase(ins);
+
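+    // Move the return value from the GPRs used by the soft ABI back into the
+    // expected VFP return register.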
+ switch (mir->type()) {
+ case MIRType::Double:
+ masm.ma_vxfer(r0, r1, d0);
+ break;
+ case MIRType::Float32:
+ masm.as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), Assembler::CoreToFloat);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCall(ins);
+}
+
+void
+CodeGeneratorARM::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCall(ins);
+}
+
+void
+CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->accessType()) {
+ case Scalar::Int8: isSigned = true; size = 8; break;
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16: isSigned = true; size = 16; break;
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32:
+ case Scalar::Uint32: isSigned = true; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ ScratchRegisterScope scratch(masm);
+ VFPRegister vd(ToFloatRegister(ins->output()));
+ if (size == 32)
+ masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), scratch, Assembler::Always);
+ else
+ masm.ma_vldr(Address(HeapReg, ptrImm), vd, scratch, Assembler::Always);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
+ ToRegister(ins->output()), scratch, Offset, Assembler::Always);
+ }
+ } else {
+ ScratchRegisterScope scratch(masm);
+ Register ptrReg = ToRegister(ptr);
+ if (isFloat) {
+ FloatRegister output = ToFloatRegister(ins->output());
+ if (size == 32)
+ output = output.singleOverlay();
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
+ masm.append(wasm::BoundsCheck(cmp.getOffset()));
+
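+                // An out-of-bounds access loads the canonical NaN from the global
+                // data instead of touching the heap.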
+ size_t nanOffset = size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
+ masm.ma_vldr(Address(GlobalReg, nanOffset - WasmGlobalRegBias), output, scratch,
+ Assembler::AboveOrEqual);
+ cond = Assembler::Below;
+ }
+
+ masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
+ } else {
+ Register output = ToRegister(ins->output());
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ uint32_t cmpOffset = masm.as_cmp(ptrReg, Imm8(0)).getOffset();
+ masm.append(wasm::BoundsCheck(cmpOffset));
+
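+                // An out-of-bounds access yields 0.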
+ masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
+ cond = Assembler::Below;
+ }
+
+ masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, scratch, Offset, cond);
+ }
+ }
+}
+
+template <typename T>
+void
+CodeGeneratorARM::emitWasmLoad(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+ Scalar::Type type = mir->access().type();
+
+ // Maybe add the offset.
+ if (offset || type == Scalar::Int64) {
+ ScratchRegisterScope scratch(masm);
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ if (offset)
+ masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
+ type == Scalar::Int64;
+ unsigned byteSize = mir->access().byteSize();
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ BufferOffset load;
+ if (mir->type() == MIRType::Int64) {
+ Register64 output = ToOutRegister64(lir);
+ if (type == Scalar::Int64) {
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+
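+            // Load the low word at ptr, then bump ptr by 4 to load the high word.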
+ load = masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
+ masm.append(mir->access(), load.getOffset(), masm.framePushed());
+
+ masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+ load = masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
+ masm.append(mir->access(), load.getOffset(), masm.framePushed());
+ } else {
+ load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.low);
+ masm.append(mir->access(), load.getOffset(), masm.framePushed());
+
+ if (isSigned)
+ masm.ma_asr(Imm32(31), output.low, output.high);
+ else
+ masm.ma_mov(Imm32(0), output.high);
+ }
+ } else {
+ AnyRegister output = ToAnyRegister(lir->output());
+ bool isFloat = output.isFloat();
+ if (isFloat) {
+ MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(HeapReg, ptr, scratch);
+
+ load = masm.ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
+ masm.append(mir->access(), load.getOffset(), masm.framePushed());
+ } else {
+ load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
+ masm.append(mir->access(), load.getOffset(), masm.framePushed());
+ }
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
+{
+ emitWasmLoad(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+ emitWasmLoad(lir);
+}
+
+template<typename T>
+void
+CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptrCopy());
+ if (offset) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(Imm32(offset), ptr, scratch);
+ }
+
+ // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
+ masm.ma_add(HeapReg, ptr);
+
+ unsigned byteSize = mir->access().byteSize();
+ Scalar::Type type = mir->access().type();
+ bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
+ type == Scalar::Int64;
+
+ MIRType mirType = mir->type();
+
+ Register tmp = ToRegister(lir->getTemp(1));
+
+ Register low;
+ if (IsFloatingPointType(mirType))
+ low = ToRegister(lir->getTemp(2));
+ else if (mirType == MIRType::Int64)
+ low = ToOutRegister64(lir).low;
+ else
+ low = ToRegister(lir->output());
+
+ MOZ_ASSERT(low != tmp);
+ MOZ_ASSERT(low != ptr);
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
+
+ if (IsFloatingPointType(mirType)) {
+ FloatRegister output = ToFloatRegister(lir->output());
+ if (byteSize == 4) {
+ MOZ_ASSERT(output.isSingle());
+ masm.ma_vxfer(low, output);
+ } else {
+ MOZ_ASSERT(byteSize == 8);
+ MOZ_ASSERT(output.isDouble());
+ Register high = ToRegister(lir->getTemp(3));
+ masm.emitUnalignedLoad(/* signed */ false, 4, ptr, tmp, high, /* offset */ 4);
+ masm.ma_vxfer(low, high, output);
+ }
+ } else if (mirType == MIRType::Int64) {
+ Register64 output = ToOutRegister64(lir);
+ if (type == Scalar::Int64) {
+ MOZ_ASSERT(byteSize == 8);
+ masm.emitUnalignedLoad(isSigned, 4, ptr, tmp, output.high, /* offset */ 4);
+ } else {
+ MOZ_ASSERT(byteSize <= 4);
+ // Propagate sign.
+ if (isSigned)
+ masm.ma_asr(Imm32(31), output.low, output.high);
+ else
+ masm.ma_mov(Imm32(0), output.high);
+ }
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorARM::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
+{
+ emitWasmUnalignedLoad(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+ emitWasmUnalignedLoad(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);
+
+ masm.ma_b(trap(mir, wasm::Trap::OutOfBounds), Assembler::CarrySet);
+}
+
+template <typename T>
+void
+CodeGeneratorARM::emitWasmStore(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+ unsigned byteSize = mir->access().byteSize();
+ Scalar::Type type = mir->access().type();
+
+    // Add the constant offset, if any. Int64 accesses always use the pointer copy,
+    // since the pointer itself gets bumped to reach the high word.
+ if (offset || type == Scalar::Int64) {
+ ScratchRegisterScope scratch(masm);
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ if (offset)
+ masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ BufferOffset store;
+ if (type == Scalar::Int64) {
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+
+ Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
+
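+        // Store the low word at ptr, then bump ptr by 4 to store the high word.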
+ store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
+ masm.append(mir->access(), store.getOffset(), masm.framePushed());
+
+ masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+ store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
+ masm.append(mir->access(), store.getOffset(), masm.framePushed());
+ } else {
+ AnyRegister value = ToAnyRegister(lir->getOperand(lir->ValueIndex));
+ if (value.isFloat()) {
+ ScratchRegisterScope scratch(masm);
+ FloatRegister val = value.fpu();
+ MOZ_ASSERT((byteSize == 4) == val.isSingle());
+ masm.ma_add(HeapReg, ptr, scratch);
+
+ store = masm.ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
+ masm.append(mir->access(), store.getOffset(), masm.framePushed());
+ } else {
+            bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // See AsmJSStoreHeap.
+ Register val = value.gpr();
+
+ store = masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
+ masm.append(mir->access(), store.getOffset(), masm.framePushed());
+ }
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
+{
+ emitWasmStore(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+ emitWasmStore(lir);
+}
+
+template<typename T>
+void
+CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptrCopy());
+ if (offset) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(Imm32(offset), ptr, scratch);
+ }
+
+ // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
+ masm.ma_add(HeapReg, ptr);
+
+ MIRType mirType = mir->value()->type();
+
+    masm.memoryBarrier(mir->access().barrierBefore());
+
+ Register val = ToRegister(lir->valueHelper());
+ if (IsFloatingPointType(mirType)) {
+ masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);
+ } else if (mirType == MIRType::Int64) {
+ Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex));
+ if (input.low != val)
+ masm.ma_mov(input.low, val);
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ masm.emitUnalignedStore(Min(byteSize, 4u), ptr, val);
+
+ if (byteSize > 4) {
+        // The value is a double or an int64, so the high 32 bits are stored as a
+        // second 4-byte unaligned store at offset 4.
+ if (IsFloatingPointType(mirType)) {
+ FloatRegister fp = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
+ MOZ_ASSERT(fp.isDouble());
+ ScratchRegisterScope scratch(masm);
+ masm.ma_vxfer(fp, scratch, val);
+ } else {
+ MOZ_ASSERT(mirType == MIRType::Int64);
+ masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
+ }
+ masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
+ }
+
+    masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorARM::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
+{
+ emitWasmUnalignedStore(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+ emitWasmUnalignedStore(lir);
+}
+
+void
+CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16:
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32:
+ case Scalar::Uint32: isSigned = true; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ VFPRegister vd(ToFloatRegister(ins->value()));
+ Address addr(HeapReg, ptrImm);
+ if (size == 32)
+ masm.storeFloat32(vd, addr);
+ else
+ masm.storeDouble(vd, addr);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
+ ToRegister(ins->value()), scratch, Offset, Assembler::Always);
+ }
+ } else {
+ Register ptrReg = ToRegister(ptr);
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
+ masm.append(wasm::BoundsCheck(cmp.getOffset()));
+
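+            // An out-of-bounds access skips the store entirely.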
+ cond = Assembler::Below;
+ }
+
+ if (isFloat) {
+ ScratchRegisterScope scratch(masm);
+ FloatRegister value = ToFloatRegister(ins->value());
+ if (size == 32)
+ value = value.singleOverlay();
+
+            masm.ma_vstr(value, HeapReg, ptrReg, scratch, 0, cond);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ Register value = ToRegister(ins->value());
+ masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, scratch, Offset, cond);
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type vt = mir->access().type();
+ const LAllocation* ptr = ins->ptr();
+ Register ptrReg = ToRegister(ptr);
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+
+ masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, oldval, newval, InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
+{
+ const MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldval());
+ Register newval = ToRegister(ins->newval());
+ Register tls = ToRegister(ins->tls());
+ Register instance = ToRegister(ins->getTemp(0));
+ Register viewType = ToRegister(ins->getTemp(1));
+
+ MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
+
+ masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
+ masm.ma_mov(Imm32(mir->access().type()), viewType);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(instance);
+ masm.passABIArg(viewType);
+ masm.passABIArg(ptr);
+ masm.passABIArg(oldval);
+ masm.passABIArg(newval);
+ masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg);
+}
+
+void
+CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
+{
+ const MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register tls = ToRegister(ins->tls());
+ Register instance = ToRegister(ins->getTemp(0));
+ Register viewType = ToRegister(ins->getTemp(1));
+
+ MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
+
+ masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
+ masm.ma_mov(Imm32(mir->access().type()), viewType);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(instance);
+ masm.passABIArg(viewType);
+ masm.passABIArg(ptr);
+ masm.passABIArg(value);
+ masm.callWithABI(wasm::SymbolicAddress::AtomicXchg);
+}
+
+void
+CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(mir->hasUses());
+
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant()) {
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
+ ToAnyRegister(ins->output()));
+ } else {
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ ToRegister(value), srcAddr, flagTemp, InvalidReg,
+ ToAnyRegister(ins->output()));
+ }
+}
+
+void
+CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(!mir->hasUses());
+
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
+ else
+ atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
+}
+
+void
+CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
+{
+ const MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register tls = ToRegister(ins->tls());
+ Register instance = ToRegister(ins->getTemp(0));
+ Register viewType = ToRegister(ins->getTemp(1));
+
+ masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
+ masm.move32(Imm32(mir->access().type()), viewType);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(instance);
+ masm.passABIArg(viewType);
+ masm.passABIArg(ptr);
+ masm.passABIArg(value);
+
+ switch (mir->operation()) {
+ case AtomicFetchAddOp:
+ masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAdd);
+ break;
+ case AtomicFetchSubOp:
+ masm.callWithABI(wasm::SymbolicAddress::AtomicFetchSub);
+ break;
+ case AtomicFetchAndOp:
+ masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAnd);
+ break;
+ case AtomicFetchOrOp:
+ masm.callWithABI(wasm::SymbolicAddress::AtomicFetchOr);
+ break;
+ case AtomicFetchXorOp:
+ masm.callWithABI(wasm::SymbolicAddress::AtomicFetchXor);
+ break;
+ default:
+ MOZ_CRASH("Unknown op");
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmStackArg(LWasmStackArg* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+
+ if (ins->arg()->isConstant()) {
+ masm.ma_mov(Imm32(ToInt32(ins->arg())), scratch);
+ masm.ma_str(scratch, dst, scratch2);
+ } else {
+ if (ins->arg()->isGeneralReg())
+ masm.ma_str(ToRegister(ins->arg()), dst, scratch);
+ else
+ masm.ma_vstr(ToFloatRegister(ins->arg()), dst, scratch);
+ }
+}
+
+void
+CodeGeneratorARM::visitUDiv(LUDiv* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ Label done;
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
+
+ masm.ma_udiv(lhs, rhs, output);
+
+ // Check for large unsigned result - represent as double.
+ if (!ins->mir()->isTruncated()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ // Check for non-zero remainder if not truncating to int.
+ if (!ins->mir()->canTruncateRemainder()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_mul(rhs, output, scratch);
+ masm.ma_cmp(scratch, lhs);
+ }
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ }
+
+ if (done.used())
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitUMod(LUMod* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ Label done;
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
+
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_umod(lhs, rhs, output, scratch);
+ }
+
+ // Check for large unsigned result - represent as double.
+ if (!ins->mir()->isTruncated()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ if (done.used())
+ masm.bind(&done);
+}
+
+template<class T>
+void
+CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output, Label* done,
+ LSnapshot* snapshot, T* mir)
+{
+ if (!mir)
+ return;
+ if (mir->canBeDivideByZero()) {
+ masm.as_cmp(rhs, Imm8(0));
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+ } else {
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ // Infinity|0 == 0
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(done);
+ masm.bind(&skip);
+ }
+ } else {
+ // Bailout for divide by zero
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+}
+
+void
+CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT(lhs == r0);
+ MOZ_ASSERT(rhs == r1);
+ MOZ_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod());
+ MOZ_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0);
+ MOZ_ASSERT_IF(ins->mirRaw()->isMod(), output == r1);
+
+ Label done;
+ MDiv* div = ins->mir()->isDiv() ? ins->mir()->toDiv() : nullptr;
+ MMod* mod = !div ? ins->mir()->toMod() : nullptr;
+
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::aeabi_uidivmod);
+ else
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_uidivmod));
+
+ // uidivmod returns the quotient in r0, and the remainder in r1.
+ if (div && !div->canTruncateRemainder()) {
+ MOZ_ASSERT(div->fallible());
+ masm.as_cmp(r1, Imm8(0));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ // Bailout for big unsigned results
+ if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
+ DebugOnly<bool> isFallible = (div && div->fallible()) || (mod && mod->fallible());
+ MOZ_ASSERT(isFallible);
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress* ins)
+{
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.as_add(output, base, lsl(index, mir->scale()));
+ masm.ma_add(Imm32(mir->displacement()), output, scratch);
+}
+
+void
+CodeGeneratorARM::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+
+ ScratchRegisterScope scratch(masm);
+
+ if (mir->type() == MIRType::Int32) {
+ masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()), scratch);
+ } else if (mir->type() == MIRType::Float32) {
+ VFPRegister vd(ToFloatRegister(ins->output()));
+ masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay(), scratch);
+ } else {
+ MOZ_ASSERT(mir->type() == MIRType::Double);
+ masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()), scratch);
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ Register64 output = ToOutRegister64(ins);
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), output.low, scratch);
+ masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), output.high, scratch);
+}
+
+void
+CodeGeneratorARM::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+ MIRType type = mir->value()->type();
+
+ ScratchRegisterScope scratch(masm);
+
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ if (type == MIRType::Int32) {
+ masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()), scratch);
+ } else if (type == MIRType::Float32) {
+ VFPRegister vd(ToFloatRegister(ins->value()));
+ masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr), scratch);
+ } else {
+ MOZ_ASSERT(type == MIRType::Double);
+ masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr), scratch);
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+    MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(ins->value());
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), input.low, scratch);
+ masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), input.high, scratch);
+}
+
+void
+CodeGeneratorARM::visitNegI(LNegI* ins)
+{
+ Register input = ToRegister(ins->input());
+ masm.ma_neg(input, ToRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitNegD(LNegD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ masm.ma_vneg(input, ToFloatRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitNegF(LNegF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
+}
+
+void
+CodeGeneratorARM::visitMemoryBarrier(LMemoryBarrier* ins)
+{
+ masm.memoryBarrier(ins->type());
+}
+
+void
+CodeGeneratorARM::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.code_ == FloatRegisters::s0);
+ MOZ_ASSERT(ReturnDoubleReg.code_ == FloatRegisters::s0);
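+    // ReturnDoubleReg (d0) aliases s0 and s1, so add both singles along with the double.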
+ FloatRegister s1 = {FloatRegisters::s1, VFPRegister::Single};
+ regs->add(ReturnFloat32Reg);
+ regs->add(s1);
+ regs->add(ReturnDoubleReg);
+}
+
+void
+CodeGeneratorARM::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
+{
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+ masm.wasmTruncateToInt32(input, output, fromType, mir->isUnsigned(), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorARM::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister inputDouble = input;
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ ScratchDoubleScope scratchScope(masm);
+ if (fromType == MIRType::Float32) {
+ inputDouble = ScratchDoubleReg;
+ masm.convertFloat32ToDouble(input, inputDouble);
+ }
+
+ masm.Push(input);
+
+ masm.setupUnalignedABICall(output.high);
+ masm.passABIArg(inputDouble, MoveOp::DOUBLE);
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToUint64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToInt64);
+
+ masm.Pop(input);
+
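+    // A result of 0x8000000000000000 may indicate failure; the out-of-line check
+    // inspects the input and traps if the conversion was indeed invalid.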
+ ScratchRegisterScope scratch(masm);
+ masm.ma_cmp(output.high, Imm32(0x80000000), scratch);
+ masm.as_cmp(output.low, Imm8(0x00000000), Assembler::Equal);
+ masm.ma_b(ool->entry(), Assembler::Equal);
+
+ masm.bind(ool->rejoin());
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void
+CodeGeneratorARM::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool)
+{
+ masm.outOfLineWasmTruncateToIntCheck(ool->input(), ool->fromType(), ool->toType(),
+ ool->isUnsigned(), ool->rejoin(),
+ ool->trapOffset());
+}
+
+void
+CodeGeneratorARM::visitInt64ToFloatingPointCall(LInt64ToFloatingPointCall* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ // We are free to clobber all registers, since this is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input.low);
+ regs.take(input.high);
+ Register temp = regs.takeAny();
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::Uint64ToFloatingPoint, MoveOp::DOUBLE);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::Int64ToFloatingPoint, MoveOp::DOUBLE);
+
+ MOZ_ASSERT_IF(toType == MIRType::Double, output == ReturnDoubleReg);
+ if (toType == MIRType::Float32) {
+ MOZ_ASSERT(output == ReturnFloat32Reg);
+ masm.convertDoubleToFloat32(ReturnDoubleReg, output);
+ }
+}
+
+void
+CodeGeneratorARM::visitCopySignF(LCopySignF* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.ma_vxfer(lhs, lhsi);
+ masm.ma_vxfer(rhs, rhsi);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Clear lhs's sign.
+ masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
+
+ // Keep rhs's sign.
+ masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
+
+ // Combine.
+ masm.ma_orr(lhsi, rhsi, rhsi);
+
+ masm.ma_vxfer(rhsi, output);
+}
+
+void
+CodeGeneratorARM::visitCopySignD(LCopySignD* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore, Assembler::Always, 1);
+ masm.as_vxfer(rhsi, InvalidReg, rhs, Assembler::FloatToCore, Assembler::Always, 1);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Clear lhs's sign.
+ masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
+
+ // Keep rhs's sign.
+ masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
+
+ // Combine.
+ masm.ma_orr(lhsi, rhsi, rhsi);
+
+ // Reconstruct the output.
+ masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore, Assembler::Always, 0);
+ masm.ma_vxfer(lhsi, rhsi, output);
+}
+
+void
+CodeGeneratorARM::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.move32(ToRegister(input.low()), output);
+ else
+ masm.move32(ToRegister(input.high()), output);
+}
+
+void
+CodeGeneratorARM::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister(lir->input()) == output.low);
+
+ if (lir->mir()->isUnsigned())
+ masm.ma_mov(Imm32(0), output.high);
+ else
+ masm.ma_asr(Imm32(31), output.low, output.high);
+}
+
+void
+CodeGeneratorARM::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+    // All inputs are useAtStart for a call instruction, so we cannot ask the
+    // register allocator for a non-aliasing temp; take one from the remaining
+    // registers instead.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (lir->mir()->isMod())
+ masm.xor64(output, output);
+ else
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::ModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::DivI64);
+
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+    // All inputs are useAtStart for a call instruction, so we cannot ask the
+    // register allocator for a non-aliasing temp; take one from the remaining
+    // registers instead.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::UModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::UDivI64);
+}
+
+void
+CodeGeneratorARM::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorARM::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void
+CodeGeneratorARM::visitShiftI64(LShiftI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ if (shift)
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void
+CodeGeneratorARM::visitBitOpI64(LBitOpI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOP_BITOR:
+ if (IsConstant(rhs))
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITXOR:
+ if (IsConstant(rhs))
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITAND:
+ if (IsConstant(rhs))
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorARM::visitRotateI64(LRotateI64* lir)
+{
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ masm.move64(input, output);
+ return;
+ }
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ else
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ } else {
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ else
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmStackArgI64(LWasmStackArgI64* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg()))
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ else
+ masm.store64(ToRegister64(ins->arg()), dst);
+}
+
+void
+CodeGeneratorARM::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
+ masm.as_cmp(cond, Imm8(0));
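+    // The moves below are predicated on Equal, so they only take effect when
+    // cond is 0, i.e. when falseExpr should be selected.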
+ if (falseExpr.low().isRegister()) {
+ masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC, Assembler::Equal);
+ masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC, Assembler::Equal);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(ToAddress(falseExpr.low()), out.low, scratch, Offset, Assembler::Equal);
+ masm.ma_ldr(ToAddress(falseExpr.high()), out.high, scratch, Offset, Assembler::Equal);
+ }
+}
+
+void
+CodeGeneratorARM::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.ma_vxfer(input.low, input.high, output);
+}
+
+void
+CodeGeneratorARM::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ma_vxfer(input, output.low, output.high);
+}
+
+void
+CodeGeneratorARM::visitPopcntI64(LPopcntI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToRegister(lir->getTemp(0));
+
+ masm.popcnt64(input, output, temp);
+}
+
+void
+CodeGeneratorARM::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
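+    // The count fits in 32 bits, so the high word of the result is simply zeroed.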
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorARM::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorARM::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
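+    // A 64-bit value is truthy iff either half is nonzero: test the high word
+    // first, then fall through to the low word.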
+ masm.as_cmp(input.high, Imm8(0));
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.as_cmp(input.low, Imm8(0));
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/arm/CodeGenerator-arm.h b/js/src/jit/arm/CodeGenerator-arm.h
new file mode 100644
index 000000000..e617f50eb
--- /dev/null
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -0,0 +1,336 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_CodeGenerator_arm_h
+#define jit_arm_CodeGenerator_arm_h
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorARM : public CodeGeneratorShared
+{
+ friend class MoveResolverARM;
+
+ CodeGeneratorARM* thisFromCtor() {return this;}
+
+ protected:
+ NonAssertingLabel deoptLabel_;
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
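+        // Booleans occupy only the low byte; bail out if it is zero.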
+ masm.test32(reg, Imm32(0xFF));
+ bailoutIf(Assembler::Zero, snapshot);
+ }
+
+ template<class T>
+ void generateUDivModZeroCheck(Register rhs, Register output, Label* done, LSnapshot* snapshot,
+ T* mir);
+
+ protected:
+ bool generateOutOfLineCode();
+
+ void emitRoundDouble(FloatRegister src, Register dest, Label* fail);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmUnalignedLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+ template <typename T>
+ void emitWasmUnalignedStore(T* ins);
+
+ public:
+ // Instruction visitors.
+ virtual void visitMinMaxD(LMinMaxD* ins);
+ virtual void visitMinMaxF(LMinMaxF* ins);
+ virtual void visitAbsD(LAbsD* ins);
+ virtual void visitAbsF(LAbsF* ins);
+ virtual void visitSqrtD(LSqrtD* ins);
+ virtual void visitSqrtF(LSqrtF* ins);
+ virtual void visitAddI(LAddI* ins);
+ virtual void visitSubI(LSubI* ins);
+ virtual void visitBitNotI(LBitNotI* ins);
+ virtual void visitBitOpI(LBitOpI* ins);
+
+ virtual void visitMulI(LMulI* ins);
+
+ virtual void visitDivI(LDivI* ins);
+ virtual void visitSoftDivI(LSoftDivI* ins);
+ virtual void visitDivPowTwoI(LDivPowTwoI* ins);
+ virtual void visitModI(LModI* ins);
+ virtual void visitSoftModI(LSoftModI* ins);
+ virtual void visitModPowTwoI(LModPowTwoI* ins);
+ virtual void visitModMaskI(LModMaskI* ins);
+ virtual void visitPowHalfD(LPowHalfD* ins);
+ virtual void visitShiftI(LShiftI* ins);
+ virtual void visitShiftI64(LShiftI64* ins);
+ virtual void visitUrshD(LUrshD* ins);
+
+ virtual void visitClzI(LClzI* ins);
+ virtual void visitCtzI(LCtzI* ins);
+ virtual void visitPopcntI(LPopcntI* ins);
+
+ virtual void visitTestIAndBranch(LTestIAndBranch* test);
+ virtual void visitCompare(LCompare* comp);
+ virtual void visitCompareAndBranch(LCompareAndBranch* comp);
+ virtual void visitTestDAndBranch(LTestDAndBranch* test);
+ virtual void visitTestFAndBranch(LTestFAndBranch* test);
+ virtual void visitCompareD(LCompareD* comp);
+ virtual void visitCompareF(LCompareF* comp);
+ virtual void visitCompareDAndBranch(LCompareDAndBranch* comp);
+ virtual void visitCompareFAndBranch(LCompareFAndBranch* comp);
+ virtual void visitCompareB(LCompareB* lir);
+ virtual void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ virtual void visitCompareBitwise(LCompareBitwise* lir);
+ virtual void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ virtual void visitBitAndAndBranch(LBitAndAndBranch* baab);
+ virtual void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ virtual void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ virtual void visitNotI(LNotI* ins);
+ virtual void visitNotD(LNotD* ins);
+ virtual void visitNotF(LNotF* ins);
+
+ virtual void visitMathD(LMathD* math);
+ virtual void visitMathF(LMathF* math);
+ virtual void visitFloor(LFloor* lir);
+ virtual void visitFloorF(LFloorF* lir);
+ virtual void visitCeil(LCeil* lir);
+ virtual void visitCeilF(LCeilF* lir);
+ virtual void visitRound(LRound* lir);
+ virtual void visitRoundF(LRoundF* lir);
+ virtual void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ virtual void visitTruncateFToInt32(LTruncateFToInt32* ins);
+
+ virtual void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ virtual void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ virtual void visitAddI64(LAddI64* lir);
+ virtual void visitSubI64(LSubI64* lir);
+ virtual void visitMulI64(LMulI64* lir);
+ virtual void visitDivOrModI64(LDivOrModI64* lir);
+ virtual void visitUDivOrModI64(LUDivOrModI64* lir);
+ virtual void visitCompareI64(LCompareI64* lir);
+ virtual void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ virtual void visitBitOpI64(LBitOpI64* lir);
+ virtual void visitRotateI64(LRotateI64* lir);
+ virtual void visitWasmStackArgI64(LWasmStackArgI64* lir);
+ virtual void visitWasmSelectI64(LWasmSelectI64* lir);
+ virtual void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ virtual void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ virtual void visitPopcntI64(LPopcntI64* ins);
+ virtual void visitClzI64(LClzI64* ins);
+ virtual void visitCtzI64(LCtzI64* ins);
+ virtual void visitNotI64(LNotI64* ins);
+ virtual void visitWasmTruncateToInt64(LWasmTruncateToInt64* ins);
+ virtual void visitInt64ToFloatingPointCall(LInt64ToFloatingPointCall* lir);
+ virtual void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand& value);
+
+ void divICommon(MDiv* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
+ Label& done);
+ void modICommon(MMod* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
+ Label& done);
+
+ public:
+ CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitBox(LBox* box);
+ void visitBoxFloatingPoint(LBoxFloatingPoint* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitValue(LValue* value);
+ void visitDouble(LDouble* ins);
+ void visitFloat32(LFloat32* ins);
+
+ void visitGuardShape(LGuardShape* guard);
+ void visitGuardObjectGroup(LGuardObjectGroup* guard);
+ void visitGuardClass(LGuardClass* guard);
+
+ void visitNegI(LNegI* lir);
+ void visitNegD(LNegD* lir);
+ void visitNegF(LNegF* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
+ void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
+ void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
+ void visitWasmSelect(LWasmSelect* ins);
+ void visitWasmReinterpret(LWasmReinterpret* ins);
+ void emitWasmCall(LWasmCallBase* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
+ void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* ins);
+ void visitWasmAddOffset(LWasmAddOffset* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
+ void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+ void visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins);
+ void visitWasmStackArg(LWasmStackArg* ins);
+ void visitWasmTruncateToInt32(LWasmTruncateToInt32* ins);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+ void visitCopySignD(LCopySignD* ins);
+ void visitCopySignF(LCopySignF* ins);
+
+ void visitMemoryBarrier(LMemoryBarrier* ins);
+
+ void generateInvalidateEpilogue();
+
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register outTemp,
+ AnyRegister output);
+
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp);
+
+ protected:
+ void visitEffectiveAddress(LEffectiveAddress* ins);
+ void visitUDiv(LUDiv* ins);
+ void visitUMod(LUMod* ins);
+ void visitSoftUDivOrMod(LSoftUDivOrMod* ins);
+
+ public:
+ // Unimplemented SIMD instructions
+ void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); }
+ void visitSimd128Float(LSimd128Float* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdReinterpretCast(LSimdReinterpretCast* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementI(LSimdExtractElementI* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementF(LSimdExtractElementF* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleI(LSimdGeneralShuffleI* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleF(LSimdGeneralShuffleF* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdSwizzleI(LSimdSwizzleI* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdSwizzleF(LSimdSwizzleF* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir) { MOZ_CRASH("NYI"); }
+};
+
+typedef CodeGeneratorARM CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM>
+{
+ protected: // Silence Clang warning.
+ LSnapshot* snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+ : snapshot_(snapshot),
+ frameSize_(frameSize)
+ { }
+
+ void accept(CodeGeneratorARM* codegen);
+
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_CodeGenerator_arm_h */
diff --git a/js/src/jit/arm/DoubleEntryTable.tbl b/js/src/jit/arm/DoubleEntryTable.tbl
new file mode 100644
index 000000000..2e9e8c4a3
--- /dev/null
+++ b/js/src/jit/arm/DoubleEntryTable.tbl
@@ -0,0 +1,257 @@
+/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encode-table.py. */
+ { 0x40000000, { 0, 0, 0 } },
+ { 0x40010000, { 1, 0, 0 } },
+ { 0x40020000, { 2, 0, 0 } },
+ { 0x40030000, { 3, 0, 0 } },
+ { 0x40040000, { 4, 0, 0 } },
+ { 0x40050000, { 5, 0, 0 } },
+ { 0x40060000, { 6, 0, 0 } },
+ { 0x40070000, { 7, 0, 0 } },
+ { 0x40080000, { 8, 0, 0 } },
+ { 0x40090000, { 9, 0, 0 } },
+ { 0x400a0000, { 10, 0, 0 } },
+ { 0x400b0000, { 11, 0, 0 } },
+ { 0x400c0000, { 12, 0, 0 } },
+ { 0x400d0000, { 13, 0, 0 } },
+ { 0x400e0000, { 14, 0, 0 } },
+ { 0x400f0000, { 15, 0, 0 } },
+ { 0x40100000, { 0, 1, 0 } },
+ { 0x40110000, { 1, 1, 0 } },
+ { 0x40120000, { 2, 1, 0 } },
+ { 0x40130000, { 3, 1, 0 } },
+ { 0x40140000, { 4, 1, 0 } },
+ { 0x40150000, { 5, 1, 0 } },
+ { 0x40160000, { 6, 1, 0 } },
+ { 0x40170000, { 7, 1, 0 } },
+ { 0x40180000, { 8, 1, 0 } },
+ { 0x40190000, { 9, 1, 0 } },
+ { 0x401a0000, { 10, 1, 0 } },
+ { 0x401b0000, { 11, 1, 0 } },
+ { 0x401c0000, { 12, 1, 0 } },
+ { 0x401d0000, { 13, 1, 0 } },
+ { 0x401e0000, { 14, 1, 0 } },
+ { 0x401f0000, { 15, 1, 0 } },
+ { 0x40200000, { 0, 2, 0 } },
+ { 0x40210000, { 1, 2, 0 } },
+ { 0x40220000, { 2, 2, 0 } },
+ { 0x40230000, { 3, 2, 0 } },
+ { 0x40240000, { 4, 2, 0 } },
+ { 0x40250000, { 5, 2, 0 } },
+ { 0x40260000, { 6, 2, 0 } },
+ { 0x40270000, { 7, 2, 0 } },
+ { 0x40280000, { 8, 2, 0 } },
+ { 0x40290000, { 9, 2, 0 } },
+ { 0x402a0000, { 10, 2, 0 } },
+ { 0x402b0000, { 11, 2, 0 } },
+ { 0x402c0000, { 12, 2, 0 } },
+ { 0x402d0000, { 13, 2, 0 } },
+ { 0x402e0000, { 14, 2, 0 } },
+ { 0x402f0000, { 15, 2, 0 } },
+ { 0x40300000, { 0, 3, 0 } },
+ { 0x40310000, { 1, 3, 0 } },
+ { 0x40320000, { 2, 3, 0 } },
+ { 0x40330000, { 3, 3, 0 } },
+ { 0x40340000, { 4, 3, 0 } },
+ { 0x40350000, { 5, 3, 0 } },
+ { 0x40360000, { 6, 3, 0 } },
+ { 0x40370000, { 7, 3, 0 } },
+ { 0x40380000, { 8, 3, 0 } },
+ { 0x40390000, { 9, 3, 0 } },
+ { 0x403a0000, { 10, 3, 0 } },
+ { 0x403b0000, { 11, 3, 0 } },
+ { 0x403c0000, { 12, 3, 0 } },
+ { 0x403d0000, { 13, 3, 0 } },
+ { 0x403e0000, { 14, 3, 0 } },
+ { 0x403f0000, { 15, 3, 0 } },
+ { 0x3fc00000, { 0, 4, 0 } },
+ { 0x3fc10000, { 1, 4, 0 } },
+ { 0x3fc20000, { 2, 4, 0 } },
+ { 0x3fc30000, { 3, 4, 0 } },
+ { 0x3fc40000, { 4, 4, 0 } },
+ { 0x3fc50000, { 5, 4, 0 } },
+ { 0x3fc60000, { 6, 4, 0 } },
+ { 0x3fc70000, { 7, 4, 0 } },
+ { 0x3fc80000, { 8, 4, 0 } },
+ { 0x3fc90000, { 9, 4, 0 } },
+ { 0x3fca0000, { 10, 4, 0 } },
+ { 0x3fcb0000, { 11, 4, 0 } },
+ { 0x3fcc0000, { 12, 4, 0 } },
+ { 0x3fcd0000, { 13, 4, 0 } },
+ { 0x3fce0000, { 14, 4, 0 } },
+ { 0x3fcf0000, { 15, 4, 0 } },
+ { 0x3fd00000, { 0, 5, 0 } },
+ { 0x3fd10000, { 1, 5, 0 } },
+ { 0x3fd20000, { 2, 5, 0 } },
+ { 0x3fd30000, { 3, 5, 0 } },
+ { 0x3fd40000, { 4, 5, 0 } },
+ { 0x3fd50000, { 5, 5, 0 } },
+ { 0x3fd60000, { 6, 5, 0 } },
+ { 0x3fd70000, { 7, 5, 0 } },
+ { 0x3fd80000, { 8, 5, 0 } },
+ { 0x3fd90000, { 9, 5, 0 } },
+ { 0x3fda0000, { 10, 5, 0 } },
+ { 0x3fdb0000, { 11, 5, 0 } },
+ { 0x3fdc0000, { 12, 5, 0 } },
+ { 0x3fdd0000, { 13, 5, 0 } },
+ { 0x3fde0000, { 14, 5, 0 } },
+ { 0x3fdf0000, { 15, 5, 0 } },
+ { 0x3fe00000, { 0, 6, 0 } },
+ { 0x3fe10000, { 1, 6, 0 } },
+ { 0x3fe20000, { 2, 6, 0 } },
+ { 0x3fe30000, { 3, 6, 0 } },
+ { 0x3fe40000, { 4, 6, 0 } },
+ { 0x3fe50000, { 5, 6, 0 } },
+ { 0x3fe60000, { 6, 6, 0 } },
+ { 0x3fe70000, { 7, 6, 0 } },
+ { 0x3fe80000, { 8, 6, 0 } },
+ { 0x3fe90000, { 9, 6, 0 } },
+ { 0x3fea0000, { 10, 6, 0 } },
+ { 0x3feb0000, { 11, 6, 0 } },
+ { 0x3fec0000, { 12, 6, 0 } },
+ { 0x3fed0000, { 13, 6, 0 } },
+ { 0x3fee0000, { 14, 6, 0 } },
+ { 0x3fef0000, { 15, 6, 0 } },
+ { 0x3ff00000, { 0, 7, 0 } },
+ { 0x3ff10000, { 1, 7, 0 } },
+ { 0x3ff20000, { 2, 7, 0 } },
+ { 0x3ff30000, { 3, 7, 0 } },
+ { 0x3ff40000, { 4, 7, 0 } },
+ { 0x3ff50000, { 5, 7, 0 } },
+ { 0x3ff60000, { 6, 7, 0 } },
+ { 0x3ff70000, { 7, 7, 0 } },
+ { 0x3ff80000, { 8, 7, 0 } },
+ { 0x3ff90000, { 9, 7, 0 } },
+ { 0x3ffa0000, { 10, 7, 0 } },
+ { 0x3ffb0000, { 11, 7, 0 } },
+ { 0x3ffc0000, { 12, 7, 0 } },
+ { 0x3ffd0000, { 13, 7, 0 } },
+ { 0x3ffe0000, { 14, 7, 0 } },
+ { 0x3fff0000, { 15, 7, 0 } },
+ { 0xc0000000, { 0, 8, 0 } },
+ { 0xc0010000, { 1, 8, 0 } },
+ { 0xc0020000, { 2, 8, 0 } },
+ { 0xc0030000, { 3, 8, 0 } },
+ { 0xc0040000, { 4, 8, 0 } },
+ { 0xc0050000, { 5, 8, 0 } },
+ { 0xc0060000, { 6, 8, 0 } },
+ { 0xc0070000, { 7, 8, 0 } },
+ { 0xc0080000, { 8, 8, 0 } },
+ { 0xc0090000, { 9, 8, 0 } },
+ { 0xc00a0000, { 10, 8, 0 } },
+ { 0xc00b0000, { 11, 8, 0 } },
+ { 0xc00c0000, { 12, 8, 0 } },
+ { 0xc00d0000, { 13, 8, 0 } },
+ { 0xc00e0000, { 14, 8, 0 } },
+ { 0xc00f0000, { 15, 8, 0 } },
+ { 0xc0100000, { 0, 9, 0 } },
+ { 0xc0110000, { 1, 9, 0 } },
+ { 0xc0120000, { 2, 9, 0 } },
+ { 0xc0130000, { 3, 9, 0 } },
+ { 0xc0140000, { 4, 9, 0 } },
+ { 0xc0150000, { 5, 9, 0 } },
+ { 0xc0160000, { 6, 9, 0 } },
+ { 0xc0170000, { 7, 9, 0 } },
+ { 0xc0180000, { 8, 9, 0 } },
+ { 0xc0190000, { 9, 9, 0 } },
+ { 0xc01a0000, { 10, 9, 0 } },
+ { 0xc01b0000, { 11, 9, 0 } },
+ { 0xc01c0000, { 12, 9, 0 } },
+ { 0xc01d0000, { 13, 9, 0 } },
+ { 0xc01e0000, { 14, 9, 0 } },
+ { 0xc01f0000, { 15, 9, 0 } },
+ { 0xc0200000, { 0, 10, 0 } },
+ { 0xc0210000, { 1, 10, 0 } },
+ { 0xc0220000, { 2, 10, 0 } },
+ { 0xc0230000, { 3, 10, 0 } },
+ { 0xc0240000, { 4, 10, 0 } },
+ { 0xc0250000, { 5, 10, 0 } },
+ { 0xc0260000, { 6, 10, 0 } },
+ { 0xc0270000, { 7, 10, 0 } },
+ { 0xc0280000, { 8, 10, 0 } },
+ { 0xc0290000, { 9, 10, 0 } },
+ { 0xc02a0000, { 10, 10, 0 } },
+ { 0xc02b0000, { 11, 10, 0 } },
+ { 0xc02c0000, { 12, 10, 0 } },
+ { 0xc02d0000, { 13, 10, 0 } },
+ { 0xc02e0000, { 14, 10, 0 } },
+ { 0xc02f0000, { 15, 10, 0 } },
+ { 0xc0300000, { 0, 11, 0 } },
+ { 0xc0310000, { 1, 11, 0 } },
+ { 0xc0320000, { 2, 11, 0 } },
+ { 0xc0330000, { 3, 11, 0 } },
+ { 0xc0340000, { 4, 11, 0 } },
+ { 0xc0350000, { 5, 11, 0 } },
+ { 0xc0360000, { 6, 11, 0 } },
+ { 0xc0370000, { 7, 11, 0 } },
+ { 0xc0380000, { 8, 11, 0 } },
+ { 0xc0390000, { 9, 11, 0 } },
+ { 0xc03a0000, { 10, 11, 0 } },
+ { 0xc03b0000, { 11, 11, 0 } },
+ { 0xc03c0000, { 12, 11, 0 } },
+ { 0xc03d0000, { 13, 11, 0 } },
+ { 0xc03e0000, { 14, 11, 0 } },
+ { 0xc03f0000, { 15, 11, 0 } },
+ { 0xbfc00000, { 0, 12, 0 } },
+ { 0xbfc10000, { 1, 12, 0 } },
+ { 0xbfc20000, { 2, 12, 0 } },
+ { 0xbfc30000, { 3, 12, 0 } },
+ { 0xbfc40000, { 4, 12, 0 } },
+ { 0xbfc50000, { 5, 12, 0 } },
+ { 0xbfc60000, { 6, 12, 0 } },
+ { 0xbfc70000, { 7, 12, 0 } },
+ { 0xbfc80000, { 8, 12, 0 } },
+ { 0xbfc90000, { 9, 12, 0 } },
+ { 0xbfca0000, { 10, 12, 0 } },
+ { 0xbfcb0000, { 11, 12, 0 } },
+ { 0xbfcc0000, { 12, 12, 0 } },
+ { 0xbfcd0000, { 13, 12, 0 } },
+ { 0xbfce0000, { 14, 12, 0 } },
+ { 0xbfcf0000, { 15, 12, 0 } },
+ { 0xbfd00000, { 0, 13, 0 } },
+ { 0xbfd10000, { 1, 13, 0 } },
+ { 0xbfd20000, { 2, 13, 0 } },
+ { 0xbfd30000, { 3, 13, 0 } },
+ { 0xbfd40000, { 4, 13, 0 } },
+ { 0xbfd50000, { 5, 13, 0 } },
+ { 0xbfd60000, { 6, 13, 0 } },
+ { 0xbfd70000, { 7, 13, 0 } },
+ { 0xbfd80000, { 8, 13, 0 } },
+ { 0xbfd90000, { 9, 13, 0 } },
+ { 0xbfda0000, { 10, 13, 0 } },
+ { 0xbfdb0000, { 11, 13, 0 } },
+ { 0xbfdc0000, { 12, 13, 0 } },
+ { 0xbfdd0000, { 13, 13, 0 } },
+ { 0xbfde0000, { 14, 13, 0 } },
+ { 0xbfdf0000, { 15, 13, 0 } },
+ { 0xbfe00000, { 0, 14, 0 } },
+ { 0xbfe10000, { 1, 14, 0 } },
+ { 0xbfe20000, { 2, 14, 0 } },
+ { 0xbfe30000, { 3, 14, 0 } },
+ { 0xbfe40000, { 4, 14, 0 } },
+ { 0xbfe50000, { 5, 14, 0 } },
+ { 0xbfe60000, { 6, 14, 0 } },
+ { 0xbfe70000, { 7, 14, 0 } },
+ { 0xbfe80000, { 8, 14, 0 } },
+ { 0xbfe90000, { 9, 14, 0 } },
+ { 0xbfea0000, { 10, 14, 0 } },
+ { 0xbfeb0000, { 11, 14, 0 } },
+ { 0xbfec0000, { 12, 14, 0 } },
+ { 0xbfed0000, { 13, 14, 0 } },
+ { 0xbfee0000, { 14, 14, 0 } },
+ { 0xbfef0000, { 15, 14, 0 } },
+ { 0xbff00000, { 0, 15, 0 } },
+ { 0xbff10000, { 1, 15, 0 } },
+ { 0xbff20000, { 2, 15, 0 } },
+ { 0xbff30000, { 3, 15, 0 } },
+ { 0xbff40000, { 4, 15, 0 } },
+ { 0xbff50000, { 5, 15, 0 } },
+ { 0xbff60000, { 6, 15, 0 } },
+ { 0xbff70000, { 7, 15, 0 } },
+ { 0xbff80000, { 8, 15, 0 } },
+ { 0xbff90000, { 9, 15, 0 } },
+ { 0xbffa0000, { 10, 15, 0 } },
+ { 0xbffb0000, { 11, 15, 0 } },
+ { 0xbffc0000, { 12, 15, 0 } },
+ { 0xbffd0000, { 13, 15, 0 } },
+ { 0xbffe0000, { 14, 15, 0 } },
+ { 0xbfff0000, { 15, 15, 0 } },
diff --git a/js/src/jit/arm/LIR-arm.h b/js/src/jit/arm/LIR-arm.h
new file mode 100644
index 000000000..c498bf28e
--- /dev/null
+++ b/js/src/jit/arm/LIR-arm.h
@@ -0,0 +1,710 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_LIR_arm_h
+#define jit_arm_LIR_arm_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* payload() {
+ return getOperand(0);
+ }
+ const LAllocation* type() {
+ return getOperand(1);
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : type_(type)
+ {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+// LSoftDivI is a software divide for ARM cores that don't support a hardware
+// divide instruction.
+//
+// It is implemented as a proper C function so it trashes r0, r1, r2 and r3.
+// The call also trashes lr, and has the ability to trash ip. The function also
+// takes two arguments (dividend in r0, divisor in r1). The LInstruction gets
+// encoded such that the divisor and dividend are passed in their appropriate
+// registers and end their life at the start of the instruction by the use of
+// useFixedAtStart. The result is returned in r0 and the other three registers
+// that can be trashed are marked as temps. For the time being, the link
+// register is not marked as trashed because we never allocate to the link
+// register. The FP registers are not trashed.
+class LSoftDivI : public LBinaryMath<3>
+{
+ public:
+ LIR_HEADER(SoftDivI);
+
+ LSoftDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
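+
+// Roughly, the protocol described above (assuming the usual ARM EABI helper,
+// e.g. __aeabi_idivmod, is what the code generator ends up calling):
+//
+//   r0 = dividend, r1 = divisor
+//   <call the divide helper>   -- clobbers r0-r3; may also use ip and lr
+//   r0 = quotient (the LSoftDivI result); r1 = remainder (see LSoftModI below)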
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+
+ int32_t shift() {
+ return shift_;
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(0);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LSoftModI : public LBinaryMath<4>
+{
+ public:
+ LIR_HEADER(SoftModI);
+
+ LSoftModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ setTemp(3, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(3);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift()
+ {
+ return shift_;
+ }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp1, const LDefinition& temp2,
+ int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// Takes a tableswitch with an integer to decide.
+class LTableSwitch : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy, MTableSwitch* ins) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() {
+ return nullptr;
+ }
+};
+
+// Takes a tableswitch with a boxed value to decide.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
+{
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, MTableSwitch* ins)
+ {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition* tempPointer() {
+ return nullptr;
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardShape);
+
+ LGuardShape(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardShape* mir() const {
+ return mir_->toGuardShape();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardObjectGroup);
+
+ LGuardObjectGroup(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardObjectGroup* mir() const {
+ return mir_->toGuardObjectGroup();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LMulI : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(MulI);
+
+ MMul* mir() {
+ return mir_->toMul();
+ }
+};
+
+class LUDiv : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UDiv);
+
+ MDiv* mir() {
+ return mir_->toDiv();
+ }
+};
+
+class LUMod : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UMod);
+
+ MMod* mir() {
+ return mir_->toMod();
+ }
+};
+
+class LSoftUDivOrMod : public LBinaryMath<3>
+{
+ public:
+ LIR_HEADER(SoftUDivOrMod);
+
+ LSoftUDivOrMod(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ MInstruction* mir() {
+ return mir_->toInstruction();
+ }
+};
+
+class LAsmJSCompareExchangeCallout : public LCallInstructionHelper<1, 4, 2>
+{
+ public:
+ LIR_HEADER(AsmJSCompareExchangeCallout)
+ LAsmJSCompareExchangeCallout(const LAllocation& ptr, const LAllocation& oldval,
+ const LAllocation& newval, const LAllocation& tls,
+ const LDefinition& temp1, const LDefinition& temp2)
+ {
+ setOperand(0, ptr);
+ setOperand(1, oldval);
+ setOperand(2, newval);
+ setOperand(3, tls);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* oldval() {
+ return getOperand(1);
+ }
+ const LAllocation* newval() {
+ return getOperand(2);
+ }
+ const LAllocation* tls() {
+ return getOperand(3);
+ }
+
+ const MAsmJSCompareExchangeHeap* mir() const {
+ return mir_->toAsmJSCompareExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicExchangeCallout : public LCallInstructionHelper<1, 3, 2>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicExchangeCallout)
+
+ LAsmJSAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& tls, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, tls);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LAllocation* tls() {
+ return getOperand(2);
+ }
+
+ const MAsmJSAtomicExchangeHeap* mir() const {
+ return mir_->toAsmJSAtomicExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicBinopCallout : public LCallInstructionHelper<1, 3, 2>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicBinopCallout)
+ LAsmJSAtomicBinopCallout(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& tls, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, tls);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LAllocation* tls() {
+ return getOperand(2);
+ }
+
+ const MAsmJSAtomicBinopHeap* mir() const {
+ return mir_->toAsmJSAtomicBinopHeap();
+ }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in)
+ {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+};
+
+class LInt64ToFloatingPointCall: public LCallInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Int64ToFloatingPointCall);
+
+ MInt64ToFloatingPoint* mir() const {
+ return mir_->toInt64ToFloatingPoint();
+ }
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 4>
+{
+ public:
+ typedef LWasmLoadBase<NumDefs, 4> Base;
+ explicit LWasmUnalignedLoadBase(const LAllocation& ptr, const LDefinition& ptrCopy,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : Base(ptr)
+ {
+ Base::setTemp(0, ptrCopy);
+ Base::setTemp(1, temp1);
+ Base::setTemp(2, temp2);
+ Base::setTemp(3, temp3);
+ }
+
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1>
+{
+ public:
+ explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& ptrCopy,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LWasmUnalignedLoadBase(ptr, ptrCopy, temp1, temp2, temp3)
+ {}
+ LIR_HEADER(WasmUnalignedLoad);
+};
+
+class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase<INT64_PIECES>
+{
+ public:
+ explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& ptrCopy,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LWasmUnalignedLoadBase(ptr, ptrCopy, temp1, temp2, temp3)
+ {}
+ LIR_HEADER(WasmUnalignedLoadI64);
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2>
+{
+ public:
+ typedef LInstructionHelper<0, NumOps, 2> Base;
+
+ static const uint32_t ValueIndex = 1;
+
+ LWasmUnalignedStoreBase(const LAllocation& ptr, const LDefinition& ptrCopy,
+ const LDefinition& valueHelper)
+ {
+ Base::setOperand(0, ptr);
+ Base::setTemp(0, ptrCopy);
+ Base::setTemp(1, valueHelper);
+ }
+ MWasmStore* mir() const {
+ return Base::mir_->toWasmStore();
+ }
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+ const LDefinition* valueHelper() {
+ return Base::getTemp(1);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStore);
+ LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& ptrCopy, const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, ptrCopy, valueHelper)
+ {
+ setOperand(1, value);
+ }
+};
+
+class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStoreI64);
+ LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& ptrCopy, const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, ptrCopy, valueHelper)
+ {
+ setInt64Operand(1, value);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_LIR_arm_h */
diff --git a/js/src/jit/arm/LOpcodes-arm.h b/js/src/jit/arm/LOpcodes-arm.h
new file mode 100644
index 000000000..13a4edd72
--- /dev/null
+++ b/js/src/jit/arm/LOpcodes-arm.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_LOpcodes_arm_h
+#define jit_arm_LOpcodes_arm_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(BoxFloatingPoint) \
+ _(SoftDivI) \
+ _(SoftModI) \
+ _(ModMaskI) \
+ _(UDiv) \
+ _(UMod) \
+ _(SoftUDivOrMod) \
+ _(AsmJSCompareExchangeCallout) \
+ _(AsmJSAtomicExchangeCallout) \
+ _(AsmJSAtomicBinopCallout) \
+ _(DivOrModI64) \
+ _(UDivOrModI64) \
+ _(WasmTruncateToInt64) \
+ _(WasmUnalignedLoad) \
+ _(WasmUnalignedStore) \
+ _(WasmUnalignedLoadI64) \
+ _(WasmUnalignedStoreI64) \
+ _(Int64ToFloatingPointCall)
+
+#endif /* jit_arm_LOpcodes_arm_h */
diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
new file mode 100644
index 000000000..c26680116
--- /dev/null
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -0,0 +1,1031 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LBoxAllocation
+LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+LAllocation
+LIRGeneratorARM::useByteOpRegister(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorARM::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition
+LIRGeneratorARM::tempByteOpRegister()
+{
+ return temp();
+}
+
+void
+LIRGeneratorARM::visitBox(MBox* box)
+{
+ MDefinition* inner = box->getOperand(0);
+
+    // If the box wrapped a floating-point value, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner), tempCopy(inner, 0),
+ inner->type()), box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new(alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorARM::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* inner = unbox->getOperand(0);
+
+ if (inner->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ // An unbox on arm reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void
+LIRGeneratorARM::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void
+LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void
+LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+// x = !y
+void
+LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
+{
+ ins->setOperand(0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void
+LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+    // Some operations, e.g. MulI, need to check their inputs after writing the
+    // result, but only on bailout paths, so useAtStart is safe when there is
+    // no snapshot.
+ ins->setOperand(0, ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
+ ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs) :
+ useRegisterOrConstantAtStart(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void
+LIRGeneratorARM::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorARM::visitMulI64
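+        // Constants in [-1, 2] and exact powers of two are handled there with
+        // moves, negations, adds or shifts, so no temp register is needed.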
+ if (constant >= -1 && constant <= 2)
+ needsTemp = false;
+ if (int64_t(1) << shift == constant)
+ needsTemp = false;
+ }
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp)
+ ins->setTemp(0, temp());
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
+{
+ ins->setOperand(0, useRegisterAtStart(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template<size_t Temps>
+void
+LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, useRegisterAtStart(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorARM::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void
+LIRGeneratorARM::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+
+void
+LIRGeneratorARM::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void
+LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template<size_t Temps>
+void
+LIRGeneratorARM::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ if (mir->isRotate() && !rhs->isConstant())
+ ins->setTemp(0, temp());
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorARM::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorARM::lowerDivI(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
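+        // For example, x / 8 is lowered (via LDivPowTwoI below) to a
+        // sign-adjusted arithmetic right shift by 3.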
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+ return;
+ }
+ }
+
+ if (HasIDIV()) {
+ LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+ return;
+ }
+
+ LSoftDivI* lir = new(alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0), useFixedAtStart(div->rhs(), r1),
+ tempFixed(r1), tempFixed(r2), tempFixed(r3));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(r0)));
+}
+
+void
+LIRGeneratorARM::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
+{
+ LMulI* lir = new(alloc()) LMulI;
+ if (mul->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void
+LIRGeneratorARM::lowerModI(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ }
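+        // rhs == 2^(shift+1) - 1: LModMaskI can compute the remainder by
+        // masking, without a divide instruction.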
+ if (shift < 31 && (1 << (shift+1)) - 1 == rhs) {
+ MOZ_ASSERT(rhs);
+ LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift+1);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ }
+ }
+
+ if (HasIDIV()) {
+ LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ }
+
+ LSoftModI* lir = new(alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0), useFixedAtStart(mod->rhs(), r1),
+ tempFixed(r0), tempFixed(r2), tempFixed(r3),
+ temp(LDefinition::GENERAL));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
+}
+
+void
+LIRGeneratorARM::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorARM::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorARM::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorARM::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorARM::visitPowHalf(MPowHalf* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+LTableSwitch*
+LIRGeneratorARM::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitch(in, inputCopy, tableswitch);
+}
+
+LTableSwitchV*
+LIRGeneratorARM::newLTableSwitchV(MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)),
+ temp(), tempDouble(), tableswitch);
+}
+
+void
+LIRGeneratorARM::visitGuardShape(MGuardShape* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorARM::visitGuardObjectGroup(MGuardObjectGroup* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorARM::lowerUrshD(MUrsh* mir)
+{
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void
+LIRGeneratorARM::visitWasmSelect(MWasmSelect* ins)
+{
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
+ useInt64(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmSelect(useRegisterAtStart(ins->trueExpr()),
+ useRegister(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
+}
+
+void
+LIRGeneratorARM::visitAsmJSNeg(MAsmJSNeg* ins)
+{
+ if (ins->type() == MIRType::Int32) {
+ define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void
+LIRGeneratorARM::lowerUDiv(MDiv* div)
+{
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
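+ // With hardware integer division (HasIDIV) we can use a udiv instruction;
+ // otherwise fall back to the soft-division callout, which pins its
+ // operands and temps to r0-r3.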
+ if (HasIDIV()) {
+ LUDiv* lir = new(alloc()) LUDiv;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+ } else {
+ LSoftUDivOrMod* lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
+ tempFixed(r1), tempFixed(r2), tempFixed(r3));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(r0)));
+ }
+}
+
+void
+LIRGeneratorARM::lowerUMod(MMod* mod)
+{
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ if (HasIDIV()) {
+ LUMod* lir = new(alloc()) LUMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ } else {
+ LSoftUDivOrMod* lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
+ tempFixed(r0), tempFixed(r2), tempFixed(r3));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
+ }
+}
+
+void
+LIRGeneratorARM::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->access().isUnaligned()) {
+ // Unaligned access expected! Fall back to byte-wise loads.
+ LDefinition ptrCopy = tempCopy(base, 0);
+
+ LDefinition noTemp = LDefinition::BogusTemp();
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, ptrCopy, temp(), noTemp, noTemp);
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition temp2 = noTemp;
+ LDefinition temp3 = noTemp;
+ if (IsFloatingPointType(ins->type())) {
+ // For putting the low value in a GPR.
+ temp2 = temp();
+ // For putting the high value in a GPR.
+ if (ins->type() == MIRType::Double)
+ temp3 = temp();
+ }
+
+ auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, ptrCopy, temp(), temp2, temp3);
+ define(lir, ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmLoadI64(ptr);
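+ // A nonzero offset, or the second word of an Int64 access, requires
+ // adjusting the pointer, so give the codegen a scratch copy of the base.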
+ if (ins->access().offset() || ins->access().type() == Scalar::Int64)
+ lir->setTemp(0, tempCopy(base, 0));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmLoad(ptr);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->access().isUnaligned()) {
+ // Unaligned access expected! Fall back to byte-wise stores.
+ LDefinition ptrCopy = tempCopy(base, 0);
+
+ MIRType valueType = ins->value()->type();
+ if (valueType == MIRType::Int64) {
+ LInt64Allocation value = useInt64RegisterAtStart(ins->value());
+ auto* lir = new(alloc()) LWasmUnalignedStoreI64(ptr, value, ptrCopy, temp());
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation value = useRegisterAtStart(ins->value());
+ LDefinition valueHelper = IsFloatingPointType(valueType)
+ ? temp() // to do a FPU -> GPR move.
+ : tempCopy(base, 1); // to clobber the value.
+
+ auto* lir = new(alloc()) LWasmUnalignedStore(ptr, value, ptrCopy, valueHelper);
+ add(lir, ins);
+ return;
+ }
+
+ if (ins->value()->type() == MIRType::Int64) {
+ LInt64Allocation value = useInt64RegisterAtStart(ins->value());
+ auto* lir = new(alloc()) LWasmStoreI64(ptr, value);
+ if (ins->access().offset() || ins->access().type() == Scalar::Int64)
+ lir->setTemp(0, tempCopy(base, 0));
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation value = useRegisterAtStart(ins->value());
+ auto* lir = new(alloc()) LWasmStore(ptr, value);
+
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MOZ_ASSERT(ins->offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // On ARM it is best to keep the 'base' in a register if a bounds check is needed.
+ LAllocation baseAlloc;
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ // A bounds check is only skipped for a non-negative index.
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ }
+
+ define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+}
+
+void
+LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MOZ_ASSERT(ins->offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ }
+
+ add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
+}
+
+void
+LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorARM::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(HasLDSTREXBHD());
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating-point register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ const LAllocation value = useRegister(ins->value());
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ tempDef = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+ const LAllocation value = useRegister(ins->value());
+
+ if (!ins->hasUses()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
+ /* flagTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+ //
+ // Optimization opportunity (bug 1077317): We can do better by
+ // allowing 'value' to remain as an imm32 if it is small enough to
+ // fit in an instruction.
+
+ LDefinition flagTemp = temp();
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ outTemp = temp();
+
+ // On ARM, map flagTemp to temp1 and outTemp to temp2, at least for now.
+
+ LAtomicTypedArrayElementBinop* lir =
+ new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp);
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating-point register then we need a temp at the
+ // CodeGenerator level for creating the result.
+ //
+ // Optimization opportunity (bug 1077317): We could do better by
+ // allowing oldval to remain an immediate, if it is small enough
+ // to fit in an instruction.
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ tempDef = temp();
+
+ LCompareExchangeTypedArrayElement* lir =
+ new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
+ LAsmJSCompareExchangeCallout* lir =
+ new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
+ useRegisterAtStart(ins->oldValue()),
+ useRegisterAtStart(ins->newValue()),
+ useFixed(ins->tls(), WasmTlsReg),
+ temp(), temp());
+ defineReturn(lir, ins);
+ return;
+ }
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
+ useRegister(ins->oldValue()),
+ useRegister(ins->newValue()));
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ const LAllocation base = useRegisterAtStart(ins->base());
+ const LAllocation value = useRegisterAtStart(ins->value());
+
+ if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
+ // Call out on ARMv6.
+ defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value,
+ useFixed(ins->tls(), WasmTlsReg),
+ temp(), temp()), ins);
+ return;
+ }
+
+ define(new(alloc()) LAsmJSAtomicExchangeHeap(base, value), ins);
+}
+
+void
+LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
+ LAsmJSAtomicBinopCallout* lir =
+ new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
+ useRegisterAtStart(ins->value()),
+ useFixed(ins->tls(), WasmTlsReg),
+ temp(), temp());
+ defineReturn(lir, ins);
+ return;
+ }
+
+ if (!ins->hasUses()) {
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()),
+ /* flagTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
+ useRegister(ins->value()),
+ /* temp = */ LDefinition::BogusTemp(),
+ /* flagTemp= */ temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitSubstr(MSubstr* ins)
+{
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ temp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
+void
+LIRGeneratorARM::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineReturn(new(alloc()) LWasmTruncateToInt64(useRegisterAtStart(opd)), ins);
+}
+
+void
+LIRGeneratorARM::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Float32);
+
+ auto lir = new(alloc()) LInt64ToFloatingPointCall();
+ lir->setInt64Operand(0, useInt64RegisterAtStart(ins->input()));
+ defineReturn(lir, ins);
+}
+
+void
+LIRGeneratorARM::visitCopySign(MCopySign* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double)
+ lir = new(alloc()) LCopySignD();
+ else
+ lir = new(alloc()) LCopySignF();
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lowerForFPU(lir, ins, lhs, rhs);
+}
+
+void
+LIRGeneratorARM::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ auto* lir = new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input()));
+ defineInt64(lir, ins);
+
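+ // Pin the low half of the result to the input register (MUST_REUSE_INPUT);
+ // the high half keeps the definition created by defineInt64 above.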
+ LDefinition def(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(0);
+ def.setVirtualRegister(ins->virtualRegister());
+
+ lir->setDef(0, def);
+}
diff --git a/js/src/jit/arm/Lowering-arm.h b/js/src/jit/arm/Lowering-arm.h
new file mode 100644
index 000000000..d66ef8a22
--- /dev/null
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -0,0 +1,132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Lowering_arm_h
+#define jit_arm_Lowering_arm_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorARM : public LIRGeneratorShared
+{
+ public:
+ LIRGeneratorARM(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // x86 has constraints on what registers can be used for 1-byte
+ // stores and loads; on ARM all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ bool needTempForPostBarrier() { return false; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+ template<size_t Temps>
+ void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template<size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void visitPowHalf(MPowHalf* ins);
+ void visitAsmJSNeg(MAsmJSNeg* ins);
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void lowerPhi(MPhi* phi);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardObjectGroup(MGuardObjectGroup* ins);
+ void visitWasmSelect(MWasmSelect* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitCopySign(MCopySign* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+};
+
+typedef LIRGeneratorARM LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Lowering_arm_h */
diff --git a/js/src/jit/arm/MacroAssembler-arm-inl.h b/js/src/jit/arm/MacroAssembler-arm-inl.h
new file mode 100644
index 000000000..1b71dfb7f
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -0,0 +1,2143 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MacroAssembler_arm_inl_h
+#define jit_arm_MacroAssembler_arm_inl_h
+
+#include "jit/arm/MacroAssembler-arm.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ move32(src.low, dest.low);
+ move32(src.high, dest.high);
+}
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void
+MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
+{
+ ma_vxfer(src, dest);
+}
+
+void
+MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
+{
+ ma_vxfer(src, dest);
+}
+
+void
+MacroAssembler::move8SignExtend(Register src, Register dest)
+{
+ as_sxtb(dest, src, 0);
+}
+
+void
+MacroAssembler::move16SignExtend(Register src, Register dest)
+{
+ as_sxth(dest, src, 0);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::not32(Register reg)
+{
+ ma_mvn(reg, reg);
+}
+
+void
+MacroAssembler::and32(Register src, Register dest)
+{
+ ma_and(src, dest, SetCC);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_and(imm, dest, scratch, SetCC);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, const Address& dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_and(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void
+MacroAssembler::and32(const Address& src, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_and(scratch, dest, SetCC);
+}
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ ma_and(src, dest);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_and(imm, dest, scratch);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != int32_t(0xFFFFFFFF))
+ and32(imm.low(), dest.low);
+ if (imm.hi().value != int32_t(0xFFFFFFFF))
+ and32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ or32(imm.low(), dest.low);
+ if (imm.hi().value)
+ or32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ xor32(imm.low(), dest.low);
+ if (imm.hi().value)
+ xor32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::or32(Register src, Register dest)
+{
+ ma_orr(src, dest);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_orr(imm, dest, scratch);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, const Address& dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_orr(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ ma_orr(src, dest);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_orr(imm, dest, scratch);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ and32(src.low, dest.low);
+ and32(src.high, dest.high);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ or32(src.low, dest.low);
+ or32(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ ma_eor(src.low, dest.low);
+ ma_eor(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor32(Register src, Register dest)
+{
+ ma_eor(src, dest, SetCC);
+}
+
+void
+MacroAssembler::xor32(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_eor(imm, dest, scratch, SetCC);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ ma_eor(src, dest);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_eor(imm, dest, scratch);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::add32(Register src, Register dest)
+{
+ ma_add(src, dest, SetCC);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest, scratch, SetCC);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const Address& dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_add(imm, scratch, scratch2, SetCC);
+ ma_str(scratch, dest, scratch2);
+}
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ ma_add(src, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest, scratch);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ addPtr(Imm32(imm.value), dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_add(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_add(scratch, dest, SetCC);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ ma_add(src.low, dest.low, SetCC);
+ ma_adc(src.high, dest.high);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest.low, scratch, SetCC);
+ as_adc(dest.high, dest.high, Imm8(0), LeaveCC);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm.low(), dest.low, scratch, SetCC);
+ ma_adc(imm.hi(), dest.high, scratch, LeaveCC);
+}
+
+void
+MacroAssembler::addDouble(FloatRegister src, FloatRegister dest)
+{
+ ma_vadd(dest, src, dest);
+}
+
+void
+MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest)
+{
+ ma_vadd_f32(dest, src, dest);
+}
+
+void
+MacroAssembler::sub32(Register src, Register dest)
+{
+ ma_sub(src, dest, SetCC);
+}
+
+void
+MacroAssembler::sub32(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm, dest, scratch, SetCC);
+}
+
+void
+MacroAssembler::sub32(const Address& src, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_sub(scratch, dest, SetCC);
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ ma_sub(src, dest);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_sub(src, scratch);
+ ma_str(scratch, dest, scratch2);
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm, dest, scratch);
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(addr, scratch, scratch2);
+ ma_sub(scratch, dest);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ ma_sub(src.low, dest.low, SetCC);
+ ma_sbc(src.high, dest.high, LeaveCC);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm.low(), dest.low, scratch, SetCC);
+ ma_sbc(imm.hi(), dest.high, scratch, LeaveCC);
+}
+
+void
+MacroAssembler::subDouble(FloatRegister src, FloatRegister dest)
+{
+ ma_vsub(dest, src, dest);
+}
+
+void
+MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
+{
+ ma_vsub_f32(dest, src, dest);
+}
+
+void
+MacroAssembler::mul32(Register rhs, Register srcDest)
+{
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ ma_mov(Imm32(imm.value & 0xFFFFFFFFL), scratch);
+ as_mul(dest.high, dest.high, scratch);
+
+ // high:low = LOW(dest) * LOW(imm);
+ as_umull(scratch2, scratch, dest.low, scratch);
+
+ // HIGH(dest) += high;
+ as_add(dest.high, dest.high, O2Reg(scratch2));
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
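+ // Only a high word of 5 is supported: dest.low * 5 is computed as
+ // (dest.low << 2) + dest.low.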
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5)
+ as_add(scratch2, dest.low, lsl(dest.low, 2));
+ else
+ MOZ_CRASH("Not supported imm");
+ as_add(dest.high, dest.high, O2Reg(scratch2));
+
+ // LOW(dest) = low;
+ ma_mov(scratch, dest.low);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+ // Compute mul64
+ ScratchRegisterScope scratch(*this);
+ ma_mul(dest.high, imm.low(), dest.high, scratch); // (2)
+ ma_mul(dest.low, imm.hi(), temp, scratch); // (3)
+ ma_add(dest.high, temp, temp);
+ ma_umull(dest.low, imm.low(), dest.high, dest.low, scratch); // (4) + (1)
+ ma_add(temp, dest.high, dest.high);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest != src);
+ MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+ // Compute mul64
+ ma_mul(dest.high, src.low, dest.high); // (2)
+ ma_mul(src.high, dest.low, temp); // (3)
+ ma_add(dest.high, temp, temp);
+ ma_umull(dest.low, src.low, dest.high, dest.low); // (4) + (1)
+ ma_add(temp, dest.high, dest.high);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ as_add(dest, src, lsl(src, 1));
+}
+
+void
+MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest)
+{
+ ma_vmul_f32(dest, src, dest);
+}
+
+void
+MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ ma_vmul(dest, src, dest);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ScratchDoubleScope scratchDouble(*this);
+
+ movePtr(imm, scratch);
+ ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
+ mulDouble(scratchDouble, dest);
+}
+
+void
+MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_ASSERT(HasIDIV());
+ if (isUnsigned)
+ ma_udiv(srcDest, rhs, srcDest);
+ else
+ ma_sdiv(srcDest, rhs, srcDest);
+}
+
+void
+MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_ASSERT(HasIDIV());
+
+ ScratchRegisterScope scratch(*this);
+ if (isUnsigned)
+ ma_umod(srcDest, rhs, srcDest, scratch);
+ else
+ ma_smod(srcDest, rhs, srcDest, scratch);
+}
+
+void
+MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
+{
+ ma_vdiv_f32(dest, src, dest);
+}
+
+void
+MacroAssembler::divDouble(FloatRegister src, FloatRegister dest)
+{
+ ma_vdiv(dest, src, dest);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ ScratchRegisterScope scratch(*this);
+
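+ // Borrow r0:r1 for the 64-bit add: push them, load the value, add 1 with
+ // carry, store it back, then restore r0:r1 from the stack.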
+ ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);
+
+ ma_mov(Imm32((int32_t)dest.addr), scratch);
+ ma_ldrd(EDtrAddr(scratch, EDtrOffImm(0)), r0, r1);
+
+ as_add(r0, r0, Imm8(1), SetCC);
+ as_adc(r1, r1, Imm8(0), LeaveCC);
+
+ ma_strd(r0, r1, EDtrAddr(scratch, EDtrOffImm(0)));
+ ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
+}
+
+void
+MacroAssembler::neg32(Register reg)
+{
+ ma_neg(reg, reg, SetCC);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
+ as_rsb(reg.low, reg.low, Imm8(0), SetCC);
+ as_rsc(reg.high, reg.high, Imm8(0));
+}
+
+void
+MacroAssembler::negateDouble(FloatRegister reg)
+{
+ ma_vneg(reg, reg);
+}
+
+void
+MacroAssembler::negateFloat(FloatRegister reg)
+{
+ ma_vneg_f32(reg, reg);
+}
+
+void
+MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
+{
+ if (src != dest)
+ ma_vmov_f32(src, dest);
+ ma_vabs_f32(dest, dest);
+}
+
+void
+MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
+{
+ if (src != dest)
+ ma_vmov(src, dest);
+ ma_vabs(dest, dest);
+}
+
+void
+MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
+{
+ ma_vsqrt_f32(src, dest);
+}
+
+void
+MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
+{
+ ma_vsqrt(src, dest);
+}
+
+void
+MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void
+MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_lsl(imm, dest, dest);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_mov(dest.high, lsl(dest.high, imm.value));
+ as_orr(dest.high, dest.high, lsr(dest.low, 32 - imm.value));
+ as_mov(dest.low, lsl(dest.low, imm.value));
+ } else {
+ as_mov(dest.high, lsl(dest.low, imm.value - 32));
+ ma_mov(Imm32(0), dest.low);
+ }
+}
+
+void
+MacroAssembler::lshift64(Register unmaskedShift, Register64 dest)
+{
+ // dest.high = dest.high << shift | dest.low << (shift - 32) | dest.low >> (32 - shift)
+ // Note: one of the two dest.low shifts will always yield zero due to negative shift.
+
+ ScratchRegisterScope shift(*this);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, lsl(dest.high, shift));
+ as_sub(shift, shift, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(dest.low, shift));
+ ma_neg(shift, shift);
+ as_orr(dest.high, dest.high, lsr(dest.low, shift));
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsl(dest.low, shift));
+}
+
+void
+MacroAssembler::lshift32(Register src, Register dest)
+{
+ ma_lsl(src, dest, dest);
+}
+
+void
+MacroAssembler::lshift32(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ lshiftPtr(imm, dest);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_lsr(imm, dest, dest);
+}
+
+void
+MacroAssembler::rshift32(Register src, Register dest)
+{
+ ma_lsr(src, dest, dest);
+}
+
+void
+MacroAssembler::rshift32(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ rshiftPtr(imm, dest);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_asr(imm, dest, dest);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+
+ if (imm.value < 32) {
+ as_mov(dest.low, lsr(dest.low, imm.value));
+ as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
+ as_mov(dest.high, asr(dest.high, imm.value));
+ } else if (imm.value == 32) {
+ as_mov(dest.low, O2Reg(dest.high));
+ as_mov(dest.high, asr(dest.high, 31));
+ } else {
+ as_mov(dest.low, asr(dest.high, imm.value - 32));
+ as_mov(dest.high, asr(dest.high, 31));
+ }
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register unmaskedShift, Register64 dest)
+{
+ Label proceed;
+
+ // dest.low = dest.low >>> shift | dest.high << (32 - shift)
+ // if (shift - 32 >= 0)
+ // dest.low |= dest.high >> (shift - 32)
+ // Note: negative shift amounts yield zero, except for the arithmetic
+ // (signed) right shift, so we test the amount and only apply that shift
+ // when it is non-negative.
+ ScratchRegisterScope shift(*this);
+
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsr(dest.low, shift));
+ as_rsb(shift, shift, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(dest.high, shift));
+ ma_neg(shift, shift, SetCC);
+ ma_b(&proceed, Signed);
+
+ as_orr(dest.low, dest.low, asr(dest.high, shift));
+
+ bind(&proceed);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, asr(dest.high, shift));
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Register src, Register dest)
+{
+ ma_asr(src, dest, dest);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ rshiftPtrArithmetic(imm, dest);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ as_mov(dest.low, lsr(dest.low, imm.value));
+ as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
+ as_mov(dest.high, lsr(dest.high, imm.value));
+ } else if (imm.value == 32) {
+ ma_mov(dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+ } else {
+ ma_lsr(Imm32(imm.value - 32), dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+ }
+}
+
+void
+MacroAssembler::rshift64(Register unmaskedShift, Register64 dest)
+{
+ // dest.low = dest.low >> shift | dest.high >> (shift - 32) | dest.high << (32 - shift)
+ // Note: one of the two dest.high shifts will always yield zero due to negative shift.
+
+ ScratchRegisterScope shift(*this);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsr(dest.low, shift));
+ as_sub(shift, shift, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(dest.high, shift));
+ ma_neg(shift, shift);
+ as_orr(dest.low, dest.low, lsl(dest.high, shift));
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, lsr(dest.high, shift));
+}
+
+// ===============================================================
+// Rotate functions
+void
+MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_rol(count, input, dest);
+ else
+ ma_mov(input, dest);
+}
+
+void
+MacroAssembler::rotateLeft(Register count, Register input, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_rol(count, input, dest, scratch);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateRight64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_mov(input.low, dest.low);
+ ma_mov(input.high, dest.high);
+ } else if (amount == 32) {
+ ma_mov(input.low, scratch);
+ ma_mov(input.high, dest.low);
+ ma_mov(scratch, dest.high);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_mov(dest.high, scratch);
+ as_mov(dest.high, lsl(dest.high, amount));
+ as_orr(dest.high, dest.high, lsr(dest.low, 32 - amount));
+ as_mov(dest.low, lsl(dest.low, amount));
+ as_orr(dest.low, dest.low, lsr(scratch, 32 - amount));
+ }
+ }
+}
+
+void
+MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(shift != temp);
+ MOZ_ASSERT(src == dest);
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done;
+
+ ma_mov(src.high, temp);
+ as_and(shift_value, shift, Imm8(0x3f));
+ as_cmp(shift_value, Imm8(32));
+ ma_b(&high, GreaterThanOrEqual);
+
+ // high = high << shift | low >> 32 - shift
+ // low = low << shift | high >> 32 - shift
+ as_mov(dest.high, lsl(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsr(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsl(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(temp, shift_value));
+
+ ma_b(&done);
+
+ // A 32 - 64 shift is a 0 - 32 shift in the other direction.
+ bind(&high);
+ as_rsb(shift_value, shift_value, Imm8(64));
+
+ as_mov(dest.high, lsr(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsr(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(temp, shift_value));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_ror(count, input, dest);
+ else
+ ma_mov(input, dest);
+}
+
+void
+MacroAssembler::rotateRight(Register count, Register input, Register dest)
+{
+ ma_ror(count, input, dest);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateLeft64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_mov(input.low, dest.low);
+ ma_mov(input.high, dest.high);
+ } else if (amount == 32) {
+ ma_mov(input.low, scratch);
+ ma_mov(input.high, dest.low);
+ ma_mov(scratch, dest.high);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_mov(dest.high, scratch);
+ as_mov(dest.high, lsr(dest.high, amount));
+ as_orr(dest.high, dest.high, lsl(dest.low, 32 - amount));
+ as_mov(dest.low, lsr(dest.low, amount));
+ as_orr(dest.low, dest.low, lsl(scratch, 32 - amount));
+ }
+ }
+}
+
+void
+MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(shift != temp);
+ MOZ_ASSERT(src == dest);
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done;
+
+ ma_mov(src.high, temp);
+ as_and(shift_value, shift, Imm8(0x3f));
+ as_cmp(shift_value, Imm8(32));
+ ma_b(&high, GreaterThanOrEqual);
+
+ // high = high >> shift | low << 32 - shift
+ // low = low >> shift | high << 32 - shift
+ as_mov(dest.high, lsr(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsr(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(temp, shift_value));
+
+ ma_b(&done);
+
+ // A 32 - 64 shift is a 0 - 32 shift in the other direction.
+ bind(&high);
+ as_rsb(shift_value, shift_value, Imm8(64));
+
+ as_mov(dest.high, lsl(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsr(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsl(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(temp, shift_value));
+
+ bind(&done);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
+{
+ ma_clz(src, dest);
+}
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+
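+ // Count leading zeroes of the high word; a count of 32 means the high word
+ // is zero, so count the low word instead and add 32.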
+ ma_clz(src.high, scratch);
+ as_cmp(scratch, Imm8(32));
+ ma_mov(scratch, dest, LeaveCC, NotEqual);
+ ma_clz(src.low, dest, Equal);
+ as_add(dest, dest, Imm8(32), LeaveCC, Equal);
+}
+
+void
+MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_ctz(src, dest, scratch);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ Label done, high;
+ as_cmp(src.low, Imm8(0));
+ ma_b(&high, Equal);
+
+ ctz32(src.low, dest, /* knownNotZero = */ true);
+ ma_b(&done);
+
+ bind(&high);
+ ctz32(src.high, dest, /* knownNotZero = */ false);
+ as_add(dest, dest, Imm8(32));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::popcnt32(Register input, Register output, Register tmp)
+{
+ // Equivalent to GCC output of mozilla::CountPopulation32()
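+ // This is the usual SWAR popcount: form 2-bit sums, then 4-bit sums, mask
+ // to nibbles, and accumulate the byte totals into the top byte with
+ // shifted adds.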
+
+ ScratchRegisterScope scratch(*this);
+
+ if (input != output)
+ ma_mov(input, output);
+ as_mov(tmp, asr(output, 1));
+ ma_and(Imm32(0x55555555), tmp, scratch);
+ ma_sub(output, tmp, output);
+ as_mov(tmp, asr(output, 2));
+ ma_mov(Imm32(0x33333333), scratch);
+ ma_and(scratch, output);
+ ma_and(scratch, tmp);
+ ma_add(output, tmp, output);
+ as_add(output, output, lsr(output, 4));
+ ma_and(Imm32(0xF0F0F0F), output, scratch);
+ as_add(output, output, lsl(output, 8));
+ as_add(output, output, lsl(output, 16));
+ as_mov(output, asr(output, 24));
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp)
+{
+ MOZ_ASSERT(dest.low != tmp);
+ MOZ_ASSERT(dest.high != tmp);
+ MOZ_ASSERT(dest.low != dest.high);
+ // The source and destination can overlap. Therefore make sure we don't
+ // clobber the source before we have the data.
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
+ ma_add(dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+}
+
+// ===============================================================
+// Branch functions
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label)
+{
+ ma_cmp(lhs, rhs);
+ ma_b(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ ScratchRegisterScope scratch(*this);
+
+ ma_cmp(lhs, rhs, scratch);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+
+ // Load into scratch.
+ movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Load into scratch.
+ movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ {
+ ScratchRegisterScope scratch(*this);
+
+ Register base = lhs.base;
+ uint32_t scale = Imm32::ShiftOf(lhs.scale).value;
+
+ // Load lhs into scratch2.
+ if (lhs.offset != 0) {
+ ma_add(base, Imm32(lhs.offset), scratch, scratch2);
+ ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ } else {
+ ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ }
+ }
+ branch32(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ movePtr(lhs, scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+
+ branch32(cond, lhs, val.firstHalf(), label);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ load32(rhs, scratch);
+ branch32(cond, lhs, scratch, label);
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
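+ // Decide on the high words using the strict (no-equal) conditions; only
+ // when the high words are equal does the unsigned comparison of the low
+ // words decide.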
+ cmp32(lhs.high, val.hi());
+ ma_b(success, cond1);
+ ma_b(fail, cond2);
+ cmp32(lhs.low, val.low());
+ ma_b(success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ ma_b(success, cond1);
+ ma_b(fail, cond2);
+ cmp32(lhs.low, rhs.low);
+ ma_b(success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ branch32(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ branch32(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label)
+{
+ branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ movePtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label)
+{
+ branch32(cond, lhs, Imm32(rhs.value), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label)
+{
+ branch32(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label)
+{
+ branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+template <typename T>
+inline CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
+{
+ cmpPtr(lhs, rhs);
+ return jumpWithPatch(label, cond);
+}
+
+template <typename T>
+inline CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ {
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(lhs, scratch2, scratch);
+ }
+ cmpPtr(scratch2, rhs);
+ return jumpWithPatch(label, cond);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareFloat(lhs, rhs);
+
+ if (cond == DoubleNotEqual) {
+ // Force the unordered cases not to jump.
+ Label unordered;
+ ma_b(&unordered, VFP_Unordered);
+ ma_b(label, VFP_NotEqualOrUnordered);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ ma_b(label, VFP_Unordered);
+ ma_b(label, VFP_Equal);
+ return;
+ }
+
+ ma_b(label, ConditionFromDoubleCondition(cond));
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ ScratchFloat32Scope scratchFloat32(*this);
+ ScratchRegisterScope scratch(*this);
+
+ ma_vcvt_F32_I32(src, scratchFloat32.sintOverlay());
+ ma_vxfer(scratchFloat32, dest);
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+}
+
+void
+MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareDouble(lhs, rhs);
+
+ if (cond == DoubleNotEqual) {
+ // Force the unordered cases not to jump.
+ Label unordered;
+ ma_b(&unordered, VFP_Unordered);
+ ma_b(label, VFP_NotEqualOrUnordered);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ ma_b(label, VFP_Unordered);
+ ma_b(label, VFP_Equal);
+ return;
+ }
+
+ ma_b(label, ConditionFromDoubleCondition(cond));
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+// There are two options for implementing branchTruncateDoubleToInt32:
+//
+// 1. Convert the floating point value to an integer; if it did not fit, then it
+// was clamped to INT_MIN/INT_MAX, and we can test for that. NOTE: if the value
+// really was supposed to be INT_MAX / INT_MIN then the test will be wrong.
+//
+// 2. Convert the floating point value to an integer; if it did not fit, then it
+// set one or two bits in the FPSCR. Check those.
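+//
+// For example (illustrative): vcvt of 3e9 clamps to INT_MAX (0x7fffffff), so
+// the comparisons in option 1 catch it and branch to fail; an input of exactly
+// INT_MAX also trips the check and takes the fail path even though it did fit.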
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ ScratchDoubleScope scratchDouble(*this);
+ FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
+ ScratchRegisterScope scratch(*this);
+
+ ma_vcvt_F64_I32(src, scratchSIntReg);
+ ma_vxfer(scratchSIntReg, dest);
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+}
+
+template <typename T, typename L>
+void
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
+{
+ add32(src, dest);
+ as_b(label, cond);
+}
+
+template <typename T>
+void
+MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
+{
+ sub32(src, dest);
+ j(cond, label);
+}
+
+void
+MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_sub(rhs, lhs, scratch, SetCC);
+ as_b(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ // x86 likes test foo, foo rather than cmp foo, #0.
+ // Convert the former into the latter.
+ if (lhs == rhs && (cond == Zero || cond == NonZero))
+ as_cmp(lhs, Imm8(0));
+ else
+ ma_tst(lhs, rhs);
+ ma_b(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ ScratchRegisterScope scratch(*this);
+ ma_tst(lhs, rhs, scratch);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ load32(lhs, scratch2);
+ branchTest32(cond, scratch2, rhs, label);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ load32(lhs, scratch2);
+ branchTest32(cond, scratch2, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ branchTest32(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ branchTest32(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ branchTest32(cond, lhs, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ ScratchRegisterScope scratch(*this);
+
+ if (cond == Assembler::Zero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ ma_orr(lhs.low, lhs.high, scratch);
+ branchTestPtr(cond, scratch, scratch, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testUndefined(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestInt32Impl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testInt32(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testInt32Truthy(truthy, value);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testDouble(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label)
+{
+ Condition c = testDoubleTruthy(truthy, reg);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNumberImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testNumber(cond, t);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestBooleanImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testBoolean(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testBooleanTruthy(truthy, value);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, Register tag, Label* label)
+{
+ branchTestStringImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestStringImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestStringImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testString(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testStringTruthy(truthy, value);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label)
+{
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testSymbol(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label)
+{
+ branchTestNullImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNullImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testNull(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label)
+{
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestObjectImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testObject(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testGCThing(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testPrimitive(cond, t);
+ ma_b(label, c);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void
+MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label)
+{
+ cond = testMagic(cond, t);
+ ma_b(label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ branchTestMagic(cond, valaddr, label);
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_vstr(src, addr, scratch);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+ uint32_t scale = Imm32::ShiftOf(addr.scale).value;
+ ma_vstr(src, addr.base, addr.index, scratch, scratch2, scale, addr.offset);
+}
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_vstr(src.asSingle(), addr, scratch);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
+{
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+ uint32_t scale = Imm32::ShiftOf(addr.scale).value;
+ ma_vstr(src.asSingle(), addr.base, addr.index, scratch, scratch2, scale, addr.offset);
+}
+
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
+{
+ MOZ_CRASH("NYI");
+}
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+ // On ARMv6 the optional argument (BarrierST, etc) is ignored.
+ if (barrier == (MembarStoreStore|MembarSynchronizing))
+ ma_dsb(BarrierST);
+ else if (barrier & MembarSynchronizing)
+ ma_dsb();
+ else if (barrier == MembarStoreStore)
+ ma_dmb(BarrierST);
+ else if (barrier)
+ ma_dmb();
+}
+
+// ===============================================================
+// Clamping functions.
+
+void
+MacroAssembler::clampIntToUint8(Register reg)
+{
+ // Look at (reg >> 8): if it is 0, then reg shouldn't be clamped; if it is
+ // < 0, then we want to clamp to 0; otherwise, we wish to clamp to 255.
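+ // For example (illustrative): 300 >> 8 == 1 (non-zero, non-negative), so reg
+ // becomes 255; -5 >> 8 == -1 (negative), so reg becomes 0; 42 >> 8 == 0, so
+ // reg is left unchanged.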
+ ScratchRegisterScope scratch(*this);
+ as_mov(scratch, asr(reg, 8), SetCC);
+ ma_mov(Imm32(0xff), reg, NotEqual);
+ ma_mov(Imm32(0), reg, Signed);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ BufferOffset bo = as_cmp(index, Imm8(0));
+ append(wasm::BoundsCheck(bo.getOffset()));
+
+ as_b(label, cond);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ Instruction* inst = (Instruction*) patchAt;
+ MOZ_ASSERT(inst->is<InstCMP>());
+ InstCMP* cmp = inst->as<InstCMP>();
+
+ Register index;
+ cmp->extractOp1(&index);
+
+ MOZ_ASSERT(cmp->extractOp2().isImm8());
+
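+ // Rewrite the original 'cmp index, #0' in place so that it now compares the
+ // index against the real heap limit.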
+ Imm8 imm8 = Imm8(limit);
+ MOZ_RELEASE_ASSERT(!imm8.invalid());
+
+ *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
+ // Don't call Auto Flush Cache; the wasm caller has done this for us.
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssemblerARMCompat::incrementInt32Value(const Address& addr)
+{
+ asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MacroAssembler_arm_inl_h */
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
new file mode 100644
index 000000000..c6e627db6
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -0,0 +1,5559 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/MacroAssembler-arm.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm/Simulator-arm.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+using mozilla::BitwiseCast;
+
+bool
+isValueDTRDCandidate(ValueOperand& val)
+{
+ // In order to be used for a DTRD memory function, the two target registers
+ // need to be a) Adjacent, with the tag larger than the payload, and b)
+ // Aligned to a multiple of two.
+ if ((val.typeReg().code() != (val.payloadReg().code() + 1)))
+ return false;
+ if ((val.payloadReg().code() & 1) != 0)
+ return false;
+ return true;
+}
+
+void
+MacroAssemblerARM::convertBoolToInt32(Register source, Register dest)
+{
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ as_and(dest, source, Imm8(0xff));
+}
+
+void
+MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_)
+{
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
+ as_vcvt(dest, dest.sintOverlay());
+}
+
+void
+MacroAssemblerARM::convertInt32ToDouble(const Address& src, FloatRegister dest)
+{
+ ScratchDoubleScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_vldr(src, scratch, scratch2);
+ as_vcvt(dest, VFPRegister(scratch).sintOverlay());
+}
+
+void
+MacroAssemblerARM::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
+{
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset != 0) {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ base = scratch;
+ }
+ ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), scratch);
+ convertInt32ToDouble(scratch, dest);
+}
+
+void
+MacroAssemblerARM::convertUInt32ToDouble(Register src, FloatRegister dest_)
+{
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
+ as_vcvt(dest, dest.uintOverlay());
+}
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool
+MacroAssemblerARMCompat::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerARMCompat::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchDoubleScope scratchDouble(asMasm());
+
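+ // Compute dest = double(src.high) * 2^32 + double(src.low), using the
+ // scratch double to hold first the 2^32 constant and then double(src.low).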
+ convertUInt32ToDouble(src.high, dest);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), scratch);
+ ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
+ }
+ asMasm().mulDouble(scratchDouble, dest);
+ convertUInt32ToDouble(src.low, scratchDouble);
+ asMasm().addDouble(scratchDouble, dest);
+}
+
+void
+MacroAssemblerARM::convertUInt32ToFloat32(Register src, FloatRegister dest_)
+{
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
+ as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
+}
+
+void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
+ Condition c)
+{
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ // Convert the floating point value to an integer; if it did not fit, then
+ // when we convert it *back* to a float, it will have a different value,
+ // which we can test.
+ ScratchDoubleScope scratchDouble(asMasm());
+ ScratchRegisterScope scratch(asMasm());
+
+ FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
+
+ ma_vcvt_F64_I32(src, scratchSIntReg);
+ // Move the value into the dest register.
+ ma_vxfer(scratchSIntReg, dest);
+ ma_vcvt_I32_F64(scratchSIntReg, scratchDouble);
+ ma_vcmp(src, scratchDouble);
+ as_vmrs(pc);
+ ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
+
+ if (negativeZeroCheck) {
+ as_cmp(dest, Imm8(0));
+ // Test and bail for -0.0 when the integer result is 0. Move the top word
+ // of the double into the output reg; if it is non-zero, then the
+ // original value was -0.0.
+ as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
+ ma_b(fail, Assembler::Equal);
+ }
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ // Converting the floating point value to an integer and then converting it
+ // back to a float32 would not work, as float to int32 conversions are
+ // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+ // and then back to float(INT32_MAX + 1)). If this ever happens, we just
+ // bail out.
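+ // For example (illustrative): 2147483648.0f converts to the clamped value
+ // 0x7fffffff, which the explicit comparisons below detect, so we bail.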
+ ScratchFloat32Scope scratchFloat(asMasm());
+ ScratchRegisterScope scratch(asMasm());
+
+ FloatRegister ScratchSIntReg = scratchFloat.sintOverlay();
+ ma_vcvt_F32_I32(src, ScratchSIntReg);
+
+ // Store the result
+ ma_vxfer(ScratchSIntReg, dest);
+
+ ma_vcvt_I32_F32(ScratchSIntReg, scratchFloat);
+ ma_vcmp(src, scratchFloat);
+ as_vmrs(pc);
+ ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
+
+ // Bail out in the clamped cases.
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+
+ if (negativeZeroCheck) {
+ as_cmp(dest, Imm8(0));
+ // Test and bail for -0.0 when the integer result is 0. Move the float into
+ // the output reg, and if it is non-zero then the original value was
+ // -0.0.
+ as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
+ ma_b(fail, Assembler::Equal);
+ }
+}
+
+void
+MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
+{
+ MOZ_ASSERT(dest.isDouble());
+ MOZ_ASSERT(src.isSingle());
+ as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay());
+}
+
+void
+MacroAssemblerARM::convertInt32ToFloat32(Register src, FloatRegister dest)
+{
+ // Direct conversions aren't possible.
+ as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
+ as_vcvt(dest.singleOverlay(), dest.sintOverlay());
+}
+
+void
+MacroAssemblerARM::convertInt32ToFloat32(const Address& src, FloatRegister dest)
+{
+ ScratchFloat32Scope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_vldr(src, scratch, scratch2);
+ as_vcvt(dest, VFPRegister(scratch).sintOverlay());
+}
+
+bool
+MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
+ SBit s, Condition c)
+{
+ if ((s == SetCC && ! condsAreSafe(op)) || !can_dbl(op))
+ return false;
+
+ ALUOp interop = getDestVariant(op);
+ Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
+ if (both.fst().invalid())
+ return false;
+
+ // For the most part, there is no good reason to set the condition codes for
+ // the first instruction. We can do better things if the second instruction
+ // doesn't have a dest, such as checking for overflow by doing the first
+ // operation and then skipping the second operation if the first one
+ // overflowed; this preserves the overflow condition code. Unfortunately, it
+ // is horribly brittle.
+ as_alu(dest, src1, Operand2(both.fst()), interop, LeaveCC, c);
+ as_alu(dest, dest, Operand2(both.snd()), op, s, c);
+ return true;
+}
+
+void
+MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ ALUOp op, SBit s, Condition c)
+{
+ // ma_mov should be used for moves.
+ MOZ_ASSERT(op != OpMov);
+ MOZ_ASSERT(op != OpMvn);
+ MOZ_ASSERT(src1 != scratch);
+
+ // As it turns out, if you ask for a compare-like instruction you *probably*
+ // want it to set condition codes.
+ MOZ_ASSERT_IF(dest == InvalidReg, s == SetCC);
+
+ // The operator gives us the ability to determine how this can be used.
+ Imm8 imm8 = Imm8(imm.value);
+ // One instruction: If we can encode it using an imm8m, then do so.
+ if (!imm8.invalid()) {
+ as_alu(dest, src1, imm8, op, s, c);
+ return;
+ }
+
+ // One instruction, negated:
+ Imm32 negImm = imm;
+ Register negDest;
+ ALUOp negOp = ALUNeg(op, dest, scratch, &negImm, &negDest);
+ Imm8 negImm8 = Imm8(negImm.value);
+ // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'.
+ // The dest can be replaced (InvalidReg => scratch).
+ // This is useful if we wish to negate tst. tst has an invalid (aka not
+ // used) dest, but its negation bic requires a dest.
+ if (negOp != OpInvalid && !negImm8.invalid()) {
+ as_alu(negDest, src1, negImm8, negOp, s, c);
+ return;
+ }
+
+ // Start by attempting to generate a two-instruction form. Some things
+ // cannot be made into two-inst forms correctly, e.g. adds dest, src,
+ // 0xffff. Since we want the condition codes (and don't know which ones
+ // will be checked), we need to assume that the overflow flag will be
+ // checked, and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
+ // guaranteed to set the overflow flag the same way as the (theoretical)
+ // one-instruction variant.
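+ // For example (illustrative): 'add r0, r1, #0x10100' can be emitted as two
+ // adds of #0x10000 and #0x100, since both pieces are valid imm8m encodings;
+ // an 'adds' with 0xffff, by contrast, is not split and falls through to the
+ // scratch-register move at the end of this function, keeping the flags exact.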
+ if (alu_dbl(src1, imm, dest, op, s, c))
+ return;
+
+ // And try with its negative.
+ if (negOp != OpInvalid && alu_dbl(src1, negImm, negDest, negOp, s, c))
+ return;
+
+ ma_mov(imm, scratch, c);
+ as_alu(dest, src1, O2Reg(scratch), op, s, c);
+}
+
+void
+MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
+ SBit s, Assembler::Condition c)
+{
+ MOZ_ASSERT(op2.tag() == Operand::Tag::OP2);
+ as_alu(dest, src1, op2.toOp2(), op, s, c);
+}
+
+void
+MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SBit s, Condition c)
+{
+ as_alu(dest, src1, op2, op, s, c);
+}
+
+void
+MacroAssemblerARM::ma_nop()
+{
+ as_nop();
+}
+
+void
+MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c)
+{
+ int32_t imm = imm_.value;
+ if (HasMOVWT()) {
+ as_movw(dest, Imm16(imm & 0xffff), c);
+ as_movt(dest, Imm16(imm >> 16 & 0xffff), c);
+ } else {
+ as_Imm32Pool(dest, imm, c);
+ }
+}
+
+void
+MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c)
+{
+ ma_movPatchable(Imm32(int32_t(imm.value)), dest, c);
+}
+
+/* static */ void
+MacroAssemblerARM::ma_mov_patch(Imm32 imm_, Register dest, Assembler::Condition c,
+ RelocStyle rs, Instruction* i)
+{
+ MOZ_ASSERT(i);
+ int32_t imm = imm_.value;
+
+ // Make sure the current instruction is not an artificial guard inserted
+ // by the assembler buffer.
+ i = i->skipPool();
+
+ switch(rs) {
+ case L_MOVWT:
+ Assembler::as_movw_patch(dest, Imm16(imm & 0xffff), c, i);
+ i = i->next();
+ Assembler::as_movt_patch(dest, Imm16(imm >> 16 & 0xffff), c, i);
+ break;
+ case L_LDR:
+ Assembler::WritePoolEntry(i, c, imm);
+ break;
+ }
+}
+
+/* static */ void
+MacroAssemblerARM::ma_mov_patch(ImmPtr imm, Register dest, Assembler::Condition c,
+ RelocStyle rs, Instruction* i)
+{
+ ma_mov_patch(Imm32(int32_t(imm.value)), dest, c, rs, i);
+}
+
+void
+MacroAssemblerARM::ma_mov(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+ if (s == SetCC || dest != src)
+ as_mov(dest, O2Reg(src), s, c);
+}
+
+void
+MacroAssemblerARM::ma_mov(Imm32 imm, Register dest, Assembler::Condition c)
+{
+ // Try mov with Imm8 operand.
+ Imm8 imm8 = Imm8(imm.value);
+ if (!imm8.invalid()) {
+ as_alu(dest, InvalidReg, imm8, OpMov, LeaveCC, c);
+ return;
+ }
+
+ // Try mvn with Imm8 operand.
+ Imm8 negImm8 = Imm8(~imm.value);
+ if (!negImm8.invalid()) {
+ as_alu(dest, InvalidReg, negImm8, OpMvn, LeaveCC, c);
+ return;
+ }
+
+ // Try movw/movt.
+ if (HasMOVWT()) {
+ // ARMv7 supports movw/movt. movw zero-extends its 16-bit argument,
+ // so we can set the register this way. movt leaves the bottom 16
+ // bits intact, so we always need a movw.
+ as_movw(dest, Imm16(imm.value & 0xffff), c);
+ if (uint32_t(imm.value) >> 16)
+ as_movt(dest, Imm16(uint32_t(imm.value) >> 16), c);
+ return;
+ }
+
+ // If we don't have movw/movt, we need a load.
+ as_Imm32Pool(dest, imm.value, c);
+}
+
+void
+MacroAssemblerARM::ma_mov(ImmWord imm, Register dest, Assembler::Condition c)
+{
+ ma_mov(Imm32(imm.value), dest, c);
+}
+
+void
+MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest)
+{
+ // As opposed to the x86/x64 version, the data relocation has to be executed
+ // before recovering the pointer, not after.
+ writeDataRelocation(ptr);
+ ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always);
+}
+
+// Shifts (just a move with a shifting op2)
+void
+MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
+{
+ as_mov(dst, lsl(src, shift.value));
+}
+
+void
+MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
+{
+ as_mov(dst, lsr(src, shift.value));
+}
+
+void
+MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst)
+{
+ as_mov(dst, asr(src, shift.value));
+}
+
+void
+MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst)
+{
+ as_mov(dst, ror(src, shift.value));
+}
+
+void
+MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
+{
+ as_mov(dst, rol(src, shift.value));
+}
+
+// Shifts (just a move with a shifting op2)
+void
+MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
+{
+ as_mov(dst, lsl(src, shift));
+}
+
+void
+MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
+{
+ as_mov(dst, lsr(src, shift));
+}
+
+void
+MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst)
+{
+ as_mov(dst, asr(src, shift));
+}
+
+void
+MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst)
+{
+ as_mov(dst, ror(src, shift));
+}
+
+void
+MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst, AutoRegisterScope& scratch)
+{
+ as_rsb(scratch, shift, Imm8(32));
+ as_mov(dst, ror(src, scratch));
+}
+
+// Move not (dest <- ~src)
+void
+MacroAssemblerARM::ma_mvn(Register src1, Register dest, SBit s, Assembler::Condition c)
+{
+ as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, s, c);
+}
+
+// Negate (dest <- -src), src is a register, rather than a general op2.
+void
+MacroAssemblerARM::ma_neg(Register src1, Register dest, SBit s, Assembler::Condition c)
+{
+ as_rsb(dest, src1, Imm8(0), s, c);
+}
+
+// And.
+void
+MacroAssemblerARM::ma_and(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+ ma_and(dest, src, dest, s, c);
+}
+
+void
+MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c)
+{
+ as_and(dest, src1, O2Reg(src2), s, c);
+}
+
+void
+MacroAssemblerARM::ma_and(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s, Assembler::Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpAnd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s, Assembler::Condition c)
+{
+ ma_alu(src1, imm, dest, scratch, OpAnd, s, c);
+}
+
+// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
+void
+MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Assembler::Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpBic, s, c);
+}
+
+// Exclusive or.
+void
+MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+ ma_eor(dest, src, dest, s, c);
+}
+
+void
+MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c)
+{
+ as_eor(dest, src1, O2Reg(src2), s, c);
+}
+
+void
+MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Assembler::Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpEor, s, c);
+}
+
+void
+MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s, Assembler::Condition c)
+{
+ ma_alu(src1, imm, dest, scratch, OpEor, s, c);
+}
+
+// Or.
+void
+MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+ ma_orr(dest, src, dest, s, c);
+}
+
+void
+MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c)
+{
+ as_orr(dest, src1, O2Reg(src2), s, c);
+}
+
+void
+MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Assembler::Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpOrr, s, c);
+}
+
+void
+MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s, Assembler::Condition c)
+{
+ ma_alu(src1, imm, dest, scratch, OpOrr, s, c);
+}
+
+// Arithmetic-based ops.
+// Add with carry.
+void
+MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpAdc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, dest, O2Reg(src), OpAdc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(src2), OpAdc, s, c);
+}
+
+// Add.
+void
+MacroAssemblerARM::ma_add(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpAdd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s, Condition c)
+{
+ ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(src2), OpAdd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s, Condition c)
+{
+ ma_alu(src1, op, dest, OpAdd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(src1, op, dest, scratch, OpAdd, s, c);
+}
+
+// Subtract with carry.
+void
+MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpSbc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, dest, O2Reg(src1), OpSbc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(src2), OpSbc, s, c);
+}
+
+// Subtract.
+void
+MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpSub, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s, Condition c)
+{
+ ma_alu(dest, Operand(src1), dest, OpSub, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ ma_alu(src1, Operand(src2), dest, OpSub, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s, Condition c)
+{
+ ma_alu(src1, op, dest, OpSub, s, c);
+}
+
+void
+MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(src1, op, dest, scratch, OpSub, s, c);
+}
+
+// Reverse subtract.
+void
+MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpRsb, s, c);
+}
+
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(dest), OpRsb, s, c);
+}
+
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(src2), OpRsb, s, c);
+}
+
+void
+MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(src1, op2, dest, scratch, OpRsb, s, c);
+}
+
+// Reverse subtract with carry.
+void
+MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s, Condition c)
+{
+ ma_alu(dest, imm, dest, scratch, OpRsc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, dest, O2Reg(src1), OpRsc, s, c);
+}
+
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+ as_alu(dest, src1, O2Reg(src2), OpRsc, s, c);
+}
+
+// Compares/tests.
+// Compare negative (sets condition codes as src1 + src2 would).
+void
+MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c)
+{
+ ma_alu(src1, imm, InvalidReg, scratch, OpCmn, SetCC, c);
+}
+
+void
+MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
+{
+ as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c);
+}
+
+void
+MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
+{
+ MOZ_CRASH("Feature NYI");
+}
+
+// Compare (src - src2).
+void
+MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c)
+{
+ ma_alu(src1, imm, InvalidReg, scratch, OpCmp, SetCC, c);
+}
+
+void
+MacroAssemblerARM::ma_cmp(Register src1, ImmTag tag, Condition c)
+{
+ // ImmTag comparisons can always be done without use of a scratch register.
+ Imm8 negtag = Imm8(-tag.value);
+ MOZ_ASSERT(!negtag.invalid());
+ as_cmn(src1, negtag, c);
+}
+
+void
+MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, AutoRegisterScope& scratch, Condition c)
+{
+ ma_cmp(src1, Imm32(ptr.value), scratch, c);
+}
+
+void
+MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, AutoRegisterScope& scratch, Condition c)
+{
+ ma_mov(ptr, scratch);
+ ma_cmp(src1, scratch, c);
+}
+
+void
+MacroAssemblerARM::ma_cmp(Register src1, Operand op, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, Condition c)
+{
+ switch (op.tag()) {
+ case Operand::Tag::OP2:
+ as_cmp(src1, op.toOp2(), c);
+ break;
+ case Operand::Tag::MEM:
+ ma_ldr(op.toAddress(), scratch, scratch2);
+ as_cmp(src1, O2Reg(scratch), c);
+ break;
+ default:
+ MOZ_CRASH("trying to compare FP and integer registers");
+ }
+}
+
+void
+MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
+{
+ as_cmp(src1, O2Reg(src2), c);
+}
+
+// Test for equality, (src1 ^ src2).
+void
+MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c)
+{
+ ma_alu(src1, imm, InvalidReg, scratch, OpTeq, SetCC, c);
+}
+
+void
+MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
+{
+ as_teq(src1, O2Reg(src2), c);
+}
+
+void
+MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
+{
+ as_teq(src1, op.toOp2(), c);
+}
+
+// Test (src1 & src2).
+void
+MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c)
+{
+ ma_alu(src1, imm, InvalidReg, scratch, OpTst, SetCC, c);
+}
+
+void
+MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
+{
+ as_tst(src1, O2Reg(src2), c);
+}
+
+void
+MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
+{
+ as_tst(src1, op.toOp2(), c);
+}
+
+void
+MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest)
+{
+ as_mul(dest, src1, src2);
+}
+
+void
+MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch)
+{
+ ma_mov(imm, scratch);
+ as_mul(dest, src1, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest,
+ AutoRegisterScope& scratch, Condition cond)
+{
+ // TODO: this operation is illegal on armv6 and earlier
+ // if src2 == scratch or src2 == dest.
+ if (cond == Equal || cond == NotEqual) {
+ as_smull(scratch, dest, src1, src2, SetCC);
+ return cond;
+ }
+
+ if (cond == Overflow) {
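+ // smull produces the full 64-bit product; the signed 32-bit multiply
+ // overflowed exactly when the high word differs from the sign extension
+ // (asr #31) of the low word.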
+ as_smull(scratch, dest, src1, src2);
+ as_cmp(scratch, asr(dest, 31));
+ return NotEqual;
+ }
+
+ MOZ_CRASH("Condition NYI");
+}
+
+Assembler::Condition
+MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, Condition cond)
+{
+ ma_mov(imm, scratch);
+
+ if (cond == Equal || cond == NotEqual) {
+ as_smull(scratch, dest, scratch, src1, SetCC);
+ return cond;
+ }
+
+ if (cond == Overflow) {
+ as_smull(scratch, dest, scratch, src1);
+ as_cmp(scratch, asr(dest, 31));
+ return NotEqual;
+ }
+
+ MOZ_CRASH("Condition NYI");
+}
+
+void
+MacroAssemblerARM::ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow,
+ AutoRegisterScope& scratch)
+{
+ ma_mov(imm, scratch);
+ as_umull(destHigh, destLow, src1, scratch);
+}
+
+void
+MacroAssemblerARM::ma_umull(Register src1, Register src2, Register destHigh, Register destLow)
+{
+ as_umull(destHigh, destLow, src1, src2);
+}
+
+void
+MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
+ AutoRegisterScope& scratch, AutoRegisterScope& scratch2, int32_t shift)
+{
+ // We wish to compute x % ((1<<y) - 1) for a known constant, y.
+ //
+ // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
+ // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+ //
+ // 2. Since both addition and multiplication commute with modulus:
+ // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+ // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+ //
+ // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
+ // simplifies to: c_0 + c_1 + c_2 ... c_n % C
+ //
+ // Each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting by C whenever the number gets
+ // over C.
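+ //
+ // For example (illustrative): with shift = 2 we have C = 3, and
+ // x = 100 = 1*64 + 2*16 + 1*4 + 0, so the base-4 digit sum is
+ // 1 + 2 + 1 + 0 = 4, and 4 % 3 == 1 == 100 % 3.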
+ int32_t mask = (1 << shift) - 1;
+ Label head;
+
+ // Register 'hold' holds -1 if the value was negative, 1 otherwise. The
+ // scratch reg holds the remaining bits that have not been processed. lr
+ // serves as a temporary location to store extracted bits into, as well as
+ // holding the trial subtraction as a temp value. dest is the accumulator
+ // (and holds the final result).
+ //
+ // Move the whole value into tmp, setting the condition codes so we can
+ // muck with them later.
+ as_mov(tmp, O2Reg(src), SetCC);
+ // Zero out the dest.
+ ma_mov(Imm32(0), dest);
+ // Set the hold appropriately.
+ ma_mov(Imm32(1), hold);
+ ma_mov(Imm32(-1), hold, Signed);
+ as_rsb(tmp, tmp, Imm8(0), SetCC, Signed);
+
+ // Begin the main loop.
+ bind(&head);
+ {
+ // Extract the bottom bits.
+ ma_and(Imm32(mask), tmp, scratch, scratch2);
+ // Add those bits to the accumulator.
+ ma_add(scratch, dest, dest);
+ // Do a trial subtraction, this is the same operation as cmp, but we store
+ // the dest.
+ ma_sub(dest, Imm32(mask), scratch, scratch2, SetCC);
+ // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
+ ma_mov(scratch, dest, LeaveCC, NotSigned);
+ // Get rid of the bits that we extracted before, and set the condition codes.
+ as_mov(tmp, lsr(tmp, shift), SetCC);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(&head, NonZero);
+ }
+
+ // Check the hold to see if we need to negate the result. Hold can only be
+ // 1 or -1, so this will never set the 0 flag.
+ as_cmp(hold, Imm8(0));
+ // If the hold was non-zero, negate the result to be in line with what JS
+ // wants; this will set the condition codes if we try to negate.
+ as_rsb(dest, dest, Imm8(0), SetCC, Signed);
+ // Since the Zero flag is not set by the compare, we can *only* set the Zero
+ // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of
+ // the computation was -0.0).
+}
+
+void
+MacroAssemblerARM::ma_smod(Register num, Register div, Register dest, AutoRegisterScope& scratch)
+{
+ as_sdiv(scratch, num, div);
+ as_mls(dest, num, scratch, div);
+}
+
+void
+MacroAssemblerARM::ma_umod(Register num, Register div, Register dest, AutoRegisterScope& scratch)
+{
+ as_udiv(scratch, num, div);
+ as_mls(dest, num, scratch, div);
+}
+
+// Division
+void
+MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond)
+{
+ as_sdiv(dest, num, div, cond);
+}
+
+void
+MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
+{
+ as_udiv(dest, num, div, cond);
+}
+
+// Miscellaneous instructions.
+void
+MacroAssemblerARM::ma_clz(Register src, Register dest, Condition cond)
+{
+ as_clz(dest, src, cond);
+}
+
+void
+MacroAssemblerARM::ma_ctz(Register src, Register dest, AutoRegisterScope& scratch)
+{
+ // int c = __clz(a & -a);
+ // return a ? 31 - c : c;
+ as_rsb(scratch, src, Imm8(0), SetCC);
+ as_and(dest, src, O2Reg(scratch), LeaveCC);
+ as_clz(dest, dest);
+ as_rsb(dest, dest, Imm8(0x1F), LeaveCC, Assembler::NotEqual);
+}
+
+// Memory.
+// Shortcut for when we know we're transferring 32 bits of data.
+void
+MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
+ AutoRegisterScope& scratch, Index mode, Assembler::Condition cc)
+{
+ ma_dataTransferN(ls, 32, true, rn, offset, rt, scratch, mode, cc);
+}
+
+void
+MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Address& addr,
+ AutoRegisterScope& scratch, Index mode, Condition cc)
+{
+ ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, scratch, mode, cc);
+}
+
+void
+MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc)
+{
+ as_dtr(IsStore, 32, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_str(Register rt, const Address& addr, AutoRegisterScope& scratch, Index mode, Condition cc)
+{
+ ma_dtr(IsStore, rt, addr, scratch, mode, cc);
+}
+
+void
+MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode, Condition cc)
+{
+ MOZ_ASSERT((rt.code() & 1) == 0);
+ MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
+ as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc)
+{
+ as_dtr(IsLoad, 32, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldr(const Address& addr, Register rt, AutoRegisterScope& scratch, Index mode, Condition cc)
+{
+ ma_dtr(IsLoad, rt, addr, scratch, mode, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc)
+{
+ as_dtr(IsLoad, 8, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode, Condition cc)
+{
+ as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc)
+{
+ as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc)
+{
+ as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2,
+ Index mode, Condition cc)
+{
+ MOZ_ASSERT((rt.code() & 1) == 0);
+ MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
+ MOZ_ASSERT(addr.maybeOffsetRegister() != rt); // Undefined behavior if rm == rt/rt2.
+ MOZ_ASSERT(addr.maybeOffsetRegister() != rt2);
+ as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc)
+{
+ as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
+}
+
+void
+MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
+{
+ as_dtr(IsStore, 8, mode, rt, addr, cc);
+}
+
+// Specialty for moving N bits of data, where N == 8, 16, 32, or 64.
+BufferOffset
+MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt, AutoRegisterScope& scratch,
+ Index mode, Assembler::Condition cc, Scale scale)
+{
+ MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
+
+ if (size == 32 || (size == 8 && !IsSigned))
+ return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, scale)), cc);
+
+ if (scale != TimesOne) {
+ ma_lsl(Imm32(scale), rm, scratch);
+ rm = scratch;
+ }
+
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc);
+}
+
+// No scratch register is required if scale is TimesOne.
+BufferOffset
+MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt,
+ Index mode, Assembler::Condition cc)
+{
+ MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
+ if (size == 32 || (size == 8 && !IsSigned))
+ return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, TimesOne)), cc);
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc);
+}
+
+
+BufferOffset
+MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Imm32 offset, Register rt, AutoRegisterScope& scratch,
+ Index mode, Assembler::Condition cc)
+{
+ MOZ_ASSERT(!(ls == IsLoad && mode == PostIndex && rt == pc),
+ "Large-offset PostIndex loading into PC requires special logic: see ma_popn_pc().");
+
+ int off = offset.value;
+
+ // We can encode this as a standard ldr.
+ if (size == 32 || (size == 8 && !IsSigned) ) {
+ if (off < 4096 && off > -4096) {
+ // This encodes as a single instruction; emulating the mode's behavior
+ // in a multi-instruction sequence is not necessary.
+ return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
+ }
+
+ // We cannot encode this offset in a single ldr. For mode == index,
+ // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
+ // This does not work for mode == PreIndex or mode == PostIndex.
+ // PreIndex is simple, just do the add into the base register first,
+ // then do a PreIndex'ed load. PostIndexed loads can be tricky.
+ // Normally, doing the load with an index of 0, then doing an add would
+ // work, but if the destination is the PC, you don't get to execute the
+ // instruction after the branch, which will lead to the base register
+ // not being updated correctly. Explicitly handle this case, without
+ // doing anything fancy, then handle all of the other cases.
+
+ // mode == Offset
+ // add scratch, base, offset_hi
+ // ldr dest, [scratch, +offset_lo]
+ //
+ // mode == PreIndex
+ // add base, base, offset_hi
+ // ldr dest, [base, +offset_lo]!
+
+ int bottom = off & 0xfff;
+ int neg_bottom = 0x1000 - bottom;
+
+ MOZ_ASSERT(rn != scratch);
+ MOZ_ASSERT(mode != PostIndex);
+
+ // At this point, both off - bottom and off + neg_bottom will be
+ // reasonable-ish quantities.
+ //
+ // Note a neg_bottom of 0x1000 can not be encoded as an immediate
+ // negative offset in the instruction and this occurs when bottom is
+ // zero, so this case is guarded against below.
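+ // For example (illustrative): off = 0x12345 gives bottom = 0x345 and
+ // off - bottom = 0x12000, which is a valid imm8m (0x12 rotated), so we
+ // emit 'add scratch, rn, #0x12000' followed by a load/store with
+ // immediate offset #0x345.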
+ if (off < 0) {
+ Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)), cc);
+ }
+
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x1000);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)), cc);
+ }
+
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x1000);
+ // sub_off = neg_bottom + off
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
+ }
+ }
+
+ ma_mov(offset, scratch);
+ return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(scratch, LSL, 0)), cc);
+ } else {
+ // Should attempt to use the extended load/store instructions.
+ if (off < 256 && off > -256)
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
+
+ // We cannot encode this offset in a single extldr. Try to encode it as
+ // an add scratch, base, imm; extldr dest, [scratch, +offset].
+ int bottom = off & 0xff;
+ int neg_bottom = 0x100 - bottom;
+ // At this point, both off - bottom and off + neg_bottom will be
+ // reasonable-ish quantities.
+ //
+ // Note a neg_bottom of 0x100 can not be encoded as an immediate
+ // negative offset in the instruction and this occurs when bottom is
+ // zero, so this case is guarded against below.
+ if (off < 0) {
+ // sub_off = bottom - off
+ Operand2 sub_off = Imm8(-(off - bottom));
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(bottom)),
+ cc);
+ }
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x100);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(-neg_bottom)),
+ cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(bottom)),
+ cc);
+ }
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x100);
+ // sub_off = neg_bottom + off
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(-neg_bottom)),
+ cc);
+ }
+ }
+ ma_mov(offset, scratch);
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(scratch)), cc);
+ }
+}
+
+void
+MacroAssemblerARM::ma_pop(Register r)
+{
+ as_dtr(IsLoad, 32, PostIndex, r, DTRAddr(sp, DtrOffImm(4)));
+}
+
+void
+MacroAssemblerARM::ma_popn_pc(Imm32 n, AutoRegisterScope& scratch, AutoRegisterScope& scratch2)
+{
+ // pc <- [sp]; sp += n
+ int32_t nv = n.value;
+
+ if (nv < 4096 && nv >= -4096) {
+ as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(nv)));
+ } else {
+ ma_mov(sp, scratch);
+ ma_add(Imm32(n), sp, scratch2);
+ as_dtr(IsLoad, 32, Offset, pc, DTRAddr(scratch, DtrOffImm(0)));
+ }
+}
+
+void
+MacroAssemblerARM::ma_push(Register r)
+{
+ MOZ_ASSERT(r != sp, "Use ma_push_sp().");
+ as_dtr(IsStore, 32, PreIndex, r, DTRAddr(sp, DtrOffImm(-4)));
+}
+
+void
+MacroAssemblerARM::ma_push_sp(Register r, AutoRegisterScope& scratch)
+{
+ // Pushing sp is not well-defined: use two instructions.
+ MOZ_ASSERT(r == sp);
+ ma_mov(sp, scratch);
+ as_dtr(IsStore, 32, PreIndex, scratch, DTRAddr(sp, DtrOffImm(-4)));
+}
+
+void
+MacroAssemblerARM::ma_vpop(VFPRegister r)
+{
+ startFloatTransferM(IsLoad, sp, IA, WriteBack);
+ transferFloatReg(r);
+ finishFloatTransfer();
+}
+
+void
+MacroAssemblerARM::ma_vpush(VFPRegister r)
+{
+ startFloatTransferM(IsStore, sp, DB, WriteBack);
+ transferFloatReg(r);
+ finishFloatTransfer();
+}
+
+// Barriers
+void
+MacroAssemblerARM::ma_dmb(BarrierOption option)
+{
+ if (HasDMBDSBISB())
+ as_dmb(option);
+ else
+ as_dmb_trap();
+}
+
+void
+MacroAssemblerARM::ma_dsb(BarrierOption option)
+{
+ if (HasDMBDSBISB())
+ as_dsb(option);
+ else
+ as_dsb_trap();
+}
+
+// Branches, for use from within ARM-specific code.
+BufferOffset
+MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c)
+{
+ return as_b(dest, c);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_b(wasm::TrapDesc target, Assembler::Condition c)
+{
+ return as_b(target, c);
+}
+
+void
+MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c)
+{
+ as_bx(dest, c);
+}
+
+void
+MacroAssemblerARM::ma_b(void* target, Assembler::Condition c)
+{
+ // An immediate pool is used for easier patching.
+ as_Imm32Pool(pc, uint32_t(target), c);
+}
+
+// This is almost never necessary: we essentially never call a label,
+// except possibly in the bailout-table case.
+void
+MacroAssemblerARM::ma_bl(Label* dest, Assembler::Condition c)
+{
+ as_bl(dest, c);
+}
+
+void
+MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c)
+{
+ as_blx(reg, c);
+}
+
+// VFP/ALU
+void
+MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void
+MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void
+MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void
+MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void
+MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void
+MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void
+MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void
+MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+ as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void
+MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vmov(dest, src, cc);
+}
+
+void
+MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
+}
+
+void
+MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vneg(dest, src, cc);
+}
+
+void
+MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
+}
+
+void
+MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vabs(dest, src, cc);
+}
+
+void
+MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
+}
+
+void
+MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vsqrt(dest, src, cc);
+}
+
+void
+MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
+}
+
+static inline uint32_t
+DoubleHighWord(wasm::RawF64 value)
+{
+ return static_cast<uint32_t>(value.bits() >> 32);
+}
+
+static inline uint32_t
+DoubleLowWord(wasm::RawF64 value)
+{
+ return value.bits() & uint32_t(0xffffffff);
+}
+
+void
+MacroAssemblerARM::ma_vimm(wasm::RawF64 value, FloatRegister dest, Condition cc)
+{
+ if (HasVFPv3()) {
+ if (DoubleLowWord(value) == 0) {
+ if (DoubleHighWord(value) == 0) {
+ // To zero a register, load 1.0, then execute dN <- dN - dN
+ as_vimm(dest, VFPImm::One, cc);
+ as_vsub(dest, dest, dest, cc);
+ return;
+ }
+
+ VFPImm enc(DoubleHighWord(value));
+ if (enc.isValid()) {
+ as_vimm(dest, enc, cc);
+ return;
+ }
+ }
+ }
+ // Fall back to putting the value in a pool.
+ as_FImm64Pool(dest, value, cc);
+}
+
+void
+MacroAssemblerARM::ma_vimm_f32(wasm::RawF32 value, FloatRegister dest, Condition cc)
+{
+ VFPRegister vd = VFPRegister(dest).singleOverlay();
+ if (HasVFPv3()) {
+ if (value.bits() == 0) {
+ // To zero a register, load 1.0, then execute sN <- sN - sN.
+ as_vimm(vd, VFPImm::One, cc);
+ as_vsub(vd, vd, vd, cc);
+ return;
+ }
+
+        // Note that the vimm immediate float32 instruction encoding differs
+        // from the vimm immediate double encoding, but this difference
+        // matches the difference between the floating point formats, so it
+        // is possible to convert the float32 to a double and then use the
+        // double encoding paths. It is still necessary to first check that
+        // the double's low word is zero, because some float32 values set
+        // these bits and that cannot be ignored.
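+        // Illustrative sketch (values assumed): 0.5f widens to the double
+        // 0x3fe0000000000000, whose low word is zero and whose high word
+        // 0x3fe00000 is VFP-immediate encodable, so a single immediate
+        // vmov is emitted. By contrast, 0.1f widens to
+        // 0x3fb99999a0000000, whose low word is non-zero, so it falls
+        // through to the constant pool below.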
+ wasm::RawF64 doubleValue(double(value.fp()));
+ if (DoubleLowWord(doubleValue) == 0) {
+ VFPImm enc(DoubleHighWord(doubleValue));
+ if (enc.isValid()) {
+ as_vimm(vd, enc, cc);
+ return;
+ }
+ }
+ }
+
+ // Fall back to putting the value in a pool.
+ as_FImm32Pool(vd, value, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc)
+{
+ as_vcmp(VFPRegister(src1), VFPRegister(src2), cc);
+}
+
+void
+MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc)
+{
+ as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc);
+}
+
+void
+MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc)
+{
+ as_vcmpz(VFPRegister(src1), cc);
+}
+
+void
+MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc)
+{
+ as_vcmpz(VFPRegister(src1).singleOverlay(), cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isDouble());
+ MOZ_ASSERT(dest.isSInt());
+ as_vcvt(dest, src, false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isDouble());
+ MOZ_ASSERT(dest.isUInt());
+ as_vcvt(dest, src, false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isSInt());
+ MOZ_ASSERT(dest.isDouble());
+ as_vcvt(dest, src, false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isUInt());
+ MOZ_ASSERT(dest.isDouble());
+ as_vcvt(dest, src, false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isSingle());
+ MOZ_ASSERT(dest.isSInt());
+ as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isSingle());
+ MOZ_ASSERT(dest.isUInt());
+ as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isSInt());
+ MOZ_ASSERT(dest.isSingle());
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc)
+{
+ MOZ_ASSERT(src.isUInt());
+ MOZ_ASSERT(dest.isSingle());
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc);
+}
+
+void
+MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest, Condition cc)
+{
+ as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc);
+}
+
+void
+MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc)
+{
+ as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc);
+}
+
+void
+MacroAssemblerARM::ma_vxfer(Register src, FloatRegister dest, Condition cc)
+{
+ as_vxfer(src, InvalidReg, VFPRegister(dest).singleOverlay(), CoreToFloat, cc);
+}
+
+void
+MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc)
+{
+ as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vdtr(LoadStore ls, const Address& addr, VFPRegister rt,
+ AutoRegisterScope& scratch, Condition cc)
+{
+ int off = addr.offset;
+ MOZ_ASSERT((off & 3) == 0);
+ Register base = addr.base;
+ if (off > -1024 && off < 1024)
+ return as_vdtr(ls, rt, Operand(addr).toVFPAddr(), cc);
+
+    // We cannot encode this offset in a single vldr/vstr. Try to encode it
+    // as an add scratch, base, imm; vldr/vstr dest, [scratch, +offset].
+ int bottom = off & (0xff << 2);
+ int neg_bottom = (0x100 << 2) - bottom;
+    // At this point, both off - bottom and off + neg_bottom are multiples
+    // of 0x400 and thus plausible candidates for the 8-bit rotated
+    // immediate taken by add/sub.
+    //
+    // Note that a neg_bottom of 0x400 cannot be encoded as an immediate
+    // negative offset in the instruction; it only occurs when bottom is
+    // zero, and that case is guarded against below.
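+    // Illustrative sketch (values assumed, not emitted verbatim): for
+    // off = 0x1404,
+    //   bottom     = 0x1404 & (0xff << 2) = 0x4
+    //   neg_bottom = 0x400 - 0x4          = 0x3fc
+    //   sub_off    = off - bottom         = 0x1400  (a valid rotated Imm8)
+    // so for a load the code below emits roughly:
+    //   add  scratch, base, #0x1400
+    //   vldr rt, [scratch, #+4]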
+ if (off < 0) {
+ // sub_off = bottom - off
+ Operand2 sub_off = Imm8(-(off - bottom));
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc);
+ }
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x400);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc);
+ }
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x400);
+ // sub_off = neg_bottom + off
+ as_add(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc);
+ }
+ }
+
+    // It is safe to use scratch as the destination here: ma_add() only
+    // writes its destination at the end, and it cannot rely on the
+    // destination as an internal scratch anyway, since the destination may
+    // also equal base.
+ ma_add(base, Imm32(off), scratch, scratch, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(0)), cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc)
+{
+ return as_vdtr(IsLoad, dest, addr, cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest, AutoRegisterScope& scratch, Condition cc)
+{
+ return ma_vdtr(IsLoad, addr, dest, scratch, cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ int32_t shift, Condition cc)
+{
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return as_vdtr(IsLoad, src, Operand(Address(scratch, 0)).toVFPAddr(), cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc)
+{
+ return as_vdtr(IsStore, src, addr, cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vstr(VFPRegister src, const Address& addr, AutoRegisterScope& scratch, Condition cc)
+{
+ return ma_vdtr(IsStore, addr, src, scratch, cc);
+}
+
+BufferOffset
+MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, int32_t shift, int32_t offset, Condition cc)
+{
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return ma_vstr(src, Address(scratch, offset), scratch2, cc);
+}
+
+// Without an offset, no second scratch register is necessary.
+BufferOffset
+MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ int32_t shift, Condition cc)
+{
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return as_vdtr(IsStore, src, Operand(Address(scratch, 0)).toVFPAddr(), cc);
+}
+
+bool
+MacroAssemblerARMCompat::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+ DebugOnly<uint32_t> initialDepth = asMasm().framePushed();
+ uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+
+ asMasm().Push(Imm32(descriptor)); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+void
+MacroAssemblerARMCompat::move32(Imm32 imm, Register dest)
+{
+ ma_mov(imm, dest);
+}
+
+void
+MacroAssemblerARMCompat::move32(Register src, Register dest)
+{
+ ma_mov(src, dest);
+}
+
+void
+MacroAssemblerARMCompat::movePtr(Register src, Register dest)
+{
+ ma_mov(src, dest);
+}
+
+void
+MacroAssemblerARMCompat::movePtr(ImmWord imm, Register dest)
+{
+ ma_mov(Imm32(imm.value), dest);
+}
+
+void
+MacroAssemblerARMCompat::movePtr(ImmGCPtr imm, Register dest)
+{
+ ma_mov(imm, dest);
+}
+
+void
+MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
+{
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+void
+MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
+{
+ append(wasm::SymbolicAccess(CodeOffset(currentOffset()), imm));
+ ma_movPatchable(Imm32(-1), dest, Always);
+}
+
+void
+MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex& src, Register dest)
+{
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset == 0) {
+ ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
+ } else {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ ma_ldrb(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest);
+ }
+}
+
+void
+MacroAssemblerARMCompat::load8SignExtend(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::load8SignExtend(const BaseIndex& src, Register dest)
+{
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void
+MacroAssemblerARMCompat::load16ZeroExtend(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex& src, Register dest)
+{
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void
+MacroAssemblerARMCompat::load16SignExtend(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::load16SignExtend(const BaseIndex& src, Register dest)
+{
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+    // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void
+MacroAssemblerARMCompat::load32(const Address& address, Register dest)
+{
+ loadPtr(address, dest);
+}
+
+void
+MacroAssemblerARMCompat::load32(const BaseIndex& address, Register dest)
+{
+ loadPtr(address, dest);
+}
+
+void
+MacroAssemblerARMCompat::load32(AbsoluteAddress address, Register dest)
+{
+ loadPtr(address, dest);
+}
+
+void
+MacroAssemblerARMCompat::loadPtr(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(address, dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::loadPtr(const BaseIndex& src, Register dest)
+{
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset != 0) {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ ma_ldr(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest);
+ } else {
+ ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
+ }
+}
+
+void
+MacroAssemblerARMCompat::loadPtr(AbsoluteAddress address, Register dest)
+{
+ MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
+ movePtr(ImmWord(uintptr_t(address.addr)), dest);
+ loadPtr(Address(dest, 0), dest);
+}
+
+void
+MacroAssemblerARMCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
+{
+ MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
+ movePtr(address, dest);
+ loadPtr(Address(dest, 0), dest);
+}
+
+void
+MacroAssemblerARMCompat::loadPrivate(const Address& address, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(ToPayload(address), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::loadDouble(const Address& address, FloatRegister dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(address, dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
+{
+    // VFP instructions don't support register base + register index
+    // addressing modes, so just add the index to the base, then handle the
+    // offset as usual.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), dest, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+
+ VFPRegister rt = dest;
+ ma_vldr(address, rt.singleOverlay(), scratch);
+ as_vcvt(rt, rt.singleOverlay());
+}
+
+void
+MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
+{
+    // VFP instructions don't support register base + register index
+    // addressing modes, so just add the index to the base, then handle the
+    // offset as usual.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+ VFPRegister rt = dest;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), rt.singleOverlay(), scratch2);
+ as_vcvt(rt, rt.singleOverlay());
+}
+
+void
+MacroAssemblerARMCompat::loadFloat32(const Address& address, FloatRegister dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(address, VFPRegister(dest).singleOverlay(), scratch);
+}
+
+void
+MacroAssemblerARMCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
+{
+    // VFP instructions don't support register base + register index
+    // addressing modes, so just add the index to the base, then handle the
+    // offset as usual.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay(), scratch2);
+}
+
+void
+MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch2);
+ store8(scratch2, address);
+}
+
+void
+MacroAssemblerARMCompat::store8(Register src, const Address& address)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src, scratch);
+}
+
+void
+MacroAssemblerARMCompat::store8(Imm32 imm, const BaseIndex& dest)
+{
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_strb(scratch2, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch2);
+ ma_strb(scratch2, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void
+MacroAssemblerARMCompat::store8(Register src, const BaseIndex& dest)
+{
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_strb(src, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void
+MacroAssemblerARMCompat::store16(Imm32 imm, const Address& address)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch2);
+ store16(scratch2, address);
+}
+
+void
+MacroAssemblerARMCompat::store16(Register src, const Address& address)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src, scratch);
+}
+
+void
+MacroAssemblerARMCompat::store16(Imm32 imm, const BaseIndex& dest)
+{
+ Register index = dest.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+    // ARMv7 does not have LSL on an index register with an extended store.
+ if (dest.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(dest.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (dest.offset != 0) {
+ ma_add(index, Imm32(dest.offset), scratch, scratch2);
+ index = scratch;
+ }
+
+ ma_mov(imm, scratch2);
+ ma_strh(scratch2, EDtrAddr(dest.base, EDtrOffReg(index)));
+}
+
+void
+MacroAssemblerARMCompat::store16(Register src, const BaseIndex& address)
+{
+ Register index = address.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+    // ARMv7 does not have LSL on an index register with an extended store.
+ if (address.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(address.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (address.offset != 0) {
+ ma_add(index, Imm32(address.offset), scratch, scratch2);
+ index = scratch;
+ }
+ ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index)));
+}
+
+void
+MacroAssemblerARMCompat::store32(Register src, AbsoluteAddress address)
+{
+ storePtr(src, address);
+}
+
+void
+MacroAssemblerARMCompat::store32(Register src, const Address& address)
+{
+ storePtr(src, address);
+}
+
+void
+MacroAssemblerARMCompat::store32(Imm32 src, const Address& address)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ move32(src, scratch);
+ ma_str(scratch, address, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::store32(Imm32 imm, const BaseIndex& dest)
+{
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch);
+ ma_str(scratch, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void
+MacroAssemblerARMCompat::store32(Register src, const BaseIndex& dest)
+{
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_str(src, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmWord imm, const Address& address)
+{
+ store32(Imm32(imm.value), address);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmWord imm, const BaseIndex& address)
+{
+ store32(Imm32(imm.value), address);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmPtr imm, const Address& address)
+{
+ store32(Imm32(uintptr_t(imm.value)), address);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmPtr imm, const BaseIndex& address)
+{
+ store32(Imm32(uintptr_t(imm.value)), address);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const Address& address)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch);
+ ma_str(scratch, address, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const BaseIndex& address)
+{
+ Register base = address.base;
+ uint32_t scale = Imm32::ShiftOf(address.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (address.offset != 0) {
+ ma_add(base, Imm32(address.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrRegImmShift(address.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch);
+ ma_str(scratch, DTRAddr(base, DtrRegImmShift(address.index, LSL, scale)));
+ }
+}
+
+void
+MacroAssemblerARMCompat::storePtr(Register src, const Address& address)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_str(src, address, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(Register src, const BaseIndex& address)
+{
+ store32(src, address);
+}
+
+void
+MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmWord(uintptr_t(dest.addr)), scratch);
+ ma_str(src, DTRAddr(scratch, DtrOffImm(0)));
+}
+
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+ if (HasVFPv3()) {
+ Label notSplit;
+ {
+ ScratchDoubleScope scratchDouble(*this);
+ MOZ_ASSERT(input != scratchDouble);
+ loadConstantDouble(0.5, scratchDouble);
+
+ ma_vadd(input, scratchDouble, scratchDouble);
+ // Convert the double into an unsigned fixed point value with 24 bits of
+ // precision. The resulting number will look like 0xII.DDDDDD
+ as_vcvtFixed(scratchDouble, false, 24, true);
+ }
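+
+        // Illustrative trace (input value assumed): for input 2.5 the sum
+        // is 3.0, which converts to the fixed-point value 0x03000000. Its
+        // low 24 bits are zero, so the tie-breaking path below runs: the
+        // low word of the double 2.5 (0x4004000000000000) is zero, so the
+        // low bit of the shifted result 3 is cleared, yielding 2 (round
+        // half to even).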
+
+ // Move the fixed point value into an integer register.
+ {
+ ScratchFloat32Scope scratchFloat(*this);
+ as_vxfer(output, InvalidReg, scratchFloat.uintOverlay(), FloatToCore);
+ }
+
+ ScratchRegisterScope scratch(*this);
+
+        // See if this value *might* have been an exact integer after adding
+        // 0.5. This only tests the fractional places from 1/2 down to
+        // 1/16,777,216 (2^-24), whereas a genuine 0.5 tie would need to be
+        // tested out to the 1/140,737,488,355,328th (2^-47) place, so this
+        // is just a first, conservative check.
+ ma_tst(output, Imm32(0x00ffffff), scratch);
+ // Convert to a uint8 by shifting out all of the fraction bits.
+ ma_lsr(Imm32(24), output, output);
+ // If any of the bottom 24 bits were non-zero, then we're good, since
+ // this number can't be exactly XX.0
+ ma_b(&notSplit, NonZero);
+ as_vxfer(scratch, InvalidReg, input, FloatToCore);
+ as_cmp(scratch, Imm8(0));
+        // If the lower 32 bits of the original double are 0, the value was
+        // a genuine .5 tie, so make the result even (round half to even).
+ as_bic(output, output, Imm8(1), LeaveCC, Zero);
+ bind(&notSplit);
+ } else {
+ ScratchDoubleScope scratchDouble(*this);
+ MOZ_ASSERT(input != scratchDouble);
+ loadConstantDouble(0.5, scratchDouble);
+
+ Label outOfRange;
+ ma_vcmpz(input);
+ // Do the add, in place so we can reference it later.
+ ma_vadd(input, scratchDouble, input);
+ // Do the conversion to an integer.
+ as_vcvt(VFPRegister(scratchDouble).uintOverlay(), VFPRegister(input));
+ // Copy the converted value out.
+ as_vxfer(output, InvalidReg, scratchDouble, FloatToCore);
+ as_vmrs(pc);
+ ma_mov(Imm32(0), output, Overflow); // NaN => 0
+ ma_b(&outOfRange, Overflow); // NaN
+ as_cmp(output, Imm8(0xff));
+ ma_mov(Imm32(0xff), output, Above);
+ ma_b(&outOfRange, Above);
+ // Convert it back to see if we got the same value back.
+ as_vcvt(scratchDouble, VFPRegister(scratchDouble).uintOverlay());
+ // Do the check.
+ as_vcmp(scratchDouble, input);
+ as_vmrs(pc);
+ as_bic(output, output, Imm8(1), LeaveCC, Zero);
+ bind(&outOfRange);
+ }
+}
+
+void
+MacroAssemblerARMCompat::cmp32(Register lhs, Imm32 rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(lhs, rhs, scratch);
+}
+
+void
+MacroAssemblerARMCompat::cmp32(Register lhs, Register rhs)
+{
+ ma_cmp(lhs, rhs);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmWord rhs)
+{
+ cmp32(lhs, Imm32(rhs.value));
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmPtr rhs)
+{
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(Register lhs, Register rhs)
+{
+ ma_cmp(lhs, rhs);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmGCPtr rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(lhs, rhs, scratch);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(Register lhs, Imm32 rhs)
+{
+ cmp32(lhs, rhs);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Register rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmWord rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, Imm32(rhs.value), scratch2);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmPtr rhs)
+{
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmGCPtr rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Imm32 rhs)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::setStackArg(Register reg, uint32_t arg)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg, scratch);
+}
+
+void
+MacroAssemblerARMCompat::minMaxDouble(FloatRegister srcDest, FloatRegister second, bool canBeNaN,
+ bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Label nan, equal, returnSecond, done;
+
+ Assembler::Condition cond = isMax
+ ? Assembler::VFP_LessThanOrEqual
+ : Assembler::VFP_GreaterThanOrEqual;
+
+ compareDouble(first, second);
+ // First or second is NaN, result is NaN.
+ ma_b(&nan, Assembler::VFP_Unordered);
+ // Make sure we handle -0 and 0 right.
+ ma_b(&equal, Assembler::VFP_Equal);
+ ma_b(&returnSecond, cond);
+ ma_b(&done);
+
+ // Check for zero.
+ bind(&equal);
+ compareDouble(first, NoVFPRegister);
+ // First wasn't 0 or -0, so just return it.
+ ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ ma_vadd(second, first, first);
+ } else {
+ ma_vneg(first, first);
+ ma_vsub(first, second, first);
+ ma_vneg(first, first);
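+        // With IEEE-754 signed zeros, -((-first) - second) is -0 whenever
+        // either operand is -0 and +0 only when both are +0, which is
+        // exactly what min() requires for zero operands.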
+ }
+ ma_b(&done);
+
+ bind(&nan);
+ // If the first argument is the NaN, return it; otherwise return the second
+ // operand.
+ compareDouble(first, first);
+ ma_vmov(first, srcDest, Assembler::VFP_Unordered);
+ ma_b(&done, Assembler::VFP_Unordered);
+
+ bind(&returnSecond);
+ ma_vmov(second, srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerARMCompat::minMaxFloat32(FloatRegister srcDest, FloatRegister second, bool canBeNaN,
+ bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Label nan, equal, returnSecond, done;
+
+ Assembler::Condition cond = isMax
+ ? Assembler::VFP_LessThanOrEqual
+ : Assembler::VFP_GreaterThanOrEqual;
+
+ compareFloat(first, second);
+ // First or second is NaN, result is NaN.
+ ma_b(&nan, Assembler::VFP_Unordered);
+ // Make sure we handle -0 and 0 right.
+ ma_b(&equal, Assembler::VFP_Equal);
+ ma_b(&returnSecond, cond);
+ ma_b(&done);
+
+ // Check for zero.
+ bind(&equal);
+ compareFloat(first, NoVFPRegister);
+ // First wasn't 0 or -0, so just return it.
+ ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ ma_vadd_f32(second, first, first);
+ } else {
+ ma_vneg_f32(first, first);
+ ma_vsub_f32(first, second, first);
+ ma_vneg_f32(first, first);
+ }
+ ma_b(&done);
+
+ bind(&nan);
+ // See comment in minMaxDouble.
+ compareFloat(first, first);
+ ma_vmov_f32(first, srcDest, Assembler::VFP_Unordered);
+ ma_b(&done, Assembler::VFP_Unordered);
+
+ bind(&returnSecond);
+ ma_vmov_f32(second, srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerARMCompat::compareDouble(FloatRegister lhs, FloatRegister rhs)
+{
+ // Compare the doubles, setting vector status flags.
+ if (rhs.isMissing())
+ ma_vcmpz(lhs);
+ else
+ ma_vcmp(lhs, rhs);
+
+ // Move vector status bits to normal status flags.
+ as_vmrs(pc);
+}
+
+void
+MacroAssemblerARMCompat::compareFloat(FloatRegister lhs, FloatRegister rhs)
+{
+    // Compare the floats, setting vector status flags.
+ if (rhs.isMissing())
+ as_vcmpz(VFPRegister(lhs).singleOverlay());
+ else
+ as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay());
+
+ // Move vector status bits to normal status flags.
+ as_vmrs(pc);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const ValueOperand& value)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand& value)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand& value)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), scratch);
+ return actual;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const ValueOperand& value)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const ValueOperand& value)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testString(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testString(cond, value.typeReg());
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testSymbol(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testSymbol(cond, value.typeReg());
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testObject(cond, value.typeReg());
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNumber(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testNumber(cond, value.typeReg());
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testMagic(cond, value.typeReg());
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const ValueOperand& value)
+{
+ return testPrimitive(cond, value.typeReg());
+}
+
+// Register-based tests.
+Assembler::Condition
+MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNull(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testString(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testSymbol(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testObject(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return cond == Equal ? Below : AboveOrEqual;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testGCThing(Assembler::Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testDouble(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testDouble(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testBoolean(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testBoolean(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNull(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testNull(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testUndefined(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testUndefined(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testString(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testString(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testSymbol(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testSymbol(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testObject(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testObject(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNumber(Condition cond, const Address& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ return testNumber(cond, scratch);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testDouble(Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNumber(Condition cond, Register tag)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return cond == Equal ? BelowOrEqual : Above;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testUndefined(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testNull(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testBoolean(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testString(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testSymbol(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testInt32(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testObject(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testDouble(Condition cond, const BaseIndex& src)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(src, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testMagic(Condition cond, const BaseIndex& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testGCThing(Condition cond, const BaseIndex& address)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ extractTag(address, scratch);
+ ma_cmp(scratch, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+}
+
+// Unboxing code.
+void
+MacroAssemblerARMCompat::unboxNonDouble(const ValueOperand& operand, Register dest)
+{
+ if (operand.payloadReg() != dest)
+ ma_mov(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerARMCompat::unboxNonDouble(const Address& src, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(ToPayload(src), dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::unboxNonDouble(const BaseIndex& src, Register dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_alu(src.base, lsl(src.index, src.scale), scratch, OpAdd);
+ ma_ldr(Address(scratch, src.offset), dest, scratch2);
+}
+
+void
+MacroAssemblerARMCompat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ MOZ_ASSERT(dest.isDouble());
+ as_vxfer(operand.payloadReg(), operand.typeReg(),
+ VFPRegister(dest), CoreToFloat);
+}
+
+void
+MacroAssemblerARMCompat::unboxDouble(const Address& src, FloatRegister dest)
+{
+ MOZ_ASSERT(dest.isDouble());
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(src, dest, scratch);
+}
+
+void
+MacroAssemblerARMCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ ma_b(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else if (src.payloadReg() != dest.gpr()) {
+ as_mov(dest.gpr(), O2Reg(src.payloadReg()));
+ }
+}
+
+void
+MacroAssemblerARMCompat::unboxPrivate(const ValueOperand& src, Register dest)
+{
+ ma_mov(src.payloadReg(), dest);
+}
+
+void
+MacroAssemblerARMCompat::boxDouble(FloatRegister src, const ValueOperand& dest)
+{
+ as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore);
+}
+
+void
+MacroAssemblerARMCompat::boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ if (src != dest.payloadReg())
+ ma_mov(src, dest.payloadReg());
+ ma_mov(ImmType(type), dest.typeReg());
+}
+
+void
+MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ VFPRegister d = VFPRegister(dest);
+ loadConstantDouble(1.0, dest);
+ as_cmp(operand.payloadReg(), Imm8(0));
+ // If the source is 0, then subtract the dest from itself, producing 0.
+ as_vsub(d, d, d, Equal);
+}
+
+void
+MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ VFPRegister vfpdest = VFPRegister(dest);
+ ScratchFloat32Scope scratch(asMasm());
+
+ // Transfer the integral value to a floating point register.
+ as_vxfer(operand.payloadReg(), InvalidReg, scratch.sintOverlay(), CoreToFloat);
+ // Convert the value to a double.
+ as_vcvt(vfpdest, scratch.sintOverlay());
+}
+
+void
+MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand& operand, FloatRegister dest)
+{
+ VFPRegister d = VFPRegister(dest).singleOverlay();
+ loadConstantFloat32(1.0, dest);
+ as_cmp(operand.payloadReg(), Imm8(0));
+ // If the source is 0, then subtract the dest from itself, producing 0.
+ as_vsub(d, d, d, Equal);
+}
+
+void
+MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest)
+{
+ // Transfer the integral value to a floating point register.
+ VFPRegister vfpdest = VFPRegister(dest).singleOverlay();
+ as_vxfer(operand.payloadReg(), InvalidReg,
+ vfpdest.sintOverlay(), CoreToFloat);
+ // Convert the value to a float.
+ as_vcvt(vfpdest, vfpdest.sintOverlay());
+}
+
+void
+MacroAssemblerARMCompat::loadConstantFloat32(float f, FloatRegister dest)
+{
+ loadConstantFloat32(wasm::RawF32(f), dest);
+}
+
+void
+MacroAssemblerARMCompat::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ ma_vimm_f32(f, dest);
+}
+
+void
+MacroAssemblerARMCompat::loadInt32OrDouble(const Address& src, FloatRegister dest)
+{
+ Label notInt32, end;
+
+ // If it's an int, convert to a double.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_ldr(ToType(src), scratch, scratch2);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch, &notInt32);
+ ma_ldr(ToPayload(src), scratch, scratch2);
+ convertInt32ToDouble(scratch, dest);
+ ma_b(&end);
+ }
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(src, dest, scratch);
+ }
+ bind(&end);
+}
+
+void
+MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift)
+{
+ Label notInt32, end;
+
+ JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ // If it's an int, convert it to double.
+ ma_alu(base, lsl(index, shift), scratch, OpAdd);
+
+    // Only one scratch register is available, so it has to be reused to
+    // hold the type tag, clobbering the address just computed.
+ ma_ldr(DTRAddr(scratch, DtrOffImm(NUNBOX32_TYPE_OFFSET)), scratch);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch, &notInt32);
+
+ // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset provided
+ ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), scratch);
+ convertInt32ToDouble(scratch, dest);
+ ma_b(&end);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+    // First, recompute the address that was held in the scratch register,
+    // since the scratch register was clobbered loading the type.
+ ma_alu(base, lsl(index, shift), scratch, OpAdd);
+ ma_vldr(VFPAddr(scratch, VFPOffImm(0)), dest);
+ bind(&end);
+}
+
+void
+MacroAssemblerARMCompat::loadConstantDouble(double dp, FloatRegister dest)
+{
+ loadConstantDouble(wasm::RawF64(dp), dest);
+}
+
+void
+MacroAssemblerARMCompat::loadConstantDouble(wasm::RawF64 dp, FloatRegister dest)
+{
+ ma_vimm(dp, dest);
+}
+
+// Treat the value as a boolean, and set condition codes accordingly.
+Assembler::Condition
+MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand& operand)
+{
+ ma_tst(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testBooleanTruthy(bool truthy, const ValueOperand& operand)
+{
+ ma_tst(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testDoubleTruthy(bool truthy, FloatRegister reg)
+{
+ as_vcmpz(VFPRegister(reg));
+ as_vmrs(pc);
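+    // After vmrs, an unordered compare (NaN) leaves the V flag set; the
+    // conditional cmp of r0 against itself then forces the Z flag, so a
+    // NaN is treated as not truthy.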
+ as_cmp(r0, O2Reg(r0), Overflow);
+ return truthy ? NonZero : Zero;
+}
+
+Register
+MacroAssemblerARMCompat::extractObject(const Address& address, Register scratch)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(ToPayload(address), scratch, scratch2);
+ return scratch;
+}
+
+Register
+MacroAssemblerARMCompat::extractTag(const Address& address, Register scratch)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(ToType(address), scratch, scratch2);
+ return scratch;
+}
+
+Register
+MacroAssemblerARMCompat::extractTag(const BaseIndex& address, Register scratch)
+{
+ ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, LeaveCC);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+void
+MacroAssemblerARMCompat::moveValue(const Value& val, Register type, Register data)
+{
+ ma_mov(Imm32(val.toNunboxTag()), type);
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), data);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), data);
+}
+
+void
+MacroAssemblerARMCompat::moveValue(const Value& val, const ValueOperand& dest)
+{
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common (ARM too now) interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerARMCompat::storeValue(ValueOperand val, const Address& dst)
+{
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_str(val.payloadReg(), ToPayload(dst), scratch2);
+ ma_str(val.typeReg(), ToType(dst), scratch2);
+}
+
+void
+MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex& dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+
+ if (isValueDTRDCandidate(val) && Abs(dest.offset) <= 255) {
+ Register tmpIdx;
+ if (dest.offset == 0) {
+ if (dest.scale == TimesOne) {
+ tmpIdx = dest.index;
+ } else {
+ ma_lsl(Imm32(dest.scale), dest.index, scratch);
+ tmpIdx = scratch;
+ }
+ ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx)));
+ } else {
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ ma_strd(val.payloadReg(), val.typeReg(),
+ EDtrAddr(scratch, EDtrOffImm(dest.offset)));
+ }
+ } else {
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ storeValue(val, Address(scratch, dest.offset));
+ }
+}
+
+void
+MacroAssemblerARMCompat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+ ScratchRegisterScope scratch(asMasm());
+
+ if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) {
+ Register tmpIdx;
+ if (addr.offset == 0) {
+ if (addr.scale == TimesOne) {
+ // If the offset register is the same as one of the destination
+ // registers, LDRD's behavior is undefined. Use the scratch
+ // register to avoid this.
+ if (val.aliases(addr.index)) {
+ ma_mov(addr.index, scratch);
+ tmpIdx = scratch;
+ } else {
+ tmpIdx = addr.index;
+ }
+ } else {
+ ma_lsl(Imm32(addr.scale), addr.index, scratch);
+ tmpIdx = scratch;
+ }
+ ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg());
+ } else {
+ ma_alu(addr.base, lsl(addr.index, addr.scale), scratch, OpAdd);
+ ma_ldrd(EDtrAddr(scratch, EDtrOffImm(addr.offset)),
+ val.payloadReg(), val.typeReg());
+ }
+ } else {
+ ma_alu(addr.base, lsl(addr.index, addr.scale), scratch, OpAdd);
+ loadValue(Address(scratch, addr.offset), val);
+ }
+}
+
+void
+MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val)
+{
+ Address payload = ToPayload(src);
+ Address type = ToType(src);
+
+ // TODO: copy this code into a generic function that acts on all sequences
+ // of memory accesses
+ if (isValueDTRDCandidate(val)) {
+ // If the value we want is in two consecutive registers starting with an
+ // even register, they can be combined as a single ldrd.
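+        // For example, with the payload in r2 and the type in r3 and a
+        // small offset, a single "ldrd r2, r3, [base, #offset]" suffices.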
+ int offset = src.offset;
+ if (offset < 256 && offset > -256) {
+ ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), val.payloadReg(), val.typeReg());
+ return;
+ }
+ }
+    // If the payload register number is lower than the type register
+    // number, then we may be able to use an ldm instruction.
+
+ if (val.payloadReg().code() < val.typeReg().code()) {
+ if (src.offset <= 4 && src.offset >= -8 && (src.offset & 3) == 0) {
+            // Each of the four offsets -8, -4, 0, 4 corresponds exactly to
+            // one of LDM{DB, DA, IA, IB}.
+ DTMMode mode;
+ switch (src.offset) {
+ case -8: mode = DB; break;
+ case -4: mode = DA; break;
+ case 0: mode = IA; break;
+ case 4: mode = IB; break;
+ default: MOZ_CRASH("Bogus Offset for LoadValue as DTM");
+ }
+ startDataTransferM(IsLoad, src.base, mode);
+ transferReg(val.payloadReg());
+ transferReg(val.typeReg());
+ finishDataTransfer();
+ return;
+ }
+ }
+ // Ensure that loading the payload does not erase the pointer to the Value
+ // in memory.
+ if (type.base != val.payloadReg()) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(payload, val.payloadReg(), scratch2);
+ ma_ldr(type, val.typeReg(), scratch2);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(type, val.typeReg(), scratch2);
+ ma_ldr(payload, val.payloadReg(), scratch2);
+ }
+}
+
+void
+MacroAssemblerARMCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg())
+ ma_mov(payload, dest.payloadReg());
+ ma_mov(ImmType(type), dest.typeReg());
+}
+
+void
+MacroAssemblerARMCompat::pushValue(ValueOperand val)
+{
+ ma_push(val.typeReg());
+ ma_push(val.payloadReg());
+}
+
+void
+MacroAssemblerARMCompat::pushValue(const Address& addr)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_ldr(ToType(addr), scratch, scratch2);
+ ma_push(scratch);
+ ma_ldr(ToPayloadAfterStackPush(addr), scratch, scratch2);
+ ma_push(scratch);
+}
+
+void
+MacroAssemblerARMCompat::popValue(ValueOperand val)
+{
+ ma_pop(val.payloadReg());
+ ma_pop(val.typeReg());
+}
+
+void
+MacroAssemblerARMCompat::storePayload(const Value& val, const Address& dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), scratch);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+ ma_str(scratch, ToPayload(dest), scratch2);
+}
+
+void
+MacroAssemblerARMCompat::storePayload(Register src, const Address& dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ ma_str(src, ToPayload(dest), scratch);
+}
+
+void
+MacroAssemblerARMCompat::storePayload(const Value& val, const BaseIndex& dest)
+{
+ unsigned shift = ScaleToShift(dest.scale);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), scratch);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+
+ // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+ // << shift + imm] cannot be encoded into a single instruction, and cannot
+ // be integrated into the as_dtr call.
+ JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ // If an offset is used, modify the base so that a [base + index << shift]
+ // instruction format can be used.
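+ //
+ // The emitted sequence is roughly:
+ // add base, base, #offset ; only if offset != 0
+ // str scratch, [base, index, lsl #shift]
+ // sub base, base, #offset ; restore base, only if offset != 0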
+ if (dest.offset != 0)
+ ma_add(dest.base, Imm32(dest.offset), dest.base, scratch2);
+
+ as_dtr(IsStore, 32, Offset, scratch,
+ DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
+
+ // Restore the original value of the base, if necessary.
+ if (dest.offset != 0)
+ ma_sub(dest.base, Imm32(dest.offset), dest.base, scratch);
+}
+
+void
+MacroAssemblerARMCompat::storePayload(Register src, const BaseIndex& dest)
+{
+ unsigned shift = ScaleToShift(dest.scale);
+ MOZ_ASSERT(shift < 32);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+ // << shift + imm] cannot be encoded into a single instruction, and cannot
+ // be integrated into the as_dtr call.
+ JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ // Save/restore the base if the BaseIndex has an offset, as above.
+ if (dest.offset != 0)
+ ma_add(dest.base, Imm32(dest.offset), dest.base, scratch);
+
+ // Technically, shift > -32 can be handled by changing LSL to ASR, but that
+ // should never come up, and this is one less code path to get wrong.
+ as_dtr(IsStore, 32, Offset, src, DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
+
+ if (dest.offset != 0)
+ ma_sub(dest.base, Imm32(dest.offset), dest.base, scratch);
+}
+
+void
+MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const Address& dest)
+{
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_mov(tag, scratch);
+ ma_str(scratch, ToType(dest), scratch2);
+}
+
+void
+MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest)
+{
+ Register base = dest.base;
+ Register index = dest.index;
+ unsigned shift = ScaleToShift(dest.scale);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ MOZ_ASSERT(base != scratch && base != scratch2);
+ MOZ_ASSERT(index != scratch && index != scratch2);
+
+ ma_add(base, Imm32(dest.offset + NUNBOX32_TYPE_OFFSET), scratch2, scratch);
+ ma_mov(tag, scratch);
+ ma_str(scratch, DTRAddr(scratch2, DtrRegImmShift(index, LSL, shift)));
+}
+
+void
+MacroAssemblerARM::ma_call(ImmPtr dest)
+{
+ ma_movPatchable(dest, CallReg, Always);
+ as_blx(CallReg);
+}
+
+void
+MacroAssemblerARMCompat::breakpoint()
+{
+ as_bkpt();
+}
+
+void
+MacroAssemblerARMCompat::simulatorStop(const char* msg)
+{
+#ifdef JS_SIMULATOR_ARM
+ MOZ_ASSERT(sizeof(char*) == 4);
+ writeInst(0xefffffff);
+ writeInst((int)msg);
+#endif
+}
+
+void
+MacroAssemblerARMCompat::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerARMCompat::breakpoint(Condition cc)
+{
+ ma_ldr(DTRAddr(r12, DtrRegImmShift(r12, LSL, 0, IsDown)), r12, Offset, cc);
+}
+
+void
+MacroAssemblerARMCompat::checkStackAlignment()
+{
+ asMasm().assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssemblerARMCompat::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + 7) & ~7;
+
+ Imm8 size8(size);
+ as_sub(sp, sp, size8);
+ ma_mov(sp, r0);
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(r1);
+ asMasm().passABIArg(r0);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, kind)), r0, scratch);
+ }
+
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp, scratch);
+ }
+
+ // We're going to be returning by the Ion calling convention: pop the
+ // return address off the stack into pc (the ldr pc, [sp], #+4 below).
+ as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r0, scratch);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11, scratch);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp, scratch);
+ }
+ jump(r0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by JSOP_RETSUB: BooleanValue(true) and the exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(r1, r2);
+ loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r0, scratch);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11, scratch);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp, scratch);
+ }
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jump(r0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the
+ // caller.
+ bind(&return_);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11, scratch);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp, scratch);
+ }
+ loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
+ ma_mov(r11, sp);
+ pop(r11);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // the caller frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub.
+ bind(&bailout);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), r2, scratch);
+ ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
+ ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r1, scratch);
+ }
+ jump(r1);
+}
+
+Assembler::Condition
+MacroAssemblerARMCompat::testStringTruthy(bool truthy, const ValueOperand& value)
+{
+ Register string = value.payloadReg();
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLength()), scratch, scratch2);
+ as_cmp(scratch, Imm8(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+void
+MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label* bail)
+{
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ compareDouble(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+ // The argument is a positive number; truncation is the path to glory. Since
+ // it is known to be > 0.0, convert through the unsigned range so that a
+ // value that rounds to INT_MAX is distinguishable from an argument that
+ // clamps to INT_MAX.
+ ma_vcvt_F64_U32(input, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+ // Move the top word of the double into the output reg; if it is non-zero,
+ // then the original value was -0.0.
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing.
+ ma_vneg(input, input);
+ ma_vcvt_F64_U32(input, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_vcvt_U32_F64(scratchDouble.uintOverlay(), scratchDouble);
+ compareDouble(scratchDouble, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+ // Flip the negated input back to its original value.
+ ma_vneg(input, input);
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required. Zero is also caught
+ // by this case, but floor of a negative number should never be zero.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+void
+MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label* bail)
+{
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+ compareFloat(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+ // The argument is a positive number; truncation is the path to glory. Since
+ // it is known to be > 0.0, convert through the unsigned range so that a
+ // value that rounds to INT_MAX is distinguishable from an argument that
+ // clamps to INT_MAX.
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ ma_vcvt_F32_U32(input, scratch.uintOverlay());
+ ma_vxfer(VFPRegister(scratch).uintOverlay(), output);
+ }
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+ // Move the whole float32 into the output reg; if it is non-zero, then the
+ // original value was -0.0.
+ as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing.
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ ma_vneg_f32(input, input);
+ ma_vcvt_F32_U32(input, scratch.uintOverlay());
+ ma_vxfer(VFPRegister(scratch).uintOverlay(), output);
+ ma_vcvt_U32_F32(scratch.uintOverlay(), scratch);
+ compareFloat(scratch, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ }
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+ // Flip the negated input back to its original value.
+ ma_vneg_f32(input, input);
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required. Zero is also caught
+ // by this case, but floor of a negative number should never be zero.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+void
+MacroAssemblerARMCompat::ceil(FloatRegister input, Register output, Label* bail)
+{
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareDouble(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ loadConstantDouble(-1.0, scratchDouble);
+ compareDouble(input, scratchDouble);
+ ma_b(bail, Assembler::GreaterThan);
+
+ // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ ma_vneg(input, scratchDouble);
+ FloatRegister ScratchUIntReg = scratchDouble.uintOverlay();
+ ma_vcvt_F64_U32(scratchDouble, ScratchUIntReg);
+ ma_vxfer(ScratchUIntReg, output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+
+ // Test for 0.0 / -0.0: if the top word of the input double is not zero,
+ // then it was -0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
+ // non-integer values, and maybe bail on overflow.
+ bind(&handlePos);
+ ma_vcvt_F64_U32(input, ScratchUIntReg);
+ ma_vxfer(ScratchUIntReg, output);
+ ma_vcvt_U32_F64(ScratchUIntReg, scratchDouble);
+ compareDouble(scratchDouble, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ // Bail out if the add overflowed or the result is non-positive.
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(bail, Zero);
+
+ bind(&fin);
+}
+
+void
+MacroAssemblerARMCompat::ceilf(FloatRegister input, Register output, Label* bail)
+{
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareFloat(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ loadConstantFloat32(-1.f, scratch);
+ compareFloat(input, scratch);
+ ma_b(bail, Assembler::GreaterThan);
+ }
+
+ // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ {
+ ScratchDoubleScope scratchDouble(asMasm());
+ FloatRegister scratchFloat = scratchDouble.asSingle();
+ FloatRegister scratchUInt = scratchDouble.uintOverlay();
+
+ ma_vneg_f32(input, scratchFloat);
+ ma_vcvt_F32_U32(scratchFloat, scratchUInt);
+ ma_vxfer(scratchUInt, output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+ }
+
+ // Test for 0.0 / -0.0: if the bit pattern of the input float32 is non-zero,
+ // then it was -0.0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
+ // non-integer values, and maybe bail on overflow.
+ bind(&handlePos);
+ {
+ ScratchDoubleScope scratchDouble(asMasm());
+ FloatRegister scratchFloat = scratchDouble.asSingle();
+ FloatRegister scratchUInt = scratchDouble.uintOverlay();
+
+ ma_vcvt_F32_U32(input, scratchUInt);
+ ma_vxfer(scratchUInt, output);
+ ma_vcvt_U32_F32(scratchUInt, scratchFloat);
+ compareFloat(scratchFloat, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+
+ // Bail on overflow or non-positive result.
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(bail, Zero);
+ }
+
+ bind(&fin);
+}
+
+CodeOffset
+MacroAssemblerARMCompat::toggledJump(Label* label)
+{
+ // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ BufferOffset b = ma_b(label, Always);
+ CodeOffset ret(b.getOffset());
+ return ret;
+}
+
+CodeOffset
+MacroAssemblerARMCompat::toggledCall(JitCode* target, bool enabled)
+{
+ BufferOffset bo = nextOffset();
+ addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+ ScratchRegisterScope scratch(asMasm());
+ ma_movPatchable(ImmPtr(target->raw()), scratch, Always);
+ if (enabled)
+ ma_blx(scratch);
+ else
+ ma_nop();
+ return CodeOffset(bo.getOffset());
+}
+
+void
+MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label* bail, FloatRegister tmp)
+{
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // Do a compare based on the original value, then do most other things based
+ // on the shifted value.
+ ma_vcmpz(input);
+ // Since we already know the sign bit, flip all numbers to be positive,
+ // stored in tmp.
+ ma_vabs(input, tmp);
+ as_vmrs(pc);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+ // The argument is a positive number; truncation is the path to glory. Since
+ // it is known to be > 0.0, convert through the unsigned range so that a
+ // value that rounds to INT_MAX is distinguishable from an argument that
+ // clamps to INT_MAX.
+
+ // Add the biggest number less than 0.5 (not 0.5, because adding that to
+ // the biggest number less than 0.5 would undesirably round up to 1), and
+ // store the result into tmp.
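+ //
+ // For example, if the input is the largest double below 0.5, adding 0.5
+ // would produce exactly 1.0 (the addition rounds up) and the result would
+ // wrongly round to 1; adding the largest double below 0.5 instead keeps the
+ // sum below 1.0, so it correctly truncates to 0.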
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), scratchDouble);
+ ma_vadd(scratchDouble, tmp, tmp);
+
+ ma_vcvt_F64_U32(tmp, scratchDouble.uintOverlay());
+ ma_vxfer(VFPRegister(scratchDouble).uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+ // Move the top word of the double into the output reg; if it is non-zero,
+ // then the original value was -0.0.
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing. This number may be positive,
+ // since we added 0.5.
+
+ // Add 0.5 to negative numbers, store the result into tmp
+ loadConstantDouble(0.5, scratchDouble);
+ ma_vadd(scratchDouble, tmp, tmp);
+
+ ma_vcvt_F64_U32(tmp, scratchDouble.uintOverlay());
+ ma_vxfer(VFPRegister(scratchDouble).uintOverlay(), output);
+
+ // -output is now a correctly rounded value, unless the original value was
+ // exactly halfway between two integers, in which case it has been rounded
+ // away from zero when it should have been rounded towards +infinity.
+ ma_vcvt_U32_F64(scratchDouble.uintOverlay(), scratchDouble);
+ compareDouble(scratchDouble, tmp);
+ as_sub(output, output, Imm8(1), LeaveCC, Equal);
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required, or it was zero,
+ // which means the result is actually -0.0 which also requires special
+ // handling.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+void
+MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label* bail, FloatRegister tmp)
+{
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchFloat32Scope scratchFloat(asMasm());
+
+ // Do a compare based on the original value, then do most other things based
+ // on the shifted value.
+ compareFloat(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+ // The argument is a positive number; truncation is the path to glory. Since
+ // it is known to be > 0.0, convert through the unsigned range so that a
+ // value that rounds to INT_MAX is distinguishable from an argument that
+ // clamps to INT_MAX.
+
+ // Add the biggest number less than 0.5f (not 0.5f, because adding that to
+ // the biggest number less than 0.5f would undesirably round up to 1), and
+ // store the result into tmp.
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), scratchFloat);
+ ma_vadd_f32(scratchFloat, input, tmp);
+
+ // Note: it doesn't matter whether x + .5 === x or not here, as it doesn't
+ // affect the semantics of the float to unsigned conversion (in particular,
+ // we are not applying any fixup after the operation).
+ ma_vcvt_F32_U32(tmp, scratchFloat.uintOverlay());
+ ma_vxfer(VFPRegister(scratchFloat).uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+
+ // Move the whole float32 into the output reg; if it is non-zero, then the
+ // original value was -0.0.
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+
+ // Add 0.5 to negative numbers, storing the result into tmp.
+ ma_vneg_f32(input, tmp);
+ loadConstantFloat32(0.5f, scratchFloat);
+ ma_vadd_f32(tmp, scratchFloat, scratchFloat);
+
+ // Adding 0.5 to a float input can yield the wrong result if the input is
+ // too large. In this case, skip the -1 adjustment made below.
+ compareFloat(scratchFloat, tmp);
+
+ // Negative case, negate, then start dancing. This number may be positive,
+ // since we added 0.5.
+ // /!\ The conditional jump afterwards depends on these two instructions
+ // *not* setting the status flags: the flags must remain unchanged from
+ // the comparison above.
+ ma_vcvt_F32_U32(scratchFloat, tmp.uintOverlay());
+ ma_vxfer(VFPRegister(tmp).uintOverlay(), output);
+
+ Label flipSign;
+ ma_b(&flipSign, Equal);
+
+ // -output is now a correctly rounded value, unless the original value was
+ // exactly halfway between two integers, in which case it has been rounded
+ // away from zero when it should have been rounded towards +infinity.
+ ma_vcvt_U32_F32(tmp.uintOverlay(), tmp);
+ compareFloat(tmp, scratchFloat);
+ as_sub(output, output, Imm8(1), LeaveCC, Equal);
+
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ bind(&flipSign);
+ as_rsb(output, output, Imm8(0), SetCC);
+
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required, or it was zero,
+ // which means the result is actually -0.0 which also requires special
+ // handling.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+CodeOffsetJump
+MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel* label, Condition cond, Label* documentation)
+{
+ ARMBuffer::PoolEntry pe;
+ BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond, documentation);
+ // Fill in a new CodeOffset with both the load and the pool entry that the
+ // instruction loads from.
+ CodeOffsetJump ret(bo.getOffset(), pe.index());
+ return ret;
+}
+
+namespace js {
+namespace jit {
+
+template<>
+Register
+MacroAssemblerARMCompat::computePointer<BaseIndex>(const BaseIndex& src, Register r)
+{
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(asMasm());
+
+ as_add(r, base, lsl(index, scale));
+ if (offset != 0)
+ ma_add(r, Imm32(offset), r, scratch);
+ return r;
+}
+
+template<>
+Register
+MacroAssemblerARMCompat::computePointer<Address>(const Address& src, Register r)
+{
+ ScratchRegisterScope scratch(asMasm());
+ if (src.offset == 0)
+ return src.base;
+ ma_add(src.base, Imm32(src.offset), r, scratch);
+ return r;
+}
+
+} // namespace jit
+} // namespace js
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend, const T& mem,
+ Register oldval, Register newval, Register output)
+{
+ // If LDREXB/H and STREXB/H are not available we use the
+ // word-width operations with read-modify-write. That does not
+ // abstract well, so fork.
+ //
+ // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
+ if (nbytes < 4 && !HasLDSTREXBHD())
+ compareExchangeARMv6(nbytes, signExtend, mem, oldval, newval, output);
+ else
+ compareExchangeARMv7(nbytes, signExtend, mem, oldval, newval, output);
+}
+
+// General algorithm:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* output, [ptr]
+// sxt* output, output, 0 ; sign-extend if applicable
+// *xt* tmp, oldval, 0 ; sign-extend or zero-extend if applicable
+// cmp output, tmp
+// bne L1 ; failed - values are different
+// strex* tmp, newval, [ptr]
+// cmp tmp, 1
+// beq L0 ; failed - location is dirty, retry
+// L1 dmb
+//
+// Discussion here: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html.
+// However note that that discussion uses 'isb' as the trailing fence.
+// I've not quite figured out why, and I've gone with dmb here which
+// is safe. Also see the LLVM source, which uses 'dmb ish' generally.
+// (Apple's Swift CPU apparently handles ish in a non-default, faster
+// way.)
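+//
+// As an illustrative sketch (not actual code in this file), the loop above
+// corresponds to:
+//
+//   do {
+//       output = *mem;                       // ldrex*
+//       if (output != oldval)                // cmp / bne L1
+//           break;
+//   } while (!store_exclusive(mem, newval)); // strex* signals failure as 1
+//
+// with dmb on both sides ordering the access against surrounding memory
+// operations.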
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const T& mem,
+ Register oldval, Register newval, Register output)
+{
+ Label again;
+ Label done;
+ ma_dmb(BarrierST);
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ Register ptr = computePointer(mem, scratch2);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ bind(&again);
+ switch (nbytes) {
+ case 1:
+ as_ldrexb(output, ptr);
+ if (signExtend) {
+ as_sxtb(output, output, 0);
+ as_sxtb(scratch, oldval, 0);
+ } else {
+ as_uxtb(scratch, oldval, 0);
+ }
+ break;
+ case 2:
+ as_ldrexh(output, ptr);
+ if (signExtend) {
+ as_sxth(output, output, 0);
+ as_sxth(scratch, oldval, 0);
+ } else {
+ as_uxth(scratch, oldval, 0);
+ }
+ break;
+ case 4:
+ MOZ_ASSERT(!signExtend);
+ as_ldrex(output, ptr);
+ break;
+ }
+ if (nbytes < 4)
+ as_cmp(output, O2Reg(scratch));
+ else
+ as_cmp(output, O2Reg(oldval));
+ as_b(&done, NotEqual);
+ switch (nbytes) {
+ case 1:
+ as_strexb(scratch, newval, ptr);
+ break;
+ case 2:
+ as_strexh(scratch, newval, ptr);
+ break;
+ case 4:
+ as_strex(scratch, newval, ptr);
+ break;
+ }
+ as_cmp(scratch, Imm8(1));
+ as_b(&again, Equal);
+ bind(&done);
+ ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchangeARMv6(int nbytes, bool signExtend, const T& mem,
+ Register oldval, Register newval, Register output)
+{
+ // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+ MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+ MOZ_CRASH("NYI");
+}
+
+template void
+js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
+ const Address& address, Register oldval,
+ Register newval, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
+ const BaseIndex& address, Register oldval,
+ Register newval, Register output);
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend, const T& mem,
+ Register value, Register output)
+{
+ // If LDREXB/H and STREXB/H are not available we use the
+ // word-width operations with read-modify-write. That does not
+ // abstract well, so fork.
+ //
+ // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
+ if (nbytes < 4 && !HasLDSTREXBHD())
+ atomicExchangeARMv6(nbytes, signExtend, mem, value, output);
+ else
+ atomicExchangeARMv7(nbytes, signExtend, mem, value, output);
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem,
+ Register value, Register output)
+{
+ Label again;
+ Label done;
+ ma_dmb(BarrierST);
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ Register ptr = computePointer(mem, scratch2);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ bind(&again);
+ switch (nbytes) {
+ case 1:
+ as_ldrexb(output, ptr);
+ if (signExtend)
+ as_sxtb(output, output, 0);
+ as_strexb(scratch, value, ptr);
+ break;
+ case 2:
+ as_ldrexh(output, ptr);
+ if (signExtend)
+ as_sxth(output, output, 0);
+ as_strexh(scratch, value, ptr);
+ break;
+ case 4:
+ MOZ_ASSERT(!signExtend);
+ as_ldrex(output, ptr);
+ as_strex(scratch, value, ptr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ as_cmp(scratch, Imm8(1));
+ as_b(&again, Equal);
+ bind(&done);
+ ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem,
+ Register value, Register output)
+{
+ // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+ MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+ MOZ_CRASH("NYI");
+}
+
+template void
+js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
+ const Address& address, Register value,
+ Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
+ const BaseIndex& address, Register value,
+ Register output);
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const T& mem, Register flagTemp, Register output)
+{
+ // The Imm32 case is not needed yet because lowering always forces
+ // the value into a register at present (bug 1077317).
+ //
+ // This would be useful for immediates small enough to fit into
+ // add/sub/and/or/xor.
+ MOZ_CRASH("Feature NYI");
+}
+
+// General algorithm:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* output, [ptr]
+// sxt* output, output, 0 ; sign-extend if applicable
+// OP tmp, output, value ; compute value to store
+// strex* tmp2, tmp, [ptr] ; tmp2 required by strex
+// cmp tmp2, 1
+// beq L0 ; failed - location is dirty, retry
+// dmb ; ordering barrier required
+//
+// Also see notes above at compareExchange re the barrier strategy.
+//
+// Observe that the value being combined into the memory element need
+// not be sign-extended, because no OP will make use of bits to the
+// left of the bits indicated by the width of the element, and neither
+// the output nor the bits stored are affected by those high bits.
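+//
+// For example, for a one-byte AtomicFetchAddOp only the low 8 bits of
+// (output + value) are stored back by strexb, so any high bits in the sum
+// are irrelevant to both the stored byte and the (already sign-extended)
+// output.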
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+ const Register& value, const T& mem, Register flagTemp,
+ Register output)
+{
+ // Fork for non-word operations on ARMv6.
+ //
+ // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
+ if (nbytes < 4 && !HasLDSTREXBHD())
+ atomicFetchOpARMv6(nbytes, signExtend, op, value, mem, flagTemp, output);
+ else
+ atomicFetchOpARMv7(nbytes, signExtend, op, value, mem, flagTemp, output);
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op,
+ const Register& value, const T& mem, Register flagTemp,
+ Register output)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ Register ptr = computePointer(mem, scratch2);
+
+ ma_dmb();
+
+ ScratchRegisterScope scratch(asMasm());
+
+ bind(&again);
+ switch (nbytes) {
+ case 1:
+ as_ldrexb(output, ptr);
+ if (signExtend)
+ as_sxtb(output, output, 0);
+ break;
+ case 2:
+ as_ldrexh(output, ptr);
+ if (signExtend)
+ as_sxth(output, output, 0);
+ break;
+ case 4:
+ MOZ_ASSERT(!signExtend);
+ as_ldrex(output, ptr);
+ break;
+ }
+ switch (op) {
+ case AtomicFetchAddOp:
+ as_add(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchSubOp:
+ as_sub(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchAndOp:
+ as_and(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchOrOp:
+ as_orr(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchXorOp:
+ as_eor(scratch, output, O2Reg(value));
+ break;
+ }
+ // Rd must differ from the two other arguments to strex.
+ switch (nbytes) {
+ case 1:
+ as_strexb(flagTemp, scratch, ptr);
+ break;
+ case 2:
+ as_strexh(flagTemp, scratch, ptr);
+ break;
+ case 4:
+ as_strex(flagTemp, scratch, ptr);
+ break;
+ }
+ as_cmp(flagTemp, Imm8(1));
+ as_b(&again, Equal);
+ ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op,
+ const Register& value, const T& mem, Register flagTemp,
+ Register output)
+{
+ // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+ MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+ MOZ_CRASH("NYI");
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const T& mem, Register flagTemp)
+{
+ // Fork for non-word operations on ARMv6.
+ //
+ // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
+ if (nbytes < 4 && !HasLDSTREXBHD())
+ atomicEffectOpARMv6(nbytes, op, value, mem, flagTemp);
+ else
+ atomicEffectOpARMv7(nbytes, op, value, mem, flagTemp);
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const T& mem, Register flagTemp)
+{
+ // The Imm32 case is not needed yet because lowering always forces
+ // the value into a register at present (bug 1077317).
+ //
+ // This would be useful for immediates small enough to fit into
+ // add/sub/and/or/xor.
+ MOZ_CRASH("NYI");
+}
+
+// Uses both scratch registers, one for the address and one for a temp,
+// but needs two temps for strex:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* temp, [ptr]
+// OP temp, temp, value ; compute value to store
+// strex* temp2, temp, [ptr]
+// cmp temp2, 1
+// beq L0 ; failed - location is dirty, retry
+// dmb ; ordering barrier required
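+//
+// This is the same loop as atomicFetchOp above, except that the loaded value
+// only lives in a scratch register and is never returned, so no
+// sign-extension is needed.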
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register& value,
+ const T& mem, Register flagTemp)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ Register ptr = computePointer(mem, scratch2);
+
+ ma_dmb();
+
+ ScratchRegisterScope scratch(asMasm());
+
+ bind(&again);
+ switch (nbytes) {
+ case 1:
+ as_ldrexb(scratch, ptr);
+ break;
+ case 2:
+ as_ldrexh(scratch, ptr);
+ break;
+ case 4:
+ as_ldrex(scratch, ptr);
+ break;
+ }
+ switch (op) {
+ case AtomicFetchAddOp:
+ as_add(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchSubOp:
+ as_sub(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchAndOp:
+ as_and(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchOrOp:
+ as_orr(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchXorOp:
+ as_eor(scratch, scratch, O2Reg(value));
+ break;
+ }
+ // Rd must differ from the two other arguments to strex.
+ switch (nbytes) {
+ case 1:
+ as_strexb(flagTemp, scratch, ptr);
+ break;
+ case 2:
+ as_strexh(flagTemp, scratch, ptr);
+ break;
+ case 4:
+ as_strex(flagTemp, scratch, ptr);
+ break;
+ }
+ as_cmp(flagTemp, Imm8(1));
+ as_b(&again, Equal);
+ ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicEffectOpARMv6(int nbytes, AtomicOp op, const Register& value,
+ const T& mem, Register flagTemp)
+{
+ // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+ MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+ MOZ_CRASH("NYI");
+}
+
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register output);
+
+template void
+js::jit::MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const Address& mem, Register flagTemp);
+template void
+js::jit::MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const BaseIndex& mem, Register flagTemp);
+template void
+js::jit::MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const Address& mem, Register flagTemp);
+template void
+js::jit::MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const BaseIndex& mem, Register flagTemp);
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerARMCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+template void
+MacroAssemblerARMCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerARMCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, AnyRegister output);
+template void
+MacroAssemblerARMCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, AnyRegister output);
+
+void
+MacroAssemblerARMCompat::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerARMCompat::profilerExitFrame()
+{
+ branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler&
+MacroAssemblerARM::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerARM::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+MacroAssembler&
+MacroAssemblerARMCompat::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerARMCompat::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ ScratchRegisterScope scratch(*this);
+ if (imm32.value)
+ ma_sub(imm32, sp, scratch);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void
+MacroAssembler::flush()
+{
+ Assembler::flush();
+}
+
+void
+MacroAssembler::comment(const char* msg)
+{
+ Assembler::comment(msg);
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ if (set.gprs().size() > 1) {
+ adjustFrame(diffG);
+ startDataTransferM(IsStore, StackPointer, DB, WriteBack);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ transferReg(*iter);
+ }
+ finishDataTransfer();
+ } else {
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ adjustFrame(diffF);
+ diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
+ MOZ_ASSERT(diffF == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ // ARM can load multiple registers at once, but only if we want back all
+ // the registers we previously saved to the stack.
+ if (ignore.emptyFloat()) {
+ diffF -= transferMultipleByRuns(set.fpus(), IsLoad, StackPointer, IA);
+ adjustFrame(-reservedF);
+ } else {
+ LiveFloatRegisterSet fpset(set.fpus().reduceSetForPush());
+ LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
+ for (FloatRegisterBackwardIterator iter(fpset); iter.more(); ++iter) {
+ diffF -= (*iter).size();
+ if (!fpignore.has(*iter))
+ loadDouble(Address(StackPointer, diffF), *iter);
+ }
+ freeStack(reservedF);
+ }
+ MOZ_ASSERT(diffF == 0);
+
+ if (set.gprs().size() > 1 && ignore.emptyGeneral()) {
+ startDataTransferM(IsLoad, StackPointer, IA, WriteBack);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ transferReg(*iter);
+ }
+ finishDataTransfer();
+ adjustFrame(-reservedG);
+ } else {
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ freeStack(reservedG);
+ }
+ MOZ_ASSERT(diffG == 0);
+}
+
+void
+MacroAssembler::Push(Register reg)
+{
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const Imm32 imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmWord imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmPtr imm)
+{
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssembler::Push(const ImmGCPtr ptr)
+{
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(FloatRegister reg)
+{
+ VFPRegister r = VFPRegister(reg);
+ ma_vpush(VFPRegister(reg));
+ adjustFrame(r.size());
+}
+
+void
+MacroAssembler::Pop(Register reg)
+{
+ ma_pop(reg);
+ adjustFrame(-sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Pop(FloatRegister reg)
+{
+ ma_vpop(reg);
+ adjustFrame(-reg.size());
+}
+
+void
+MacroAssembler::Pop(const ValueOperand& val)
+{
+ popValue(val);
+ adjustFrame(-sizeof(Value));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset
+MacroAssembler::call(Register reg)
+{
+ as_blx(reg);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::call(Label* label)
+{
+ // For now, assume that it'll be nearby.
+ as_bl(label, Always);
+ return CodeOffset(currentOffset());
+}
+
+void
+MacroAssembler::call(ImmWord imm)
+{
+ call(ImmPtr((void*)imm.value));
+}
+
+void
+MacroAssembler::call(ImmPtr imm)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, imm, Relocation::HARDCODED);
+ ma_call(imm);
+}
+
+void
+MacroAssembler::call(wasm::SymbolicAddress imm)
+{
+ movePtr(imm, CallReg);
+ call(CallReg);
+}
+
+void
+MacroAssembler::call(JitCode* c)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ScratchRegisterScope scratch(*this);
+ ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
+ callJitNoProfiler(scratch);
+}
+
+CodeOffset
+MacroAssembler::callWithPatch()
+{
+ // The caller ensures that the call is always in range using thunks (below)
+ // as necessary.
+ as_bl(BOffImm(), Always, /* documentation */ nullptr);
+ return CodeOffset(currentOffset());
+}
+
+void
+MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
+{
+ BufferOffset inst(callerOffset - 4);
+ as_bl(BufferOffset(calleeOffset).diffB<BOffImm>(inst), Always, inst);
+}
+
+CodeOffset
+MacroAssembler::farJumpWithPatch()
+{
+ static_assert(32 * 1024 * 1024 - JumpImmediateRange > wasm::MaxFuncs * 3 * sizeof(Instruction),
+ "always enough space for thunks");
+
+ // The goal of the thunk is to be able to jump to any address without the
+ // usual 32MiB branch range limitation. Additionally, to make the thunk
+ // simple to use, the thunk does not use the constant pool or require
+ // patching an absolute address. Instead, a relative offset is used which
+ // can be patched during compilation.
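+ //
+ // The three contiguous words emitted below are, roughly (offsets relative
+ // to the ldr):
+ // +0: ldr scratch, [pc, #0] ; pc reads as +8, so this loads the word at +8
+ // +4: add pc, pc, scratch ; pc reads as +12, so it jumps to +12 + offset
+ // +8: <32-bit relative offset, filled in by patchFarJump()>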
+
+ // Inhibit pools since these three words must be contiguous so that the offset
+ // calculations below are valid.
+ AutoForbidPools afp(this, 3);
+
+ // When pc is used, the read value is the address of the instruction + 8.
+ // This is exactly the address of the uint32 word we want to load.
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(DTRAddr(pc, DtrOffImm(0)), scratch);
+
+ // Branch by making pc the destination register.
+ ma_add(pc, scratch, pc, LeaveCC, Always);
+
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ writeInst(UINT32_MAX);
+
+ return farJump;
+}
+
+void
+MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+
+ uint32_t addOffset = farJump.offset() - 4;
+ MOZ_ASSERT(editSrc(BufferOffset(addOffset))->is<InstALU>());
+
+ // When pc is read as the operand of the add, its value is the address of
+ // the add instruction + 8.
+ *u32 = (targetOffset - addOffset) - 8;
+}
+
+void
+MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(code + farJumpOffset);
+
+ uint32_t addOffset = farJumpOffset - 4;
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(code + addOffset)->is<InstALU>());
+
+ *u32 = (targetOffset - addOffset) - 8;
+}
+
+CodeOffset
+MacroAssembler::nopPatchableToNearJump()
+{
+ // Inhibit pools so that the offset points precisely to the nop.
+ AutoForbidPools afp(this, 1);
+
+ CodeOffset offset(currentOffset());
+ ma_nop();
+ return offset;
+}
+
+void
+MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
+{
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
+ new (jump) InstBImm(BOffImm(target - jump), Assembler::Always);
+}
+
+void
+MacroAssembler::patchNearJumpToNop(uint8_t* jump)
+{
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
+ new (jump) InstNOP();
+}
+
+void
+MacroAssembler::pushReturnAddress()
+{
+ push(lr);
+}
+
+void
+MacroAssembler::popReturnAddress()
+{
+ pop(lr);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ ma_mov(sp, scratch);
+ // Force sp to be aligned.
+ as_bic(sp, sp, Imm8(ABIStackAlignment - 1));
+ ma_push(scratch);
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+
+ // Save the lr register if we need to preserve it.
+ if (secondScratchReg_ != lr)
+ ma_mov(lr, secondScratchReg_);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ if (secondScratchReg_ != lr)
+ ma_mov(secondScratchReg_, lr);
+
+ switch (result) {
+ case MoveOp::DOUBLE:
+ if (!UseHardFpABI()) {
+ // Move double from r0/r1 to ReturnDoubleReg.
+ ma_vxfer(r0, r1, ReturnDoubleReg);
+ }
+ break;
+ case MoveOp::FLOAT32:
+ if (!UseHardFpABI()) {
+ // Move float32 from r0 to ReturnFloat32Reg.
+ ma_vxfer(r0, ReturnFloat32Reg.singleOverlay());
+ }
+ break;
+ case MoveOp::GENERAL:
+ break;
+
+ default:
+ MOZ_CRASH("unexpected callWithABI result");
+ }
+
+ freeStack(stackAdjust);
+
+ if (dynamicAlignment_) {
+ // While the x86 supports pop esp, on ARM that isn't well defined, so
+ // just do it manually.
+ as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ // Load the callee in r12, as above.
+ ma_mov(fun, r12);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(r12);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ // Load the callee in r12; no instruction between the ldr and the call
+ // should clobber it. Note that we can't use fun.base because it may be one
+ // of the IntArg registers clobbered before the call.
+ {
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(fun, r12, scratch);
+ }
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(r12);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::pushFakeReturnAddress(Register scratch)
+{
+ // On ARM, any read of the pc yields the address of the current instruction
+ // plus 8, which corresponds to 2 instructions of 4 bytes. Thus we use an
+ // additional nop to pad until we reach the pushed pc.
+ //
+ // Note: In practice this should not be necessary, as this fake return
+ // address is never used for resuming any execution. Thus theoretically we
+ // could just do a Push(pc), and ignore the nop as well as the pool.
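+ //
+ // The emitted sequence is, roughly:
+ // push {pc} ; pushes the address of the instruction after the nop
+ // nop ; pads so that pseudoReturnOffset - offsetBeforePush == 8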
+ enterNoPool(2);
+ DebugOnly<uint32_t> offsetBeforePush = currentOffset();
+ Push(pc); // actually pushes $pc + 8.
+ ma_nop();
+ uint32_t pseudoReturnOffset = currentOffset();
+ leaveNoPool();
+
+ MOZ_ASSERT_IF(!oom(), pseudoReturnOffset - offsetBeforePush == 8);
+ return pseudoReturnOffset;
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != scratch2);
+
+ ma_lsr(Imm32(gc::ChunkShift), ptr, scratch2);
+ ma_lsl(Imm32(gc::ChunkShift), scratch2, scratch2);
+ load32(Address(scratch2, gc::ChunkLocationOffset), scratch2);
+ branch32(cond, scratch2, Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+ branchTestObject(Assembler::NotEqual, address, cond == Assembler::Equal ? &done : label);
+
+ loadPtr(address, temp);
+ branchPtrInNurseryChunk(cond, temp, InvalidReg, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+
+ branchPtrInNurseryChunk(cond, value.payloadReg(), InvalidReg, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ // If cond == NotEqual, branch when a.payload != b.payload || a.tag !=
+ // b.tag. If the payloads are equal, compare the tags. If the payloads are
+ // not equal, short circuit true (NotEqual).
+ //
+ // If cond == Equal, branch when a.payload == b.payload && a.tag == b.tag.
+ // If the payloads are equal, compare the tags. If the payloads are not
+ // equal, short circuit false (NotEqual).
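+ //
+ // Concretely: the second cmp below is predicated on Equal, so if the
+ // payloads differ the flags keep saying NotEqual; if the payloads are
+ // equal, the tags are compared and determine the final flags.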
+ ScratchRegisterScope scratch(*this);
+
+ if (rhs.isMarkable())
+ ma_cmp(lhs.payloadReg(), ImmGCPtr(rhs.toMarkablePointer()), scratch);
+ else
+ ma_cmp(lhs.payloadReg(), Imm32(rhs.toNunboxPayload()), scratch);
+ ma_cmp(lhs.typeReg(), Imm32(rhs.toNunboxTag()), scratch, Equal);
+ ma_b(label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag if needed.
+ if (valueType != slotType)
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+ // Store the payload.
+ if (value.constant())
+ storePayload(value.value(), dest);
+ else
+ storePayload(value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+void
+MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ wasmTruncateToInt32(input, output, MIRType::Double, /* isUnsigned= */ true, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ wasmTruncateToInt32(input, output, MIRType::Double, /* isUnsigned= */ false, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ true, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ false, oolEntry);
+}
+
+//}}} check_macroassembler_style
+
+void
+MacroAssemblerARM::wasmTruncateToInt32(FloatRegister input, Register output, MIRType fromType,
+ bool isUnsigned, Label* oolEntry)
+{
+ // vcvt* converts NaN into 0, so check for NaNs here.
+ {
+ if (fromType == MIRType::Double)
+ asMasm().compareDouble(input, input);
+ else if (fromType == MIRType::Float32)
+ asMasm().compareFloat(input, input);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ ma_b(oolEntry, Assembler::VFP_Unordered);
+ }
+
+ ScratchDoubleScope scratchScope(asMasm());
+ ScratchRegisterScope scratchReg(asMasm());
+ FloatRegister scratch = scratchScope.uintOverlay();
+
+ // ARM conversion instructions clamp the value to ensure it fits within the
+ // target's type bounds, so every time we see those, we need to check the
+ // input.
+ if (isUnsigned) {
+ if (fromType == MIRType::Double)
+ ma_vcvt_F64_U32(input, scratch);
+ else if (fromType == MIRType::Float32)
+ ma_vcvt_F32_U32(input, scratch);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ ma_vxfer(scratch, output);
+
+ // int32_t(UINT32_MAX) == -1.
+ ma_cmp(output, Imm32(-1), scratchReg);
+ as_cmp(output, Imm8(0), Assembler::NotEqual);
+ ma_b(oolEntry, Assembler::Equal);
+
+ return;
+ }
+
+ scratch = scratchScope.sintOverlay();
+
+ if (fromType == MIRType::Double)
+ ma_vcvt_F64_I32(input, scratch);
+ else if (fromType == MIRType::Float32)
+ ma_vcvt_F32_I32(input, scratch);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ ma_vxfer(scratch, output);
+ ma_cmp(output, Imm32(INT32_MAX), scratchReg);
+ ma_cmp(output, Imm32(INT32_MIN), scratchReg, Assembler::NotEqual);
+ ma_b(oolEntry, Assembler::Equal);
+}
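+
+// The checks above rely on the vcvt instructions saturating out-of-range
+// inputs. In C-like pseudocode (a sketch of the intent, not emitted code):
+//   unsigned: if (out == UINT32_MAX || out == 0) goto oolEntry;  // maybe clamped
+//   signed:   if (out == INT32_MAX || out == INT32_MIN) goto oolEntry;
+// The out-of-line path then re-examines the input to distinguish a genuine
+// boundary value from an actual overflow.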
+
+void
+MacroAssemblerARM::outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType fromType,
+ MIRType toType, bool isUnsigned, Label* rejoin,
+ wasm::TrapOffset trapOffset)
+{
+ ScratchDoubleScope scratchScope(asMasm());
+ FloatRegister scratch;
+
+ // Eagerly take care of NaNs.
+ Label inputIsNaN;
+ if (fromType == MIRType::Double)
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else if (fromType == MIRType::Float32)
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+
+ // Handle special values.
+ Label fail;
+
+ // By default test for the following inputs and bail:
+ // signed: ] -Inf, INTXX_MIN - 1.0 ] and [ INTXX_MAX + 1.0 : +Inf [
+ // unsigned: ] -Inf, -1.0 ] and [ UINTXX_MAX + 1.0 : +Inf [
+ // Note: we cannot always represent those exact values. As a result
+ // this changes the actual comparison a bit.
+ double minValue, maxValue;
+ Assembler::DoubleCondition minCond = Assembler::DoubleLessThanOrEqual;
+ Assembler::DoubleCondition maxCond = Assembler::DoubleGreaterThanOrEqual;
+ if (toType == MIRType::Int64) {
+ if (isUnsigned) {
+ minValue = -1;
+ maxValue = double(UINT64_MAX) + 1.0;
+ } else {
+ // In the float32/double range there exists no value between
+ // INT64_MIN and INT64_MIN - 1.0, so INT64_MIN is the lower bound.
+ minValue = double(INT64_MIN);
+ minCond = Assembler::DoubleLessThan;
+ maxValue = double(INT64_MAX) + 1.0;
+ }
+ } else {
+ if (isUnsigned) {
+ minValue = -1;
+ maxValue = double(UINT32_MAX) + 1.0;
+ } else {
+ if (fromType == MIRType::Float32) {
+ // In the float32 range there exists no value between
+ // INT32_MIN and INT32_MIN - 1.0, so INT32_MIN is the lower bound.
+ minValue = double(INT32_MIN);
+ minCond = Assembler::DoubleLessThan;
+ } else {
+ minValue = double(INT32_MIN) - 1.0;
+ }
+ maxValue = double(INT32_MAX) + 1.0;
+ }
+ }
+
+ if (fromType == MIRType::Double) {
+ scratch = scratchScope.doubleOverlay();
+ asMasm().loadConstantDouble(minValue, scratch);
+ asMasm().branchDouble(minCond, input, scratch, &fail);
+
+ asMasm().loadConstantDouble(maxValue, scratch);
+ asMasm().branchDouble(maxCond, input, scratch, &fail);
+ } else {
+ MOZ_ASSERT(fromType == MIRType::Float32);
+ scratch = scratchScope.singleOverlay();
+ asMasm().loadConstantFloat32(float(minValue), scratch);
+ asMasm().branchFloat(minCond, input, scratch, &fail);
+
+ asMasm().loadConstantFloat32(float(maxValue), scratch);
+ asMasm().branchFloat(maxCond, input, scratch, &fail);
+ }
+
+ // We had an actual correct value, get back to where we were.
+ ma_b(rejoin);
+
+ // Handle errors.
+ bind(&fail);
+ asMasm().jump(wasm::TrapDesc(trapOffset, wasm::Trap::IntegerOverflow,
+ asMasm().framePushed()));
+
+ bind(&inputIsNaN);
+ asMasm().jump(wasm::TrapDesc(trapOffset, wasm::Trap::InvalidConversionToInteger,
+ asMasm().framePushed()));
+}
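+
+// Summary of the accepted input ranges checked above (a sketch; round
+// brackets are exclusive, square brackets inclusive):
+//   int32 (from double):  ( INT32_MIN - 1.0, INT32_MAX + 1.0 )
+//   int32 (from float32): [ INT32_MIN,       INT32_MAX + 1.0 )
+//   uint32:               ( -1.0,            UINT32_MAX + 1.0 )
+//   int64:                [ INT64_MIN,       INT64_MAX + 1.0 )
+//   uint64:               ( -1.0,            UINT64_MAX + 1.0 )
+// Anything outside these ranges traps with IntegerOverflow; NaN traps with
+// InvalidConversionToInteger.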
+
+void
+MacroAssemblerARM::emitUnalignedLoad(bool isSigned, unsigned byteSize, Register ptr, Register tmp,
+ Register dest, unsigned offset)
+{
+ // Preconditions.
+ MOZ_ASSERT(ptr != tmp);
+ MOZ_ASSERT(ptr != dest);
+ MOZ_ASSERT(tmp != dest);
+ MOZ_ASSERT(byteSize <= 4);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ for (unsigned i = 0; i < byteSize; i++) {
+ // Only the last byte load shall be signed, if needed.
+ bool signedByteLoad = isSigned && (i == byteSize - 1);
+ ma_dataTransferN(IsLoad, 8, signedByteLoad, ptr, Imm32(offset + i), i ? tmp : dest, scratch);
+ if (i)
+ as_orr(dest, dest, lsl(tmp, 8 * i));
+ }
+}
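+
+// For example, a signed 2-byte load at ptr+offset assembles to roughly the
+// following (a sketch; little-endian byte order, as in the loop above):
+//   ldrb  dest, [ptr, #offset]       ; low byte, zero-extended
+//   ldrsb tmp,  [ptr, #offset + 1]   ; high byte, sign-extended
+//   orr   dest, dest, tmp, lsl #8    ; dest = sign-extended 16-bit value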
+
+void
+MacroAssemblerARM::emitUnalignedStore(unsigned byteSize, Register ptr, Register val,
+ unsigned offset)
+{
+ // Preconditions.
+ MOZ_ASSERT(ptr != val);
+ MOZ_ASSERT(byteSize <= 4);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ for (unsigned i = 0; i < byteSize; i++) {
+ ma_dataTransferN(IsStore, 8 /* bits */, /* signed */ false, ptr, Imm32(offset + i), val, scratch);
+ if (i < byteSize - 1)
+ ma_lsr(Imm32(8), val, val);
+ }
+}
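+
+// Correspondingly, a 4-byte store of val at ptr+offset assembles to roughly
+// (a sketch): four strb instructions at offsets 0..3, with val shifted right
+// by 8 between stores. val is clobbered in the process; ptr is preserved.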
diff --git a/js/src/jit/arm/MacroAssembler-arm.h b/js/src/jit/arm/MacroAssembler-arm.h
new file mode 100644
index 000000000..c011af3c3
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -0,0 +1,1554 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MacroAssembler_arm_h
+#define jit_arm_MacroAssembler_arm_h
+
+#include "mozilla/DebugOnly.h"
+
+#include "jsopcode.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/AtomicOp.h"
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+static Register CallReg = ip;
+static const int defaultShift = 3;
+JS_STATIC_ASSERT(1 << defaultShift == sizeof(JS::Value));
+
+// MacroAssemblerARM inherits from Assembler, which is defined in
+// Assembler-arm.{h,cpp}.
+class MacroAssemblerARM : public Assembler
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ // On ARM, some instructions require a second scratch register. This
+ // register defaults to lr, since it's non-allocatable (as it can be
+ // clobbered by some instructions). Allow the baseline compiler to override
+ // this though, since baseline IC stubs rely on lr holding the return
+ // address.
+ Register secondScratchReg_;
+
+ public:
+ Register getSecondScratchReg() const {
+ return secondScratchReg_;
+ }
+
+ public:
+ // Higher level tag testing code.
+ // TODO: Can probably remove the Operand versions.
+ Operand ToPayload(Operand base) const {
+ return Operand(Register::FromCode(base.base()), base.disp());
+ }
+ Address ToPayload(const Address& base) const {
+ return base;
+ }
+
+ protected:
+ Operand ToType(Operand base) const {
+ return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
+ }
+ Address ToType(const Address& base) const {
+ return ToType(Operand(base)).toAddress();
+ }
+
+ Address ToPayloadAfterStackPush(const Address& base) const {
+ // If we are based on StackPointer, pass over the type tag just pushed.
+ if (base.base == StackPointer)
+ return Address(base.base, base.offset + sizeof(void *));
+ return ToPayload(base);
+ }
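+
+ // The nunbox32 layout assumed by the helpers above (a sketch): on
+ // little-endian ARM the payload occupies the low word and the type tag
+ // the high word, so a boxed Value at [base] looks like:
+ //   [base + 0]              payload
+ //   [base + sizeof(void*)]  type tag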
+
+ public:
+ MacroAssemblerARM()
+ : secondScratchReg_(lr)
+ { }
+
+ void setSecondScratchReg(Register reg) {
+ MOZ_ASSERT(reg != ScratchRegister);
+ secondScratchReg_ = reg;
+ }
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
+ Condition c = Always);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void wasmTruncateToInt32(FloatRegister input, Register output, MIRType fromType,
+ bool isUnsigned, Label* oolEntry);
+ void outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType fromType,
+ MIRType toType, bool isUnsigned, Label* rejoin,
+ wasm::TrapOffset trapOffs);
+
+ // Somewhat direct wrappers for the low-level assembler functions
+ // (bitops). Attempt to encode a virtual ALU instruction using two real
+ // instructions.
+ private:
+ bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
+ SBit s, Condition c);
+
+ public:
+ void ma_alu(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ ALUOp op, SBit s = LeaveCC, Condition c = Always);
+ void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_nop();
+
+ void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c);
+ void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c);
+
+ static void ma_mov_patch(Imm32 imm, Register dest, Assembler::Condition c,
+ RelocStyle rs, Instruction* i);
+ static void ma_mov_patch(ImmPtr imm, Register dest, Assembler::Condition c,
+ RelocStyle rs, Instruction* i);
+
+ // ALU based ops
+ // mov
+ void ma_mov(Register src, Register dest, SBit s = LeaveCC, Condition c = Always);
+
+ void ma_mov(Imm32 imm, Register dest, Condition c = Always);
+ void ma_mov(ImmWord imm, Register dest, Condition c = Always);
+
+ void ma_mov(ImmGCPtr ptr, Register dest);
+
+ // Shifts (just a move with a shifting op2)
+ void ma_lsl(Imm32 shift, Register src, Register dst);
+ void ma_lsr(Imm32 shift, Register src, Register dst);
+ void ma_asr(Imm32 shift, Register src, Register dst);
+ void ma_ror(Imm32 shift, Register src, Register dst);
+ void ma_rol(Imm32 shift, Register src, Register dst);
+
+ void ma_lsl(Register shift, Register src, Register dst);
+ void ma_lsr(Register shift, Register src, Register dst);
+ void ma_asr(Register shift, Register src, Register dst);
+ void ma_ror(Register shift, Register src, Register dst);
+ void ma_rol(Register shift, Register src, Register dst, AutoRegisterScope& scratch);
+
+ // Move not (dest <- ~src)
+ void ma_mvn(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+
+ // Negate (dest <- -src) implemented as rsb dest, src, 0
+ void ma_neg(Register src, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // And
+ void ma_and(Register src, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_and(Register src1, Register src2, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_and(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_and(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
+ void ma_bic(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Exclusive or
+ void ma_eor(Register src, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_eor(Register src1, Register src2, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_eor(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_eor(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Or
+ void ma_orr(Register src, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_orr(Register src1, Register src2, Register dest,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_orr(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_orr(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+
+ // Arithmetic based ops.
+ // Add with carry:
+ void ma_adc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_adc(Register src, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+
+ // Add:
+ void ma_add(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_add(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_add(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Subtract with carry:
+ void ma_sbc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_sbc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+
+ // Subtract:
+ void ma_sub(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_sub(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_sub(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Reverse subtract:
+ void ma_rsb(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_rsb(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_rsb(Register src1, Imm32 op2, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Reverse subtract with carry:
+ void ma_rsc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
+ void ma_rsc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+ void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+
+ // Compares/tests.
+ // Compare negative (sets condition codes as src1 + src2 would):
+ void ma_cmn(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_cmn(Register src1, Register src2, Condition c = Always);
+ void ma_cmn(Register src1, Operand op, Condition c = Always);
+
+ // Compare (src1 - src2):
+ void ma_cmp(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_cmp(Register src1, ImmTag tag, Condition c = Always);
+ void ma_cmp(Register src1, ImmWord ptr, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_cmp(Register src1, ImmGCPtr ptr, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_cmp(Register src1, Operand op, AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
+ Condition c = Always);
+ void ma_cmp(Register src1, Register src2, Condition c = Always);
+
+ // Test for equality, (src1 ^ src2):
+ void ma_teq(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_teq(Register src1, Register src2, Condition c = Always);
+ void ma_teq(Register src1, Operand op, Condition c = Always);
+
+ // Test (src1 & src2):
+ void ma_tst(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
+ void ma_tst(Register src1, Register src2, Condition c = Always);
+ void ma_tst(Register src1, Operand op, Condition c = Always);
+
+ // Multiplies. For now, there are only two that we care about.
+ void ma_mul(Register src1, Register src2, Register dest);
+ void ma_mul(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch);
+ Condition ma_check_mul(Register src1, Register src2, Register dest,
+ AutoRegisterScope& scratch, Condition cond);
+ Condition ma_check_mul(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, Condition cond);
+
+ void ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow, AutoRegisterScope& scratch);
+ void ma_umull(Register src1, Register src2, Register destHigh, Register destLow);
+
+ // Fast mod. Uses scratch registers and thus needs to be in the assembler;
+ // implicitly assumes that we can overwrite dest at the beginning of the
+ // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
+ AutoRegisterScope& scratch, AutoRegisterScope& scratch2, int32_t shift);
+
+ // Mod - depends on integer divide instructions being supported.
+ void ma_smod(Register num, Register div, Register dest, AutoRegisterScope& scratch);
+ void ma_umod(Register num, Register div, Register dest, AutoRegisterScope& scratch);
+
+ // Division - depends on integer divide instructions being supported.
+ void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
+ void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);
+
+ // Misc operations.
+ void ma_clz(Register src, Register dest, Condition cond = Always);
+ void ma_ctz(Register src, Register dest, AutoRegisterScope& scratch);
+
+ // Memory:
+ // Shortcut for when we know we're transferring 32 bits of data.
+ void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+ void ma_dtr(LoadStore ls, Register rt, const Address& addr, AutoRegisterScope& scratch,
+ Index mode, Condition cc);
+
+ void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
+ void ma_str(Register rt, const Address& addr, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+
+ void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+ void ma_ldr(const Address& addr, Register rt, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+
+ void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+ void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+ void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+ void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+ void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset,
+ Condition cc = Always);
+ void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
+ void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
+ void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset,
+ Condition cc = Always);
+
+ // Specialized helpers for moving N bits of data, where N == 8, 16, 32, or 64.
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always,
+ Scale scale = TimesOne);
+
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt,
+ Index mode = Offset, Condition cc = Always);
+
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Imm32 offset, Register rt, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+
+ void ma_pop(Register r);
+ void ma_popn_pc(Imm32 n, AutoRegisterScope& scratch, AutoRegisterScope& scratch2);
+ void ma_push(Register r);
+ void ma_push_sp(Register r, AutoRegisterScope& scratch);
+
+ void ma_vpop(VFPRegister r);
+ void ma_vpush(VFPRegister r);
+
+ // Barriers.
+ void ma_dmb(BarrierOption option = BarrierSY);
+ void ma_dsb(BarrierOption option = BarrierSY);
+
+ // Branches when done from within arm-specific code.
+ BufferOffset ma_b(Label* dest, Condition c = Always);
+ BufferOffset ma_b(wasm::TrapDesc target, Condition c = Always);
+ void ma_b(void* target, Condition c = Always);
+ void ma_bx(Register dest, Condition c = Always);
+
+ // This is almost NEVER necessary: we'll basically never be calling a label,
+ // except possibly in the crazy bailout-table case.
+ void ma_bl(Label* dest, Condition c = Always);
+
+ void ma_blx(Register dest, Condition c = Always);
+
+ // VFP/ALU:
+ void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vabs(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ void ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ void ma_vimm(wasm::RawF64 value, FloatRegister dest, Condition cc = Always);
+ void ma_vimm_f32(wasm::RawF32 value, FloatRegister dest, Condition cc = Always);
+
+ void ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc = Always);
+ void ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc = Always);
+ void ma_vcmpz(FloatRegister src1, Condition cc = Always);
+ void ma_vcmpz_f32(FloatRegister src1, Condition cc = Always);
+
+ void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ // Source is F64, dest is I32:
+ void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ // Source is I32, dest is F64:
+ void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ // Source is F32, dest is I32:
+ void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+ // Source is I32, dest is F32:
+ void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
+
+
+ // Transfer (do not coerce) a float into a gpr.
+ void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
+ // Transfer (do not coerce) a double into a pair of gprs.
+ void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);
+
+ // Transfer (do not coerce) a gpr into a float.
+ void ma_vxfer(Register src, FloatRegister dest, Condition cc = Always);
+ // Transfer (do not coerce) a pair of gprs into a double.
+ void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
+
+ BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, AutoRegisterScope& scratch,
+ Condition cc = Always);
+
+ BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
+ BufferOffset ma_vldr(const Address& addr, VFPRegister dest, AutoRegisterScope& scratch, Condition cc = Always);
+ BufferOffset ma_vldr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ int32_t shift = defaultShift, Condition cc = Always);
+
+ BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, const Address& addr, AutoRegisterScope& scratch, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, int32_t shift, int32_t offset, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ int32_t shift, Condition cc = Always);
+
+ void ma_call(ImmPtr dest);
+
+ // Float registers can only be loaded/stored in contiguous runs when using
+ // vstm/vldm. This function breaks the set into contiguous runs and
+ // loads/stores them at [rm]. rm will be modified and left in a state
+ // logically suitable for the next load/store. Returns the offset from [rm]
+ // for the logical next load/store.
+ int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
+ Register rm, DTMMode mode)
+ {
+ if (mode == IA) {
+ return transferMultipleByRunsImpl
+ <FloatRegisterForwardIterator>(set, ls, rm, mode, 1);
+ }
+ if (mode == DB) {
+ return transferMultipleByRunsImpl
+ <FloatRegisterBackwardIterator>(set, ls, rm, mode, -1);
+ }
+ MOZ_CRASH("Invalid data transfer addressing mode");
+ }
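+
+ // Usage sketch (illustrative; 'liveFP' is a hypothetical FloatRegisterSet):
+ //   transferMultipleByRuns(liveFP, IsStore, sp, DB); // push the runs, sp moves down
+ //   ...
+ //   transferMultipleByRuns(liveFP, IsLoad, sp, IA);  // reload them, sp moves back up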
+
+ // Loads `byteSize` bytes, byte by byte, by reading from ptr[offset],
+ // applying the indicated signedness (defined by isSigned).
+ // - all three registers must be different.
+ // - tmp and dest will get clobbered, ptr will remain intact.
+ // - byteSize can be up to 4 bytes and no more (GPRs are 32 bits on ARM).
+ void emitUnalignedLoad(bool isSigned, unsigned byteSize, Register ptr, Register tmp,
+ Register dest, unsigned offset = 0);
+
+ // Ditto, for a store. Note stores don't care about signedness.
+ // - the two registers must be different.
+ // - val will get clobbered, ptr will remain intact.
+ // - byteSize can be up to 4 bytes and no more (GPRs are 32 bits on ARM).
+ void emitUnalignedStore(unsigned byteSize, Register ptr, Register val, unsigned offset = 0);
+
+ private:
+ // Implementation for transferMultipleByRuns so we can use different
+ // iterators for forward/backward traversals. The sign argument should be 1
+ // if we traverse forwards, -1 if we traverse backwards.
+ template<typename RegisterIterator> int32_t
+ transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
+ Register rm, DTMMode mode, int32_t sign)
+ {
+ MOZ_ASSERT(sign == 1 || sign == -1);
+
+ int32_t delta = sign * sizeof(float);
+ int32_t offset = 0;
+ // Build up a new set, which is the sum of all of the single and double
+ // registers. This set can have up to 48 registers in it in total:
+ // s0-s31 and d16-d31.
+ FloatRegisterSet mod = set.reduceSetForPush();
+
+ RegisterIterator iter(mod);
+ while (iter.more()) {
+ startFloatTransferM(ls, rm, mode, WriteBack);
+ int32_t reg = (*iter).code();
+ do {
+ offset += delta;
+ if ((*iter).isDouble())
+ offset += delta;
+ transferFloatReg(*iter);
+ } while ((++iter).more() && int32_t((*iter).code()) == (reg += sign));
+ finishFloatTransfer();
+ }
+ return offset;
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerARMCompat : public MacroAssemblerARM
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ public:
+ MacroAssemblerARMCompat()
+ { }
+
+ public:
+
+ // Jumps + other functions that should be called from non-ARM-specific
+ // code. Basically, an x86 front end on top of the ARM code.
+ void j(Condition code, Label* dest)
+ {
+ as_b(dest, code);
+ }
+ void j(Label* dest)
+ {
+ as_b(dest, Always);
+ }
+
+ void mov(Register src, Register dest) {
+ ma_mov(src, dest);
+ }
+ void mov(ImmWord imm, Register dest) {
+ ma_mov(Imm32(imm.value), dest);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(Register src, Address dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+ void mov(Address src, Register dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ScratchRegisterScope scratch(asMasm());
+ ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
+ ma_bx(scratch);
+ }
+ void branch(const Register reg) {
+ ma_bx(reg);
+ }
+ void nop() {
+ ma_nop();
+ }
+ void shortJumpSizedNop() {
+ ma_nop();
+ }
+ void ret() {
+ ma_pop(pc);
+ }
+ void retn(Imm32 n) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_popn_pc(n, scratch, scratch2);
+ }
+ void push(Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mov(imm, scratch);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) {
+ push(Imm32(imm.value));
+ }
+ void push(ImmGCPtr imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mov(imm, scratch);
+ ma_push(scratch);
+ }
+ void push(const Address& addr) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_push(scratch);
+ }
+ void push(Register reg) {
+ if (reg == sp) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_push_sp(reg, scratch);
+ } else {
+ ma_push(reg);
+ }
+ }
+ void push(FloatRegister reg) {
+ ma_vpush(VFPRegister(reg));
+ }
+ void pushWithPadding(Register reg, const Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_dtr(IsStore, sp, totSpace, reg, scratch, PreIndex);
+ }
+ void pushWithPadding(Imm32 imm, const Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_mov(imm, scratch);
+ ma_dtr(IsStore, sp, totSpace, scratch, scratch2, PreIndex);
+ }
+
+ void pop(Register reg) {
+ ma_pop(reg);
+ }
+ void pop(FloatRegister reg) {
+ ma_vpop(VFPRegister(reg));
+ }
+
+ void popN(Register reg, Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_dtr(IsLoad, sp, totSpace, reg, scratch, PostIndex);
+ }
+
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a BLX or NOP instruction. ToggleCall can be used to patch this
+ // instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ CodeOffset label = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_movPatchable(Imm32(imm.value), dest, Always);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void jump(Label* label) {
+ as_b(label);
+ }
+ void jump(JitCode* code) {
+ branch(code);
+ }
+ void jump(Register reg) {
+ ma_bx(reg);
+ }
+ void jump(const Address& addr) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_bx(scratch);
+ }
+ void jump(wasm::TrapDesc target) {
+ as_b(target);
+ }
+
+ void negl(Register reg) {
+ ma_neg(reg, reg, SetCC);
+ }
+ void test32(Register lhs, Register rhs) {
+ ma_tst(lhs, rhs);
+ }
+ void test32(Register lhs, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_tst(lhs, imm, scratch);
+ }
+ void test32(const Address& addr, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_tst(scratch, imm, scratch2);
+ }
+ void testPtr(Register lhs, Register rhs) {
+ test32(lhs, rhs);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ return value.typeReg();
+ }
+
+ // Higher level tag testing code.
+ Condition testInt32(Condition cond, const ValueOperand& value);
+ Condition testBoolean(Condition cond, const ValueOperand& value);
+ Condition testDouble(Condition cond, const ValueOperand& value);
+ Condition testNull(Condition cond, const ValueOperand& value);
+ Condition testUndefined(Condition cond, const ValueOperand& value);
+ Condition testString(Condition cond, const ValueOperand& value);
+ Condition testSymbol(Condition cond, const ValueOperand& value);
+ Condition testObject(Condition cond, const ValueOperand& value);
+ Condition testNumber(Condition cond, const ValueOperand& value);
+ Condition testMagic(Condition cond, const ValueOperand& value);
+
+ Condition testPrimitive(Condition cond, const ValueOperand& value);
+
+ // Register-based tests.
+ Condition testInt32(Condition cond, Register tag);
+ Condition testBoolean(Condition cond, Register tag);
+ Condition testNull(Condition cond, Register tag);
+ Condition testUndefined(Condition cond, Register tag);
+ Condition testString(Condition cond, Register tag);
+ Condition testSymbol(Condition cond, Register tag);
+ Condition testObject(Condition cond, Register tag);
+ Condition testDouble(Condition cond, Register tag);
+ Condition testNumber(Condition cond, Register tag);
+ Condition testMagic(Condition cond, Register tag);
+ Condition testPrimitive(Condition cond, Register tag);
+
+ Condition testGCThing(Condition cond, const Address& address);
+ Condition testMagic(Condition cond, const Address& address);
+ Condition testInt32(Condition cond, const Address& address);
+ Condition testDouble(Condition cond, const Address& address);
+ Condition testBoolean(Condition cond, const Address& address);
+ Condition testNull(Condition cond, const Address& address);
+ Condition testUndefined(Condition cond, const Address& address);
+ Condition testString(Condition cond, const Address& address);
+ Condition testSymbol(Condition cond, const Address& address);
+ Condition testObject(Condition cond, const Address& address);
+ Condition testNumber(Condition cond, const Address& address);
+
+ Condition testUndefined(Condition cond, const BaseIndex& src);
+ Condition testNull(Condition cond, const BaseIndex& src);
+ Condition testBoolean(Condition cond, const BaseIndex& src);
+ Condition testString(Condition cond, const BaseIndex& src);
+ Condition testSymbol(Condition cond, const BaseIndex& src);
+ Condition testInt32(Condition cond, const BaseIndex& src);
+ Condition testObject(Condition cond, const BaseIndex& src);
+ Condition testDouble(Condition cond, const BaseIndex& src);
+ Condition testMagic(Condition cond, const BaseIndex& src);
+ Condition testGCThing(Condition cond, const BaseIndex& src);
+
+ // Unboxing code.
+ void unboxNonDouble(const ValueOperand& operand, Register dest);
+ void unboxNonDouble(const Address& src, Register dest);
+ void unboxNonDouble(const BaseIndex& src, Register dest);
+ void unboxInt32(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxInt32(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest);
+
+ void notBoolean(const ValueOperand& val) {
+ as_eor(val.payloadReg(), val.payloadReg(), Imm8(1));
+ }
+
+ // Boxing code.
+ void boxDouble(FloatRegister src, const ValueOperand& dest);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch);
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address& address, Register scratch);
+ Register extractTag(const BaseIndex& address, Register scratch);
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 dp, FloatRegister dest);
+
+ // Treat the value as a boolean, and set condition codes accordingly.
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand);
+ Condition testBooleanTruthy(bool truthy, const ValueOperand& operand);
+ Condition testDoubleTruthy(bool truthy, FloatRegister reg);
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void moveValue(const Value& val, Register type, Register data);
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
+ Label* documentation = nullptr);
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation) {
+ return jumpWithPatch(label, Always, documentation);
+ }
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(address, dest.gpr(), scratch);
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ else
+ load32(address, dest.gpr());
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(),
+ s1 = src.payloadReg(), d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(d1 != scratch);
+ MOZ_ASSERT(d0 != scratch);
+ ma_mov(d1, scratch);
+ ma_mov(d0, d1);
+ ma_mov(scratch, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ mozilla::Swap(s0, s1);
+ mozilla::Swap(d0, d1);
+ }
+
+ if (s0 != d0)
+ ma_mov(s0, d0);
+ if (s1 != d1)
+ ma_mov(s1, d1);
+ }
+
+ void storeValue(ValueOperand val, const Address& dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
+ int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
+
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+
+ // Store the payload.
+ if (payloadoffset < 4096 && payloadoffset > -4096)
+ ma_str(reg, DTRAddr(scratch, DtrOffImm(payloadoffset)));
+ else
+ ma_str(reg, Address(scratch, payloadoffset), scratch2);
+
+ // Store the type.
+ if (typeoffset < 4096 && typeoffset > -4096) {
+ // Encodable as DTRAddr, so only two instructions needed.
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
+ } else {
+ // Since there are only two scratch registers, the offset must be
+ // applied early using a third instruction to be safe.
+ ma_add(Imm32(typeoffset), scratch, scratch2);
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ }
+ }
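+ // Note on the 4096 checks above: a DTRAddr immediate offset (imm12) can
+ // only encode magnitudes up to 4095, so larger offsets must first be
+ // folded into the base register before the str.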
+ void storeValue(JSValueType type, Register reg, Address dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_str(reg, dest, scratch2);
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch);
+ ma_str(scratch, Address(dest.base, dest.offset + NUNBOX32_TYPE_OFFSET), scratch2);
+ }
+ void storeValue(const Value& val, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_mov(Imm32(val.toNunboxTag()), scratch);
+ ma_str(scratch, ToType(dest), scratch2);
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), scratch);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+ ma_str(scratch, ToPayload(dest), scratch2);
+ }
+ void storeValue(const Value& val, BaseIndex dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
+ int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
+
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+
+ // Store the type.
+ if (typeoffset < 4096 && typeoffset > -4096) {
+ ma_mov(Imm32(val.toNunboxTag()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
+ } else {
+ ma_add(Imm32(typeoffset), scratch, scratch2);
+ ma_mov(Imm32(val.toNunboxTag()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ // Restore scratch for the payload store.
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ }
+
+ // Store the payload, marking if necessary.
+ if (payloadoffset < 4096 && payloadoffset > -4096) {
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), scratch2);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(payloadoffset)));
+ } else {
+ ma_add(Imm32(payloadoffset), scratch, scratch2);
+ if (val.isMarkable())
+ ma_mov(ImmGCPtr(val.toMarkablePointer()), scratch2);
+ else
+ ma_mov(Imm32(val.toNunboxPayload()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ }
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isMarkable())
+ push(ImmGCPtr(val.toMarkablePointer()));
+ else
+ push(Imm32(val.toNunboxPayload()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+ void pushValue(const Address& addr);
+
+ void storePayload(const Value& val, const Address& dest);
+ void storePayload(Register src, const Address& dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, const Address& dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(void* handler);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ void not32(Register reg);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load64(const Address& address, Register64 dest) {
+ load32(Address(address.base, address.offset + INT64LOW_OFFSET), dest.low);
+ int32_t highOffset = (address.offset < 0) ? -int32_t(INT64HIGH_OFFSET) : INT64HIGH_OFFSET;
+ load32(Address(address.base, address.offset + highOffset), dest.high);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, Address(address.base, address.offset + INT64LOW_OFFSET));
+ store32(src.high, Address(address.base, address.offset + INT64HIGH_OFFSET));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), Address(address.base, address.offset + INT64LOW_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + INT64HIGH_OFFSET));
+ }
+
+ void storePtr(ImmWord imm, const Address& address);
+ void storePtr(ImmWord imm, const BaseIndex& address);
+ void storePtr(ImmPtr imm, const Address& address);
+ void storePtr(ImmPtr imm, const BaseIndex& address);
+ void storePtr(ImmGCPtr imm, const Address& address);
+ void storePtr(ImmGCPtr imm, const BaseIndex& address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest, Condition cc = Always) {
+ ma_vmov(src, dest, cc);
+ }
+
+ private:
+ template<typename T>
+ Register computePointer(const T& src, Register r);
+
+ template<typename T>
+ void compareExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register oldval,
+ Register newval, Register output);
+
+ template<typename T>
+ void compareExchangeARMv7(int nbytes, bool signExtend, const T& mem, Register oldval,
+ Register newval, Register output);
+
+ template<typename T>
+ void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
+ Register newval, Register output);
+
+ template<typename T>
+ void atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register value,
+ Register output);
+
+ template<typename T>
+ void atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem, Register value,
+ Register output);
+
+ template<typename T>
+ void atomicExchange(int nbytes, bool signExtend, const T& address, Register value,
+ Register output);
+
+ template<typename T>
+ void atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const T& mem, Register flagTemp, Register output);
+
+ template<typename T>
+ void atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const T& mem, Register flagTemp, Register output);
+
+ template<typename T>
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const T& address, Register flagTemp, Register output);
+
+ template<typename T>
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const T& address, Register flagTemp, Register output);
+
+ template<typename T>
+ void atomicEffectOpARMv6(int nbytes, AtomicOp op, const Register& value, const T& address,
+ Register flagTemp);
+
+ template<typename T>
+ void atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register& value, const T& address,
+ Register flagTemp);
+
+ template<typename T>
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& address,
+ Register flagTemp);
+
+ template<typename T>
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& address,
+ Register flagTemp);
+
+ public:
+ // T in {Address,BaseIndex}
+ // S in {Imm32,Register}
+
+ template<typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(1, true, mem, oldval, newval, output);
+ }
+ template<typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(1, false, mem, oldval, newval, output);
+ }
+ template<typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(2, true, mem, oldval, newval, output);
+ }
+ template<typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(2, false, mem, oldval, newval, output);
+ }
+ template<typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
+ compareExchange(4, false, mem, oldval, newval, output);
+ }
+
+ template<typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register output)
+ {
+ atomicExchange(1, true, mem, value, output);
+ }
+ template<typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register output)
+ {
+ atomicExchange(1, false, mem, value, output);
+ }
+ template<typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register output)
+ {
+ atomicExchange(2, true, mem, value, output);
+ }
+ template<typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register output)
+ {
+ atomicExchange(2, false, mem, value, output);
+ }
+ template<typename T>
+ void atomicExchange32(const T& mem, Register value, Register output) {
+ atomicExchange(4, false, mem, value, output);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicAdd8(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd16(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd32(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicSub8(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub16(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub32(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicAnd8(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd16(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd32(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicOr8(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr16(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr32(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicXor8(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor16(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor32(const S& value, const T& mem, Register flagTemp) {
+ atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp);
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, AnyRegister output);
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void cmp32(Register lhs, Imm32 rhs);
+ void cmp32(Register lhs, Register rhs);
+ void cmp32(const Address& lhs, Imm32 rhs) {
+ MOZ_CRASH("NYI");
+ }
+ void cmp32(const Address& lhs, Register rhs) {
+ MOZ_CRASH("NYI");
+ }
+
+ void cmpPtr(Register lhs, Register rhs);
+ void cmpPtr(Register lhs, ImmWord rhs);
+ void cmpPtr(Register lhs, ImmPtr rhs);
+ void cmpPtr(Register lhs, ImmGCPtr rhs);
+ void cmpPtr(Register lhs, Imm32 rhs);
+ void cmpPtr(const Address& lhs, Register rhs);
+ void cmpPtr(const Address& lhs, ImmWord rhs);
+ void cmpPtr(const Address& lhs, ImmPtr rhs);
+ void cmpPtr(const Address& lhs, ImmGCPtr rhs);
+ void cmpPtr(const Address& lhs, Imm32 rhs);
+
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+
+ void setStackArg(Register reg, uint32_t arg);
+
+ void breakpoint();
+ // Conditional breakpoint.
+ void breakpoint(Condition cc);
+
+ // Trigger the simulator's interactive read-eval-print loop.
+ // The message will be printed at the stopping point.
+ // (On non-simulator builds, does nothing.)
+ void simulatorStop(const char* msg);
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Checks for NaN if canBeNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool canBeNaN, bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool canBeNaN, bool isMax);
+
+ void compareDouble(FloatRegister lhs, FloatRegister rhs);
+
+ void compareFloat(FloatRegister lhs, FloatRegister rhs);
+
+ void checkStackAlignment();
+
+ // If source is a double, load it into dest. If source is int32, convert it
+ // to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ void
+ emitSet(Assembler::Condition cond, Register dest)
+ {
+ ma_mov(Imm32(0), dest);
+ ma_mov(Imm32(1), dest, cond);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ public:
+ CodeOffset labelForPatch() {
+ return CodeOffset(nextOffset().getOffset());
+ }
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_add(address.base, Imm32(address.offset), dest, scratch, LeaveCC);
+ }
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, LeaveCC);
+ if (address.offset)
+ ma_add(dest, Imm32(address.offset), dest, scratch, LeaveCC);
+ }
+ void floor(FloatRegister input, Register output, Label* handleNotAnInt);
+ void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
+ void ceil(FloatRegister input, Register output, Label* handleNotAnInt);
+ void ceilf(FloatRegister input, Register output, Label* handleNotAnInt);
+ void round(FloatRegister input, Register output, Label* handleNotAnInt, FloatRegister tmp);
+ void roundf(FloatRegister input, Register output, Label* handleNotAnInt, FloatRegister tmp);
+
+ void clampCheck(Register r, Label* handleNotAnInt) {
+ // Check explicitly for r == INT_MIN || r == INT_MAX
+ // This is the instruction sequence that gcc generated for this
+ // operation.
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_sub(r, Imm32(0x80000001), scratch, scratch2);
+ as_cmn(scratch, Imm8(3));
+ ma_b(handleNotAnInt, Above);
+ }
+
+ void lea(Operand addr, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_add(addr.baseReg(), Imm32(addr.disp()), dest, scratch);
+ }
+
+ void abiret() {
+ as_bx(lr);
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) {
+ as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
+ }
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
+ }
+ void loadWasmPinnedRegsFromTls() {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg, scratch);
+ ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg, scratch);
+ ma_add(Imm32(WasmGlobalRegBias), GlobalReg, scratch);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+
+ struct AutoPrepareForPatching {
+ explicit AutoPrepareForPatching(MacroAssemblerARMCompat&) {}
+ };
+};
+
+typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MacroAssembler_arm_h */
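The clampCheck() sequence above is terse: it subtracts 0x80000001 and then branches on an unsigned comparison, which fires for exactly r == INT_MIN and r == INT_MAX. A minimal standalone sketch of that arithmetic (plain C++, not jit code; the helper name is made up for illustration):

    // Models the flag check: as_cmn(scratch, Imm8(3)) sets flags for scratch + 3,
    // the same comparison as "cmp scratch, #-3"; the Above condition is then an
    // unsigned test against 0xFFFFFFFD.
    #include <climits>
    #include <cstdint>
    #include <cstdio>

    static bool clampCheckTaken(int32_t r) {
        uint32_t scratch = uint32_t(r) - 0x80000001u;  // ma_sub(r, Imm32(0x80000001), ...)
        return scratch > uint32_t(-3);                 // branch taken -> handleNotAnInt
    }

    int main() {
        printf("%d %d %d %d\n",
               clampCheckTaken(INT_MIN),       // 1
               clampCheckTaken(INT_MAX),       // 1
               clampCheckTaken(0),             // 0
               clampCheckTaken(INT_MIN + 1));  // 0
        return 0;
    }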
diff --git a/js/src/jit/arm/MoveEmitter-arm.cpp b/js/src/jit/arm/MoveEmitter-arm.cpp
new file mode 100644
index 000000000..edacd6913
--- /dev/null
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -0,0 +1,427 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/MoveEmitter-arm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveEmitterARM::MoveEmitterARM(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg)
+{
+ pushedAtStart_ = masm.framePushed();
+}
+
+void
+MoveEmitterARM::emit(const MoveResolver& moves)
+{
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ masm.reserveStack(moves.numCycles() * sizeof(double));
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++)
+ emit(moves.getMove(i));
+}
+
+MoveEmitterARM::~MoveEmitterARM()
+{
+ assertDone();
+}
+
+Address
+MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const
+{
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(offset < 4096 && offset > -4096);
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+Address
+MoveEmitterARM::spillSlot() const
+{
+ int32_t offset = masm.framePushed() - pushedAtSpill_;
+ MOZ_ASSERT(offset < 4096 && offset > -4096);
+ return Address(StackPointer, offset);
+}
+
+Address
+MoveEmitterARM::toAddress(const MoveOperand& operand) const
+{
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+
+ if (operand.base() != StackPointer) {
+ MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
+ return Operand(operand.base(), operand.disp()).toAddress();
+ }
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted.
+ return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
+}
+
+Register
+MoveEmitterARM::tempReg()
+{
+ if (spilledReg_ != InvalidReg)
+ return spilledReg_;
+
+    // Pick an eviction point. r12/ip would be the obvious choice, but it is
+    // the scratch register, which is frequently used for address computations,
+    // such as those found when we attempt to access values more than 4096 off
+    // of the stack pointer, so it is a poor fit. Instead, use lr, the link
+    // register; smarter heuristics can be added later if needed.
+ spilledReg_ = r14;
+ if (pushedAtSpill_ == -1) {
+ masm.Push(spilledReg_);
+ pushedAtSpill_ = masm.framePushed();
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_str(spilledReg_, spillSlot(), scratch);
+ }
+ return spilledReg_;
+}
+
+void
+MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+    // Cycles in the move graph look like:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+
+ ScratchRegisterScope scratch(masm);
+
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(toAddress(to), scratchFloat32, scratch);
+            // Since it is uncertain whether the load will be aligned or not,
+            // just fill both words with the same value.
+ masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 0), scratch);
+ masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 4), scratch);
+ } else if (to.isGeneralReg()) {
+            // Since it is uncertain whether the load will be aligned or not,
+            // just fill both words with the same value.
+ masm.ma_str(to.reg(), cycleSlot(slotId, 0), scratch);
+ masm.ma_str(to.reg(), cycleSlot(slotId, 4), scratch);
+ } else {
+ FloatRegister src = to.floatReg();
+ // Just always store the largest possible size. Currently, this is
+ // a double. When SIMD is added, two doubles will need to be stored.
+ masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0), scratch);
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(to), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
+ } else if (to.isGeneralRegPair()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vxfer(to.evenReg(), to.oddReg(), scratchDouble);
+ masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
+ } else {
+ masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0), scratch);
+ }
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+        // A non-VFP value.
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.ma_ldr(toAddress(to), temp, scratch);
+ masm.ma_str(temp, cycleSlot(0,0), scratch);
+ } else {
+ if (to.reg() == spilledReg_) {
+ // If the destination was spilled, restore it first.
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ spilledReg_ = InvalidReg;
+ }
+ masm.ma_str(to.reg(), cycleSlot(0,0), scratch);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type, uint32_t slotId)
+{
+    // Cycles in the move graph look like:
+    //   (A -> B)
+    //   (B -> A)
+    //
+    // This case handles (B -> A), which we reach last. We emit a move from the
+    // saved value of B to A.
+
+ ScratchRegisterScope scratch(masm);
+
+ switch (type) {
+ case MoveOp::FLOAT32:
+ MOZ_ASSERT(!to.isGeneralRegPair());
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(cycleSlot(slotId, 0), scratchFloat32, scratch);
+ masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(type == MoveOp::FLOAT32);
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ } else {
+ uint32_t offset = 0;
+ if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
+ offset = sizeof(float);
+ masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
+ }
+ break;
+ case MoveOp::DOUBLE:
+ MOZ_ASSERT(!to.isGeneralReg());
+ if (to.isMemory()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(cycleSlot(slotId, 0), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ } else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(type == MoveOp::DOUBLE);
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(from), scratchDouble, scratch);
+ masm.ma_vxfer(scratchDouble, to.evenReg(), to.oddReg());
+ } else {
+ uint32_t offset = 0;
+ if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
+ offset = sizeof(float);
+ masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
+ }
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.ma_ldr(cycleSlot(slotId, 0), temp, scratch);
+ masm.ma_str(temp, toAddress(to), scratch);
+ } else {
+ if (to.reg() == spilledReg_) {
+ // Make sure we don't re-clobber the spilled register later.
+ spilledReg_ = InvalidReg;
+ }
+ masm.ma_ldr(cycleSlot(slotId, 0), to.reg(), scratch);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
+{
+ // Register pairs are used to store Double values during calls.
+ MOZ_ASSERT(!from.isGeneralRegPair());
+ MOZ_ASSERT(!to.isGeneralRegPair());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (to.isGeneralReg() && to.reg() == spilledReg_) {
+ // If the destination is the spilled register, make sure we
+ // don't re-clobber its value.
+ spilledReg_ = InvalidReg;
+ }
+
+ if (from.isGeneralReg()) {
+ if (from.reg() == spilledReg_) {
+ // If the source is a register that has been spilled, make sure
+ // to load the source back into that register.
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ spilledReg_ = InvalidReg;
+ }
+ if (to.isMemoryOrEffectiveAddress())
+ masm.ma_str(from.reg(), toAddress(to), scratch);
+ else
+ masm.ma_mov(from.reg(), to.reg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory())
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ else
+ masm.ma_add(from.base(), Imm32(from.disp()), to.reg(), scratch);
+ } else {
+ // Memory to memory gpr move.
+ Register reg = tempReg();
+
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory())
+ masm.ma_ldr(toAddress(from), reg, scratch);
+ else
+ masm.ma_add(from.base(), Imm32(from.disp()), reg, scratch);
+ MOZ_ASSERT(to.base() != reg);
+ masm.ma_str(reg, toAddress(to), scratch);
+ }
+}
+
+void
+MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ // Register pairs are used to store Double values during calls.
+ MOZ_ASSERT(!from.isGeneralRegPair());
+ MOZ_ASSERT(!to.isGeneralRegPair());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.ma_vmov_f32(from.floatReg(), to.floatReg());
+ else if (to.isGeneralReg())
+ masm.ma_vxfer(from.floatReg(), to.reg());
+ else
+ masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to), scratch);
+ } else if (from.isGeneralReg()) {
+ if (to.isFloatReg()) {
+ masm.ma_vxfer(from.reg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.ma_mov(from.reg(), to.reg());
+ } else {
+ masm.ma_str(from.reg(), toAddress(to), scratch);
+ }
+ } else if (to.isFloatReg()) {
+ masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay(), scratch);
+ } else if (to.isGeneralReg()) {
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(toAddress(from), scratchFloat32, scratch);
+ masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
+ }
+}
+
+void
+MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ // Registers are used to store pointers / int32 / float32 values.
+ MOZ_ASSERT(!from.isGeneralReg());
+ MOZ_ASSERT(!to.isGeneralReg());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.ma_vmov(from.floatReg(), to.floatReg());
+ else if (to.isGeneralRegPair())
+ masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
+ else
+ masm.ma_vstr(from.floatReg(), toAddress(to), scratch);
+ } else if (from.isGeneralRegPair()) {
+ if (to.isFloatReg())
+ masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
+ else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(!from.aliases(to));
+ masm.ma_mov(from.evenReg(), to.evenReg());
+ masm.ma_mov(from.oddReg(), to.oddReg());
+ } else {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vxfer(from.evenReg(), from.oddReg(), scratchDouble);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ }
+ } else if (to.isFloatReg()) {
+ masm.ma_vldr(toAddress(from), to.floatReg(), scratch);
+ } else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(from.isMemory());
+ Address src = toAddress(from);
+ // Note: We can safely use the MoveOperand's displacement here,
+ // even if the base is SP: MoveEmitter::toOperand adjusts
+ // SP-relative operands by the difference between the current
+ // stack usage and stackAdjust, which emitter.finish() resets to
+ // 0.
+ //
+ // Warning: if the offset isn't within [-255,+255] then this
+ // will assert-fail (or, if non-debug, load the wrong words).
+ // Nothing uses such an offset at the time of this writing.
+ masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(), to.oddReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(from), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ }
+}
+
+void
+MoveEmitterARM::emit(const MoveOp& move)
+{
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterARM::assertDone()
+{
+ MOZ_ASSERT(inCycle_ == 0);
+}
+
+void
+MoveEmitterARM::finish()
+{
+ assertDone();
+
+ if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ }
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
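To make the breakCycle()/completeCycle() protocol concrete, here is a minimal standalone sketch (plain C++, not jit code) of the two-move swap the comments describe: the cycle-begin move stashes its destination in the reserved slot, the ordinary move runs, and the cycle-end move reads the stash.

    #include <cstdio>

    int main() {
        int A = 1, B = 2;          // stand-ins for two registers
        int cycleSlot;             // stand-in for the stack slot reserved in emit()

        // (A -> B) is the cycle begin: save B before it is clobbered (breakCycle),
        // then perform the move itself.
        cycleSlot = B;
        B = A;

        // (B -> A) is the cycle end: its source was saved, so read the slot
        // instead (completeCycle).
        A = cycleSlot;

        printf("A=%d B=%d\n", A, B);   // A=2 B=1 -- the values were swapped
        return 0;
    }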
diff --git a/js/src/jit/arm/MoveEmitter-arm.h b/js/src/jit/arm/MoveEmitter-arm.h
new file mode 100644
index 000000000..70aafbdf6
--- /dev/null
+++ b/js/src/jit/arm/MoveEmitter-arm.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MoveEmitter_arm_h
+#define jit_arm_MoveEmitter_arm_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterARM
+{
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot) const;
+ Address spillSlot() const;
+ Address toAddress(const MoveOperand& operand) const;
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterARM(MacroAssembler& masm);
+ ~MoveEmitterARM();
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterARM MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MoveEmitter_arm_h */
diff --git a/js/src/jit/arm/SharedIC-arm.cpp b/js/src/jit/arm/SharedIC-arm.cpp
new file mode 100644
index 000000000..25c9d4cee
--- /dev/null
+++ b/js/src/jit/arm/SharedIC-arm.cpp
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+extern "C" {
+ extern MOZ_EXPORT int64_t __aeabi_idivmod(int,int);
+}
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    // No need to explicitly unbox R0 and R1; just use R2's payloadReg as a scratch.
+ Register scratchReg = R2.payloadReg();
+
+ // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+ AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
+ savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs.set());
+ ValueOperand savedValue = savedRegs.takeAnyValue();
+
+ Label maybeNegZero, revertRegister;
+ switch(op_) {
+ case JSOP_ADD:
+ masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
+
+ // Just jump to failure on overflow. R0 and R1 are preserved, so we can
+ // just jump to the next stub.
+ masm.j(Assembler::Overflow, &failure);
+
+ // Box the result and return. We know R0.typeReg() already contains the
+ // integer tag, so we just need to move the result value into place.
+ masm.mov(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_SUB:
+ masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
+ masm.j(Assembler::Overflow, &failure);
+ masm.mov(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_MUL: {
+ ScratchRegisterScope scratch(masm);
+ Assembler::Condition cond = masm.ma_check_mul(R0.payloadReg(), R1.payloadReg(), scratchReg,
+ scratch, Assembler::Overflow);
+ masm.j(cond, &failure);
+
+ masm.as_cmp(scratchReg, Imm8(0));
+ masm.j(Assembler::Equal, &maybeNegZero);
+
+ masm.mov(scratchReg, R0.payloadReg());
+ break;
+ }
+ case JSOP_DIV:
+ case JSOP_MOD: {
+ // Check for INT_MIN / -1, it results in a double.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_cmp(R0.payloadReg(), Imm32(INT_MIN), scratch);
+ masm.ma_cmp(R1.payloadReg(), Imm32(-1), scratch, Assembler::Equal);
+ masm.j(Assembler::Equal, &failure);
+ }
+
+ // Check for both division by zero and 0 / X with X < 0 (results in -0).
+ masm.as_cmp(R1.payloadReg(), Imm8(0));
+ masm.as_cmp(R0.payloadReg(), Imm8(0), Assembler::LessThan);
+ masm.j(Assembler::Equal, &failure);
+
+ // The call will preserve registers r4-r11. Save R0 and the link
+ // register.
+ MOZ_ASSERT(R1 == ValueOperand(r5, r4));
+ MOZ_ASSERT(R0 == ValueOperand(r3, r2));
+ masm.moveValue(R0, savedValue);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(R0.payloadReg());
+ masm.passABIArg(R1.payloadReg());
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
+
+ // idivmod returns the quotient in r0, and the remainder in r1.
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0.
+ masm.branch32(Assembler::NotEqual, r1, Imm32(0), &revertRegister);
+ masm.tagValue(JSVAL_TYPE_INT32, r0, R0);
+ } else {
+ // If X % Y == 0 and X < 0, the result is -0.
+ Label done;
+ masm.branch32(Assembler::NotEqual, r1, Imm32(0), &done);
+ masm.branch32(Assembler::LessThan, savedValue.payloadReg(), Imm32(0), &revertRegister);
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, r1, R0);
+ }
+ break;
+ }
+ case JSOP_BITOR:
+ masm.ma_orr(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITXOR:
+ masm.ma_eor(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITAND:
+ masm.ma_and(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_LSH:
+ // ARM will happily try to shift by more than 0x1f.
+ masm.as_and(R1.payloadReg(), R1.payloadReg(), Imm8(0x1F));
+ masm.ma_lsl(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_RSH:
+ masm.as_and(R1.payloadReg(), R1.payloadReg(), Imm8(0x1F));
+ masm.ma_asr(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_URSH:
+ masm.as_and(scratchReg, R1.payloadReg(), Imm8(0x1F));
+ masm.ma_lsr(scratchReg, R0.payloadReg(), scratchReg);
+ masm.as_cmp(scratchReg, Imm8(0));
+ if (allowDouble_) {
+ Label toUint;
+ masm.j(Assembler::LessThan, &toUint);
+
+ // Move result and box for return.
+ masm.mov(scratchReg, R0.payloadReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ ScratchDoubleScope scratchDouble(masm);
+ masm.convertUInt32ToDouble(scratchReg, scratchDouble);
+ masm.boxDouble(scratchDouble, R0);
+ } else {
+ masm.j(Assembler::LessThan, &failure);
+ // Move result for return.
+ masm.mov(scratchReg, R0.payloadReg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+ switch (op_) {
+ case JSOP_MUL:
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ masm.ma_cmn(R0.payloadReg(), R1.payloadReg());
+ masm.j(Assembler::Signed, &failure);
+
+ // Result is +0.
+ masm.ma_mov(Imm32(0), R0.payloadReg());
+ EmitReturnFromIC(masm);
+ break;
+ case JSOP_DIV:
+ case JSOP_MOD:
+ masm.bind(&revertRegister);
+ masm.moveValue(savedValue, R0);
+ break;
+ default:
+ break;
+ }
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.ma_mvn(R0.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_NEG:
+        // Guard against 0 and INT_MIN, both of which produce a double when negated.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
+
+ // Compile -x as 0 - x.
+ masm.as_rsb(R0.payloadReg(), R0.payloadReg(), Imm8(0));
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
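The JSOP_DIV/JSOP_MOD guards in generateStubCode() above read more easily as plain arithmetic. A standalone sketch (not stub code; the helper name is hypothetical) of when the int32 fast path has to give up and leave the result to a double:

    #include <climits>
    #include <cstdint>
    #include <cstdio>

    // Returns false when the stub would bail out (jump to &failure or &revertRegister).
    static bool int32DivMod(int32_t x, int32_t y, bool isDiv, int32_t* out) {
        if (x == INT_MIN && y == -1)   // INT_MIN / -1 overflows int32
            return false;
        if (y == 0)                    // division by zero
            return false;
        if (x == 0 && y < 0)           // 0 / negative gives -0
            return false;
        int32_t q = x / y, r = x % y;
        if (isDiv) {
            if (r != 0)                // inexact quotient: result is a double
                return false;
            *out = q;
        } else {
            if (r == 0 && x < 0)       // negative dividend, zero remainder: -0
                return false;
            *out = r;
        }
        return true;
    }

    int main() {
        int32_t v;
        printf("%d\n", int32DivMod(6, 3, true, &v) ? v : -1);   // 2
        printf("%d\n", int32DivMod(7, 3, true, &v) ? v : -1);   // -1 (bails to double)
        return 0;
    }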
diff --git a/js/src/jit/arm/SharedICHelpers-arm.h b/js/src/jit/arm/SharedICHelpers-arm.h
new file mode 100644
index 000000000..17534adef
--- /dev/null
+++ b/js/src/jit/arm/SharedICHelpers-arm.h
@@ -0,0 +1,384 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_SharedICHelpers_arm_h
+#define jit_arm_SharedICHelpers_arm_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on the stack on ARM).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+    // No-op on ARM because the link register always holds the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+    // No-op on ARM because the link register always holds the return address.
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use r0.
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Call the stubcode via a direct branch-and-link.
+ masm.ma_blx(r0);
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg is
+ // properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use r0.
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Jump to the stubcode.
+ masm.branch(r0);
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.ma_mov(lr, pc);
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.ma_mov(reg, lr);
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+    // Here we assume that R0 and R1 have been pushed, and that R2 is unused.
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, r0);
+ masm.as_add(r0, r0, Imm8(BaselineFrame::FramePointerOffset));
+ masm.ma_sub(BaselineStackReg, r0);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_sub(r0, Imm32(argSize), r1, scratch);
+ }
+ masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls), but the VMWrapper code being called
+ // expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == lr);
+ masm.makeFrameDescriptor(r0, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(r0);
+ masm.push(lr);
+ masm.branch(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+    // Here we assume that R0 and R1 have been pushed, and that R2 is unused.
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+
+ masm.loadPtr(Address(sp, stackSize), r0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), r0);
+ masm.add32(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), r0);
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls), but the VMWrapper code being called
+ // expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == lr);
+ masm.makeFrameDescriptor(r0, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(r0);
+ masm.push(lr);
+ masm.branch(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and
+ // previous frame pointer pushed by EmitEnterStubFrame.
+ masm.mov(BaselineFrameReg, reg);
+ masm.as_add(reg, reg, Imm8(sizeof(void*) * 2));
+ masm.ma_sub(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ EmitBaselineCreateStubFrameDescriptor(masm, r0, ExitFrameLayout::Size());
+ masm.push(r0);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.callJit(target);
+
+ // Remove rest of the frame left on the stack. We remove the return address
+ // which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
+// Size of values pushed by EmitEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ // Compute frame size.
+ masm.mov(BaselineFrameReg, scratch);
+ masm.as_add(scratch, scratch, Imm8(BaselineFrame::FramePointerOffset));
+ masm.ma_sub(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE if
+ // needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(ICStubReg);
+ masm.Push(BaselineFrameReg);
+ masm.mov(BaselineStackReg, BaselineFrameReg);
+
+ // We pushed 4 words, so the stack is still aligned to 8 bytes.
+ masm.checkStackAlignment();
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(ICTailCallReg == lr);
+
+    // On ARM the link register contains the return address, but in jit frames
+    // we expect it to be on the stack. Push it here; since the return address
+    // logically belongs to the previous frame, we use push rather than Push.
+ masm.push(ICTailCallReg);
+
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ ScratchRegisterScope scratch(masm);
+
+ // Ion frames do not save and restore the frame pointer. If we called into
+ // Ion, we have to restore the stack pointer from the frame descriptor. If
+ // we performed a VM call, the descriptor has been popped already so in that
+ // case we use the frame pointer.
+ if (calledIntoIon) {
+ masm.Pop(scratch);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.add32(scratch, BaselineStackReg);
+ } else {
+ masm.mov(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.Pop(BaselineFrameReg);
+ masm.Pop(ICStubReg);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ masm.Pop(scratch);
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+ masm.pop(ICTailCallReg); // See EmitIonEnterStubFrame for explanation on pop/Pop.
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Stow R0.
+ masm.Push(R0);
+ break;
+ case 2:
+ // Stow R0 and R1.
+ masm.Push(R0);
+ masm.Push(R1);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Unstow R0.
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ break;
+ case 2:
+ // Unstow R0 and R1.
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+
+    // R0 contains the value that needs to be type-checked. The object we're
+    // updating is a boxed Value on the stack, at offset objectOffset from sp,
+    // excluding the return address.
+
+ // Save the current ICStubReg to stack, as well as the TailCallReg,
+ // since on ARM, the LR is live.
+ masm.push(ICStubReg);
+ masm.push(ICTailCallReg);
+
+ // This is expected to be called from within an IC, when ICStubReg is
+ // properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // TODO: Change r0 uses below to use masm's configurable scratch register instead.
+
+ // Load stubcode pointer from ICStubReg into ICTailCallReg.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Call the stubcode.
+ masm.ma_blx(r0);
+
+ // Restore the old stub reg and tailcall reg.
+ masm.pop(ICTailCallReg);
+ masm.pop(ICStubReg);
+
+    // The update IC will store 0 or 1 in R1.scratchReg(), reflecting whether
+    // the value in R0 type-checked properly.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ // On ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
+ masm.push(lr);
+ masm.patchableCallPreBarrier(addr, type);
+ masm.pop(lr);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ MOZ_ASSERT(R2 == ValueOperand(r1, r0));
+
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into scratch register.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == lr);
+ masm.branch(r0);
+}
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_SharedICHelpers_arm_h */
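Several helpers above pack and unpack frame descriptors: makeFrameDescriptor() builds one, and EmitIonTailCallVM()/EmitBaselineLeaveStubFrame() recover the frame size with rshiftPtr(Imm32(FRAMESIZE_SHIFT)). A standalone sketch of the packing idea; the shift value and the exact layout of the low bits are assumptions made for illustration, not taken from this patch:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kFrameSizeShift = 4;   // hypothetical stand-in for FRAMESIZE_SHIFT

    // The frame size lives in the high bits; the low bits carry the frame type.
    static uint32_t makeDescriptor(uint32_t frameSize, uint32_t frameType) {
        return (frameSize << kFrameSizeShift) | frameType;
    }

    int main() {
        uint32_t d = makeDescriptor(64, 3);
        uint32_t size = d >> kFrameSizeShift;               // what rshiftPtr recovers
        uint32_t type = d & ((1u << kFrameSizeShift) - 1);
        printf("size=%u type=%u\n", size, type);            // size=64 type=3
        return 0;
    }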
diff --git a/js/src/jit/arm/SharedICRegisters-arm.h b/js/src/jit/arm/SharedICRegisters-arm.h
new file mode 100644
index 000000000..144a3bd50
--- /dev/null
+++ b/js/src/jit/arm/SharedICRegisters-arm.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_SharedICRegisters_arm_h
+#define jit_arm_SharedICRegisters_arm_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+// r15 = program-counter
+// r14 = link-register
+
+// r13 = stack-pointer
+// r11 = frame-pointer
+static constexpr Register BaselineFrameReg = r11;
+static constexpr Register BaselineStackReg = sp;
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers that are not preserved across
+// calls. R1's value should be preserved across calls.
+static constexpr ValueOperand R0(r3, r2);
+static constexpr ValueOperand R1(r5, r4);
+static constexpr ValueOperand R2(r1, r0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = r14;
+static constexpr Register ICStubReg = r9;
+
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerARM.
+static constexpr Register BaselineSecondScratchReg = r6;
+
+// R7 - R9 are generally available for use within stubcode.
+
+// Note that ICTailCallReg is actually just the link register. In ARM code
+// emission, we do not clobber ICTailCallReg since we keep the return
+// address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = d0;
+static constexpr FloatRegister FloatReg1 = d1;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_SharedICRegisters_arm_h */
diff --git a/js/src/jit/arm/Simulator-arm.cpp b/js/src/jit/arm/Simulator-arm.cpp
new file mode 100644
index 000000000..2b295212a
--- /dev/null
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -0,0 +1,4941 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm/Simulator-arm.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/arm/disasm/Constants-arm.h"
+#include "jit/AtomicOperations.h"
+#include "threading/LockGuard.h"
+#include "vm/Runtime.h"
+#include "vm/SharedMem.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+extern "C" {
+
+int64_t
+__aeabi_idivmod(int x, int y)
+{
+ uint32_t lo = uint32_t(x / y);
+ uint32_t hi = uint32_t(x % y);
+ return (int64_t(hi) << 32) | lo;
+}
+
+int64_t
+__aeabi_uidivmod(int x, int y)
+{
+ uint32_t lo = uint32_t(x) / uint32_t(y);
+ uint32_t hi = uint32_t(x) % uint32_t(y);
+ return (int64_t(hi) << 32) | lo;
+}
+}
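A quick standalone check of the convention these two simulator shims rely on: under the 32-bit ARM calling convention a 64-bit return value travels in r0 (low word) and r1 (high word), so packing the remainder into the high word and the quotient into the low word puts the quotient in r0 and the remainder in r1, which is what the caller in SharedIC-arm.cpp expects.

    #include <cstdint>
    #include <cstdio>

    int main() {
        int x = 7, y = 3;
        int64_t packed = (int64_t(uint32_t(x % y)) << 32) | uint32_t(x / y);
        uint32_t r0 = uint32_t(packed);          // low word: the quotient
        uint32_t r1 = uint32_t(packed >> 32);    // high word: the remainder
        printf("quotient=%u remainder=%u\n", r0, r1);   // quotient=2 remainder=1
        return 0;
    }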
+
+namespace js {
+namespace jit {
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0|0|0) << 21, // Decrement after.
+ ia_x = (0|4|0) << 21, // Increment after.
+ db_x = (8|0|0) << 21, // Decrement before.
+ ib_x = (8|4|0) << 21, // Increment before.
+};
+
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision {
+ kSinglePrecision = 0,
+ kDoublePrecision = 1
+};
+
+enum NeonListType {
+ nlt_1 = 0x7,
+ nlt_2 = 0xA,
+ nlt_3 = 0x6,
+ nlt_4 = 0x2
+};
+
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the ARM
+// simulator.
+// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ kCallRtRedirected = 0x10, // Transition to C code.
+  kBreakpoint = 0x20, // Breakpoint.
+ kStopCode = 1 << 23 // Stop.
+};
+
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
+// return ((type == 0) || (type == 1)) && instr->hasS();
+// }
+//
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ kPCReadOffset = 8
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const {
+ return (instructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field's value out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int bitField(int hi, int lo) const {
+ return instructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Accessors for the different named fields used in the ARM encoding.
+    // The naming of these accessors corresponds to figure A3-1.
+    //
+    // Two kinds of accessors are declared:
+    // - <Name>Field() will return the raw field, i.e. the field's bits at their
+    //   original place in the instruction encoding.
+    //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+    //   0xC0810002, conditionField() will return 0xC0000000.
+    // - <Name>Value() will return the field value, shifted back to bit 0.
+    //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+    //   0xC0810002, the corresponding Value() accessor will return 0xC.
+
+ // Generally applicable fields
+ inline Assembler::ARMCondition conditionField() const {
+ return static_cast<Assembler::ARMCondition>(bitField(31, 28));
+ }
+ inline int typeValue() const { return bits(27, 25); }
+ inline int specialValue() const { return bits(27, 23); }
+
+ inline int rnValue() const { return bits(19, 16); }
+ inline int rdValue() const { return bits(15, 12); }
+
+ inline int coprocessorValue() const { return bits(11, 8); }
+
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int vnValue() const { return bits(19, 16); }
+ inline int vmValue() const { return bits(3, 0); }
+ inline int vdValue() const { return bits(15, 12); }
+ inline int nValue() const { return bit(7); }
+ inline int mValue() const { return bit(5); }
+ inline int dValue() const { return bit(22); }
+ inline int rtValue() const { return bits(15, 12); }
+ inline int pValue() const { return bit(24); }
+ inline int uValue() const { return bit(23); }
+ inline int opc1Value() const { return (bit(23) << 2) | bits(21, 20); }
+ inline int opc2Value() const { return bits(19, 16); }
+ inline int opc3Value() const { return bits(7, 6); }
+ inline int szValue() const { return bit(8); }
+ inline int VLValue() const { return bit(20); }
+ inline int VCValue() const { return bit(8); }
+ inline int VAValue() const { return bits(23, 21); }
+ inline int VBValue() const { return bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 16, 7); }
+ inline int VFPMRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 0, 5); }
+ inline int VFPDRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 12, 22); }
+
+ // Fields used in Data processing instructions.
+ inline int opcodeValue() const { return static_cast<ALUOp>(bits(24, 21)); }
+ inline ALUOp opcodeField() const { return static_cast<ALUOp>(bitField(24, 21)); }
+ inline int sValue() const { return bit(20); }
+
+ // With register.
+ inline int rmValue() const { return bits(3, 0); }
+ inline ShiftType shifttypeValue() const { return static_cast<ShiftType>(bits(6, 5)); }
+ inline int rsValue() const { return bits(11, 8); }
+ inline int shiftAmountValue() const { return bits(11, 7); }
+
+ // With immediate.
+ inline int rotateValue() const { return bits(11, 8); }
+ inline int immed8Value() const { return bits(7, 0); }
+ inline int immed4Value() const { return bits(19, 16); }
+ inline int immedMovwMovtValue() const { return immed4Value() << 12 | offset12Value(); }
+
+ // Fields used in Load/Store instructions.
+ inline int PUValue() const { return bits(24, 23); }
+ inline int PUField() const { return bitField(24, 23); }
+ inline int bValue() const { return bit(22); }
+ inline int wValue() const { return bit(21); }
+ inline int lValue() const { return bit(20); }
+
+ // With a register offset, these use the same fields as the Data processing
+ // instructions above; with an immediate offset:
+ inline int offset12Value() const { return bits(11, 0); }
+
+ // Multiple.
+ inline int rlistValue() const { return bits(15, 0); }
+
+ // Extra loads and stores.
+ inline int signValue() const { return bit(6); }
+ inline int hValue() const { return bit(5); }
+ inline int immedHValue() const { return bits(11, 8); }
+ inline int immedLValue() const { return bits(3, 0); }
+
+ // Fields used in Branch instructions.
+ inline int linkValue() const { return bit(24); }
+ inline int sImmed24Value() const { return ((instructionBits() << 8) >> 8); }
+
+ // Fields used in Software interrupt instructions.
+ inline SoftwareInterruptCodes svcValue() const {
+ return static_cast<SoftwareInterruptCodes>(bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and
+ // stores, as well as multiplications).
+ inline bool isSpecialType0() const { return (bit(7) == 1) && (bit(4) == 1); }
+
+ // Test for miscellaneous instructions encodings of type 0 instructions.
+ inline bool isMiscType0() const {
+ return bit(24) == 1 && bit(23) == 0 && bit(20) == 0 && (bit(7) == 0);
+ }
+
+ // Test for a nop instruction, which falls under type 1.
+ inline bool isNopType1() const { return bits(24, 0) == 0x0120F000; }
+
+ // Test for a stop instruction.
+ inline bool isStop() const {
+ return typeValue() == 7 && bit(24) == 1 && svcValue() >= kStopCode;
+ }
+
+ // Special accessors that test for existence of a value.
+ inline bool hasS() const { return sValue() == 1; }
+ inline bool hasB() const { return bValue() == 1; }
+ inline bool hasW() const { return wValue() == 1; }
+ inline bool hasL() const { return lValue() == 1; }
+ inline bool hasU() const { return uValue() == 1; }
+ inline bool hasSign() const { return signValue() == 1; }
+ inline bool hasH() const { return hValue() == 1; }
+ inline bool hasLink() const { return linkValue() == 1; }
+
+ // Decoding the double immediate in the vmov instruction.
+ double doubleImmedVmov() const;
+ // Decoding the float32 immediate in the vmov.f32 instruction.
+ float float32ImmedVmov() const;
+
+ private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four
+ // bit specifier. one_bit is the position of the additional single bit
+ // specifier.
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision)
+ return (bits(four_bit + 3, four_bit) << 1) | bit(one_bit);
+ return (bit(one_bit) << 4) | bits(four_bit + 3, four_bit);
+ }
+
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+double
+SimInstruction::doubleImmedVmov() const
+{
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
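+ //
+ // For example, the encoded immediate abcdefgh = 0x70 (abcd = 0b0111 in
+ // bits 19:16, efgh = 0b0000 in bits 3:0) yields high16 = 0x3ff0, the
+ // upper bits of the IEEE-754 encoding of 1.0.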
+ uint64_t high16;
+ high16 = (bits(17, 16) << 4) | bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ return mozilla::BitwiseCast<double>(imm);
+}
+
+float
+SimInstruction::float32ImmedVmov() const
+{
+ // Reconstruct a float32 from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // float32: [aBbbbbbc, defgh000, 00000000, 00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
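+ //
+ // For example, the same encoded immediate 0x70 yields 0x3f800000, the
+ // IEEE-754 encoding of 1.0f.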
+ uint32_t imm;
+ imm = (bits(17, 16) << 23) | (bits(3, 0) << 19); // xxxxxxxc,defgh000.0.0
+ imm |= (0x1f * bit(18)) << 25; // xxbbbbbx,xxxxxxxx.0.0
+ imm |= (bit(18) ^ 1) << 30; // xBxxxxxx,xxxxxxxx.0.0
+ imm |= bit(19) << 31; // axxxxxxx,xxxxxxxx.0.0
+
+ return mozilla::BitwiseCast<float>(imm);
+}
+
+class CachePage
+{
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+ char* cachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex>
+{
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache(Simulator* sim)
+ : Base(sim->cacheLock_)
+ , sim_(sim)
+ {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isNothing());
+#ifdef DEBUG
+ sim_->cacheLockHolder_ = mozilla::Some(ThisThread::GetId());
+#endif
+ }
+
+ ~AutoLockSimulatorCache() {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isSome());
+#ifdef DEBUG
+ sim_->cacheLockHolder_.reset();
+#endif
+ }
+
+ private:
+ Simulator* const sim_;
+};
+
+bool Simulator::ICacheCheckingEnabled = false;
+
+int64_t Simulator::StopSimAt = -1L;
+
+Simulator*
+Simulator::Create(JSContext* cx)
+{
+ Simulator* sim = js_new<Simulator>(cx);
+ if (!sim)
+ return nullptr;
+
+ if (!sim->init()) {
+ js_delete(sim);
+ return nullptr;
+ }
+
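+ // Two environment variables tune the simulator: ARM_SIM_ICACHE_CHECKS
+ // turns on the simulated-icache consistency checks, and
+ // ARM_SIM_STOP_AT=<icount> stops simulation at that instruction count.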
+ if (getenv("ARM_SIM_ICACHE_CHECKS"))
+ Simulator::ICacheCheckingEnabled = true;
+
+ char* stopAtStr = getenv("ARM_SIM_STOP_AT");
+ int64_t stopAt;
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim;
+}
+
+void
+Simulator::Destroy(Simulator* sim)
+{
+ js_delete(sim);
+}
+
+void
+Simulator::disassemble(SimInstruction* instr, size_t n)
+{
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ while (n-- > 0) {
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<uint8_t*>(instr));
+ printf(" 0x%08x %s\n", uint32_t(instr), buffer.start());
+ instr = reinterpret_cast<SimInstruction*>(reinterpret_cast<uint8_t*>(instr) + 4);
+ }
+}
+
+void
+Simulator::disasm(SimInstruction* instr)
+{
+ disassemble(instr, 1);
+}
+
+void
+Simulator::disasm(SimInstruction* instr, size_t n)
+{
+ disassemble(instr, n);
+}
+
+void
+Simulator::disasm(SimInstruction* instr, size_t m, size_t n)
+{
+ disassemble(reinterpret_cast<SimInstruction*>(reinterpret_cast<uint8_t*>(instr) - m * 4), n);
+}
+
+// The ArmDebugger class is used by the simulator while debugging simulated ARM
+// code.
+class ArmDebugger {
+ public:
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
+
+ void stop(SimInstruction* instr);
+ void debug();
+
+ private:
+ static const Instr kBreakpointInstr = (Assembler::AL | (7 * (1 << 25)) | (1 * (1 << 24)) | kBreakpoint);
+ static const Instr kNopInstr = (Assembler::AL | (13 * (1 << 21)));
+
+ Simulator* sim_;
+
+ int32_t getRegisterValue(int regnum);
+ double getRegisterPairDoubleValue(int regnum);
+ void getVFPDoubleRegisterValue(int regnum, double* value);
+ bool getValue(const char* desc, int32_t* value);
+ bool getVFPDoubleValue(const char* desc, double* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+void
+ArmDebugger::stop(SimInstruction* instr)
+{
+ // Get the stop code.
+ uint32_t code = instr->svcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int32_t
+ArmDebugger::getRegisterValue(int regnum)
+{
+ if (regnum == Registers::pc)
+ return sim_->get_pc();
+ return sim_->get_register(regnum);
+}
+
+double
+ArmDebugger::getRegisterPairDoubleValue(int regnum)
+{
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+void
+ArmDebugger::getVFPDoubleRegisterValue(int regnum, double* out)
+{
+ sim_->get_double_from_d_register(regnum, out);
+}
+
+bool
+ArmDebugger::getValue(const char* desc, int32_t* value)
+{
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+ if (strncmp(desc, "0x", 2) == 0)
+ return sscanf(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ return sscanf(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
+}
+
+bool
+ArmDebugger::getVFPDoubleValue(const char* desc, double* value)
+{
+ FloatRegister reg(FloatRegister::FromName(desc));
+ if (reg != InvalidFloatReg) {
+ sim_->get_double_from_d_register(reg.code(), value);
+ return true;
+ }
+ return false;
+}
+
+bool
+ArmDebugger::setBreakpoint(SimInstruction* breakpc)
+{
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_)
+ return false;
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool
+ArmDebugger::deleteBreakpoint(SimInstruction* breakpc)
+{
+ if (sim_->break_pc_ != nullptr)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void
+ArmDebugger::undoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+}
+
+void
+ArmDebugger::redoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+}
+
+static char*
+ReadLine(const char* prompt)
+{
+ char* result = nullptr;
+ char line_buf[256];
+ int offset = 0;
+ bool keep_going = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keep_going) {
+ if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ if (result)
+ js_delete(result);
+ return nullptr;
+ }
+ int len = strlen(line_buf);
+ if (len > 0 && line_buf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This will
+ // exit the loop after copying this buffer into the result.
+ keep_going = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating
+ // '\0'.
+ result = (char*)js_malloc(len + 1);
+ if (!result)
+ return nullptr;
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = (char*)js_malloc(new_len);
+ if (!new_result)
+ return nullptr;
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result, offset * sizeof(char));
+ js_free(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ memcpy(result + offset, line_buf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result;
+}
+
+
+void
+ArmDebugger::debug()
+{
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+#ifndef JS_DISASM_ARM
+ static bool disasm_warning_printed = false;
+ if (!disasm_warning_printed) {
+ printf(" No ARM disassembler present. Enable JS_DISASM_ARM in configure.in.");
+ disasm_warning_printed = true;
+ }
+#endif
+
+ while (!done && !sim_->has_bad_pc()) {
+ if (last_pc != sim_->get_pc()) {
+#ifdef JS_DISASM_ARM
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<uint8_t*>(sim_->get_pc()));
+ printf(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
+#endif
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->setLastDebuggerInput(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if (argc < 0) {
+ continue;
+ } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "skip") == 0)) {
+ sim_->set_pc(sim_->get_pc() + 4);
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints
+ // disabled.
+ sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+ int32_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d", Registers::GetName(i), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
+ i < 8 &&
+ (i % 2) == 0) {
+ dvalue = getRegisterPairDoubleValue(i);
+ printf(" (%.16g)\n", dvalue);
+ } else {
+ printf("\n");
+ }
+ }
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ getVFPDoubleRegisterValue(i, &dvalue);
+ uint64_t as_words = mozilla::BitwiseCast<uint64_t>(dvalue);
+ printf("%3s: %.16g 0x%08x %08x\n",
+ FloatRegister::FromCode(i).name(),
+ dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ }
+ } else {
+ if (getValue(arg1, &value)) {
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (getVFPDoubleValue(arg1, &dvalue)) {
+ uint64_t as_words = mozilla::BitwiseCast<uint64_t>(dvalue);
+ printf("%s: %.16g 0x%08x %08x\n",
+ arg1,
+ dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+#ifdef JS_DISASM_ARM
+ uint8_t* prev = nullptr;
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+ while (cur < end) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+
+ prev = cur;
+ cur += dasm.InstructionDecode(buffer, cur);
+ printf(" 0x%08x %s\n", reinterpret_cast<uint32_t>(prev), buffer.start());
+ }
+#endif
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value)))
+ printf("setting breakpoint failed\n");
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("N flag: %d; ", sim_->n_flag_);
+ printf("Z flag: %d; ", sim_->z_flag_);
+ printf("C flag: %d; ", sim_->c_flag_);
+ printf("V flag: %d\n", sim_->v_flag_);
+ printf("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ printf("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ printf("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ printf("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ printf("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address =
+ reinterpret_cast<SimInstruction*>(stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all or the specified stop(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++)
+ sim_->printStopInfo(i);
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all or the specified stop(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++)
+ sim_->enableStop(i);
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all or the specified stop(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("skip\n");
+ printf(" skip one instruction (set pc to next instruction)\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf(" add argument 'fp' to print register pair double values\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the ArmDebugger.\n");
+ printf(" The first %d stop codes are watched:\n",
+ Simulator::kNumOfWatchedStops);
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool
+AllOnOnePage(uintptr_t start, int size)
+{
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+static CachePage*
+GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
+{
+ MOZ_ASSERT(Simulator::ICacheCheckingEnabled);
+
+ Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p)
+ return p->value();
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page))
+ oomUnsafe.crash("Simulator CachePage");
+
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void
+FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size)
+{
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void
+FlushICacheLocked(Simulator::ICacheMap& i_cache, void* start_addr, size_t size)
+{
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0)
+ FlushOnePageLocked(i_cache, start, size);
+}
+
+static void
+CheckICacheLocked(Simulator::ICacheMap& i_cache, SimInstruction* instr)
+{
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset),
+ SimInstruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber
+Simulator::ICacheHasher::hash(const Lookup& l)
+{
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool
+Simulator::ICacheHasher::match(const Key& k, const Lookup& l)
+{
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+void
+Simulator::setLastDebuggerInput(char* input)
+{
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+void
+Simulator::FlushICache(void* start_addr, size_t size)
+{
+ JitSpewCont(JitSpew_CacheFlush, "[%p %" PRIxSIZE "]", start_addr, size);
+ if (Simulator::ICacheCheckingEnabled) {
+ Simulator* sim = Simulator::Current();
+
+ AutoLockSimulatorCache als(sim);
+
+ js::jit::FlushICacheLocked(sim->icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator(JSContext* cx)
+ : cx_(cx),
+ cacheLock_(mutexid::SimulatorCacheLock)
+{
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0L;
+ resume_pc_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+ skipCalleeSavedRegsCheck = false;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < num_registers; i++)
+ registers_[i] = 0;
+
+ n_flag_ = false;
+ z_flag_ = false;
+ c_flag_ = false;
+ v_flag_ = false;
+
+ for (int i = 0; i < num_d_registers * 2; i++)
+ vfp_registers_[i] = 0;
+
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ FPSCR_rounding_mode_ = SimRZ;
+ FPSCR_default_NaN_mode_ = true;
+
+ inv_op_vfp_flag_ = false;
+ div_zero_vfp_flag_ = false;
+ overflow_vfp_flag_ = false;
+ underflow_vfp_flag_ = false;
+ inexact_vfp_flag_ = false;
+
+ // The lr and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_lr;
+ registers_[lr] = bad_lr;
+
+ lastDebuggerInput_ = nullptr;
+
+ redirection_ = nullptr;
+ exclusiveMonitorHeld_ = false;
+ exclusiveMonitor_ = 0;
+}
+
+bool
+Simulator::init()
+{
+ if (!icache_.init())
+ return false;
+
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = reinterpret_cast<char*>(js_malloc(stackSize));
+ if (!stack_)
+ return false;
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls a VM function (masm.callWithABI) we need to
+// call that function instead of trying to execute it with the simulator
+// (because it's x86 code instead of arm code). We do that by redirecting the VM
+// call to a svc (Supervisor Call) instruction that is handled by the
+// simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(Assembler::AL | (0xf * (1 << 24)) | kCallRtRedirected),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = sim->redirection();
+ if (Simulator::ICacheCheckingEnabled)
+ FlushICacheLocked(sim->icache(), addressOfSwiInstruction(), SimInstruction::kInstrSize);
+ sim->setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ Simulator* sim = Simulator::Current();
+
+ AutoLockSimulatorCache als(sim);
+
+ Redirection* current = sim->redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
+ if (!redir)
+ oomUnsafe.crash("Simulator redirection");
+ new(redir) Redirection(nativeFunction, type, sim);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator()
+{
+ js_free(stack_);
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */ void*
+Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type)
+{
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void
+Simulator::set_register(int reg, int32_t value)
+{
+ MOZ_ASSERT(reg >= 0 && reg < num_registers);
+ if (reg == pc)
+ pc_modified_ = true;
+ registers_[reg] = value;
+}
+
+// Get the register from the architecture state. This function does handle the
+// special case of accessing the PC register.
+int32_t
+Simulator::get_register(int reg) const
+{
+ MOZ_ASSERT(reg >= 0 && reg < num_registers);
+ // Work around GCC bug: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= num_registers) return 0;
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double
+Simulator::get_double_from_register_pair(int reg)
+{
+ MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);
+
+ // Read the bits from the unsigned integer register_[] array into the double
+ // precision floating point value and return it.
+ double dm_val = 0.0;
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return dm_val;
+}
+
+void
+Simulator::set_register_pair_from_double(int reg, double* value)
+{
+ MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+void
+Simulator::set_dw_register(int dreg, const int* dbl)
+{
+ MOZ_ASSERT(dreg >= 0 && dreg < num_d_registers);
+ registers_[dreg] = dbl[0];
+ registers_[dreg + 1] = dbl[1];
+}
+
+void
+Simulator::get_d_register(int dreg, uint64_t* value)
+{
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+void
+Simulator::set_d_register(int dreg, const uint64_t* value)
+{
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+void
+Simulator::get_d_register(int dreg, uint32_t* value)
+{
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+void
+Simulator::set_d_register(int dreg, const uint32_t* value)
+{
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+void
+Simulator::get_q_register(int qreg, uint64_t* value)
+{
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+void
+Simulator::set_q_register(int qreg, const uint64_t* value)
+{
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+void
+Simulator::get_q_register(int qreg, uint32_t* value)
+{
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+void
+Simulator::set_q_register(int qreg, const uint32_t* value)
+{
+ MOZ_ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+void
+Simulator::set_pc(int32_t value)
+{
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool
+Simulator::has_bad_pc() const
+{
+ return registers_[pc] == bad_lr || registers_[pc] == end_sim_pc;
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t
+Simulator::get_pc() const
+{
+ return registers_[pc];
+}
+
+void
+Simulator::set_s_register(int sreg, unsigned int value)
+{
+ MOZ_ASSERT(sreg >= 0 && sreg < num_s_registers);
+ vfp_registers_[sreg] = value;
+}
+
+unsigned
+Simulator::get_s_register(int sreg) const
+{
+ MOZ_ASSERT(sreg >= 0 && sreg < num_s_registers);
+ return vfp_registers_[sreg];
+}
+
+template<class InputType, int register_size>
+void
+Simulator::setVFPRegister(int reg_index, const InputType& value)
+{
+ MOZ_ASSERT(reg_index >= 0);
+ MOZ_ASSERT_IF(register_size == 1, reg_index < num_s_registers);
+ MOZ_ASSERT_IF(register_size == 2, reg_index < int(FloatRegisters::TotalPhys));
+
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
+}
+
+template<class ReturnType, int register_size>
+void Simulator::getFromVFPRegister(int reg_index, ReturnType* out)
+{
+ MOZ_ASSERT(reg_index >= 0);
+ MOZ_ASSERT_IF(register_size == 1, reg_index < num_s_registers);
+ MOZ_ASSERT_IF(register_size == 2, reg_index < int(FloatRegisters::TotalPhys));
+
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ memcpy(out, buffer, register_size * sizeof(vfp_registers_[0]));
+}
+
+// These forced instantiations are for jsapi-tests; evidently, nothing else
+// requires these to be instantiated.
+template void Simulator::getFromVFPRegister<double, 2>(int reg_index, double* out);
+template void Simulator::getFromVFPRegister<float, 1>(int reg_index, float* out);
+template void Simulator::setVFPRegister<double, 2>(int reg_index, const double& value);
+template void Simulator::setVFPRegister<float, 1>(int reg_index, const float& value);
+
+void
+Simulator::getFpArgs(double* x, double* y, int32_t* z)
+{
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, x);
+ get_double_from_d_register(1, y);
+ *z = get_register(0);
+ } else {
+ *x = get_double_from_register_pair(0);
+ *y = get_double_from_register_pair(2);
+ *z = get_register(2);
+ }
+}
+
+void
+Simulator::getFpFromStack(int32_t* stack, double* x)
+{
+ MOZ_ASSERT(stack && x);
+ char buffer[2 * sizeof(stack[0])];
+ memcpy(buffer, stack, 2 * sizeof(stack[0]));
+ memcpy(x, buffer, 2 * sizeof(stack[0]));
+}
+
+void
+Simulator::setCallResultDouble(double result)
+{
+ // The return value is either in r0/r1 or d0.
+ if (UseHardFpABI()) {
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to d0.
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to r0 and r1.
+ memcpy(registers_, buffer, sizeof(buffer));
+ }
+}
+
+void
+Simulator::setCallResultFloat(float result)
+{
+ if (UseHardFpABI()) {
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to s0.
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
+ } else {
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to r0.
+ memcpy(registers_, buffer, sizeof(buffer));
+ }
+}
+
+void
+Simulator::setCallResult(int64_t res)
+{
+ set_register(r0, static_cast<int32_t>(res));
+ set_register(r1, static_cast<int32_t>(res >> 32));
+}
+
+void
+Simulator::exclusiveMonitorSet(uint64_t value)
+{
+ exclusiveMonitor_ = value;
+ exclusiveMonitorHeld_ = true;
+}
+
+uint64_t
+Simulator::exclusiveMonitorGetAndClear(bool* held)
+{
+ *held = exclusiveMonitorHeld_;
+ exclusiveMonitorHeld_ = false;
+ return *held ? exclusiveMonitor_ : 0;
+}
+
+void
+Simulator::exclusiveMonitorClear()
+{
+ exclusiveMonitorHeld_ = false;
+}
+
+// WebAssembly memories contain an extra region of guard pages (see
+// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
+// using a signal handler that redirects PC to a stub that safely reports an
+// error. However, if the handler is hit by the simulator, the PC is in C++ code
+// and cannot be redirected. Therefore, we must avoid hitting the handler by
+// redirecting in the simulator before the real handler would have been hit.
+bool
+Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
+{
+ WasmActivation* act = cx_->wasmActivationStack();
+ if (!act)
+ return false;
+
+ void* pc = reinterpret_cast<void*>(get_pc());
+ wasm::Instance* instance = act->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
+ return false;
+
+ const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
+ if (!memoryAccess) {
+ set_pc(int32_t(instance->codeSegment().outOfBoundsCode()));
+ return true;
+ }
+
+ MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
+ set_pc(int32_t(memoryAccess->trapOutOfLineCode(instance->codeBase())));
+ return true;
+}
+
+uint64_t
+Simulator::readQ(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
+{
+ if (handleWasmFault(addr, 8))
+ return -1;
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ return *ptr;
+ }
+
+ // See the comments below in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ uint64_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeQ(int32_t addr, uint64_t value, SimInstruction* instr, UnalignedPolicy f)
+{
+ if (handleWasmFault(addr, 8))
+ return;
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments below in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+int
+Simulator::readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
+{
+ if (handleWasmFault(addr, 4))
+ return -1;
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+
+ // In WebAssembly, we want unaligned accesses to either raise a signal or
+ // do the right thing. Making this simulator properly emulate the behavior
+ // of raising a signal is complex, so as a special-case, when in wasm code,
+ // we just do the right thing.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ int value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeW(int32_t addr, int value, SimInstruction* instr, UnalignedPolicy f)
+{
+ if (handleWasmFault(addr, 4))
+ return;
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+// For the time being, define Relaxed operations in terms of SeqCst
+// operations - we don't yet need Relaxed operations anywhere else in
+// the system, and the distinction is not important to the simulation
+// at the level where we're operating.
+
+template<typename T>
+static
+T loadRelaxed(SharedMem<T*> addr)
+{
+ return AtomicOperations::loadSeqCst(addr);
+}
+
+template<typename T>
+static
+T compareExchangeRelaxed(SharedMem<T*> addr, T oldval, T newval)
+{
+ return AtomicOperations::compareExchangeSeqCst(addr, oldval, newval);
+}
+
+int
+Simulator::readExW(int32_t addr, SimInstruction* instr)
+{
+ // The regexp engine emits unaligned loads, so we don't check for them here
+ // like most of the other methods do.
+ if ((addr & 3) == 0 || !HasAlignmentFault()) {
+ SharedMem<int32_t*> ptr = SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+ int32_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+ } else {
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+ }
+}
+
+int32_t
+Simulator::writeExW(int32_t addr, int value, SimInstruction* instr)
+{
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr = SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+ bool held;
+ int32_t expected = int32_t(exclusiveMonitorGetAndClear(&held));
+ if (!held)
+ return 1;
+ int32_t old = compareExchangeRelaxed(ptr, expected, int32_t(value));
+ return old != expected;
+ }
+
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+uint16_t
+Simulator::readHU(int32_t addr, SimInstruction* instr)
+{
+ if (handleWasmFault(addr, 2))
+ return UINT16_MAX;
+
+ // The regexp engine emits unaligned loads, so we don't check for them here
+ // like most of the other methods do.
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+
+ // See comments above in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ uint16_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t
+Simulator::readH(int32_t addr, SimInstruction* instr)
+{
+ if (handleWasmFault(addr, 2))
+ return -1;
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+
+ // See comments above in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ int16_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned signed halfword read at 0x%08x\n", addr);
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeH(int32_t addr, uint16_t value, SimInstruction* instr)
+{
+ if (handleWasmFault(addr, 2))
+ return;
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeH(int32_t addr, int16_t value, SimInstruction* instr)
+{
+ if (handleWasmFault(addr, 2))
+ return;
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+uint16_t
+Simulator::readExHU(int32_t addr, SimInstruction* instr)
+{
+ // The regexp engine emits unaligned loads, so we don't check for them here
+ // like most of the other methods do.
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ SharedMem<uint16_t*> ptr = SharedMem<uint16_t*>::shared(reinterpret_cast<uint16_t*>(addr));
+ uint16_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+ }
+ printf("Unaligned atomic unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+ return 0;
+}
+
+int32_t
+Simulator::writeExH(int32_t addr, uint16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ SharedMem<uint16_t*> ptr = SharedMem<uint16_t*>::shared(reinterpret_cast<uint16_t*>(addr));
+ bool held;
+ uint16_t expected = uint16_t(exclusiveMonitorGetAndClear(&held));
+ if (!held)
+ return 1;
+ uint16_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+ } else {
+ printf("Unaligned atomic unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+ }
+}
+
+uint8_t
+Simulator::readBU(int32_t addr)
+{
+ if (handleWasmFault(addr, 1))
+ return UINT8_MAX;
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+uint8_t
+Simulator::readExBU(int32_t addr)
+{
+ SharedMem<uint8_t*> ptr = SharedMem<uint8_t*>::shared(reinterpret_cast<uint8_t*>(addr));
+ uint8_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+}
+
+int32_t
+Simulator::writeExB(int32_t addr, uint8_t value)
+{
+ SharedMem<uint8_t*> ptr = SharedMem<uint8_t*>::shared(reinterpret_cast<uint8_t*>(addr));
+ bool held;
+ uint8_t expected = uint8_t(exclusiveMonitorGetAndClear(&held));
+ if (!held)
+ return 1;
+ uint8_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+}
+
+int8_t
+Simulator::readB(int32_t addr)
+{
+ if (handleWasmFault(addr, 1))
+ return -1;
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void
+Simulator::writeB(int32_t addr, uint8_t value)
+{
+ if (handleWasmFault(addr, 1))
+ return;
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void
+Simulator::writeB(int32_t addr, int8_t value)
+{
+ if (handleWasmFault(addr, 1))
+ return;
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+int32_t*
+Simulator::readDW(int32_t addr)
+{
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return ptr;
+ }
+ printf("Unaligned read at 0x%08x\n", addr);
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeDW(int32_t addr, int32_t value1, int32_t value2)
+{
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr++ = value1;
+ *ptr = value2;
+ } else {
+ printf("Unaligned write at 0x%08x\n", addr);
+ MOZ_CRASH();
+ }
+}
+
+int32_t
+Simulator::readExDW(int32_t addr, int32_t* hibits)
+{
+#if defined(__clang__) && defined(__i386)
+ // This is OK for now, we don't yet generate LDREXD.
+ MOZ_CRASH("Unimplemented - 8-byte atomics are unsupported in Clang on i386");
+#else
+ if ((addr & 3) == 0) {
+ SharedMem<uint64_t*> ptr = SharedMem<uint64_t*>::shared(reinterpret_cast<uint64_t*>(addr));
+ uint64_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ *hibits = int32_t(value);
+ return int32_t(value >> 32);
+ }
+ printf("Unaligned read at 0x%08x\n", addr);
+ MOZ_CRASH();
+ return 0;
+#endif
+}
+
+int32_t
+Simulator::writeExDW(int32_t addr, int32_t value1, int32_t value2)
+{
+#if defined(__clang__) && defined(__i386)
+ // This is OK for now, we don't yet generate STREXD.
+ MOZ_CRASH("Unimplemented - 8-byte atomics are unsupported in Clang on i386");
+#else
+ if ((addr & 3) == 0) {
+ SharedMem<uint64_t*> ptr = SharedMem<uint64_t*>::shared(reinterpret_cast<uint64_t*>(addr));
+ uint64_t value = (uint64_t(value1) << 32) | uint32_t(value2);
+ bool held;
+ uint64_t expected = exclusiveMonitorGetAndClear(&held);
+ if (!held)
+ return 1;
+ uint64_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+ } else {
+ printf("Unaligned write at 0x%08x\n", addr);
+ MOZ_CRASH();
+ }
+#endif
+}
+
+uintptr_t
+Simulator::stackLimit() const
+{
+ return stackLimit_;
+}
+
+uintptr_t*
+Simulator::addressOfStackLimit()
+{
+ return &stackLimit_;
+}
+
+bool
+Simulator::overRecursed(uintptr_t newsp) const
+{
+ if (newsp == 0)
+ newsp = get_register(sp);
+ return newsp <= stackLimit();
+}
+
+bool
+Simulator::overRecursedWithExtra(uint32_t extra) const
+{
+ uintptr_t newsp = get_register(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Checks if the current instruction should be executed based on its condition
+// bits.
+bool
+Simulator::conditionallyExecute(SimInstruction* instr)
+{
+ switch (instr->conditionField()) {
+ case Assembler::EQ: return z_flag_;
+ case Assembler::NE: return !z_flag_;
+ case Assembler::CS: return c_flag_;
+ case Assembler::CC: return !c_flag_;
+ case Assembler::MI: return n_flag_;
+ case Assembler::PL: return !n_flag_;
+ case Assembler::VS: return v_flag_;
+ case Assembler::VC: return !v_flag_;
+ case Assembler::HI: return c_flag_ && !z_flag_;
+ case Assembler::LS: return !c_flag_ || z_flag_;
+ case Assembler::GE: return n_flag_ == v_flag_;
+ case Assembler::LT: return n_flag_ != v_flag_;
+ case Assembler::GT: return !z_flag_ && (n_flag_ == v_flag_);
+ case Assembler::LE: return z_flag_ || (n_flag_ != v_flag_);
+ case Assembler::AL: return true;
+ default: MOZ_CRASH();
+ }
+ return false;
+}
+
+// Calculate and set the Negative and Zero flags.
+void
+Simulator::setNZFlags(int32_t val)
+{
+ n_flag_ = (val < 0);
+ z_flag_ = (val == 0);
+}
+
+// Set the Carry flag.
+void
+Simulator::setCFlag(bool val)
+{
+ c_flag_ = val;
+}
+
+// Set the oVerflow flag.
+void
+Simulator::setVFlag(bool val)
+{
+ v_flag_ = val;
+}
+
+// Calculate C flag value for additions.
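+// The carry is set exactly when the unsigned sum left + right (+ carry)
+// exceeds 0xffffffff; for example, carryFrom(0xffffffff, 1, 0) returns true.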
+bool
+Simulator::carryFrom(int32_t left, int32_t right, int32_t carry)
+{
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ uint32_t urest = 0xffffffffU - uleft;
+ return (uright > urest) ||
+ (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+// Calculate C flag value for subtractions.
+bool
+Simulator::borrowFrom(int32_t left, int32_t right)
+{
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ return (uright > uleft);
+}
+
+// Calculate V flag value for additions and subtractions.
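+// For example, adding 0x7fffffff and 1 wraps to INT32_MIN, so
+// overflowFrom(INT32_MIN, 0x7fffffff, 1, true) returns true.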
+bool
+Simulator::overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addition)
+{
+ bool overflow;
+ if (addition) {
+ // Operands have the same sign.
+ overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+ // And operands and result have different sign.
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ } else {
+ // Operands have different signs.
+ overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+ // And first operand and result have different signs.
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ }
+ return overflow;
+}
+
+// Support for VFP comparisons.
+void
+Simulator::compute_FPSCR_Flags(double val1, double val2)
+{
+ if (mozilla::IsNaN(val1) || mozilla::IsNaN(val2)) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = true;
+ // All non-NaN cases.
+ } else if (val1 == val2) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = true;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ } else if (val1 < val2) {
+ n_flag_FPSCR_ = true;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ } else {
+ // Case when (val1 > val2).
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ }
+}
+
+void
+Simulator::copy_FPSCR_to_APSR()
+{
+ n_flag_ = n_flag_FPSCR_;
+ z_flag_ = z_flag_FPSCR_;
+ c_flag_ = c_flag_FPSCR_;
+ v_flag_ = v_flag_FPSCR_;
+}
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with register.
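+// An immediate shift amount of zero encodes special cases: LSR #0 and ASR #0
+// mean a shift by 32, and ROR #0 encodes RRX (not implemented here).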
+int32_t
+Simulator::getShiftRm(SimInstruction* instr, bool* carry_out)
+{
+ ShiftType shift = instr->shifttypeValue();
+ int shift_amount = instr->shiftAmountValue();
+ int32_t result = get_register(instr->rmValue());
+ if (instr->bit(4) == 0) {
+ // By immediate.
+ if (shift == ROR && shift_amount == 0) {
+ MOZ_CRASH("NYI");
+ return result;
+ }
+ if ((shift == LSR || shift == ASR) && shift_amount == 0)
+ shift_amount = 32;
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ if (result < 0) {
+ result = 0xffffffff;
+ *carry_out = true;
+ } else {
+ result = 0;
+ *carry_out = false;
+ }
+ } else {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ result = 0;
+ *carry_out = c_flag_;
+ } else {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ }
+ break;
+ }
+
+ case ROR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ // By register.
+ int rs = instr->rsValue();
+        shift_amount = get_register(rs) & 0xff;
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ } else {
+ MOZ_ASSERT(shift_amount >= 32);
+ if (result < 0) {
+ *carry_out = true;
+ result = 0xffffffff;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ } else if (shift_amount == 32) {
+ *carry_out = (result & 1) == 1;
+ result = 0;
+ } else {
+ MOZ_ASSERT(shift_amount > 32);
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ } else if (shift_amount == 32) {
+ *carry_out = (result < 0);
+ result = 0;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case ROR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+ }
+ return result;
+}
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with immediate.
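+// The operand is an 8-bit immediate rotated right by twice the 4-bit rotate
+// field of the instruction.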
+int32_t
+Simulator::getImm(SimInstruction* instr, bool* carry_out)
+{
+ int rotate = instr->rotateValue() * 2;
+ int immed8 = instr->immed8Value();
+ int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
+ return imm;
+}
+
+int32_t
+Simulator::processPU(SimInstruction* instr, int num_regs, int reg_size,
+ intptr_t* start_address, intptr_t* end_address)
+{
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
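+    // The PU field selects the addressing mode: decrement-after (da),
+    // increment-after (ia), decrement-before (db) or increment-before (ib).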
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_CRASH();
+ break;
+ case ia_x:
+ *start_address = rn_val;
+ *end_address = rn_val + (num_regs * reg_size) - reg_size;
+ rn_val = rn_val + (num_regs * reg_size);
+ break;
+ case db_x:
+ *start_address = rn_val - (num_regs * reg_size);
+ *end_address = rn_val - reg_size;
+ rn_val = *start_address;
+ break;
+ case ib_x:
+ *start_address = rn_val + reg_size;
+ *end_address = rn_val + (num_regs * reg_size);
+ rn_val = *end_address;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return rn_val;
+}
+
+// Addressing Mode 4 - Load and Store Multiple
+void
+Simulator::handleRList(SimInstruction* instr, bool load)
+{
+ int rlist = instr->rlistValue();
+ int num_regs = mozilla::CountPopulation32(rlist);
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ int32_t rn_val = processPU(instr, num_regs, sizeof(void*), &start_address, &end_address);
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+
+ // Catch null pointers a little earlier.
+ MOZ_ASSERT(start_address > 8191 || start_address < 0);
+
+ int reg = 0;
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ if (load) {
+ set_register(reg, *address);
+ } else {
+ *address = get_register(reg);
+ }
+ address += 1;
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ MOZ_ASSERT(end_address == ((intptr_t)address) - 4);
+ if (instr->hasW())
+ set_register(instr->rnValue(), rn_val);
+}
+
+// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
+void
+Simulator::handleVList(SimInstruction* instr)
+{
+ VFPRegPrecision precision = (instr->szValue() == 0) ? kSinglePrecision : kDoublePrecision;
+ int operand_size = (precision == kSinglePrecision) ? 4 : 8;
+ bool load = (instr->VLValue() == 0x1);
+
+ int vd;
+ int num_regs;
+ vd = instr->VFPDRegValue(precision);
+ if (precision == kSinglePrecision)
+ num_regs = instr->immed8Value();
+ else
+ num_regs = instr->immed8Value() / 2;
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ int32_t rn_val = processPU(instr, num_regs, operand_size, &start_address, &end_address);
+
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ for (int reg = vd; reg < vd + num_regs; reg++) {
+ if (precision == kSinglePrecision) {
+ if (load)
+ set_s_register_from_sinteger(reg, readW(reinterpret_cast<int32_t>(address), instr));
+ else
+ writeW(reinterpret_cast<int32_t>(address), get_sinteger_from_s_register(reg), instr);
+ address += 1;
+ } else {
+ if (load) {
+ int32_t data[] = {
+ readW(reinterpret_cast<int32_t>(address), instr),
+ readW(reinterpret_cast<int32_t>(address + 1), instr)
+ };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(reg, d);
+ } else {
+ int32_t data[2];
+ double d;
+ get_double_from_d_register(reg, &d);
+ memcpy(data, &d, 8);
+ writeW(reinterpret_cast<int32_t>(address), data[0], instr);
+ writeW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
+ }
+ address += 2;
+ }
+ }
+ MOZ_ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ if (instr->hasW())
+ set_register(instr->rnValue(), rn_val);
+}
+
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the r1 result register contains a bogus value, which
+// is fine because it is caller-saved.
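+// Each Prototype_* typedef below mirrors the C++ signature of one class of
+// redirected runtime call; redirection->type() in softwareInterrupt() selects
+// which prototype the target address is cast to.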
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int32_t arg0);
+typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
+typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
+typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3);
+typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4);
+typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5);
+typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6);
+typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6, int32_t arg7);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
+typedef int32_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int64_Double)(double arg0);
+typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1, int32_t arg2);
+typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1, int32_t arg2,
+ int32_t arg3);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+
+typedef double (*Prototype_DoubleInt)(double arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
+
+// Fill the volatile registers with scratch values.
+//
+// Some of the ABI calls assume that the float registers are not scratched,
+// even though the ABI defines them as volatile, as a performance optimization.
+// These are all calls that pass their operands in integer registers, so for now
+// the simulator does not scratch the float registers for those calls. We should
+// try to narrow this down further in the future.
+//
+void
+Simulator::scratchVolatileRegisters(bool scratchFloat)
+{
+ int32_t scratch_value = 0xa5a5a5a5 ^ uint32_t(icount_);
+ set_register(r0, scratch_value);
+ set_register(r1, scratch_value);
+ set_register(r2, scratch_value);
+ set_register(r3, scratch_value);
+ set_register(r12, scratch_value); // Intra-Procedure-call scratch register.
+ set_register(r14, scratch_value); // Link register.
+
+ if (scratchFloat) {
+ uint64_t scratch_value_d = 0x5a5a5a5a5a5a5a5aLU ^ uint64_t(icount_) ^ (uint64_t(icount_) << 30);
+ for (uint32_t i = d0; i < d8; i++)
+ set_d_register(i, &scratch_value_d);
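+        // d8-d15 are callee-saved in the ARM ABI and are therefore left
+        // untouched here.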
+ for (uint32_t i = d16; i < FloatRegisters::TotalPhys; i++)
+ set_d_register(i, &scratch_value_d);
+ }
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void
+Simulator::softwareInterrupt(SimInstruction* instr)
+{
+ int svc = instr->svcValue();
+ switch (svc) {
+ case kCallRtRedirected: {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = get_register(r0);
+ int32_t arg1 = get_register(r1);
+ int32_t arg2 = get_register(r2);
+ int32_t arg3 = get_register(r3);
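+        // Per the ARM ABI, only the first four word-sized arguments are passed
+        // in r0-r3; the remaining arguments are read from the stack.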
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+ int32_t arg4 = stack_pointer[0];
+ int32_t arg5 = stack_pointer[1];
+
+ int32_t saved_lr = get_register(lr);
+ intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target = reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ // The ARM backend makes calls to __aeabi_idivmod and
+ // __aeabi_uidivmod assuming that the float registers are
+ // non-volatile as a performance optimization, so the float
+            // registers must not be scratched when calling these.
+ bool scratchFloat = target != __aeabi_idivmod && target != __aeabi_uidivmod;
+ scratchVolatileRegisters(/* scratchFloat = */ scratchFloat);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+            scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+            scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target = reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target = reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target = reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[2];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target = reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[2];
+ int32_t arg7 = stack_pointer[3];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target = reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target = reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target = reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ if (UseHardFpABI())
+ get_float_from_s_register(0, &fval0);
+ else
+ fval0 = mozilla::BitwiseCast<float>(arg0);
+ Prototype_Float32_Float32 target = reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target = reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_DoubleInt target = reinterpret_cast<Prototype_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = get_register(0);
+ double dval0;
+ if (UseHardFpABI())
+ get_double_from_d_register(0, &dval0);
+ else
+ dval0 = get_double_from_register_pair(2);
+ Prototype_Double_IntDouble target = reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = get_register(0);
+ double dval0;
+ if (UseHardFpABI())
+ get_double_from_d_register(0, &dval0);
+ else
+ dval0 = get_double_from_register_pair(2);
+ Prototype_Int_IntDouble target = reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval;
+ int32_t result;
+ Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval);
+ result = target(dval, arg0, arg1);
+ } else {
+ dval = get_double_from_register_pair(0);
+ result = target(dval, arg2, arg3);
+ }
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval;
+ int32_t result;
+ Prototype_Int_IntDoubleIntInt target = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval);
+ result = target(arg0, dval, arg1, arg2);
+ } else {
+ dval = get_double_from_register_pair(2);
+ result = target(arg0, dval, arg4, arg5);
+ }
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+            // The last argument is passed on the stack.
+ getFpFromStack(stack_pointer, &dval2);
+ Prototype_Double_DoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+            // The last two arguments are passed on the stack.
+ getFpFromStack(stack_pointer, &dval2);
+ getFpFromStack(stack_pointer + 2, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ set_register(lr, saved_lr);
+ set_pc(get_register(lr));
+ break;
+ }
+ case kBreakpoint: {
+ ArmDebugger dbg(this);
+ dbg.debug();
+ break;
+ }
+      default: { // Stop uses all codes greater than or equal to 1 << 23.
+ if (svc >= (1 << 23)) {
+ uint32_t code = svc & kStopCodeMask;
+ if (isWatchedStop(code))
+ increaseStopCounter(code);
+
+            // Stop if it is enabled; otherwise skip over the stop instruction
+            // and the message address.
+ if (isEnabledStop(code)) {
+ ArmDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // This is not a valid svc code.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ }
+}
+
+void
+Simulator::canonicalizeNaN(double* value)
+{
+ if (!JitOptions.wasmTestMode && FPSCR_default_NaN_mode_)
+ *value = JS::CanonicalizeNaN(*value);
+}
+
+void
+Simulator::canonicalizeNaN(float* value)
+{
+ if (!JitOptions.wasmTestMode && FPSCR_default_NaN_mode_)
+ *value = JS::CanonicalizeNaN(*value);
+}
+
+// Stop helper functions.
+bool
+Simulator::isStopInstruction(SimInstruction* instr)
+{
+ return (instr->bits(27, 24) == 0xF) && (instr->svcValue() >= kStopCode);
+}
+
+bool Simulator::isWatchedStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ return code < kNumOfWatchedStops;
+}
+
+bool
+Simulator::isEnabledStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ // Unwatched stops are always enabled.
+ return !isWatchedStop(code) || !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void
+Simulator::enableStop(uint32_t code)
+{
+ MOZ_ASSERT(isWatchedStop(code));
+ if (!isEnabledStop(code))
+ watched_stops_[code].count &= ~kStopDisabledBit;
+}
+
+void
+Simulator::disableStop(uint32_t code)
+{
+ MOZ_ASSERT(isWatchedStop(code));
+ if (isEnabledStop(code))
+ watched_stops_[code].count |= kStopDisabledBit;
+}
+
+void
+Simulator::increaseStopCounter(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(isWatchedStop(code));
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ printf("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watched_stops_[code].count = 0;
+ enableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void
+Simulator::printStopInfo(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if (!isWatchedStop(code)) {
+ printf("Stop not watched.");
+ } else {
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+ }
+}
+
+// Instruction types 0 and 1 are both rolled into one function because they only
+// differ in the handling of the shifter_operand.
+void
+Simulator::decodeType01(SimInstruction* instr)
+{
+ int type = instr->typeValue();
+ if (type == 0 && instr->isSpecialType0()) {
+ // Multiply instruction or extra loads and stores.
+ if (instr->bits(7, 4) == 9) {
+ if (instr->bit(24) == 0) {
+ // Raw field decoding here. Multiply instructions have their Rd
+ // in funny places.
+ int rn = instr->rnValue();
+ int rm = instr->rmValue();
+ int rs = instr->rsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t rm_val = get_register(rm);
+ if (instr->bit(23) == 0) {
+ if (instr->bit(21) == 0) {
+ // The MUL instruction description (A 4.1.33) refers to
+ // Rd as being the destination for the operation, but it
+ // confusingly uses the Rn field to encode it.
+ int rd = rn; // Remap the rn field to the Rd register.
+ int32_t alu_out = rm_val * rs_val;
+ set_register(rd, alu_out);
+ if (instr->hasS())
+ setNZFlags(alu_out);
+ } else {
+ int rd = instr->rdValue();
+ int32_t acc_value = get_register(rd);
+ if (instr->bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers
+ // to the order of registers as "Rd, Rm, Rs,
+ // Rn". But confusingly it uses the Rn field to
+ // encode the Rd register and the Rd field to encode
+ // the Rn register.
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value + mul_out;
+ set_register(rn, result);
+ } else {
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value - mul_out;
+ set_register(rn, result);
+ }
+ }
+ } else {
+ // The signed/long multiply instructions use the terms RdHi
+ // and RdLo when referring to the target registers. They are
+ // mapped to the Rn and Rd fields as follows:
+ // RdLo == Rd
+                //  RdHi == Rn (This is confusingly stored in the variable rd_hi here
+ // because the mul instruction from above uses the
+ // Rn field to encode the Rd register. Good luck figuring
+ // this out without reading the ARM instruction manual
+ // at a very detailed level.)
+ int rd_hi = rn; // Remap the rn field to the RdHi register.
+ int rd_lo = instr->rdValue();
+ int32_t hi_res = 0;
+ int32_t lo_res = 0;
+ if (instr->bit(22) == 1) {
+ int64_t left_op = static_cast<int32_t>(rm_val);
+ int64_t right_op = static_cast<int32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
+ } else {
+ // Unsigned multiply.
+ uint64_t left_op = static_cast<uint32_t>(rm_val);
+ uint64_t right_op = static_cast<uint32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
+ }
+ set_register(rd_lo, lo_res);
+ set_register(rd_hi, hi_res);
+ if (instr->hasS())
+ MOZ_CRASH();
+ }
+ } else {
+ if (instr->bits(disasm::ExclusiveOpHi, disasm::ExclusiveOpLo) == disasm::ExclusiveOpcode) {
+ // Load-exclusive / store-exclusive.
+ if (instr->bit(disasm::ExclusiveLoad)) {
+ int rn = instr->rnValue();
+ int rt = instr->rtValue();
+ int32_t address = get_register(rn);
+ switch (instr->bits(disasm::ExclusiveSizeHi, disasm::ExclusiveSizeLo)) {
+ case disasm::ExclusiveWord:
+ set_register(rt, readExW(address, instr));
+ break;
+ case disasm::ExclusiveDouble: {
+ MOZ_ASSERT((rt % 2) == 0);
+ int32_t hibits;
+ int32_t lobits = readExDW(address, &hibits);
+ set_register(rt, lobits);
+ set_register(rt+1, hibits);
+ break;
+ }
+ case disasm::ExclusiveByte:
+ set_register(rt, readExBU(address));
+ break;
+ case disasm::ExclusiveHalf:
+ set_register(rt, readExHU(address, instr));
+ break;
+ }
+ } else {
+ int rn = instr->rnValue();
+ int rd = instr->rdValue();
+ int rt = instr->bits(3,0);
+ int32_t address = get_register(rn);
+ int32_t value = get_register(rt);
+ int32_t result = 0;
+ switch (instr->bits(disasm::ExclusiveSizeHi, disasm::ExclusiveSizeLo)) {
+ case disasm::ExclusiveWord:
+ result = writeExW(address, value, instr);
+ break;
+ case disasm::ExclusiveDouble: {
+ MOZ_ASSERT((rt % 2) == 0);
+ int32_t value2 = get_register(rt+1);
+ result = writeExDW(address, value, value2);
+ break;
+ }
+ case disasm::ExclusiveByte:
+ result = writeExB(address, (uint8_t)value);
+ break;
+ case disasm::ExclusiveHalf:
+ result = writeExH(address, (uint16_t)value, instr);
+ break;
+ }
+ set_register(rd, result);
+ }
+ } else {
+ MOZ_CRASH(); // Not used atm
+ }
+ }
+ } else {
+ // Extra load/store instructions.
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t addr = 0;
+ if (instr->bit(22) == 0) {
+ int rm = instr->rmValue();
+ int32_t rm_val = get_register(rm);
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= rm_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += rm_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= rm_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ case ib_x:
+ rn_val += rm_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ default:
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ } else {
+ int32_t imm_val = (instr->immedHValue() << 4) | instr->immedLValue();
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= imm_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += imm_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= imm_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ case ib_x:
+ rn_val += imm_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ default:
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ if ((instr->bits(7, 4) & 0xd) == 0xd && instr->bit(20) == 0) {
+ MOZ_ASSERT((rd % 2) == 0);
+ if (instr->hasH()) {
+ // The strd instruction.
+ int32_t value1 = get_register(rd);
+ int32_t value2 = get_register(rd+1);
+ writeDW(addr, value1, value2);
+ } else {
+ // The ldrd instruction.
+ int* rn_data = readDW(addr);
+ set_dw_register(rd, rn_data);
+ }
+ } else if (instr->hasH()) {
+ if (instr->hasSign()) {
+ if (instr->hasL()) {
+ int16_t val = readH(addr, instr);
+ set_register(rd, val);
+ } else {
+ int16_t val = get_register(rd);
+ writeH(addr, val, instr);
+ }
+ } else {
+ if (instr->hasL()) {
+ uint16_t val = readHU(addr, instr);
+ set_register(rd, val);
+ } else {
+ uint16_t val = get_register(rd);
+ writeH(addr, val, instr);
+ }
+ }
+ } else {
+ // Signed byte loads.
+ MOZ_ASSERT(instr->hasSign());
+ MOZ_ASSERT(instr->hasL());
+ int8_t val = readB(addr);
+ set_register(rd, val);
+ }
+ return;
+ }
+ } else if ((type == 0) && instr->isMiscType0()) {
+ if (instr->bits(7, 4) == 0) {
+ if (instr->bit(21) == 0) {
+ // mrs
+ int rd = instr->rdValue();
+ uint32_t flags;
+ if (instr->bit(22) == 0) {
+ // CPSR. Note: The Q flag is not yet implemented!
+ flags = (n_flag_ << 31) |
+ (z_flag_ << 30) |
+ (c_flag_ << 29) |
+ (v_flag_ << 28);
+ } else {
+ // SPSR
+ MOZ_CRASH();
+ }
+ set_register(rd, flags);
+ } else {
+ // msr
+ if (instr->bits(27, 23) == 2) {
+ // Register operand. For now we only emit mask 0b1100.
+ int rm = instr->rmValue();
+ mozilla::DebugOnly<uint32_t> mask = instr->bits(19, 16);
+ MOZ_ASSERT(mask == (3 << 2));
+
+ uint32_t flags = get_register(rm);
+ n_flag_ = (flags >> 31) & 1;
+ z_flag_ = (flags >> 30) & 1;
+ c_flag_ = (flags >> 29) & 1;
+ v_flag_ = (flags >> 28) & 1;
+ } else {
+ MOZ_CRASH();
+ }
+ }
+ } else if (instr->bits(22, 21) == 1) {
+ int rm = instr->rmValue();
+ switch (instr->bits(7, 4)) {
+ case 1: // BX
+ set_pc(get_register(rm));
+ break;
+ case 3: { // BLX
+ uint32_t old_pc = get_pc();
+ set_pc(get_register(rm));
+ set_register(lr, old_pc + SimInstruction::kInstrSize);
+ break;
+ }
+ case 7: { // BKPT
+ fprintf(stderr, "Simulator hit BKPT.\n");
+ if (getenv("ARM_SIM_DEBUGGER")) {
+ ArmDebugger dbg(this);
+ dbg.debug();
+ } else {
+ fprintf(stderr, "Use ARM_SIM_DEBUGGER=1 to enter the builtin debugger.\n");
+ MOZ_CRASH("ARM simulator breakpoint");
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+ } else if (instr->bits(22, 21) == 3) {
+ int rm = instr->rmValue();
+ int rd = instr->rdValue();
+ switch (instr->bits(7, 4)) {
+ case 1: { // CLZ
+ uint32_t bits = get_register(rm);
+ int leading_zeros = 0;
+ if (bits == 0)
+ leading_zeros = 32;
+ else
+ leading_zeros = mozilla::CountLeadingZeroes32(bits);
+ set_register(rd, leading_zeros);
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ } else {
+ printf("%08x\n", instr->instructionBits());
+ MOZ_CRASH();
+ }
+ } else if ((type == 1) && instr->isNopType1()) {
+ // NOP.
+ } else {
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t shifter_operand = 0;
+        bool shifter_carry_out = false;
+ if (type == 0) {
+ shifter_operand = getShiftRm(instr, &shifter_carry_out);
+ } else {
+ MOZ_ASSERT(instr->typeValue() == 1);
+ shifter_operand = getImm(instr, &shifter_carry_out);
+ }
+ int32_t alu_out;
+ switch (instr->opcodeField()) {
+ case OpAnd:
+ alu_out = rn_val & shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpEor:
+ alu_out = rn_val ^ shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpSub:
+ alu_out = rn_val - shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false));
+ }
+ break;
+ case OpRsb:
+ alu_out = shifter_operand - rn_val;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(shifter_operand, rn_val));
+ setVFlag(overflowFrom(alu_out, shifter_operand, rn_val, false));
+ }
+ break;
+ case OpAdd:
+ alu_out = rn_val + shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
+ break;
+ case OpAdc:
+ alu_out = rn_val + shifter_operand + getCarry();
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand, getCarry()));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
+ break;
+ case OpSbc:
+ alu_out = rn_val - shifter_operand - (getCarry() == 0 ? 1 : 0);
+ set_register(rd, alu_out);
+ if (instr->hasS())
+ MOZ_CRASH();
+ break;
+ case OpRsc:
+ alu_out = shifter_operand - rn_val - (getCarry() == 0 ? 1 : 0);
+ set_register(rd, alu_out);
+ if (instr->hasS())
+ MOZ_CRASH();
+ break;
+ case OpTst:
+ if (instr->hasS()) {
+ alu_out = rn_val & shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ } else {
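+            // With the S bit clear, this encoding is movw (load a 16-bit
+            // immediate, zero-extended).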
+ alu_out = instr->immedMovwMovtValue();
+ set_register(rd, alu_out);
+ }
+ break;
+ case OpTeq:
+ if (instr->hasS()) {
+ alu_out = rn_val ^ shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ case OpCmp:
+ if (instr->hasS()) {
+ alu_out = rn_val - shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false));
+ } else {
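+            // With the S bit clear, this encoding is movt (set the top
+            // halfword from the immediate, keeping the low 16 bits).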
+ alu_out = (get_register(rd) & 0xffff) |
+ (instr->immedMovwMovtValue() << 16);
+ set_register(rd, alu_out);
+ }
+ break;
+ case OpCmn:
+ if (instr->hasS()) {
+ alu_out = rn_val + shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ case OpOrr:
+ alu_out = rn_val | shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpMov:
+ alu_out = shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpBic:
+ alu_out = rn_val & ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpMvn:
+ alu_out = ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+void
+Simulator::decodeType2(SimInstruction* instr)
+{
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t im_val = instr->offset12Value();
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= im_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += im_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= im_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ case ib_x:
+ rn_val += im_val;
+ addr = rn_val;
+ if (instr->hasW())
+ set_register(rn, rn_val);
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ if (instr->hasB()) {
+ if (instr->hasL()) {
+ uint8_t val = readBU(addr);
+ set_register(rd, val);
+ } else {
+ uint8_t val = get_register(rd);
+ writeB(addr, val);
+ }
+ } else {
+ if (instr->hasL())
+ set_register(rd, readW(addr, instr, AllowUnaligned));
+ else
+ writeW(addr, get_register(rd), instr, AllowUnaligned);
+ }
+}
+
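+// Rotate a 32-bit value right by 8 * rotate bits. This implements the rotation
+// field of the byte/halfword extension instructions (sxtb, sxth, uxtb, uxth,
+// uxtab, uxtah).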
+static uint32_t
+rotateBytes(uint32_t val, int32_t rotate)
+{
+ switch (rotate) {
+ default:
+ return val;
+ case 1:
+ return (val >> 8) | (val << 24);
+ case 2:
+ return (val >> 16) | (val << 16);
+ case 3:
+ return (val >> 24) | (val << 8);
+ }
+}
+
+void
+Simulator::decodeType3(SimInstruction* instr)
+{
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+    bool shifter_carry_out = false;
+ int32_t shifter_operand = getShiftRm(instr, &shifter_carry_out);
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ MOZ_CRASH();
+ break;
+ case ia_x: {
+ if (instr->bit(4) == 0) {
+ // Memop.
+ } else {
+ if (instr->bit(5) == 0) {
+ switch (instr->bits(22, 21)) {
+ case 0:
+ if (instr->bit(20) == 0) {
+ if (instr->bit(6) == 0) {
+ // Pkhbt.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->rmValue());
+ int32_t shift = instr->bits(11, 7);
+ rm_val <<= shift;
+ set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+ } else {
+ // Pkhtb.
+ uint32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->rmValue());
+ int32_t shift = instr->bits(11, 7);
+ if (shift == 0)
+ shift = 32;
+ rm_val >>= shift;
+ set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 1:
+ MOZ_CRASH();
+ break;
+ case 2:
+ MOZ_CRASH();
+ break;
+ case 3: {
+ // Usat.
+ int32_t sat_pos = instr->bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->bits(11, 7);
+ int32_t shift_type = instr->bit(6);
+ int32_t rm_val = get_register(instr->rmValue());
+ if (shift_type == 0) // LSL
+ rm_val <<= shift;
+ else // ASR
+ rm_val >>= shift;
+
+                        // If saturation occurs, the Q flag should be set in the
+                        // CPSR. The Q flag is not modelled by this simulator, so
+                        // saturation cannot be observed.
+ if (rm_val > sat_val)
+ rm_val = sat_val;
+ else if (rm_val < 0)
+ rm_val = 0;
+ set_register(rd, rm_val);
+ break;
+ }
+ }
+ } else {
+ switch (instr->bits(22, 21)) {
+ case 0:
+ MOZ_CRASH();
+ break;
+ case 1:
+ if (instr->bits(7,4) == 7 && instr->bits(19,16) == 15) {
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ if (instr->bit(20)) {
+ // Sxth.
+ set_register(rd, (int32_t)(int16_t)(rm_val & 0xFFFF));
+                    } else {
+ // Sxtb.
+ set_register(rd, (int32_t)(int8_t)(rm_val & 0xFF));
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ if ((instr->bit(20) == 0) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxtb16.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 3:
+ if ((instr->bit(20) == 0) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, rn_val + (rm_val & 0xFF));
+ }
+ } else if ((instr->bit(20) == 1) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxth.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFFFF));
+ } else {
+ // Uxtah.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, rn_val + (rm_val & 0xFFFF));
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
+ }
+ return;
+ }
+ break;
+ }
+ case db_x: { // sudiv
+ if (instr->bit(22) == 0x0 && instr->bit(20) == 0x1 &&
+ instr->bits(15,12) == 0x0f && instr->bits(7, 4) == 0x1) {
+ if (!instr->hasW()) {
+ // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs.
+ int rm = instr->rmValue();
+ int32_t rm_val = get_register(rm);
+ int rs = instr->rsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t ret_val = 0;
+ MOZ_ASSERT(rs_val != 0);
+ if ((rm_val == INT32_MIN) && (rs_val == -1))
+ ret_val = INT32_MIN;
+ else
+ ret_val = rm_val / rs_val;
+ set_register(rn, ret_val);
+ return;
+ } else {
+ // udiv (in V8 notation matching ARM ISA format) rn = rm/rs.
+ int rm = instr->rmValue();
+ uint32_t rm_val = get_register(rm);
+ int rs = instr->rsValue();
+ uint32_t rs_val = get_register(rs);
+ uint32_t ret_val = 0;
+ MOZ_ASSERT(rs_val != 0);
+ ret_val = rm_val / rs_val;
+ set_register(rn, ret_val);
+ return;
+ }
+ }
+
+ addr = rn_val - shifter_operand;
+ if (instr->hasW())
+ set_register(rn, addr);
+ break;
+ }
+ case ib_x: {
+ if (instr->hasW() && (instr->bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->bits(11, 7));
+ uint32_t msbit = widthminus1 + lsbit;
+ if (msbit <= 31) {
+ if (instr->bit(22)) {
+ // ubfx - unsigned bitfield extract.
+ uint32_t rm_val = static_cast<uint32_t>(get_register(instr->rmValue()));
+ uint32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->rdValue(), extr_val);
+ } else {
+ // sbfx - signed bitfield extract.
+ int32_t rm_val = get_register(instr->rmValue());
+ int32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->rdValue(), extr_val);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ return;
+ } else if (!instr->hasW() && (instr->bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->bits(20, 16));
+ if (msbit >= lsbit) {
+ // bfc or bfi - bitfield clear/insert.
+ uint32_t rd_val =
+ static_cast<uint32_t>(get_register(instr->rdValue()));
+ uint32_t bitcount = msbit - lsbit + 1;
+ uint32_t mask = (1 << bitcount) - 1;
+ rd_val &= ~(mask << lsbit);
+ if (instr->rmValue() != 15) {
+ // bfi - bitfield insert.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->rmValue()));
+ rm_val &= mask;
+ rd_val |= rm_val << lsbit;
+ }
+ set_register(instr->rdValue(), rd_val);
+ } else {
+ MOZ_CRASH();
+ }
+ return;
+ } else {
+ addr = rn_val + shifter_operand;
+ if (instr->hasW())
+ set_register(rn, addr);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ if (instr->hasB()) {
+ if (instr->hasL()) {
+ uint8_t byte = readB(addr);
+ set_register(rd, byte);
+ } else {
+ uint8_t byte = get_register(rd);
+ writeB(addr, byte);
+ }
+ } else {
+ if (instr->hasL())
+ set_register(rd, readW(addr, instr, AllowUnaligned));
+ else
+ writeW(addr, get_register(rd), instr, AllowUnaligned);
+ }
+}
+
+void
+Simulator::decodeType4(SimInstruction* instr)
+{
+ // Only allowed to be set in privileged mode.
+ MOZ_ASSERT(instr->bit(22) == 0);
+ bool load = instr->hasL();
+ handleRList(instr, load);
+}
+
+void
+Simulator::decodeType5(SimInstruction* instr)
+{
+ int off = instr->sImmed24Value() << 2;
+ intptr_t pc_address = get_pc();
+ if (instr->hasLink())
+ set_register(lr, pc_address + SimInstruction::kInstrSize);
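+    // get_register(pc) is expected to include the ARM pipeline offset, so the
+    // branch target is computed relative to the instruction address plus 8.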
+ int pc_reg = get_register(pc);
+ set_pc(pc_reg + off);
+}
+
+void
+Simulator::decodeType6(SimInstruction* instr)
+{
+ decodeType6CoprocessorIns(instr);
+}
+
+void
+Simulator::decodeType7(SimInstruction* instr)
+{
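+    // Bit 24 set indicates a software interrupt (svc). Otherwise, instructions
+    // with bit 4 set that lie outside the VFP space (bits 11:9 != 0b101) are
+    // coprocessor register transfers; everything else is decoded as VFP.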
+ if (instr->bit(24) == 1)
+ softwareInterrupt(instr);
+ else if (instr->bit(4) == 1 && instr->bits(11,9) != 5)
+ decodeType7CoprocessorIns(instr);
+ else
+ decodeTypeVFP(instr);
+}
+
+void
+Simulator::decodeType7CoprocessorIns(SimInstruction* instr)
+{
+ if (instr->bit(20) == 0) {
+ // MCR, MCR2
+ if (instr->coprocessorValue() == 15) {
+ int opc1 = instr->bits(23,21);
+ int opc2 = instr->bits(7,5);
+ int CRn = instr->bits(19,16);
+ int CRm = instr->bits(3,0);
+ if (opc1 == 0 && opc2 == 4 && CRn == 7 && CRm == 10) {
+ // ARMv6 DSB instruction. We do not use DSB.
+ MOZ_CRASH("DSB not implemented");
+ } else if (opc1 == 0 && opc2 == 5 && CRn == 7 && CRm == 10) {
+ // ARMv6 DMB instruction.
+ AtomicOperations::fenceSeqCst();
+            } else if (opc1 == 0 && opc2 == 4 && CRn == 7 && CRm == 5) {
+ // ARMv6 ISB instruction. We do not use ISB.
+ MOZ_CRASH("ISB not implemented");
+            } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ // MRC, MRC2
+ MOZ_CRASH();
+ }
+}
+
+void
+Simulator::decodeTypeVFP(SimInstruction* instr)
+{
+ MOZ_ASSERT(instr->typeValue() == 7 && instr->bit(24) == 0);
+ MOZ_ASSERT(instr->bits(11, 9) == 0x5);
+
+    // Obtain the register codes for the appropriate precision (single or double).
+ VFPRegPrecision precision = (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+ int vm = instr->VFPMRegValue(precision);
+ int vd = instr->VFPDRegValue(precision);
+ int vn = instr->VFPNRegValue(precision);
+
+ if (instr->bit(4) == 0) {
+ if (instr->opc1Value() == 0x7) {
+ // Other data processing instructions.
+ if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x1)) {
+ // vmov register to register.
+ if (instr->szValue() == 0x1) {
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
+ double temp;
+ get_double_from_d_register(m, &temp);
+ set_d_register_from_double(d, temp);
+ } else {
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float temp;
+ get_float_from_s_register(m, &temp);
+ set_s_register_from_float(d, temp);
+ }
+ } else if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x3)) {
+ // vabs
+ if (instr->szValue() == 0x1) {
+ union {
+ double f64;
+ uint64_t u64;
+ } u;
+ get_double_from_d_register(vm, &u.f64);
+ u.u64 &= 0x7fffffffffffffffu;
+ double dd_value = u.f64;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ union {
+ float f32;
+ uint32_t u32;
+ } u;
+ get_float_from_s_register(vm, &u.f32);
+ u.u32 &= 0x7fffffffu;
+ float fd_value = u.f32;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if ((instr->opc2Value() == 0x1) && (instr->opc3Value() == 0x1)) {
+ // vneg
+ if (instr->szValue() == 0x1) {
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = -dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = -fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if ((instr->opc2Value() == 0x7) && (instr->opc3Value() == 0x3)) {
+ decodeVCVTBetweenDoubleAndSingle(instr);
+ } else if ((instr->opc2Value() == 0x8) && (instr->opc3Value() & 0x1)) {
+ decodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->opc2Value() == 0xA) && (instr->opc3Value() == 0x3) &&
+ (instr->bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>.
+ int fraction_bits = 32 - ((instr->bits(3, 0) << 1) | instr->bit(5));
+ int fixed_value = get_sinteger_from_s_register(vd * 2);
+ double divide = 1 << fraction_bits;
+ set_d_register_from_double(vd, fixed_value / divide);
+ } else if (((instr->opc2Value() >> 1) == 0x6) &&
+ (instr->opc3Value() & 0x1)) {
+ decodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if (((instr->opc2Value() == 0x4) || (instr->opc2Value() == 0x5)) &&
+ (instr->opc3Value() & 0x1)) {
+ decodeVCMP(instr);
+ } else if (((instr->opc2Value() == 0x1)) && (instr->opc3Value() == 0x3)) {
+ // vsqrt
+ if (instr->szValue() == 0x1) {
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = std::sqrt(dm_value);
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = std::sqrt(fm_value);
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if (instr->opc3Value() == 0x0) {
+ // vmov immediate.
+ if (instr->szValue() == 0x1) {
+ set_d_register_from_double(vd, instr->doubleImmedVmov());
+ } else {
+ // vmov.f32 immediate.
+ set_s_register_from_float(vd, instr->float32ImmedVmov());
+ }
+ } else {
+ decodeVCVTBetweenFloatingPointAndIntegerFrac(instr);
+ }
+ } else if (instr->opc1Value() == 0x3) {
+ if (instr->szValue() != 0x1) {
+ if (instr->opc3Value() & 0x1) {
+ // vsub
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value - fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ // vadd
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value + fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else {
+ if (instr->opc3Value() & 0x1) {
+ // vsub
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value - dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ // vadd
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value + dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ }
+ } else if ((instr->opc1Value() == 0x2) && !(instr->opc3Value() & 0x1)) {
+ // vmul
+ if (instr->szValue() != 0x1) {
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value * fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value * dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ } else if ((instr->opc1Value() == 0x0)) {
+ // vmla, vmls
+ const bool is_vmls = (instr->opc3Value() & 0x1);
+
+ if (instr->szValue() != 0x1)
+ MOZ_CRASH("Not used by V8.");
+
+ double dd_val;
+ get_double_from_d_register(vd, &dd_val);
+ double dn_val;
+ get_double_from_d_register(vn, &dn_val);
+ double dm_val;
+ get_double_from_d_register(vm, &dm_val);
+
+            // Note: we do the mul and the add/sub in separate steps so that the
+            // intermediate product is rounded to double precision instead of
+            // being kept at a higher precision, e.g. by a fused multiply-add.
+ set_d_register_from_double(vd, dn_val * dm_val);
+ double temp;
+ get_double_from_d_register(vd, &temp);
+ if (is_vmls)
+ temp = dd_val - temp;
+ else
+ temp = dd_val + temp;
+ canonicalizeNaN(&temp);
+ set_d_register_from_double(vd, temp);
+ } else if ((instr->opc1Value() == 0x4) && !(instr->opc3Value() & 0x1)) {
+ // vdiv
+ if (instr->szValue() != 0x1) {
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value / fm_value;
+ div_zero_vfp_flag_ = (fm_value == 0);
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value / dm_value;
+ div_zero_vfp_flag_ = (dm_value == 0);
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ if (instr->VCValue() == 0x0 && instr->VAValue() == 0x0) {
+ decodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar).
+ int vd = instr->bits(19, 16) | (instr->bit(7) << 4);
+ double dd_value;
+ get_double_from_d_register(vd, &dd_value);
+ int32_t data[2];
+ memcpy(data, &dd_value, 8);
+ data[instr->bit(21)] = get_register(instr->rtValue());
+ memcpy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register).
+ int vn = instr->bits(19, 16) | (instr->bit(7) << 4);
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ int32_t data[2];
+ memcpy(data, &dn_value, 8);
+ set_register(instr->rtValue(), data[instr->bit(21)]);
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
+ (instr->bits(19, 16) == 0x1)) {
+ // vmrs
+ uint32_t rt = instr->rtValue();
+ if (rt == 0xF) {
+ copy_FPSCR_to_APSR();
+ } else {
+ // Emulate FPSCR from the Simulator flags.
+ uint32_t fpscr = (n_flag_FPSCR_ << 31) |
+ (z_flag_FPSCR_ << 30) |
+ (c_flag_FPSCR_ << 29) |
+ (v_flag_FPSCR_ << 28) |
+ (FPSCR_default_NaN_mode_ << 25) |
+ (inexact_vfp_flag_ << 4) |
+ (underflow_vfp_flag_ << 3) |
+ (overflow_vfp_flag_ << 2) |
+ (div_zero_vfp_flag_ << 1) |
+ (inv_op_vfp_flag_ << 0) |
+ (FPSCR_rounding_mode_);
+ set_register(rt, fpscr);
+ }
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
+ (instr->bits(19, 16) == 0x1)) {
+ // vmsr
+ uint32_t rt = instr->rtValue();
+ if (rt == pc) {
+ MOZ_CRASH();
+ } else {
+ uint32_t rt_value = get_register(rt);
+ n_flag_FPSCR_ = (rt_value >> 31) & 1;
+ z_flag_FPSCR_ = (rt_value >> 30) & 1;
+ c_flag_FPSCR_ = (rt_value >> 29) & 1;
+ v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
+ inexact_vfp_flag_ = (rt_value >> 4) & 1;
+ underflow_vfp_flag_ = (rt_value >> 3) & 1;
+ overflow_vfp_flag_ = (rt_value >> 2) & 1;
+ div_zero_vfp_flag_ = (rt_value >> 1) & 1;
+ inv_op_vfp_flag_ = (rt_value >> 0) & 1;
+ FPSCR_rounding_mode_ =
+ static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ }
+}
+
+void
+Simulator::decodeVMOVBetweenCoreAndSinglePrecisionRegisters(SimInstruction* instr)
+{
+ MOZ_ASSERT(instr->bit(4) == 1 &&
+ instr->VCValue() == 0x0 &&
+ instr->VAValue() == 0x0);
+
+ int t = instr->rtValue();
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ bool to_arm_register = (instr->VLValue() == 0x1);
+ if (to_arm_register) {
+ int32_t int_value = get_sinteger_from_s_register(n);
+ set_register(t, int_value);
+ } else {
+ int32_t rs_val = get_register(t);
+ set_s_register_from_sinteger(n, rs_val);
+ }
+}
+
+void
+Simulator::decodeVCMP(SimInstruction* instr)
+{
+ MOZ_ASSERT((instr->bit(4) == 0) && (instr->opc1Value() == 0x7));
+ MOZ_ASSERT(((instr->opc2Value() == 0x4) || (instr->opc2Value() == 0x5)) &&
+ (instr->opc3Value() & 0x1));
+ // Comparison.
+
+ VFPRegPrecision precision = kSinglePrecision;
+ if (instr->szValue() == 1)
+ precision = kDoublePrecision;
+
+ int d = instr->VFPDRegValue(precision);
+ int m = 0;
+ if (instr->opc2Value() == 0x4)
+ m = instr->VFPMRegValue(precision);
+
+ if (precision == kDoublePrecision) {
+ double dd_value;
+ get_double_from_d_register(d, &dd_value);
+ double dm_value = 0.0;
+ if (instr->opc2Value() == 0x4)
+ get_double_from_d_register(m, &dm_value);
+
+ // Raise exceptions for quiet NaNs if necessary.
+ if (instr->bit(7) == 1) {
+ if (mozilla::IsNaN(dd_value))
+ inv_op_vfp_flag_ = true;
+ }
+ compute_FPSCR_Flags(dd_value, dm_value);
+ } else {
+ float fd_value;
+ get_float_from_s_register(d, &fd_value);
+ float fm_value = 0.0;
+ if (instr->opc2Value() == 0x4)
+ get_float_from_s_register(m, &fm_value);
+
+ // Raise exceptions for quiet NaNs if necessary.
+ if (instr->bit(7) == 1) {
+ if (mozilla::IsNaN(fd_value))
+ inv_op_vfp_flag_ = true;
+ }
+ compute_FPSCR_Flags(fd_value, fm_value);
+ }
+}
+
+void
+Simulator::decodeVCVTBetweenDoubleAndSingle(SimInstruction* instr)
+{
+ MOZ_ASSERT(instr->bit(4) == 0 && instr->opc1Value() == 0x7);
+ MOZ_ASSERT(instr->opc2Value() == 0x7 && instr->opc3Value() == 0x3);
+
+ VFPRegPrecision dst_precision = kDoublePrecision;
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->szValue() == 1) {
+ dst_precision = kSinglePrecision;
+ src_precision = kDoublePrecision;
+ }
+
+ int dst = instr->VFPDRegValue(dst_precision);
+ int src = instr->VFPMRegValue(src_precision);
+
+ if (dst_precision == kSinglePrecision) {
+ double val;
+ get_double_from_d_register(src, &val);
+ set_s_register_from_float(dst, static_cast<float>(val));
+ } else {
+ float val;
+ get_float_from_s_register(src, &val);
+ set_d_register_from_double(dst, static_cast<double>(val));
+ }
+}
+
+static bool
+get_inv_op_vfp_flag(VFPRoundingMode mode, double val, bool unsigned_)
+{
+ MOZ_ASSERT(mode == SimRN || mode == SimRM || mode == SimRZ);
+ double max_uint = static_cast<double>(0xffffffffu);
+ double max_int = static_cast<double>(INT32_MAX);
+ double min_int = static_cast<double>(INT32_MIN);
+
+ // Check for NaN.
+ if (val != val)
+ return true;
+
+    // Check for overflow. This code works because 32-bit integers can be
+    // exactly represented by IEEE-754 64-bit floating-point values.
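+    // For example, in SimRN unsigned mode 4294967295.25 still converts (it
+    // rounds to 0xffffffff), while 4294967295.5 would round up to 2^32 and is
+    // therefore flagged as invalid.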
+ switch (mode) {
+ case SimRN:
+ return unsigned_ ? (val >= (max_uint + 0.5)) ||
+ (val < -0.5)
+ : (val >= (max_int + 0.5)) ||
+ (val < (min_int - 0.5));
+ case SimRM:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val < 0)
+ : (val >= (max_int + 1.0)) ||
+ (val < min_int);
+ case SimRZ:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val <= -1)
+ : (val >= (max_int + 1.0)) ||
+ (val <= (min_int - 1.0));
+ default:
+ MOZ_CRASH();
+ return true;
+ }
+}
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+static int
+VFPConversionSaturate(double val, bool unsigned_res)
+{
+ if (val != val) // NaN.
+ return 0;
+ if (unsigned_res)
+ return (val < 0) ? 0 : 0xffffffffu;
+ return (val < 0) ? INT32_MIN : INT32_MAX;
+}
+
+void
+Simulator::decodeVCVTBetweenFloatingPointAndInteger(SimInstruction* instr)
+{
+ MOZ_ASSERT((instr->bit(4) == 0) && (instr->opc1Value() == 0x7) &&
+ (instr->bits(27, 23) == 0x1D));
+ MOZ_ASSERT(((instr->opc2Value() == 0x8) && (instr->opc3Value() & 0x1)) ||
+ (((instr->opc2Value() >> 1) == 0x6) && (instr->opc3Value() & 0x1)));
+
+ // Conversion between floating-point and integer.
+ bool to_integer = (instr->bit(18) == 1);
+
+ VFPRegPrecision src_precision = (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+
+ if (to_integer) {
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note: C++ defines default type casting from floating point to integer
+ // as (close to) rounding toward zero ("fractional part discarded").
+
+ int dst = instr->VFPDRegValue(kSinglePrecision);
+ int src = instr->VFPMRegValue(src_precision);
+
+ // Bit 7 in vcvt instructions indicates if we should use the FPSCR
+ // rounding mode or the default Round to Zero mode.
+ VFPRoundingMode mode = (instr->bit(7) != 1) ? FPSCR_rounding_mode_ : SimRZ;
+ MOZ_ASSERT(mode == SimRM || mode == SimRZ || mode == SimRN);
+
+ bool unsigned_integer = (instr->bit(16) == 0);
+ bool double_precision = (src_precision == kDoublePrecision);
+
+ double val;
+ if (double_precision) {
+ get_double_from_d_register(src, &val);
+ } else {
+ float fval;
+ get_float_from_s_register(src, &fval);
+ val = double(fval);
+ }
+
+ int temp = unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+
+ double abs_diff = unsigned_integer
+ ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
+ } else {
+ switch (mode) {
+ case SimRN: {
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ temp += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
+ temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+ }
+ break;
+ }
+
+ case SimRM:
+ temp = temp > val ? temp - 1 : temp;
+ break;
+
+ case SimRZ:
+ // Nothing to do.
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+ }
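+        // Worked example of the round-to-nearest-even case above: val == 2.5
+        // gives temp == 2 and abs_diff == 0.5, and 2 is even, so temp stays 2;
+        // val == 3.5 gives temp == 3, which is odd, so temp becomes 4.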
+
+ // Update the destination register.
+ set_s_register_from_sinteger(dst, temp);
+ } else {
+ bool unsigned_integer = (instr->bit(7) == 0);
+ int dst = instr->VFPDRegValue(src_precision);
+ int src = instr->VFPMRegValue(kSinglePrecision);
+
+ int val = get_sinteger_from_s_register(src);
+
+ if (src_precision == kDoublePrecision) {
+ if (unsigned_integer)
+ set_d_register_from_double(dst, static_cast<double>(static_cast<uint32_t>(val)));
+ else
+ set_d_register_from_double(dst, static_cast<double>(val));
+ } else {
+ if (unsigned_integer)
+ set_s_register_from_float(dst, static_cast<float>(static_cast<uint32_t>(val)));
+ else
+ set_s_register_from_float(dst, static_cast<float>(val));
+ }
+ }
+}
+
+// A VFPv3 specific instruction.
+void
+Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction* instr)
+{
+ MOZ_ASSERT(instr->bits(27, 24) == 0xE && instr->opc1Value() == 0x7 && instr->bit(19) == 1 &&
+ instr->bit(17) == 1 && instr->bits(11,9) == 0x5 && instr->bit(6) == 1 &&
+ instr->bit(4) == 0);
+
+ int size = (instr->bit(7) == 1) ? 32 : 16;
+
+ int fraction_bits = size - ((instr->bits(3, 0) << 1) | instr->bit(5));
+ double mult = 1 << fraction_bits;
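+    // For example, bits(3, 0) == 0xC with bit(5) == 0 encodes an immediate of
+    // 24, so fraction_bits == 32 - 24 == 8 and mult == 256; converting 1.5
+    // then produces the fixed-point value 384.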
+
+ MOZ_ASSERT(size == 32); // Only handling size == 32 for now.
+
+ // Conversion between floating-point and integer.
+ bool to_fixed = (instr->bit(18) == 1);
+
+ VFPRegPrecision precision = (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+
+ if (to_fixed) {
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note: C++ defines default type casting from floating point to integer
+ // as (close to) rounding toward zero ("fractional part discarded").
+
+ int dst = instr->VFPDRegValue(precision);
+
+ bool unsigned_integer = (instr->bit(16) == 1);
+ bool double_precision = (precision == kDoublePrecision);
+
+ double val;
+ if (double_precision) {
+ get_double_from_d_register(dst, &val);
+ } else {
+ float fval;
+ get_float_from_s_register(dst, &fval);
+ val = double(fval);
+ }
+
+ // Scale value by specified number of fraction bits.
+ val *= mult;
+
+ // Rounding down towards zero. No need to account for the rounding error
+ // as this instruction always rounds down towards zero. See SimRZ below.
+ int temp = unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(SimRZ, val, unsigned_integer);
+
+ double abs_diff = unsigned_integer
+ ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
+ if (inv_op_vfp_flag_)
+ temp = VFPConversionSaturate(val, unsigned_integer);
+
+ // Update the destination register.
+ if (double_precision) {
+ uint32_t dbl[2];
+ dbl[0] = temp; dbl[1] = 0;
+ set_d_register(dst, dbl);
+ } else {
+ set_s_register_from_sinteger(dst, temp);
+ }
+ } else {
+ MOZ_CRASH(); // Not implemented, fixed to float.
+ }
+}
+
+void
+Simulator::decodeType6CoprocessorIns(SimInstruction* instr)
+{
+ MOZ_ASSERT(instr->typeValue() == 6);
+
+ if (instr->coprocessorValue() == 0xA) {
+ switch (instr->opcodeValue()) {
+ case 0x8:
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store single precision float to memory.
+ int rn = instr->rnValue();
+ int vd = instr->VFPDRegValue(kSinglePrecision);
+ int offset = instr->immed8Value();
+ if (!instr->hasU())
+ offset = -offset;
+
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->hasL()) {
+                // Load a single-precision value from memory: vldr.
+ set_s_register_from_sinteger(vd, readW(address, instr));
+ } else {
+                // Store a single-precision value to memory: vstr.
+ writeW(address, get_sinteger_from_s_register(vd), instr);
+ }
+ break;
+ }
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple single from memory: vldm/vstm.
+ handleVList(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ } else if (instr->coprocessorValue() == 0xB) {
+ switch (instr->opcodeValue()) {
+ case 0x2:
+            // Transfer a double between two GP registers and a VFP register (vmov).
+ if (instr->bits(7, 6) != 0 || instr->bit(4) != 1) {
+ MOZ_CRASH(); // Not used atm.
+ } else {
+ int rt = instr->rtValue();
+ int rn = instr->rnValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ if (instr->hasL()) {
+ int32_t data[2];
+ double d;
+ get_double_from_d_register(vm, &d);
+ memcpy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
+ } else {
+ int32_t data[] = { get_register(rt), get_register(rn) };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(vm, d);
+ }
+ }
+ break;
+ case 0x8:
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
+ int rn = instr->rnValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int offset = instr->immed8Value();
+ if (!instr->hasU())
+ offset = -offset;
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->hasL()) {
+ // Load double from memory: vldr.
+ uint64_t data = readQ(address, instr);
+ double val;
+ memcpy(&val, &data, 8);
+ set_d_register_from_double(vd, val);
+ } else {
+ // Store double to memory: vstr.
+ uint64_t data;
+ double val;
+ get_double_from_d_register(vd, &val);
+ memcpy(&data, &val, 8);
+ writeQ(address, data, instr);
+ }
+ break;
+ }
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple double from memory: vldm/vstm.
+ handleVList(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+}
+
+void
+Simulator::decodeSpecialCondition(SimInstruction* instr)
+{
+ switch (instr->specialValue()) {
+ case 5:
+ if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 && instr->bit(4) == 1) {
+ // vmovl signed
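+            // (vmovl widens each lane of a 64-bit D register into a 128-bit Q
+            // register, sign-extending in this signed form; imm3 encodes the
+            // source lane width as 8 * imm3 bits.)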
+ if ((instr->vdValue() & 1) != 0)
+ MOZ_CRASH("Undefined behavior");
+ int Vd = (instr->bit(22) << 3) | (instr->vdValue() >> 1);
+ int Vm = (instr->bit(5) << 4) | instr->vmValue();
+ int imm3 = instr->bits(21, 19);
+ if (imm3 != 1 && imm3 != 2 && imm3 != 4)
+ MOZ_CRASH();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ int8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ int16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 7:
+ if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 && instr->bit(4) == 1) {
+ // vmovl unsigned.
+ if ((instr->vdValue() & 1) != 0)
+ MOZ_CRASH("Undefined behavior");
+ int Vd = (instr->bit(22) << 3) | (instr->vdValue() >> 1);
+ int Vm = (instr->bit(5) << 4) | instr->vmValue();
+ int imm3 = instr->bits(21, 19);
+ if (imm3 != 1 && imm3 != 2 && imm3 != 4)
+ MOZ_CRASH();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ uint8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ uint16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 8:
+ if (instr->bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->bit(22) << 4) | instr->vdValue();
+ int Rn = instr->vnValue();
+ int type = instr->bits(11, 8);
+ int Rm = instr->vmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ get_d_register(Vd + r, data);
+ // TODO: We should AllowUnaligned here only if the alignment attribute of
+ // the instruction calls for default alignment.
+ writeW(address, data[0], instr, AllowUnaligned);
+ writeW(address + 4, data[1], instr, AllowUnaligned);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13)
+ set_register(Rn, address);
+ else
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ } else if (instr->bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->bit(22) << 4) | instr->vdValue();
+ int Rn = instr->vnValue();
+ int type = instr->bits(11, 8);
+ int Rm = instr->vmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ // TODO: We should AllowUnaligned here only if the alignment attribute of
+ // the instruction calls for default alignment.
+ data[0] = readW(address, instr, AllowUnaligned);
+ data[1] = readW(address + 4, instr, AllowUnaligned);
+ set_d_register(Vd + r, data);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13)
+ set_register(Rn, address);
+ else
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0xA:
+ if (instr->bits(31,20) == 0xf57) {
+ switch (instr->bits(7,4)) {
+ case 5: // DMB
+ AtomicOperations::fenceSeqCst();
+ break;
+ case 4: // DSB
+ // We do not use DSB.
+ MOZ_CRASH("DSB unimplemented");
+ case 6: // ISB
+ // We do not use ISB.
+ MOZ_CRASH("ISB unimplemented");
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0xB:
+ if (instr->bits(22, 20) == 5 && instr->bits(15, 12) == 0xf) {
+ // pld: ignore instruction.
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0x1C:
+ case 0x1D:
+ if (instr->bit(4) == 1 && instr->bits(11,9) != 5) {
+ // MCR, MCR2, MRC, MRC2 with cond == 15
+ decodeType7CoprocessorIns(instr);
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Executes the current instruction.
+void
+Simulator::instructionDecode(SimInstruction* instr)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ AutoLockSimulatorCache als(this);
+ CheckICacheLocked(icache(), instr);
+ }
+
+ pc_modified_ = false;
+
+ static const uint32_t kSpecialCondition = 15 << 28;
+ if (instr->conditionField() == kSpecialCondition) {
+ decodeSpecialCondition(instr);
+ } else if (conditionallyExecute(instr)) {
+ switch (instr->typeValue()) {
+ case 0:
+ case 1:
+ decodeType01(instr);
+ break;
+ case 2:
+ decodeType2(instr);
+ break;
+ case 3:
+ decodeType3(instr);
+ break;
+ case 4:
+ decodeType4(instr);
+ break;
+ case 5:
+ decodeType5(instr);
+ break;
+ case 6:
+ decodeType6(instr);
+ break;
+ case 7:
+ decodeType7(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+        // If the instruction is a non-taken conditional stop, we need to skip
+ // the inlined message address.
+ } else if (instr->isStop()) {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+ if (!pc_modified_)
+ set_register(pc, reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+}
+
+void
+Simulator::enable_single_stepping(SingleStepCallback cb, void* arg)
+{
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void
+Simulator::disable_single_stepping()
+{
+ if (!single_stepping_)
+ return;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template<bool EnableStopSimAt>
+void
+Simulator::execute()
+{
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the raw
+ // PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (EnableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ fprintf(stderr, "\nStopped simulation at icount %lld\n", icount_);
+ ArmDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+
+ int32_t rpc = resume_pc_;
+ if (MOZ_UNLIKELY(rpc != 0)) {
+ // wasm signal handler ran and we have to adjust the pc.
+ JSRuntime::innermostWasmActivation()->setResumePC((void*)get_pc());
+ set_pc(rpc);
+ resume_pc_ = 0;
+ }
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+}
+
+void
+Simulator::callInternal(uint8_t* entry)
+{
+ // Prepare to execute the code at entry.
+ set_register(pc, reinterpret_cast<int32_t>(entry));
+
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+    // the LR, the simulation stops when returning to this call point.
+ set_register(lr, end_sim_pc);
+
+ // Remember the values of callee-saved registers. The code below assumes
+ // that r9 is not used as sb (static base) in simulator code and therefore
+ // is regarded as a callee-saved register.
+ int32_t r4_val = get_register(r4);
+ int32_t r5_val = get_register(r5);
+ int32_t r6_val = get_register(r6);
+ int32_t r7_val = get_register(r7);
+ int32_t r8_val = get_register(r8);
+ int32_t r9_val = get_register(r9);
+ int32_t r10_val = get_register(r10);
+ int32_t r11_val = get_register(r11);
+
+ // Remember d8 to d15 which are callee-saved.
+ uint64_t d8_val;
+ get_d_register(d8, &d8_val);
+ uint64_t d9_val;
+ get_d_register(d9, &d9_val);
+ uint64_t d10_val;
+ get_d_register(d10, &d10_val);
+ uint64_t d11_val;
+ get_d_register(d11, &d11_val);
+ uint64_t d12_val;
+ get_d_register(d12, &d12_val);
+ uint64_t d13_val;
+ get_d_register(d13, &d13_val);
+ uint64_t d14_val;
+ get_d_register(d14, &d14_val);
+ uint64_t d15_val;
+ get_d_register(d15, &d15_val);
+
+    // Set up the callee-saved registers with a known value, so we can check
+    // that they are preserved properly across JS execution.
+ int32_t callee_saved_value = uint32_t(icount_);
+ uint64_t callee_saved_value_d = uint64_t(icount_);
+
+ if (!skipCalleeSavedRegsCheck) {
+ set_register(r4, callee_saved_value);
+ set_register(r5, callee_saved_value);
+ set_register(r6, callee_saved_value);
+ set_register(r7, callee_saved_value);
+ set_register(r8, callee_saved_value);
+ set_register(r9, callee_saved_value);
+ set_register(r10, callee_saved_value);
+ set_register(r11, callee_saved_value);
+
+ set_d_register(d8, &callee_saved_value_d);
+ set_d_register(d9, &callee_saved_value_d);
+ set_d_register(d10, &callee_saved_value_d);
+ set_d_register(d11, &callee_saved_value_d);
+ set_d_register(d12, &callee_saved_value_d);
+ set_d_register(d13, &callee_saved_value_d);
+ set_d_register(d14, &callee_saved_value_d);
+ set_d_register(d15, &callee_saved_value_d);
+    }
+
+    // Start the simulation.
+ if (Simulator::StopSimAt != -1L)
+ execute<true>();
+ else
+ execute<false>();
+
+ if (!skipCalleeSavedRegsCheck) {
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == get_register(r4));
+ MOZ_ASSERT(callee_saved_value == get_register(r5));
+ MOZ_ASSERT(callee_saved_value == get_register(r6));
+ MOZ_ASSERT(callee_saved_value == get_register(r7));
+ MOZ_ASSERT(callee_saved_value == get_register(r8));
+ MOZ_ASSERT(callee_saved_value == get_register(r9));
+ MOZ_ASSERT(callee_saved_value == get_register(r10));
+ MOZ_ASSERT(callee_saved_value == get_register(r11));
+
+ uint64_t value;
+ get_d_register(d8, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d9, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d10, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d11, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d12, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d13, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d14, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d15, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+
+ // Restore callee-saved registers with the original value.
+ set_register(r4, r4_val);
+ set_register(r5, r5_val);
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+
+ set_d_register(d8, &d8_val);
+ set_d_register(d9, &d9_val);
+ set_d_register(d10, &d10_val);
+ set_d_register(d11, &d11_val);
+ set_d_register(d12, &d12_val);
+ set_d_register(d13, &d13_val);
+ set_d_register(d14, &d14_val);
+ set_d_register(d15, &d15_val);
+ }
+}
+
+int32_t
+Simulator::call(uint8_t* entry, int argument_count, ...)
+{
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ // First four arguments passed in registers.
+ MOZ_ASSERT(argument_count >= 1);
+ set_register(r0, va_arg(parameters, int32_t));
+ if (argument_count >= 2)
+ set_register(r1, va_arg(parameters, int32_t));
+ if (argument_count >= 3)
+ set_register(r2, va_arg(parameters, int32_t));
+ if (argument_count >= 4)
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ int entry_stack = original_stack;
+ if (argument_count >= 4)
+ entry_stack -= (argument_count - 4) * sizeof(int32_t);
+
+ entry_stack &= ~ABIStackAlignment;
+
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++)
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == get_register(sp));
+ set_register(sp, original_stack);
+
+ int32_t result = get_register(r0);
+ return result;
+}
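+// Illustrative use of call() (the entry point and the argument values are
+// hypothetical):
+//
+//   Simulator* sim = Simulator::Current();
+//   int32_t rv = sim->call(codeEntry, 3, int32_t(a), int32_t(b), int32_t(c));
+//
+// The first four arguments travel in r0-r3; any further ones are copied onto
+// the simulated stack, as set up above.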
+
+Simulator*
+Simulator::Current()
+{
+ return TlsPerThreadData.get()->simulator();
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator*
+JSRuntime::simulator() const
+{
+ return simulator_;
+}
+
+uintptr_t*
+JSRuntime::addressOfSimulatorStackLimit()
+{
+ return simulator_->addressOfStackLimit();
+}
+
+js::jit::Simulator*
+js::PerThreadData::simulator() const
+{
+ return runtime_->simulator();
+}
diff --git a/js/src/jit/arm/Simulator-arm.h b/js/src/jit/arm/Simulator-arm.h
new file mode 100644
index 000000000..968f460fb
--- /dev/null
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -0,0 +1,519 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_arm_Simulator_arm_h
+#define jit_arm_Simulator_arm_h
+
+#ifdef JS_SIMULATOR_ARM
+
+#include "jit/arm/Architecture-arm.h"
+#include "jit/arm/disasm/Disasm-arm.h"
+#include "jit/IonTypes.h"
+#include "threading/Thread.h"
+#include "vm/MutexIDs.h"
+
+namespace js {
+namespace jit {
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum VFPRoundingMode {
+ SimRN = 0 << 22, // Round to Nearest.
+ SimRP = 1 << 22, // Round towards Plus Infinity.
+ SimRM = 2 << 22, // Round towards Minus Infinity.
+ SimRZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = SimRN,
+ kRoundToPlusInf = SimRP,
+ kRoundToMinusInf = SimRM,
+ kRoundToZero = SimRZ
+};
+
+const uint32_t kVFPRoundingModeMask = 3 << 22;
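+// The enum values above and this mask are pre-shifted: the rounding mode lives
+// in the RMode field, FPSCR bits [23:22], so masking a raw FPSCR value with
+// kVFPRoundingModeMask yields one of the Sim* constants directly.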
+
+typedef int32_t Instr;
+class SimInstruction;
+
+class Simulator
+{
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ public:
+ friend class ArmDebugger;
+ enum Register {
+ no_reg = -1,
+ r0 = 0, r1, r2, r3, r4, r5, r6, r7,
+ r8, r9, r10, r11, r12, r13, r14, r15,
+ num_registers,
+ sp = 13,
+ lr = 14,
+ pc = 15,
+ s0 = 0, s1, s2, s3, s4, s5, s6, s7,
+ s8, s9, s10, s11, s12, s13, s14, s15,
+ s16, s17, s18, s19, s20, s21, s22, s23,
+ s24, s25, s26, s27, s28, s29, s30, s31,
+ num_s_registers = 32,
+ d0 = 0, d1, d2, d3, d4, d5, d6, d7,
+ d8, d9, d10, d11, d12, d13, d14, d15,
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ num_d_registers = 32,
+ q0 = 0, q1, q2, q3, q4, q5, q6, q7,
+ q8, q9, q10, q11, q12, q13, q14, q15,
+ num_q_registers = 16
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create(JSContext* cx);
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods above.
+ explicit Simulator(JSContext* cx);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ // Disassemble some instructions starting at instr and print them
+ // on stdout. Useful for working within GDB after a MOZ_CRASH(),
+ // among other things.
+ //
+ // Typical use within a crashed instruction decoding method is simply:
+ //
+ // call Simulator::disassemble(instr, 1)
+ //
+ // or use one of the more convenient inline methods below.
+ static void disassemble(SimInstruction* instr, size_t n);
+
+ // Disassemble one instruction.
+ // "call disasm(instr)"
+ void disasm(SimInstruction* instr);
+
+ // Disassemble n instructions starting at instr.
+ // "call disasm(instr, 3)"
+ void disasm(SimInstruction* instr, size_t n);
+
+ // Skip backwards m instructions before starting, then disassemble n instructions.
+ // "call disasm(instr, 3, 7)"
+ void disasm(SimInstruction* instr, size_t m, size_t n);
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the ARM
+    // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int32_t value);
+ int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
+ void set_dw_register(int dreg, const int* dbl);
+
+ // Support for VFP.
+ void get_d_register(int dreg, uint64_t* value);
+ void set_d_register(int dreg, const uint64_t* value);
+ void get_d_register(int dreg, uint32_t* value);
+ void set_d_register(int dreg, const uint32_t* value);
+ void get_q_register(int qreg, uint64_t* value);
+ void set_q_register(int qreg, const uint64_t* value);
+ void get_q_register(int qreg, uint32_t* value);
+ void set_q_register(int qreg, const uint32_t* value);
+ void set_s_register(int reg, unsigned int value);
+ unsigned int get_s_register(int reg) const;
+
+ void set_d_register_from_double(int dreg, const double& dbl) {
+ setVFPRegister<double, 2>(dreg, dbl);
+ }
+ void get_double_from_d_register(int dreg, double* out) {
+ getFromVFPRegister<double, 2>(dreg, out);
+ }
+ void set_s_register_from_float(int sreg, const float flt) {
+ setVFPRegister<float, 1>(sreg, flt);
+ }
+ void get_float_from_s_register(int sreg, float* out) {
+ getFromVFPRegister<float, 1>(sreg, out);
+ }
+ void set_s_register_from_sinteger(int sreg, const int sint) {
+ setVFPRegister<int, 1>(sreg, sint);
+ }
+ int get_sinteger_from_s_register(int sreg) {
+ int ret;
+ getFromVFPRegister<int, 1>(sreg, &ret);
+ return ret;
+ }
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+ void set_resume_pc(void* value) {
+ resume_pc_ = int32_t(value);
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes ARM instructions until the PC reaches end_sim_pc.
+ template<bool EnableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int32_t call(uint8_t* entry, int argument_count, ...);
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_lr, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_lr = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the lr is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2
+ };
+
+ // ForbidUnaligned means "always fault on unaligned access".
+ //
+ // AllowUnaligned means "allow the unaligned access if other conditions are
+ // met". The "other conditions" vary with the instruction: For all
+ // instructions the base condition is !HasAlignmentFault(), ie, the chip is
+ // configured to allow unaligned accesses. For instructions like VLD1
+ // there is an additional constraint that the alignment attribute in the
+ // instruction must be set to "default alignment".
+
+ enum UnalignedPolicy {
+ ForbidUnaligned,
+ AllowUnaligned
+ };
+
+ bool init();
+
+ // Checks if the current instruction should be executed based on its
+ // condition bits.
+ inline bool conditionallyExecute(SimInstruction* instr);
+
+ // Helper functions to set the conditional flags in the architecture state.
+ void setNZFlags(int32_t val);
+ void setCFlag(bool val);
+ void setVFlag(bool val);
+ bool carryFrom(int32_t left, int32_t right, int32_t carry = 0);
+ bool borrowFrom(int32_t left, int32_t right);
+ bool overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addition);
+
+ inline int getCarry() { return c_flag_ ? 1 : 0; };
+
+ // Support for VFP.
+ void compute_FPSCR_Flags(double val1, double val2);
+ void copy_FPSCR_to_APSR();
+ inline void canonicalizeNaN(double* value);
+ inline void canonicalizeNaN(float* value);
+
+ // Helper functions to decode common "addressing" modes
+ int32_t getShiftRm(SimInstruction* instr, bool* carry_out);
+ int32_t getImm(SimInstruction* instr, bool* carry_out);
+ int32_t processPU(SimInstruction* instr, int num_regs, int operand_size,
+ intptr_t* start_address, intptr_t* end_address);
+ void handleRList(SimInstruction* instr, bool load);
+ void handleVList(SimInstruction* inst);
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ inline bool isStopInstruction(SimInstruction* instr);
+ inline bool isWatchedStop(uint32_t bkpt_code);
+ inline bool isEnabledStop(uint32_t bkpt_code);
+ inline void enableStop(uint32_t bkpt_code);
+ inline void disableStop(uint32_t bkpt_code);
+ inline void increaseStopCounter(uint32_t bkpt_code);
+ void printStopInfo(uint32_t code);
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ inline bool handleWasmFault(int32_t addr, unsigned numBytes);
+
+ // Read and write memory.
+ inline uint8_t readBU(int32_t addr);
+ inline int8_t readB(int32_t addr);
+ inline void writeB(int32_t addr, uint8_t value);
+ inline void writeB(int32_t addr, int8_t value);
+
+ inline uint8_t readExBU(int32_t addr);
+ inline int32_t writeExB(int32_t addr, uint8_t value);
+
+ inline uint16_t readHU(int32_t addr, SimInstruction* instr);
+ inline int16_t readH(int32_t addr, SimInstruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void writeH(int32_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(int32_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint16_t readExHU(int32_t addr, SimInstruction* instr);
+ inline int32_t writeExH(int32_t addr, uint16_t value, SimInstruction* instr);
+
+ inline int readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f = ForbidUnaligned);
+ inline void writeW(int32_t addr, int value, SimInstruction* instr, UnalignedPolicy f = ForbidUnaligned);
+
+ inline uint64_t readQ(int32_t addr, SimInstruction* instr, UnalignedPolicy f = ForbidUnaligned);
+ inline void writeQ(int32_t addr, uint64_t value, SimInstruction* instr, UnalignedPolicy f = ForbidUnaligned);
+
+ inline int readExW(int32_t addr, SimInstruction* instr);
+ inline int writeExW(int32_t addr, int value, SimInstruction* instr);
+
+ int32_t* readDW(int32_t addr);
+ void writeDW(int32_t addr, int32_t value1, int32_t value2);
+
+ int32_t readExDW(int32_t addr, int32_t* hibits);
+ int32_t writeExDW(int32_t addr, int32_t value1, int32_t value2);
+
+ // Executing is handled based on the instruction type.
+ // Both type 0 and type 1 rolled into one.
+ void decodeType01(SimInstruction* instr);
+ void decodeType2(SimInstruction* instr);
+ void decodeType3(SimInstruction* instr);
+ void decodeType4(SimInstruction* instr);
+ void decodeType5(SimInstruction* instr);
+ void decodeType6(SimInstruction* instr);
+ void decodeType7(SimInstruction* instr);
+
+ // Support for VFP.
+ void decodeTypeVFP(SimInstruction* instr);
+ void decodeType6CoprocessorIns(SimInstruction* instr);
+ void decodeSpecialCondition(SimInstruction* instr);
+
+ void decodeVMOVBetweenCoreAndSinglePrecisionRegisters(SimInstruction* instr);
+ void decodeVCMP(SimInstruction* instr);
+ void decodeVCVTBetweenDoubleAndSingle(SimInstruction* instr);
+ void decodeVCVTBetweenFloatingPointAndInteger(SimInstruction* instr);
+ void decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction* instr);
+
+ // Support for some system functions.
+ void decodeType7CoprocessorIns(SimInstruction* instr);
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+
+ public:
+ static bool ICacheCheckingEnabled;
+ static void FlushICache(void* start, size_t size);
+
+ static int64_t StopSimAt;
+
+ // For testing the MoveResolver code, a MoveResolver is set up, and
+ // the VFP registers are loaded with pre-determined values,
+ // then the sequence of code is simulated. In order to test this with the
+ // simulator, the callee-saved registers can't be trashed. This flag
+ // disables that feature.
+ bool skipCalleeSavedRegsCheck;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction, ABIFunctionType type);
+
+ private:
+ // Handle arguments and return value for runtime FP functions.
+ void getFpArgs(double* x, double* y, int32_t* z);
+ void getFpFromStack(int32_t* stack, double* x1);
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void scratchVolatileRegisters(bool scratchFloat = true);
+
+ template<class ReturnType, int register_size>
+ void getFromVFPRegister(int reg_index, ReturnType* out);
+
+ template<class InputType, int register_size>
+ void setVFPRegister(int reg_index, const InputType& value);
+
+ void callInternal(uint8_t* entry);
+
+ JSContext* const cx_;
+
+ // Architecture state.
+ // Saturating instructions require a Q flag to indicate saturation.
+ // There is currently no way to read the CPSR directly, and thus read the Q
+ // flag, so this is left unimplemented.
+ int32_t registers_[16];
+ bool n_flag_;
+ bool z_flag_;
+ bool c_flag_;
+ bool v_flag_;
+
+ // VFP architecture state.
+ uint32_t vfp_registers_[num_d_registers * 2];
+ bool n_flag_FPSCR_;
+ bool z_flag_FPSCR_;
+ bool c_flag_FPSCR_;
+ bool v_flag_FPSCR_;
+
+ // VFP rounding mode. See ARM DDI 0406B Page A2-29.
+ VFPRoundingMode FPSCR_rounding_mode_;
+ bool FPSCR_default_NaN_mode_;
+
+ // VFP FP exception flags architecture state.
+ bool inv_op_vfp_flag_;
+ bool div_zero_vfp_flag_;
+ bool overflow_vfp_flag_;
+ bool underflow_vfp_flag_;
+ bool inexact_vfp_flag_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+
+ int32_t resume_pc_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Breakpoint is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+
+ public:
+ int64_t icount() {
+ return icount_;
+ }
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ private:
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+#ifdef DEBUG
+ mozilla::Maybe<Thread::Id> cacheLockHolder_;
+#endif
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return icache_;
+ }
+
+ Redirection* redirection() const {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return redirection_;
+ }
+
+ void setRedirection(js::jit::Redirection* redirection) {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ redirection_ = redirection;
+ }
+
+ private:
+ // Exclusive access monitor
+ void exclusiveMonitorSet(uint64_t value);
+ uint64_t exclusiveMonitorGetAndClear(bool* held);
+ void exclusiveMonitorClear();
+
+ bool exclusiveMonitorHeld_;
+ uint64_t exclusiveMonitor_;
+};
+
+#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
+ JS_BEGIN_MACRO \
+ if (cx->runtime()->simulator()->overRecursedWithExtra(extra)) { \
+ js::ReportOverRecursed(cx); \
+ onerror; \
+ } \
+ JS_END_MACRO
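+
+// Illustrative use only; the extra-space value and the error action here are
+// hypothetical:
+//
+//   JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, 4096, return false);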
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_ARM */
+
+#endif /* jit_arm_Simulator_arm_h */
diff --git a/js/src/jit/arm/Trampoline-arm.cpp b/js/src/jit/arm/Trampoline-arm.cpp
new file mode 100644
index 000000000..44144763c
--- /dev/null
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -0,0 +1,1442 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscompartment.h"
+
+#include "jit/arm/SharedICHelpers-arm.h"
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static const FloatRegisterSet NonVolatileFloatRegs =
+ FloatRegisterSet((1ULL << FloatRegisters::d8) |
+ (1ULL << FloatRegisters::d9) |
+ (1ULL << FloatRegisters::d10) |
+ (1ULL << FloatRegisters::d11) |
+ (1ULL << FloatRegisters::d12) |
+ (1ULL << FloatRegisters::d13) |
+ (1ULL << FloatRegisters::d14) |
+ (1ULL << FloatRegisters::d15));
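+
+// In the ARM EABI (AAPCS), d8-d15 are the callee-saved VFP registers, which is
+// why exactly this set is spilled in the EnterJIT prologue below and restored
+// by GenerateReturn.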
+
+static void
+GenerateReturn(MacroAssembler& masm, int returnCode, SPSProfiler* prof)
+{
+ // Restore non-volatile floating point registers.
+ masm.transferMultipleByRuns(NonVolatileFloatRegs, IsLoad, StackPointer, IA);
+
+ // Get rid of padding word.
+ masm.addPtr(Imm32(sizeof(void*)), sp);
+
+ // Set up return value
+ masm.ma_mov(Imm32(returnCode), r0);
+
+ // Pop and return
+ masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
+ masm.transferReg(r4);
+ masm.transferReg(r5);
+ masm.transferReg(r6);
+ masm.transferReg(r7);
+ masm.transferReg(r8);
+ masm.transferReg(r9);
+ masm.transferReg(r10);
+ masm.transferReg(r11);
+ // r12 isn't saved, so it shouldn't be restored.
+ masm.transferReg(pc);
+ masm.finishDataTransfer();
+ masm.flushBuffer();
+}
+
+struct EnterJITStack
+{
+ double d8;
+ double d9;
+ double d10;
+ double d11;
+ double d12;
+ double d13;
+ double d14;
+ double d15;
+
+ // Padding.
+ void* padding;
+
+ // Non-volatile registers.
+ void* r4;
+ void* r5;
+ void* r6;
+ void* r7;
+ void* r8;
+ void* r9;
+ void* r10;
+ void* r11;
+    // The ABI does not expect r12 (ip) to be preserved.
+ void* lr;
+
+ // Arguments.
+ // code == r0
+ // argc == r1
+ // argv == r2
+ // frame == r3
+ CalleeToken token;
+ JSObject* scopeChain;
+ size_t numStackValues;
+ Value* vp;
+};
+
+/*
+ * This method generates a trampoline for a C++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp, CalleeToken
+ * calleeToken, JSObject* scopeChain, Value* vp)
+ *   ...using the standard EABI calling convention.
+ */
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ const Address slot_token(sp, offsetof(EnterJITStack, token));
+ const Address slot_vp(sp, offsetof(EnterJITStack, vp));
+
+ MOZ_ASSERT(OsrFrameReg == r3);
+
+ MacroAssembler masm(cx);
+ Assembler* aasm = &masm;
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ masm.transferReg(r4); // [sp,0]
+ masm.transferReg(r5); // [sp,4]
+ masm.transferReg(r6); // [sp,8]
+ masm.transferReg(r7); // [sp,12]
+ masm.transferReg(r8); // [sp,16]
+ masm.transferReg(r9); // [sp,20]
+ masm.transferReg(r10); // [sp,24]
+ masm.transferReg(r11); // [sp,28]
+    // The ABI does not expect r12 (ip) to be preserved.
+ masm.transferReg(lr); // [sp,32]
+ // The 5th argument is located at [sp, 36]
+ masm.finishDataTransfer();
+
+ // Add padding word.
+ masm.subPtr(Imm32(sizeof(void*)), sp);
+
+ // Push the float registers.
+ masm.transferMultipleByRuns(NonVolatileFloatRegs, IsStore, sp, DB);
+
+ // Save stack pointer into r8
+ masm.movePtr(sp, r8);
+
+ // Load calleeToken into r9.
+ masm.loadPtr(slot_token, r9);
+
+ // Save stack pointer.
+ if (type == EnterJitBaseline)
+ masm.movePtr(sp, r11);
+
+ // Load the number of actual arguments into r10.
+ masm.loadPtr(slot_vp, r10);
+ masm.unboxInt32(Address(r10, 0), r10);
+
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, r9, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.add32(Imm32(1), r1);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code moves the stack pointer to the location where it should be when
+ // we enter the Jit frame. It moves the stack pointer such that we have
+ // enough space reserved for pushing the arguments, and the JitFrameLayout.
+ // The stack pointer is also aligned on the alignment expected by the Jit
+ // frames.
+ //
+    // At the end, the register r4 is a pointer to the stack location where the
+    // first argument is expected by the Jit frame.
+ //
+ aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); // r4 = sp - argc*8
+ aasm->as_bic(r4, r4, Imm8(JitStackAlignment - 1));
+    // r4 is now aligned to the bottom of the list of arguments.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ // sp' = ~(JitStackAlignment - 1) & (sp - argc * sizeof(Value)) - sizeof(JitFrameLayout)
+ aasm->as_sub(sp, r4, Imm8(sizeof(JitFrameLayout)));
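+    // Worked example (assuming JitStackAlignment == 8): with sp == 0x1004 and
+    // r1 == 3, r4 = 0x1004 - 3*8 = 0xfec, which the bic rounds down to 0xfe8,
+    // and sp' = 0xfe8 - sizeof(JitFrameLayout).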
+
+ // Get a copy of the number of args to use as a decrement counter, also set
+ // the zero condition code.
+ aasm->as_mov(r5, O2Reg(r1), SetCC);
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ {
+ Label header, footer;
+ // If there aren't any arguments, don't do anything.
+ aasm->as_b(&footer, Assembler::Zero);
+ // Get the top of the loop.
+ masm.bind(&header);
+ aasm->as_sub(r5, r5, Imm8(1), SetCC);
+ // We could be more awesome, and unroll this, using a loadm
+ // (particularly since the offset is effectively 0) but that seems more
+ // error prone, and complex.
+ // BIG FAT WARNING: this loads both r6 and r7.
+ aasm->as_extdtr(IsLoad, 64, true, PostIndex, r6, EDtrAddr(r2, EDtrOffImm(8)));
+ aasm->as_extdtr(IsStore, 64, true, PostIndex, r6, EDtrAddr(r4, EDtrOffImm(8)));
+ aasm->as_b(&header, Assembler::NonZero);
+ masm.bind(&footer);
+ }
+
+ masm.ma_sub(r8, sp, r8);
+ masm.makeFrameDescriptor(r8, JitFrame_Entry, JitFrameLayout::Size());
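+    // The descriptor built here keeps the frame size (r8) in its upper bits,
+    // with the frame type tag in the low bits; the epilogue below recovers the
+    // size with lsr(r5, FRAMESIZE_SHIFT).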
+
+ masm.startDataTransferM(IsStore, sp, IB, NoWriteBack);
+ // [sp] = return address (written later)
+ masm.transferReg(r8); // [sp',4] = descriptor, argc*8+20
+ masm.transferReg(r9); // [sp',8] = callee token
+ masm.transferReg(r10); // [sp',12] = actual arguments
+ masm.finishDataTransfer();
+
+ Label returnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(JSReturnOperand);
+ regs.takeUnchecked(OsrFrameReg);
+ regs.take(r11);
+ regs.take(ReturnReg);
+
+ const Address slot_numStackValues(r11, offsetof(EnterJITStack, numStackValues));
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slot_numStackValues, numStackValues);
+
+ // Write return address. On ARM, CodeLabel is only used for tableswitch,
+ // so we can't use it here to get the return address. Instead, we use pc
+ // + a fixed offset to a jump to returnLabel. The pc register holds pc +
+ // 8, so we add the size of 2 instructions to skip the instructions
+ // emitted by storePtr and jump(&skipJump).
+ {
+ AutoForbidPools afp(&masm, 5);
+ Label skipJump;
+ masm.mov(pc, scratch);
+ masm.addPtr(Imm32(2 * sizeof(uint32_t)), scratch);
+ masm.storePtr(scratch, Address(sp, 0));
+ masm.jump(&skipJump);
+ masm.jump(&returnLabel);
+ masm.bind(&skipJump);
+ }
+
+ // Push previous frame pointer.
+ masm.push(r11);
+
+ // Reserve frame.
+ Register framePtr = r11;
+ masm.subPtr(Imm32(BaselineFrame::Size()), sp);
+ masm.mov(sp, framePtr);
+
+#ifdef XP_WIN
+ // Can't push large frames blindly on windows. Touch frame memory
+ // incrementally.
+ masm.ma_lsl(Imm32(3), numStackValues, scratch);
+ masm.subPtr(scratch, framePtr);
+ {
+ masm.ma_sub(sp, Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ masm.bind(&touchFrameLoop);
+ masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
+ masm.store32(Imm32(0), Address(scratch, 0));
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+ masm.jump(&touchFrameLoop);
+ masm.bind(&touchFrameLoopEnd);
+ }
+ masm.mov(sp, framePtr);
+#endif
+
+ // Reserve space for locals and stack values.
+ masm.ma_lsl(Imm32(3), numStackValues, scratch);
+ masm.ma_sub(sp, scratch, sp);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(Imm32(0)); // Fake return address.
+ // No GC things to mark on the stack, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.push(framePtr); // BaselineFrame
+ masm.push(r0); // jitcode
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(r11); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ Register jitcode = regs.takeAny();
+ masm.pop(jitcode);
+ masm.pop(framePtr);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), sp);
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.as_add(realFramePtr, framePtr, Imm8(sizeof(void*)));
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: Load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.mov(framePtr, sp);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), sp);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&returnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != r0);
+ masm.loadPtr(Address(r11, offsetof(EnterJITStack, scopeChain)), R1.scratchReg());
+ }
+
+ // The Data transfer is pushing 4 words, which already account for the
+ // return address space of the Jit frame. We have to undo what the data
+ // transfer did before making the call.
+ masm.addPtr(Imm32(sizeof(uintptr_t)), sp);
+
+ // The callee will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call the function.
+ masm.callJitNoProfiler(r0);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ }
+
+ // The top of the stack now points to the address of the field following the
+ // return address because the return address is popped for the return, so we
+ // need to remove the size of the return address field.
+ aasm->as_sub(sp, sp, Imm8(4));
+
+ // Load off of the stack the size of our local stack.
+ masm.loadPtr(Address(sp, JitFrameLayout::offsetOfDescriptor()), r5);
+ aasm->as_add(sp, sp, lsr(r5, FRAMESIZE_SHIFT));
+
+ // Store the returned value into the slot_vp
+ masm.loadPtr(slot_vp, r5);
+ masm.storeValue(JSReturnOperand, Address(r5, 0));
+
+ // :TODO: Optimize storeValue with:
+ // We're using a load-double here. In order for that to work, the data needs
+ // to be stored in two consecutive registers, make sure this is the case
+ // MOZ_ASSERT(JSReturnReg_Type.code() == JSReturnReg_Data.code()+1);
+ // aasm->as_extdtr(IsStore, 64, true, Offset,
+ // JSReturnReg_Data, EDtrAddr(r5, EDtrOffImm(0)));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, true, &cx->runtime()->spsProfiler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("EnterJIT");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ // See large comment in x86's JitRuntime::generateInvalidator.
+ MacroAssembler masm(cx);
+ // At this point, one of two things has happened:
+ // 1) Execution has just returned from C code, which left the stack aligned
+ // 2) Execution has just returned from Ion code, which left the stack unaligned.
+ // The old return address should not matter, but we still want the stack to
+ // be aligned, and there is no good reason to automatically align it with a
+ // call to setupUnalignedABICall.
+ masm.as_bic(sp, sp, Imm8(7));
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ // We don't have to push everything, but this is likely easier.
+ // Setting regs_.
+ for (uint32_t i = 0; i < Registers::Total; i++)
+ masm.transferReg(Register::FromCode(i));
+ masm.finishDataTransfer();
+
+ // Since our datastructures for stack inspection are compile-time fixed,
+ // if there are only 16 double registers, then we need to reserve
+ // space on the stack for the missing 16.
+ if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
+ ScratchRegisterScope scratch(masm);
+ int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
+ masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
+ }
+
+ masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
+ for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++)
+ masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
+ masm.finishFloatTransfer();
+
+ masm.ma_mov(sp, r0);
+ const int sizeOfRetval = sizeof(size_t)*2;
+ masm.reserveStack(sizeOfRetval);
+ masm.mov(sp, r1);
+ const int sizeOfBailoutInfo = sizeof(void*)*2;
+ masm.reserveStack(sizeOfBailoutInfo);
+ masm.mov(sp, r2);
+ masm.setupAlignedABICall();
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.passABIArg(r2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.ma_ldr(DTRAddr(sp, DtrOffImm(0)), r2);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1, scratch);
+ }
+ // Remove the return address, the IonScript, the register state
+    // (InvalidationBailoutStack) and the space that was allocated for the return
+ // value.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp, scratch);
+ }
+ // Remove the space that this frame was using before the bailout (computed
+ // by InvalidationBailout)
+ masm.ma_add(sp, r1, sp);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm(cx);
+ masm.pushReturnAddress();
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
+ // Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == r8);
+
+ // Copy number of actual arguments into r0.
+ masm.ma_ldr(DTRAddr(sp, DtrOffImm(RectifierFrameLayout::offsetOfNumActualArgs())), r0);
+
+ // Load the number of |undefined|s to push into r6.
+ masm.ma_ldr(DTRAddr(sp, DtrOffImm(RectifierFrameLayout::offsetOfCalleeToken())), r1);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(Imm32(CalleeTokenMask), r1, r6, scratch);
+ }
+ masm.ma_ldrh(EDtrAddr(r6, EDtrOffImm(JSFunction::offsetOfNargs())), r6);
+
+ masm.ma_sub(r6, r8, r2);
+
+ // Get the topmost argument.
+ {
+ ScratchRegisterScope scratch(masm);
+        masm.ma_alu(sp, lsl(r8, 3), r3, OpAdd); // r3 <- sp + nargs * 8
+ masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3, scratch);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, r1, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+        // Add sizeof(Value) to skip over |this|.
+ masm.as_extdtr(IsLoad, 64, true, Offset, r4, EDtrAddr(r3, EDtrOffImm(8)));
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
+
+ // Include the newly pushed newTarget value in the frame size
+ // calculated below.
+ masm.add32(Imm32(1), r6);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Push undefined.
+ masm.moveValue(UndefinedValue(), r5, r4);
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
+ masm.as_sub(r2, r2, Imm8(1), SetCC);
+
+ masm.ma_b(&undefLoopTop, Assembler::NonZero);
+ }
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop;
+ masm.bind(&copyLoopTop);
+ masm.as_extdtr(IsLoad, 64, true, PostIndex, r4, EDtrAddr(r3, EDtrOffImm(-8)));
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
+
+ masm.as_sub(r8, r8, Imm8(1), SetCC);
+ masm.ma_b(&copyLoopTop, Assembler::NotSigned);
+ }
+
+    // Translate the frame size from values into bytes.
+ masm.as_add(r6, r6, Imm8(1));
+ masm.ma_lsl(Imm32(3), r6, r6);
+
+ // Construct sizeDescriptor.
+ masm.makeFrameDescriptor(r6, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.ma_push(r0); // actual arguments.
+ masm.ma_push(r1); // callee token
+ masm.ma_push(r6); // frame descriptor.
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andPtr(Imm32(CalleeTokenMask), r1);
+ masm.ma_ldr(DTRAddr(r1, DtrOffImm(JSFunction::offsetOfNativeOrScript())), r3);
+ masm.loadBaselineOrIonRaw(r3, r3, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(r3);
+
+ // arg1
+ // ...
+ // argN
+ // num actual args
+ // callee token
+ // sizeDescriptor <- sp now
+ // return address
+
+ // Remove the rectifier frame.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dtr(IsLoad, sp, Imm32(12), r4, scratch, PostIndex);
+ }
+
+ // arg1
+ // ...
+ // argN <- sp now; r4 <- frame descriptor
+ // num actual args
+ // callee token
+ // sizeDescriptor
+ // return address
+
+ // Discard pushed arguments.
+ masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, OpAdd);
+
+ masm.ret();
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // the stack should look like:
+ // [IonFrame]
+ // bailoutFrame.registersnapshot
+ // bailoutFrame.fpsnapshot
+ // bailoutFrame.snapshotOffset
+ // bailoutFrame.frameSize
+
+ // STEP 1a: Save our register sets to the stack so Bailout() can read
+ // everything.
+ // sp % 8 == 0
+
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ // We don't have to push everything, but this is likely easier.
+ // Setting regs_.
+ for (uint32_t i = 0; i < Registers::Total; i++)
+ masm.transferReg(Register::FromCode(i));
+ masm.finishDataTransfer();
+
+ ScratchRegisterScope scratch(masm);
+
+    // Since our data structures for stack inspection are compile-time fixed,
+ // if there are only 16 double registers, then we need to reserve
+ // space on the stack for the missing 16.
+ if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
+ int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
+ masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
+ }
+ masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
+ for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++)
+ masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
+ masm.finishFloatTransfer();
+
+ // STEP 1b: Push both the "return address" of the function call (the address
+ // of the instruction after the call that we used to get here) as
+ // well as the callee token onto the stack. The return address is
+ // currently in r14. We will proceed by loading the callee token
+ // into a sacrificial register <= r14, then pushing both onto the
+ // stack.
+
+ // Now place the frameClass onto the stack, via a register.
+ masm.ma_mov(Imm32(frameClass), r4);
+ // And onto the stack. Since the stack is full, we need to put this one past
+ // the end of the current stack. Sadly, the ABI says that we need to always
+ // point to the lowest place that has been written. The OS is free to do
+ // whatever it wants below sp.
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ // Set frameClassId_.
+ masm.transferReg(r4);
+ // Set tableOffset_; higher registers are stored at higher locations on the
+ // stack.
+ masm.transferReg(lr);
+ masm.finishDataTransfer();
+
+ masm.ma_mov(sp, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, r0);
+
+ // SP % 8 == 4
+ // STEP 1c: Call the bailout function, giving a pointer to the
+ // structure we just blitted onto the stack.
+ const int sizeOfBailoutInfo = sizeof(void*)*2;
+ masm.reserveStack(sizeOfBailoutInfo);
+ masm.mov(sp, r1);
+ masm.setupAlignedABICall();
+
+    // We used to decrement sp by another 4 here to keep alignment:
+    //   masm.as_sub(sp, sp, Imm8(4));
+    // That is no longer needed.
+
+ // Set the old (4-byte aligned) value of the sp as the first argument.
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+
+ // Sp % 8 == 0
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+ masm.ma_ldr(DTRAddr(sp, DtrOffImm(0)), r2);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(sp, Imm32(sizeOfBailoutInfo), sp, scratch);
+ }
+
+ // Common size of a bailout frame.
+ uint32_t bailoutFrameSize = 0
+ + sizeof(void*) // frameClass
+ + sizeof(RegisterDump);
+
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // Make sure the bailout frame size fits into the offset for a load.
+ masm.as_dtr(IsLoad, 32, Offset,
+ r4, DTRAddr(sp, DtrOffImm(4)));
+ // Used to be: offsetof(BailoutStack, frameSize_)
+ // This structure is no longer available to us :(
+        // We add 12 to the bailoutFrameSize because:
+        //  - sizeof(uint32_t) for the tableOffset that was pushed onto the stack;
+        //  - sizeof(uintptr_t) for the snapshotOffset;
+        //  - alignment to round the uintptr_t up to a multiple of 8 bytes.
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(sp, Imm32(bailoutFrameSize+12), sp, scratch);
+ masm.as_add(sp, sp, O2Reg(r4));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+ masm.ma_add(Imm32(// The frame that was added when we entered the most
+ // recent function.
+ frameSize
+ // The size of the "return address" that was dumped on
+ // the stack.
+ + sizeof(void*)
+ // Everything else that was pushed on the stack.
+ + bailoutFrameSize)
+ , sp, scratch);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MacroAssembler masm(cx);
+
+ {
+ // Emit the table without any pools being inserted.
+ Label bailout;
+ AutoForbidPools afp(&masm, BAILOUT_TABLE_SIZE);
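+        // Each table entry is a single 'bl' to the shared bailout thunk below; the
+        // return address the 'bl' leaves in lr identifies which entry was taken
+        // (it is saved as tableOffset_ by PushBailoutFrame).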
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++)
+ masm.ma_bl(&bailout);
+ masm.bind(&bailout);
+ }
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTable");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTable");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+    // Generate separate code for the wrapper.
+ MacroAssembler masm(cx);
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // Wrapper register set is a superset of Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // The context is the first argument; r0 is the first argument register.
+ Register cxreg = r0;
+ regs.take(cxreg);
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args] + argPadding
+ // +0 ExitFrame
+ //
+ // We're aligned to an exit frame, so link it up.
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall)
+ masm.pushReturnAddress();
+
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = r5;
+ regs.take(argsBase);
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase, scratch);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(Value));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = r4;
+ regs.take(outReg);
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ case Type_Bool:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(int32_t));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(double));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ // Values should be passed by reference, not by value, so we assert
+ // that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), MoveOp::GENERAL);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.branchIfFalseBool(r0, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(sp, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ masm.load32(Address(sp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(sp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint)
+ masm.loadDouble(Address(sp, 0), ReturnDoubleReg);
+ else
+ masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileDoubleMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ save.add(lr);
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == r1);
+ masm.movePtr(ImmPtr(cx->runtime()), r0);
+
+ masm.setupUnalignedABICall(r2);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.callWithABI(IonMarkFunction(type));
+ save.take(AnyRegister(lr));
+ save.add(pc);
+ masm.PopRegsInMask(save);
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = r0;
+ Register scratch2 = r1;
+
+ // Load BaselineFrame pointer in scratch1.
+ masm.mov(r11, scratch1);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the
+ // stub frame has a nullptr ICStub pointer, since this pointer is marked
+ // during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.push(lr);
+ masm.push(scratch1);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return (return
+ // from the JS frame). If the stub returns |false|, just return from the
+ // trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.mov(lr, pc);
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.mov(r11, sp);
+ masm.pop(r11);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(r1, r2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = r5;
+ Register scratch2 = r6;
+ Register scratch3 = r7;
+ Register scratch4 = r8;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+    // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+    // Load the frame descriptor into |scratch1| and figure out what to do based on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
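+    // The descriptor stores the frame type in its low FRAMETYPE_BITS bits and the
+    // frame size in the bits at FRAMESIZE_SHIFT and above, which is what the
+    // masking and shifting below extract.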
+ {
+ ScratchRegisterScope asmScratch(masm);
+ masm.ma_and(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2, asmScratch);
+ }
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+        // Returning directly to an IonJS frame. Store the return address to the
+        // frame in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.ma_add(StackPointer, scratch1, scratch2);
+ masm.as_add(scratch2, scratch2, Imm8(JitFrameLayout::Size()));
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ masm.ma_add(StackPointer, scratch1, scratch3);
+ Address stubFrameReturnAddr(scratch3,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+    //              CalleeToken         |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.ma_add(StackPointer, scratch1, scratch2);
+ masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.ma_add(scratch2, scratch1, scratch3);
+ masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.ma_add(scratch2, scratch1, scratch3);
+ Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.ma_add(StackPointer, scratch1, scratch2);
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.ma_add(scratch2, scratch3, scratch1);
+ masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("ProfilerExitFrameTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}
diff --git a/js/src/jit/arm/disasm/Constants-arm.cpp b/js/src/jit/arm/disasm/Constants-arm.cpp
new file mode 100644
index 000000000..2201a85e7
--- /dev/null
+++ b/js/src/jit/arm/disasm/Constants-arm.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "jit/arm/disasm/Constants-arm.h"
+
+#ifdef JS_DISASM_ARM
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+double
+Instruction::DoubleImmedVmov() const
+{
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
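+    //
+    // For example, abcd = 0111 and efgh = 0000 give the high bytes
+    // [00111111,11110000], i.e. the bit pattern of 1.0 (0x3FF0000000000000).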
+ uint64_t high16;
+ high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ double d;
+ memcpy(&d, &imm, 8);
+ return d;
+}
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
+};
+
+
+// List of alias names which can be used when referring to ARM registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {10, "sl"},
+ {11, "r11"},
+ {12, "r12"},
+ {13, "r13"},
+ {14, "r14"},
+ {15, "r15"},
+ {kNoRegister, NULL}
+};
+
+
+const char*
+Registers::Name(int reg)
+{
+ const char* result;
+ if ((0 <= reg) && (reg < kNumRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* VFPRegisters::names_[kNumVFPRegisters] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
+};
+
+
+const char*
+VFPRegisters::Name(int reg, bool is_double)
+{
+ MOZ_ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
+}
+
+
+int
+VFPRegisters::Number(const char* name, bool* is_double)
+{
+ for (int i = 0; i < kNumVFPRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ if (i < kNumVFPSingleRegisters) {
+ *is_double = false;
+ return i;
+ } else {
+ *is_double = true;
+ return i - kNumVFPSingleRegisters;
+ }
+ }
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+
+int
+Registers::Number(const char* name)
+{
+ // Look through the canonical names.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kNoRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
diff --git a/js/src/jit/arm/disasm/Constants-arm.h b/js/src/jit/arm/disasm/Constants-arm.h
new file mode 100644
index 000000000..de63f39dc
--- /dev/null
+++ b/js/src/jit/arm/disasm/Constants-arm.h
@@ -0,0 +1,745 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_arm_disasm_Constants_arm_h
+#define jit_arm_disasm_Constants_arm_h
+
+#ifdef JS_DISASM_ARM
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <string.h>
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+// Constant pool marker.
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+
+inline int
+EncodeConstantPoolLength(int length)
+{
+ MOZ_ASSERT((length & kConstantPoolLengthMaxMask) == length);
+ return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+
+inline int
+DecodeConstantPoolLength(int instr)
+{
+ MOZ_ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}
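+
+// For example, a pool of 0x123 words is marked with
+// kConstantPoolMarker | EncodeConstantPoolLength(0x123), i.e. 0xe7f012f3;
+// DecodeConstantPoolLength(0xe7f012f3) recovers 0x123.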
+
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
+
+// Number of registers in normal ARM mode.
+const int kNumRegisters = 16;
+
+// VFP support.
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 32;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+
+// PC is register 15.
+const int kPCRegister = 15;
+const int kNoRegister = -1;
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+// Values for the condition field as defined in section A3.2
+enum Condition {
+ kNoCondition = -1,
+
+ eq = 0 << 28, // Z set Equal.
+ ne = 1 << 28, // Z clear Not equal.
+ cs = 2 << 28, // C set Unsigned higher or same.
+ cc = 3 << 28, // C clear Unsigned lower.
+ mi = 4 << 28, // N set Negative.
+ pl = 5 << 28, // N clear Positive or zero.
+ vs = 6 << 28, // V set Overflow.
+ vc = 7 << 28, // V clear No overflow.
+ hi = 8 << 28, // C set, Z clear Unsigned higher.
+ ls = 9 << 28, // C clear or Z set Unsigned lower or same.
+ ge = 10 << 28, // N == V Greater or equal.
+ lt = 11 << 28, // N != V Less than.
+ gt = 12 << 28, // Z clear, N == V Greater than.
+  le = 13 << 28,                 // Z set or N != V    Less than or equal.
+ al = 14 << 28, // Always.
+
+ kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
+ kNumberOfConditions = 16,
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc // C clear Unsigned lower.
+};
+
+
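+// Negating a condition flips the low bit of the condition field:
+// eq <-> ne, cs <-> cc, mi <-> pl, vs <-> vc, hi <-> ls, ge <-> lt, gt <-> le.
+// al has no negation, hence the assertion below.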
+inline Condition
+NegateCondition(Condition cond)
+{
+ MOZ_ASSERT(cond != al);
+ return static_cast<Condition>(cond ^ ne);
+}
+
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition
+CommuteCondition(Condition cond)
+{
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cond;
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from usual 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+
+// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
+// as defined in section A3.4
+enum Opcode {
+ AND = 0 << 21, // Logical AND.
+ EOR = 1 << 21, // Logical Exclusive OR.
+ SUB = 2 << 21, // Subtract.
+ RSB = 3 << 21, // Reverse Subtract.
+ ADD = 4 << 21, // Add.
+ ADC = 5 << 21, // Add with Carry.
+ SBC = 6 << 21, // Subtract with Carry.
+ RSC = 7 << 21, // Reverse Subtract with Carry.
+ TST = 8 << 21, // Test.
+ TEQ = 9 << 21, // Test Equivalence.
+ CMP = 10 << 21, // Compare.
+ CMN = 11 << 21, // Compare Negated.
+ ORR = 12 << 21, // Logical (inclusive) OR.
+ MOV = 13 << 21, // Move.
+ BIC = 14 << 21, // Bit Clear.
+ MVN = 15 << 21 // Move Not.
+};
+
+
+// The bits for bit 7-4 for some type 0 miscellaneous instructions.
+enum MiscInstructionsBits74 {
+ // With bits 22-21 01.
+ BX = 1 << 4,
+ BXJ = 2 << 4,
+ BLX = 3 << 4,
+ BKPT = 7 << 4,
+
+ // With bits 22-21 11.
+ CLZ = 1 << 4
+};
+
+// Load and store exclusive instructions.
+
+// Bit positions.
+enum {
+ ExclusiveOpHi = 24, // Hi bit of opcode field
+ ExclusiveOpLo = 23, // Lo bit of opcode field
+ ExclusiveSizeHi = 22, // Hi bit of operand size field
+ ExclusiveSizeLo = 21, // Lo bit of operand size field
+ ExclusiveLoad = 20 // Bit indicating load
+};
+
+// Opcode bits for exclusive instructions.
+enum {
+ ExclusiveOpcode = 3
+};
+
+// Operand size, Bits(ExclusiveSizeHi,ExclusiveSizeLo).
+enum {
+ ExclusiveWord = 0,
+ ExclusiveDouble = 1,
+ ExclusiveByte = 2,
+ ExclusiveHalf = 3
+};
+
+// Instruction encoding bits and masks.
+enum {
+ H = 1 << 5, // Halfword (or byte).
+ S6 = 1 << 6, // Signed (or unsigned).
+ L = 1 << 20, // Load (or store).
+ S = 1 << 20, // Set condition code (or leave unchanged).
+ W = 1 << 21, // Writeback base register (or leave unchanged).
+ A = 1 << 21, // Accumulate in multiply instruction (or not).
+ B = 1 << 22, // Unsigned byte (or word).
+ N = 1 << 22, // Long (or short).
+ U = 1 << 23, // Positive (or negative) offset/index.
+ P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
+ I = 1 << 25, // Immediate shifter operand (or not).
+ B0 = 1 << 0,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+
+ // Instruction bit masks.
+ kCondMask = 15 << 28,
+ kALUMask = 0x6f << 21,
+ kRdMask = 15 << 12, // In str instruction.
+ kCoprocessorMask = 15 << 8,
+ kOpCodeMask = 15 << 21, // In data-processing instructions.
+ kImm24Mask = (1 << 24) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
+ kOff12Mask = (1 << 12) - 1,
+ kOff8Mask = (1 << 8) - 1
+};
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+
+// Status register selection.
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum ShiftOp {
+ LSL = 0 << 5, // Logical shift left.
+ LSR = 1 << 5, // Logical shift right.
+ ASR = 2 << 5, // Arithmetic shift right.
+ ROR = 3 << 5, // Rotate right.
+
+ // RRX is encoded as ROR with shift_imm == 0.
+ // Use a special code to make the distinction. The RRX ShiftOp is only used
+ // as an argument, and will never actually be encoded. The Assembler will
+ // detect it and emit the correct ROR shift operand with shift_imm == 0.
+ RRX = -1,
+ kNumberOfShifts = 4
+};
+
+
+// Status register fields.
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values).
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode.
+enum AddrMode {
+ // Bit encoding P U W.
+ Offset = (8|4|0) << 21, // Offset (without writeback to base).
+ PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
+ PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
+ NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
+ NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
+ NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+ // Bit encoding P U W .
+ da = (0|0|0) << 21, // Decrement after.
+ ia = (0|4|0) << 21, // Increment after.
+ db = (8|0|0) << 21, // Decrement before.
+ ib = (8|4|0) << 21, // Increment before.
+ da_w = (0|0|1) << 21, // Decrement after with writeback to base.
+ ia_w = (0|4|1) << 21, // Increment after with writeback to base.
+ db_w = (8|0|1) << 21, // Decrement before with writeback to base.
+ ib_w = (8|4|1) << 21, // Increment before with writeback to base.
+
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0|0|0) << 21, // Decrement after.
+ ia_x = (0|4|0) << 21, // Increment after.
+ db_x = (8|0|0) << 21, // Decrement before.
+ ib_x = (8|4|0) << 21, // Increment before.
+
+ kBlockAddrModeMask = (8|4|1) << 21
+};
+
+
+// Coprocessor load/store operand size.
+enum LFlag {
+ Long = 1 << 22, // Long load/store coprocessor.
+ Short = 0 << 22 // Short load/store coprocessor.
+};
+
+
+// NEON data type
+enum NeonDataType {
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
+ NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonDataTypeSizeMask = 0x7,
+ NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType {
+ nlt_1 = 0x7,
+ nlt_2 = 0xA,
+ nlt_3 = 0x6,
+ nlt_4 = 0x2
+};
+
+enum NeonSize {
+ Neon8 = 0x0,
+ Neon16 = 0x1,
+ Neon32 = 0x2,
+ Neon64 = 0x3
+};
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the ARM
+// simulator.
+// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ // transition to C code
+ kCallRtRedirected = 0x10,
+ // break point
+ kBreakpoint = 0x20,
+ // stop
+ kStopCode = 1 << 23
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
+
+
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision {
+ kSinglePrecision = 0,
+ kDoublePrecision = 1
+};
+
+
+// VFP FPSCR constants.
+enum VFPConversionMode {
+ kFPSCRRounding = 0,
+ kDefaultRoundToZero = 1
+};
+
+// This mask does not include the "inexact" or "input denormal" cumulative
+// exception flags, because we usually don't want to check for them.
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
+
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
+
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
+};
+
+const uint32_t kVFPRoundingModeMask = 3 << 22;
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint
+NegateHint(Hint ignored)
+{
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
+// return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
+ kPCReadOffset = 8
+ };
+
+ // Helper macro to define static accessors.
+ // We use the cast to char* trick to bypass the strict anti-aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field's value out of the instruction bits.
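+  // E.g. Bits(11, 8) masks with (2 << (11 - 8)) - 1 == 0xf and returns the
+  // 4-bit field at bits 11..8.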
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) {
+ return (instr >> nr) & 1;
+ }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+
+ // Accessors for the different named fields used in the ARM encoding.
+  // The naming of these accessors corresponds to figure A3-1.
+  //
+  // Two kinds of accessors are declared:
+ // - <Name>Field() will return the raw field, i.e. the field's bits at their
+ // original place in the instruction encoding.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ // 0xC0810002 ConditionField(instr) will return 0xC0000000.
+ // - <Name>Value() will return the field value, shifted back to bit 0.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //     0xC0810002 ConditionValue(instr) will return 0xC.
+
+
+ // Generally applicable fields
+ inline Condition ConditionValue() const {
+ return static_cast<Condition>(Bits(31, 28));
+ }
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(BitField(31, 28));
+ }
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
+
+ inline int TypeValue() const { return Bits(27, 25); }
+ inline int SpecialValue() const { return Bits(27, 23); }
+
+ inline int RnValue() const { return Bits(19, 16); }
+ DECLARE_STATIC_ACCESSOR(RnValue);
+ inline int RdValue() const { return Bits(15, 12); }
+ DECLARE_STATIC_ACCESSOR(RdValue);
+
+ inline int CoprocessorValue() const { return Bits(11, 8); }
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int VnValue() const { return Bits(19, 16); }
+ inline int VmValue() const { return Bits(3, 0); }
+ inline int VdValue() const { return Bits(15, 12); }
+ inline int NValue() const { return Bit(7); }
+ inline int MValue() const { return Bit(5); }
+ inline int DValue() const { return Bit(22); }
+ inline int RtValue() const { return Bits(15, 12); }
+ inline int PValue() const { return Bit(24); }
+ inline int UValue() const { return Bit(23); }
+ inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
+ inline int Opc2Value() const { return Bits(19, 16); }
+ inline int Opc3Value() const { return Bits(7, 6); }
+ inline int SzValue() const { return Bit(8); }
+ inline int VLValue() const { return Bit(20); }
+ inline int VCValue() const { return Bit(8); }
+ inline int VAValue() const { return Bits(23, 21); }
+ inline int VBValue() const { return Bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 16, 7);
+ }
+ inline int VFPMRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 0, 5);
+ }
+ inline int VFPDRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 12, 22);
+ }
+
+ // Fields used in Data processing instructions
+ inline int OpcodeValue() const {
+ return static_cast<Opcode>(Bits(24, 21));
+ }
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(BitField(24, 21));
+ }
+ inline int SValue() const { return Bit(20); }
+ // with register
+ inline int RmValue() const { return Bits(3, 0); }
+ DECLARE_STATIC_ACCESSOR(RmValue);
+ inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
+ inline ShiftOp ShiftField() const {
+ return static_cast<ShiftOp>(BitField(6, 5));
+ }
+ inline int RegShiftValue() const { return Bit(4); }
+ inline int RsValue() const { return Bits(11, 8); }
+ inline int ShiftAmountValue() const { return Bits(11, 7); }
+ // with immediate
+ inline int RotateValue() const { return Bits(11, 8); }
+ DECLARE_STATIC_ACCESSOR(RotateValue);
+ inline int Immed8Value() const { return Bits(7, 0); }
+ DECLARE_STATIC_ACCESSOR(Immed8Value);
+ inline int Immed4Value() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtValue() const {
+ return Immed4Value() << 12 | Offset12Value(); }
+ DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
+
+ // Fields used in Load/Store instructions
+ inline int PUValue() const { return Bits(24, 23); }
+ inline int PUField() const { return BitField(24, 23); }
+ inline int BValue() const { return Bit(22); }
+ inline int WValue() const { return Bit(21); }
+ inline int LValue() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
+ inline int Offset12Value() const { return Bits(11, 0); }
+ // multiple
+ inline int RlistValue() const { return Bits(15, 0); }
+ // extra loads and stores
+ inline int SignValue() const { return Bit(6); }
+ inline int HValue() const { return Bit(5); }
+ inline int ImmedHValue() const { return Bits(11, 8); }
+ inline int ImmedLValue() const { return Bits(3, 0); }
+
+ // Fields used in Branch instructions
+ inline int LinkValue() const { return Bit(24); }
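+  // The left/right shift pair below sign-extends the signed 24-bit branch
+  // offset held in bits 23:0.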
+ inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SvcValue() const {
+ return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and stores,
+ // as well as multiplications).
+ inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
+
+ // Test for miscellaneous instructions encodings of type 0 instructions.
+ inline bool IsMiscType0() const { return (Bit(24) == 1)
+ && (Bit(23) == 0)
+ && (Bit(20) == 0)
+ && ((Bit(7) == 0)); }
+
+ // Test for a nop instruction, which falls under type 1.
+ inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
+ // Test for a stop instruction.
+ inline bool IsStop() const {
+ return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
+ }
+
+ // Special accessors that test for existence of a value.
+ inline bool HasS() const { return SValue() == 1; }
+ inline bool HasB() const { return BValue() == 1; }
+ inline bool HasW() const { return WValue() == 1; }
+ inline bool HasL() const { return LValue() == 1; }
+ inline bool HasU() const { return UValue() == 1; }
+ inline bool HasSign() const { return SignValue() == 1; }
+ inline bool HasH() const { return HValue() == 1; }
+ inline bool HasLink() const { return LinkValue() == 1; }
+
+ // Decoding the double immediate in the vmov instruction.
+ double DoubleImmedVmov() const;
+
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(uint8_t* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+
+ private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four
+ // bit specifier. one_bit is the position of the additional single bit
+ // specifier.
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision) {
+ return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+ }
+ return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+ }
+
+ // We need to prevent the creation of instances of class Instruction.
+ Instruction() = delete;
+ Instruction(const Instruction&) = delete;
+ void operator=(const Instruction&) = delete;
+};
+
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between VFP register numbers and names.
+class VFPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg, bool is_double);
+
+ // Lookup the register number for the name provided.
+ // Set flag pointed by is_double to true if register
+ // is double-precision.
+ static int Number(const char* name, bool* is_double);
+
+ private:
+ static const char* names_[kNumVFPRegisters];
+};
+
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
+
+#endif // jit_arm_disasm_Constants_arm_h
diff --git a/js/src/jit/arm/disasm/Disasm-arm.cpp b/js/src/jit/arm/disasm/Disasm-arm.cpp
new file mode 100644
index 000000000..8bd7bff0c
--- /dev/null
+++ b/js/src/jit/arm/disasm/Disasm-arm.cpp
@@ -0,0 +1,2173 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// disasm::NameConverter converter;
+// disasm::Disassembler d(converter);
+// for (uint8_t* pc = begin; pc < end;) {
+// disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+// uint8_t* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include "jit/arm/disasm/Disasm-arm.h"
+
+#ifdef JS_DISASM_ARM
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "jit/arm/disasm/Constants-arm.h"
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+
+// Helper function for printing to a Vector.
+static int
+MOZ_FORMAT_PRINTF(2, 3)
+SNPrintF(V8Vector<char> str, const char* format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ int result = vsnprintf(str.start(), str.length(), format, args);
+ va_end(args);
+ return result;
+}
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ V8Vector<char> out_buffer)
+ : converter_(converter),
+ out_buffer_(out_buffer),
+ out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+  // Writes one disassembled instruction into the output buffer (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(uint8_t* instruction);
+
+ static bool IsConstantPoolAt(uint8_t* instr_ptr);
+ static int ConstantPoolSizeAt(uint8_t* instr_ptr);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintSRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatVFPRegister(Instruction* instr, const char* format);
+ void PrintMovwMovt(Instruction* instr);
+ int FormatVFPinstruction(Instruction* instr, const char* format);
+ void PrintCondition(Instruction* instr);
+ void PrintShiftRm(Instruction* instr);
+ void PrintShiftImm(Instruction* instr);
+ void PrintShiftSat(Instruction* instr);
+ void PrintPU(Instruction* instr);
+ void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ void FormatNeonList(int Vd, int type);
+ void FormatNeonMemory(int Rn, int align, int Rm);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+
+  // Each of these functions decodes one particular instruction type,
+  // identified by a 3-bit field in the instruction encoding.
+ // Types 0 and 1 are combined as they are largely the same except for the way
+ // they interpret the shifter operand.
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
+ // Type 7 includes special Debugger instructions.
+ int DecodeType7(Instruction* instr);
+ // For VFP support.
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
+
+ void DecodeSpecialCondition(Instruction* instr);
+
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ V8Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ // Disallow copy and assign.
+ Decoder(const Decoder&) = delete;
+ void operator=(const Decoder&) = delete;
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the character ch to the output buffer.
+void
+Decoder::PrintChar(const char ch)
+{
+ out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the string str to the output buffer.
+void
+Decoder::Print(const char* str)
+{
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < int(out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// These condition names are defined to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* const cond_names[kNumberOfConditions] = {
+ "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
+ "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+};
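+
+// Note that index 14 (the AL, always-execute condition) maps to the empty
+// string, so unconditional instructions are printed with no condition suffix.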
+
+
+// Print the condition guarding the instruction.
+void
+Decoder::PrintCondition(Instruction* instr)
+{
+ Print(cond_names[instr->ConditionValue()]);
+}
+
+
+// Print the register name according to the active name converter.
+void
+Decoder::PrintRegister(int reg)
+{
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// Print the VFP S register name according to the active name converter.
+void
+Decoder::PrintSRegister(int reg)
+{
+ Print(VFPRegisters::Name(reg, false));
+}
+
+
+// Print the VFP D register name according to the active name converter.
+void
+Decoder::PrintDRegister(int reg)
+{
+ Print(VFPRegisters::Name(reg, true));
+}
+
+
+// These shift names are defined to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* const shift_names[kNumberOfShifts] = {
+ "lsl", "lsr", "asr", "ror"
+};
+
+
+// Print the register shift operands for the instruction. Generally used for
+// data processing instructions.
+void
+Decoder::PrintShiftRm(Instruction* instr)
+{
+ ShiftOp shift = instr->ShiftField();
+ int shift_index = instr->ShiftValue();
+ int shift_amount = instr->ShiftAmountValue();
+ int rm = instr->RmValue();
+
+ PrintRegister(rm);
+
+ if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ // Special case for using rm only.
+ return;
+ }
+ if (instr->RegShiftValue() == 0) {
+ // by immediate
+ if ((shift == ROR) && (shift_amount == 0)) {
+ Print(", RRX");
+ return;
+ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+ shift_amount = 32;
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift_index],
+ shift_amount);
+ } else {
+ // by register
+ int rs = instr->RsValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift_index]);
+ PrintRegister(rs);
+ }
+}
+
+
+static inline uint32_t
+RotateRight32(uint32_t value, uint32_t shift)
+{
+ if (shift == 0) return value;
+ return (value >> shift) | (value << (32 - shift));
+}
+
+
+// Print the immediate operand for the instruction. Generally used for data
+// processing instructions.
+void
+Decoder::PrintShiftImm(Instruction* instr)
+{
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
+ int imm = RotateRight32(immed8, rotate);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
+}
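+
+// Worked example (illustrative): with a rotate field of 0xE and immed8 of
+// 0x3F, rotate is 28 and RotateRight32(0x3F, 28) == 0x3F0, so the operand is
+// printed as "#1008".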
+
+
+// Print the optional shift and immediate used by saturating instructions.
+void
+Decoder::PrintShiftSat(Instruction* instr)
+{
+ int shift = instr->Bits(11, 7);
+ if (shift > 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
+ }
+}
+
+
+// Print PU formatting to reduce complexity of FormatOption.
+void
+Decoder::PrintPU(Instruction* instr)
+{
+ switch (instr->PUField()) {
+ case da_x: {
+ Print("da");
+ break;
+ }
+ case ia_x: {
+ Print("ia");
+ break;
+ }
+ case db_x: {
+ Print("db");
+ break;
+ }
+ case ib_x: {
+ Print("ib");
+ break;
+ }
+ default: {
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void
+Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc)
+{
+ switch (svc) {
+ case kCallRtRedirected:
+ Print("call rt redirected");
+ return;
+ case kBreakpoint:
+ Print("breakpoint");
+ return;
+ default:
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d - 0x%x",
+ svc & kStopCodeMask,
+ svc & kStopCodeMask);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ svc);
+ }
+ return;
+ }
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int
+Decoder::FormatRegister(Instruction* instr, const char* format)
+{
+ MOZ_ASSERT(format[0] == 'r');
+ if (format[1] == 'n') { // 'rn: Rn register
+ int reg = instr->RnValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: Rd register
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 's') { // 'rs: Rs register
+ int reg = instr->RsValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') { // 'rm: Rm register
+ int reg = instr->RmValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'rt: Rt register
+ int reg = instr->RtValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'l') {
+ // 'rlist: register list for load and store multiple instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "rlist"));
+ int rlist = instr->RlistValue();
+ int reg = 0;
+ Print("{");
+ // Print register list in ascending order, by scanning the bit mask.
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ PrintRegister(reg);
+ if ((rlist >> 1) != 0) {
+ Print(", ");
+ }
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ Print("}");
+ return 5;
+ }
+ MOZ_CRASH();
+ return -1;
+}
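+
+// Worked example (illustrative): a register list mask of 0b1011 is printed by
+// the 'rlist case above as "{r0, r1, r3}".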
+
+
+// Handle all VFP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int
+Decoder::FormatVFPRegister(Instruction* instr, const char* format)
+{
+ MOZ_ASSERT((format[0] == 'S') || (format[0] == 'D'));
+
+ VFPRegPrecision precision =
+ format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
+
+ int retval = 2;
+ int reg = -1;
+ if (format[1] == 'n') {
+ reg = instr->VFPNRegValue(precision);
+ } else if (format[1] == 'm') {
+ reg = instr->VFPMRegValue(precision);
+ } else if (format[1] == 'd') {
+ if ((instr->TypeValue() == 7) &&
+ (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
+ if (format[2] == '+') {
+ int immed8 = instr->Immed8Value();
+ if (format[0] == 'S') reg += immed8 - 1;
+ if (format[0] == 'D') reg += (immed8 / 2 - 1);
+ }
+ if (format[2] == '+') retval = 3;
+ } else {
+ MOZ_CRASH();
+ }
+
+ if (precision == kSinglePrecision) {
+ PrintSRegister(reg);
+ } else {
+ PrintDRegister(reg);
+ }
+
+ return retval;
+}
+
+
+int
+Decoder::FormatVFPinstruction(Instruction* instr, const char* format)
+{
+ Print(format);
+ return 0;
+}
+
+
+void
+Decoder::FormatNeonList(int Vd, int type)
+{
+ if (type == nlt_1) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d}", Vd);
+ } else if (type == nlt_2) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d}", Vd, Vd + 1);
+ } else if (type == nlt_3) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ } else if (type == nlt_4) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ }
+}
+
+
+void
+Decoder::FormatNeonMemory(int Rn, int align, int Rm)
+{
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "[r%d", Rn);
+ if (align != 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ":%d", (1 << align) << 6);
+ }
+ if (Rm == 15) {
+ Print("]");
+ } else if (Rm == 13) {
+ Print("]!");
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "], r%d", Rm);
+ }
+}
+
+
+// Print the movw or movt instruction.
+void
+Decoder::PrintMovwMovt(Instruction* instr)
+{
+ int imm = instr->ImmedMovwMovtValue();
+ int rd = instr->RdValue();
+ PrintRegister(rd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller). FormatOption returns the number of
+// characters that were consumed from the formatting string.
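+// For example (illustrative, based on the format strings used below): with the
+// format "mov'cond's 'rd, 'shift_op", an unconditional MOVS r0, r1 comes out
+// as "movs r0, r1": 'cond expands to the empty string for the AL condition,
+// 's to "s", 'rd to "r0", and 'shift_op to "r1".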
+int Decoder::FormatOption(Instruction* instr, const char* format)
+{
+ switch (format[0]) {
+ case 'a': { // 'a: accumulate multiplies
+ if (instr->Bit(21) == 0) {
+ Print("ul");
+ } else {
+ Print("la");
+ }
+ return 1;
+ }
+ case 'b': { // 'b: byte loads or stores
+ if (instr->HasB()) {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'c': { // 'cond: conditional execution
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "cond"));
+ PrintCondition(instr);
+ return 4;
+ }
+ case 'd': { // 'd: vmov double immediate.
+ double d = instr->DoubleImmedVmov();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
+ return 1;
+ }
+ case 'f': { // 'f: bitfield instructions - v7 and above.
+ uint32_t lsbit = instr->Bits(11, 7);
+ uint32_t width = instr->Bits(20, 16) + 1;
+ if (instr->Bit(21) == 0) {
+ // BFC/BFI:
+        // Bits 20-16 represent the most-significant bit. Convert to width.
+ width -= lsbit;
+ MOZ_ASSERT(width > 0);
+ }
+ MOZ_ASSERT((width + lsbit) <= 32);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
+ return 1;
+ }
+ case 'h': { // 'h: halfword operation for extra loads and stores
+ if (instr->HasH()) {
+ Print("h");
+ } else {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'i': { // 'i: immediate value from adjacent bits.
+ // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
+ int width = (format[3] - '0') * 10 + (format[4] - '0');
+ int lsb = (format[6] - '0') * 10 + (format[7] - '0');
+
+ MOZ_ASSERT((width >= 1) && (width <= 32));
+ MOZ_ASSERT((lsb >= 0) && (lsb <= 31));
+ MOZ_ASSERT((width + lsb) <= 32);
+
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ instr->Bits(width + lsb - 1, lsb));
+ return 8;
+ }
+ case 'l': { // 'l: branch and link
+ if (instr->HasLink()) {
+ Print("l");
+ }
+ return 1;
+ }
+ case 'm': {
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "memop"));
+ if (instr->HasL()) {
+ Print("ldr");
+ } else {
+ if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+ (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+ if (instr->Bit(5) == 1) {
+ Print("strd");
+ } else {
+ Print("ldrd");
+ }
+ return 5;
+ }
+ Print("str");
+ }
+ return 5;
+ }
+ // 'msg: for simulator break instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "msg"));
+ uint8_t* str =
+ reinterpret_cast<uint8_t*>(instr->InstructionBits() & 0x0fffffff);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
+ return 3;
+ }
+ case 'o': {
+ if ((format[3] == '1') && (format[4] == '2')) {
+ // 'off12: 12-bit offset for load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off12"));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Value());
+ return 5;
+ } else if (format[3] == '0') {
+ // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ (instr->Bits(19, 8) << 4) +
+ instr->Bits(3, 0));
+ return 15;
+ }
+ // 'off8: 8-bit offset for extra load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off8"));
+ int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
+ return 4;
+ }
+ case 'p': { // 'pu: P and U bits for load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "pu"));
+ PrintPU(instr);
+ return 2;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
+ if (format[6] == 'o') { // 'shift_op
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+ if (instr->TypeValue() == 0) {
+ PrintShiftRm(instr);
+ } else {
+ MOZ_ASSERT(instr->TypeValue() == 1);
+ PrintShiftImm(instr);
+ }
+ return 8;
+ } else if (format[6] == 's') { // 'shift_sat.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
+ PrintShiftSat(instr);
+ return 9;
+ } else { // 'shift_rm
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+ PrintShiftRm(instr);
+ return 8;
+ }
+ } else if (format[1] == 'v') { // 'svc
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "svc"));
+ PrintSoftwareInterrupt(instr->SvcValue());
+ return 3;
+ } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "sign"));
+ if (instr->HasSign()) {
+ Print("s");
+ }
+ return 4;
+ }
+ // 's: S field of data processing instructions
+ if (instr->HasS()) {
+ Print("s");
+ }
+ return 1;
+ }
+ case 't': { // 'target: target of branch instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "target"));
+ int off = (instr->SImmed24Value() << 2) + 8;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(
+ reinterpret_cast<uint8_t*>(instr) + off));
+ return 6;
+ }
+ case 'u': { // 'u: signed or unsigned multiplies
+ // The manual gets the meaning of bit 22 backwards in the multiply
+ // instruction overview on page A3.16.2. The instructions that
+ // exist in u and s variants are the following:
+ // smull A4.1.87
+ // umull A4.1.129
+ // umlal A4.1.128
+ // smlal A4.1.76
+      // For these, 0 means u and 1 means s, as can be seen on their individual
+      // pages. The other 18 mul instructions have the bit set or unset in
+ // arbitrary ways that are unrelated to the signedness of the instruction.
+ // None of these 18 instructions exist in both a 'u' and an 's' variant.
+
+ if (instr->Bit(22) == 0) {
+ Print("u");
+ } else {
+ Print("s");
+ }
+ return 1;
+ }
+ case 'v': {
+ return FormatVFPinstruction(instr, format);
+ }
+ case 'S':
+ case 'D': {
+ return FormatVFPRegister(instr, format);
+ }
+ case 'w': { // 'w: W field of load and store instructions
+ if (instr->HasW()) {
+ Print("!");
+ }
+ return 1;
+ }
+ default: {
+ MOZ_CRASH();
+ break;
+ }
+ }
+ MOZ_CRASH();
+ return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void
+Decoder::Format(Instruction* instr, const char* format)
+{
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+  if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" for the instruction bits.
+void
+Decoder::Unknown(Instruction* instr)
+{
+ Format(instr, "unknown");
+}
+
+
+void
+Decoder::DecodeType01(Instruction* instr)
+{
+ int type = instr->TypeValue();
+ if ((type == 0) && instr->IsSpecialType0()) {
+ // multiply instruction or extra loads and stores
+ if (instr->Bits(7, 4) == 9) {
+ if (instr->Bit(24) == 0) {
+ // multiply instructions
+ if (instr->Bit(23) == 0) {
+ if (instr->Bit(21) == 0) {
+ // The MUL instruction description (A 4.1.33) refers to Rd as being
+ // the destination for the operation, but it confusingly uses the
+ // Rn field to encode it.
+ Format(instr, "mul'cond's 'rn, 'rm, 'rs");
+ } else {
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ } else {
+ // The MLS instruction description (A 4.1.29) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ }
+ }
+ } else {
+ // The signed/long multiply instructions use the terms RdHi and RdLo
+ // when referring to the target registers. They are mapped to the Rn
+ // and Rd fields as follows:
+ // RdLo == Rd field
+ // RdHi == Rn field
+ // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
+ Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
+ }
+ } else {
+ if (instr->Bits(ExclusiveOpHi, ExclusiveOpLo) == ExclusiveOpcode) {
+ if (instr->Bit(ExclusiveLoad) == 1) {
+ switch (instr->Bits(ExclusiveSizeHi, ExclusiveSizeLo)) {
+ case ExclusiveWord:
+ Format(instr, "ldrex'cond 'rt, ['rn]");
+ break;
+ case ExclusiveDouble:
+ Format(instr, "ldrexd'cond 'rt, ['rn]");
+ break;
+ case ExclusiveByte:
+ Format(instr, "ldrexb'cond 'rt, ['rn]");
+ break;
+ case ExclusiveHalf:
+ Format(instr, "ldrexh'cond 'rt, ['rn]");
+ break;
+ }
+ } else {
+ // The documentation names the low four bits of the
+ // store-exclusive instructions "Rt" but canonically
+ // for disassembly they are really "Rm".
+ switch (instr->Bits(ExclusiveSizeHi, ExclusiveSizeLo)) {
+ case ExclusiveWord:
+ Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveDouble:
+ Format(instr, "strexd'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveByte:
+ Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveHalf:
+ Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+ break;
+ }
+ }
+ } else {
+ Unknown(instr);
+ }
+ }
+ } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
+ // ldrd, strd
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
+ }
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
+ }
+ break;
+ }
+ case ib_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ } else {
+ // extra load/store instructions
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+ }
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+ }
+ break;
+ }
+ case ib_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ return;
+ }
+ } else if ((type == 0) && instr->IsMiscType0()) {
+ if (instr->Bits(22, 21) == 1) {
+ switch (instr->BitField(7, 4)) {
+ case BX:
+ Format(instr, "bx'cond 'rm");
+ break;
+ case BLX:
+ Format(instr, "blx'cond 'rm");
+ break;
+ case BKPT:
+ Format(instr, "bkpt 'off0to3and8to19");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
+ } else if (instr->Bits(22, 21) == 3) {
+ switch (instr->BitField(7, 4)) {
+ case CLZ:
+ Format(instr, "clz'cond 'rd, 'rm");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ } else if ((type == 1) && instr->IsNopType1()) {
+ Format(instr, "nop'cond");
+ } else {
+ switch (instr->OpcodeField()) {
+ case AND: {
+ Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case EOR: {
+ Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SUB: {
+ Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSB: {
+ Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADD: {
+ Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADC: {
+ Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SBC: {
+ Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSC: {
+ Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case TST: {
+ if (instr->HasS()) {
+ Format(instr, "tst'cond 'rn, 'shift_op");
+ } else {
+ Format(instr, "movw'cond 'mw");
+ }
+ break;
+ }
+ case TEQ: {
+ if (instr->HasS()) {
+ Format(instr, "teq'cond 'rn, 'shift_op");
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ }
+ case CMP: {
+ if (instr->HasS()) {
+ Format(instr, "cmp'cond 'rn, 'shift_op");
+ } else {
+ Format(instr, "movt'cond 'mw");
+ }
+ break;
+ }
+ case CMN: {
+ if (instr->HasS()) {
+ Format(instr, "cmn'cond 'rn, 'shift_op");
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ }
+ case ORR: {
+ Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MOV: {
+ Format(instr, "mov'cond's 'rd, 'shift_op");
+ break;
+ }
+ case BIC: {
+ Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MVN: {
+ Format(instr, "mvn'cond's 'rd, 'shift_op");
+ break;
+ }
+ default: {
+ // The Opcode field is a 4-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ }
+}
+
+
+void
+Decoder::DecodeType2(Instruction* instr)
+{
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ return;
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+ break;
+ }
+ case ia_x: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ return;
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+ break;
+ }
+ case db_x: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ break;
+ }
+ case ib_x: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+
+void
+Decoder::DecodeType3(Instruction* instr)
+{
+ switch (instr->PUField()) {
+ case da_x: {
+ VERIFY(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(4) == 0) {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+ } else {
+ if (instr->Bits(11, 7) == 0) {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+ } else {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 1:
+ MOZ_CRASH();
+ break;
+ case 2:
+ MOZ_CRASH();
+ break;
+ case 3:
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ break;
+ }
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ MOZ_CRASH();
+ break;
+ case 1:
+ if (instr->Bits(9, 6) == 1) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb16'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 3:
+ if ((instr->Bits(9, 6) == 1)) {
+ if ((instr->Bit(20) == 0)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bits(22, 20) == 0x5) {
+ if (instr->Bits(7, 4) == 0x1) {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "smmul'cond 'rn, 'rm, 'rs");
+ } else {
+ // SMMLA (in V8 notation matching ARM ISA format)
+ Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
+ }
+ break;
+ }
+ }
+ bool FLAG_enable_sudiv = true; // Flag doesn't exist in our engine.
+ if (FLAG_enable_sudiv) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ if (instr->Bit(21) == 0x1) {
+ // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+ } else {
+ // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+ }
+ break;
+ }
+ }
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+ break;
+ }
+ case ib_x: {
+ if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = widthminus1 + lsbit;
+ if (msbit <= 31) {
+ if (instr->Bit(22)) {
+ Format(instr, "ubfx'cond 'rd, 'rm, 'f");
+ } else {
+ Format(instr, "sbfx'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ if (instr->RmValue() == 15) {
+ Format(instr, "bfc'cond 'rd, 'f");
+ } else {
+ Format(instr, "bfi'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+
+void
+Decoder::DecodeType4(Instruction* instr)
+{
+ if (instr->Bit(22) != 0) {
+ // Privileged mode currently not supported.
+ Unknown(instr);
+ } else {
+ if (instr->HasL()) {
+ Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ } else {
+ Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ }
+ }
+}
+
+
+void
+Decoder::DecodeType5(Instruction* instr)
+{
+ Format(instr, "b'l'cond 'target");
+}
+
+
+void
+Decoder::DecodeType6(Instruction* instr)
+{
+ DecodeType6CoprocessorIns(instr);
+}
+
+
+int
+Decoder::DecodeType7(Instruction* instr)
+{
+ if (instr->Bit(24) == 1) {
+ if (instr->SvcValue() >= kStopCode) {
+ Format(instr, "stop'cond 'svc");
+ // Also print the stop message. Its address is encoded
+ // in the following 4 bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n %p %08x stop message: %s",
+ reinterpret_cast<void*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<uint32_t*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
+ // We have decoded 2 * Instruction::kInstrSize bytes.
+ return 2 * Instruction::kInstrSize;
+ } else {
+ Format(instr, "svc'cond 'svc");
+ }
+ } else {
+ DecodeTypeVFP(instr);
+ }
+ return Instruction::kInstrSize;
+}
+
+
+// void Decoder::DecodeTypeVFP(Instruction* instr)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
+// Dd = vabs(Dm)
+// Sd = vabs(Sm)
+// Dd = vneg(Dm)
+// Sd = vneg(Sm)
+// Dd = vadd(Dn, Dm)
+// Sd = vadd(Sn, Sm)
+// Dd = vsub(Dn, Dm)
+// Sd = vsub(Sn, Sm)
+// Dd = vmul(Dn, Dm)
+// Sd = vmul(Sn, Sm)
+// Dd = vmla(Dn, Dm)
+// Sd = vmla(Sn, Sm)
+// Dd = vmls(Dn, Dm)
+// Sd = vmls(Sn, Sm)
+// Dd = vdiv(Dn, Dm)
+// Sd = vdiv(Sn, Sm)
+// vcmp(Dd, Dm)
+// vcmp(Sd, Sm)
+// Dd = vsqrt(Dm)
+// Sd = vsqrt(Sm)
+// vmrs
+// vmsr
+void
+Decoder::DecodeTypeVFP(Instruction* instr)
+{
+ VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+ VERIFY(instr->Bits(11, 9) == 0x5);
+
+ if (instr->Bit(4) == 0) {
+ if (instr->Opc1Value() == 0x7) {
+ // Other data processing instructions
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmov'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vmov'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
+ // vabs
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vabs'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vabs'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+ // vneg
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vneg'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vneg'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
+ DecodeVCVTBetweenDoubleAndSingle(instr);
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
+ DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", fraction_bits);
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
+ DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
+ DecodeVCMP(instr);
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vsqrt'cond.f32 'Sd, 'Sm");
+ }
+ } else if (instr->Opc3Value() == 0x0) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmov'cond.f64 'Dd, 'd");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
+ // vrintz - round towards zero (truncate)
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintz'cond.f32.f32 'Sd, 'Sm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() == 0x1) {
+ if (instr->Opc3Value() & 0x1) {
+ Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
+ }
+ } else {
+ if (instr->Opc3Value() & 0x1) {
+ Format(instr, "vsub'cond.f32 'Sd, 'Sn, 'Sm");
+ } else {
+ Format(instr, "vadd'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ }
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmul'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmla'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmls'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vdiv'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0)) {
+ DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
+ } else if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
+ (instr->Bits(19, 16) == 0x1)) {
+ if (instr->VLValue() == 0) {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "vmsr'cond FPSCR, APSR");
+ } else {
+ Format(instr, "vmsr'cond FPSCR, 'rt");
+ }
+ } else {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "vmrs'cond APSR, FPSCR");
+ } else {
+ Format(instr, "vmrs'cond 'rt, FPSCR");
+ }
+ }
+ }
+ }
+}
+
+
+void
+Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr)
+{
+ VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
+
+ bool to_arm_register = (instr->VLValue() == 0x1);
+
+ if (to_arm_register) {
+ Format(instr, "vmov'cond 'rt, 'Sn");
+ } else {
+ Format(instr, "vmov'cond 'Sn, 'rt");
+ }
+}
+
+
+void
+Decoder::DecodeVCMP(Instruction* instr)
+{
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
+
+ // Comparison.
+ bool dp_operation = (instr->SzValue() == 1);
+ bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
+
+ if (dp_operation && !raise_exception_for_qnan) {
+ if (instr->Opc2Value() == 0x4) {
+ Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
+ } else if (instr->Opc2Value() == 0x5) {
+ Format(instr, "vcmp'cond.f64 'Dd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
+ } else if (!raise_exception_for_qnan) {
+ if (instr->Opc2Value() == 0x4) {
+ Format(instr, "vcmp'cond.f32 'Sd, 'Sm");
+ } else if (instr->Opc2Value() == 0x5) {
+ Format(instr, "vcmp'cond.f32 'Sd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
+void
+Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr)
+{
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+
+ bool double_to_single = (instr->SzValue() == 1);
+
+ if (double_to_single) {
+ Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
+ } else {
+ Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
+ }
+}
+
+
+void
+Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr)
+{
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
+
+ bool to_integer = (instr->Bit(18) == 1);
+ bool dp_operation = (instr->SzValue() == 1);
+ if (to_integer) {
+ bool unsigned_integer = (instr->Bit(16) == 0);
+
+ if (dp_operation) {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
+ } else {
+ Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
+ }
+ } else {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
+ }
+ }
+ } else {
+ bool unsigned_integer = (instr->Bit(7) == 0);
+
+ if (dp_operation) {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
+ }
+ } else {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
+ }
+ }
+ }
+}
+
+
+// Decode Type 6 coprocessor instructions.
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
+void
+Decoder::DecodeType6CoprocessorIns(Instruction* instr)
+{
+ VERIFY(instr->TypeValue() == 6);
+
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
+ case 0x8:
+ case 0xA:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
+ }
+ break;
+ case 0xC:
+ case 0xE:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ }
+ break;
+ }
+ default:
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->HasL()) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ } else {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ }
+ break;
+ case 0x8:
+ case 0xA:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
+ }
+ break;
+ case 0xC:
+ case 0xE:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ }
+ break;
+ }
+ default:
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
+void
+Decoder::DecodeSpecialCondition(Instruction* instr)
+{
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vst1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0xA:
+ if (instr->Bits(22, 20) == 7) {
+ const char* option = "?";
+ switch (instr->Bits(3, 0)) {
+ case 2: option = "oshst"; break;
+ case 3: option = "osh"; break;
+ case 6: option = "nshst"; break;
+ case 7: option = "nsh"; break;
+ case 10: option = "ishst"; break;
+ case 11: option = "ish"; break;
+ case 14: option = "st"; break;
+ case 15: option = "sy"; break;
+ }
+ switch (instr->Bits(7, 4)) {
+ case 4:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "dsb %s", option);
+ break;
+ case 5:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "dmb %s", option);
+ break;
+ default:
+ Unknown(instr);
+ }
+ break;
+ }
+ MOZ_FALLTHROUGH;
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ int Rn = instr->Bits(19, 16);
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d]", Rn);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x1D:
+ if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
+ instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
+ instr->Bit(4) == 0x0) {
+ // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ MOZ_CRASH(); // Case analysis is exhaustive.
+ break;
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
+}
+
+#undef VERIFY
+
+bool
+Decoder::IsConstantPoolAt(uint8_t* instr_ptr)
+{
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
+}
+
+
+int
+Decoder::ConstantPoolSizeAt(uint8_t* instr_ptr)
+{
+ if (IsConstantPoolAt(instr_ptr)) {
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return DecodeConstantPoolLength(instruction_bits);
+ } else {
+ return -1;
+ }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int
+Decoder::InstructionDecode(uint8_t* instr_ptr)
+{
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ if (instr->ConditionField() == kSpecialCondition) {
+ DecodeSpecialCondition(instr);
+ return Instruction::kInstrSize;
+ }
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "constant pool begin (length %d)",
+ DecodeConstantPoolLength(instruction_bits));
+ return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+    // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
+ }
+ switch (instr->TypeValue()) {
+ case 0:
+ case 1: {
+ DecodeType01(instr);
+ break;
+ }
+ case 2: {
+ DecodeType2(instr);
+ break;
+ }
+ case 3: {
+ DecodeType3(instr);
+ break;
+ }
+ case 4: {
+ DecodeType4(instr);
+ break;
+ }
+ case 5: {
+ DecodeType5(instr);
+ break;
+ }
+ case 6: {
+ DecodeType6(instr);
+ break;
+ }
+ case 7: {
+ return DecodeType7(instr);
+ }
+ default: {
+ // The type field is 3-bits in the ARM encoding.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ return Instruction::kInstrSize;
+}
+
+
+} // namespace disasm
+
+
+#undef STRING_STARTS_WITH
+#undef VERIFY
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+
+const char*
+NameConverter::NameOfAddress(uint8_t* addr) const
+{
+ SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char*
+NameConverter::NameOfConstant(uint8_t* addr) const
+{
+ return NameOfAddress(addr);
+}
+
+
+const char*
+NameConverter::NameOfCPURegister(int reg) const
+{
+ return disasm::Registers::Name(reg);
+}
+
+
+const char*
+NameConverter::NameOfByteCPURegister(int reg) const
+{
+ MOZ_CRASH(); // ARM does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
+const char*
+NameConverter::NameOfXMMRegister(int reg) const
+{
+ MOZ_CRASH(); // ARM does not have any XMM registers
+ return "noxmmreg";
+}
+
+
+const char*
+NameConverter::NameInCode(uint8_t* addr) const
+{
+  // The default name converter is called for unknown code, so we will not try
+  // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter)
+{
+}
+
+
+Disassembler::~Disassembler()
+{
+}
+
+
+int
+Disassembler::InstructionDecode(V8Vector<char> buffer, uint8_t* instruction)
+{
+ Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+
+int
+Disassembler::ConstantPoolSizeAt(uint8_t* instruction)
+{
+ return Decoder::ConstantPoolSizeAt(instruction);
+}
+
+
+void
+Disassembler::Disassemble(FILE* f, uint8_t* begin, uint8_t* end)
+{
+ NameConverter converter;
+ Disassembler d(converter);
+ for (uint8_t* pc = begin; pc < end;) {
+ EmbeddedVector<char, ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(
+ f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
diff --git a/js/src/jit/arm/disasm/Disasm-arm.h b/js/src/jit/arm/disasm/Disasm-arm.h
new file mode 100644
index 000000000..5421a03c7
--- /dev/null
+++ b/js/src/jit/arm/disasm/Disasm-arm.h
@@ -0,0 +1,143 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_arm_disasm_Disasm_arm_h
+#define jit_arm_disasm_Disasm_arm_h
+
+#ifdef JS_DISASM_ARM
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+typedef unsigned char byte;
+
+// A reasonable (i.e., safe) buffer size for the disassembly of a single instruction.
+const int ReasonableBufferSize = 256;
+
+// Vector as used by the original code to allow for minimal modification.
+// Functions exactly like a character array with helper methods.
+template <typename T>
+class V8Vector {
+ public:
+ V8Vector() : start_(nullptr), length_(0) {}
+ V8Vector(T* data, int length) : start_(data), length_(length) {
+ MOZ_ASSERT(length == 0 || (length > 0 && data != nullptr));
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ MOZ_ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ inline V8Vector<T> operator+(int offset) {
+ MOZ_ASSERT(offset < length_);
+ return V8Vector<T>(start_ + offset, length_ - offset);
+ }
+
+ private:
+ T* start_;
+ int length_;
+};
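+
+// Illustrative sketch (not part of the original code): a V8Vector is just a
+// (pointer, length) view, so a caller can wrap an existing buffer:
+//
+//   char buf[ReasonableBufferSize];
+//   V8Vector<char> view(buf, ReasonableBufferSize);
+//
+// EmbeddedVector below bundles the storage and the view together.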
+
+
+template <typename T, int kSize>
+class EmbeddedVector : public V8Vector<T> {
+ public:
+ EmbeddedVector() : V8Vector<T>(buffer_, kSize) { }
+
+ explicit EmbeddedVector(T initial_value) : V8Vector<T>(buffer_, kSize) {
+ for (int i = 0; i < kSize; ++i) {
+ buffer_[i] = initial_value;
+ }
+ }
+
+  // When copying, make the underlying Vector reference our buffer.
+ EmbeddedVector(const EmbeddedVector& rhs)
+ : V8Vector<T>(rhs) {
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ if (this == &rhs) return *this;
+ V8Vector<T>::operator=(rhs);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ return *this;
+ }
+
+ private:
+ T buffer_[kSize];
+};
+
+
+// Interface and default implementation for converting addresses and
+// register numbers to text. The default implementation is machine-specific.
+class NameConverter {
+ public:
+ virtual ~NameConverter() {}
+ virtual const char* NameOfCPURegister(int reg) const;
+ virtual const char* NameOfByteCPURegister(int reg) const;
+ virtual const char* NameOfXMMRegister(int reg) const;
+ virtual const char* NameOfAddress(byte* addr) const;
+ virtual const char* NameOfConstant(byte* addr) const;
+ virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+ EmbeddedVector<char, 128> tmp_buffer_;
+};
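+
+// Hedged sketch (illustrative only): an embedder wanting symbolized addresses
+// could subclass NameConverter and override NameOfAddress, keeping the base
+// behaviour for registers. LookupSymbol below is hypothetical.
+//
+//   class SymbolizingNameConverter : public NameConverter {
+//    public:
+//     const char* NameOfAddress(byte* addr) const override {
+//       return LookupSymbol(addr);  // hypothetical symbol-table lookup
+//     }
+//   };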
+
+
+// A generic Disassembler interface
+class Disassembler {
+ public:
+ // Caller deallocates converter.
+ explicit Disassembler(const NameConverter& converter);
+
+ virtual ~Disassembler();
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(V8Vector<char> buffer, uint8_t* instruction);
+
+ // Returns -1 if instruction does not mark the beginning of a constant pool,
+ // or the number of entries in the constant pool beginning here.
+ int ConstantPoolSizeAt(byte* instruction);
+
+ // Write disassembly into specified file 'f' using specified NameConverter
+ // (see constructor).
+ static void Disassemble(FILE* f, uint8_t* begin, uint8_t* end);
+ private:
+ const NameConverter& converter_;
+
+ // Disallow implicit constructors.
+ Disassembler() = delete;
+ Disassembler(const Disassembler&) = delete;
+ void operator=(const Disassembler&) = delete;
+};
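+
+// A minimal usage sketch (illustrative only; it assumes JS_DISASM_ARM and a
+// valid [begin, end) range of ARM code, and uses no names beyond this header):
+//
+//   NameConverter converter;
+//   Disassembler dis(converter);
+//   EmbeddedVector<char, ReasonableBufferSize> buffer;
+//   for (uint8_t* pc = begin; pc < end;) {
+//       pc += dis.InstructionDecode(buffer, pc);
+//       fprintf(stderr, "%s\n", buffer.start());
+//   }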
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
+
+#endif // jit_arm_disasm_Disasm_arm_h
diff --git a/js/src/jit/arm/gen-double-encoder-table.py b/js/src/jit/arm/gen-double-encoder-table.py
new file mode 100644
index 000000000..1a208fdf4
--- /dev/null
+++ b/js/src/jit/arm/gen-double-encoder-table.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""Generate tables of immediately-encodable VFP doubles.
+
+DOES NOT get automatically run during the build process. If you need to
+modify this file (which is unlikely), you must re-run this script:
+
+python gen-double-encoder-table.py > $(topsrcdir)/path/to/DoubleEntryTable.tbl
+"""
+
+import operator
+
+def rep(bit, count):
+ return reduce(operator.ior, [bit << c for c in range(count)])
+
+def encodeDouble(value):
+ """Generate an ARM ARM 'VFP modified immediate constant' with format:
+ aBbbbbbb bbcdefgh 000...
+
+ We will return the top 32 bits of the double; the rest are 0."""
+ assert (0 <= value) and (value <= 255)
+ a = value >> 7
+ b = (value >> 6) & 1
+ B = int(b == 0)
+ cdefgh = value & 0x3f
+ return (a << 31) | (B << 30) | (rep(b, 8) << 22) | cdefgh << 16
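+
+# Worked example (illustrative): encodeDouble(0) gives a=0, b=0, B=1, cdefgh=0,
+# so only bit 30 is set and the result is 0x40000000 -- the high word of the
+# IEEE-754 double 2.0.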
+
+print '/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encoder-table.py. */'
+for i in range(256):
+ print ' { 0x%08x, { %d, %d, 0 } },' % (encodeDouble(i), i & 0xf, i >> 4)
diff --git a/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S
new file mode 100644
index 000000000..0237f2221
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S
@@ -0,0 +1,27 @@
+//===-- aeabi_idivmod.S - EABI idivmod implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int quot, int rem} __aeabi_idivmod(int numerator, int denominator) {
+// int rem, quot;
+// quot = __divmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__divmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
diff --git a/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S
new file mode 100644
index 000000000..f7e1d2ebe
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S
@@ -0,0 +1,28 @@
+//===-- aeabi_uidivmod.S - EABI uidivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { unsigned quot, unsigned rem}
+// __aeabi_uidivmod(unsigned numerator, unsigned denominator) {
+// unsigned rem, quot;
+// quot = __udivmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__udivmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
diff --git a/js/src/jit/arm/llvm-compiler-rt/assembly.h b/js/src/jit/arm/llvm-compiler-rt/assembly.h
new file mode 100644
index 000000000..83bed1233
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/assembly.h
@@ -0,0 +1,70 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN_DIRECTIVE .private_extern
+#define LOCAL_LABEL(name) L_##name
+#else
+#define HIDDEN_DIRECTIVE .hidden
+#define LOCAL_LABEL(name) .L_##name
+#endif
+
+#define GLUE2(a, b) a ## b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN_DIRECTIVE SYMBOL_NAME(name) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ HIDDEN_DIRECTIVE SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ .globl name SEPARATOR \
+ HIDDEN_DIRECTIVE name SEPARATOR \
+ name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined (__ARM_EABI__)
+# define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+# define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
diff --git a/js/src/jit/arm64/Architecture-arm64.cpp b/js/src/jit/arm64/Architecture-arm64.cpp
new file mode 100644
index 000000000..a5e62fb61
--- /dev/null
+++ b/js/src/jit/arm64/Architecture-arm64.cpp
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/Architecture-arm64.h"
+
+#include <cstring>
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+Registers::Code
+Registers::FromName(const char* name)
+{
+ // Check for some register aliases first.
+ if (strcmp(name, "ip0") == 0)
+ return ip0;
+ if (strcmp(name, "ip1") == 0)
+ return ip1;
+ if (strcmp(name, "fp") == 0)
+ return fp;
+
+ for (uint32_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Code(i)), name) == 0)
+ return Code(i);
+ }
+
+ return invalid_reg;
+}
+
+FloatRegisters::Code
+FloatRegisters::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Code(i)), name) == 0)
+ return Code(i);
+ }
+
+ return invalid_fpreg;
+}
+
+FloatRegisterSet
+FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ LiveFloatRegisterSet ret;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter)
+ ret.addUnchecked(FromCode((*iter).encoding()));
+ return ret.set();
+}
+
+uint32_t
+FloatRegister::GetSizeInBytes(const FloatRegisterSet& s)
+{
+ return s.size() * sizeof(double);
+}
+
+uint32_t
+FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ return s.size() * sizeof(double);
+}
+
+uint32_t
+FloatRegister::getRegisterDumpOffsetInBytes()
+{
+    // Although registers are 128 bits wide, only the bottom 64 bits need saving per the ABI.
+ return encoding() * sizeof(double);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h
new file mode 100644
index 000000000..e74340f13
--- /dev/null
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -0,0 +1,462 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_Architecture_arm64_h
+#define jit_arm64_Architecture_arm64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// AArch64 has 32 64-bit integer registers, x0 through x31.
+// x31 is special and functions as both the stack pointer and a zero register.
+// The bottom 32 bits of each of the X registers are accessible as w0 through w31.
+// The program counter is no longer accessible as a register.
+// SIMD and scalar floating-point registers share a register bank.
+// 32 bit float registers are s0 through s31.
+// 64 bit double registers are d0 through d31.
+// 128 bit SIMD registers are v0 through v31.
+// e.g., s0 is the bottom 32 bits of d0, which is the bottom 64 bits of v0.
+
+// AArch64 Calling Convention:
+// x0 - x7: arguments and return value
+// x8: indirect result (struct) location
+// x9 - x15: temporary registers
+// x16 - x17: intra-procedure-call scratch registers (used by the PLT and linker)
+// x18: platform specific use (TLS)
+// x19 - x28: callee-saved registers
+// x29: frame pointer
+// x30: link register
+
+// AArch64 Calling Convention for Floats:
+// d0 - d7: arguments and return value
+// d8 - d15: callee-saved registers
+// Only the bottom 64 bits of v8-v15 are preserved; bits 64-127 are not.
+// d16 - d31: temporary registers
+
+// AArch64 does not have soft float.
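+//
+// For example (illustrative only): for a call f(int32_t a, double b, int32_t c),
+// a is passed in w0, b in d0 and c in w1; an integer result comes back in x0/w0
+// and a floating point result in d0/s0.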
+
+class Registers {
+ public:
+ enum RegisterID {
+ w0 = 0, x0 = 0,
+ w1 = 1, x1 = 1,
+ w2 = 2, x2 = 2,
+ w3 = 3, x3 = 3,
+ w4 = 4, x4 = 4,
+ w5 = 5, x5 = 5,
+ w6 = 6, x6 = 6,
+ w7 = 7, x7 = 7,
+ w8 = 8, x8 = 8,
+ w9 = 9, x9 = 9,
+ w10 = 10, x10 = 10,
+ w11 = 11, x11 = 11,
+ w12 = 12, x12 = 12,
+ w13 = 13, x13 = 13,
+ w14 = 14, x14 = 14,
+ w15 = 15, x15 = 15,
+ w16 = 16, x16 = 16, ip0 = 16, // MacroAssembler scratch register 1.
+ w17 = 17, x17 = 17, ip1 = 17, // MacroAssembler scratch register 2.
+ w18 = 18, x18 = 18, tls = 18, // Platform-specific use (TLS).
+ w19 = 19, x19 = 19,
+ w20 = 20, x20 = 20,
+ w21 = 21, x21 = 21,
+ w22 = 22, x22 = 22,
+ w23 = 23, x23 = 23,
+ w24 = 24, x24 = 24,
+ w25 = 25, x25 = 25,
+ w26 = 26, x26 = 26,
+ w27 = 27, x27 = 27,
+ w28 = 28, x28 = 28,
+ w29 = 29, x29 = 29, fp = 29,
+ w30 = 30, x30 = 30, lr = 30,
+ w31 = 31, x31 = 31, wzr = 31, xzr = 31, sp = 31, // Special: both stack pointer and a zero register.
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef uint32_t Encoding;
+ typedef uint32_t SetType;
+
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static const char* GetName(Code code) {
+ static const char* const Names[] =
+ { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
+ "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29",
+ "lr", "sp", "invalid" };
+ return Names[code];
+ }
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < Total);
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char* name);
+
+ // If SP is used as the base register for a memory load or store, then the value
+ // of the stack pointer prior to adding any offset must be quadword (16 byte) aligned,
+    // or else a stack alignment exception will be generated.
+ static const Code StackPointer = sp;
+
+ static const Code Invalid = invalid_reg;
+
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable = 27; // No named special-function registers.
+
+ static const SetType AllMask = 0xFFFFFFFF;
+
+ static const SetType ArgRegMask =
+ (1 << Registers::x0) | (1 << Registers::x1) |
+ (1 << Registers::x2) | (1 << Registers::x3) |
+ (1 << Registers::x4) | (1 << Registers::x5) |
+ (1 << Registers::x6) | (1 << Registers::x7) |
+ (1 << Registers::x8);
+
+ static const SetType VolatileMask =
+ (1 << Registers::x0) | (1 << Registers::x1) |
+ (1 << Registers::x2) | (1 << Registers::x3) |
+ (1 << Registers::x4) | (1 << Registers::x5) |
+ (1 << Registers::x6) | (1 << Registers::x7) |
+ (1 << Registers::x8) | (1 << Registers::x9) |
+        (1 << Registers::x10) | (1 << Registers::x11) |
+        (1 << Registers::x12) | (1 << Registers::x13) |
+        (1 << Registers::x14) | (1 << Registers::x15) |
+ (1 << Registers::x16) | (1 << Registers::x17) |
+ (1 << Registers::x18);
+
+ static const SetType NonVolatileMask =
+ (1 << Registers::x19) | (1 << Registers::x20) |
+ (1 << Registers::x21) | (1 << Registers::x22) |
+ (1 << Registers::x23) | (1 << Registers::x24) |
+ (1 << Registers::x25) | (1 << Registers::x26) |
+ (1 << Registers::x27) | (1 << Registers::x28) |
+ (1 << Registers::x29) | (1 << Registers::x30);
+
+ static const SetType SingleByteRegs = VolatileMask | NonVolatileMask;
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::x28) | // PseudoStackPointer.
+ (1 << Registers::ip0) | // First scratch register.
+ (1 << Registers::ip1) | // Second scratch register.
+ (1 << Registers::tls) |
+ (1 << Registers::lr) |
+ (1 << Registers::sp);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::x2);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << Registers::x0);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisters
+{
+ public:
+ enum FPRegisterID {
+ s0 = 0, d0 = 0, v0 = 0,
+ s1 = 1, d1 = 1, v1 = 1,
+ s2 = 2, d2 = 2, v2 = 2,
+ s3 = 3, d3 = 3, v3 = 3,
+ s4 = 4, d4 = 4, v4 = 4,
+ s5 = 5, d5 = 5, v5 = 5,
+ s6 = 6, d6 = 6, v6 = 6,
+ s7 = 7, d7 = 7, v7 = 7,
+ s8 = 8, d8 = 8, v8 = 8,
+ s9 = 9, d9 = 9, v9 = 9,
+ s10 = 10, d10 = 10, v10 = 10,
+ s11 = 11, d11 = 11, v11 = 11,
+ s12 = 12, d12 = 12, v12 = 12,
+ s13 = 13, d13 = 13, v13 = 13,
+ s14 = 14, d14 = 14, v14 = 14,
+ s15 = 15, d15 = 15, v15 = 15,
+ s16 = 16, d16 = 16, v16 = 16,
+ s17 = 17, d17 = 17, v17 = 17,
+ s18 = 18, d18 = 18, v18 = 18,
+ s19 = 19, d19 = 19, v19 = 19,
+ s20 = 20, d20 = 20, v20 = 20,
+ s21 = 21, d21 = 21, v21 = 21,
+ s22 = 22, d22 = 22, v22 = 22,
+ s23 = 23, d23 = 23, v23 = 23,
+ s24 = 24, d24 = 24, v24 = 24,
+ s25 = 25, d25 = 25, v25 = 25,
+ s26 = 26, d26 = 26, v26 = 26,
+ s27 = 27, d27 = 27, v27 = 27,
+ s28 = 28, d28 = 28, v28 = 28,
+ s29 = 29, d29 = 29, v29 = 29,
+ s30 = 30, d30 = 30, v30 = 30,
+ s31 = 31, d31 = 31, v31 = 31, // Scratch register.
+ invalid_fpreg
+ };
+ typedef uint8_t Code;
+ typedef FPRegisterID Encoding;
+ typedef uint64_t SetType;
+
+ static const char* GetName(Code code) {
+ static const char* const Names[] =
+ { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
+ "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
+ "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
+ "d30", "d31", "invalid" };
+ return Names[code];
+ }
+
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < TotalPhys);
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char* name);
+
+ static const Code Invalid = invalid_fpreg;
+
+ static const uint32_t Total = 64;
+ static const uint32_t TotalPhys = 32;
+ static const SetType AllMask = 0xFFFFFFFFFFFFFFFFULL;
+ static const SetType AllPhysMask = 0xFFFFFFFFULL;
+ static const SetType SpreadCoefficient = 0x100000001ULL;
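+    // Multiplying a 32-bit mask by SpreadCoefficient replicates it into the upper
+    // 32 bits, so each physical register is marked in both its Double (low word)
+    // and Single (high word) views.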
+
+ static const uint32_t Allocatable = 31; // Without d31, the scratch register.
+
+ // d31 is the ScratchFloatReg.
+ static const SetType NonVolatileMask =
+ SetType((1 << FloatRegisters::d8) | (1 << FloatRegisters::d9) |
+ (1 << FloatRegisters::d10) | (1 << FloatRegisters::d11) |
+ (1 << FloatRegisters::d12) | (1 << FloatRegisters::d13) |
+ (1 << FloatRegisters::d14) | (1 << FloatRegisters::d15) |
+ (1 << FloatRegisters::d16) | (1 << FloatRegisters::d17) |
+ (1 << FloatRegisters::d18) | (1 << FloatRegisters::d19) |
+ (1 << FloatRegisters::d20) | (1 << FloatRegisters::d21) |
+ (1 << FloatRegisters::d22) | (1 << FloatRegisters::d23) |
+ (1 << FloatRegisters::d24) | (1 << FloatRegisters::d25) |
+ (1 << FloatRegisters::d26) | (1 << FloatRegisters::d27) |
+ (1 << FloatRegisters::d28) | (1 << FloatRegisters::d29) |
+ (1 << FloatRegisters::d30)) * SpreadCoefficient;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+ static const SetType AllDoubleMask = AllMask;
+ static const SetType AllSingleMask = AllMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // d31 is the ScratchFloatReg.
+ static const SetType NonAllocatableMask = (SetType(1) << FloatRegisters::d31) * SpreadCoefficient;
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+ union RegisterContent {
+ float s;
+ double d;
+ };
+ enum Kind {
+ Double,
+ Single
+ };
+};
+
+// In bytes: slots needed for potential memory->memory move spills.
+// +8 for cycles
+// +8 for gpr spills
+// +8 for double spills
+static const uint32_t ION_FRAME_SLACK_SIZE = 24;
+
+static const uint32_t ShadowStackSpace = 0;
+
+// TODO:
+// This constant needs to be updated to account for whatever near/far branching
+// strategy is used by ARM64.
+static const uint32_t JumpImmediateRange = UINT32_MAX;
+
+static const uint32_t ABIStackAlignment = 16;
+static const uint32_t CodeAlignment = 16;
+static const bool StackKeptAligned = false;
+
+// Although sp is only usable if 16-byte alignment is kept,
+// the Pseudo-StackPointer enables use of 8-byte alignment.
+static const uint32_t StackAlignment = 8;
+static const uint32_t NativeFrameSize = 8;
+
+struct FloatRegister
+{
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ constexpr FloatRegister(uint32_t code, FloatRegisters::Kind k)
+ : code_(FloatRegisters::Code(code & 31)),
+ k_(k)
+ { }
+
+ explicit constexpr FloatRegister(uint32_t code)
+ : code_(FloatRegisters::Code(code & 31)),
+ k_(FloatRegisters::Kind(code >> 5))
+ { }
+
+ constexpr FloatRegister()
+ : code_(FloatRegisters::Code(-1)),
+ k_(FloatRegisters::Double)
+ { }
+
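+    // Fold the Single half of the set onto the Double half so that each physical
+    // register is counted only once.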
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ x |= x >> FloatRegisters::TotalPhys;
+ x &= FloatRegisters::AllPhysMask;
+ return mozilla::CountPopulation32(x);
+ }
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < FloatRegisters::Total);
+ FloatRegister r(i);
+ return r;
+ }
+ Code code() const {
+ MOZ_ASSERT((uint32_t)code_ < FloatRegisters::Total);
+ return Code(code_ | (k_ << 5));
+ }
+ Encoding encoding() const {
+ return Encoding(code_);
+ }
+
+ const char* name() const {
+ return FloatRegisters::GetName(code());
+ }
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ bool operator!=(FloatRegister other) const {
+ return other.code_ != code_ || other.k_ != k_;
+ }
+ bool operator==(FloatRegister other) const {
+ return other.code_ == code_ && other.k_ == k_;
+ }
+ bool aliases(FloatRegister other) const {
+ return other.code_ == code_;
+ }
+ uint32_t numAliased() const {
+ return 2;
+ }
+ static FloatRegisters::Kind otherkind(FloatRegisters::Kind k) {
+ if (k == FloatRegisters::Double)
+ return FloatRegisters::Single;
+ return FloatRegisters::Double;
+ }
+ void aliased(uint32_t aliasIdx, FloatRegister* ret) {
+ if (aliasIdx == 0)
+ *ret = *this;
+ else
+ *ret = FloatRegister(code_, otherkind(k_));
+ }
+ // This function mostly exists for the ARM backend. It is to ensure that two
+ // floating point registers' types are equivalent. e.g. S0 is not equivalent
+ // to D16, since S0 holds a float32, and D16 holds a Double.
+ // Since all floating point registers on x86 and x64 are equivalent, it is
+ // reasonable for this function to do the same.
+ bool equiv(FloatRegister other) const {
+ return k_ == other.k_;
+ }
+ constexpr uint32_t size() const {
+ return k_ == FloatRegisters::Double ? sizeof(double) : sizeof(float);
+ }
+ uint32_t numAlignedAliased() {
+ return numAliased();
+ }
+ void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) {
+ MOZ_ASSERT(aliasIdx == 0);
+ aliased(aliasIdx, ret);
+ }
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::SpreadCoefficient << code_;
+ }
+
+ bool isSingle() const {
+ return k_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ return k_ == FloatRegisters::Double;
+ }
+ bool isSimd128() const {
+ return false;
+ }
+
+ static uint32_t FirstBit(SetType x) {
+ JS_STATIC_ASSERT(sizeof(SetType) == 8);
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ JS_STATIC_ASSERT(sizeof(SetType) == 8);
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+
+ public:
+ Code code_ : 8;
+ FloatRegisters::Kind k_ : 1;
+};
+
+// ARM/D32 has double registers that cannot be treated as float32.
+// Luckily, ARMv8 doesn't have the same misfortune.
+inline bool
+hasUnaliasedDouble()
+{
+ return false;
+}
+
+// ARM prior to ARMv8 also has doubles that alias multiple floats.
+// Again, ARMv8 is in the clear.
+inline bool
+hasMultiAlias()
+{
+ return false;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_Architecture_arm64_h
diff --git a/js/src/jit/arm64/Assembler-arm64.cpp b/js/src/jit/arm64/Assembler-arm64.cpp
new file mode 100644
index 000000000..3d032cebd
--- /dev/null
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -0,0 +1,670 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/Assembler-arm64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+#include "jsutil.h"
+
+#include "gc/Marking.h"
+
+#include "jit/arm64/Architecture-arm64.h"
+#include "jit/arm64/MacroAssembler-arm64.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCompartment.h"
+
+#include "gc/StoreBuffer-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::CountLeadingZeroes32;
+using mozilla::DebugOnly;
+
+// Note this is used for inter-wasm calls and may pass arguments and results
+// in floating point registers even if the system ABI does not.
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uintptr_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+
+ case MIRType::Float32:
+ case MIRType::Double:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(FloatRegister(floatRegIndex_,
+ type == MIRType::Double ? FloatRegisters::Double
+ : FloatRegisters::Single));
+ floatRegIndex_++;
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+namespace js {
+namespace jit {
+
+void
+Assembler::finish()
+{
+ armbuffer_.flushPool();
+
+ // The extended jump table is part of the code buffer.
+ ExtendedJumpTable_ = emitExtendedJumpTable();
+ Assembler::FinalizeCode();
+
+ // The jump relocation table starts with a fixed-width integer pointing
+ // to the start of the extended jump table.
+ // Space for this integer is allocated by Assembler::addJumpRelocation()
+ // before writing the first entry.
+ // Don't touch memory if we saw an OOM error.
+ if (jumpRelocations_.length() && !oom()) {
+ MOZ_ASSERT(jumpRelocations_.length() >= sizeof(uint32_t));
+ *(uint32_t*)jumpRelocations_.buffer() = ExtendedJumpTable_.getOffset();
+ }
+}
+
+BufferOffset
+Assembler::emitExtendedJumpTable()
+{
+ if (!pendingJumps_.length() || oom())
+ return BufferOffset();
+
+ armbuffer_.flushPool();
+ armbuffer_.align(SizeOfJumpTableEntry);
+
+ BufferOffset tableOffset = armbuffer_.nextOffset();
+
+ for (size_t i = 0; i < pendingJumps_.length(); i++) {
+ // Each JumpTableEntry is of the form:
+ // LDR ip0 [PC, 8]
+ // BR ip0
+ // [Patchable 8-byte constant low bits]
+ // [Patchable 8-byte constant high bits]
+ DebugOnly<size_t> preOffset = size_t(armbuffer_.nextOffset().getOffset());
+
+ ldr(vixl::ip0, ptrdiff_t(8 / vixl::kInstructionSize));
+ br(vixl::ip0);
+
+ DebugOnly<size_t> prePointer = size_t(armbuffer_.nextOffset().getOffset());
+ MOZ_ASSERT_IF(!oom(), prePointer - preOffset == OffsetOfJumpTableEntryPointer);
+
+ brk(0x0);
+ brk(0x0);
+
+ DebugOnly<size_t> postOffset = size_t(armbuffer_.nextOffset().getOffset());
+
+ MOZ_ASSERT_IF(!oom(), postOffset - preOffset == SizeOfJumpTableEntry);
+ }
+
+ if (oom())
+ return BufferOffset();
+
+ return tableOffset;
+}
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ // Copy the code and all constant pools into the output buffer.
+ armbuffer_.executableCopy(buffer);
+
+ // Patch any relative jumps that target code outside the buffer.
+ // The extended jump table may be used for distant jumps.
+ for (size_t i = 0; i < pendingJumps_.length(); i++) {
+ RelativePatch& rp = pendingJumps_[i];
+
+ if (!rp.target) {
+ // The patch target is nullptr for jumps that have been linked to
+ // a label within the same code block, but may be repatched later
+ // to jump to a different code block.
+ continue;
+ }
+
+ Instruction* target = (Instruction*)rp.target;
+ Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset());
+ JumpTableEntry* extendedJumpTable =
+ reinterpret_cast<JumpTableEntry*>(buffer + ExtendedJumpTable_.getOffset());
+ if (branch->BranchType() != vixl::UnknownBranchType) {
+ if (branch->IsTargetReachable(target)) {
+ branch->SetImmPCOffsetTarget(target);
+ } else {
+ JumpTableEntry* entry = &extendedJumpTable[i];
+ branch->SetImmPCOffsetTarget(entry->getLdr());
+ entry->data = target;
+ }
+ } else {
+            // This is currently a two-instruction call. In some cases it could be
+            // optimized into a single-instruction call plus a nop, but this works.
+ }
+ }
+}
+
+BufferOffset
+Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe)
+{
+ uint32_t inst = op | Rt(dest);
+ const size_t numInst = 1;
+ const unsigned sizeOfPoolEntryInBytes = 4;
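+    // Note: |value| is a pointer, so sizeof(value) is the pointer size (8 bytes);
+    // a 64-bit literal therefore occupies two 32-bit pool entries.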
+ const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes;
+ return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe);
+}
+
+BufferOffset
+Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe)
+{
+ return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe);
+}
+
+BufferOffset
+Assembler::immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c)
+{
+ MOZ_CRASH("immPool64Branch");
+}
+
+BufferOffset
+Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op)
+{
+ uint32_t inst = op | Rt(dest);
+ const size_t numInst = 1;
+ const unsigned sizeOfPoolEntryInBits = 32;
+ const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits;
+ return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value);
+}
+
+BufferOffset
+Assembler::fImmPool64(ARMFPRegister dest, double value)
+{
+ return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit);
+}
+BufferOffset
+Assembler::fImmPool32(ARMFPRegister dest, float value)
+{
+ return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit);
+}
+
+void
+Assembler::bind(Label* label, BufferOffset targetOffset)
+{
+ // Nothing has seen the label yet: just mark the location.
+ // If we've run out of memory, don't attempt to modify the buffer which may
+ // not be there. Just mark the label as bound to the (possibly bogus)
+ // targetOffset.
+ if (!label->used() || oom()) {
+ label->bind(targetOffset.getOffset());
+ return;
+ }
+
+ // Get the most recent instruction that used the label, as stored in the label.
+ // This instruction is the head of an implicit linked list of label uses.
+ BufferOffset branchOffset(label);
+
+ while (branchOffset.assigned()) {
+ // Before overwriting the offset in this instruction, get the offset of
+ // the next link in the implicit branch list.
+ BufferOffset nextOffset = NextLink(branchOffset);
+
+ // Linking against the actual (Instruction*) would be invalid,
+ // since that Instruction could be anywhere in memory.
+ // Instead, just link against the correct relative offset, assuming
+ // no constant pools, which will be taken into consideration
+ // during finalization.
+ ptrdiff_t relativeByteOffset = targetOffset.getOffset() - branchOffset.getOffset();
+ Instruction* link = getInstructionAt(branchOffset);
+
+ // This branch may still be registered for callbacks. Stop tracking it.
+ vixl::ImmBranchType branchType = link->BranchType();
+ vixl::ImmBranchRangeType branchRange = Instruction::ImmBranchTypeToRange(branchType);
+ if (branchRange < vixl::NumShortBranchRangeTypes) {
+ BufferOffset deadline(branchOffset.getOffset() +
+ Instruction::ImmBranchMaxForwardOffset(branchRange));
+ armbuffer_.unregisterBranchDeadline(branchRange, deadline);
+ }
+
+ // Is link able to reach the label?
+ if (link->IsPCRelAddressing() || link->IsTargetReachable(link + relativeByteOffset)) {
+ // Write a new relative offset into the instruction.
+ link->SetImmPCOffsetTarget(link + relativeByteOffset);
+ } else {
+ // This is a short-range branch, and it can't reach the label directly.
+ // Verify that it branches to a veneer: an unconditional branch.
+ MOZ_ASSERT(getInstructionAt(nextOffset)->BranchType() == vixl::UncondBranchType);
+ }
+
+ branchOffset = nextOffset;
+ }
+
+ // Bind the label, so that future uses may encode the offset immediately.
+ label->bind(targetOffset.getOffset());
+}
+
+void
+Assembler::bind(RepatchLabel* label)
+{
+ // Nothing has seen the label yet: just mark the location.
+ // If we've run out of memory, don't attempt to modify the buffer which may
+ // not be there. Just mark the label as bound to nextOffset().
+ if (!label->used() || oom()) {
+ label->bind(nextOffset().getOffset());
+ return;
+ }
+ int branchOffset = label->offset();
+ Instruction* inst = getInstructionAt(BufferOffset(branchOffset));
+ inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset);
+}
+
+void
+Assembler::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < pendingJumps_.length(); i++) {
+ RelativePatch& rp = pendingJumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
+ }
+ }
+
+ // TODO: Trace.
+#if 0
+ if (tmpDataRelocations_.length())
+ ::TraceDataRelocations(trc, &armbuffer_, &tmpDataRelocations_);
+#endif
+}
+
+void
+Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
+{
+ // Only JITCODE relocations are patchable at runtime.
+ MOZ_ASSERT(reloc == Relocation::JITCODE);
+
+ // The jump relocation table starts with a fixed-width integer pointing
+ // to the start of the extended jump table. But, we don't know the
+ // actual extended jump table offset yet, so write a 0 which we'll
+ // patch later in Assembler::finish().
+ if (!jumpRelocations_.length())
+ jumpRelocations_.writeFixedUint32_t(0);
+
+ // Each entry in the table is an (offset, extendedTableIndex) pair.
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ jumpRelocations_.writeUnsigned(pendingJumps_.length());
+}
+
+void
+Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
+{
+ MOZ_ASSERT(target.value != nullptr);
+
+ if (reloc == Relocation::JITCODE)
+ addJumpRelocation(src, reloc);
+
+ // This jump is not patchable at runtime. Extended jump table entry requirements
+    // cannot be known until finalization, so to be safe, give each jump an entry.
+ // This also causes GC tracing of the target.
+ enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc));
+}
+
+size_t
+Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
+{
+ MOZ_CRASH("TODO: This is currently unused (and untested)");
+ if (reloc == Relocation::JITCODE)
+ addJumpRelocation(src, reloc);
+
+ size_t extendedTableIndex = pendingJumps_.length();
+ enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
+ return extendedTableIndex;
+}
+
+void
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+{
+ MOZ_CRASH("PatchJump");
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expected)
+{
+ Instruction* i = (Instruction*)label.raw();
+ void** pValue = i->LiteralAddress<void**>();
+ MOZ_ASSERT(*pValue == expected.value);
+ *pValue = newValue.value;
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expected)
+{
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expected.value));
+}
+
+void
+Assembler::ToggleToJmp(CodeLocationLabel inst_)
+{
+ Instruction* i = (Instruction*)inst_.raw();
+ MOZ_ASSERT(i->IsAddSubImmediate());
+
+ // Refer to instruction layout in ToggleToCmp().
+ int imm19 = (int)i->Bits(23, 5);
+ MOZ_ASSERT(vixl::is_int19(imm19));
+
+ b(i, imm19, Always);
+}
+
+void
+Assembler::ToggleToCmp(CodeLocationLabel inst_)
+{
+ Instruction* i = (Instruction*)inst_.raw();
+ MOZ_ASSERT(i->IsCondB());
+
+ int imm19 = i->ImmCondBranch();
+    // Bit 23 is reserved, and the simulator throws an assertion when it is set.
+    // It would be messy to decode, but we could steal bit 30 or bit 31 if needed.
+ MOZ_ASSERT(vixl::is_int18(imm19));
+
+ // 31 - 64-bit if set, 32-bit if unset. (OK!)
+ // 30 - sub if set, add if unset. (OK!)
+ // 29 - SetFlagsBit. Must be set.
+ // 22:23 - ShiftAddSub. (OK!)
+ // 10:21 - ImmAddSub. (OK!)
+ // 5:9 - First source register (Rn). (OK!)
+ // 0:4 - Destination Register. Must be xzr.
+
+ // From the above, there is a safe 19-bit contiguous region from 5:23.
+ Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) |
+ Rd(vixl::xzr) | (imm19 << vixl::Rn_offset));
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw());
+ Instruction* load;
+ Instruction* call;
+
+ // There might be a constant pool at the very first instruction.
+ first = first->skipPool();
+
+ // Skip the stack pointer restore instruction.
+ if (first->IsStackPtrSync())
+ first = first->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+
+ load = const_cast<Instruction*>(first);
+
+ // The call instruction follows the load, but there may be an injected
+ // constant pool.
+ call = const_cast<Instruction*>(load->InstructionAtOffset(vixl::kInstructionSize)->skipPool());
+
+ if (call->IsBLR() == enabled)
+ return;
+
+ if (call->IsBLR()) {
+        // If the second instruction is blr(), then we have:
+ // ldr x17, [pc, offset]
+ // blr x17
+ MOZ_ASSERT(load->IsLDR());
+ // We want to transform this to:
+ // adr xzr, [pc, offset]
+ // nop
+ int32_t offset = load->ImmLLiteral();
+ adr(load, xzr, int32_t(offset));
+ nop(call);
+ } else {
+ // We have:
+ // adr xzr, [pc, offset] (or ldr x17, [pc, offset])
+ // nop
+ MOZ_ASSERT(load->IsADR() || load->IsLDR());
+ MOZ_ASSERT(call->IsNOP());
+ // Transform this to:
+ // ldr x17, [pc, offset]
+ // blr x17
+ int32_t offset = (int)load->ImmPCRawOffset();
+ MOZ_ASSERT(vixl::is_int19(offset));
+ ldr(load, ScratchReg2_64, int32_t(offset));
+ blr(call, ScratchReg2_64);
+ }
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ uint32_t tableStart_;
+ uint32_t offset_;
+ uint32_t extOffset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ {
+ // The first uint32_t stores the extended table offset.
+ tableStart_ = reader_.readFixedUint32_t();
+ }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ extOffset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+ uint32_t extendedOffset() const {
+ return extOffset_;
+ }
+};
+
+static JitCode*
+CodeFromJump(JitCode* code, uint8_t* jump)
+{
+ const Instruction* inst = (const Instruction*)jump;
+ uint8_t* target;
+
+ // We're expecting a call created by MacroAssembler::call(JitCode*).
+ // It looks like:
+ //
+ // ldr scratch, [pc, offset]
+ // blr scratch
+ //
+ // If the call has been toggled by ToggleCall(), it looks like:
+ //
+ // adr xzr, [pc, offset]
+ // nop
+ //
+ // There might be a constant pool at the very first instruction.
+ // See also ToggleCall().
+ inst = inst->skipPool();
+
+ // Skip the stack pointer restore instruction.
+ if (inst->IsStackPtrSync())
+ inst = inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+
+ if (inst->BranchType() != vixl::UnknownBranchType) {
+ // This is an immediate branch.
+ target = (uint8_t*)inst->ImmPCOffsetTarget();
+ } else if (inst->IsLDR()) {
+ // This is an ldr+blr call that is enabled. See ToggleCall().
+ mozilla::DebugOnly<const Instruction*> nextInst =
+ inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ MOZ_ASSERT(nextInst->IsNOP() || nextInst->IsBLR());
+ target = (uint8_t*)inst->Literal64();
+ } else if (inst->IsADR()) {
+ // This is a disabled call: adr+nop. See ToggleCall().
+ mozilla::DebugOnly<const Instruction*> nextInst =
+ inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ MOZ_ASSERT(nextInst->IsNOP());
+ ptrdiff_t offset = inst->ImmPCRawOffset() << vixl::kLiteralEntrySizeLog2;
+ // This is what Literal64 would do with the corresponding ldr.
+ memcpy(&target, inst + offset, sizeof(target));
+ } else {
+ MOZ_CRASH("Unrecognized jump instruction.");
+ }
+
+ // If the jump is within the code buffer, it uses the extended jump table.
+ if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
+ MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
+
+ uint8_t** patchablePtr = (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer);
+ target = *patchablePtr;
+ }
+
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* load = (Instruction*)&buffer[offset];
+
+ // The only valid traceable operation is a 64-bit load to an ARMRegister.
+ // Refer to movePatchablePtr() for generation.
+ MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit);
+
+ uintptr_t* literalAddr = load->LiteralAddress<uintptr_t*>();
+ uintptr_t literal = *literalAddr;
+
+ // All pointers on AArch64 will have the top bits cleared.
+ // If those bits are not cleared, this must be a Value.
+ if (literal >> JSVAL_TAG_SHIFT) {
+ Value v = Value::fromRawBits(literal);
+ TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
+ if (*literalAddr != v.asRawBits()) {
+ // Only update the code if the value changed, because the code
+ // is not writable if we're not moving objects.
+ *literalAddr = v.asRawBits();
+ }
+
+ // TODO: When we can, flush caches here if a pointer was moved.
+ continue;
+ }
+
+ // No barriers needed since the pointers are constants.
+ TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(literalAddr),
+ "ion-masm-ptr");
+
+ // TODO: Flush caches at end?
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+void
+Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
+ const ObjectVector& nurseryObjects)
+{
+
+ MOZ_ASSERT(!nurseryObjects.empty());
+
+ uint8_t* buffer = code->raw();
+ bool hasNurseryPointers = false;
+
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* ins = (Instruction*)&buffer[offset];
+
+ uintptr_t* literalAddr = ins->LiteralAddress<uintptr_t*>();
+ uintptr_t literal = *literalAddr;
+
+ if (literal >> JSVAL_TAG_SHIFT)
+ continue; // This is a Value.
+
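+        // Nursery object references were emitted as (index << 1) | 1; real object
+        // pointers and Values never have the low bit set.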
+ if (!(literal & 0x1))
+ continue;
+
+ uint32_t index = literal >> 1;
+ JSObject* obj = nurseryObjects[index];
+ *literalAddr = uintptr_t(obj);
+
+ // Either all objects are still in the nursery, or all objects are tenured.
+ MOZ_ASSERT_IF(hasNurseryPointers, IsInsideNursery(obj));
+
+ if (!hasNurseryPointers && IsInsideNursery(obj))
+ hasNurseryPointers = true;
+ }
+
+ if (hasNurseryPointers)
+ cx->runtime()->gc.storeBuffer.putWholeCell(code);
+}
+
+void
+Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
+{
+ MOZ_CRASH("PatchInstructionImmediate()");
+}
+
+void
+Assembler::retarget(Label* label, Label* target)
+{
+ if (label->used()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ BufferOffset next = NextLink(labelBranchOffset);
+ while (next.assigned()) {
+ labelBranchOffset = next;
+ next = NextLink(next);
+ }
+
+ // Then patch the head of label's use chain to the tail of target's
+ // use chain, prepending the entire use chain of target.
+ SetNextLink(labelBranchOffset, BufferOffset(target));
+ target->use(label->offset());
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ DebugOnly<uint32_t> prev = target->use(label->offset());
+ MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
+ }
+ }
+ label->reset();
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm64.h
new file mode 100644
index 000000000..287ab23b3
--- /dev/null
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -0,0 +1,557 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef A64_ASSEMBLER_A64_H_
+#define A64_ASSEMBLER_A64_H_
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+#include "jit/JitCompartment.h"
+
+namespace js {
+namespace jit {
+
+// VIXL imports.
+typedef vixl::Register ARMRegister;
+typedef vixl::FPRegister ARMFPRegister;
+using vixl::ARMBuffer;
+using vixl::Instruction;
+
+static const uint32_t AlignmentAtPrologue = 0;
+static const uint32_t AlignmentMidPrologue = 8;
+static const Scale ScalePointer = TimesEight;
+
+// The MacroAssembler uses scratch registers extensively and unexpectedly.
+// For safety, scratch registers should always be acquired using
+// vixl::UseScratchRegisterScope.
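+//
+// A sketch of the intended pattern (assuming a MacroAssembler |masm| in scope):
+//
+//   vixl::UseScratchRegisterScope temps(&masm);
+//   const ARMRegister scratch64 = temps.AcquireX();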
+static constexpr Register ScratchReg = { Registers::ip0 };
+static constexpr ARMRegister ScratchReg64 = { ScratchReg, 64 };
+
+static constexpr Register ScratchReg2 = { Registers::ip1 };
+static constexpr ARMRegister ScratchReg2_64 = { ScratchReg2, 64 };
+
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::d31, FloatRegisters::Double };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0, FloatRegisters::Double };
+
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::s0, FloatRegisters::Single };
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::s31, FloatRegisters::Single };
+
+static constexpr Register InvalidReg = { Registers::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_fpreg, FloatRegisters::Single };
+
+static constexpr Register OsrFrameReg = { Registers::x3 };
+static constexpr Register ArgumentsRectifierReg = { Registers::x8 };
+static constexpr Register CallTempReg0 = { Registers::x9 };
+static constexpr Register CallTempReg1 = { Registers::x10 };
+static constexpr Register CallTempReg2 = { Registers::x11 };
+static constexpr Register CallTempReg3 = { Registers::x12 };
+static constexpr Register CallTempReg4 = { Registers::x13 };
+static constexpr Register CallTempReg5 = { Registers::x14 };
+
+static constexpr Register PreBarrierReg = { Registers::x1 };
+
+static constexpr Register ReturnReg = { Registers::x0 };
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr Register JSReturnReg = { Registers::x2 };
+static constexpr Register FramePointer = { Registers::fp };
+static constexpr Register ZeroRegister = { Registers::sp };
+static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 };
+static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 };
+
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+// StackPointer is intentionally undefined on ARM64 to prevent misuse:
+// using sp as a base register is only valid if sp % 16 == 0.
+static constexpr Register RealStackPointer = { Registers::sp };
+
+static constexpr Register PseudoStackPointer = { Registers::x28 };
+static constexpr ARMRegister PseudoStackPointer64 = { Registers::x28, 64 };
+static constexpr ARMRegister PseudoStackPointer32 = { Registers::x28, 32 };
+
+// StackPointer for use by irregexp.
+static constexpr Register RegExpStackPointer = PseudoStackPointer;
+
+static constexpr Register IntArgReg0 = { Registers::x0 };
+static constexpr Register IntArgReg1 = { Registers::x1 };
+static constexpr Register IntArgReg2 = { Registers::x2 };
+static constexpr Register IntArgReg3 = { Registers::x3 };
+static constexpr Register IntArgReg4 = { Registers::x4 };
+static constexpr Register IntArgReg5 = { Registers::x5 };
+static constexpr Register IntArgReg6 = { Registers::x6 };
+static constexpr Register IntArgReg7 = { Registers::x7 };
+static constexpr Register GlobalReg = { Registers::x20 };
+static constexpr Register HeapReg = { Registers::x21 };
+static constexpr Register HeapLenReg = { Registers::x22 };
+
+// Define unsized Registers.
+#define DEFINE_UNSIZED_REGISTERS(N) \
+static constexpr Register r##N = { Registers::x##N };
+REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS)
+#undef DEFINE_UNSIZED_REGISTERS
+static constexpr Register ip0 = { Registers::x16 };
+static constexpr Register ip1 = { Registers::x17 };
+static constexpr Register fp = { Registers::x29 };
+static constexpr Register lr = { Registers::x30 };
+static constexpr Register rzr = { Registers::xzr };
+
+// Import VIXL registers into the js::jit namespace.
+#define IMPORT_VIXL_REGISTERS(N) \
+static constexpr ARMRegister w##N = vixl::w##N; \
+static constexpr ARMRegister x##N = vixl::x##N;
+REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS)
+#undef IMPORT_VIXL_REGISTERS
+static constexpr ARMRegister wzr = vixl::wzr;
+static constexpr ARMRegister xzr = vixl::xzr;
+static constexpr ARMRegister wsp = vixl::wsp;
+static constexpr ARMRegister sp = vixl::sp;
+
+// Import VIXL VRegisters into the js::jit namespace.
+#define IMPORT_VIXL_VREGISTERS(N) \
+static constexpr ARMFPRegister s##N = vixl::s##N; \
+static constexpr ARMFPRegister d##N = vixl::d##N;
+REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS)
+#undef IMPORT_VIXL_VREGISTERS
+
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = r8;
+static constexpr Register WasmIonExitRegE0 = r0;
+static constexpr Register WasmIonExitRegE1 = r1;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register.
+static constexpr Register WasmIonExitRegReturnData = r2;
+static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitRegD0 = r0;
+static constexpr Register WasmIonExitRegD1 = r1;
+static constexpr Register WasmIonExitRegD2 = r4;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg1;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type = r3;
+static constexpr Register JSReturnReg_Data = r2;
+
+static constexpr FloatRegister NANReg = { FloatRegisters::d14, FloatRegisters::Single };
+// N.B. r8 isn't listed as an AAPCS temporary register, but we can use it as one
+// because we never use struct returns.
+static constexpr Register CallTempNonArgRegs[] = { r8, r9, r10, r11, r12, r13, r14, r15 };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than a method in the LIRGenerator, it is
+// here such that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = false;
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which are used for "
+ "the constant sections of the code buffer. Thus it should be larger than the "
+ "alignment for SIMD constants.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const int32_t WasmGlobalRegBias = 1024;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+class Assembler : public vixl::Assembler
+{
+ public:
+ Assembler()
+ : vixl::Assembler()
+ { }
+
+ typedef vixl::Condition Condition;
+
+ void finish();
+ bool asmMergeWith(const Assembler& other) {
+ MOZ_CRASH("NYI");
+ }
+ void trace(JSTracer* trc);
+
+ // Emit the jump table, returning the BufferOffset to the first entry in the table.
+ BufferOffset emitExtendedJumpTable();
+ BufferOffset ExtendedJumpTable_;
+ void executableCopy(uint8_t* buffer);
+
+ BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
+ ARMBuffer::PoolEntry* pe = nullptr);
+ BufferOffset immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe = nullptr);
+ BufferOffset immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, vixl::Condition c);
+ BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op);
+ BufferOffset fImmPool64(ARMFPRegister dest, double value);
+ BufferOffset fImmPool32(ARMFPRegister dest, float value);
+
+ void bind(Label* label) { bind(label, nextOffset()); }
+ void bind(Label* label, BufferOffset boff);
+ void bind(RepatchLabel* label);
+ void bindLater(Label* label, wasm::TrapDesc target) {
+ MOZ_CRASH("NYI");
+ }
+
+ bool oom() const {
+ return AssemblerShared::oom() ||
+ armbuffer_.oom() ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+ }
+
+ void copyJumpRelocationTable(uint8_t* dest) const {
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+ void copyDataRelocationTable(uint8_t* dest) const {
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+ void copyPreBarrierTable(uint8_t* dest) const {
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+ }
+
+ size_t jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+ }
+ size_t dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+ }
+ size_t preBarrierTableBytes() const {
+ return preBarriers_.length();
+ }
+ size_t bytesNeeded() const {
+ return SizeOfCodeGenerated() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+ }
+
+ void processCodeLabels(uint8_t* rawCode) {
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
+ }
+ }
+
+ void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) {
+ *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
+ }
+
+ void retarget(Label* cur, Label* next);
+
+ // The buffer is about to be linked. Ensure any constant pools or
+ // excess bookkeeping has been flushed to the instruction stream.
+ void flush() {
+ armbuffer_.flushPool();
+ }
+
+ void comment(const char* msg) {
+ // This is not implemented because setPrinter() is not implemented.
+ // TODO spew("; %s", msg);
+ }
+
+ int actualIndex(int curOffset) {
+ ARMBuffer::PoolEntry pe(curOffset);
+ return armbuffer_.poolEntryOffset(pe);
+ }
+ size_t labelToPatchOffset(CodeOffset label) {
+ return label.offset();
+ }
+ static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) {
+ return code->raw() + index;
+ }
+ void setPrinter(Sprinter* sp) {
+ }
+
+ static bool SupportsFloatingPoint() { return true; }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsSimd() { return js::jit::SupportsSimd; }
+
+ // Tracks a jump that is patchable after finalization.
+ void addJumpRelocation(BufferOffset src, Relocation::Kind reloc);
+
+ protected:
+ // Add a jump whose target is unknown until finalization.
+ // The jump may not be patched at runtime.
+ void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind);
+
+ // Add a jump whose target is unknown until finalization, and may change
+ // thereafter. The jump is patchable at runtime.
+ size_t addPatchableJump(BufferOffset src, Relocation::Kind kind);
+
+ public:
+ static uint32_t PatchWrite_NearCallSize() {
+ return 4;
+ }
+
+ static uint32_t NopSize() {
+ return 4;
+ }
+
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) {
+ Instruction* dest = (Instruction*)start.raw();
+ //printf("patching %p with call to %p\n", start.raw(), toCall.raw());
+        bl(dest, ((Instruction*)toCall.raw() - dest) >> 2);
+    }
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expected);
+
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue,
+ ImmPtr expected);
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will end up being
+ // the call instruction.
+ *(raw - 1) = imm.value;
+ }
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ MOZ_CRASH("AlignDoubleArg()");
+ }
+ static uintptr_t GetPointer(uint8_t* ptr) {
+ Instruction* i = reinterpret_cast<Instruction*>(ptr);
+ uint64_t ret = i->Literal64();
+ return ret;
+ }
+
+ // Toggle a jmp or cmp emitted by toggledJump().
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
+
+ static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
+ const ObjectVector& nurseryObjects);
+
+ public:
+    // A jump table entry is two instructions followed by 8 bytes of raw data.
+ static const size_t SizeOfJumpTableEntry = 16;
+
+ struct JumpTableEntry
+ {
+ uint32_t ldr;
+ uint32_t br;
+ void* data;
+
+ Instruction* getLdr() {
+ return reinterpret_cast<Instruction*>(&ldr);
+ }
+ };
+
+ // Offset of the patchable target for the given entry.
+ static const size_t OffsetOfJumpTableEntryPointer = 8;
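+
+    // Illustrative layout of one entry (a sketch: the sizes and the data
+    // offset come from the constants above; the scratch register used by the
+    // actual emitter is an assumption here):
+    //
+    //     ldr  xN, #+8     ; load the 8-byte pointer that follows
+    //     br   xN          ; jump to it
+    //     .quad target     ; patchable, at OffsetOfJumpTableEntryPointer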
+
+ public:
+ void writeCodePointer(AbsoluteLabel* absoluteLabel) {
+ MOZ_ASSERT(!absoluteLabel->bound());
+ uintptr_t x = LabelBase::INVALID_OFFSET;
+ BufferOffset off = EmitData(&x, sizeof(uintptr_t));
+
+        // The x86/x64 ports make general use of AbsoluteLabel and weave a
+        // linked list of its uses through the assembly. On ARM64, labels are
+        // only used for the case statements of switch jump tables, so for
+        // simplicity we treat the AbsoluteLabel as an ordinary label and bind
+        // it to the offset of the jump table entry that needs to be patched.
+ LabelBase* label = absoluteLabel;
+ label->bind(off.getOffset());
+ }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+ {
+ MOZ_CRASH("verifyHeapAccessDisassembly");
+ }
+
+ protected:
+ // Because jumps may be relocated to a target inaccessible by a short jump,
+ // each relocatable jump must have a unique entry in the extended jump table.
+ // Valid relocatable targets are of type Relocation::JITCODE.
+ struct JumpRelocation
+ {
+ BufferOffset jump; // Offset to the short jump, from the start of the code buffer.
+ uint32_t extendedTableIndex; // Unique index within the extended jump table.
+
+ JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex)
+ : jump(jump), extendedTableIndex(extendedTableIndex)
+ { }
+ };
+
+ // Structure for fixing up pc-relative loads/jumps when the machine
+ // code gets moved (executable copy, gc, etc.).
+ struct RelativePatch
+ {
+ BufferOffset offset;
+ void* target;
+ Relocation::Kind kind;
+
+ RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
+ : offset(offset), target(target), kind(kind)
+ { }
+ };
+
+ // List of jumps for which the target is either unknown until finalization,
+ // or cannot be known due to GC. Each entry here requires a unique entry
+ // in the extended jump table, and is patched at finalization.
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_;
+
+ // Final output formatters.
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter preBarriers_;
+};
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+
+class ABIArgGenerator
+{
+ public:
+ ABIArgGenerator()
+ : intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+ current_()
+ { }
+
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+
+ protected:
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+};
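+
+// A sketch of the assignment policy that ABIArgGenerator::next() presumably
+// implements, inferred from NumIntArgRegs/NumFloatArgRegs above (AAPCS64
+// style; the real definition lives in the .cpp file and may differ):
+//
+//     integer/pointer arguments -> x0..x7, then 8-byte stack slots
+//     float/double arguments    -> v0..v7, then stack slots
+//     stackOffset_ accumulates the stack bytes consumed so far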
+
+static constexpr Register ABINonArgReg0 = r8;
+static constexpr Register ABINonArgReg1 = r9;
+static constexpr Register ABINonArgReg2 = r10;
+static constexpr Register ABINonArgReturnReg0 = r8;
+static constexpr Register ABINonArgReturnReg1 = r9;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = { Registers::x17 };
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static inline bool
+GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (usedIntArgs >= NumIntArgRegs)
+ return false;
+ *out = Register::FromCode(usedIntArgs);
+ return true;
+}
+
+static inline bool
+GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out)
+{
+ if (usedFloatArgs >= NumFloatArgRegs)
+ return false;
+ *out = FloatRegister::FromCode(usedFloatArgs);
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
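+// For example, with NumIntArgRegs == 8, a request for the tenth integer
+// argument (usedIntArgs == 9) falls through to CallTempNonArgRegs[1],
+// assuming that table defines at least two registers on this target.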
+
+inline Imm32
+Imm64::firstHalf() const
+{
+ return low();
+}
+
+inline Imm32
+Imm64::secondHalf() const
+{
+ return hi();
+}
+
+void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+ ReprotectCode reprotect = DontReprotect);
+
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+// Forbids pool generation during a specified interval. Not nestable.
+class AutoForbidPools
+{
+ Assembler* asm_;
+
+ public:
+ AutoForbidPools(Assembler* asm_, size_t maxInst)
+ : asm_(asm_)
+ {
+ asm_->enterNoPool(maxInst);
+ }
+
+ ~AutoForbidPools() {
+ asm_->leaveNoPool();
+ }
+};
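+
+// Typical use (a sketch; the body is illustrative): keep a short, patchable
+// or pc-relative sequence contiguous so no constant pool is dumped inside it.
+//
+//     {
+//         AutoForbidPools afp(&asm_, /* maxInst = */ 2);
+//         // ...emit at most maxInst instructions here...
+//     }   // leaveNoPool() runs when the scope exits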
+
+} // namespace jit
+} // namespace js
+
+#endif // A64_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/AtomicOperations-arm64.h b/js/src/jit/arm64/AtomicOperations-arm64.h
new file mode 100644
index 000000000..b213b0218
--- /dev/null
+++ b/js/src/jit/arm64/AtomicOperations-arm64.h
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_arm64_AtomicOperations_arm64_h
+#define jit_arm64_AtomicOperations_arm64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+ return true;
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template <typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src,
+ size_t nbytes)
+{
+ memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src,
+ size_t nbytes)
+{
+ memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+}
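+
+// The wrappers above are thin veneers over the GCC/Clang __atomic builtins.
+// A standalone sketch of the same builtins outside SpiderMonkey (illustrative
+// only; the names are hypothetical, compiles with g++ or clang++):
+//
+//     #include <cstdint>
+//     #include <cstdio>
+//
+//     int main() {
+//         int32_t cell = 5, expected = 5, desired = 9;
+//         // Like compareExchangeSeqCst(): on success cell becomes 9 and the
+//         // previous value (5) is left in `expected`.
+//         __atomic_compare_exchange(&cell, &expected, &desired, false,
+//                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+//         // Like fetchAddSeqCst(): returns the old value (9); cell becomes 10.
+//         int32_t old = __atomic_fetch_add(&cell, 1, __ATOMIC_SEQ_CST);
+//         printf("%d %d %d\n", cell, expected, old); // prints: 10 5 9
+//         return 0;
+//     }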
+
+#endif // jit_arm64_AtomicOperations_arm64_h
diff --git a/js/src/jit/arm64/Bailouts-arm64.cpp b/js/src/jit/arm64/Bailouts-arm64.cpp
new file mode 100644
index 000000000..af764e8f7
--- /dev/null
+++ b/js/src/jit/arm64/Bailouts-arm64.cpp
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ uintptr_t frameSize_;
+ uintptr_t snapshotOffset_;
+
+ public:
+ MachineState machineState() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ uint32_t snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ uint8_t* parentStackPointer() {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+};
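+
+// Layout implied by the accessors above once the bailout handler has spilled
+// all registers (a sketch; higher addresses toward the top):
+//
+//     JitFrameLayout of the bailing frame
+//     bailing frame contents, frameSize() bytes
+//     BailoutStack (saved FP regs, GP regs, frameSize_, snapshotOffset_)
+//
+// parentStackPointer() points just past this struct, i.e. at the base of the
+// bailing frame's contents.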
+
+} // namespace jit
+} // namespace js
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machineState())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+ snapshotOffset_ = bailout->snapshotOffset();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/arm64/BaselineCompiler-arm64.h b/js/src/jit/arm64/BaselineCompiler-arm64.h
new file mode 100644
index 000000000..946099ff1
--- /dev/null
+++ b/js/src/jit/arm64/BaselineCompiler-arm64.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_BaselineCompiler_arm64_h
+#define jit_arm64_BaselineCompiler_arm64_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerARM64 : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerARM64(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+ { }
+};
+
+typedef BaselineCompilerARM64 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_BaselineCompiler_arm64_h */
diff --git a/js/src/jit/arm64/BaselineIC-arm64.cpp b/js/src/jit/arm64/BaselineIC-arm64.cpp
new file mode 100644
index 000000000..54ac47d5b
--- /dev/null
+++ b/js/src/jit/arm64/BaselineIC-arm64.cpp
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/SharedIC.h"
+#include "jit/SharedICHelpers.h"
+
+#ifdef JS_SIMULATOR_ARM64
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/BaselineCompiler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.cmp32(R0.valueReg(), R1.valueReg());
+ masm.Cset(ARMRegister(R0.valueReg(), 32), cond);
+
+ // Result is implicitly boxed already.
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
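+
+// Worked example (illustrative): for JSOP_LT with R0 = Int32Value(3) and
+// R1 = Int32Value(5), both type guards pass, cmp32 compares the payloads,
+// Cset writes 1 (since 3 < 5) into the 32-bit view of R0's register, and
+// tagValue reboxes it as BooleanValue(true) before the stub returns.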
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.valueReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+ Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond);
+
+ masm.compareDouble(doubleCond, FloatReg0, FloatReg1);
+ masm.Cset(ARMRegister(dest, 32), cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp
new file mode 100644
index 000000000..83330a262
--- /dev/null
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -0,0 +1,783 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/CodeGenerator-arm64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using JS::GenericNaN;
+
+// shared
+CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+bool
+CodeGeneratorARM64::generateOutOfLineCode()
+{
+ MOZ_CRASH("generateOutOfLineCode");
+}
+
+void
+CodeGeneratorARM64::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue, MBasicBlock* mirFalse)
+{
+ MOZ_CRASH("emitBranch");
+}
+
+void
+OutOfLineBailout::accept(CodeGeneratorARM64* codegen)
+{
+ MOZ_CRASH("accept");
+}
+
+void
+CodeGeneratorARM64::visitTestIAndBranch(LTestIAndBranch* test)
+{
+ MOZ_CRASH("visitTestIAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitCompare(LCompare* comp)
+{
+ MOZ_CRASH("visitCompare");
+}
+
+void
+CodeGeneratorARM64::visitCompareAndBranch(LCompareAndBranch* comp)
+{
+ MOZ_CRASH("visitCompareAndBranch");
+}
+
+void
+CodeGeneratorARM64::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot)
+{
+ MOZ_CRASH("bailoutIf");
+}
+
+void
+CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot)
+{
+ MOZ_CRASH("bailoutFrom");
+}
+
+void
+CodeGeneratorARM64::bailout(LSnapshot* snapshot)
+{
+ MOZ_CRASH("bailout");
+}
+
+void
+CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ MOZ_CRASH("visitOutOfLineBailout");
+}
+
+void
+CodeGeneratorARM64::visitMinMaxD(LMinMaxD* ins)
+{
+ MOZ_CRASH("visitMinMaxD");
+}
+
+void
+CodeGeneratorARM64::visitMinMaxF(LMinMaxF* ins)
+{
+ MOZ_CRASH("visitMinMaxF");
+}
+
+void
+CodeGeneratorARM64::visitAbsD(LAbsD* ins)
+{
+ MOZ_CRASH("visitAbsD");
+}
+
+void
+CodeGeneratorARM64::visitAbsF(LAbsF* ins)
+{
+ MOZ_CRASH("visitAbsF");
+}
+
+void
+CodeGeneratorARM64::visitSqrtD(LSqrtD* ins)
+{
+ MOZ_CRASH("visitSqrtD");
+}
+
+void
+CodeGeneratorARM64::visitSqrtF(LSqrtF* ins)
+{
+ MOZ_CRASH("visitSqrtF");
+}
+
+// FIXME: This helper should probably be marked static.
+template <typename T>
+ARMRegister
+toWRegister(const T* a)
+{
+ return ARMRegister(ToRegister(a), 32);
+}
+
+// FIXME: This helper should probably be marked static.
+template <typename T>
+ARMRegister
+toXRegister(const T* a)
+{
+ return ARMRegister(ToRegister(a), 64);
+}
+
+js::jit::Operand
+toWOperand(const LAllocation* a)
+{
+ MOZ_CRASH("toWOperand");
+}
+
+vixl::CPURegister
+ToCPURegister(const LAllocation* a, Scalar::Type type)
+{
+ MOZ_CRASH("ToCPURegister");
+}
+
+vixl::CPURegister
+ToCPURegister(const LDefinition* d, Scalar::Type type)
+{
+ return ToCPURegister(d->output(), type);
+}
+
+void
+CodeGeneratorARM64::visitAddI(LAddI* ins)
+{
+ MOZ_CRASH("visitAddI");
+}
+
+void
+CodeGeneratorARM64::visitSubI(LSubI* ins)
+{
+ MOZ_CRASH("visitSubI");
+}
+
+void
+CodeGeneratorARM64::visitMulI(LMulI* ins)
+{
+ MOZ_CRASH("visitMulI");
+}
+
+void
+CodeGeneratorARM64::visitDivI(LDivI* ins)
+{
+ MOZ_CRASH("visitDivI");
+}
+
+void
+CodeGeneratorARM64::visitDivPowTwoI(LDivPowTwoI* ins)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitDivPowTwoI");
+}
+
+void
+CodeGeneratorARM64::modICommon(MMod* mir, Register lhs, Register rhs, Register output,
+ LSnapshot* snapshot, Label& done)
+{
+ MOZ_CRASH("CodeGeneratorARM64::modICommon");
+}
+
+void
+CodeGeneratorARM64::visitModI(LModI* ins)
+{
+ MOZ_CRASH("visitModI");
+}
+
+void
+CodeGeneratorARM64::visitModPowTwoI(LModPowTwoI* ins)
+{
+ MOZ_CRASH("visitModPowTwoI");
+}
+
+void
+CodeGeneratorARM64::visitModMaskI(LModMaskI* ins)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitModMaskI");
+}
+
+void
+CodeGeneratorARM64::visitBitNotI(LBitNotI* ins)
+{
+ MOZ_CRASH("visitBitNotI");
+}
+
+void
+CodeGeneratorARM64::visitBitOpI(LBitOpI* ins)
+{
+ MOZ_CRASH("visitBitOpI");
+}
+
+void
+CodeGeneratorARM64::visitShiftI(LShiftI* ins)
+{
+ MOZ_CRASH("visitShiftI");
+}
+
+void
+CodeGeneratorARM64::visitUrshD(LUrshD* ins)
+{
+ MOZ_CRASH("visitUrshD");
+}
+
+void
+CodeGeneratorARM64::visitPowHalfD(LPowHalfD* ins)
+{
+ MOZ_CRASH("visitPowHalfD");
+}
+
+MoveOperand
+CodeGeneratorARM64::toMoveOperand(const LAllocation a) const
+{
+ MOZ_CRASH("toMoveOperand");
+}
+
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM64>
+{
+ MTableSwitch* mir_;
+ Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;
+
+ void accept(CodeGeneratorARM64* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
+ : mir_(mir),
+ codeLabels_(alloc)
+ { }
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ bool addCodeLabel(CodeLabel label) {
+ return codeLabels_.append(label);
+ }
+ CodeLabel codeLabel(unsigned i) {
+ return codeLabels_[i];
+ }
+};
+
+void
+CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MOZ_CRASH("visitOutOfLineTableSwitch");
+}
+
+void
+CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir, Register index_, Register base_)
+{
+ MOZ_CRASH("emitTableSwitchDispatch");
+}
+
+void
+CodeGeneratorARM64::visitMathD(LMathD* math)
+{
+ MOZ_CRASH("visitMathD");
+}
+
+void
+CodeGeneratorARM64::visitMathF(LMathF* math)
+{
+ MOZ_CRASH("visitMathF");
+}
+
+void
+CodeGeneratorARM64::visitFloor(LFloor* lir)
+{
+ MOZ_CRASH("visitFloor");
+}
+
+void
+CodeGeneratorARM64::visitFloorF(LFloorF* lir)
+{
+ MOZ_CRASH("visitFloorF");
+}
+
+void
+CodeGeneratorARM64::visitCeil(LCeil* lir)
+{
+ MOZ_CRASH("visitCeil");
+}
+
+void
+CodeGeneratorARM64::visitCeilF(LCeilF* lir)
+{
+ MOZ_CRASH("visitCeilF");
+}
+
+void
+CodeGeneratorARM64::visitRound(LRound* lir)
+{
+ MOZ_CRASH("visitRound");
+}
+
+void
+CodeGeneratorARM64::visitRoundF(LRoundF* lir)
+{
+ MOZ_CRASH("visitRoundF");
+}
+
+void
+CodeGeneratorARM64::visitClzI(LClzI* lir)
+{
+ MOZ_CRASH("visitClzI");
+}
+
+void
+CodeGeneratorARM64::visitCtzI(LCtzI* lir)
+{
+ MOZ_CRASH("visitCtzI");
+}
+
+void
+CodeGeneratorARM64::emitRoundDouble(FloatRegister src, Register dest, Label* fail)
+{
+ MOZ_CRASH("CodeGeneratorARM64::emitRoundDouble");
+}
+
+void
+CodeGeneratorARM64::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ MOZ_CRASH("visitTruncateDToInt32");
+}
+
+void
+CodeGeneratorARM64::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ MOZ_CRASH("visitTruncateFToInt32");
+}
+
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(0);
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_CRASH("arm64 does not use frame size classes");
+}
+
+ValueOperand
+CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand
+CodeGeneratorARM64::ToOutValue(LInstruction* ins)
+{
+ Register payloadReg = ToRegister(ins->getDef(0));
+ return ValueOperand(payloadReg);
+}
+
+ValueOperand
+CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos)
+{
+ MOZ_CRASH("CodeGeneratorARM64::ToTempValue");
+}
+
+void
+CodeGeneratorARM64::visitValue(LValue* value)
+{
+ MOZ_CRASH("visitValue");
+}
+
+void
+CodeGeneratorARM64::visitBox(LBox* box)
+{
+ MOZ_CRASH("visitBox");
+}
+
+void
+CodeGeneratorARM64::visitUnbox(LUnbox* unbox)
+{
+ MOZ_CRASH("visitUnbox");
+}
+
+void
+CodeGeneratorARM64::visitDouble(LDouble* ins)
+{
+ MOZ_CRASH("visitDouble");
+}
+
+void
+CodeGeneratorARM64::visitFloat32(LFloat32* ins)
+{
+ MOZ_CRASH("visitFloat32");
+}
+
+Register
+CodeGeneratorARM64::splitTagForTest(const ValueOperand& value)
+{
+ MOZ_CRASH("splitTagForTest");
+}
+
+void
+CodeGeneratorARM64::visitTestDAndBranch(LTestDAndBranch* test)
+{
+ MOZ_CRASH("visitTestDAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitTestFAndBranch(LTestFAndBranch* test)
+{
+ MOZ_CRASH("visitTestFAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitCompareD(LCompareD* comp)
+{
+ MOZ_CRASH("visitCompareD");
+}
+
+void
+CodeGeneratorARM64::visitCompareF(LCompareF* comp)
+{
+ MOZ_CRASH("visitCompareF");
+}
+
+void
+CodeGeneratorARM64::visitCompareDAndBranch(LCompareDAndBranch* comp)
+{
+ MOZ_CRASH("visitCompareDAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitCompareFAndBranch(LCompareFAndBranch* comp)
+{
+ MOZ_CRASH("visitCompareFAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitCompareB(LCompareB* lir)
+{
+ MOZ_CRASH("visitCompareB");
+}
+
+void
+CodeGeneratorARM64::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MOZ_CRASH("visitCompareBAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MOZ_CRASH("visitCompareBitwise");
+}
+
+void
+CodeGeneratorARM64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MOZ_CRASH("visitCompareBitwiseAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitBitAndAndBranch(LBitAndAndBranch* baab)
+{
+ MOZ_CRASH("visitBitAndAndBranch");
+}
+
+void
+CodeGeneratorARM64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ MOZ_CRASH("visitWasmUint32ToDouble");
+}
+
+void
+CodeGeneratorARM64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ MOZ_CRASH("visitWasmUint32ToFloat32");
+}
+
+void
+CodeGeneratorARM64::visitNotI(LNotI* ins)
+{
+ MOZ_CRASH("visitNotI");
+}
+
+// NZCV
+// NAN -> 0011
+// == -> 0110
+// < -> 1000
+// > -> 0010
+void
+CodeGeneratorARM64::visitNotD(LNotD* ins)
+{
+ MOZ_CRASH("visitNotD");
+}
+
+void
+CodeGeneratorARM64::visitNotF(LNotF* ins)
+{
+ MOZ_CRASH("visitNotF");
+}
+
+void
+CodeGeneratorARM64::visitLoadSlotV(LLoadSlotV* load)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitLoadSlotV");
+}
+
+void
+CodeGeneratorARM64::visitLoadSlotT(LLoadSlotT* load)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitLoadSlotT");
+}
+
+void
+CodeGeneratorARM64::visitStoreSlotT(LStoreSlotT* store)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitStoreSlotT");
+}
+
+void
+CodeGeneratorARM64::visitLoadElementT(LLoadElementT* load)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitLoadElementT");
+}
+
+void
+CodeGeneratorARM64::storeElementTyped(const LAllocation* value, MIRType valueType,
+ MIRType elementType, Register elements,
+ const LAllocation* index)
+{
+ MOZ_CRASH("CodeGeneratorARM64::storeElementTyped");
+}
+
+void
+CodeGeneratorARM64::visitGuardShape(LGuardShape* guard)
+{
+ MOZ_CRASH("visitGuardShape");
+}
+
+void
+CodeGeneratorARM64::visitGuardObjectGroup(LGuardObjectGroup* guard)
+{
+ MOZ_CRASH("visitGuardObjectGroup");
+}
+
+void
+CodeGeneratorARM64::visitGuardClass(LGuardClass* guard)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitGuardClass");
+}
+
+void
+CodeGeneratorARM64::visitInterruptCheck(LInterruptCheck* lir)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitInterruptCheck");
+}
+
+void
+CodeGeneratorARM64::generateInvalidateEpilogue()
+{
+ MOZ_CRASH("generateInvalidateEpilogue");
+}
+
+template <class U>
+Register
+getBase(U* mir)
+{
+ switch (mir->base()) {
+ case U::Heap: return HeapReg;
+ case U::Global: return GlobalReg;
+ }
+ return InvalidReg;
+}
+
+void
+CodeGeneratorARM64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitLoadTypedArrayElementStatic");
+}
+
+void
+CodeGeneratorARM64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("CodeGeneratorARM64::visitStoreTypedArrayElementStatic");
+}
+
+void
+CodeGeneratorARM64::visitWasmCall(LWasmCall* ins)
+{
+ MOZ_CRASH("vistWasmCall");
+}
+
+void
+CodeGeneratorARM64::visitWasmCallI64(LWasmCallI64* ins)
+{
+ MOZ_CRASH("vistWasmCallI64");
+}
+
+void
+CodeGeneratorARM64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSLoadHeap");
+}
+
+void
+CodeGeneratorARM64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSStoreHeap");
+}
+
+void
+CodeGeneratorARM64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSCompareExchangeHeap");
+}
+
+void
+CodeGeneratorARM64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSAtomicBinopHeap");
+}
+
+void
+CodeGeneratorARM64::visitWasmStackArg(LWasmStackArg* ins)
+{
+ MOZ_CRASH("visitWasmStackArg");
+}
+
+void
+CodeGeneratorARM64::visitUDiv(LUDiv* ins)
+{
+ MOZ_CRASH("visitUDiv");
+}
+
+void
+CodeGeneratorARM64::visitUMod(LUMod* ins)
+{
+ MOZ_CRASH("visitUMod");
+}
+
+void
+CodeGeneratorARM64::visitEffectiveAddress(LEffectiveAddress* ins)
+{
+ MOZ_CRASH("visitEffectiveAddress");
+}
+
+void
+CodeGeneratorARM64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ MOZ_CRASH("visitWasmLoadGlobalVar");
+}
+
+void
+CodeGeneratorARM64::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ MOZ_CRASH("visitWasmStoreGlobalVar");
+}
+
+void
+CodeGeneratorARM64::visitNegI(LNegI* ins)
+{
+ MOZ_CRASH("visitNegI");
+}
+
+void
+CodeGeneratorARM64::visitNegD(LNegD* ins)
+{
+ MOZ_CRASH("visitNegD");
+}
+
+void
+CodeGeneratorARM64::visitNegF(LNegF* ins)
+{
+ MOZ_CRASH("visitNegF");
+}
+
+void
+CodeGeneratorARM64::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.code_ == FloatRegisters::s0);
+ MOZ_ASSERT(ReturnDoubleReg.code_ == FloatRegisters::d0);
+ FloatRegister s1 = {FloatRegisters::s1, FloatRegisters::Single};
+ regs->add(ReturnFloat32Reg);
+ regs->add(s1);
+ regs->add(ReturnDoubleReg);
+}
+
+void
+CodeGeneratorARM64::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ }
+}
+
+void
+CodeGeneratorARM64::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ }
+}
+
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.h b/js/src/jit/arm64/CodeGenerator-arm64.h
new file mode 100644
index 000000000..63199d1fd
--- /dev/null
+++ b/js/src/jit/arm64/CodeGenerator-arm64.h
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_CodeGenerator_arm64_h
+#define jit_arm64_CodeGenerator_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorARM64 : public CodeGeneratorShared
+{
+ friend class MoveResolverARM64;
+
+ CodeGeneratorARM64* thisFromCtor() { return this; }
+
+ public:
+ CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ protected:
+ NonAssertingLabel deoptLabel_;
+
+ MoveOperand toMoveOperand(const LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ masm.test32(reg, Imm32(0xFF));
+ return bailoutIf(Assembler::Zero, snapshot);
+ }
+
+ protected:
+ bool generateOutOfLineCode();
+
+ void emitRoundDouble(FloatRegister src, Register dest, Label* fail);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ public:
+ // Instruction visitors.
+ virtual void visitMinMaxD(LMinMaxD* ins);
+ virtual void visitMinMaxF(LMinMaxF* math);
+ virtual void visitAbsD(LAbsD* ins);
+ virtual void visitAbsF(LAbsF* ins);
+ virtual void visitSqrtD(LSqrtD* ins);
+ virtual void visitSqrtF(LSqrtF* ins);
+ virtual void visitAddI(LAddI* ins);
+ virtual void visitSubI(LSubI* ins);
+ virtual void visitBitNotI(LBitNotI* ins);
+ virtual void visitBitOpI(LBitOpI* ins);
+
+ virtual void visitMulI(LMulI* ins);
+
+ virtual void visitDivI(LDivI* ins);
+ virtual void visitDivPowTwoI(LDivPowTwoI* ins);
+ virtual void visitModI(LModI* ins);
+ virtual void visitModPowTwoI(LModPowTwoI* ins);
+ virtual void visitModMaskI(LModMaskI* ins);
+ virtual void visitPowHalfD(LPowHalfD* ins);
+ virtual void visitShiftI(LShiftI* ins);
+ virtual void visitUrshD(LUrshD* ins);
+
+ virtual void visitTestIAndBranch(LTestIAndBranch* test);
+ virtual void visitCompare(LCompare* comp);
+ virtual void visitCompareAndBranch(LCompareAndBranch* comp);
+ virtual void visitTestDAndBranch(LTestDAndBranch* test);
+ virtual void visitTestFAndBranch(LTestFAndBranch* test);
+ virtual void visitCompareD(LCompareD* comp);
+ virtual void visitCompareF(LCompareF* comp);
+ virtual void visitCompareDAndBranch(LCompareDAndBranch* comp);
+ virtual void visitCompareFAndBranch(LCompareFAndBranch* comp);
+ virtual void visitCompareB(LCompareB* lir);
+ virtual void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ virtual void visitCompareBitwise(LCompareBitwise* lir);
+ virtual void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ virtual void visitBitAndAndBranch(LBitAndAndBranch* baab);
+ virtual void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ virtual void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ virtual void visitNotI(LNotI* ins);
+ virtual void visitNotD(LNotD* ins);
+ virtual void visitNotF(LNotF* ins);
+
+ virtual void visitMathD(LMathD* math);
+ virtual void visitMathF(LMathF* math);
+ virtual void visitFloor(LFloor* lir);
+ virtual void visitFloorF(LFloorF* lir);
+ virtual void visitCeil(LCeil* lir);
+ virtual void visitCeilF(LCeilF* lir);
+ virtual void visitRound(LRound* lir);
+ virtual void visitRoundF(LRoundF* lir);
+ virtual void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ virtual void visitTruncateFToInt32(LTruncateFToInt32* ins);
+
+ virtual void visitClzI(LClzI* lir);
+ virtual void visitCtzI(LCtzI* lir);
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand& value);
+
+ void storeElementTyped(const LAllocation* value, MIRType valueType, MIRType elementType,
+ Register elements, const LAllocation* index);
+
+ void divICommon(MDiv* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
+ Label& done);
+ void modICommon(MMod* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
+ Label& done);
+
+ public:
+ void visitBox(LBox* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitValue(LValue* value);
+ void visitDouble(LDouble* ins);
+ void visitFloat32(LFloat32* ins);
+
+ void visitLoadSlotV(LLoadSlotV* load);
+ void visitLoadSlotT(LLoadSlotT* load);
+ void visitStoreSlotT(LStoreSlotT* load);
+
+ void visitLoadElementT(LLoadElementT* load);
+
+ void visitGuardShape(LGuardShape* guard);
+ void visitGuardObjectGroup(LGuardObjectGroup* guard);
+ void visitGuardClass(LGuardClass* guard);
+
+ void visitInterruptCheck(LInterruptCheck* lir);
+
+ void visitNegI(LNegI* lir);
+ void visitNegD(LNegD* lir);
+ void visitNegF(LNegF* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
+ void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitWasmStackArg(LWasmStackArg* ins);
+
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+
+ void generateInvalidateEpilogue();
+
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+
+ protected:
+ void postWasmCall(LWasmCall* lir) {
+ MOZ_CRASH("postWasmCall");
+ }
+
+ void visitEffectiveAddress(LEffectiveAddress* ins);
+ void visitUDiv(LUDiv* ins);
+ void visitUMod(LUMod* ins);
+
+ public:
+ // Unimplemented SIMD instructions.
+ void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); }
+ void visitSimd128Float(LSimd128Float* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementI(LSimdExtractElementI* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementF(LSimdExtractElementF* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir) { MOZ_CRASH("NYI"); }
+};
+
+typedef CodeGeneratorARM64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM64>
+{
+ protected: // Silence Clang warning.
+ LSnapshot* snapshot_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot)
+ : snapshot_(snapshot)
+ { }
+
+ void accept(CodeGeneratorARM64* codegen);
+
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_CodeGenerator_arm64_h */
diff --git a/js/src/jit/arm64/LIR-arm64.h b/js/src/jit/arm64/LIR-arm64.h
new file mode 100644
index 000000000..7cfb784c0
--- /dev/null
+++ b/js/src/jit/arm64/LIR-arm64.h
@@ -0,0 +1,395 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_LIR_arm64_h
+#define jit_arm64_LIR_arm64_h
+
+namespace js {
+namespace jit {
+
+class LUnboxBase : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LUnboxBase(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+};
+
+class LUnbox : public LUnboxBase
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox(const LAllocation& input)
+ : LUnboxBase(input)
+ { }
+
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LUnboxBase
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnboxBase(input),
+ type_(type)
+ { }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+// LSoftDivI is a software divide for ARM cores that lack a hardware divide
+// instruction.
+//
+// It is implemented as a call to a C function, so it trashes r0, r1, r2, and
+// r3; the call also trashes lr and may trash ip. The function takes two
+// arguments (dividend in r0, divisor in r1), so the LInstruction is encoded
+// such that the dividend and divisor are passed in their appropriate
+// registers and end their lifetimes at the start of the instruction via
+// useFixedAtStart. The result is returned in r0, and the other three
+// trashable registers are marked as temps. For the time being, the link
+// register is not marked as trashed because we never allocate to it. The FP
+// registers are not trashed.
+class LSoftDivI : public LBinaryMath<3>
+{
+ public:
+ LIR_HEADER(SoftDivI);
+
+ LSoftDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
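+
+// A lowering of this node would look roughly like the following (a sketch
+// following the calling convention described above; lowerDivI() for this
+// backend is still a stub in this patch):
+//
+//     LSoftDivI* lir = new(alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0),
+//                                             useFixedAtStart(div->rhs(), r1),
+//                                             tempFixed(r1), tempFixed(r2),
+//                                             tempFixed(r3));
+//     defineFixed(lir, div, LAllocation(AnyRegister(r0)));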
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+
+ int32_t shift() {
+ return shift_;
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(0);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LSoftModI : public LBinaryMath<4>
+{
+ public:
+ LIR_HEADER(SoftModI);
+
+ LSoftModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ setTemp(3, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(3);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift()
+ {
+ return shift_;
+ }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp1, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp1);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// Takes a table switch with an integer operand that selects the target case.
+class LTableSwitch : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy, MTableSwitch* ins) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+    // Provided so shared CodeGenerator code can use a common interface;
+    // this backend needs no pointer temp.
+ const LDefinition* tempPointer() {
+ return nullptr;
+ }
+};
+
+// Takes a table switch with an integer operand that selects the target case.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
+{
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, MTableSwitch* ins)
+ {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition* tempPointer() {
+ return nullptr;
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardShape);
+
+ LGuardShape(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardShape* mir() const {
+ return mir_->toGuardShape();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardObjectGroup);
+
+ LGuardObjectGroup(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardObjectGroup* mir() const {
+ return mir_->toGuardObjectGroup();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LMulI : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(MulI);
+
+ MMul* mir() {
+ return mir_->toMul();
+ }
+};
+
+class LUDiv : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UDiv);
+
+ MDiv* mir() {
+ return mir_->toDiv();
+ }
+};
+
+class LUMod : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UMod);
+
+ MMod* mir() {
+ return mir_->toMod();
+ }
+};
+
+// A software unsigned division or modulus; it yields either the quotient or
+// the remainder, depending on which output this instruction is defined to
+// produce.
+class LSoftUDivOrMod : public LBinaryMath<3>
+{
+ public:
+ LIR_HEADER(SoftUDivOrMod);
+
+ LSoftUDivOrMod(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_LIR_arm64_h */
diff --git a/js/src/jit/arm64/LOpcodes-arm64.h b/js/src/jit/arm64/LOpcodes-arm64.h
new file mode 100644
index 000000000..5d9e05e30
--- /dev/null
+++ b/js/src/jit/arm64/LOpcodes-arm64.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_LOpcodes_arm64_h
+#define jit_arm64_LOpcodes_arm64_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(SoftDivI) \
+ _(SoftModI) \
+ _(ModMaskI) \
+ _(UDiv) \
+ _(UMod) \
+ _(SoftUDivOrMod)
+
+#endif /* jit_arm64_LOpcodes_arm64_h */
diff --git a/js/src/jit/arm64/Lowering-arm64.cpp b/js/src/jit/arm64/Lowering-arm64.cpp
new file mode 100644
index 000000000..ca86b450d
--- /dev/null
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -0,0 +1,369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LBoxAllocation
+LIRGeneratorARM64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart)
+{
+ MOZ_CRASH("useBoxFixed");
+}
+
+LAllocation
+LIRGeneratorARM64::useByteOpRegister(MDefinition* mir)
+{
+ MOZ_CRASH("useByteOpRegister");
+}
+
+LAllocation
+LIRGeneratorARM64::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ MOZ_CRASH("useByteOpRegister");
+}
+
+LAllocation
+LIRGeneratorARM64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ MOZ_CRASH("useByteOpRegisterOrNonDoubleConstant");
+}
+
+void
+LIRGeneratorARM64::visitBox(MBox* box)
+{
+ MOZ_CRASH("visitBox");
+}
+
+void
+LIRGeneratorARM64::visitUnbox(MUnbox* unbox)
+{
+ MOZ_CRASH("visitUnbox");
+}
+
+void
+LIRGeneratorARM64::visitReturn(MReturn* ret)
+{
+ MOZ_CRASH("visitReturn");
+}
+
+// x = !y
+void
+LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
+{
+ MOZ_CRASH("lowerForALU");
+}
+
+// z = x+y
+void
+LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("lowerForALU");
+}
+
+void
+LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
+{
+ MOZ_CRASH("lowerForFPU");
+}
+
+template <size_t Temps>
+void
+LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("lowerForFPU");
+}
+
+template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorARM64::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("NYI");
+}
+
+template<size_t Temps>
+void
+LIRGeneratorARM64::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("NYI");
+}
+
+template void LIRGeneratorARM64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorARM64::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("lowerForBitAndAndBranch");
+}
+
+void
+LIRGeneratorARM64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ MOZ_CRASH("defineUntypedPhi");
+}
+
+void
+LIRGeneratorARM64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ MOZ_CRASH("lowerUntypedPhiInput");
+}
+
+void
+LIRGeneratorARM64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("lowerForShift");
+}
+
+void
+LIRGeneratorARM64::lowerDivI(MDiv* div)
+{
+ MOZ_CRASH("lowerDivI");
+}
+
+void
+LIRGeneratorARM64::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
+{
+ MOZ_CRASH("lowerMulI");
+}
+
+void
+LIRGeneratorARM64::lowerModI(MMod* mod)
+{
+ MOZ_CRASH("lowerModI");
+}
+
+void
+LIRGeneratorARM64::lowerDivI64(MDiv* div)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::lowerModI64(MMod* mod)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::visitPowHalf(MPowHalf* ins)
+{
+ MOZ_CRASH("visitPowHalf");
+}
+
+LTableSwitch*
+LIRGeneratorARM64::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch)
+{
+ MOZ_CRASH("newLTableSwitch");
+}
+
+LTableSwitchV*
+LIRGeneratorARM64::newLTableSwitchV(MTableSwitch* tableswitch)
+{
+ MOZ_CRASH("newLTableSwitchV");
+}
+
+void
+LIRGeneratorARM64::visitGuardShape(MGuardShape* ins)
+{
+ MOZ_CRASH("visitGuardShape");
+}
+
+void
+LIRGeneratorARM64::visitGuardObjectGroup(MGuardObjectGroup* ins)
+{
+ MOZ_CRASH("visitGuardObjectGroup");
+}
+
+void
+LIRGeneratorARM64::lowerUrshD(MUrsh* mir)
+{
+ MOZ_CRASH("lowerUrshD");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSNeg(MAsmJSNeg* ins)
+{
+ MOZ_CRASH("visitAsmJSNeg");
+}
+
+void
+LIRGeneratorARM64::visitWasmSelect(MWasmSelect* ins)
+{
+ MOZ_CRASH("visitWasmSelect");
+}
+
+void
+LIRGeneratorARM64::lowerUDiv(MDiv* div)
+{
+ MOZ_CRASH("lowerUDiv");
+}
+
+void
+LIRGeneratorARM64::lowerUMod(MMod* mod)
+{
+ MOZ_CRASH("lowerUMod");
+}
+
+void
+LIRGeneratorARM64::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_CRASH("visitWasmUnsignedToDouble");
+}
+
+void
+LIRGeneratorARM64::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_CRASH("visitWasmUnsignedToFloat32");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSLoadHeap");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSStoreHeap");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSCompareExchangeHeap");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSAtomicExchangeHeap");
+}
+
+void
+LIRGeneratorARM64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_CRASH("visitAsmJSAtomicBinopHeap");
+}
+
+void
+LIRGeneratorARM64::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MOZ_CRASH("lowerTruncateDToInt32");
+}
+
+void
+LIRGeneratorARM64::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MOZ_CRASH("lowerTruncateFToInt32");
+}
+
+void
+LIRGeneratorARM64::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorARM64::visitSubstr(MSubstr* ins)
+{
+ MOZ_CRASH("visitSubstr");
+}
+
+void
+LIRGeneratorARM64::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
+void
+LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MOZ_CRASH("NY");
+}
+
+void
+LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins)
+{
+ MOZ_CRASH("NY");
+}
+
+void
+LIRGeneratorARM64::visitWasmStore(MWasmStore* ins)
+{
+ MOZ_CRASH("NY");
+}
+
+void
+LIRGeneratorARM64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MOZ_CRASH("NY");
+}
+
+void
+LIRGeneratorARM64::visitCopySign(MCopySign* ins)
+{
+ MOZ_CRASH("NY");
+}
+
+void
+LIRGeneratorARM64::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ MOZ_CRASH("NYI");
+}
diff --git a/js/src/jit/arm64/Lowering-arm64.h b/js/src/jit/arm64/Lowering-arm64.h
new file mode 100644
index 000000000..f23627d10
--- /dev/null
+++ b/js/src/jit/arm64/Lowering-arm64.h
@@ -0,0 +1,132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_Lowering_arm64_h
+#define jit_arm64_Lowering_arm64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorARM64 : public LIRGeneratorShared
+{
+ public:
+ LIRGeneratorARM64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ inline LDefinition tempToUnbox() {
+ return temp();
+ }
+
+ bool needTempForPostBarrier() { return true; }
+
+ // ARM64 has a scratch register, so no need for another temp for dispatch ICs.
+ LDefinition tempForDispatchCache(MIRType outputType = MIRType::None) {
+ return LDefinition::BogusTemp();
+ }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH("NYI"); }
+ void defineInt64Phi(MPhi*, size_t) { MOZ_CRASH("NYI"); }
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+ template<size_t Temps>
+ void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input);
+
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void visitPowHalf(MPowHalf* ins);
+ void visitAsmJSNeg(MAsmJSNeg* ins);
+ void visitWasmSelect(MWasmSelect* ins);
+
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void lowerPhi(MPhi* phi);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardObjectGroup(MGuardObjectGroup* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitCopySign(MCopySign* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+};
+
+typedef LIRGeneratorARM64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_Lowering_arm64_h */
diff --git a/js/src/jit/arm64/MacroAssembler-arm64-inl.h b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
new file mode 100644
index 000000000..4300d90eb
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -0,0 +1,1793 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MacroAssembler_arm64_inl_h
+#define jit_arm64_MacroAssembler_arm64_inl_h
+
+#include "jit/arm64/MacroAssembler-arm64.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movePtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
+{
+ MOZ_CRASH("NYI: moveFloat32ToGPR");
+}
+
+void
+MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
+{
+ MOZ_CRASH("NYI: moveGPRToFloat32");
+}
+
+void
+MacroAssembler::move8SignExtend(Register src, Register dest)
+{
+ MOZ_CRASH("NYI: move8SignExtend");
+}
+
+void
+MacroAssembler::move16SignExtend(Register src, Register dest)
+{
+ MOZ_CRASH("NYI: move16SignExtend");
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::not32(Register reg)
+{
+ Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
+}
+
+void
+MacroAssembler::and32(Register src, Register dest)
+{
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register dest)
+{
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register src, Register dest)
+{
+ And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::and32(Imm32 imm, const Address& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+ load32(dest, scratch32.asUnsized());
+ And(scratch32, scratch32, Operand(imm.value));
+ store32(scratch32.asUnsized(), dest);
+}
+
+void
+MacroAssembler::and32(const Address& src, Register dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ mov(ImmWord(imm.value), scratch);
+ andPtr(scratch, dest.reg);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ MOZ_CRASH("NYI: and64");
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ mov(ImmWord(imm.value), scratch);
+ orPtr(scratch, dest.reg);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ mov(ImmWord(imm.value), scratch);
+ xorPtr(scratch, dest.reg);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, Register dest)
+{
+ Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::or32(Register src, Register dest)
+{
+ Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+}
+
+void
+MacroAssembler::or32(Imm32 imm, const Address& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+ load32(dest, scratch32.asUnsized());
+ Orr(scratch32, scratch32, Operand(imm.value));
+ store32(scratch32.asUnsized(), dest);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ orPtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ xorPtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xor32(Register src, Register dest)
+{
+ Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+}
+
+void
+MacroAssembler::xor32(Imm32 imm, Register dest)
+{
+ Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::add32(Register src, Register dest)
+{
+ Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+}
+
+void
+MacroAssembler::add32(Imm32 imm, Register dest)
+{
+ Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const Address& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+
+ Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+ Add(scratch32, scratch32, Operand(imm.value));
+ Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+}
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ addPtr(src, dest, dest);
+}
+
+void
+MacroAssembler::addPtr(Register src1, Register src2, Register dest)
+{
+ Add(ARMRegister(dest, 64), ARMRegister(src1, 64), Operand(ARMRegister(src2, 64)));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ addPtr(imm, dest, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register src, Register dest)
+{
+ Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != dest.base);
+
+ Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+ Add(scratch64, scratch64, Operand(imm.value));
+ Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+
+ Ldr(scratch64, MemOperand(ARMRegister(src.base, 64), src.offset));
+ Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addPtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::addDouble(FloatRegister src, FloatRegister dest)
+{
+ fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void
+MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest)
+{
+ fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void
+MacroAssembler::sub32(Imm32 imm, Register dest)
+{
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void
+MacroAssembler::sub32(Register src, Register dest)
+{
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+}
+
+void
+MacroAssembler::sub32(const Address& src, Register dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != dest.base);
+
+ Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+ Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
+ Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != addr.base);
+
+ Ldr(scratch64, MemOperand(ARMRegister(addr.base, 64), addr.offset));
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ MOZ_CRASH("NYI: sub64");
+}
+
+void
+MacroAssembler::subDouble(FloatRegister src, FloatRegister dest)
+{
+ fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void
+MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
+{
+ fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void
+MacroAssembler::mul32(Register rhs, Register srcDest)
+{
+ MOZ_CRASH("NYI - mul32");
+}
+
+void
+MacroAssembler::mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero)
+{
+ Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
+ if (onOver) {
+ Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
+ B(onOver, NotEqual);
+ }
+ if (onZero)
+ Cbz(ARMRegister(dest, 32), onZero);
+
+ // Clear upper 32 bits.
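+ // (Writing a W register implicitly zero-extends into the full X register.)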
+ Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(dest.reg != scratch64.asUnsized());
+ mov(ImmWord(imm.value), scratch64.asUnsized());
+ Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ MOZ_CRASH("NYI: mul64");
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ ARMRegister xdest(dest, 64);
+ ARMRegister xsrc(src, 64);
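+ // dest = src + (src << 1), i.e. src * 3.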
+ Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
+}
+
+void
+MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest)
+{
+ fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void
+MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(temp != scratch);
+ movePtr(imm, scratch);
+ const ARMFPRegister scratchDouble = temps.AcquireD();
+ Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
+ fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
+}
+
+void
+MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_CRASH("NYI - quotient32");
+}
+
+void
+MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_CRASH("NYI - remainder32");
+}
+
+void
+MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
+{
+ fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void
+MacroAssembler::divDouble(FloatRegister src, FloatRegister dest)
+{
+ fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratchAddr64 = temps.AcquireX();
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ Mov(scratchAddr64, uint64_t(dest.addr));
+ Ldr(scratch64, MemOperand(scratchAddr64, 0));
+ Add(scratch64, scratch64, Operand(1));
+ Str(scratch64, MemOperand(scratchAddr64, 0));
+}
+
+void
+MacroAssembler::neg32(Register reg)
+{
+ Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
+}
+
+void
+MacroAssembler::negateFloat(FloatRegister reg)
+{
+ fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
+}
+
+void
+MacroAssembler::negateDouble(FloatRegister reg)
+{
+ fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
+}
+
+void
+MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
+{
+ MOZ_CRASH("NYI - absFloat32");
+}
+
+void
+MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
+{
+ MOZ_CRASH("NYI - absDouble");
+}
+
+void
+MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
+{
+ MOZ_CRASH("NYI - sqrtFloat32");
+}
+
+void
+MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
+{
+ MOZ_CRASH("NYI - sqrtDouble");
+}
+
+void
+MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ MOZ_CRASH("NYI - minFloat32");
+}
+
+void
+MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ MOZ_CRASH("NYI - minDouble");
+}
+
+void
+MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ MOZ_CRASH("NYI - maxFloat32");
+}
+
+void
+MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ MOZ_CRASH("NYI - maxDouble");
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ lshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 srcDest)
+{
+ MOZ_CRASH("NYI: lshift64");
+}
+
+void
+MacroAssembler::lshift32(Register shift, Register dest)
+{
+ Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void
+MacroAssembler::lshift32(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
+}
+
+void
+MacroAssembler::rshift32(Register shift, Register dest)
+{
+ Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void
+MacroAssembler::rshift32(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Register shift, Register dest)
+{
+ Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ rshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 srcDest)
+{
+ MOZ_CRASH("NYI: rshift64");
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_CRASH("NYI: rshift64Arithmetic");
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
+{
+ MOZ_CRASH("NYI: rshift64Arithmetic");
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
+{
+ MOZ_CRASH("NYI: rotateLeft by immediate");
+}
+
+void
+MacroAssembler::rotateLeft(Register count, Register input, Register dest)
+{
+ MOZ_CRASH("NYI: rotateLeft by register");
+}
+
+void
+MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
+{
+ MOZ_CRASH("NYI: rotateRight by immediate");
+}
+
+void
+MacroAssembler::rotateRight(Register count, Register input, Register dest)
+{
+ MOZ_CRASH("NYI: rotateRight by register");
+}
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_CRASH("NYI: rotateLeft64");
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_CRASH("NYI: rotateRight64");
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
+{
+ MOZ_CRASH("NYI: clz32");
+}
+
+void
+MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
+{
+ MOZ_CRASH("NYI: ctz32");
+}
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ MOZ_CRASH("NYI: clz64");
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ MOZ_CRASH("NYI: ctz64");
+}
+
+void
+MacroAssembler::popcnt32(Register src, Register dest, Register temp)
+{
+ MOZ_CRASH("NYI: popcnt32");
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register temp)
+{
+ MOZ_CRASH("NYI: popcnt64");
+}
+
+// ===============================================================
+// Branch functions
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label)
+{
+ cmp32(lhs, rhs);
+ B(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm, L label)
+{
+ cmp32(lhs, imm);
+ B(label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != rhs);
+ load32(lhs, scratch);
+ branch32(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ load32(lhs, scratch);
+ branch32(cond, scratch, imm, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
+ doBaseIndex(scratch32, lhs, vixl::LDR_w);
+ branch32(cond, scratch32.asUnsized(), rhs, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(lhs, scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ MOZ_CRASH("NYI: branch64 reg-imm");
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label)
+{
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs);
+ movePtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label)
+{
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch1_64 = temps.AcquireX();
+ const ARMRegister scratch2_64 = temps.AcquireX();
+ MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);
+
+ movePtr(rhs, scratch1_64.asUnsized());
+ loadPtr(lhs, scratch2_64.asUnsized());
+ branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
+{
+ cmpPtr(lhs, rhs);
+ return jumpWithPatch(label, cond);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
+{
+ // The scratch register is unused after the condition codes are set.
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ cmpPtr(scratch, rhs);
+ }
+ return jumpWithPatch(label, cond);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ if (rhs != scratch)
+ movePtr(rhs, scratch);
+ // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
+ rshiftPtr(Imm32(1), scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareFloat(cond, lhs, rhs);
+ switch (cond) {
+ case DoubleNotEqual: {
+ Label unordered;
+ // not equal *and* ordered
+ branch(Overflow, &unordered);
+ branch(NotEqual, label);
+ bind(&unordered);
+ break;
+ }
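+ // Overflow is set by the FP compare when the operands are unordered (NaN).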
+ case DoubleEqualOrUnordered:
+ branch(Overflow, label);
+ branch(Equal, label);
+ break;
+ default:
+ branch(Condition(cond), label);
+ }
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+
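+ // An out of range integer will be saturated to the destination size;
+ // the Add/Cmn pair below detects a result saturated to INT64_MIN or INT64_MAX.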
+ ARMFPRegister src32(src, 32);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch64.Is(dest64));
+
+ Fcvtzs(dest64, src32);
+ Add(scratch64, dest64, Operand(0x7fffffffffffffff));
+ Cmn(scratch64, 3);
+ B(fail, Assembler::Above);
+ And(dest64, dest64, Operand(0xffffffff));
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareDouble(cond, lhs, rhs);
+ switch (cond) {
+ case DoubleNotEqual: {
+ Label unordered;
+ // not equal *and* ordered
+ branch(Overflow, &unordered);
+ branch(NotEqual, label);
+ bind(&unordered);
+ break;
+ }
+ case DoubleEqualOrUnordered:
+ branch(Overflow, label);
+ branch(Equal, label);
+ break;
+ default:
+ branch(Condition(cond), label);
+ }
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // An out of range integer will be saturated to the destination size.
+ ARMFPRegister src64(src, 64);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch64.Is(dest64));
+
+ Fcvtzs(dest64, src64);
+ Add(scratch64, dest64, Operand(0x7fffffffffffffff));
+ Cmn(scratch64, 3);
+ B(fail, Assembler::Above);
+ And(dest64, dest64, Operand(0xffffffff));
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertDoubleToInt32(src, dest, fail);
+}
+
+template <typename T, typename L>
+void
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
+{
+ adds32(src, dest);
+ B(label, cond);
+}
+
+template <typename T>
+void
+MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
+{
+ subs32(src, dest);
+ branch(cond, label);
+}
+
+void
+MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value));
+ B(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ // x86 prefers |test foo, foo| to |cmp foo, #0|.
+ // Convert the former to the latter for ARM.
+ if (lhs == rhs && (cond == Zero || cond == NonZero))
+ cmp32(lhs, Imm32(0));
+ else
+ test32(lhs, rhs);
+ B(label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ test32(lhs, rhs);
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ load32(lhs, scratch);
+ branchTest32(cond, scratch, rhs, label);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ load32(lhs, scratch);
+ branchTest32(cond, scratch, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ Tst(ARMRegister(lhs, 64), Operand(rhs.value));
+ B(label, cond);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchTestPtr(cond, scratch, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testUndefined(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestInt32Impl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testInt32(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testInt32Truthy(truthy, value);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testDouble(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label)
+{
+ Fcmp(ARMFPRegister(reg, 64), 0.0);
+ if (!truthy) {
+ // Falsy values are zero and NaN.
+ branch(Zero, label);
+ branch(Overflow, label);
+ } else {
+ // Truthy values are non-zero and not NaN; the Overflow condition
+ // signals an unordered (NaN) comparison, which is treated as falsy here.
+ Label onFalse;
+ branch(Zero, &onFalse);
+ branch(Overflow, &onFalse);
+ B(label);
+ bind(&onFalse);
+ }
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNumberImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testNumber(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag, Label* label)
+{
+ Condition c = testBoolean(cond, tag);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testBooleanTruthy(truthy, value);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, Register tag, Label* label)
+{
+ branchTestStringImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestStringImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestStringImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testString(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition c = testStringTruthy(truthy, value);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label)
+{
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testSymbol(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label)
+{
+ branchTestNullImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNullImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testNull(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label)
+{
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestObjectImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testObject(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src, Label* label)
+{
+ Condition c = testGCThing(cond, src);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
+{
+ Condition c = testPrimitive(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void
+MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label)
+{
+ Condition c = testMagic(cond, t);
+ B(label, c);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
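+ // A magic value's raw bits are fully determined by |why|, so the whole
+ // boxed word can be compared in a single pointer-sized comparison.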
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(valaddr, ImmWord(magic));
+ B(label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+{
+ Str(ARMFPRegister(src, 64), MemOperand(ARMRegister(dest.base, 64), dest.offset));
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
+{
+ doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
+}
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
+{
+ Str(ARMFPRegister(src, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset));
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
+{
+ doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
+}
+
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
+{
+ MOZ_CRASH("NYI");
+}
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+ MOZ_CRASH("NYI");
+}
+
+// ===============================================================
+// Clamping functions.
+
+void
+MacroAssembler::clampIntToUint8(Register reg)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ const ARMRegister reg32(reg, 32);
+ MOZ_ASSERT(!scratch32.Is(reg32));
+
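+ // Compare the value against its own zero-extended low byte: negative values
+ // select wzr (clamp to 0) and values above 0xff select the 0xff constant.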
+ Cmp(reg32, Operand(reg32, vixl::UXTB));
+ Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
+ Mov(scratch32, Operand(0xff));
+ Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ MOZ_CRASH("NYI");
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+template <typename T>
+void
+MacroAssemblerCompat::addToStackPtr(T t)
+{
+ asMasm().addPtr(t, getStackPointer());
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::addStackPtrTo(T t)
+{
+ asMasm().addPtr(getStackPointer(), t);
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::subFromStackPtr(T t)
+{
+ asMasm().subPtr(t, getStackPointer());
+ syncStackPtr();
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::subStackPtrFrom(T t)
+{
+ asMasm().subPtr(getStackPointer(), t);
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::andToStackPtr(T t)
+{
+ asMasm().andPtr(t, getStackPointer());
+ syncStackPtr();
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::andStackPtrTo(T t)
+{
+ asMasm().andPtr(getStackPointer(), t);
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::branchStackPtr(Condition cond, T rhs, Label* label)
+{
+ asMasm().branchPtr(cond, getStackPointer(), rhs, label);
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::branchStackPtrRhs(Condition cond, T lhs, Label* label)
+{
+ asMasm().branchPtr(cond, lhs, getStackPointer(), label);
+}
+
+template <typename T>
+void
+MacroAssemblerCompat::branchTestStackPtr(Condition cond, T t, Label* label)
+{
+ asMasm().branchTestPtr(cond, getStackPointer(), t, label);
+}
+
+// If source is a double, load into dest.
+// If source is int32, convert to double and store in dest.
+// Else, branch to failure.
+void
+MacroAssemblerCompat::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+
+ // TODO: splitTagForTest really should not leak a scratch register.
+ Register tag = splitTagForTest(source);
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ temps.Exclude(ARMRegister(tag, 64));
+
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ convertInt32ToDouble(source.valueReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr());
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_MacroAssembler_arm64_inl_h */
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
new file mode 100644
index 000000000..d3d3cc210
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -0,0 +1,838 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/MacroAssembler-arm64.h"
+
+#include "jit/arm64/MoveEmitter-arm64.h"
+#include "jit/arm64/SharedICRegisters-arm64.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/MacroAssembler.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
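+ // Convert with round-to-nearest, then clamp the integer result to [0, 255].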
+ ARMRegister dest(output, 32);
+ Fcvtns(dest, ARMFPRegister(input, 64));
+
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+
+ Mov(scratch32, Operand(0xff));
+ Cmp(dest, scratch32);
+ Csel(dest, dest, scratch32, LessThan);
+ }
+
+ Cmp(dest, Operand(0));
+ Csel(dest, dest, wzr, GreaterThan);
+}
+
+void
+MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+js::jit::MacroAssembler&
+MacroAssemblerCompat::asMasm()
+{
+ return *static_cast<js::jit::MacroAssembler*>(this);
+}
+
+const js::jit::MacroAssembler&
+MacroAssemblerCompat::asMasm() const
+{
+ return *static_cast<const js::jit::MacroAssembler*>(this);
+}
+
+vixl::MacroAssembler&
+MacroAssemblerCompat::asVIXL()
+{
+ return *static_cast<vixl::MacroAssembler*>(this);
+}
+
+const vixl::MacroAssembler&
+MacroAssemblerCompat::asVIXL() const
+{
+ return *static_cast<const vixl::MacroAssembler*>(this);
+}
+
+BufferOffset
+MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest)
+{
+ const size_t numInst = 1; // Inserting one load instruction.
+ const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const.
+
+ // Scratch space for generating the load instruction.
+ //
+ // allocEntry() will use InsertIndexIntoTag() to store a temporary
+ // index to the corresponding PoolEntry in the instruction itself.
+ //
+ // That index will be fixed up later when finishPool()
+ // walks over all marked loads and calls PatchConstantPoolLoad().
+ uint32_t instructionScratch = 0;
+
+ // Emit the instruction mask in the scratch space.
+ // The offset doesn't matter: it will be fixed up later.
+ vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);
+
+ // Add the entry to the pool, fix up the LDR imm19 offset,
+ // and add the completed instruction to the buffer.
+ return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
+ literalAddr);
+}
+
+BufferOffset
+MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest)
+{
+ const size_t numInst = 1; // Inserting one load instruction.
+ const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value);
+
+ // Scratch space for generating the load instruction.
+ //
+ // allocEntry() will use InsertIndexIntoTag() to store a temporary
+ // index to the corresponding PoolEntry in the instruction itself.
+ //
+ // That index will be fixed up later when finishPool()
+ // walks over all marked loads and calls PatchConstantPoolLoad().
+ uint32_t instructionScratch = 0;
+
+ // Emit the instruction mask in the scratch space.
+ // The offset doesn't matter: it will be fixed up later.
+ vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);
+
+ // Add the entry to the pool, fix up the LDR imm19 offset,
+ // and add the completed instruction to the buffer.
+ return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
+ literalAddr);
+}
+
+void
+MacroAssemblerCompat::loadPrivate(const Address& src, Register dest)
+{
+ loadPtr(src, dest);
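+ // Private values store the pointer right-shifted by one; shift it back to
+ // recover the original pointer.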
+ asMasm().lshiftPtr(Imm32(1), dest);
+}
+
+void
+MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information, rounded up to a multiple of
+ // eight bytes.
+ int64_t size = (sizeof(ResumeFromException) + 7) & ~7;
+ Sub(GetStackPointer64(), GetStackPointer64(), Operand(size));
+ if (!GetStackPointer64().Is(sp))
+ Mov(sp, GetStackPointer64());
+
+ Mov(x0, GetStackPointer64());
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(r1);
+ asMasm().passABIArg(r0);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner.
+
+ loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer,
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
+ retn(Imm32(1 * sizeof(void*))); // Pop from stack and return.
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0);
+ loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
+ syncStackPtr();
+ Br(x0);
+
+ // If we found a finally block, this must be a baseline frame.
+ // Push two values expected by JSOP_RETSUB: BooleanValue(true)
+ // and the exception.
+ bind(&finally);
+ ARMRegister exception = x1;
+ Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception)));
+ Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
+ Ldr(ARMRegister(BaselineFrameReg, 64),
+ MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer)));
+ Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer)));
+ syncStackPtr();
+ pushValue(BooleanValue(true));
+ push(exception);
+ Br(x0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+ bind(&return_);
+ loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
+ loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ movePtr(BaselineFrameReg, r28);
+ vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr);
+ syncStackPtr();
+ vixl::MacroAssembler::Ret(vixl::lr);
+
+ // If we are bailing out to baseline to handle an exception,
+ // jump to the bailout tail stub.
+ bind(&bailout);
+ Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo)));
+ Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
+ Mov(x0, BAILOUT_RETURN_OK);
+ Br(x1);
+}
+
+void
+MacroAssemblerCompat::breakpoint()
+{
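+ // Each emitted breakpoint carries a distinct (wrapping) immediate so that
+ // individual breakpoint sites can be told apart.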
+ static int code = 0xA77;
+ Brk((code++) & 0xffff);
+}
+
+template<typename T>
+void
+MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+template void
+MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, AnyRegister output);
+template void
+MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, AnyRegister output);
+
+void
+MacroAssembler::reserveStack(uint32_t amount)
+{
+ // TODO: This bumps |sp| every time we reserve using a second register.
+ // It would save some instructions if we had a fixed frame size.
+ vixl::MacroAssembler::Claim(Operand(amount));
+ adjustFrame(amount);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void
+MacroAssembler::flush()
+{
+ Assembler::flush();
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
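+ // Push general-purpose registers in batches of up to four; unused slots
+ // stay NoCPUReg, which vixl's Push skips.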
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) {
+ vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };
+
+ for (size_t i = 0; i < 4 && iter.more(); i++) {
+ src[i] = ARMRegister(*iter, 64);
+ ++iter;
+ adjustFrame(8);
+ }
+ vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
+ }
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
+ vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };
+
+ for (size_t i = 0; i < 4 && iter.more(); i++) {
+ src[i] = ARMFPRegister(*iter, 64);
+ ++iter;
+ adjustFrame(8);
+ }
+ vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
+ }
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ // The offset of the data from the stack pointer.
+ uint32_t offset = 0;
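+ // Registers in |ignore| keep their stack slots but are never reloaded;
+ // the whole reserved area is still freed at the end.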
+
+ for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
+ vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
+ uint32_t nextOffset = offset;
+
+ for (size_t i = 0; i < 2 && iter.more(); i++) {
+ if (!ignore.has(*iter))
+ dest[i] = ARMFPRegister(*iter, 64);
+ ++iter;
+ nextOffset += sizeof(double);
+ }
+
+ if (!dest[0].IsNone() && !dest[1].IsNone())
+ Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
+ else if (!dest[0].IsNone())
+ Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
+ else if (!dest[1].IsNone())
+ Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double)));
+
+ offset = nextOffset;
+ }
+
+ MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes());
+
+ for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) {
+ vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
+ uint32_t nextOffset = offset;
+
+ for (size_t i = 0; i < 2 && iter.more(); i++) {
+ if (!ignore.has(*iter))
+ dest[i] = ARMRegister(*iter, 64);
+ ++iter;
+ nextOffset += sizeof(uint64_t);
+ }
+
+ if (!dest[0].IsNone() && !dest[1].IsNone())
+ Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
+ else if (!dest[0].IsNone())
+ Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
+ else if (!dest[1].IsNone())
+ Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t)));
+
+ offset = nextOffset;
+ }
+
+ size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes();
+ MOZ_ASSERT(offset == bytesPushed);
+ freeStack(bytesPushed);
+}
+
+void
+MacroAssembler::Push(Register reg)
+{
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(Register reg1, Register reg2, Register reg3, Register reg4)
+{
+ push(reg1, reg2, reg3, reg4);
+ adjustFrame(4 * sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const Imm32 imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmWord imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmPtr imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmGCPtr ptr)
+{
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(FloatRegister f)
+{
+ push(f);
+ adjustFrame(sizeof(double));
+}
+
+void
+MacroAssembler::Pop(Register reg)
+{
+ pop(reg);
+ adjustFrame(-1 * int64_t(sizeof(int64_t)));
+}
+
+void
+MacroAssembler::Pop(FloatRegister f)
+{
+ MOZ_CRASH("NYI: Pop(FloatRegister)");
+}
+
+void
+MacroAssembler::Pop(const ValueOperand& val)
+{
+ pop(val);
+ adjustFrame(-1 * int64_t(sizeof(int64_t)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset
+MacroAssembler::call(Register reg)
+{
+ syncStackPtr();
+ Blr(ARMRegister(reg, 64));
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::call(Label* label)
+{
+ syncStackPtr();
+ Bl(label);
+ return CodeOffset(currentOffset());
+}
+
+void
+MacroAssembler::call(ImmWord imm)
+{
+ call(ImmPtr((void*)imm.value));
+}
+
+void
+MacroAssembler::call(ImmPtr imm)
+{
+ syncStackPtr();
+ movePtr(imm, ip0);
+ Blr(vixl::ip0);
+}
+
+void
+MacroAssembler::call(wasm::SymbolicAddress imm)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ syncStackPtr();
+ movePtr(imm, scratch);
+ call(scratch);
+}
+
+void
+MacroAssembler::call(JitCode* c)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ syncStackPtr();
+ BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
+ addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
+ blr(scratch64);
+}
+
+CodeOffset
+MacroAssembler::callWithPatch()
+{
+ MOZ_CRASH("NYI");
+ return CodeOffset();
+}
+void
+MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
+{
+ MOZ_CRASH("NYI");
+}
+
+CodeOffset
+MacroAssembler::farJumpWithPatch()
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
+{
+ MOZ_CRASH("NYI");
+}
+
+CodeOffset
+MacroAssembler::nopPatchableToNearJump()
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::patchNearJumpToNop(uint8_t* jump)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::pushReturnAddress()
+{
+ push(lr);
+}
+
+void
+MacroAssembler::popReturnAddress()
+{
+ pop(lr);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
+ ARMRegister scratch64(scratch, 64);
+
+ // Always save LR -- Baseline ICs assume that LR isn't modified.
+ push(lr);
+
+ // Unhandled for sp -- needs slightly different logic.
+ MOZ_ASSERT(!GetStackPointer64().Is(sp));
+
+ // Remember the stack address on entry.
+ Mov(scratch64, GetStackPointer64());
+
+ // Make alignment, including the effective push of the previous sp.
+ Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
+ And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));
+
+ // If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
+ syncStackPtr();
+
+ // Store previous sp to the top of the stack, aligned.
+ Str(scratch64, MemOperand(GetStackPointer64(), 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // ARM64 /really/ wants the stack to always be aligned. Since the frame size
+ // is already being tracked, getting it aligned for an ABI call is easy.
+ MOZ_ASSERT(dynamicAlignment_);
+ stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
+ *stackAdjust = stackForCall;
+ reserveStack(*stackAdjust);
+ {
+ moveResolver_.resolve();
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ // Call boundaries communicate stack via sp.
+ syncStackPtr();
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ // Call boundaries communicate stack via sp.
+ if (!GetStackPointer64().Is(sp))
+ Mov(GetStackPointer64(), sp);
+
+ freeStack(stackAdjust);
+
+ // Restore the stack pointer from entry.
+ if (dynamicAlignment_)
+ Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));
+
+ // Restore LR.
+ pop(lr);
+
+ // TODO: This one shouldn't be necessary -- check that callers
+ // aren't enforcing the ABI themselves!
+ syncStackPtr();
+
+ // If the ABI's return registers are where Ion expects them, then no other
+ // work needs to be done.
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(fun, scratch);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ loadPtr(fun, scratch);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::pushFakeReturnAddress(Register scratch)
+{
+ enterNoPool(3);
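+ // The Adr/Push/bind sequence below must stay contiguous; keep the constant
+ // pool from being dumped in the middle of it.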
+ Label fakeCallsite;
+
+ Adr(ARMRegister(scratch, 64), &fakeCallsite);
+ Push(scratch);
+ bind(&fakeCallsite);
+ uint32_t pseudoReturnOffset = currentOffset();
+
+ leaveNoPool();
+ return pseudoReturnOffset;
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2);
+
+ movePtr(ptr, temp);
+ orPtr(Imm32(gc::ChunkMask), temp);
+ branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, address, temp, label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally.
+
+ Label done;
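+ // A non-object value can never be a nursery object: for an Equal test this
+ // falls through to |done|, for NotEqual it branches straight to |label|.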
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+
+ extractObject(value, temp);
+ orPtr(Imm32(gc::ChunkMask), temp);
+ branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.valueReg());
+ moveValue(rhs, ValueOperand(scratch64.asUnsized()));
+ Cmp(ARMRegister(lhs.valueReg(), 64), scratch64);
+ B(label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // For known integers and booleans, we can just store the unboxed value if
+ // the slot has the same type.
+ if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
+ if (value.constant()) {
+ Value val = value.value();
+ if (valueType == MIRType::Int32)
+ store32(Imm32(val.toInt32()), dest);
+ else
+ store32(Imm32(val.toBoolean() ? 1 : 0), dest);
+ } else {
+ store32(value.reg().typedReg().gpr(), dest);
+ }
+ return;
+ }
+
+ if (value.constant())
+ storeValue(value.value(), dest);
+ else
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
+
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+void
+MacroAssembler::comment(const char* msg)
+{
+ Assembler::comment(msg);
+}
+
+//}}} check_macroassembler_style
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
new file mode 100644
index 000000000..b95831443
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -0,0 +1,2338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MacroAssembler_arm64_h
+#define jit_arm64_MacroAssembler_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+// Import VIXL operands directly into the jit namespace for shared code.
+using vixl::Operand;
+using vixl::MemOperand;
+
+struct ImmShiftedTag : public ImmWord
+{
+ ImmShiftedTag(JSValueShiftedTag shtag)
+ : ImmWord((uintptr_t)shtag)
+ { }
+
+ ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+ { }
+};
+
+struct ImmTag : public Imm32
+{
+ ImmTag(JSValueTag tag)
+ : Imm32(tag)
+ { }
+};
+
+class MacroAssemblerCompat : public vixl::MacroAssembler
+{
+ public:
+ typedef vixl::Condition Condition;
+
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ js::jit::MacroAssembler& asMasm();
+ const js::jit::MacroAssembler& asMasm() const;
+
+ public:
+ // Restrict to only VIXL-internal functions.
+ vixl::MacroAssembler& asVIXL();
+ const vixl::MacroAssembler& asVIXL() const;
+
+ protected:
+ bool enoughMemory_;
+ uint32_t framePushed_;
+
+ MacroAssemblerCompat()
+ : vixl::MacroAssembler(),
+ enoughMemory_(true),
+ framePushed_(0)
+ { }
+
+ protected:
+ MoveResolver moveResolver_;
+
+ public:
+ bool oom() const {
+ return Assembler::oom() || !enoughMemory_;
+ }
+ static MemOperand toMemOperand(Address& a) {
+ return MemOperand(ARMRegister(a.base, 64), a.offset);
+ }
+ void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) {
+ const ARMRegister base = ARMRegister(addr.base, 64);
+ const ARMRegister index = ARMRegister(addr.index, 64);
+ const unsigned scale = addr.scale;
+
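+ // With no displacement and an index scale matching the access size, the
+ // scaled-register addressing mode encodes this directly; otherwise the
+ // effective address is computed into a scratch register first.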
+ if (!addr.offset && (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
+ LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(!scratch64.Is(rt));
+ MOZ_ASSERT(!scratch64.Is(base));
+ MOZ_ASSERT(!scratch64.Is(index));
+
+ Add(scratch64, base, Operand(index, vixl::LSL, scale));
+ LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
+ }
+ void Push(ARMRegister reg) {
+ push(reg);
+ adjustFrame(reg.size() / 8);
+ }
+ void Push(Register reg) {
+ vixl::MacroAssembler::Push(ARMRegister(reg, 64));
+ adjustFrame(8);
+ }
+ void Push(Imm32 imm) {
+ push(imm);
+ adjustFrame(8);
+ }
+ void Push(FloatRegister f) {
+ push(ARMFPRegister(f, 64));
+ adjustFrame(8);
+ }
+ void Push(ImmPtr imm) {
+ push(imm);
+ adjustFrame(sizeof(void*));
+ }
+ void push(FloatRegister f) {
+ vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
+ }
+ void push(ARMFPRegister f) {
+ vixl::MacroAssembler::Push(f);
+ }
+ void push(Imm32 imm) {
+ if (imm.value == 0) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ move32(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmWord imm) {
+ if (imm.value == 0) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, imm.value);
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmPtr imm) {
+ if (imm.value == nullptr) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmGCPtr imm) {
+ if (imm.value == nullptr) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ARMRegister reg) {
+ vixl::MacroAssembler::Push(reg);
+ }
+ void push(Address a) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(a.base != scratch64.asUnsized());
+ loadPtr(a, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+
+ // Push registers.
+ void push(Register reg) {
+ vixl::MacroAssembler::Push(ARMRegister(reg, 64));
+ }
+ void push(Register r0, Register r1) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
+ }
+ void push(Register r0, Register r1, Register r2) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64));
+ }
+ void push(Register r0, Register r1, Register r2, Register r3) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64), ARMRegister(r3, 64));
+ }
+ void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) {
+ vixl::MacroAssembler::Push(r0, r1, r2, r3);
+ }
+
+ // Pop registers.
+ void pop(Register reg) {
+ vixl::MacroAssembler::Pop(ARMRegister(reg, 64));
+ }
+ void pop(Register r0, Register r1) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
+ }
+ void pop(Register r0, Register r1, Register r2) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64));
+ }
+ void pop(Register r0, Register r1, Register r2, Register r3) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64), ARMRegister(r3, 64));
+ }
+ void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) {
+ vixl::MacroAssembler::Pop(r0, r1, r2, r3);
+ }
+
+ void pop(const ValueOperand& v) {
+ pop(v.valueReg());
+ }
+ void pop(const FloatRegister& f) {
+ vixl::MacroAssembler::Pop(ARMRegister(f.code(), 64));
+ }
+
+ void implicitPop(uint32_t args) {
+ MOZ_ASSERT(args % sizeof(intptr_t) == 0);
+ adjustFrame(-args);
+ }
+ void Pop(ARMRegister r) {
+ vixl::MacroAssembler::Pop(r);
+ adjustFrame(- r.size() / 8);
+ }
+ // FIXME: This is the same on every arch.
+ // FIXME: If we can share framePushed_, we can share this.
+ // FIXME: Or just make it at the highest level.
+ CodeOffset PushWithPatch(ImmWord word) {
+ framePushed_ += sizeof(word.value);
+ return pushWithPatch(word);
+ }
+ CodeOffset PushWithPatch(ImmPtr ptr) {
+ return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
+ }
+
+ uint32_t framePushed() const {
+ return framePushed_;
+ }
+ void adjustFrame(int32_t diff) {
+ setFramePushed(framePushed_ + diff);
+ }
+
+ void setFramePushed(uint32_t framePushed) {
+ framePushed_ = framePushed;
+ }
+
+ void freeStack(Register amount) {
+ vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64)));
+ }
+
+ // Update sp with the value of the current active stack pointer, if necessary.
+ void syncStackPtr() {
+ if (!GetStackPointer64().Is(vixl::sp))
+ Mov(vixl::sp, GetStackPointer64());
+ }
+ void initStackPtr() {
+ if (!GetStackPointer64().Is(vixl::sp))
+ Mov(GetStackPointer64(), vixl::sp);
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storePtr(val.valueReg(), dest);
+ }
+
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != reg);
+ tagValue(type, reg, ValueOperand(scratch));
+ storeValue(ValueOperand(scratch), dest);
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ moveValue(val, ValueOperand(scratch));
+ storeValue(ValueOperand(scratch), dest);
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storePtr(val.valueReg(), dest);
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void loadValue(Address src, Register val) {
+ Ldr(ARMRegister(val, 64), MemOperand(src));
+ }
+ void loadValue(Address src, ValueOperand val) {
+ Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ // This could be cleverer, but the first attempt had bugs.
+ Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64), Operand(ImmShiftedTag(type).value));
+ }
+ void pushValue(ValueOperand val) {
+ vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
+ }
+ void popValue(ValueOperand val) {
+ vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
+ }
+ void pushValue(const Value& val) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ if (val.isMarkable()) {
+ BufferOffset load = movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch);
+ writeDataRelocation(val, load);
+ push(scratch);
+ } else {
+ moveValue(val, scratch);
+ push(scratch);
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != reg);
+ tagValue(type, reg, ValueOperand(scratch));
+ push(scratch);
+ }
+ void pushValue(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != addr.base);
+ loadValue(addr, scratch);
+ push(scratch);
+ }
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 8: {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ unboxNonDouble(value, scratch);
+ storePtr(scratch, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+ void moveValue(const Value& val, Register dest) {
+ if (val.isMarkable()) {
+ BufferOffset load = movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest);
+ writeDataRelocation(val, load);
+ } else {
+ movePtr(ImmWord(val.asRawBits()), dest);
+ }
+ }
+ void moveValue(const Value& src, const ValueOperand& dest) {
+ moveValue(src, dest.valueReg());
+ }
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ if (src.valueReg() != dest.valueReg())
+ movePtr(src.valueReg(), dest.valueReg());
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ CodeOffset label = movWithPatch(imm, scratch);
+ push(scratch);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
+ return CodeOffset(off.getOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
+ return CodeOffset(off.getOffset());
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest) {
+ Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value));
+ }
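+ // Extract the value's tag bits (bits JSVAL_TAG_SHIFT and up) into the low
+ // bits of |dest|.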
+ void splitTag(Register src, Register dest) {
+ ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT));
+ }
+ Register extractTag(const Address& address, Register scratch) {
+ loadPtr(address, scratch);
+ splitTag(scratch, scratch);
+ return scratch;
+ }
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ splitTag(value.valueReg(), scratch);
+ return scratch;
+ }
+ Register extractObject(const Address& address, Register scratch) {
+ loadPtr(address, scratch);
+ unboxObject(scratch, scratch);
+ return scratch;
+ }
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ void emitSet(Condition cond, Register dest) {
+ Cset(ARMRegister(dest, 64), cond);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void convertBoolToInt32(Register source, Register dest) {
+ Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
+ }
+
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertInt32ToDouble(scratch, dest);
+ }
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ load32(src, scratch);
+ convertInt32ToDouble(scratch, dest);
+ }
+
+ void convertInt32ToFloat32(Register src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertInt32ToFloat32(scratch, dest);
+ }
+
+ void convertUInt32ToDouble(Register src, FloatRegister dest) {
+ Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertUInt32ToDouble(scratch, dest);
+ }
+
+ void convertUInt32ToFloat32(Register src, FloatRegister dest) {
+ Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertUInt32ToFloat32(scratch, dest);
+ }
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
+ Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
+ }
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
+ Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
+ }
+
+ using vixl::MacroAssembler::B;
+ void B(wasm::TrapDesc) {
+ MOZ_CRASH("NYI");
+ }
+ void B(wasm::TrapDesc, Condition cond) {
+ MOZ_CRASH("NYI");
+ }
+
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true)
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratch64 = temps.AcquireD();
+
+ ARMFPRegister fsrc(src, 64);
+ ARMRegister dest32(dest, 32);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch64.Is(fsrc));
+
+ Fcvtzs(dest32, fsrc); // Convert, rounding toward zero.
+ Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode.
+ Fcmp(scratch64, fsrc);
+ B(fail, Assembler::NotEqual);
+
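+ // A zero result may have come from -0.0. Inspect the raw bits of the input
+ // to distinguish +0.0 from -0.0 and bail on the latter.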
+ if (negativeZeroCheck) {
+ Label nonzero;
+ Cbnz(dest32, &nonzero);
+ Fmov(dest64, fsrc);
+ Cbnz(dest64, fail);
+ bind(&nonzero);
+ }
+ }
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true)
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratch32 = temps.AcquireS();
+
+ ARMFPRegister fsrc(src, 32);
+ ARMRegister dest32(dest, 32);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch32.Is(fsrc));
+
+ Fcvtzs(dest64, fsrc); // Convert, rounding toward zero.
+ Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode.
+ Fcmp(scratch32, fsrc);
+ B(fail, Assembler::NotEqual);
+
+ if (negativeZeroCheck) {
+ Label nonzero;
+ Cbnz(dest32, &nonzero);
+ Fmov(dest32, fsrc);
+ Cbnz(dest32, fail);
+ bind(&nonzero);
+ }
+ And(dest64, dest64, Operand(0xffffffff));
+ }
+
+ void floor(FloatRegister input, Register output, Label* bail) {
+ Label handleZero;
+ //Label handleNeg;
+ Label fin;
+ ARMFPRegister iDbl(input, 64);
+ ARMRegister o64(output, 64);
+ ARMRegister o32(output, 32);
+ Fcmp(iDbl, 0.0);
+ B(Assembler::Equal, &handleZero);
+ //B(Assembler::Signed, &handleNeg);
+ // NaN is always a bail condition, so bail directly.
+ B(Assembler::Overflow, bail);
+ Fcvtms(o64, iDbl);
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, bail);
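+ // The 32-bit self-move is deliberate: it zero-extends the result into the
+ // full 64-bit register.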
+ Mov(o32, o32);
+ B(&fin);
+
+ bind(&handleZero);
+ // Move the double's bits into the output reg. If they are non-zero, the
+ // original value was -0.0.
+ Fmov(o64, iDbl);
+ Cbnz(o64, bail);
+ bind(&fin);
+ }
+
+ void floorf(FloatRegister input, Register output, Label* bail) {
+ Label handleZero;
+ //Label handleNeg;
+ Label fin;
+ ARMFPRegister iFlt(input, 32);
+ ARMRegister o64(output, 64);
+ ARMRegister o32(output, 32);
+ Fcmp(iFlt, 0.0);
+ B(Assembler::Equal, &handleZero);
+ //B(Assembler::Signed, &handleNeg);
+ // NaN is always a bail condition, so bail directly.
+ B(Assembler::Overflow, bail);
+ Fcvtms(o64, iFlt);
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, bail);
+ Mov(o32, o32);
+ B(&fin);
+
+ bind(&handleZero);
+ // Move the float's bits into the output reg. If they are non-zero, the
+ // original value was -0.0.
+ Fmov(o32, iFlt);
+ Cbnz(o32, bail);
+ bind(&fin);
+ }
+
+ void ceil(FloatRegister input, Register output, Label* bail) {
+ Label handleZero;
+ Label fin;
+ ARMFPRegister iDbl(input, 64);
+ ARMRegister o64(output, 64);
+ ARMRegister o32(output, 32);
+ Fcmp(iDbl, 0.0);
+ B(Assembler::Overflow, bail);
+ Fcvtps(o64, iDbl);
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, bail);
+ Cbz(o64, &handleZero);
+ Mov(o32, o32);
+ B(&fin);
+
+ bind(&handleZero);
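+ // Move the double's bits into a scratch reg; if they are non-zero, the
+ // original value was -0.0.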
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Fmov(scratch, iDbl);
+ Cbnz(scratch, bail);
+ bind(&fin);
+ }
+
+ void ceilf(FloatRegister input, Register output, Label* bail) {
+ Label handleZero;
+ Label fin;
+ ARMFPRegister iFlt(input, 32);
+ ARMRegister o64(output, 64);
+ ARMRegister o32(output, 32);
+ Fcmp(iFlt, 0.0);
+
+ // NaN is always a bail condition, so bail directly.
+ B(Assembler::Overflow, bail);
+ Fcvtps(o64, iFlt);
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, bail);
+ Cbz(o64, &handleZero);
+ Mov(o32, o32);
+ B(&fin);
+
+ bind(&handleZero);
+ // Move the float's bits into the output reg. If they are non-zero, the
+ // original value was -0.0.
+ Fmov(o32, iFlt);
+ Cbnz(o32, bail);
+ bind(&fin);
+ }
+
+ void jump(Label* label) {
+ B(label);
+ }
+ void jump(JitCode* code) {
+ branch(code);
+ }
+ void jump(RepatchLabel* label) {
+ MOZ_CRASH("jump (repatchlabel)");
+ }
+ void jump(Register reg) {
+ Br(ARMRegister(reg, 64));
+ }
+ void jump(const Address& addr) {
+ loadPtr(addr, ip0);
+ Br(vixl::ip0);
+ }
+ void jump(wasm::TrapDesc target) {
+ MOZ_CRASH("NYI");
+ }
+
+ void align(int alignment) {
+ armbuffer_.align(alignment);
+ }
+
+ void haltingAlign(int alignment) {
+ // TODO: Implement a proper halting align.
+ // ARM doesn't have one either.
+ armbuffer_.align(alignment);
+ }
+ void nopAlign(int alignment) {
+ MOZ_CRASH("NYI");
+ }
+
+ void movePtr(Register src, Register dest) {
+ Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
+ }
+ void movePtr(ImmWord imm, Register dest) {
+ Mov(ARMRegister(dest, 64), int64_t(imm.value));
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ Mov(ARMRegister(dest, 64), int64_t(imm.value));
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
+ append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm));
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
+ writeDataRelocation(imm, load);
+ }
+
+ void mov(ImmWord imm, Register dest) {
+ movePtr(imm, dest);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ movePtr(imm, dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ movePtr(imm, dest);
+ }
+ void mov(Register src, Register dest) {
+ movePtr(src, dest);
+ }
+
+ void move32(Imm32 imm, Register dest) {
+ Mov(ARMRegister(dest, 32), (int64_t)imm.value);
+ }
+ void move32(Register src, Register dest) {
+ Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
+ }
+
+ // Move a pointer using a literal pool, so that the pointer
+ // may be easily patched or traced.
+ // Returns the BufferOffset of the load instruction emitted.
+ BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
+ BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);
+
+ void loadPtr(wasm::SymbolicAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ movePtr(address, scratch.asUnsized());
+ Ldr(ARMRegister(dest, 64), MemOperand(scratch));
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
+ Ldr(ARMRegister(dest, 64), MemOperand(scratch));
+ }
+ void loadPtr(const Address& address, Register dest) {
+ Ldr(ARMRegister(dest, 64), MemOperand(address));
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ ARMRegister dest64(dest, 64);
+ ARMRegister index64(src.index, 64);
+
+ if (src.offset) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64)));
+ MOZ_ASSERT(!scratch.Is(dest64));
+ MOZ_ASSERT(!scratch.Is(index64));
+
+ Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset)));
+ Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
+ return;
+ }
+
+ Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale));
+ }
+ void loadPrivate(const Address& src, Register dest);
+
+ void store8(Register src, const Address& address) {
+ Strb(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store8(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ move32(imm, scratch32.asUnsized());
+ Strb(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store8(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
+ }
+ void store8(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, Operand(imm.value));
+ doBaseIndex(scratch32, address, vixl::STRB_w);
+ }
+
+ void store16(Register src, const Address& address) {
+ Strh(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store16(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ move32(imm, scratch32.asUnsized());
+ Strh(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store16(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
+ }
+ void store16(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, Operand(imm.value));
+ doBaseIndex(scratch32, address, vixl::STRH_w);
+ }
+
+ void storePtr(ImmWord imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ movePtr(imm, scratch);
+ storePtr(scratch, address);
+ }
+ void storePtr(ImmPtr imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ Mov(scratch64, uint64_t(imm.value));
+ Str(scratch64, MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void storePtr(ImmGCPtr imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ movePtr(imm, scratch);
+ storePtr(scratch, address);
+ }
+ void storePtr(Register src, const Address& address) {
+ Str(ARMRegister(src, 64), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+
+ void storePtr(ImmWord imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ MOZ_ASSERT(scratch64.asUnsized() != address.index);
+ Mov(scratch64, Operand(imm.value));
+ doBaseIndex(scratch64, address, vixl::STR_x);
+ }
+ void storePtr(ImmGCPtr imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ MOZ_ASSERT(scratch != address.index);
+ movePtr(imm, scratch);
+ doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
+ }
+
+ void storePtr(Register src, AbsoluteAddress address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, uint64_t(address.addr));
+ Str(ARMRegister(src, 64), MemOperand(scratch64));
+ }
+
+ void store32(Register src, AbsoluteAddress address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, uint64_t(address.addr));
+ Str(ARMRegister(src, 32), MemOperand(scratch64));
+ }
+ void store32(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ Mov(scratch32, uint64_t(imm.value));
+ Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store32(Register r, const Address& address) {
+ Str(ARMRegister(r, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void store32(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, imm.value);
+ doBaseIndex(scratch32, address, vixl::STR_w);
+ }
+ void store32(Register r, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
+ }
+
+ void store32_NoSecondScratch(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ temps.Exclude(ARMRegister(ScratchReg2, 32)); // Disallow ScratchReg2.
+ const ARMRegister scratch32 = temps.AcquireW();
+
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ Mov(scratch32, uint64_t(imm.value));
+ Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+
+ void store64(Register64 src, Address address) {
+ storePtr(src.reg, address);
+ }
+
+ // SIMD.
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+
+ // StackPointer manipulation.
+ template <typename T> void addToStackPtr(T t);
+ template <typename T> void addStackPtrTo(T t);
+
+ template <typename T> inline void subFromStackPtr(T t);
+ template <typename T> inline void subStackPtrFrom(T t);
+
+ template <typename T> void andToStackPtr(T t);
+ template <typename T> void andStackPtrTo(T t);
+
+ template <typename T>
+ void moveToStackPtr(T t) { movePtr(t, getStackPointer()); syncStackPtr(); }
+ template <typename T>
+ void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }
+
+ template <typename T>
+ void loadStackPtr(T t) { loadPtr(t, getStackPointer()); syncStackPtr(); }
+ template <typename T>
+ void storeStackPtr(T t) { storePtr(getStackPointer(), t); }
+
+ // StackPointer testing functions.
+ template <typename T>
+ inline void branchTestStackPtr(Condition cond, T t, Label* label);
+ template <typename T>
+ void branchStackPtr(Condition cond, T rhs, Label* label);
+ template <typename T>
+ void branchStackPtrRhs(Condition cond, T lhs, Label* label);
+
+ void testPtr(Register lhs, Register rhs) {
+ Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
+ }
+ void test32(Register lhs, Register rhs) {
+ Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
+ }
+ void test32(const Address& addr, Imm32 imm) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != addr.base);
+ load32(addr, scratch32.asUnsized());
+ Tst(scratch32, Operand(imm.value));
+ }
+ void test32(Register lhs, Imm32 rhs) {
+ Tst(ARMRegister(lhs, 32), Operand(rhs.value));
+ }
+ void cmp32(Register lhs, Imm32 rhs) {
+ Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
+ }
+ void cmp32(Register a, Register b) {
+ Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
+ }
+ void cmp32(const Address& lhs, Imm32 rhs) {
+ cmp32(Operand(lhs.base, lhs.offset), rhs);
+ }
+ void cmp32(const Address& lhs, Register rhs) {
+ cmp32(Operand(lhs.base, lhs.offset), rhs);
+ }
+ void cmp32(const Operand& lhs, Imm32 rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ Mov(scratch32, lhs);
+ Cmp(scratch32, Operand(rhs.value));
+ }
+ void cmp32(const Operand& lhs, Register rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ Mov(scratch32, lhs);
+ Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
+ }
+
+ void cmpPtr(Register lhs, Imm32 rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
+ }
+ void cmpPtr(Register lhs, ImmWord rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
+ }
+ void cmpPtr(Register lhs, ImmPtr rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
+ }
+ void cmpPtr(Register lhs, ImmGCPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs);
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+
+ void cmpPtr(const Address& lhs, Register rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch64.asUnsized() != rhs);
+ Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
+ Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
+ }
+ void cmpPtr(const Address& lhs, ImmWord rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
+ Cmp(scratch64, Operand(rhs.value));
+ }
+ void cmpPtr(const Address& lhs, ImmPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
+ Cmp(scratch64, Operand(uint64_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ cmpPtr(scratch, rhs);
+ }
+
+ void loadDouble(const Address& src, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 64), MemOperand(src));
+ }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base(src.base, 64);
+ ARMRegister index(src.index, 64);
+
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 64), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
+ }
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset));
+ fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
+ }
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base(src.base, 64);
+ ARMRegister index(src.index, 64);
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
+ }
+ fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
+ }
+
+ void loadFloat32(const Address& addr, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset));
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base(src.base, 64);
+ ARMRegister index(src.index, 64);
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
+ }
+ }
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ }
+ void zeroDouble(FloatRegister reg) {
+ fmov(ARMFPRegister(reg, 64), vixl::xzr);
+ }
+ void zeroFloat32(FloatRegister reg) {
+ fmov(ARMFPRegister(reg, 32), vixl::wzr);
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ }
+ void moveFloatAsDouble(Register src, FloatRegister dest) {
+ MOZ_CRASH("moveFloatAsDouble");
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+ void splitTag(const Address& operand, Register dest) {
+ loadPtr(operand, dest);
+ splitTag(dest, dest);
+ }
+ void splitTag(const BaseIndex& operand, Register dest) {
+ loadPtr(operand, dest);
+ splitTag(dest, dest);
+ }
+
+ // Extracts the tag of a value and places it in ScratchReg.
+ Register splitTagForTest(const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
+ Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT);
+ return scratch64.asUnsized(); // FIXME: Surely we can make a better interface.
+ }
+ void cmpTag(const ValueOperand& operand, ImmTag tag) {
+ MOZ_CRASH("cmpTag");
+ }
+
+ void load32(const Address& address, Register dest) {
+ Ldr(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void load32(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
+ ldr(ARMRegister(dest, 32), MemOperand(scratch64));
+ }
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ void load8SignExtend(const Address& address, Register dest) {
+ Ldrsb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void load8SignExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
+ }
+
+ void load8ZeroExtend(const Address& address, Register dest) {
+ Ldrb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void load8ZeroExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
+ }
+
+ void load16SignExtend(const Address& address, Register dest) {
+ Ldrsh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void load16SignExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest) {
+ Ldrh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
+ }
+ void load16ZeroExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
+ }
+
+ void adds32(Register src, Register dest) {
+ Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+ }
+ void adds32(Imm32 imm, Register dest) {
+ Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+ }
+ void adds32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+
+ Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+ Adds(scratch32, scratch32, Operand(imm.value));
+ Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
+ }
+
+ void subs32(Imm32 imm, Register dest) {
+ Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+ }
+ void subs32(Register src, Register dest) {
+ Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
+ }
+
+ void ret() {
+ pop(lr);
+ abiret();
+ }
+
+ void retn(Imm32 n) {
+ // ip0 <- [sp]; sp += n; ret ip0
+ Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
+ syncStackPtr(); // SP is always used to transmit the stack between calls.
+ Ret(vixl::ip0);
+ }
+
+ void j(Condition cond, Label* dest) {
+ B(dest, cond);
+ }
+
+ void branch(Condition cond, Label* label) {
+ B(label, cond);
+ }
+ void branch(JitCode* target) {
+ syncStackPtr();
+ BufferOffset loc = b(-1); // The jump target will be patched by executableCopy().
+ addPendingJump(loc, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
+ Label* documentation = nullptr)
+ {
+ ARMBuffer::PoolEntry pe;
+ BufferOffset load_bo;
+ BufferOffset branch_bo;
+
+ // Does not overwrite condition codes from the caller.
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ load_bo = immPool64(scratch64, (uint64_t)label, &pe);
+ }
+
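+ // The patchable branch is always an unconditional b; a conditional jump
+ // is emitted as an inverted branch that skips over it, and a nop pads the
+ // unconditional case so both forms are two instructions long.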
+ MOZ_ASSERT(!label->bound());
+ if (cond != Always) {
+ Label notTaken;
+ B(&notTaken, Assembler::InvertCondition(cond));
+ branch_bo = b(-1);
+ bind(&notTaken);
+ } else {
+ nop();
+ branch_bo = b(-1);
+ }
+ label->use(branch_bo.getOffset());
+ return CodeOffsetJump(load_bo.getOffset(), pe.index());
+ }
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
+ return jumpWithPatch(label, Always, documentation);
+ }
+
+ void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
+ Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
+ }
+
+ void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
+ Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
+ }
+
+ void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
+ MOZ_CRASH("branchNegativeZero");
+ }
+ void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label) {
+ MOZ_CRASH("branchNegativeZeroFloat32");
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest) {
+ Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ boxValue(type, src, dest.valueReg());
+ }
+
+ // Note that |dest| may be the ScratchReg, so ScratchReg must not be used as a temporary here.
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ load32(src, dest);
+ }
+ void unboxDouble(const Address& src, FloatRegister dest) {
+ loadDouble(src, dest);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
+ }
+
+ void unboxArgObjMagic(const ValueOperand& src, Register dest) {
+ MOZ_CRASH("unboxArgObjMagic");
+ }
+ void unboxArgObjMagic(const Address& src, Register dest) {
+ MOZ_CRASH("unboxArgObjMagic");
+ }
+
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ load32(src, dest);
+ }
+
+ void unboxMagic(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
+ // instead if the source type is known.
+ void unboxNonDouble(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src.valueReg(), dest);
+ }
+ void unboxNonDouble(Address src, Register dest) {
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest);
+ }
+
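+ // Mask off the tag, keeping only the low JSVAL_TAG_SHIFT payload bits.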
+ void unboxNonDouble(Register src, Register dest) {
+ And(ARMRegister(dest, 64), ARMRegister(src, 64), Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL));
+ }
+
+ void unboxPrivate(const ValueOperand& src, Register dest) {
+ ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ ARMRegister r(val.valueReg(), 64);
+ eor(r, r, Operand(1));
+ }
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src.valueReg(), dest);
+ }
+ void unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
+ unboxNonDouble(dest, dest);
+ }
+
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest);
+
+ void unboxString(const ValueOperand& operand, Register dest) {
+ unboxNonDouble(operand, dest);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest);
+ }
+ void unboxSymbol(const ValueOperand& operand, Register dest) {
+ unboxNonDouble(operand, dest);
+ }
+ void unboxSymbol(const Address& src, Register dest) {
+ unboxNonDouble(src, dest);
+ }
+ // These two functions use the low 32 bits of the full value register.
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest) {
+ loadConstantDouble(d.fp(), dest);
+ }
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest) {
+ loadConstantFloat32(f.fp(), dest);
+ }
+ void loadConstantDouble(double d, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 64), d);
+ }
+ void loadConstantFloat32(float f, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 32), f);
+ }
+
+ // Register-based tests.
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
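+ // Tag values at or below JSVAL_TAG_MAX_DOUBLE encode doubles, so the test
+ // is an unsigned comparison rather than an equality check.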
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE));
+ return (cond == Equal) ? BelowOrEqual : Above;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return (cond == Equal) ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return (cond == Equal) ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return (cond == Equal) ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+
+ // ValueOperand-based tests.
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ // The incoming ValueOperand may use scratch registers.
+ vixl::UseScratchRegisterScope temps(this);
+
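+ // If the value is already held in ScratchReg2, test the tag in place
+ // instead of acquiring another scratch register.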
+ if (value.valueReg() == ScratchReg2) {
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg64));
+ MOZ_ASSERT(!temps.IsAvailable(ScratchReg2_64));
+ temps.Exclude(ScratchReg64);
+
+ if (cond != Equal && cond != NotEqual)
+ MOZ_CRASH("NYI: non-equality comparisons");
+
+ // If the tag is not encodable in a single cmp/teq instruction, perform
+ // the xor that teq would use. This leaves the tag bits either zero or
+ // non-zero, which can then be tested with an and or a shift.
+ unsigned int n, imm_r, imm_s;
+ uint64_t immediate = uint64_t(ImmTag(JSVAL_TAG_INT32).value) << JSVAL_TAG_SHIFT;
+ if (IsImmLogical(immediate, 64, &n, &imm_s, &imm_r)) {
+ Eor(ScratchReg64, ScratchReg2_64, Operand(immediate));
+ } else {
+ Mov(ScratchReg64, immediate);
+ Eor(ScratchReg64, ScratchReg2_64, ScratchReg64);
+ }
+ Tst(ScratchReg64, Operand((unsigned long long)(-1ll) << JSVAL_TAG_SHIFT));
+ return cond;
+ }
+
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != value.valueReg());
+
+ splitTag(value, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testPrimitive(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitTag(value, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+
+ // Address-based tests.
+ Condition testGCThing(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testNull(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testUndefined(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testString(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitTag(address, scratch);
+ return testNumber(cond, scratch);
+ }
+
+ // BaseIndex-based tests.
+ Condition testUndefined(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testNull(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testString(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testObject(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ ARMRegister payload32(operand.valueReg(), 32);
+ Tst(payload32, payload32);
+ return truthy ? NonZero : Zero;
+ }
+
+ Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
+ ARMRegister payload32(operand.valueReg(), 32);
+ Tst(payload32, payload32);
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ const ARMRegister scratch32(scratch, 32);
+ const ARMRegister scratch64(scratch, 64);
+
+ MOZ_ASSERT(value.valueReg() != scratch);
+
+ unboxString(value, scratch);
+ Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength()));
+ Cmp(scratch32, Operand(0));
+ return truthy ? Condition::NonZero : Condition::Zero;
+ }
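+ // Converts a Value known to hold either an int32 or a double into |dest|:
+ // double bits are moved as-is, int32 payloads are converted with Scvtf.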
+ void int32OrDouble(Register src, ARMFPRegister dest) {
+ Label isInt32;
+ Label join;
+ testInt32(Equal, ValueOperand(src));
+ B(&isInt32, Equal);
+ // It is a double; move the bits as-is.
+ Fmov(dest, ARMRegister(src, 64));
+ B(&join);
+ bind(&isInt32);
+ // It is an int32; convert while moving.
+ Scvtf(dest, ARMRegister(src, 32));
+ bind(&join);
+ }
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ Ldr(scratch64, toMemOperand(address));
+ int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
+ } else if (type == MIRType::Int32 || type == MIRType::Boolean) {
+ load32(address, dest.gpr());
+ } else {
+ loadPtr(address, dest.gpr());
+ unboxNonDouble(dest.gpr(), dest.gpr());
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ MOZ_ASSERT(scratch64.asUnsized() != address.index);
+ doBaseIndex(scratch64, address, vixl::LDR_x);
+ int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
+ } else if (type == MIRType::Int32 || type == MIRType::Boolean) {
+ load32(address, dest.gpr());
+ } else {
+ loadPtr(address, dest.gpr());
+ unboxNonDouble(dest.gpr(), dest.gpr());
+ }
+ }
+
+ void loadInstructionPointerAfterCall(Register dest) {
+ MOZ_CRASH("loadInstructionPointerAfterCall");
+ }
+
+ // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label) {
+ BufferOffset offset = b(label, Always);
+ CodeOffset ret(offset.getOffset());
+ return ret;
+ }
+
+ // load: offset to the load instruction obtained by movePatchablePtr().
+ void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
+ if (ptr.value)
+ dataRelocations_.writeUnsigned(load.getOffset());
+ }
+ void writeDataRelocation(const Value& val, BufferOffset load) {
+ if (val.isMarkable()) {
+ gc::Cell* cell = val.toMarkablePointer();
+ if (cell && gc::IsInsideNursery(cell))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(load.getOffset());
+ }
+ }
+
+ void writePrebarrierOffset(CodeOffset label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset));
+ }
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ ARMRegister dest64(dest, 64);
+ ARMRegister base64(address.base, 64);
+ ARMRegister index64(address.index, 64);
+
+ Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
+ if (address.offset)
+ Add(dest64, dest64, Operand(address.offset));
+ }
+
+ public:
+ CodeOffset labelForPatch() {
+ return CodeOffset(nextOffset().getOffset());
+ }
+
+ void handleFailureWithHandlerTail(void* handler);
+
+ void profilerEnterFrame(Register framePtr, Register scratch) {
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+ }
+ void profilerExitFrame() {
+ branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+ }
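+ // Values are a single 64-bit word on ARM64, so the payload and the type
+ // live at the same address.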
+ Address ToPayload(Address value) {
+ return value;
+ }
+ Address ToType(Address value) {
+ return value;
+ }
+
+ private:
+ template <typename T>
+ void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
+ Register newval, Register output)
+ {
+ MOZ_CRASH("compareExchange");
+ }
+
+ template <typename T>
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const T& address, Register temp, Register output)
+ {
+ MOZ_CRASH("atomicFetchOp");
+ }
+
+ template <typename T>
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const T& address, Register temp, Register output)
+ {
+ MOZ_CRASH("atomicFetchOp");
+ }
+
+ template <typename T>
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& mem) {
+ MOZ_CRASH("atomicEffectOp");
+ }
+
+ template <typename T>
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& mem) {
+ MOZ_CRASH("atomicEffectOp");
+ }
+
+ public:
+ // T in {Address,BaseIndex}
+ // S in {Imm32,Register}
+
+ template <typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(1, true, mem, oldval, newval, output);
+ }
+ template <typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(1, false, mem, oldval, newval, output);
+ }
+ template <typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(2, true, mem, oldval, newval, output);
+ }
+ template <typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+ {
+ compareExchange(2, false, mem, oldval, newval, output);
+ }
+ template <typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
+ compareExchange(4, false, mem, oldval, newval, output);
+ }
+ template <typename T>
+ void atomicExchange32(const T& mem, Register value, Register output) {
+ MOZ_CRASH("atomicExchang32");
+ }
+
+ template <typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
+ MOZ_CRASH("atomicExchange8ZeroExtend");
+ }
+ template <typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
+ MOZ_CRASH("atomicExchange8SignExtend");
+ }
+
+ template <typename T, typename S>
+ void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
+ }
+
+ template <typename T, typename S>
+ void atomicAdd8(const S& value, const T& mem) {
+ atomicEffectOp(1, AtomicFetchAddOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicAdd16(const S& value, const T& mem) {
+ atomicEffectOp(2, AtomicFetchAddOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicAdd32(const S& value, const T& mem) {
+ atomicEffectOp(4, AtomicFetchAddOp, value, mem);
+ }
+
+ template <typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
+ MOZ_CRASH("atomicExchange16ZeroExtend");
+ }
+ template <typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
+ MOZ_CRASH("atomicExchange16SignExtend");
+ }
+
+ template <typename T, typename S>
+ void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
+ }
+
+ template <typename T, typename S>
+ void atomicSub8(const S& value, const T& mem) {
+ atomicEffectOp(1, AtomicFetchSubOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicSub16(const S& value, const T& mem) {
+ atomicEffectOp(2, AtomicFetchSubOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicSub32(const S& value, const T& mem) {
+ atomicEffectOp(4, AtomicFetchSubOp, value, mem);
+ }
+
+ template <typename T, typename S>
+ void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
+ }
+
+ template <typename T, typename S>
+ void atomicAnd8(const S& value, const T& mem) {
+ atomicEffectOp(1, AtomicFetchAndOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicAnd16(const S& value, const T& mem) {
+ atomicEffectOp(2, AtomicFetchAndOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicAnd32(const S& value, const T& mem) {
+ atomicEffectOp(4, AtomicFetchAndOp, value, mem);
+ }
+
+ template <typename T, typename S>
+ void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
+ }
+
+ template <typename T, typename S>
+ void atomicOr8(const S& value, const T& mem) {
+ atomicEffectOp(1, AtomicFetchOrOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicOr16(const S& value, const T& mem) {
+ atomicEffectOp(2, AtomicFetchOrOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicOr32(const S& value, const T& mem) {
+ atomicEffectOp(4, AtomicFetchOrOp, value, mem);
+ }
+
+ template <typename T, typename S>
+ void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+ template <typename T, typename S>
+ void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
+ atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
+ }
+
+ template <typename T, typename S>
+ void atomicXor8(const S& value, const T& mem) {
+ atomicEffectOp(1, AtomicFetchXorOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicXor16(const S& value, const T& mem) {
+ atomicEffectOp(2, AtomicFetchXorOp, value, mem);
+ }
+ template <typename T, typename S>
+ void atomicXor32(const S& value, const T& mem) {
+ atomicEffectOp(4, AtomicFetchXorOp, value, mem);
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, AnyRegister output);
+
+ // Emit a BLR or NOP instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ // The returned offset must be to the first instruction generated,
+ // for the debugger to match offset with Baseline's pcMappingEntries_.
+ BufferOffset offset = nextOffset();
+
+ syncStackPtr();
+
+ BufferOffset loadOffset;
+ {
+ vixl::UseScratchRegisterScope temps(this);
+
+ // The register used for the load is hardcoded, so that ToggleCall
+ // can patch in the branch instruction easily. This could be changed,
+ // but then ToggleCall must read the target register from the load.
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
+ temps.Exclude(ScratchReg2_64);
+
+ loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));
+
+ if (enabled)
+ blr(ScratchReg2_64);
+ else
+ nop();
+ }
+
+ addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE);
+ CodeOffset ret(offset.getOffset());
+ return ret;
+ }
+
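+ // Returns the size in bytes of the sequence emitted by toggledCall(),
+ // accounting for the optional stack-sync instruction and any branch over
+ // an inline constant pool.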
+ static size_t ToggledCallSize(uint8_t* code) {
+ static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, x28
+
+ // Start with the 8-byte core sequence (the pool load plus the blr/nop).
+ int ret = 8;
+ Instruction* cur = (Instruction*)code;
+ uint32_t* curw = (uint32_t*)code;
+
+ if (*curw == syncStackInstruction) {
+ ret += 4;
+ cur += 4;
+ }
+
+ if (cur->IsUncondB())
+ ret += cur->ImmPCRawOffset() << vixl::kInstructionSizeLog2;
+
+ return ret;
+ }
+
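+ // Debug-only: trap via breakpoint() if |reg| is not stack-aligned.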
+ void checkARMRegAlignment(const ARMRegister& reg) {
+#ifdef DEBUG
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != reg.asUnsized());
+ Label aligned;
+ Mov(scratch64, reg);
+ Tst(scratch64, Operand(StackAlignment - 1));
+ B(Zero, &aligned);
+ breakpoint();
+ bind(&aligned);
+ Mov(scratch64, vixl::xzr); // Clear the scratch register for sanity.
+#endif
+ }
+
+ void checkStackAlignment() {
+#ifdef DEBUG
+ checkARMRegAlignment(GetStackPointer64());
+
+ // If another register is being used to track pushes, check sp explicitly.
+ if (!GetStackPointer64().Is(vixl::sp))
+ checkARMRegAlignment(vixl::sp);
+#endif
+ }
+
+ void abiret() {
+ syncStackPtr(); // SP is always used to transmit the stack between calls.
+ vixl::MacroAssembler::Ret(vixl::lr);
+ }
+
+ bool convertUInt64ToDoubleNeedsTemp() {
+ return false;
+ }
+
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
+ }
+
+ void clampCheck(Register r, Label* handleNotAnInt) {
+ MOZ_CRASH("clampCheck");
+ }
+
+ void stackCheck(ImmWord limitAddr, Label* label) {
+ MOZ_CRASH("stackCheck");
+ }
+
+ void incrementInt32Value(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != addr.base);
+
+ load32(addr, scratch32.asUnsized());
+ Add(scratch32, scratch32, Operand(1));
+ store32(scratch32.asUnsized(), addr);
+ }
+
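+ // Compares |ptrReg| against the pinned heap length register. When the access
+ // is out of bounds, |zeroMe| (if given) is cleared, or NaN-filled for FP
+ // registers, before branching to |onFail|.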
+ void BoundsCheck(Register ptrReg, Label* onFail, vixl::CPURegister zeroMe = vixl::NoReg) {
+ // Ensure that a single comparison instruction is generated.
+ Cmp(ARMRegister(ptrReg, 32), ARMRegister(HeapLenReg, 32));
+ if (!zeroMe.IsNone()) {
+ if (zeroMe.IsRegister()) {
+ Csel(ARMRegister(zeroMe),
+ ARMRegister(zeroMe),
+ Operand(zeroMe.Is32Bits() ? vixl::wzr : vixl::xzr),
+ Assembler::Below);
+ } else if (zeroMe.Is32Bits()) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratchFloat = temps.AcquireS();
+ Fmov(scratchFloat, JS::GenericNaN());
+ Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchFloat, Assembler::Below);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratchDouble = temps.AcquireD();
+ Fmov(scratchDouble, JS::GenericNaN());
+ Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchDouble, Assembler::Below);
+ }
+ }
+ B(onFail, Assembler::AboveOrEqual);
+ }
+ void breakpoint();
+
+ // Emits a simulator directive to save the current sp on an internal stack.
+ void simulatorMarkSP() {
+#ifdef JS_SIMULATOR_ARM64
+ svc(vixl::kMarkStackPointer);
+#endif
+ }
+
+ // Emits a simulator directive to pop from its internal stack
+ // and assert that the value is equal to the current sp.
+ void simulatorCheckSP() {
+#ifdef JS_SIMULATOR_ARM64
+ svc(vixl::kCheckStackPointer);
+#endif
+ }
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
+ }
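+ // Reload the pinned wasm registers from TLS: HeapReg gets the memory base
+ // and GlobalReg the (biased) global data pointer.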
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
+ adds32(Imm32(WasmGlobalRegBias), GlobalReg);
+ }
+
+ // Overwrites the payload bits of a dest register containing a Value.
+ void movePayload(Register src, Register dest) {
+ // Bfxil cannot be used with the zero register as a source.
+ if (src == rzr)
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK)));
+ else
+ Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT);
+ }
+
+ // FIXME: Should be in Assembler?
+ // FIXME: Should be const?
+ uint32_t currentOffset() const {
+ return nextOffset().getOffset();
+ }
+
+ struct AutoPrepareForPatching {
+ explicit AutoPrepareForPatching(MacroAssemblerCompat&) {}
+ };
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+ Push(Imm32(descriptor));
+ Push(ImmPtr(fakeReturnAddr));
+ return true;
+ }
+};
+
+typedef MacroAssemblerCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_MacroAssembler_arm64_h
diff --git a/js/src/jit/arm64/MoveEmitter-arm64.cpp b/js/src/jit/arm64/MoveEmitter-arm64.cpp
new file mode 100644
index 000000000..f8e237683
--- /dev/null
+++ b/js/src/jit/arm64/MoveEmitter-arm64.cpp
@@ -0,0 +1,300 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/MoveEmitter-arm64.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+MemOperand
+MoveEmitterARM64::toMemOperand(const MoveOperand& operand) const
+{
+ MOZ_ASSERT(operand.isMemory());
+ ARMRegister base(operand.base(), 64);
+ if (operand.base() == masm.getStackPointer())
+ return MemOperand(base, operand.disp() + (masm.framePushed() - pushedAtStart_));
+ return MemOperand(base, operand.disp());
+}
+
+void
+MoveEmitterARM64::emit(const MoveResolver& moves)
+{
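+ // Reserve a stack slot up front if any move cycles will need to be broken.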
+ if (moves.numCycles()) {
+ masm.reserveStack(sizeof(void*));
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++)
+ emitMove(moves.getMove(i));
+}
+
+void
+MoveEmitterARM64::finish()
+{
+ assertDone();
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+ MOZ_ASSERT(masm.framePushed() == pushedAtStart_);
+}
+
+void
+MoveEmitterARM64::emitMove(const MoveOp& move)
+{
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleBegin()) {
+ MOZ_ASSERT(!inCycle_ && !move.isCycleEnd());
+ breakCycle(from, to, move.endCycleType());
+ inCycle_ = true;
+ } else if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type());
+ inCycle_ = false;
+ return;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitGeneralMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterARM64::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.Fmov(toFPReg(to, MoveOp::FLOAT32), toFPReg(from, MoveOp::FLOAT32));
+ else
+ masm.Str(toFPReg(from, MoveOp::FLOAT32), toMemOperand(to));
+ return;
+ }
+
+ if (to.isFloatReg()) {
+ masm.Ldr(toFPReg(to, MoveOp::FLOAT32), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch32 = temps.AcquireS();
+ masm.Ldr(scratch32, toMemOperand(from));
+ masm.Str(scratch32, toMemOperand(to));
+}
+
+void
+MoveEmitterARM64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.Fmov(toFPReg(to, MoveOp::DOUBLE), toFPReg(from, MoveOp::DOUBLE));
+ else
+ masm.Str(toFPReg(from, MoveOp::DOUBLE), toMemOperand(to));
+ return;
+ }
+
+ if (to.isFloatReg()) {
+ masm.Ldr(toFPReg(to, MoveOp::DOUBLE), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch = temps.AcquireD();
+ masm.Ldr(scratch, toMemOperand(from));
+ masm.Str(scratch, toMemOperand(to));
+}
+
+void
+MoveEmitterARM64::emitInt32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg())
+ masm.Mov(toARMReg32(to), toARMReg32(from));
+ else
+ masm.Str(toARMReg32(from), toMemOperand(to));
+ return;
+ }
+
+ if (to.isGeneralReg()) {
+ masm.Ldr(toARMReg32(to), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+ masm.Ldr(scratch32, toMemOperand(from));
+ masm.Str(scratch32, toMemOperand(to));
+}
+
+void
+MoveEmitterARM64::emitGeneralMove(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isGeneralReg()) {
+ MOZ_ASSERT(to.isGeneralReg() || to.isMemory());
+ if (to.isGeneralReg())
+ masm.Mov(toARMReg64(to), toARMReg64(from));
+ else
+ masm.Str(toARMReg64(from), toMemOperand(to));
+ return;
+ }
+
+ // {Memory OR EffectiveAddress} -> Register move.
+ if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory())
+ masm.Ldr(toARMReg64(to), toMemOperand(from));
+ else
+ masm.Add(toARMReg64(to), toARMReg64(from), Operand(from.disp()));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // Memory -> Memory move.
+ if (from.isMemory()) {
+ MOZ_ASSERT(to.isMemory());
+ masm.Ldr(scratch64, toMemOperand(from));
+ masm.Str(scratch64, toMemOperand(to));
+ return;
+ }
+
+ // EffectiveAddress -> Memory move.
+ MOZ_ASSERT(from.isEffectiveAddress());
+ MOZ_ASSERT(to.isMemory());
+ masm.Add(scratch64, toARMReg64(from), Operand(from.disp()));
+ masm.Str(scratch64, toMemOperand(to));
+}
+
+MemOperand
+MoveEmitterARM64::cycleSlot()
+{
+ // Using SP as stack pointer requires alignment preservation below.
+ MOZ_ASSERT(!masm.GetStackPointer64().Is(sp));
+
+ // emit() already allocated a slot for resolving the cycle.
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+
+ return MemOperand(masm.GetStackPointer64(), masm.framePushed() - pushedAtCycle_);
+}
+
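+// A cycle is broken by saving the about-to-be-clobbered destination of the
+// first move into the cycle slot (breakCycle) and restoring it into the final
+// destination once the rest of the cycle has been emitted (completeCycle).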
+void
+MoveEmitterARM64::breakCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type)
+{
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch32 = temps.AcquireS();
+ masm.Ldr(scratch32, toMemOperand(to));
+ masm.Str(scratch32, cycleSlot());
+ } else {
+ masm.Str(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch64 = temps.AcquireD();
+ masm.Ldr(scratch64, toMemOperand(to));
+ masm.Str(scratch64, cycleSlot());
+ } else {
+ masm.Str(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+ masm.Ldr(scratch32, toMemOperand(to));
+ masm.Str(scratch32, cycleSlot());
+ } else {
+ masm.Str(toARMReg32(to), cycleSlot());
+ }
+ break;
+
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+ masm.Ldr(scratch64, toMemOperand(to));
+ masm.Str(scratch64, cycleSlot());
+ } else {
+ masm.Str(toARMReg64(to), cycleSlot());
+ }
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterARM64::completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type)
+{
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch32 = temps.AcquireS();
+ masm.Ldr(scratch32, cycleSlot());
+ masm.Str(scratch32, toMemOperand(to));
+ } else {
+ masm.Ldr(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch = temps.AcquireD();
+ masm.Ldr(scratch, cycleSlot());
+ masm.Str(scratch, toMemOperand(to));
+ } else {
+ masm.Ldr(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+ masm.Ldr(scratch32, cycleSlot());
+ masm.Str(scratch32, toMemOperand(to));
+ } else {
+ masm.Ldr(toARMReg64(to), cycleSlot());
+ }
+ break;
+
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+ masm.Ldr(scratch64, cycleSlot());
+ masm.Str(scratch64, toMemOperand(to));
+ } else {
+ masm.Ldr(toARMReg64(to), cycleSlot());
+ }
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
diff --git a/js/src/jit/arm64/MoveEmitter-arm64.h b/js/src/jit/arm64/MoveEmitter-arm64.h
new file mode 100644
index 000000000..09f96315f
--- /dev/null
+++ b/js/src/jit/arm64/MoveEmitter-arm64.h
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MoveEmitter_arm64_h
+#define jit_arm64_MoveEmitter_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+
+class MoveEmitterARM64
+{
+ bool inCycle_;
+ MacroAssembler& masm;
+
+ // The value of masm.framePushed() when the emitter was created.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ void assertDone() {
+ MOZ_ASSERT(!inCycle_);
+ }
+
+ MemOperand cycleSlot();
+ MemOperand toMemOperand(const MoveOperand& operand) const;
+ ARMRegister toARMReg32(const MoveOperand& operand) const {
+ MOZ_ASSERT(operand.isGeneralReg());
+ return ARMRegister(operand.reg(), 32);
+ }
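+ // For memory and effective-address operands this returns the base register.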
+ ARMRegister toARMReg64(const MoveOperand& operand) const {
+ if (operand.isGeneralReg())
+ return ARMRegister(operand.reg(), 64);
+ else
+ return ARMRegister(operand.base(), 64);
+ }
+ ARMFPRegister toFPReg(const MoveOperand& operand, MoveOp::Type t) const {
+ MOZ_ASSERT(operand.isFloatReg());
+ return ARMFPRegister(operand.floatReg().encoding(), t == MoveOp::FLOAT32 ? 32 : 64);
+ }
+
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitGeneralMove(const MoveOperand& from, const MoveOperand& to);
+
+ void emitMove(const MoveOp& move);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type);
+
+ public:
+ MoveEmitterARM64(MacroAssembler& masm)
+ : inCycle_(false),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1)
+ { }
+
+ ~MoveEmitterARM64() {
+ assertDone();
+ }
+
+ void emit(const MoveResolver& moves);
+ void finish();
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterARM64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_MoveEmitter_arm64_h */
diff --git a/js/src/jit/arm64/SharedIC-arm64.cpp b/js/src/jit/arm64/SharedIC-arm64.cpp
new file mode 100644
index 000000000..0c7aa1364
--- /dev/null
+++ b/js/src/jit/arm64/SharedIC-arm64.cpp
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/SharedIC.h"
+#include "jit/SharedICHelpers.h"
+
+#ifdef JS_SIMULATOR_ARM64
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/BaselineCompiler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Add R0 and R1. Don't need to explicitly unbox, just use R2.
+ Register Rscratch = R2_;
+ ARMRegister Wscratch = ARMRegister(Rscratch, 32);
+#ifdef MERGE
+ // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+ AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
+ savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
+#endif
+ // Get more convenient ARM-style names for the registers.
+ ARMRegister W0(R0_, 32);
+ ARMRegister X0(R0_, 64);
+ ARMRegister W1(R1_, 32);
+ ARMRegister X1(R1_, 64);
+ ARMRegister WTemp(ExtractTemp0, 32);
+ ARMRegister XTemp(ExtractTemp0, 64);
+ Label maybeNegZero, revertRegister;
+ switch(op_) {
+ case JSOP_ADD:
+ masm.Adds(WTemp, W0, Operand(W1));
+
+ // On overflow, jump to failure. R0 and R1 are preserved, so the next
+ // stub in the chain can simply be tried.
+ masm.j(Assembler::Overflow, &failure);
+
+ // Box the result and return. We know R0 already contains the
+ // integer tag, so we just need to move the payload into place.
+ masm.movePayload(ExtractTemp0, R0_);
+ break;
+
+ case JSOP_SUB:
+ masm.Subs(WTemp, W0, Operand(W1));
+ masm.j(Assembler::Overflow, &failure);
+ masm.movePayload(ExtractTemp0, R0_);
+ break;
+
+ case JSOP_MUL:
+ masm.mul32(R0.valueReg(), R1.valueReg(), Rscratch, &failure, &maybeNegZero);
+ masm.movePayload(Rscratch, R0_);
+ break;
+
+ case JSOP_DIV:
+ case JSOP_MOD: {
+
+ // Check for INT_MIN / -1; it results in a double.
+ Label check2;
+ masm.Cmp(W0, Operand(INT_MIN));
+ masm.B(&check2, Assembler::NotEqual);
+ masm.Cmp(W1, Operand(-1));
+ masm.j(Assembler::Equal, &failure);
+ masm.bind(&check2);
+ Label no_fail;
+ // Check for both division by zero and 0 / X with X < 0 (results in -0).
+ masm.Cmp(W1, Operand(0));
+ // If the divisor is greater than zero, neither failure case applies.
+ masm.B(&no_fail, Assembler::GreaterThan);
+ // If the divisor is zero, skip the comparison and force the Equal
+ // condition so we fail. Otherwise (divisor < 0, the only remaining
+ // case), compare the dividend against zero and fail if it is zero,
+ // since 0 divided by a negative number produces -0.
+ masm.Ccmp(W0, Operand(0), vixl::ZFlag, Assembler::NotEqual);
+ masm.B(&failure, Assembler::Equal);
+ masm.bind(&no_fail);
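+ // Worked example of the guard above: for 0 / -3 the divisor is negative,
+ // so the Ccmp compares the dividend (0) against zero and the Equal branch
+ // to failure is taken, which is correct since 0 / -3 must produce the
+ // double -0. For 5 / 0, the Ccmp forces the Z flag and we also fail.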
+ masm.Sdiv(Wscratch, W0, W1);
+ // Start calculating the remainder, x - (x / y) * y.
+ masm.mul(WTemp, W1, Wscratch);
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0, which happens
+ // when (x/y)*y != x.
+ masm.branch32(Assembler::NotEqual, R0.valueReg(), ExtractTemp0, &revertRegister);
+ masm.movePayload(Rscratch, R0_);
+ } else {
+ // Calculate the actual mod. Set the condition code, so we can see if it is non-zero.
+ masm.Subs(WTemp, W0, WTemp);
+
+ // If X % Y == 0 and X < 0, the result is -0.
+ masm.Ccmp(W0, Operand(0), vixl::NoFlag, Assembler::Equal);
+ masm.branch(Assembler::LessThan, &revertRegister);
+ masm.movePayload(ExtractTemp0, R0_);
+ }
+ break;
+ }
+ // ORR, EOR, and AND can trivially be coerced into working on the full
+ // boxed value without affecting the tag of the destination.
+ case JSOP_BITOR:
+ masm.Orr(X0, X0, Operand(X1));
+ break;
+ case JSOP_BITXOR:
+ masm.Eor(X0, X0, Operand(W1, vixl::UXTW));
+ break;
+ case JSOP_BITAND:
+ masm.And(X0, X0, Operand(X1));
+ break;
+ // LSH, RSH, and URSH cannot.
+ case JSOP_LSH:
+ // ARM will happily try to shift by more than 0x1f.
+ masm.Lsl(Wscratch, W0, W1);
+ masm.movePayload(Rscratch, R0.valueReg());
+ break;
+ case JSOP_RSH:
+ masm.Asr(Wscratch, W0, W1);
+ masm.movePayload(Rscratch, R0.valueReg());
+ break;
+ case JSOP_URSH:
+ masm.Lsr(Wscratch, W0, W1);
+ if (allowDouble_) {
+ Label toUint;
+ // Testing for negative is equivalent to testing bit 31
+ masm.Tbnz(Wscratch, 31, &toUint);
+ // Move result and box for return.
+ masm.movePayload(Rscratch, R0_);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(Rscratch, ScratchDoubleReg);
+ masm.boxDouble(ScratchDoubleReg, R0);
+ } else {
+ // Testing for negative is equivalent to testing bit 31
+ masm.Tbnz(Wscratch, 31, &failure);
+ // Move result for return.
+ masm.movePayload(Rscratch, R0_);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+ switch (op_) {
+ case JSOP_MUL:
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ masm.Cmn(W0, W1);
+ masm.j(Assembler::Signed, &failure);
+
+ // Result is +0, so use the zero register.
+ masm.movePayload(rzr, R0_);
+ EmitReturnFromIC(masm);
+ break;
+ case JSOP_DIV:
+ case JSOP_MOD:
+ masm.bind(&revertRegister);
+ break;
+ default:
+ break;
+ }
+
+ // Failure case - jump to next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.Mvn(ARMRegister(R1.valueReg(), 32), ARMRegister(R0.valueReg(), 32));
+ masm.movePayload(R1.valueReg(), R0.valueReg());
+ break;
+ case JSOP_NEG:
+ // Guard against 0 and INT_MIN; both result in a double.
+ masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure);
+
+ // Compile -x as 0 - x.
+ masm.Sub(ARMRegister(R1.valueReg(), 32), wzr, ARMRegister(R0.valueReg(), 32));
+ masm.movePayload(R1.valueReg(), R0.valueReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/SharedICHelpers-arm64.h b/js/src/jit/arm64/SharedICHelpers-arm64.h
new file mode 100644
index 000000000..b97129e65
--- /dev/null
+++ b/js/src/jit/arm64/SharedICHelpers-arm64.h
@@ -0,0 +1,337 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_SharedICHelpers_arm64_h
+#define jit_arm64_SharedICHelpers_arm64_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on the stack on AArch64).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ // No-op on AArch64: the link register always holds the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ // No-op on AArch64: the link register always holds the return address.
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
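+ // (The ImmWord(-1) is a placeholder: the offset recorded in *patchOffset
+ // is presumably patched later with the address of the ICEntry, so the
+ // load below fetches that entry's first stub.)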
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use r0.
+ MOZ_ASSERT(R2 == ValueOperand(r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Call the stubcode via a direct branch-and-link.
+ masm.Blr(x0);
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg is
+ // properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use r0.
+ MOZ_ASSERT(R2 == ValueOperand(r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Jump to the stubcode.
+ masm.Br(x0);
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.abiret(); // Defaults to lr.
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.movePtr(reg, lr);
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ // We assume that R0 has been pushed, and R2 is unused.
+ MOZ_ASSERT(R2 == ValueOperand(r0));
+
+ // Compute frame size into w0. Used below in makeFrameDescriptor().
+ masm.Sub(x0, BaselineFrameReg64, masm.GetStackPointer64());
+ masm.Add(w0, w0, Operand(BaselineFrame::FramePointerOffset));
+
+ // Store frame size without VMFunction arguments for GC marking.
+ {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+
+ masm.Sub(scratch32, w0, Operand(argSize));
+ masm.store32(scratch32.asUnsized(),
+ Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+ }
+
+ // Push frame descriptor (minus the return address) and perform the tail call.
+ MOZ_ASSERT(ICTailCallReg == lr);
+ masm.makeFrameDescriptor(r0, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(r0);
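+ // (For reference, the descriptor pushed here packs the frame size and the
+ // frame type into a single word, roughly
+ //     descriptor = (frameSize << FRAMESIZE_SHIFT) | JitFrame_BaselineJS,
+ // matching the FRAMESIZE_SHIFT / FRAMETYPE_BITS decoding used elsewhere
+ // in this port.)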
+
+ // The return address will be pushed by the VM wrapper, for compatibility
+ // with direct calls. Refer to the top of generateVMWrapper().
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls).
+
+ masm.branch(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ MOZ_CRASH("Not implemented yet.");
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ ARMRegister reg64(reg, 64);
+
+ // Compute stub frame size.
+ masm.Sub(reg64, masm.GetStackPointer64(), Operand(sizeof(void*) * 2));
+ masm.Sub(reg64, BaselineFrameReg64, reg64);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ EmitBaselineCreateStubFrameDescriptor(masm, r0, ExitFrameLayout::Size());
+ masm.push(r0);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ MOZ_CRASH("Not implemented yet.");
+}
+
+// Size of values pushed by EmitEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
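+// The four words counted by STUB_FRAME_SIZE are: the frame descriptor, the
+// return address (ICTailCallReg), ICStubReg, and the saved BaselineFrameReg,
+// as pushed by EmitBaselineEnterStubFrame below.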
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ // Compute frame size.
+ masm.Add(ARMRegister(scratch, 64), BaselineFrameReg64, Operand(BaselineFrame::FramePointerOffset));
+ masm.Sub(ARMRegister(scratch, 64), ARMRegister(scratch, 64), masm.GetStackPointer64());
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE.
+
+ // Push frame descriptor and return address.
+ // Save old frame pointer, stack pointer, and stub reg.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch, ICTailCallReg, ICStubReg, BaselineFrameReg);
+
+ // Update the frame register.
+ masm.Mov(BaselineFrameReg64, masm.GetStackPointer64());
+
+ // Stack should remain 16-byte aligned.
+ masm.checkStackAlignment();
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_CRASH("Not implemented yet.");
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ masm.pop(scratch64.asUnsized());
+ masm.Lsr(scratch64, scratch64, FRAMESIZE_SHIFT);
+ masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), scratch64);
+ } else {
+ masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64);
+ }
+
+ // Pop values, discarding the frame descriptor.
+ masm.pop(BaselineFrameReg, ICStubReg, ICTailCallReg, scratch64.asUnsized());
+
+ // Stack should remain 16-byte aligned.
+ masm.checkStackAlignment();
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ MOZ_CRASH("Not implemented yet.");
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ switch (values) {
+ case 1:
+ // Stow R0.
+ masm.Push(R0);
+ break;
+ case 2:
+ // Stow R0 and R1.
+ masm.Push(R0.valueReg());
+ masm.Push(R1.valueReg());
+ break;
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values");
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch (values) {
+ case 1:
+ // Unstow R0.
+ if (discard)
+ masm.Drop(Operand(sizeof(Value)));
+ else
+ masm.popValue(R0);
+ break;
+ case 2:
+ // Unstow R0 and R1.
+ if (discard)
+ masm.Drop(Operand(sizeof(Value) * 2));
+ else
+ masm.pop(R1.valueReg(), R0.valueReg());
+ break;
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values");
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from stack top, excluding the return address.
+ MOZ_ASSERT(R2 == ValueOperand(r0));
+
+ // Save the current ICStubReg to stack, as well as the TailCallReg,
+ // since on AArch64, the LR is live.
+ masm.push(ICStubReg, ICTailCallReg);
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t)ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Load stubcode pointer from ICStubReg into ICTailCallReg.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), ICTailCallReg);
+
+ // Call the stubcode.
+ masm.Blr(ARMRegister(ICTailCallReg, 64));
+
+ // Restore the old stub reg and tailcall reg.
+ masm.pop(ICTailCallReg, ICStubReg);
+
+ // The update IC will store 0 or 1 in R1.scratchReg(), reflecting whether
+ // the value in R0 type-checked properly.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + objectOffset), R1);
+ masm.Push(R0.valueReg());
+ masm.Push(R1.valueReg());
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ // On AArch64, lr is clobbered by patchableCallPreBarrier. Save it first.
+ masm.push(lr);
+ masm.patchableCallPreBarrier(addr, type);
+ masm.pop(lr);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into scratch register.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.Br(x0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_SharedICHelpers_arm64_h
diff --git a/js/src/jit/arm64/SharedICRegisters-arm64.h b/js/src/jit/arm64/SharedICRegisters-arm64.h
new file mode 100644
index 000000000..c78724158
--- /dev/null
+++ b/js/src/jit/arm64/SharedICRegisters-arm64.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_SharedICRegisters_arm64_h
+#define jit_arm64_SharedICRegisters_arm64_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+// Must be a callee-saved register for preservation around generateEnterJIT().
+static constexpr Register BaselineFrameReg = r23;
+static constexpr ARMRegister BaselineFrameReg64 = { BaselineFrameReg, 64 };
+
+// BaselineStackReg is intentionally undefined on ARM64.
+// Refer to the comment next to the definition of RealStackPointer.
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls.
+// R1 value should be preserved across calls.
+static constexpr Register R0_ = r2;
+static constexpr Register R1_ = r19;
+static constexpr Register R2_ = r0;
+
+static constexpr ValueOperand R0(R0_);
+static constexpr ValueOperand R1(R1_);
+static constexpr ValueOperand R2(R2_);
+
+// ICTailCallReg and ICStubReg use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = r30;
+static constexpr Register ICStubReg = r9;
+
+// ExtractTemps must be callee-save registers:
+// ICSetProp_Native::Compiler::generateStubCode() stores the object
+// in ExtractTemp0, but then calls callTypeUpdateIC(), which clobbers
+// caller-save registers.
+// They should also not be the scratch registers ip0 or ip1,
+// since those get clobbered all the time.
+static constexpr Register ExtractTemp0 = r24;
+static constexpr Register ExtractTemp1 = r25;
+
+// R7 - R9 are generally available for use within stubcode.
+
+// Note that ICTailCallReg is actually just the link register.
+// During code emission we never clobber it, since the return address for
+// calls is kept there.
+
+static constexpr FloatRegister FloatReg0 = { FloatRegisters::d0, FloatRegisters::Double };
+static constexpr FloatRegister FloatReg1 = { FloatRegisters::d1, FloatRegisters::Double };
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_SharedICRegisters_arm64_h
diff --git a/js/src/jit/arm64/Trampoline-arm64.cpp b/js/src/jit/arm64/Trampoline-arm64.cpp
new file mode 100644
index 000000000..547bdd927
--- /dev/null
+++ b/js/src/jit/arm64/Trampoline-arm64.cpp
@@ -0,0 +1,1229 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/arm64/SharedICHelpers-arm64.h"
+#include "jit/VMFunctions.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1 << 31 | 1 << 30 | 1 << 29| 1 << 28)),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+/* This method generates a trampoline on ARM64 for a C++ function with
+ * the following signature:
+ *   bool blah(void* code, int argc, Value* argv, JSObject* scopeChain, Value* vp)
+ * ...using the standard AArch64 calling convention.
+ */
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ MacroAssembler masm(cx);
+
+ const Register reg_code = IntArgReg0; // EnterJitData::jitcode.
+ const Register reg_argc = IntArgReg1; // EnterJitData::maxArgc.
+ const Register reg_argv = IntArgReg2; // EnterJitData::maxArgv.
+ const Register reg_osrFrame = IntArgReg3; // EnterJitData::osrFrame.
+ const Register reg_callee = IntArgReg4; // EnterJitData::calleeToken.
+ const Register reg_scope = IntArgReg5; // EnterJitData::scopeChain.
+ const Register reg_osrNStack = IntArgReg6; // EnterJitData::osrNumStackValues.
+ const Register reg_vp = IntArgReg7; // Address of EnterJitData::result.
+
+ MOZ_ASSERT(OsrFrameReg == IntArgReg3);
+
+ // During the pushes below, use the normal stack pointer.
+ masm.SetStackPointer64(sp);
+
+ // Save old frame pointer and return address; set new frame pointer.
+ masm.push(r29, r30);
+ masm.moveStackPtrTo(r29);
+
+ // Save callee-save integer registers.
+ // Also save x7 (reg_vp) and x30 (lr), for use later.
+ masm.push(r19, r20, r21, r22);
+ masm.push(r23, r24, r25, r26);
+ masm.push(r27, r28, r7, r30);
+
+ // Save callee-save floating-point registers.
+ // AArch64 ABI specifies that only the lower 64 bits must be saved.
+ masm.push(d8, d9, d10, d11);
+ masm.push(d12, d13, d14, d15);
+
+#ifdef DEBUG
+ // Emit stack canaries.
+ masm.movePtr(ImmWord(0xdeadd00d), r23);
+ masm.movePtr(ImmWord(0xdeadd11d), r24);
+ masm.push(r23, r24);
+#endif
+
+ // Common code below attempts to push single registers at a time,
+ // which breaks the stack pointer's 16-byte alignment requirement.
+ // Note that movePtr() is invalid because StackPointer is treated as xzr.
+ //
+ // FIXME: After testing, this entire function should be rewritten to not
+ // use the PseudoStackPointer: since the amount of data pushed is precalculated,
+ // we can just allocate the whole frame header at once and index off sp.
+ // This will save a significant number of instructions where Push() updates sp.
+ masm.Mov(PseudoStackPointer64, sp);
+ masm.SetStackPointer64(PseudoStackPointer64);
+
+ // Save the stack pointer at this point for Baseline OSR.
+ masm.moveStackPtrTo(BaselineFrameReg);
+ // Remember stack depth without padding and arguments.
+ masm.moveStackPtrTo(r19);
+
+ // If constructing, include newTarget in argument vector.
+ {
+ Label noNewTarget;
+ Imm32 constructingToken(CalleeToken_FunctionConstructing);
+ masm.branchTest32(Assembler::Zero, reg_callee, constructingToken, &noNewTarget);
+ masm.add32(Imm32(1), reg_argc);
+ masm.bind(&noNewTarget);
+ }
+
+ // JitFrameLayout is as follows (higher is higher in memory):
+ // N*8 - [ JS argument vector ] (base 16-byte aligned)
+ // 8 - numActualArgs
+ // 8 - calleeToken (16-byte aligned)
+ // 8 - frameDescriptor
+ // 8 - returnAddress (16-byte aligned, pushed by callee)
+
+ // Push the argument vector onto the stack.
+ // WARNING: destructively modifies reg_argv
+ {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+
+ const ARMRegister tmp_argc = temps.AcquireX();
+ const ARMRegister tmp_sp = temps.AcquireX();
+
+ Label noArguments;
+ Label loopHead;
+
+ masm.movePtr(reg_argc, tmp_argc.asUnsized());
+
+ // sp -= 8
+ // Since we're using PostIndex Str below, this is necessary to avoid overwriting
+ // the SPS mark pushed above.
+ masm.subFromStackPtr(Imm32(8));
+
+ // sp -= 8 * argc
+ masm.Sub(PseudoStackPointer64, PseudoStackPointer64, Operand(tmp_argc, vixl::SXTX, 3));
+
+ // Give sp 16-byte alignment and sync stack pointers.
+ masm.andToStackPtr(Imm32(~0xff));
+ masm.moveStackPtrTo(tmp_sp.asUnsized());
+
+ masm.branchTestPtr(Assembler::Zero, reg_argc, reg_argc, &noArguments);
+
+ // Begin argument-pushing loop.
+ // This could be optimized using Ldp and Stp.
+ {
+ masm.bind(&loopHead);
+
+ // Load an argument from argv, then increment argv by 8.
+ masm.Ldr(x24, MemOperand(ARMRegister(reg_argv, 64), Operand(8), vixl::PostIndex));
+
+ // Store the argument to tmp_sp, then increment tmp_sp by 8.
+ masm.Str(x24, MemOperand(tmp_sp, Operand(8), vixl::PostIndex));
+
+ // Decrement tmp_argc; this sets the condition codes as if by
+ // |cmp tmp_argc, 1| on the old value.
+ masm.Subs(tmp_argc, tmp_argc, Operand(1));
+
+ // Branch if arguments remain.
+ masm.B(&loopHead, vixl::Condition::ge);
+ }
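+
+ // (A possible optimization, as noted above: use Ldp/Stp to copy two
+ // Values per iteration, with a single Ldr/Str for an odd remainder.)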
+
+ masm.bind(&noArguments);
+ }
+ masm.checkStackAlignment();
+
+ // Push the number of actual arguments and the calleeToken.
+ // The result slot (vp[0]) is used to pass in the actual argument count,
+ // so that EnterJIT does not need an extra parameter for it.
+ masm.unboxInt32(Address(reg_vp, 0x0), ip0);
+ masm.push(ip0, reg_callee);
+ masm.checkStackAlignment();
+
+ // Calculate the number of bytes pushed so far.
+ masm.subStackPtrFrom(r19);
+
+ // Push the frameDescriptor.
+ masm.makeFrameDescriptor(r19, JitFrame_Entry, JitFrameLayout::Size());
+ masm.Push(r19);
+
+ Label osrReturnPoint;
+ if (type == EnterJitBaseline) {
+ // Check for OSR.
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ // Push return address and previous frame pointer.
+ masm.Adr(ScratchReg2_64, &osrReturnPoint);
+ masm.push(ScratchReg2, BaselineFrameReg);
+
+ // Reserve frame.
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+ masm.moveStackPtrTo(BaselineFrameReg);
+
+ // Reserve space for locals and stack values.
+ masm.Lsl(w19, ARMRegister(reg_osrNStack, 32), 3); // w19 = num_stack_values * sizeof(Value).
+ masm.subFromStackPtr(r19);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), r19);
+ masm.makeFrameDescriptor(r19, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.asVIXL().Push(x19, xzr); // Push xzr for a fake return address.
+ // No GC things to mark: push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.push(BaselineFrameReg, reg_code);
+
+ // Initialize the frame, including filling in the slots.
+ masm.setupUnalignedABICall(r19);
+ masm.passABIArg(BaselineFrameReg); // BaselineFrame.
+ masm.passABIArg(reg_osrFrame); // InterpreterFrame.
+ masm.passABIArg(reg_osrNStack);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ masm.pop(r19, BaselineFrameReg);
+ MOZ_ASSERT(r19 != ReturnReg);
+
+ masm.addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
+ masm.addPtr(Imm32(BaselineFrame::Size()), BaselineFrameReg);
+
+ Label error;
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ masm.jump(r19);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer, and return.
+ masm.bind(&error);
+ masm.Add(masm.GetStackPointer64(), BaselineFrameReg64, Operand(2 * sizeof(uintptr_t)));
+ masm.syncStackPtr();
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.B(&osrReturnPoint);
+
+ masm.bind(&notOsr);
+ masm.movePtr(reg_scope, R1_);
+ }
+
+ // Call function.
+ // Since AArch64 doesn't have the pc register available, the callee must push lr.
+ masm.callJitNoProfiler(reg_code);
+
+ // Baseline OSR will return here.
+ if (type == EnterJitBaseline)
+ masm.bind(&osrReturnPoint);
+
+ // Pop the frame descriptor, remove the frame, and switch back to the real sp.
+ masm.Pop(r19);
+ masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
+ Operand(x19, vixl::LSR, FRAMESIZE_SHIFT));
+ masm.syncStackPtr();
+ masm.SetStackPointer64(sp);
+
+#ifdef DEBUG
+ // Check that canaries placed on function entry are still present.
+ masm.pop(r24, r23);
+ Label x23OK, x24OK;
+
+ masm.branchPtr(Assembler::Equal, r23, ImmWord(0xdeadd00d), &x23OK);
+ masm.breakpoint();
+ masm.bind(&x23OK);
+
+ masm.branchPtr(Assembler::Equal, r24, ImmWord(0xdeadd11d), &x24OK);
+ masm.breakpoint();
+ masm.bind(&x24OK);
+#endif
+
+ // Restore callee-save floating-point registers.
+ masm.pop(d15, d14, d13, d12);
+ masm.pop(d11, d10, d9, d8);
+
+ // Restore callee-save integer registers.
+ // Also restore x7 (reg_vp) and x30 (lr).
+ masm.pop(r30, r7, r28, r27);
+ masm.pop(r26, r25, r24, r23);
+ masm.pop(r22, r21, r20, r19);
+
+ // Store the return value (in JSReturnReg = x2) to the just-popped reg_vp.
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore old frame pointer.
+ masm.pop(r30, r29);
+
+ // Return using the value popped into x30.
+ masm.abiret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("EnterJIT");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.push(r0, r1, r2, r3);
+
+ masm.PushRegsInMask(AllRegs);
+ masm.moveStackPtrTo(r0);
+
+ masm.Sub(x1, masm.GetStackPointer64(), Operand(sizeof(size_t)));
+ masm.Sub(x2, masm.GetStackPointer64(), Operand(sizeof(size_t) + sizeof(void*)));
+ masm.moveToStackPtr(r2);
+
+ masm.setupUnalignedABICall(r10);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.passABIArg(r2);
+
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.pop(r2, r1);
+
+ masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), x1);
+ masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
+ Operand(sizeof(InvalidationBailoutStack)));
+ masm.syncStackPtr();
+
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ return linker.newCode<NoGC>(cx, OTHER_CODE);
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm;
+
+ // Save the return address for later.
+ masm.push(lr);
+
+ // Load the information that the rectifier needs from the stack.
+ masm.Ldr(w0, MemOperand(masm.GetStackPointer64(), RectifierFrameLayout::offsetOfNumActualArgs()));
+ masm.Ldr(x1, MemOperand(masm.GetStackPointer64(), RectifierFrameLayout::offsetOfCalleeToken()));
+
+ // Extract a JSFunction pointer from the callee token and keep the
+ // intermediary to avoid later recalculation.
+ masm.And(x5, x1, Operand(CalleeTokenMask));
+
+ // Get the arguments from the function object.
+ masm.Ldrh(x6, MemOperand(x5, JSFunction::offsetOfNargs()));
+
+ static_assert(CalleeToken_FunctionConstructing == 0x1, "Constructing must be low-order bit");
+ masm.And(x4, x1, Operand(CalleeToken_FunctionConstructing));
+ masm.Add(x7, x6, x4);
+
+ // Calculate the position that our arguments are at before sp gets modified.
+ MOZ_ASSERT(ArgumentsRectifierReg == r8, "x8 used for argc in Arguments Rectifier");
+ masm.Add(x3, masm.GetStackPointer64(), Operand(x8, vixl::LSL, 3));
+ masm.Add(x3, x3, Operand(sizeof(RectifierFrameLayout)));
+
+ // Pad to a multiple of 16 bytes. This neglects the |this| value,
+ // which will also be pushed, because the rest of the frame will
+ // round off that value. See pushes of |argc|, |callee| and |desc| below.
+ Label noPadding;
+ masm.Tbnz(x7, 0, &noPadding);
+ masm.asVIXL().Push(xzr);
+ masm.Add(x7, x7, Operand(1));
+ masm.bind(&noPadding);
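+ // (Worked example: with nargs + isConstructing == 2 the count is even, so
+ // one xzr is pushed and x7 becomes 3; together with |this| that is 4
+ // slots, i.e. 32 bytes, which keeps the 16-byte alignment.)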
+
+ {
+ Label notConstructing;
+ masm.Cbz(x4, &notConstructing);
+
+ // new.target lives at the end of the pushed args.
+ // NB: x3 points at the beginning of the last arg; add one Value
+ // to reach argv[argc].
+ masm.loadPtr(Address(r3, sizeof(Value)), r4);
+ masm.Push(r4);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Calculate the number of undefineds that need to be pushed.
+ masm.Sub(w2, w6, w8);
+
+ // Put an undefined in a register so it can be pushed.
+ masm.moveValue(UndefinedValue(), r4);
+
+ // Push undefined N times.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+ masm.Push(r4);
+ masm.Subs(w2, w2, Operand(1));
+ masm.B(&undefLoopTop, Assembler::NonZero);
+ }
+
+ // Arguments copy loop. Copy for x8 >= 0 to include |this|.
+ {
+ Label copyLoopTop;
+ masm.bind(&copyLoopTop);
+ masm.Ldr(x4, MemOperand(x3, -sizeof(Value), vixl::PostIndex));
+ masm.Push(r4);
+ masm.Subs(x8, x8, Operand(1));
+ masm.B(&copyLoopTop, Assembler::NotSigned);
+ }
+
+ // Fix up the size of the stack frame. +1 accounts for |this|.
+ masm.Add(x6, x7, Operand(1));
+ masm.Lsl(x6, x6, 3);
+
+ // Make that into a frame descriptor.
+ masm.makeFrameDescriptor(r6, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ masm.push(r0, // Number of actual arguments.
+ r1, // Callee token.
+ r6); // Frame descriptor.
+
+ // Load the address of the code that is getting called.
+ masm.Ldr(x3, MemOperand(x5, JSFunction::offsetOfNativeOrScript()));
+ masm.loadBaselineOrIonRaw(r3, r3, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(r3);
+
+ // Clean up.
+ // Load the frame descriptor and pop the fixed frame (descriptor, callee
+ // token, and argc: 24 bytes) with a single post-indexed load.
+ masm.Ldr(x4, MemOperand(masm.GetStackPointer64(), 24, vixl::PostIndex));
+
+ // Now that the size of the stack frame sans the fixed frame has been loaded,
+ // add that onto the stack pointer.
+ masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
+ Operand(x4, vixl::LSR, FRAMESIZE_SHIFT));
+
+ // Pop the return address from earlier and branch.
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // the stack should look like:
+ // [IonFrame]
+ // bailoutFrame.registersnapshot
+ // bailoutFrame.fpsnapshot
+ // bailoutFrame.snapshotOffset
+ // bailoutFrame.frameSize
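+ //
+ // This is the layout that the Bailout() call made by GenerateBailoutThunk
+ // reads through the stack pointer passed to it in r0.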
+
+ // STEP 1a: Save our register sets to the stack so Bailout() can read
+ // everything.
+ // sp % 8 == 0
+
+ // We don't have to push everything, but this is likely easier.
+ // Setting regs_.
+ masm.subFromStackPtr(Imm32(Registers::TotalPhys * sizeof(void*)));
+ for (uint32_t i = 0; i < Registers::TotalPhys; i += 2) {
+ masm.Stp(ARMRegister::XRegFromCode(i),
+ ARMRegister::XRegFromCode(i + 1),
+ MemOperand(masm.GetStackPointer64(), i * sizeof(void*)));
+ }
+
+ // Our data structures for stack inspection have a fixed, compile-time
+ // layout, so reserve space for, and store, all of the physical double
+ // registers.
+ masm.subFromStackPtr(Imm32(FloatRegisters::TotalPhys * sizeof(double)));
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i += 2) {
+ masm.Stp(ARMFPRegister::DRegFromCode(i),
+ ARMFPRegister::DRegFromCode(i + 1),
+ MemOperand(masm.GetStackPointer64(), i * sizeof(void*)));
+ }
+
+ // STEP 1b: Push both the "return address" of the function call (the address
+ //          of the instruction after the call that we used to get here) and
+ //          the frame class onto the stack. The return address is currently
+ //          in lr (x30); the frame class is moved into a scratch register
+ //          below, and both are then pushed together.
+
+ // Now place the frameClass onto the stack, via a register.
+ masm.Mov(x9, frameClass);
+
+ // And push it, together with the return address, onto the stack. The
+ // register area above is already full, so these go past its end. Since
+ // the ABI requires sp to always point at or below the lowest written
+ // address (the OS may clobber anything below sp), we must actually push
+ // rather than store below sp.
+ masm.push(r30, r9);
+ masm.moveStackPtrTo(spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, r0);
+
+ // SP % 8 == 4
+ // STEP 1c: Call the bailout function, giving a pointer to the
+ // structure we just blitted onto the stack.
+ // Make space for the BaselineBailoutInfo* outparam (16 bytes, which also
+ // keeps the stack 16-byte aligned).
+ const int sizeOfBailoutInfo = sizeof(void*) * 2;
+ masm.reserveStack(sizeOfBailoutInfo);
+ masm.moveStackPtrTo(r1);
+
+ masm.setupUnalignedABICall(r2);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ masm.Ldr(x2, MemOperand(masm.GetStackPointer64(), 0));
+ masm.addToStackPtr(Imm32(sizeOfBailoutInfo));
+
+ static const uint32_t BailoutDataSize = sizeof(void*) * Registers::Total +
+ sizeof(double) * FloatRegisters::TotalPhys;
+
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ masm.Ldr(scratch64, MemOperand(masm.GetStackPointer64(), sizeof(uintptr_t)));
+ masm.addToStackPtr(Imm32(BailoutDataSize + 32));
+ masm.addToStackPtr(scratch64.asUnsized());
+ } else {
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+ masm.addToStackPtr(Imm32(frameSize + BailoutDataSize + sizeof(void*)));
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ // FIXME: Implement.
+ MacroAssembler masm;
+ masm.breakpoint();
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTable");
+ return linker.newCode<NoGC>(cx, OTHER_CODE);
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ MacroAssembler masm(cx);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // Wrapper register set is a superset of the Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // Unlike on other platforms, it is the responsibility of the VM *callee* to
+ // push the return address, while the caller must ensure that the address
+ // is stored in lr on entry. This allows the VM wrapper to work with both direct
+ // calls and tail calls.
+ masm.push(lr);
+
+ // First argument is the JSContext.
+ Register reg_cx = IntArgReg0;
+ regs.take(reg_cx);
+
+ // Stack is:
+ // ... frame ...
+ // +16 [args]
+ // +8 descriptor
+ // +0 returnAddress (pushed by this function, caller sets as lr)
+ //
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(reg_cx);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ // argsBase can't be an argument register. Bad things would happen if
+ // the MoveResolver didn't throw an assertion failure first.
+ argsBase = r8;
+ regs.take(argsBase);
+ masm.Add(ARMRegister(argsBase, 64), masm.GetStackPointer64(),
+ Operand(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for any outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int64_t));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(reg_cx);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp),
+ (f.argPassedInFloatReg(explicitArg) ? MoveOp::DOUBLE : MoveOp::GENERAL));
+ argDisp += sizeof(void*);
+ break;
+
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+
+ case VMFunction::DoubleByValue:
+ case VMFunction::DoubleByRef:
+ MOZ_CRASH("NYI: AArch64 callVM should not be used with 128bit values.");
+ }
+ }
+
+ // Copy the semi-implicit outparam, if any.
+ // It is not a C++-abi outparam, which would get passed in the
+ // outparam register, but a real parameter to the function, which
+ // was stack-allocated above.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // SP is used to transfer stack across call boundaries.
+ if (!masm.GetStackPointer64().Is(vixl::sp))
+ masm.Mov(masm.GetStackPointer64(), vixl::sp);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.branchIfFalseBool(r0, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Value:
+ masm.Ldr(ARMRegister(JSReturnReg, 64), MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Int32:
+ masm.Ldr(ARMRegister(ReturnReg, 32), MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(int64_t));
+ break;
+
+ case Type_Bool:
+ masm.Ldrb(ARMRegister(ReturnReg, 32), MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(int64_t));
+ break;
+
+ case Type_Double:
+ MOZ_ASSERT(cx->runtime()->jitSupportsFloatingPoint);
+ masm.Ldr(ARMFPRegister(ReturnDoubleReg, 64), MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(double));
+ break;
+
+ case Type_Pointer:
+ masm.Ldr(ARMRegister(ReturnReg, 64), MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ LiveRegisterSet regs = LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+
+ // Also preserve the return address.
+ regs.add(lr);
+
+ masm.PushRegsInMask(regs);
+
+ MOZ_ASSERT(PreBarrierReg == r1);
+ masm.movePtr(ImmPtr(cx->runtime()), r3);
+
+ masm.setupUnalignedABICall(r0);
+ masm.passABIArg(r3);
+ masm.passABIArg(PreBarrierReg);
+ masm.callWithABI(IonMarkFunction(type));
+
+ // Pop the volatile regs and restore LR.
+ masm.PopRegsInMask(regs);
+
+ masm.abiret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ return linker.newCode<NoGC>(cx, OTHER_CODE);
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.setFramePushed(sizeof(intptr_t));
+#endif
+
+ Register scratch1 = r0;
+ Register scratch2 = r1;
+
+ // Load BaselineFrame pointer into scratch1.
+ masm.Sub(ARMRegister(scratch1, 64), BaselineFrameReg64, Operand(BaselineFrame::Size()));
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the
+ // stub frame has a nullptr ICStub pointer, since this pointer is marked
+ // during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.asVIXL().Push(vixl::lr, ARMRegister(scratch1, 64));
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return (return
+ // from the JS frame). If the stub returns |false|, just return from the
+ // trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.abiret();
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64);
+
+ masm.pop(BaselineFrameReg, lr);
+ masm.syncStackPtr();
+ masm.abiret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm(cx);
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ masm.generateBailoutTail(r1, r2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = r8;
+ Register scratch2 = r9;
+ Register scratch3 = r10;
+ Register scratch4 = r11;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchStackPtr(Assembler::Equal, scratch1, &checkOk);
+ masm.assumeUnreachable("Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do depending on its type.
+ masm.loadPtr(Address(masm.getStackPointer(), JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
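+ // For example, a descriptor of ((0x40 << FRAMESIZE_SHIFT) | JitFrame_IonJS)
+ // leaves 0x40 (the frame size in bytes) in scratch1 and JitFrame_IonJS in
+ // scratch2.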
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+ // returning directly to an IonJS frame. Store return addr to frame
+ // in lastProfilingCallSite.
+ masm.loadPtr(Address(masm.getStackPointer(), JitFrameLayout::offsetOfReturnAddress()),
+ scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := masm.getStackPointer() + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.addPtr(masm.getStackPointer(), scratch1, scratch2);
+ masm.syncStackPtr();
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2, scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ masm.addPtr(masm.getStackPointer(), scratch1, scratch3);
+ masm.syncStackPtr();
+ Address stubFrameReturnAddr(scratch3,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr.
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.addPtr(masm.getStackPointer(), scratch1, scratch2);
+ masm.syncStackPtr();
+ masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.addPtr(scratch2, scratch1, scratch3);
+ masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.addPtr(scratch2, scratch1, scratch3);
+ Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.addPtr(masm.getStackPointer(), scratch1, scratch2);
+ masm.syncStackPtr();
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.addPtr(scratch2, scratch3, scratch1);
+ masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("ProfilerExitFrameTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}
diff --git a/js/src/jit/arm64/vixl/.clang-format b/js/src/jit/arm64/vixl/.clang-format
new file mode 100644
index 000000000..122a79540
--- /dev/null
+++ b/js/src/jit/arm64/vixl/.clang-format
@@ -0,0 +1,4 @@
+BasedOnStyle: Chromium
+
+# Ignore all comments because they aren't reflowed properly.
+CommentPragmas: "^"
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.cpp b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
new file mode 100644
index 000000000..d784fd860
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
@@ -0,0 +1,5088 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+#include <cmath>
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+namespace vixl {
+
+// CPURegList utilities.
+CPURegister CPURegList::PopLowestIndex() {
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_);
+ VIXL_ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ VIXL_ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_);
+ index = kRegListSizeInBits - 1 - index;
+ VIXL_ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+bool CPURegList::IsValid() const {
+ if ((type_ == CPURegister::kRegister) ||
+ (type_ == CPURegister::kVRegister)) {
+ bool is_valid = true;
+ // Try to create a CPURegister for each element in the list.
+ for (int i = 0; i < kRegListSizeInBits; i++) {
+ if (((list_ >> i) & 1) != 0) {
+ is_valid &= CPURegister(i, size_, type_).IsValid();
+ }
+ }
+ return is_valid;
+ } else if (type_ == CPURegister::kNoRegister) {
+ // We can't use IsEmpty here because that asserts IsValid().
+ return list_ == 0;
+ } else {
+ return false;
+ }
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kVRegister) {
+ Remove(GetCalleeSavedV(RegisterSizeInBits()));
+ } else {
+ VIXL_ASSERT(type() == CPURegister::kNoRegister);
+ VIXL_ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Union(list_1, Union(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Union(Union(list_1, list_2), Union(list_3, list_4));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Intersection(list_1, Intersection(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Intersection(Intersection(list_1, list_2),
+ Intersection(list_3, list_4));
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
+ return CPURegList(CPURegister::kVRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ // Do not use lr directly to avoid initialisation order fiasco bugs for users.
+ list.Combine(Register(30, kXRegSize));
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedV(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
+ return list;
+}
+
+
+const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
+const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
+const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
+const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
+
+
+// Registers.
+#define WREG(n) w##n,
+const Register Register::wregisters[] = {
+REGISTER_CODE_LIST(WREG)
+};
+#undef WREG
+
+#define XREG(n) x##n,
+const Register Register::xregisters[] = {
+REGISTER_CODE_LIST(XREG)
+};
+#undef XREG
+
+#define BREG(n) b##n,
+const VRegister VRegister::bregisters[] = {
+REGISTER_CODE_LIST(BREG)
+};
+#undef BREG
+
+#define HREG(n) h##n,
+const VRegister VRegister::hregisters[] = {
+REGISTER_CODE_LIST(HREG)
+};
+#undef HREG
+
+#define SREG(n) s##n,
+const VRegister VRegister::sregisters[] = {
+REGISTER_CODE_LIST(SREG)
+};
+#undef SREG
+
+#define DREG(n) d##n,
+const VRegister VRegister::dregisters[] = {
+REGISTER_CODE_LIST(DREG)
+};
+#undef DREG
+
+#define QREG(n) q##n,
+const VRegister VRegister::qregisters[] = {
+REGISTER_CODE_LIST(QREG)
+};
+#undef QREG
+
+#define VREG(n) v##n,
+const VRegister VRegister::vregisters[] = {
+REGISTER_CODE_LIST(VREG)
+};
+#undef VREG
+
+
+const Register& Register::WRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return wsp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return wregisters[code];
+ }
+}
+
+
+const Register& Register::XRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return sp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return xregisters[code];
+ }
+}
+
+
+const VRegister& VRegister::BRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return bregisters[code];
+}
+
+
+const VRegister& VRegister::HRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return hregisters[code];
+}
+
+
+const VRegister& VRegister::SRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return sregisters[code];
+}
+
+
+const VRegister& VRegister::DRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return dregisters[code];
+}
+
+
+const VRegister& VRegister::QRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return qregisters[code];
+}
+
+
+const VRegister& VRegister::VRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vregisters[code];
+}
+
+
+const Register& CPURegister::W() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::WRegFromCode(code_);
+}
+
+
+const Register& CPURegister::X() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::XRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::B() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::BRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::H() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::HRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::S() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::SRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::D() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::DRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::Q() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::QRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::V() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::VRegFromCode(code_);
+}
+
+
+// Operand.
+Operand::Operand(int64_t immediate)
+ : immediate_(immediate),
+ reg_(NoReg),
+ shift_(NO_SHIFT),
+ extend_(NO_EXTEND),
+ shift_amount_(0) {}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(shift != MSL);
+ VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+ VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+ VIXL_ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(reg.IsValid());
+ VIXL_ASSERT(shift_amount <= 4);
+ VIXL_ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+// MemOperand
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(!regoffset.IsSP());
+ VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ VIXL_ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+ } else if (offset.IsShiftedRegister()) {
+ VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
+
+ regoffset_ = offset.reg();
+ shift_ = offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ VIXL_ASSERT(shift_ == LSL);
+ } else {
+ VIXL_ASSERT(offset.IsExtendedRegister());
+ VIXL_ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_ = NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ VIXL_ASSERT(!regoffset_.IsSP());
+ VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+
+void MemOperand::AddOffset(int64_t offset) {
+ VIXL_ASSERT(IsImmediateOffset());
+ offset_ += offset;
+}
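+
+
+// Illustrative uses of the constructors above (a sketch only; it assumes the
+// usual VIXL register aliases x0/w1/x1 and the default Offset address mode
+// declared in the header):
+//   MemOperand(x0, 16)               // [x0, #16]         immediate offset
+//   MemOperand(x0, 16, PreIndex)     // [x0, #16]!        pre-index writeback
+//   MemOperand(x0, 16, PostIndex)    // [x0], #16         post-index writeback
+//   MemOperand(x0, x1, LSL, 3)       // [x0, x1, lsl #3]  register offset
+//   MemOperand(x0, w1, UXTW, 2)      // [x0, w1, uxtw #2] extended register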
+
+
+// Assembler
+Assembler::Assembler(PositionIndependentCodeOption pic)
+ : pic_(pic) {
+}
+
+
+// Code generation.
+void Assembler::br(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op) {
+ VIXL_ASSERT(vd.Is16B() || vd.Is8B());
+ VIXL_ASSERT(vn.Is16B());
+ VIXL_ASSERT(AreSameFormat(vd, vm));
+ Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBL_1v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_3v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBX_1v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_3v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::smulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
+}
+
+
+void Assembler::umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ VIXL_ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
+
+ int offset = static_cast<int>(addr.offset());
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(offset, CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ VIXL_ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ VIXL_ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ VIXL_ASSERT(!rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(addr.IsImmediateOffset());
+
+ unsigned size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), size));
+ int offset = static_cast<int>(addr.offset());
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::stur(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, int imm19) {
+ Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::prfm(PrefetchOperation op, int imm19) {
+ Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
+}
+
+
+// Exclusive-access instructions.
+void Assembler::stxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlrb(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlrh(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlr(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldarb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldarh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldar(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::prfm(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::prfum(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) {
+ Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt));
+}
+
+
+void Assembler::sys(int op, const Register& rt) {
+ Emit(SYS | SysOp(op) | Rt(rt));
+}
+
+
+void Assembler::dc(DataCacheOp op, const Register& rt) {
+ VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
+ sys(op, rt);
+}
+
+
+void Assembler::ic(InstructionCacheOp op, const Register& rt) {
+ VIXL_ASSERT(op == IVAU);
+ sys(op, rt);
+}
+
+
+// NEON structure loads and stores.
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
+ Instr addr_field = RnSP(addr.base());
+
+ if (addr.IsPostIndex()) {
+ VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
+ static_cast<NEONLoadStoreMultiStructPostIndexOp>(
+ NEONLoadStoreSingleStructPostIndex));
+
+ addr_field |= NEONLoadStoreMultiStructPostIndex;
+ if (addr.offset() == 0) {
+ addr_field |= RmNot31(addr.regoffset());
+ } else {
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
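+ // For example, "ld1 {v0.16b, v1.16b}, [x0], #32" post-indexes by the 32
+ // bytes implied by the two Q registers, so no offset register is encoded.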
+ addr_field |= (0x1f << Rm_offset);
+ }
+ } else {
+ VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0));
+ }
+ return addr_field;
+}
+
+void Assembler::LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op) {
+#ifdef DEBUG
+ // Assert that addressing mode is either offset (with immediate 0), post
+ // index by immediate of the size of the register list, or post index by a
+ // value in a core register.
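+ // For example, an ld2 of two 16B registers must post-index by exactly 32
+ // bytes (or by a core register), and a plain offset form must use #0.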
+ if (addr.IsImmediateOffset()) {
+ VIXL_ASSERT(addr.offset() == 0);
+ } else {
+ int offset = vt.SizeInBytes();
+ switch (op) {
+ case NEON_LD1_1v:
+ case NEON_ST1_1v:
+ offset *= 1; break;
+ case NEONLoadStoreSingleStructLoad1:
+ case NEONLoadStoreSingleStructStore1:
+ case NEON_LD1R:
+ offset = (offset / vt.lanes()) * 1; break;
+
+ case NEON_LD1_2v:
+ case NEON_ST1_2v:
+ case NEON_LD2:
+ case NEON_ST2:
+ offset *= 2;
+ break;
+ case NEONLoadStoreSingleStructLoad2:
+ case NEONLoadStoreSingleStructStore2:
+ case NEON_LD2R:
+ offset = (offset / vt.lanes()) * 2; break;
+
+ case NEON_LD1_3v:
+ case NEON_ST1_3v:
+ case NEON_LD3:
+ case NEON_ST3:
+ offset *= 3; break;
+ case NEONLoadStoreSingleStructLoad3:
+ case NEONLoadStoreSingleStructStore3:
+ case NEON_LD3R:
+ offset = (offset / vt.lanes()) * 3; break;
+
+ case NEON_LD1_4v:
+ case NEON_ST1_4v:
+ case NEON_LD4:
+ case NEON_ST4:
+ offset *= 4; break;
+ case NEONLoadStoreSingleStructLoad4:
+ case NEONLoadStoreSingleStructStore4:
+ case NEON_LD4R:
+ offset = (offset / vt.lanes()) * 4; break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ VIXL_ASSERT(!addr.regoffset().Is(NoReg) ||
+ addr.offset() == offset);
+ }
+#else
+ USE(vt, addr, op);
+#endif
+}
+
+void Assembler::LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ VIXL_ASSERT(vt.IsVector() || vt.Is1D());
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_LD1_1v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD1_2v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD1_3v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD1_4v);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD2);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
+}
+
+
+void Assembler::ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD3);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
+}
+
+
+void Assembler::ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD4);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
+}
+
+
+void Assembler::ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_ST1_1v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_ST1_2v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_ST1_3v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_ST1_4v);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, dst, NEON_ST2);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, dst, NEON_ST3);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, dst, NEON_ST4);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
+}
+
+
+void Assembler::LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+
+ // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ unsigned lane_size = vt.LaneSizeInBytes();
+ VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
+
+ // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
+ // S and size fields.
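+ // For example, a 4-byte (S) lane at index 3 has byte offset 12 (0b1100),
+ // which encodes below as size = 0b00, S = 1, Q = 1.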
+ lane *= lane_size;
+ if (lane_size == 8) lane++;
+
+ Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
+ Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
+ Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
+
+ Instr instr = op;
+ switch (lane_size) {
+ case 1: instr |= NEONLoadStoreSingle_b; break;
+ case 2: instr |= NEONLoadStoreSingle_h; break;
+ case 4: instr |= NEONLoadStoreSingle_s; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ instr |= NEONLoadStoreSingle_d;
+ }
+
+ Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
+}
+
+
+void Assembler::ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
+}
+
+
+void Assembler::NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vn, vm));
+ VIXL_ASSERT((vn.Is1H() && vd.Is1S()) ||
+ (vn.Is1S() && vd.Is1D()) ||
+ (vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+ (vn.Is16B() && vd.Is8H())||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vm.Is8B() && vd.Is8H()) ||
+ (vm.Is4H() && vd.Is4S()) ||
+ (vm.Is2S() && vd.Is2D()) ||
+ (vm.Is16B() && vd.Is8H())||
+ (vm.Is8H() && vd.Is4S()) ||
+ (vm.Is4S() && vd.Is2D()));
+ Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vm, vn));
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+ (vd.Is16B() && vn.Is8H())||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3DIFF_LONG_LIST(V) \
+ V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \
+ V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \
+ V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
+ V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
+ V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
+ V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \
+ V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \
+ V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \
+ V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \
+ V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \
+ V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \
+ V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \
+ V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \
+ V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \
+ V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \
+ V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \
+ V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \
+ V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentL(vd, vn, vm, OP); \
+}
+NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
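+// Each list entry above expands to one out-of-line definition; smull, for
+// example, asserts vn.IsVector() && vn.IsD() and then calls NEON3DifferentL
+// with NEON_SMULL.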
+
+#define NEON_3DIFF_HN_LIST(V) \
+ V(addhn, NEON_ADDHN, vd.IsD()) \
+ V(addhn2, NEON_ADDHN2, vd.IsQ()) \
+ V(raddhn, NEON_RADDHN, vd.IsD()) \
+ V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
+ V(subhn, NEON_SUBHN, vd.IsD()) \
+ V(subhn2, NEON_SUBHN2, vd.IsQ()) \
+ V(rsubhn, NEON_RSUBHN, vd.IsD()) \
+ V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentHN(vd, vn, vm, OP); \
+}
+NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW);
+}
+
+
+void Assembler::uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
+}
+
+
+void Assembler::saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW);
+}
+
+
+void Assembler::saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
+}
+
+
+void Assembler::usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW);
+}
+
+
+void Assembler::usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
+}
+
+
+void Assembler::ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
+}
+
+
+void Assembler::ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as add immediate with
+ // second operand of zero. Otherwise, orr with first operand zr is
+ // used.
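+  // For example, mov(sp, x0) assembles as "add sp, x0, #0", whereas
+  // mov(x0, x1) assembles as "orr x0, xzr, x1".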
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::clrex(int imm4) {
+ Emit(CLREX | CRm(imm4));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(const VRegister& vd, double imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1D());
+ Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
+ } else {
+ VIXL_ASSERT(vd.Is2D());
+ Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
+ Instr q = NEON_Q;
+ uint32_t encoded_imm = FP64ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const VRegister& vd, float imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S());
+ Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
+ } else {
+    VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Instr op = NEONModifiedImmediate_MOVI;
+ Instr q = vd.Is4S() ? NEON_Q : 0;
+ uint32_t encoded_imm = FP32ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(rd.size() == vn.size());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.size() == rn.size());
+ FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.IsSameFormat(vn));
+ Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
+ VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
+ USE(index);
+ Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
+ VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
+ USE(index);
+ Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
+ Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap) {
+ USE(value);
+ // Although the fcmp{e} instructions can strictly only take an immediate
+ // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
+ // doesn't affect the result of the comparison.
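+  // For example, fcmp(s0, -0.0) is accepted here, since -0.0 == 0.0, and it
+  // assembles to the same compare-with-zero form as fcmp(s0, 0.0).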
+ VIXL_ASSERT(value == 0.0);
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
+ Emit(FPType(vn) | op | Rn(vn));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, EnableTrap);
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, EnableTrap);
+}
+
+
+void Assembler::FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
+}
+
+void Assembler::fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
+}
+
+
+void Assembler::fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
+}
+
+
+void Assembler::fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op) {
+ Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ }
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvt(const VRegister& vd,
+ const VRegister& vn) {
+ FPDataProcessing1SourceOp op;
+ if (vd.Is1D()) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1H());
+ op = vn.Is1S() ? FCVT_ds : FCVT_dh;
+ } else if (vd.Is1S()) {
+ VIXL_ASSERT(vn.Is1D() || vn.Is1H());
+ op = vn.Is1D() ? FCVT_sd : FCVT_sh;
+ } else {
+ VIXL_ASSERT(vd.Is1H());
+ VIXL_ASSERT(vn.Is1D() || vn.Is1S());
+ op = vn.Is1D() ? FCVT_hd : FCVT_hs;
+ }
+ FPDataProcessing1Source(vd, vn, op);
+}
+
+
+void Assembler::fcvtl(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H()) ||
+ (vd.Is2D() && vn.Is2S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtl2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is8H()) ||
+ (vd.Is2D() && vn.Is4S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is4H()) ||
+ (vn.Is2D() && vd.Is2S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is8H()) ||
+ (vn.Is2D() && vd.Is4S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtxn(const VRegister& vd,
+ const VRegister& vn) {
+ Instr format = 1 << NEONSize_offset;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() && vn.Is1D());
+ Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
+ } else {
+ VIXL_ASSERT(vd.Is2S() && vn.Is2D());
+ Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+ }
+}
+
+
+void Assembler::fcvtxn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.Is4S() && vn.Is2D());
+ Instr format = 1 << NEONSize_offset;
+ Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_FCVT_LIST(V) \
+ V(fcvtnu, NEON_FCVTNU, FCVTNU) \
+ V(fcvtns, NEON_FCVTNS, FCVTNS) \
+ V(fcvtpu, NEON_FCVTPU, FCVTPU) \
+ V(fcvtps, NEON_FCVTPS, FCVTPS) \
+ V(fcvtmu, NEON_FCVTMU, FCVTMU) \
+ V(fcvtms, NEON_FCVTMS, FCVTMS) \
+ V(fcvtau, NEON_FCVTAU, FCVTAU) \
+ V(fcvtas, NEON_FCVTAS, FCVTAS)
+
+#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const Register& rd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(rd, vn, SCA_OP); \
+} \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(vd, vn, VEC_OP); \
+}
+NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
+#undef DEFINE_ASM_FUNCS
+
+
+void Assembler::fcvtzs(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
+ }
+}
+
+
+void Assembler::fcvtzu(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
+ }
+}
+
+void Assembler::ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_UCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
+ }
+}
+
+void Assembler::scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_SCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
+ }
+}
+
+
+void Assembler::scvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::ucvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_LIST(V) \
+ V(fabs, NEON_FABS, FABS) \
+ V(fneg, NEON_FNEG, FNEG) \
+ V(fsqrt, NEON_FSQRT, FSQRT) \
+ V(frintn, NEON_FRINTN, FRINTN) \
+ V(frinta, NEON_FRINTA, FRINTA) \
+ V(frintp, NEON_FRINTP, FRINTP) \
+ V(frintm, NEON_FRINTM, FRINTM) \
+ V(frintx, NEON_FRINTX, FRINTX) \
+ V(frintz, NEON_FRINTZ, FRINTZ) \
+ V(frinti, NEON_FRINTI, FRINTI) \
+ V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
+ V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar )
+
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ Instr op; \
+ if (vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP2RegMisc(vd, vn, op); \
+}
+NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0);
+ USE(value);
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
+}
+
+
+void Assembler::cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
+}
+
+
+void Assembler::cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
+}
+
+
+void Assembler::cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
+}
+
+
+void Assembler::cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
+}
+
+
+void Assembler::shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
+ (vd.Is4S() && vn.Is4H() && shift == 16) ||
+ (vd.Is2D() && vn.Is2S() && shift == 32));
+ USE(shift);
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ USE(shift);
+ VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
+ (vd.Is4S() && vn.Is8H() && shift == 16) ||
+ (vd.Is2D() && vn.Is4S() && shift == 32));
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0.0);
+ USE(value);
+
+ Instr op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ op |= NEON_Q | NEONScalar;
+ } else {
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
+ }
+
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
+}
+
+
+void Assembler::fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
+}
+
+
+void Assembler::fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
+}
+
+
+void Assembler::fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
+}
+
+
+void Assembler::fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
+}
+
+
+void Assembler::frecpx(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar());
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3SAME_LIST(V) \
+ V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
+ V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
+ V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
+ V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
+ V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
+ V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
+ V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
+ V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
+ V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
+ V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
+ V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
+ V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
+ V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
+ V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
+ V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
+ V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
+ V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
+ V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
+ V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
+ V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
+ V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
+ V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
+ V(uqadd, NEON_UQADD, true) \
+ V(sqadd, NEON_SQADD, true) \
+ V(uqsub, NEON_UQSUB, true) \
+ V(sqsub, NEON_SQSUB, true) \
+ V(sqshl, NEON_SQSHL, true) \
+ V(uqshl, NEON_UQSHL, true) \
+ V(sqrshl, NEON_SQRSHL, true) \
+ V(uqrshl, NEON_UQRSHL, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3Same(vd, vn, vm, OP); \
+}
+NEON_3SAME_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FP3SAME_OP_LIST(V) \
+ V(fadd, NEON_FADD, FADD) \
+ V(fsub, NEON_FSUB, FSUB) \
+ V(fmul, NEON_FMUL, FMUL) \
+ V(fdiv, NEON_FDIV, FDIV) \
+ V(fmax, NEON_FMAX, FMAX) \
+ V(fmaxnm, NEON_FMAXNM, FMAXNM) \
+ V(fmin, NEON_FMIN, FMIN) \
+ V(fminnm, NEON_FMINNM, FMINNM) \
+ V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
+ V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
+ V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
+ V(fabd, NEON_FABD, NEON_FABD_scalar) \
+ V(fmla, NEON_FMLA, 0) \
+ V(fmls, NEON_FMLS, 0) \
+ V(facge, NEON_FACGE, NEON_FACGE_scalar) \
+ V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
+ V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
+ V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
+ V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
+ V(faddp, NEON_FADDP, 0) \
+ V(fmaxp, NEON_FMAXP, 0) \
+ V(fminp, NEON_FMINP, 0) \
+ V(fmaxnmp, NEON_FMAXNMP, 0) \
+ V(fminnmp, NEON_FMINNMP, 0)
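+// Entries with a SCA_OP of 0 (fmla, fmls, faddp, fmaxp, fminp, fmaxnmp and
+// fminnmp) have no scalar encoding in this table, so the macro below always
+// takes the vector path for them.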
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ Instr op; \
+ if ((SCA_OP != 0) && vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.IsVector()); \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP3Same(vd, vn, vm, op); \
+}
+NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::addp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
+ Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::faddp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_ORR);
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ orr(vd.V8B(), vn.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ orr(vd.V16B(), vn.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_BIC);
+}
+
+
+void Assembler::movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (vd.Is2D() || vd.Is1D()) {
+ VIXL_ASSERT(shift_amount == 0);
+ int imm8 = 0;
+ for (int i = 0; i < 8; ++i) {
+ int byte = (imm >> (i * 8)) & 0xff;
+ VIXL_ASSERT((byte == 0) || (byte == 0xff));
+ if (byte == 0xff) {
+ imm8 |= (1 << i);
+ }
+ }
+ int q = vd.Is2D() ? NEON_Q : 0;
+ Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ } else if (shift == LSL) {
+ VIXL_ASSERT(is_uint8(imm));
+ NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ } else {
+ VIXL_ASSERT(is_uint8(imm));
+ NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ }
+}
+
+
+void Assembler::mvn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ not_(vd.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ not_(vd.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (shift == LSL) {
+ NEONModifiedImmShiftLsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ } else {
+ NEONModifiedImmShiftMsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ }
+}
+
+
+void Assembler::NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()) ||
+ (vd.Is2D() && vm.Is1D()) ||
+ (vd.Is1D() && vm.Is1D()));
+ VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) ||
+ (vm.Is1D() && (vm_index < 2)));
+
+ Instr op = vop;
+ int index_num_bits = vm.Is1S() ? 2 : 1;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ }
+
+ Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is4H() && vm.Is1H()) ||
+ (vd.Is8H() && vm.Is1H()) ||
+ (vd.Is1H() && vm.Is1H()) ||
+ (vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()));
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
+ (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
+ (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
+ (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
+ (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
+ (vd.Is1D() && vn.Is1S() && vm.Is1S()));
+
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_BYELEMENT_LIST(V) \
+ V(mul, NEON_MUL_byelement, vn.IsVector()) \
+ V(mla, NEON_MLA_byelement, vn.IsVector()) \
+ V(mls, NEON_MLS_byelement, vn.IsVector()) \
+ V(sqdmulh, NEON_SQDMULH_byelement, true) \
+ V(sqrdmulh, NEON_SQRDMULH_byelement, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FPBYELEMENT_LIST(V) \
+ V(fmul, NEON_FMUL_byelement) \
+ V(fmla, NEON_FMLA_byelement) \
+ V(fmls, NEON_FMLS_byelement) \
+ V(fmulx, NEON_FMULX_byelement)
+
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ NEONFPByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_BYELEMENT_LONG_LIST(V) \
+ V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElementL(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::suqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SUQADD);
+}
+
+
+void Assembler::usqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_USQADD);
+}
+
+
+void Assembler::abs(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_ABS);
+}
+
+
+void Assembler::sqabs(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQABS);
+}
+
+
+void Assembler::neg(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_NEG);
+}
+
+
+void Assembler::sqneg(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQNEG);
+}
+
+
+void Assembler::NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop) {
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+ (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ format = VFormat(vd);
+ }
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::xtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsD());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::xtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::sqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtun(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::sqxtun2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::uqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+void Assembler::uqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
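+// not_ leaves that bit clear, while rbit ORs in (1 << NEONSize_offset) to set
+// it; both otherwise share the NEON_RBIT_NOT encoding.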
+void Assembler::not_(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rbit(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ VIXL_ASSERT((0 <= index) && (index < vd.lanes()));
+ Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ Instr q, scalar;
+
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
+
+ if (vd.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT(!vd.Is1D());
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | scalar | NEON_DUP_ELEMENT |
+ ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(vn.IsScalar());
+ dup(vd, vn, vn_index);
+}
+
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(!vd.Is1D());
+ VIXL_ASSERT(vd.Is2D() == rn.IsX());
+ int q = vd.IsD() ? 0 : NEON_Q;
+ Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+ ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ ins(vd, vd_index, vn, vn_index);
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rn.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rn.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rn.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rn.IsX());
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ ins(vd, vd_index, rn);
+}
+
+
+void Assembler::umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rd.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rd.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rd.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_2D;
+ q = NEON_Q;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(vn.SizeInBytes() >= 4);
+ umov(rd, vn, vn_index);
+}
+
+
+void Assembler::smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h or s.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ VIXL_ASSERT(lane_size != 8);
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ default:
+ VIXL_ASSERT(lane_size == 4);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_4S;
+ break;
+ }
+ q = rd.IsW() ? 0 : NEON_Q;
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::cls(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::clz(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cnt(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev16(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev32(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+ Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev64(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ursqrte(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::urecpe(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op) {
+ VIXL_ASSERT((op == NEON_SADDLP) ||
+ (op == NEON_UADDLP) ||
+ (op == NEON_SADALP) ||
+ (op == NEON_UADALP));
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is4H()) ||
+ (vn.Is4H() && vd.Is2S()) ||
+ (vn.Is2S() && vd.Is1D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADDLP);
+}
+
+
+void Assembler::uaddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADDLP);
+}
+
+
+void Assembler::sadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADALP);
+}
+
+
+void Assembler::uadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADALP);
+}
+
+
+void Assembler::NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1H()) ||
+ (vn.Is16B() && vd.Is1H()) ||
+ (vn.Is4H() && vd.Is1S()) ||
+ (vn.Is8H() && vd.Is1S()) ||
+ (vn.Is4S() && vd.Is1D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_SADDLV);
+}
+
+
+void Assembler::uaddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_UADDLV);
+}
+
+
+void Assembler::NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1B()) ||
+ (vn.Is16B() && vd.Is1B()) ||
+ (vn.Is4H() && vd.Is1H()) ||
+ (vn.Is8H() && vd.Is1H()) ||
+ (vn.Is4S() && vd.Is1S()));
+ if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+ } else {
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+ }
+}
+
+
+#define NEON_ACROSSLANES_LIST(V) \
+ V(fmaxv, NEON_FMAXV, vd.Is1S()) \
+ V(fminv, NEON_FMINV, vd.Is1S()) \
+ V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
+ V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
+ V(addv, NEON_ADDV, true) \
+ V(smaxv, NEON_SMAXV, true) \
+ V(sminv, NEON_SMINV, true) \
+ V(umaxv, NEON_UMAXV, true) \
+ V(uminv, NEON_UMINV, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ VIXL_ASSERT(AS); \
+ NEONAcrossLanes(vd, vn, OP); \
+}
+NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(!vd.Is1D());
+ Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN1);
+}
+
+
+void Assembler::trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN2);
+}
+
+
+void Assembler::uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP1);
+}
+
+
+void Assembler::uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP2);
+}
+
+
+void Assembler::zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP1);
+}
+
+
+void Assembler::zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP2);
+}
+
+
+void Assembler::NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Instr q, scalar;
+ if (vn.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
+ NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
+}
+
+
+void Assembler::NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
+ NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
+}
+
+
+void Assembler::NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
+ int immh_immb = (laneSizeInBits + shift) << 16;
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr q;
+ q = vn.IsD() ? 0 : NEON_Q;
+ Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ Instr q, scalar;
+ int laneSizeInBits = vd.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
+ int immh_immb = (2 * laneSizeInBits - shift) << 16;
+
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+                (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ scalar = 0;
+ q = vd.IsD() ? 0 : NEON_Q;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
+}
+
+
+void Assembler::sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
+}
+
+
+void Assembler::sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
+}
+
+
+void Assembler::sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
+}
+
+
+void Assembler::uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
+}
+
+
+void Assembler::sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sxtl(const VRegister& vd,
+ const VRegister& vn) {
+ sshll(vd, vn, 0);
+}
+
+
+void Assembler::sxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ sshll2(vd, vn, 0);
+}
+
+
+void Assembler::ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::uxtl(const VRegister& vd,
+ const VRegister& vn) {
+ ushll(vd, vn, 0);
+}
+
+
+void Assembler::uxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ ushll2(vd, vn, 0);
+}
+
+
+void Assembler::sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
+}
+
+
+void Assembler::sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
+}
+
+
+void Assembler::ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
+}
+
+
+void Assembler::srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
+}
+
+
+void Assembler::urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
+}
+
+
+void Assembler::ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
+}
+
+
+void Assembler::usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
+}
+
+
+void Assembler::srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
+}
+
+
+void Assembler::ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
+}
+
+
+void Assembler::shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+void Assembler::uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
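+// For example, 1.0f has raw bits 0x3f800000, so bit7 = 0, bit6 = 1 and
+// bit5_to_0 = 0b110000, giving an encoded imm8 of 0x70.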
+uint32_t Assembler::FP32ToImm8(float imm) {
+ VIXL_ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return bit7 | bit6 | bit5_to_0;
+}
+
+
+Instr Assembler::ImmFP32(float imm) {
+ return FP32ToImm8(imm) << ImmFP_offset;
+}
+
+
+uint32_t Assembler::FP64ToImm8(double imm) {
+ VIXL_ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ return FP64ToImm8(imm) << ImmFP_offset;
+}
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are zero (a positive 32-bit number) or top
+ // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
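+    // For example, moving 0xfffffffffffffffe (-2) to a W register is
+    // accepted: imm >> 31 is 0x1ffffffff, and the value is truncated to
+    // 0xfffffffe below.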
+ VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
+ ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
+ imm &= kWRegMask;
+ }
+
+ if (shift >= 0) {
+ // Explicit shift specified.
+ VIXL_ASSERT((shift == 0) || (shift == 16) ||
+ (shift == 32) || (shift == 48));
+ VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & 0xffffffffffff0000) == 0) {
+ // Nothing to do.
+ } else if ((imm & 0xffffffff0000ffff) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & 0xffff0000ffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & 0x0000ffffffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ VIXL_ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
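+
+// For illustration, a sketch of the implicit shift selection above: with
+// imm == 0x00ab0000 and no explicit shift,
+//   (imm & 0xffffffffffff0000) != 0, but
+//   (imm & 0xffffffff0000ffff) == 0,
+// so the immediate collapses to 0xab with shift == 1, and (for mov_op ==
+// MOVZ and rd == w0) the emitted instruction is movz w0, #0xab, lsl #16.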
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ VIXL_ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
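+
+// For illustration: add(x0, sp, Operand(x1, LSL, 2)) takes the extended-
+// register path above, because the shifted-register encoding cannot address
+// the stack pointer; the operand is rewritten via ToExtendedRegister() (an
+// LSL of up to 4 becomes the equivalent extend) and emitted through
+// DataProcExtendedRegister. add(x0, x2, Operand(x1, LSL, 2)) stays in the
+// shifted-register form.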
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == operand.reg().size());
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ VIXL_ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ VIXL_ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::svc(int code) {
+ Emit(SVC | ImmException(code));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op |
+ ImmCondCmp(static_cast<unsigned>(immediate));
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const VRegister& vd,
+ const VRegister& vn,
+ FPDataProcessing1SourceOp op) {
+ VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
+ Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va,
+ FPDataProcessing3SourceOp op) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
+}
+
+
+void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() ||
+ vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8) ||
+ (left_shift == 16) || (left_shift == 24));
+ VIXL_ASSERT(is_uint8(imm8));
+
+ int cmode_1, cmode_2, cmode_3;
+ if (vd.Is8B() || vd.Is16B()) {
+ VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
+ cmode_1 = 1;
+ cmode_2 = 1;
+ cmode_3 = 1;
+ } else {
+ cmode_1 = (left_shift >> 3) & 1;
+ cmode_2 = left_shift >> 4;
+ cmode_3 = 0;
+ if (vd.Is4H() || vd.Is8H()) {
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
+ cmode_3 = 1;
+ }
+ }
+ int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
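+
+// For illustration, the cmode computed above for a 4S destination with
+// left_shift == 8 (as used by, e.g., movi v0.4s, #0x42, lsl #8) is
+//   cmode_1 = (8 >> 3) & 1 = 1,  cmode_2 = 8 >> 4 = 0,  cmode_3 = 0,
+// giving cmode == 0b0010, the 32-bit "shifted immediate, LSL #8" form.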
+
+
+void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
+ VIXL_ASSERT(is_uint8(imm8));
+
+ int cmode_0 = (shift_amount >> 4) & 1;
+ int cmode = 0xc | cmode_0;
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ VIXL_ASSERT(rd.size() >= rn.size());
+ unsigned reg_size = rd.size();
+ // Use the correct size of register.
+ Register rn_ = Register(rn.code(), rd.size());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ VIXL_ASSERT(rn.size() == kXRegSize);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
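+
+// For illustration: with rd == w0, rn == w1, UXTB and left_shift == 0,
+// non_shift_bits is 0, so ubfm(w0, w1, 0, 7) is emitted (i.e. uxtb w0, w1).
+// With left_shift == 28 only bits 3:0 of the source survive the shift, so
+// the extend is redundant and a plain lsl w0, w1, #28 is emitted instead.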
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option) {
+ Instr base = RnSP(addr.base());
+ int64_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ bool prefer_unscaled = (option == PreferUnscaledOffset) ||
+ (option == RequireUnscaledOffset);
+ if (prefer_unscaled && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+
+ if ((option != RequireUnscaledOffset) &&
+ IsImmLSScaled(offset, access_size)) {
+ // Use the scaled addressing mode.
+ return base | LoadStoreUnsignedOffsetFixed |
+ ImmLSUnsigned(static_cast<int>(offset) >> access_size);
+ }
+
+ if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+ }
+
+ // All remaining addressing modes are register-offset, pre-indexed or
+ // post-indexed modes.
+ VIXL_ASSERT((option != RequireUnscaledOffset) &&
+ (option != RequireScaledOffset));
+
+ if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
+ return base | LoadStoreRegisterOffsetFixed | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
+ }
+
+ if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ // If this point is reached, the MemOperand (addr) cannot be encoded.
+ VIXL_UNREACHABLE();
+ return 0;
+}
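+
+// For illustration, with the default PreferScaledOffset option and an
+// 8-byte access (access_size == 3):
+//   [x0, #8]  selects the scaled form, with ImmLSUnsigned(8 >> 3) == 1;
+//   [x0, #-8] is not scaled-encodable (negative), so it falls back to the
+//             unscaled form with ImmLS(-8);
+//   [x0, #3]  is not a multiple of 8, so it also uses the unscaled form.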
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option) {
+ Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
+}
+
+
+void Assembler::Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
+
+ Instr prfop = ImmPrefetchOperation(op);
+ Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
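+
+// For illustration: IsImmAddSub(0x123) and IsImmAddSub(0x123000) both hold
+// (a 12-bit value, optionally shifted left by 12 bits), while
+// IsImmAddSub(0x123456) does not, since it needs more than 12 bits and its
+// low 12 bits are non-zero.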
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bits[18..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
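+
+// For illustration: 0.5f (0x3f000000) passes all three checks above and can
+// be used as an fmov immediate, whereas 0.1f (0x3dcccccd) fails the first
+// check because its low fraction bits are non-zero.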
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0x0000ffffffffffff) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && is_int7(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
+ return is_int9(offset);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits set, e.g. 0xffff1234 or 0xffff1234ffffffff.
+bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits clear, e.g. 0x00001234 or 0x0000123400000000.
+bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
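+
+// For illustration: IsImmMovz(0x0000123400000000, kXRegSize) is true (three
+// of the four halfwords are clear, so movz x0, #0x1234, lsl #32 suffices),
+// and IsImmMovn(0xffffffffffff5678, kXRegSize) is true because the inverted
+// value, 0x000000000000a987, is movz-encodable.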
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it can not be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+ bool negate = false;
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1 bits
+ // are set. The pattern is rotated right by R, and repeated across a 32 or
+ // 64-bit value, depending on destination register width.
+ //
+ // Put another way: the basic format of a logical immediate is a single
+ // contiguous stretch of 1 bits, repeated across the whole word at intervals
+ // given by a power of 2. To identify them quickly, we first locate the
+ // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+ // is different for every logical immediate, so it gives us all the
+ // information we need to identify the only logical immediate that our input
+ // could be, and then we simply check if that's the value we actually have.
+ //
+ // (The rotation parameter does give the possibility of the stretch of 1 bits
+ // going 'round the end' of the word. To deal with that, we observe that in
+ // any situation where that happens the bitwise NOT of the value is also a
+ // valid logical immediate. So we simply invert the input whenever its low bit
+ // is set, and then we know that the rotated case can't arise.)
+
+ if (value & 1) {
+ // If the low bit is 1, negate the value, and set a flag to remember that we
+ // did (so that we can adjust the return values appropriately).
+ negate = true;
+ value = ~value;
+ }
+
+ if (width == kWRegSize) {
+ // To handle 32-bit logical immediates, the very easiest thing is to repeat
+ // the input value twice to make a 64-bit word. The correct encoding of that
+ // as a logical immediate will also be the correct encoding of the 32-bit
+ // value.
+
+ // Avoid making the assumption that the most-significant 32 bits are zero by
+ // shifting the value left and duplicating it.
+ value <<= kWRegSize;
+ value |= value >> kWRegSize;
+ }
+
+ // The basic analysis idea: imagine our input word looks like this.
+ //
+ // 0011111000111110001111100011111000111110001111100011111000111110
+ // c b a
+ // |<--d-->|
+ //
+ // We find the lowest set bit (as an actual power-of-2 value, not its index)
+ // and call it a. Then we add a to our original number, which wipes out the
+ // bottommost stretch of set bits and replaces it with a 1 carried into the
+ // next zero bit. Then we look for the new lowest set bit, which is in
+ // position b, and subtract it, so now our number is just like the original
+ // but with the lowest stretch of set bits completely gone. Now we find the
+ // lowest set bit again, which is position c in the diagram above. Then we'll
+ // measure the distance d between bit positions a and c (using CLZ), and that
+ // tells us that the only valid logical immediate that could possibly be equal
+ // to this number is the one in which a stretch of bits running from a to just
+ // below b is replicated every d bits.
+ uint64_t a = LowestSetBit(value);
+ uint64_t value_plus_a = value + a;
+ uint64_t b = LowestSetBit(value_plus_a);
+ uint64_t value_plus_a_minus_b = value_plus_a - b;
+ uint64_t c = LowestSetBit(value_plus_a_minus_b);
+
+ int d, clz_a, out_n;
+ uint64_t mask;
+
+ if (c != 0) {
+ // The general case, in which there is more than one stretch of set bits.
+ // Compute the repeat distance d, and set up a bitmask covering the basic
+ // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+ // of these cases the N bit of the output will be zero.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ int clz_c = CountLeadingZeros(c, kXRegSize);
+ d = clz_a - clz_c;
+ mask = ((UINT64_C(1) << d) - 1);
+ out_n = 0;
+ } else {
+ // Handle degenerate cases.
+ //
+ // If any of those 'find lowest set bit' operations didn't find a set bit at
+ // all, then the word will have been zero thereafter, so in particular the
+ // last lowest_set_bit operation will have returned zero. So we can test for
+ // all the special case conditions in one go by seeing if c is zero.
+ if (a == 0) {
+ // The input was zero (or all 1 bits, which will come to here too after we
+ // inverted it at the start of the function), for which we just return
+ // false.
+ return false;
+ } else {
+ // Otherwise, if c was zero but a was not, then there's just one stretch
+ // of set bits in our word, meaning that we have the trivial case of
+ // d == 64 and only one 'repetition'. Set up all the same variables as in
+ // the general case above, and set the N bit in the output.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ d = 64;
+ mask = ~UINT64_C(0);
+ out_n = 1;
+ }
+ }
+
+ // If the repeat period d is not a power of two, it can't be encoded.
+ if (!IsPowerOf2(d)) {
+ return false;
+ }
+
+ if (((b - a) & ~mask) != 0) {
+ // If the bit stretch (b - a) does not fit within the mask derived from the
+ // repeat period, then fail.
+ return false;
+ }
+
+ // The only possible option is b - a repeated every d bits. Now we're going to
+ // actually construct the valid logical immediate derived from that
+ // specification, and see if it equals our original input.
+ //
+ // To repeat a value every d bits, we multiply it by a number of the form
+ // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+ // be derived using a table lookup on CLZ(d).
+ static const uint64_t multipliers[] = {
+ 0x0000000000000001UL,
+ 0x0000000100000001UL,
+ 0x0001000100010001UL,
+ 0x0101010101010101UL,
+ 0x1111111111111111UL,
+ 0x5555555555555555UL,
+ };
+ uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
+ uint64_t candidate = (b - a) * multiplier;
+
+ if (value != candidate) {
+ // The candidate pattern doesn't match our input value, so fail.
+ return false;
+ }
+
+ // We have a match! This is a valid logical immediate, so now we have to
+ // construct the bits and pieces of the instruction encoding that generates
+ // it.
+
+ // Count the set bits in our basic stretch. The special case of clz(0) == -1
+ // makes the answer come out right for stretches that reach the very top of
+ // the word (e.g. numbers like 0xffffc00000000000).
+ int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
+ int s = clz_a - clz_b;
+
+ // Decide how many bits to rotate right by, to put the low bit of that basic
+ // stretch in position a.
+ int r;
+ if (negate) {
+ // If we inverted the input right at the start of this function, here's
+ // where we compensate: the number of set bits becomes the number of clear
+ // bits, and the rotation count is based on position b rather than position
+ // a (since b is the location of the 'lowest' 1 bit after inversion).
+ s = d - s;
+ r = (clz_b + 1) & (d - 1);
+ } else {
+ r = (clz_a + 1) & (d - 1);
+ }
+
+ // Now we're done, except for having to encode the S output in such a way that
+ // it gives both the number of set bits and the length of the repeated
+ // segment. The s field is encoded like this:
+ //
+ // imms size S
+ // ssssss 64 UInt(ssssss)
+ // 0sssss 32 UInt(sssss)
+ // 10ssss 16 UInt(ssss)
+ // 110sss 8 UInt(sss)
+ // 1110ss 4 UInt(ss)
+ // 11110s 2 UInt(s)
+ //
+ // So we 'or' (-d << 1) with our computed s to form imms.
+ if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
+ *n = out_n;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_r = r;
+ }
+
+ return true;
+}
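+
+// A rough worked example of the routine above, for
+// value == 0x0f0f0f0f0f0f0f0f and width == kXRegSize:
+//   the low bit is set, so the value is inverted to 0xf0f0f0f0f0f0f0f0;
+//   a = 0x10, value + a = 0xf0f0f0f0f0f0f100, b = 0x100, c = 0x1000;
+//   d = CountLeadingZeros(a) - CountLeadingZeros(c) = 59 - 51 = 8,
+//   mask = 0xff, b - a = 0xf0;
+//   candidate = 0xf0 * 0x0101010101010101 matches the (inverted) value;
+//   s = d - (clz_a - clz_b) = 8 - (59 - 55) = 4 (negate path),
+//   r = (clz_b + 1) & 7 = 56 & 7 = 0,
+// so the encoding is N = 0, imm_s = ((-8 << 1) | 3) & 0x3f = 0x33 and
+// imm_r = 0, i.e. a repeating 8-bit pattern with four set bits.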
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return LDR_b;
+ case kHRegSize: return LDR_h;
+ case kSRegSize: return LDR_s;
+ case kDRegSize: return LDR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q;
+ }
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return STR_b;
+ case kHRegSize: return STR_h;
+ case kSRegSize: return STR_s;
+ case kDRegSize: return STR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STR_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STP_s;
+ case kDRegSizeInBytes: return STP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STP_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
+ return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
+ LoadStorePairLBit);
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STNP_s;
+ case kDRegSizeInBytes: return STNP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STNP_q;
+ }
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
+ return static_cast<LoadStorePairNonTemporalOp>(
+ StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
+}
+
+
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.IsX() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return LDR_s_lit;
+ case kDRegSizeInBytes: return LDR_d_lit;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q_lit;
+ }
+ }
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsVRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs = CountSetBits(unique_regs);
+ int number_of_unique_fpregs = CountSetBits(unique_fpregs);
+
+ VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameFormat(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameFormat(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameFormat(reg1);
+ return match;
+}
+
+
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() ||
+ (reg2.code() == ((reg1.code() + 1) % kNumberOfVRegisters));
+ match &= !reg3.IsValid() ||
+ (reg3.code() == ((reg1.code() + 2) % kNumberOfVRegisters));
+ match &= !reg4.IsValid() ||
+ (reg4.code() == ((reg1.code() + 3) % kNumberOfVRegisters));
+ return match;
+}
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.h b/js/src/jit/arm64/vixl/Assembler-vixl.h
new file mode 100644
index 000000000..d209f8b57
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -0,0 +1,4257 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_ASSEMBLER_A64_H_
+#define VIXL_A64_ASSEMBLER_A64_H_
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/MozBaseAssembler-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include "jit/JitSpewer.h"
+
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+
+namespace vixl {
+
+using js::jit::BufferOffset;
+using js::jit::Label;
+using js::jit::Address;
+using js::jit::BaseIndex;
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+
+// Registers.
+
+// Some CPURegister methods can return Register or VRegister types, so we need
+// to declare them in advance.
+class Register;
+class VRegister;
+
+class CPURegister {
+ public:
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kVRegister,
+ kFPRegister = kVRegister,
+ kNoRegister
+ };
+
+ constexpr CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+ }
+
+ constexpr CPURegister(unsigned code, unsigned size, RegisterType type)
+ : code_(code), size_(size), type_(type) {
+ }
+
+ unsigned code() const {
+ VIXL_ASSERT(IsValid());
+ return code_;
+ }
+
+ RegisterType type() const {
+ VIXL_ASSERT(IsValidOrNone());
+ return type_;
+ }
+
+ RegList Bit() const {
+ VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
+ return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+ }
+
+ unsigned size() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ int SizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(size() % 8 == 0);
+ return size_ / 8;
+ }
+
+ int SizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ bool Is8Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 8;
+ }
+
+ bool Is16Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 16;
+ }
+
+ bool Is32Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 32;
+ }
+
+ bool Is64Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 64;
+ }
+
+ bool Is128Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 128;
+ }
+
+ bool IsValid() const {
+ if (IsValidRegister() || IsValidVRegister()) {
+ VIXL_ASSERT(!IsNone());
+ return true;
+ } else {
+ // This assert is hit when the register has not been properly initialized.
+ // One cause for this can be an initialisation order fiasco. See
+ // https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
+ VIXL_ASSERT(IsNone());
+ return false;
+ }
+ }
+
+ bool IsValidRegister() const {
+ return IsRegister() &&
+ ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
+ ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
+ }
+
+ bool IsValidVRegister() const {
+ return IsVRegister() &&
+ ((size_ == kBRegSize) || (size_ == kHRegSize) ||
+ (size_ == kSRegSize) || (size_ == kDRegSize) ||
+ (size_ == kQRegSize)) &&
+ (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsValidFPRegister() const {
+ return IsFPRegister() && (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
+ VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
+
+ return type_ == kNoRegister;
+ }
+
+ bool Aliases(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (code_ == other.code_) && (type_ == other.type_);
+ }
+
+ bool Is(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (size_ == other.size_);
+ }
+
+ bool IsZero() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kZeroRegCode);
+ }
+
+ bool IsSP() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kSPRegInternalCode);
+ }
+
+ bool IsRegister() const {
+ return type_ == kRegister;
+ }
+
+ bool IsVRegister() const {
+ return type_ == kVRegister;
+ }
+
+ bool IsFPRegister() const {
+ return IsS() || IsD();
+ }
+
+ bool IsW() const { return IsValidRegister() && Is32Bits(); }
+ bool IsX() const { return IsValidRegister() && Is64Bits(); }
+
+ // These assertions ensure that the size and type of the register are as
+ // described. They do not consider the number of lanes that make up a vector.
+ // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
+ // does not imply Is1D() or Is8B().
+ // Check the number of lanes, i.e. the format of the vector, using methods such
+ // as Is8B(), Is1D(), etc. in the VRegister class.
+ bool IsV() const { return IsVRegister(); }
+ bool IsB() const { return IsV() && Is8Bits(); }
+ bool IsH() const { return IsV() && Is16Bits(); }
+ bool IsS() const { return IsV() && Is32Bits(); }
+ bool IsD() const { return IsV() && Is64Bits(); }
+ bool IsQ() const { return IsV() && Is128Bits(); }
+
+ const Register& W() const;
+ const Register& X() const;
+ const VRegister& V() const;
+ const VRegister& B() const;
+ const VRegister& H() const;
+ const VRegister& S() const;
+ const VRegister& D() const;
+ const VRegister& Q() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const {
+ return (size_ == other.size_) && (type_ == other.type_);
+ }
+
+ protected:
+ unsigned code_;
+ unsigned size_;
+ RegisterType type_;
+
+ private:
+ bool IsValidOrNone() const {
+ return IsValid() || IsNone();
+ }
+};
+
+
+class Register : public CPURegister {
+ public:
+ Register() : CPURegister() {}
+ explicit Register(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()) {
+ VIXL_ASSERT(IsValidRegister());
+ }
+ constexpr Register(unsigned code, unsigned size)
+ : CPURegister(code, size, kRegister) {}
+
+ constexpr Register(js::jit::Register r, unsigned size)
+ : CPURegister(r.code(), size, kRegister) {}
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ js::jit::Register asUnsized() const {
+ if (code_ == kSPRegInternalCode)
+ return js::jit::Register::FromCode((js::jit::Register::Code)kZeroRegCode);
+ return js::jit::Register::FromCode((js::jit::Register::Code)code_);
+ }
+
+
+ static const Register& WRegFromCode(unsigned code);
+ static const Register& XRegFromCode(unsigned code);
+
+ private:
+ static const Register wregisters[];
+ static const Register xregisters[];
+};
+
+
+class VRegister : public CPURegister {
+ public:
+ VRegister() : CPURegister(), lanes_(1) {}
+ explicit VRegister(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()), lanes_(1) {
+ VIXL_ASSERT(IsValidVRegister());
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(unsigned code, unsigned size, unsigned lanes = 1)
+ : CPURegister(code, size, kVRegister), lanes_(lanes) {
+ // VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(js::jit::FloatRegister r)
+ : CPURegister(r.code_, r.size() * 8, kVRegister), lanes_(1) {
+ }
+ constexpr VRegister(js::jit::FloatRegister r, unsigned size)
+ : CPURegister(r.code_, size, kVRegister), lanes_(1) {
+ }
+ VRegister(unsigned code, VectorFormat format)
+ : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
+ lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsVRegister() || IsNone());
+ return IsValidVRegister();
+ }
+
+ static const VRegister& BRegFromCode(unsigned code);
+ static const VRegister& HRegFromCode(unsigned code);
+ static const VRegister& SRegFromCode(unsigned code);
+ static const VRegister& DRegFromCode(unsigned code);
+ static const VRegister& QRegFromCode(unsigned code);
+ static const VRegister& VRegFromCode(unsigned code);
+
+ VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
+ VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
+ VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
+ VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
+ VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
+ VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
+ VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
+ VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
+
+ bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
+
+ // For consistency, we assert the number of lanes of these scalar registers,
+ // even though there are no vectors of equivalent total size with which they
+ // could alias.
+ bool Is1B() const {
+ VIXL_ASSERT(!(Is8Bits() && IsVector()));
+ return Is8Bits();
+ }
+ bool Is1H() const {
+ VIXL_ASSERT(!(Is16Bits() && IsVector()));
+ return Is16Bits();
+ }
+ bool Is1S() const {
+ VIXL_ASSERT(!(Is32Bits() && IsVector()));
+ return Is32Bits();
+ }
+
+ bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; }
+ bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; }
+ bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; }
+ bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; }
+
+ int lanes() const {
+ return lanes_;
+ }
+
+ bool IsScalar() const {
+ return lanes_ == 1;
+ }
+
+ bool IsVector() const {
+ return lanes_ > 1;
+ }
+
+ bool IsSameFormat(const VRegister& other) const {
+ return (size_ == other.size_) && (lanes_ == other.lanes_);
+ }
+
+ unsigned LaneSizeInBytes() const {
+ return SizeInBytes() / lanes_;
+ }
+
+ unsigned LaneSizeInBits() const {
+ return LaneSizeInBytes() * 8;
+ }
+
+ private:
+ static const VRegister bregisters[];
+ static const VRegister hregisters[];
+ static const VRegister sregisters[];
+ static const VRegister dregisters[];
+ static const VRegister qregisters[];
+ static const VRegister vregisters[];
+ int lanes_;
+};
+
+
+// Backward compatibility for FPRegisters.
+typedef VRegister FPRegister;
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and VRegister
+// variants are provided for convenience.
+const Register NoReg;
+const VRegister NoVReg;
+const FPRegister NoFPReg; // For backward compatibility.
+const CPURegister NoCPUReg;
+
+
+#define DEFINE_REGISTERS(N) \
+constexpr Register w##N(N, kWRegSize); \
+constexpr Register x##N(N, kXRegSize);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+constexpr Register wsp(kSPRegInternalCode, kWRegSize);
+constexpr Register sp(kSPRegInternalCode, kXRegSize);
+
+
+#define DEFINE_VREGISTERS(N) \
+constexpr VRegister b##N(N, kBRegSize); \
+constexpr VRegister h##N(N, kHRegSize); \
+constexpr VRegister s##N(N, kSRegSize); \
+constexpr VRegister d##N(N, kDRegSize); \
+constexpr VRegister q##N(N, kQRegSize); \
+constexpr VRegister v##N(N, kQRegSize);
+REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
+
+
+// Register aliases.
+constexpr Register ip0 = x16;
+constexpr Register ip1 = x17;
+constexpr Register lr = x30;
+constexpr Register xzr = x31;
+constexpr Register wzr = w31;
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments
+// set to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments set to NoReg are ignored, as are
+// any subsequent arguments. At least one argument (reg1) must be valid
+// (not NoVReg).
+bool AreConsecutive(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.size()), type_(reg1.type()) {
+ VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ VIXL_ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
+ VIXL_ASSERT(last_reg >= first_reg);
+ list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
+ list_ &= ~((UINT64_C(1) << first_reg) - 1);
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegister::RegisterType type() const {
+ VIXL_ASSERT(IsValid());
+ return type_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+ }
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+ }
+
+ // Variants of Combine and Remove which take a single register.
+ void Combine(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Combine(other.code());
+ }
+
+ void Remove(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Remove(other.code());
+ }
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register are inferred from this list.
+ void Combine(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ |= (UINT64_C(1) << code);
+ }
+
+ void Remove(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ &= ~(UINT64_C(1) << code);
+ }
+
+ static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+ }
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+ }
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ bool Overlaps(const CPURegList& other) const {
+ return (type_ == other.type_) && ((list_ & other.list_) != 0);
+ }
+
+ RegList list() const {
+ VIXL_ASSERT(IsValid());
+ return list_;
+ }
+
+ void set_list(RegList new_list) {
+ VIXL_ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+ // 64-bits being caller-saved.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
+
+ bool IsEmpty() const {
+ VIXL_ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other) const {
+ VIXL_ASSERT(IsValid());
+ return (type_ == other.type()) && ((other.Bit() & list_) != 0);
+ }
+
+ bool IncludesAliasOf(int code) const {
+ VIXL_ASSERT(IsValid());
+ return (((static_cast<RegList>(1) << code) & list_) != 0);
+ }
+
+ int Count() const {
+ VIXL_ASSERT(IsValid());
+ return CountSetBits(list_);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ VIXL_ASSERT((size_in_bits % 8) == 0);
+ return size_in_bits / 8;
+ }
+
+ unsigned TotalSizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const;
+};
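+
+// For illustration, two equivalent ways to build the list {x19, x20, x21}
+// (a sketch only):
+//   CPURegList(x19, x20, x21)
+//   CPURegList(CPURegister::kRegister, kXRegSize, 19, 21)
+// Combine() and Remove() then set or clear individual bits in list_.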
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedV;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedV;
+
+
+// Operand.
+class Operand {
+ public:
+ // #<immediate>
+ // where <immediate> is int64_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<shift> #<shift_amount>}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<extend> {#<shift_amount>}}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+ // FIXME: Temporary constructors for compilation.
+ // FIXME: These should be removed -- Operand should not leak into shared code.
+ // FIXME: Something like an LAllocationUnion for {gpreg, fpreg, Address} is wanted.
+ explicit Operand(js::jit::Register) {
+ MOZ_CRASH("Operand with Register");
+ }
+ explicit Operand(js::jit::FloatRegister) {
+ MOZ_CRASH("Operand with FloatRegister");
+ }
+ explicit Operand(js::jit::Register, int32_t) {
+ MOZ_CRASH("Operand with implicit Address");
+ }
+
+ bool IsImmediate() const;
+ bool IsShiftedRegister() const;
+ bool IsExtendedRegister() const;
+ bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ Operand ToExtendedRegister() const;
+
+ int64_t immediate() const {
+ VIXL_ASSERT(IsImmediate());
+ return immediate_;
+ }
+
+ Register reg() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+ }
+
+ CPURegister maybeReg() const {
+ if (IsShiftedRegister() || IsExtendedRegister())
+ return reg_;
+ return NoCPUReg;
+ }
+
+ Shift shift() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ return shift_;
+ }
+
+ Extend extend() const {
+ VIXL_ASSERT(IsExtendedRegister());
+ return extend_;
+ }
+
+ unsigned shift_amount() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+ }
+
+ private:
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
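+
+// For illustration, typical Operand forms as accepted by the data-processing
+// instructions declared below (a sketch, not an exhaustive list):
+//   Operand(0x42)         // #0x42
+//   Operand(x1)           // x1 (shifted register, LSL #0)
+//   Operand(x1, LSL, 4)   // x1, lsl #4
+//   Operand(w2, UXTB)     // w2, uxtb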
+
+
+// MemOperand represents the addressing mode of a load or store instruction.
+class MemOperand {
+ public:
+ explicit MemOperand(Register base,
+ int64_t offset = 0,
+ AddrMode addrmode = Offset);
+ MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ // Adapter constructors using C++11 delegating.
+ // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
+ explicit MemOperand(js::jit::Address addr)
+ : MemOperand(addr.base.code() == 31 ? sp : Register(addr.base, 64),
+ (ptrdiff_t)addr.offset) {
+ }
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ int64_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ bool IsImmediateOffset() const;
+ bool IsRegisterOffset() const;
+ bool IsPreIndex() const;
+ bool IsPostIndex() const;
+
+ void AddOffset(int64_t offset);
+
+ private:
+ Register base_;
+ Register regoffset_;
+ int64_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
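+
+// For illustration, typical MemOperand forms (a sketch, assuming the usual
+// VIXL AddrMode names Offset, PreIndex and PostIndex):
+//   MemOperand(x0, 16)             // immediate offset: [x0, #16]
+//   MemOperand(x0, x1, LSL, 3)     // register offset:  [x0, x1, lsl #3]
+//   MemOperand(x0, 16, PreIndex)   // pre-index:        [x0, #16]!
+//   MemOperand(x0, 16, PostIndex)  // post-index:       [x0], #16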
+
+
+// Control whether or not position-independent code should be emitted.
+enum PositionIndependentCodeOption {
+ // All code generated will be position-independent; all branches and
+ // references to labels generated with the Label class will use PC-relative
+ // addressing.
+ PositionIndependentCode,
+
+ // Allow VIXL to generate code that refers to absolute addresses. With this
+ // option, it will not be possible to copy the code buffer and run it from a
+ // different address; code must be generated in its final location.
+ PositionDependentCode,
+
+ // Allow VIXL to assume that the bottom 12 bits of the address will be
+ // constant, but that the top 48 bits may change. This allows `adrp` to
+ // function in systems which copy code between pages, but otherwise maintain
+ // 4KB page alignment.
+ PageOffsetDependentCode
+};
+
+
+// Control how scaled- and unscaled-offset loads and stores are generated.
+enum LoadStoreScalingOption {
+ // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferScaledOffset,
+
+ // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferUnscaledOffset,
+
+ // Require scaled-immediate-offset instructions.
+ RequireScaledOffset,
+
+ // Require unscaled-immediate-offset instructions.
+ RequireUnscaledOffset
+};
+
+
+// Assembler.
+class Assembler : public MozBaseAssembler {
+ public:
+ Assembler(PositionIndependentCodeOption pic = PositionIndependentCode);
+
+ // System functions.
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+#define COPYENUM(v) static const Condition v = vixl::v
+#define COPYENUM_(v) static const Condition v = vixl::v##_
+ COPYENUM(Equal);
+ COPYENUM(Zero);
+ COPYENUM(NotEqual);
+ COPYENUM(NonZero);
+ COPYENUM(AboveOrEqual);
+ COPYENUM(CarrySet);
+ COPYENUM(Below);
+ COPYENUM(CarryClear);
+ COPYENUM(Signed);
+ COPYENUM(NotSigned);
+ COPYENUM(Overflow);
+ COPYENUM(NoOverflow);
+ COPYENUM(Above);
+ COPYENUM(BelowOrEqual);
+ COPYENUM_(GreaterThanOrEqual);
+ COPYENUM_(LessThan);
+ COPYENUM_(GreaterThan);
+ COPYENUM_(LessThanOrEqual);
+ COPYENUM(Always);
+ COPYENUM(Never);
+#undef COPYENUM
+#undef COPYENUM_
+
+ // Bit set when a DoubleCondition does not map to a single ARM condition.
+ // The MacroAssembler must special-case these conditions, or else
+ // ConditionFromDoubleCondition will complain.
+ static const int DoubleConditionBitSpecial = 0x100;
+
+ enum DoubleCondition {
+ DoubleOrdered = Condition::vc,
+ DoubleEqual = Condition::eq,
+ DoubleNotEqual = Condition::ne | DoubleConditionBitSpecial,
+ DoubleGreaterThan = Condition::gt,
+ DoubleGreaterThanOrEqual = Condition::ge,
+ DoubleLessThan = Condition::lo, // Could also use Condition::mi.
+ DoubleLessThanOrEqual = Condition::ls,
+
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = Condition::vs,
+ DoubleEqualOrUnordered = Condition::eq | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = Condition::ne,
+ DoubleGreaterThanOrUnordered = Condition::hi,
+ DoubleGreaterThanOrEqualOrUnordered = Condition::hs,
+ DoubleLessThanOrUnordered = Condition::lt,
+ DoubleLessThanOrEqualOrUnordered = Condition::le
+ };
+
+ static inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+ }
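+
+ // For illustration: the AArch64 condition codes come in true/false pairs
+ // that differ only in their lowest bit, so InvertCondition(eq) == ne,
+ // InvertCondition(lt) == ge and InvertCondition(lo) == hs.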
+
+ // This changes the condition codes for cmp a, b into the equivalent codes for cmp b, a.
+ static inline Condition InvertCmpCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ switch (cond) {
+ case eq:
+ case ne:
+ return cond;
+ case gt:
+ return le;
+ case le:
+ return gt;
+ case ge:
+ return lt;
+ case lt:
+ return ge;
+ case hi:
+ return lo;
+ case lo:
+ return hi;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case mi:
+ return pl;
+ case pl:
+ return mi;
+ default:
+ MOZ_CRASH("TODO: figure this case out.");
+ }
+ return static_cast<Condition>(cond ^ 1);
+ }
+
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ VIXL_ASSERT(!(cond & DoubleConditionBitSpecial));
+ return static_cast<Condition>(cond);
+ }
+
+ // Instruction set functions.
+
+ // Branch / Jump instructions.
+ // Branch to register.
+ void br(const Register& xn);
+ static void br(Instruction* at, const Register& xn);
+
+ // Branch with link to register.
+ void blr(const Register& xn);
+ static void blr(Instruction* at, const Register& blr);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ BufferOffset b(Label* label);
+
+ // Conditional branch to label.
+ BufferOffset b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ BufferOffset b(int imm26);
+ static void b(Instruction* at, int imm26);
+
+ // Conditional branch to PC offset.
+ BufferOffset b(int imm19, Condition cond);
+ static void b(Instruction*at, int imm19, Condition cond);
+
+ // Branch with link to label.
+ void bl(Label* label);
+
+ // Branch with link to PC offset.
+ void bl(int imm26);
+ static void bl(Instruction* at, int imm26);
+
+ // Compare and branch to label if zero.
+ void cbz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if zero.
+ void cbz(const Register& rt, int imm19);
+ static void cbz(Instruction* at, const Register& rt, int imm19);
+
+ // Compare and branch to label if not zero.
+ void cbnz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if not zero.
+ void cbnz(const Register& rt, int imm19);
+ static void cbnz(Instruction* at, const Register& rt, int imm19);
+
+ // Table lookup from one register.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup from two registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup from three registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup from four registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Table lookup extension from one register.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup extension from two registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup extension from three registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup extension from four registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Test bit and branch to label if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+ static void tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+ static void tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches, the offset used by
+ // adr is unscaled, i.e. the resulting address can be unaligned.
+
+ // Calculate the address of a label.
+ void adr(const Register& rd, Label* label);
+
+ // Calculate the address of a PC offset.
+ void adr(const Register& rd, int imm21);
+ static void adr(Instruction* at, const Register& rd, int imm21);
+
+ // Calculate the page address of a label.
+ void adrp(const Register& rd, Label* label);
+
+ // Calculate the page address of a PC offset.
+ void adrp(const Register& rd, int imm21);
+ static void adrp(Instruction* at, const Register& rd, int imm21);
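+
+ // For instance (register and offsets chosen arbitrarily), adr(x0, 5) yields
+ // the byte address pc + 5, which may be unaligned, whereas the branch form
+ // b(5) targets pc + 5 * 4 bytes, since branch offsets count instructions.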
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ BufferOffset ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test and set flags.
+ BufferOffset tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise or with inverted operand (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise exclusive or with inverted operand, i.e. xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left by variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right by variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right by variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right by variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ sbfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Signed bitfield insert with zero at right.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.size();
+ VIXL_ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
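+
+ // As an illustration (register choice is arbitrary), lsl(x0, x1, 8) expands
+ // to ubfm(x0, x1, 56, 55): with imms < immr, UBFM places the low 56 bits of
+ // x1 into x0 starting at bit 8 and clears the rest, which is exactly a left
+ // shift by 8.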
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ ubfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Unsigned bitfield insert with zero at right.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
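+
+ // For example (illustrative only), ubfx(w0, w1, 8, 4) expands to
+ // ubfm(w0, w1, 8, 11), copying bits <11:8> of w1 into bits <3:0> of w0 and
+ // zeroing the upper bits; sbfx performs the same extraction but sign-extends
+ // the result.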
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set mask: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Rotate right.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // CRC-32 checksum from byte.
+ void crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from half-word.
+ void crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from word.
+ void crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from double word.
+ void crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from byte.
+ void crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from half-word.
+ void crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from word.
+ void crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32C checksum from double word.
+ void crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // Multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // Negated multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed long multiply: 32 x 32 -> 64-bit.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+ void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+ // Multiply and accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Multiply and subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply: 32 x 32 -> 64-bit.
+ void umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ umaddl(rd, rn, rm, xzr);
+ }
+
+ // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
+ void umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm);
+
+ // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit reverse.
+ void rbit(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 16-bit half words.
+ void rev16(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 32-bit words.
+ void rev32(const Register& rd, const Register& rn);
+
+ // Reverse bytes.
+ void rev(const Register& rd, const Register& rn);
+
+ // Count leading zeroes.
+ void clz(const Register& rd, const Register& rn);
+
+ // Count leading sign bits.
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load integer or FP register (with unscaled offset).
+ void ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store integer or FP register (with unscaled offset).
+ void stur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load word with sign extension.
+ void ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte (with unscaled offset).
+ void ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store byte (with unscaled offset).
+ void sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte with sign extension (and unscaled offset).
+ void ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word (with unscaled offset).
+ void ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store half-word (with unscaled offset).
+ void sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word with sign extension (and unscaled offset).
+ void ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load integer or FP register from pc + imm19 << 2.
+ void ldr(const CPURegister& rt, int imm19);
+ static void ldr(Instruction* at, const CPURegister& rt, int imm19);
+
+ // Load word with sign extension from pc + imm19 << 2.
+ void ldrsw(const Register& rt, int imm19);
+
+ // Store exclusive byte.
+ void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive half-word.
+ void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive register.
+ void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load exclusive byte.
+ void ldxrb(const Register& rt, const MemOperand& src);
+
+ // Load exclusive half-word.
+ void ldxrh(const Register& rt, const MemOperand& src);
+
+ // Load exclusive register.
+ void ldxr(const Register& rt, const MemOperand& src);
+
+ // Store exclusive register pair.
+ void stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load exclusive register pair.
+ void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Store-release exclusive byte.
+ void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive half-word.
+ void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive register.
+ void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load-acquire exclusive byte.
+ void ldaxrb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive half-word.
+ void ldaxrh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive register.
+ void ldaxr(const Register& rt, const MemOperand& src);
+
+ // Store-release exclusive register pair.
+ void stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load-acquire exclusive register pair.
+ void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Store-release byte.
+ void stlrb(const Register& rt, const MemOperand& dst);
+
+ // Store-release half-word.
+ void stlrh(const Register& rt, const MemOperand& dst);
+
+ // Store-release register.
+ void stlr(const Register& rt, const MemOperand& dst);
+
+ // Load-acquire byte.
+ void ldarb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire half-word.
+ void ldarh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire register.
+ void ldar(const Register& rt, const MemOperand& src);
+
+ // Prefetch memory.
+ void prfm(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Prefetch memory (with unscaled offset).
+ void prfum(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Prefetch from pc + imm19 << 2.
+ void prfm(PrefetchOperation op, int imm19);
+
+ // Move instructions. The default shift of -1 indicates that the assembler
+ // will derive a 16-bit immediate and a left shift (0, 16, 32 or 48) such
+ // that the shifted immediate equals the 64-bit immediate argument. If an
+ // explicit left shift is specified, the immediate must be a 16-bit value.
+ //
+ // For movk, an explicit shift selects which half word is overwritten,
+ // e.g. movk(x0, 0, 0) overwrites the least-significant half word with zero,
+ // whereas movk(x0, 0, 48) overwrites the most-significant half word.
+
+ // Move immediate and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move inverted immediate.
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move immediate.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
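+
+ // A small usage sketch (the register and constant are arbitrary): a 64-bit
+ // immediate such as 0x123456789abcdef0 can be materialized one half word at
+ // a time with explicit shifts:
+ //
+ //   movz(x0, 0xdef0, 0);    // Bits <15:0>; all other bits are cleared.
+ //   movk(x0, 0x9abc, 16);   // Bits <31:16>; the rest of x0 is kept.
+ //   movk(x0, 0x5678, 32);   // Bits <47:32>.
+ //   movk(x0, 0x1234, 48);   // Bits <63:48>.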
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Generate exception targeting EL1.
+ void svc(int code);
+ static void svc(Instruction* at, int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move inverted operand to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System instruction.
+ void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr);
+
+ // System instruction with pre-encoded op (op1:crn:crm:op2).
+ void sys(int op, const Register& rt = xzr);
+
+ // System data cache operation.
+ void dc(DataCacheOp op, const Register& rt);
+
+ // System instruction cache operation.
+ void ic(InstructionCacheOp op, const Register& rt);
+
+ // System hint.
+ BufferOffset hint(SystemHint code);
+ static void hint(Instruction* at, SystemHint code);
+
+ // Clear exclusive monitor.
+ void clrex(int imm4 = 0xf);
+
+ // Data memory barrier.
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier.
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier.
+ void isb();
+
+ // Alias for system instructions.
+ // No-op.
+ BufferOffset nop() {
+ return hint(NOP);
+ }
+ static void nop(Instruction* at);
+
+ // FP and NEON instructions.
+ // Move double precision immediate to FP register.
+ void fmov(const VRegister& vd, double imm);
+
+ // Move single precision immediate to FP register.
+ void fmov(const VRegister& vd, float imm);
+
+ // Move FP register to register.
+ void fmov(const Register& rd, const VRegister& fn);
+
+ // Move register to FP register.
+ void fmov(const VRegister& vd, const Register& rn);
+
+ // Move FP register to FP register.
+ void fmov(const VRegister& vd, const VRegister& fn);
+
+ // Move 64-bit register to top half of 128-bit FP register.
+ void fmov(const VRegister& vd, int index, const Register& rn);
+
+ // Move top half of 128-bit FP register to 64-bit register.
+ void fmov(const Register& rd, const VRegister& vn, int index);
+
+ // FP add.
+ void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP subtract.
+ void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP multiply.
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add.
+ void fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract.
+ void fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-add and negate.
+ void fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract and negate.
+ void fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP multiply-negate scalar.
+ void fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal exponent scalar.
+ void frecpx(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP divide.
+ void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum.
+ void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum.
+ void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum number.
+ void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum number.
+ void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP absolute.
+ void fabs(const VRegister& vd, const VRegister& vn);
+
+ // FP negate.
+ void fneg(const VRegister& vd, const VRegister& vn);
+
+ // FP square root.
+ void fsqrt(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to away.
+ void frinta(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, implicit rounding.
+ void frinti(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward minus infinity.
+ void frintm(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to even.
+ void frintn(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward plus infinity.
+ void frintp(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, exact, implicit rounding.
+ void frintx(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, towards zero.
+ void frintz(const VRegister& vd, const VRegister& vn);
+
+ void FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap);
+
+ void FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap);
+
+ // FP compare registers.
+ void fcmp(const VRegister& vn, const VRegister& vm);
+
+ // FP compare immediate.
+ void fcmp(const VRegister& vn, double value);
+
+ void FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap);
+
+ // FP conditional compare.
+ void fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP signaling compare registers.
+ void fcmpe(const VRegister& vn, const VRegister& vm);
+
+ // FP signaling compare immediate.
+ void fcmpe(const VRegister& vn, double value);
+
+ // FP conditional signaling compare.
+ void fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond);
+
+ // Common FP Convert functions.
+ void NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op);
+ void NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+
+ // FP convert between precisions.
+ void fcvt(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision.
+ void fcvtl(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision (second part).
+ void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision.
+ void fcvtn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision (second part).
+ void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd.
+ void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd (second part).
+ void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const VRegister& vd, const VRegister& vn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert signed integer or fixed-point to FP.
+ void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Convert unsigned integer or fixed-point to FP.
+ void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Unsigned absolute difference.
+ void uabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference.
+ void sabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate.
+ void uaba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate.
+ void saba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add.
+ void add(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract.
+ void sub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving add.
+ void uhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving add.
+ void shadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding halving add.
+ void urhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding halving add.
+ void srhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving sub.
+ void uhsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving sub.
+ void shsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating add.
+ void uqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating add.
+ void sqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating subtract.
+ void uqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating subtract.
+ void sqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pairwise.
+ void addp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pair of elements scalar.
+ void addp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Multiply-add to accumulator.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply-subtract to accumulator.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply by scalar element.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-add by scalar element.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-subtract by scalar element.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating double long multiply by element.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating double long multiply by element (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Compare equal.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than or equal.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher.
+ void cmhi(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher or same.
+ void cmhs(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare bitwise test bits nonzero.
+ void cmtst(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare bitwise equal to zero.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than or equal to zero.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than zero.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than or equal to zero.
+ void cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than zero.
+ void cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Signed shift left by register.
+ void sshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned shift left by register.
+ void ushl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating shift left by register.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating shift left by register.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding shift left by register.
+ void srshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding shift left by register.
+ void urshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding shift left by register.
+ void sqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating rounding shift left by register.
+ void uqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise and.
+ void and_(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or.
+ void orr(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or immediate.
+ void orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Move register to register.
+ void mov(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise orn.
+ void orn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise eor.
+ void eor(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bit clear immediate.
+ void bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Bit clear.
+ void bic(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if false.
+ void bif(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if true.
+ void bit(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise select.
+ void bsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply.
+ void pmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Vector move immediate.
+ void movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Bitwise not.
+ void mvn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Vector move inverted immediate.
+ void mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Signed saturating accumulate of unsigned value.
+ void suqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating accumulate of signed value.
+ void usqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Absolute value.
+ void abs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating absolute value.
+ void sqabs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Negate.
+ void neg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating negate.
+ void sqneg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise not.
+ void not_(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow.
+ void xtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow (second part).
+ void xtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow.
+ void sqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow (second part).
+ void sqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow.
+ void uqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow (second part).
+ void uqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow.
+ void sqxtun(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow (second part).
+ void sqxtun2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract vector from pair of vectors.
+ void ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index);
+
+ // Duplicate vector element to vector or scalar.
+ void dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to scalar.
+ void mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Duplicate general-purpose register to vector.
+ void dup(const VRegister& vd,
+ const Register& rn);
+
+ // Insert vector element from another vector element.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to another vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Insert vector element from general-purpose register.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Move general-purpose register to a vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Unsigned move vector element to general-purpose register.
+ void umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to general-purpose register.
+ void mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Signed move vector element to general-purpose register.
+ void smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // One-element structure load to one register.
+ void ld1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure load to two registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure load to three registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure load to four registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure load to one lane.
+ void ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // One-element single structure load to all lanes.
+ void ld1r(const VRegister& vt,
+ const MemOperand& src);
+
+ // Two-element structure load.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure load to one lane.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element single structure load to all lanes.
+ void ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Three-element structure load.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure load to one lane.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element single structure load to all lanes.
+ void ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Four-element structure load.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure load to one lane.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element single structure load to all lanes.
+ void ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Count leading sign bits.
+ void cls(const VRegister& vd,
+ const VRegister& vn);
+
+ // Count leading zero bits (vector).
+ void clz(const VRegister& vd,
+ const VRegister& vn);
+
+ // Population count per byte.
+ void cnt(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse bit order.
+ void rbit(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 16-bit halfwords.
+ void rev16(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 32-bit words.
+ void rev32(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 64-bit doublewords.
+ void rev64(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal square root estimate.
+ void ursqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal estimate.
+ void urecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add.
+ void saddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add.
+ void uaddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add and accumulate.
+ void sadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add and accumulate.
+ void uadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate.
+ void shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left by immediate.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left unsigned by immediate.
+ void sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift left by immediate.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate.
+ void sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate (second part).
+ void sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed extend long.
+ void sxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed extend long (second part).
+ void sxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned shift left long by immediate.
+ void ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift left long by immediate (second part).
+ void ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size.
+ void shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size (second part).
+ void shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned extend long.
+ void uxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned extend long (second part).
+ void uxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate and insert.
+ void sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right by immediate and insert.
+ void sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed maximum.
+ void smax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed pairwise maximum.
+ void smaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add across vector.
+ void addv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed add long across vector.
+ void saddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned add long across vector.
+ void uaddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum number across vector.
+ void fmaxnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum across vector.
+ void fmaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum number across vector.
+ void fminnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum across vector.
+ void fminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed maximum across vector.
+ void smaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed minimum.
+ void smin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum pairwise.
+ void sminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum across vector.
+ void sminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // One-element structure store from one register.
+ void st1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure store from two registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure store from three registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure store from four registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure store from one lane.
+ void st1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element structure store from two registers.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure store from two lanes.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element structure store from three registers.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure store from three lanes.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element structure store from four registers.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure store from four lanes.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Unsigned add long.
+ void uaddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add long (second part).
+ void uaddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide.
+ void uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide (second part).
+ void uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long.
+ void saddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long (second part).
+ void saddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide.
+ void saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide (second part).
+ void saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long.
+ void usubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long (second part).
+ void usubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide.
+ void usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide (second part).
+ void usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long.
+ void ssubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long (second part).
+ void ssubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide.
+ void ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide (second part).
+ void ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum.
+ void umax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise maximum.
+ void umaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum across vector.
+ void umaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned minimum.
+ void umin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise minimum.
+ void uminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned minimum across vector.
+ void uminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Transpose vectors (primary).
+ void trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Transpose vectors (secondary).
+ void trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (primary).
+ void uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (secondary).
+ void uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (primary).
+ void zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (secondary).
+ void zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed shift right by immediate.
+ void sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate.
+ void ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate.
+ void srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate.
+ void urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift right by immediate and accumulate.
+ void ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate and accumulate.
+ void usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate and accumulate.
+ void srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate and accumulate.
+ void ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate.
+ void shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate (second part).
+ void shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate.
+ void rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate (second part).
+ void rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate.
+ void uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate (second part).
+ void uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate.
+ void uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate (second part).
+ void uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate.
+ void sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate (second part).
+ void sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate.
+ void sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate (second part).
+ void sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate.
+ void sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate (second part).
+ void sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed sat rounded shift right unsigned narrow by immediate.
+ void sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed sat rounded shift right unsigned narrow by immediate (second part).
+ void sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // FP reciprocal step.
+ void frecps(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal estimate.
+ void frecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root estimate.
+ void frsqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root step.
+ void frsqrts(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long.
+ void sabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long (second part).
+ void sabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long.
+ void uabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long (second part).
+ void uabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long.
+ void sabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long (second part).
+ void sabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long.
+ void uabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long (second part).
+ void uabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long.
+ void pmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long (second part).
+ void pmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding doubling multiply returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply element returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating rounding doubling multiply element returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half.
+ void addhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half (second part).
+ void addhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half.
+ void raddhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half (second part).
+ void raddhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half.
+ void subhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half (second part).
+ void subhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half.
+ void rsubhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half (second part).
+ void rsubhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply accumulate.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply subtract.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply extended.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than or equal.
+ void facge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than.
+ void facgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP multiply by element.
+ void fmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-add to accumulator by element.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-sub from accumulator by element.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP multiply extended by element.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP compare equal.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than or equal.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP compare equal to zero.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than zero.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than or equal to zero.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP less than or equal to zero.
+ void fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP less than zero.
+ void fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP absolute difference.
+ void fabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add vector.
+ void faddp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add scalar.
+ void faddp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum vector.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum scalar.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum vector.
+ void fminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum scalar.
+ void fminp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum number vector.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum number scalar.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum number vector.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum number scalar.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Emit generic instructions.
+ // Emit raw instructions into the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 32 bits of data into the instruction stream.
+ void dc32(uint32_t data) {
+ EmitData(&data, sizeof(data));
+ }
+
+ // Emit 64 bits of data into the instruction stream.
+ void dc64(uint64_t data) {
+ EmitData(&data, sizeof(data));
+ }
+
+ // Code generation helpers.
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ VIXL_ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ VIXL_ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr RmNot31(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ VIXL_ASSERT(!rm.IsZero());
+ return Rm(rm);
+ }
+
+ static Instr Ra(CPURegister ra) {
+ VIXL_ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ VIXL_ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ static Instr Rs(CPURegister rs) {
+ VIXL_ASSERT(rs.code() != kSPRegInternalCode);
+ return rs.code() << Rs_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ VIXL_ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ VIXL_ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ static Instr Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ static Instr Cond(Condition cond) {
+ return cond << Condition_offset;
+ }
+
+ // PC-relative address encoding.
+ static Instr ImmPCRelAddress(int imm21) {
+ VIXL_ASSERT(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+ }
+
+ // Branch encoding.
+ static Instr ImmUncondBranch(int imm26) {
+ VIXL_ASSERT(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+ }
+
+ static Instr ImmCondBranch(int imm19) {
+ VIXL_ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+ }
+
+ static Instr ImmCmpBranch(int imm19) {
+ VIXL_ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+ }
+
+ static Instr ImmTestBranch(int imm14) {
+ VIXL_ASSERT(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+ }
+
+ static Instr ImmTestBranchBit(unsigned bit_pos) {
+ VIXL_ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+ }
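+ // ImmTestBranchBit example (illustrative): for bit_pos = 37 (0b100101), bit 5
+ // of bit_pos is encoded in the b5 field (instruction bit 31) and the low five
+ // bits (0b00101) in the b40 field (instruction bits 23:19).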
+
+ // Data Processing encoding.
+ static Instr SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+ }
+
+ static Instr ImmAddSub(int imm) {
+ VIXL_ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ imm <<= ImmAddSub_offset;
+ } else {
+ imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+ return imm;
+ }
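+ // ImmAddSub example (illustrative): an immediate of 0x123 fits in twelve bits
+ // and is encoded directly, while 0x123000 is encoded as 0x123 with the shift
+ // field set, indicating a left shift by 12.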
+
+ static Instr ImmS(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+ ((reg_size == kWRegSize) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+ }
+
+ static Instr ImmR(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ VIXL_ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+ }
+
+ static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(is_uint6(imms));
+ VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+ }
+
+ static Instr ImmRotate(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+ }
+
+ static Instr ImmLLiteral(int imm19) {
+ VIXL_ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+ }
+
+ static Instr BitN(unsigned bitn, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+ }
+
+ static Instr ShiftDP(Shift shift) {
+ VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+ }
+
+ static Instr ImmDPShift(unsigned amount) {
+ VIXL_ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+ }
+
+ static Instr ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+ }
+
+ static Instr ImmExtendShift(unsigned left_shift) {
+ VIXL_ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+ }
+
+ static Instr ImmCondCmp(unsigned imm) {
+ VIXL_ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+ }
+
+ static Instr Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+ }
+
+ // MemOperand offset encoding.
+ static Instr ImmLSUnsigned(int imm12) {
+ VIXL_ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+ }
+
+ static Instr ImmLS(int imm9) {
+ VIXL_ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+ }
+
+ static Instr ImmLSPair(int imm7, unsigned access_size) {
+ VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7);
+ int scaled_imm7 = imm7 >> access_size;
+ VIXL_ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+ }
+
+ static Instr ImmShiftLS(unsigned shift_amount) {
+ VIXL_ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+ }
+
+ static Instr ImmPrefetchOperation(int imm5) {
+ VIXL_ASSERT(is_uint5(imm5));
+ return imm5 << ImmPrefetchOperation_offset;
+ }
+
+ static Instr ImmException(int imm16) {
+ VIXL_ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+ }
+
+ static Instr ImmSystemRegister(int imm15) {
+ VIXL_ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+ }
+
+ static Instr ImmHint(int imm7) {
+ VIXL_ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+ }
+
+ static Instr CRm(int imm4) {
+ VIXL_ASSERT(is_uint4(imm4));
+ return imm4 << CRm_offset;
+ }
+
+ static Instr CRn(int imm4) {
+ VIXL_ASSERT(is_uint4(imm4));
+ return imm4 << CRn_offset;
+ }
+
+ static Instr SysOp(int imm14) {
+ VIXL_ASSERT(is_uint14(imm14));
+ return imm14 << SysOp_offset;
+ }
+
+ static Instr ImmSysOp1(int imm3) {
+ VIXL_ASSERT(is_uint3(imm3));
+ return imm3 << SysOp1_offset;
+ }
+
+ static Instr ImmSysOp2(int imm3) {
+ VIXL_ASSERT(is_uint3(imm3));
+ return imm3 << SysOp2_offset;
+ }
+
+ static Instr ImmBarrierDomain(int imm2) {
+ VIXL_ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+ }
+
+ static Instr ImmBarrierType(int imm2) {
+ VIXL_ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+ }
+
+ // Move immediates encoding.
+ static Instr ImmMoveWide(uint64_t imm) {
+ VIXL_ASSERT(is_uint16(imm));
+ return static_cast<Instr>(imm << ImmMoveWide_offset);
+ }
+
+ static Instr ShiftMoveWide(int64_t shift) {
+ VIXL_ASSERT(is_uint2(shift));
+ return static_cast<Instr>(shift << ShiftMoveWide_offset);
+ }
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+
+ // FP register type.
+ static Instr FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+ }
+
+ static Instr FPScale(unsigned scale) {
+ VIXL_ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+ }
+
+ // Immediate field checking helpers.
+ static bool IsImmAddSub(int64_t immediate);
+ static bool IsImmConditionalCompare(int64_t immediate);
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n = NULL,
+ unsigned* imm_s = NULL,
+ unsigned* imm_r = NULL);
+ static bool IsImmLSPair(int64_t offset, unsigned access_size);
+ static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
+ // Instruction bits for vector format in data processing operations.
+ static Instr VFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 2: return NEON_2S;
+ case 4: return NEON_4H;
+ case 8: return NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return NEON_2D;
+ case 4: return NEON_4S;
+ case 8: return NEON_8H;
+ case 16: return NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for vector format in floating point data processing
+ // operations.
+ static Instr FPFormat(VRegister vd) {
+ if (vd.lanes() == 1) {
+ // Floating point scalar formats.
+ VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits());
+ return vd.Is64Bits() ? FP64 : FP32;
+ }
+
+ // Two lane floating point vector formats.
+ if (vd.lanes() == 2) {
+ VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits());
+ return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S;
+ }
+
+ // Four lane floating point vector format.
+ VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits());
+ return NEON_FP_4S;
+ }
+
+ // Instruction bits for vector format in load and store operations.
+ static Instr LSVFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 1: return LS_NEON_1D;
+ case 2: return LS_NEON_2S;
+ case 4: return LS_NEON_4H;
+ case 8: return LS_NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return LS_NEON_2D;
+ case 4: return LS_NEON_4S;
+ case 8: return LS_NEON_8H;
+ case 16: return LS_NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for scalar format in data processing operations.
+ static Instr SFormat(VRegister vd) {
+ VIXL_ASSERT(vd.lanes() == 1);
+ switch (vd.SizeInBytes()) {
+ case 1: return NEON_B;
+ case 2: return NEON_H;
+ case 4: return NEON_S;
+ case 8: return NEON_D;
+ default: return 0xffffffff;
+ }
+ }
+
+ static Instr ImmNEONHLM(int index, int num_bits) {
+ int h, l, m;
+ if (num_bits == 3) {
+ VIXL_ASSERT(is_uint3(index));
+ h = (index >> 2) & 1;
+ l = (index >> 1) & 1;
+ m = (index >> 0) & 1;
+ } else if (num_bits == 2) {
+ VIXL_ASSERT(is_uint2(index));
+ h = (index >> 1) & 1;
+ l = (index >> 0) & 1;
+ m = 0;
+ } else {
+ VIXL_ASSERT(is_uint1(index) && (num_bits == 1));
+ h = (index >> 0) & 1;
+ l = 0;
+ m = 0;
+ }
+ return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
+ }
+
+ static Instr ImmNEONExt(int imm4) {
+ VIXL_ASSERT(is_uint4(imm4));
+ return imm4 << ImmNEONExt_offset;
+ }
+
+ static Instr ImmNEON5(Instr format, int index) {
+ VIXL_ASSERT(is_uint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm5 = (index << (s + 1)) | (1 << s);
+ return imm5 << ImmNEON5_offset;
+ }
+
+ static Instr ImmNEON4(Instr format, int index) {
+ VIXL_ASSERT(is_uint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm4 = index << s;
+ return imm4 << ImmNEON4_offset;
+ }
+
+ static Instr ImmNEONabcdefgh(int imm8) {
+ VIXL_ASSERT(is_uint8(imm8));
+ Instr instr;
+ instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+ instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+ return instr;
+ }
+
+ static Instr NEONCmode(int cmode) {
+ VIXL_ASSERT(is_uint4(cmode));
+ return cmode << NEONCmode_offset;
+ }
+
+ static Instr NEONModImmOp(int op) {
+ VIXL_ASSERT(is_uint1(op));
+ return op << NEONModImmOp_offset;
+ }
+
+ size_t size() const {
+ return SizeOfCodeGenerated();
+ }
+
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ PositionIndependentCodeOption pic() const {
+ return pic_;
+ }
+
+ bool AllowPageOffsetDependentCode() const {
+ return (pic() == PageOffsetDependentCode) ||
+ (pic() == PositionDependentCode);
+ }
+
+ static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+ return reg.Is64Bits() ? xzr : wzr;
+ }
+
+
+ protected:
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op);
+ void LoadStoreStruct1(const VRegister& vt,
+ int reg_count,
+ const MemOperand& addr);
+ void LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op);
+
+ void Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // TODO(all): The third parameter should be passed by reference, but gcc 4.8.2
+ // then reports a bogus uninitialised warning.
+ BufferOffset Logical(const Register& rd,
+ const Register& rn,
+ const Operand operand,
+ LogicalOp op);
+ BufferOffset LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ void NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
+
+
+ private:
+ static uint32_t FP32ToImm8(float imm);
+ static uint32_t FP64ToImm8(double imm);
+
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ BufferOffset DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const VRegister& fd,
+ const VRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing3Source(const VRegister& fd,
+ const VRegister& fn,
+ const VRegister& fm,
+ const VRegister& fa,
+ FPDataProcessing3SourceOp op);
+ void NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op);
+ void NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op);
+ void NEONFP2Same(const VRegister& vd,
+ const VRegister& vn,
+ Instr vop);
+ void NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop);
+ void NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op);
+ void NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value = 0.0);
+ void NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value = 0);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+ void NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op);
+ void NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op);
+ void NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb);
+ void NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop);
+
+ Instr LoadStoreStructAddrModeField(const MemOperand& addr);
+
+ // Encode the specified MemOperand for the specified access size and scaling
+ // preference.
+ Instr LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option);
+
+ protected:
+ // Prevent generation of a literal pool for the next |maxInst| instructions.
+ // Guarantees instruction linearity.
+ class AutoBlockLiteralPool {
+ ARMBuffer* armbuffer_;
+
+ public:
+ AutoBlockLiteralPool(Assembler* assembler, size_t maxInst)
+ : armbuffer_(&assembler->armbuffer_) {
+ armbuffer_->enterNoPool(maxInst);
+ }
+ ~AutoBlockLiteralPool() {
+ armbuffer_->leaveNoPool();
+ }
+ };
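+ // Illustrative usage sketch (not taken from existing callers): scope the
+ // blocker so that the literal pool is re-enabled when it goes out of scope:
+ //   {
+ //     AutoBlockLiteralPool block(this, /* maxInst = */ 2);
+ //     ... emit the instructions that must remain contiguous ...
+ //   }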
+
+ protected:
+ // The degree of position independence required of the generated code.
+ PositionIndependentCodeOption pic_;
+
+#ifdef DEBUG
+ bool finalized_;
+#endif
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
new file mode 100644
index 000000000..e13eef613
--- /dev/null
+++ b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
@@ -0,0 +1,179 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef VIXL_COMPILER_INTRINSICS_H
+#define VIXL_COMPILER_INTRINSICS_H
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Helper to check whether the version of GCC in use is at least the specified
+// requirement.
+#define MAJOR 1000000
+#define MINOR 1000
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#else
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
+#endif
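+
+// Worked example (illustrative): with GCC 4.9.2 the computed version value is
+// 4 * 1000000 + 9 * 1000 + 2 = 4009002, so GCC_VERSION_OR_NEWER(4, 7, 0)
+// compares 4009002 >= 4007000 and evaluates to true.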
+
+
+#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+
+#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
+#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
+#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
+#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
+#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+
+#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+// The documentation for these builtins is available at:
+// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
+
+# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
+# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
+
+#else
+// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
+// implemented C++ methods.
+
+#define COMPILER_HAS_BUILTIN_BSWAP false
+#define COMPILER_HAS_BUILTIN_CLRSB false
+#define COMPILER_HAS_BUILTIN_CLZ false
+#define COMPILER_HAS_BUILTIN_CTZ false
+#define COMPILER_HAS_BUILTIN_FFS false
+#define COMPILER_HAS_BUILTIN_POPCOUNT false
+
+#endif
+
+
+template<typename V>
+inline bool IsPowerOf2(V value) {
+ return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+// Implementation of intrinsics functions.
+// TODO: The implementations could be improved for sizes different from 32bit
+// and 64bit: we could mask the values and call the appropriate builtin.
+
+
+template<typename V>
+inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_clzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountLeadingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountLeadingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+
+template<typename V>
+inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLRSB
+ if (width == 32) {
+ return __builtin_clrsb(value);
+ } else if (width == 64) {
+ return __builtin_clrsbll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+#endif
+}
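+// For example, for a 32-bit input both 0 and -1 have 31 leading sign bits
+// (the sign bit itself is not counted), matching __builtin_clrsb.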
+
+
+template<typename V>
+inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_POPCOUNT
+ if (width == 32) {
+ return __builtin_popcount(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return __builtin_popcountll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountPopulation32(value);
+ } else if (width == 64) {
+ return mozilla::CountPopulation64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+
+template<typename V>
+inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CTZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_ctzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountTrailingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountTrailingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+} // namespace vixl
+
+#endif // VIXL_COMPILER_INTRINSICS_H
+
diff --git a/js/src/jit/arm64/vixl/Constants-vixl.h b/js/src/jit/arm64/vixl/Constants-vixl.h
new file mode 100644
index 000000000..3724cf4b3
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Constants-vixl.h
@@ -0,0 +1,2148 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_CONSTANTS_A64_H_
+#define VIXL_A64_CONSTANTS_A64_H_
+
+#include <stdint.h>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Supervisor Call (svc) specific support.
+//
+// The SVC instruction encodes an optional 16-bit immediate value.
+// The simulator understands the codes below.
+enum SVCSimulatorCodes {
+ kCallRtRedirected = 0x10, // Transition to x86_64 C code.
+ kMarkStackPointer = 0x11, // Push the current SP on a special Simulator stack.
+ kCheckStackPointer = 0x12 // Pop from the special Simulator stack and compare to SP.
+};
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load/store register. */ \
+V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
+V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */ \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+V_(LSOpc, 23, 22, Bits) \
+V_(LSVector, 26, 26, Bits) \
+V_(LSSize, 31, 30, Bits) \
+V_(ImmPrefetchOperation, 4, 0, Bits) \
+V_(PrefetchHint, 4, 3, Bits) \
+V_(PrefetchTarget, 2, 1, Bits) \
+V_(PrefetchStream, 0, 0, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR, SYS) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp, 18, 5, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+ \
+/* Load-/store-exclusive */ \
+V_(LdStXLoad, 22, 22, Bits) \
+V_(LdStXNotExclusive, 23, 23, Bits) \
+V_(LdStXAcquireRelease, 15, 15, Bits) \
+V_(LdStXSizeLog2, 31, 30, Bits) \
+V_(LdStXPair, 21, 21, Bits) \
+ \
+/* NEON generic fields */ \
+V_(NEONQ, 30, 30, Bits) \
+V_(NEONSize, 23, 22, Bits) \
+V_(NEONLSSize, 11, 10, Bits) \
+V_(NEONS, 12, 12, Bits) \
+V_(NEONL, 21, 21, Bits) \
+V_(NEONM, 20, 20, Bits) \
+V_(NEONH, 11, 11, Bits) \
+V_(ImmNEONExt, 14, 11, Bits) \
+V_(ImmNEON5, 20, 16, Bits) \
+V_(ImmNEON4, 14, 11, Bits) \
+ \
+/* NEON Modified Immediate fields */ \
+V_(ImmNEONabc, 18, 16, Bits) \
+V_(ImmNEONdefgh, 9, 5, Bits) \
+V_(NEONModImmOp, 29, 29, Bits) \
+V_(NEONCmode, 15, 12, Bits) \
+ \
+/* NEON Shift Immediate fields */ \
+V_(ImmNEONImmhImmb, 22, 16, Bits) \
+V_(ImmNEONImmh, 22, 19, Bits) \
+V_(ImmNEONImmb, 18, 16, Bits)
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+// Field offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
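+
+// As an illustration of the expansion above, the Rd field (bits 4:0) yields:
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = ((1 << 5) - 1) << 0; // 0x0000001f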
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0, // Z set Equal.
+ ne = 1, // Z clear Not equal.
+ cs = 2, // C set Carry set.
+ cc = 3, // C clear Carry clear.
+ mi = 4, // N set Negative.
+ pl = 5, // N clear Positive or zero.
+ vs = 6, // V set Overflow.
+ vc = 7, // V clear No overflow.
+ hi = 8, // C set, Z clear Unsigned higher.
+ ls = 9, // C clear or Z set Unsigned lower or same.
+ ge = 10, // N == V Greater or equal.
+ lt = 11, // N != V Less than.
+ gt = 12, // Z clear, N == V Greater than.
+ le = 13, // Z set or N != V Less than or equal.
+ al = 14, // Always.
+ nv = 15, // Behaves as always/al.
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc, // C clear Unsigned lower.
+
+ // Mozilla expanded aliases.
+ Equal = 0, Zero = 0,
+ NotEqual = 1, NonZero = 1,
+ AboveOrEqual = 2, CarrySet = 2,
+ Below = 3, CarryClear = 3,
+ Signed = 4,
+ NotSigned = 5,
+ Overflow = 6,
+ NoOverflow = 7,
+ Above = 8,
+ BelowOrEqual = 9,
+ GreaterThanOrEqual_ = 10,
+ LessThan_ = 11,
+ GreaterThan_ = 12,
+ LessThanOrEqual_ = 13,
+ Always = 14,
+ Never = 15
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
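+// For example, InvertCondition(eq) == ne and InvertCondition(hi) == ls: each
+// pair of complementary conditions differs only in the lowest encoding bit.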
+
+enum FPTrapFlags {
+ EnableTrap = 1,
+ DisableTrap = 0
+};
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3,
+ MSL = 0x4
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+enum PrefetchOperation {
+ PLDL1KEEP = 0x00,
+ PLDL1STRM = 0x01,
+ PLDL2KEEP = 0x02,
+ PLDL2STRM = 0x03,
+ PLDL3KEEP = 0x04,
+ PLDL3STRM = 0x05,
+
+ PLIL1KEEP = 0x08,
+ PLIL1STRM = 0x09,
+ PLIL2KEEP = 0x0a,
+ PLIL2STRM = 0x0b,
+ PLIL3KEEP = 0x0c,
+ PLIL3STRM = 0x0d,
+
+ PSTL1KEEP = 0x10,
+ PSTL1STRM = 0x11,
+ PSTL2KEEP = 0x12,
+ PSTL2STRM = 0x13,
+ PSTL3KEEP = 0x14,
+ PSTL3STRM = 0x15
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
+
+enum InstructionCacheOp {
+ IVAU = ((0x3 << SysOp1_offset) |
+ (0x7 << CRn_offset) |
+ (0x5 << CRm_offset) |
+ (0x1 << SysOp2_offset)) >> SysOp_offset
+};
+
+enum DataCacheOp {
+ CVAC = ((0x3 << SysOp1_offset) |
+ (0x7 << CRn_offset) |
+ (0xa << CRm_offset) |
+ (0x1 << SysOp2_offset)) >> SysOp_offset,
+ CVAU = ((0x3 << SysOp1_offset) |
+ (0x7 << CRn_offset) |
+ (0xb << CRm_offset) |
+ (0x1 << SysOp2_offset)) >> SysOp_offset,
+ CIVAC = ((0x3 << SysOp1_offset) |
+ (0x7 << CRn_offset) |
+ (0xe << CRm_offset) |
+ (0x1 << SysOp2_offset)) >> SysOp_offset,
+ ZVA = ((0x3 << SysOp1_offset) |
+ (0x7 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x1 << SysOp2_offset)) >> SysOp_offset
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+enum NEONFormatField {
+ NEONFormatFieldMask = 0x40C00000,
+ NEON_Q = 0x40000000,
+ NEON_8B = 0x00000000,
+ NEON_16B = NEON_8B | NEON_Q,
+ NEON_4H = 0x00400000,
+ NEON_8H = NEON_4H | NEON_Q,
+ NEON_2S = 0x00800000,
+ NEON_4S = NEON_2S | NEON_Q,
+ NEON_1D = 0x00C00000,
+ NEON_2D = 0x00C00000 | NEON_Q
+};
+
+enum NEONFPFormatField {
+ NEONFPFormatFieldMask = 0x40400000,
+ NEON_FP_2S = FP32,
+ NEON_FP_4S = FP32 | NEON_Q,
+ NEON_FP_2D = FP64 | NEON_Q
+};
+
+enum NEONLSFormatField {
+ NEONLSFormatFieldMask = 0x40000C00,
+ LS_NEON_8B = 0x00000000,
+ LS_NEON_16B = LS_NEON_8B | NEON_Q,
+ LS_NEON_4H = 0x00000400,
+ LS_NEON_8H = LS_NEON_4H | NEON_Q,
+ LS_NEON_2S = 0x00000800,
+ LS_NEON_4S = LS_NEON_2S | NEON_Q,
+ LS_NEON_1D = 0x00000C00,
+ LS_NEON_2D = LS_NEON_1D | NEON_Q
+};
+
+enum NEONScalarFormatField {
+ NEONScalarFormatFieldMask = 0x00C00000,
+ NEONScalar = 0x10000000,
+ NEON_B = 0x00000000,
+ NEON_H = 0x00400000,
+ NEON_S = 0x00800000,
+ NEON_D = 0x00C00000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended).
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
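+//
+// Illustrative sketch only (not part of the original header): given a raw
+// 32-bit instruction word `instr`, each group's Fixed/FMask/Mask triplet is
+// intended to be used along these lines:
+//
+//   bool is_sysreg = (instr & SystemSysRegFMask) == SystemSysRegFixed;
+//   SystemSysRegOp op = static_cast<SystemSysRegOp>(instr & SystemSysRegMask);
+//
+// where `op` then compares equal to MRS or MSR.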
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+enum SystemSysOp {
+ SystemSysFixed = 0xD5080000,
+ SystemSysFMask = 0xFFF80000,
+ SystemSysMask = 0xFFF80000,
+ SYS = SystemSysFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+enum SystemExclusiveMonitorOp {
+ SystemExclusiveMonitorFixed = 0xD503305F,
+ SystemExclusiveMonitorFMask = 0xFFFFF0FF,
+ SystemExclusiveMonitorMask = 0xFFFFF0FF,
+ CLREX = SystemExclusiveMonitorFixed
+};
+
+// Any load or store.
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000), \
+ V(STP, q, 0x84000000), \
+ V(LDP, q, 0x84400000)
+
+// Load/store pair (post-index, pre-index and offset).
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
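+
+// Illustrative expansion (not part of the original header): with the
+// LOAD_STORE_PAIR definition above, LOAD_STORE_PAIR_OP_LIST produces plain
+// opcode values such as
+//
+//   STP_w = 0x00000000,  LDP_w = 0x00400000,
+//   STP_x = 0x80000000,  LDP_x = 0x80400000,
+//
+// and the addressing-mode variants below OR these into their class pattern,
+// e.g. LDP_x_post = LoadStorePairPostIndexFixed | LDP_x
+//                 = 0x28800000 | 0x80400000 = 0xA8C00000.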
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ LoadStorePairNonTemporalLBit = 1 << 22,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d,
+ STNP_q = LoadStorePairNonTemporalFixed | STP_q,
+ LDNP_q = LoadStorePairNonTemporalFixed | LDP_q
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000,
+ LDR_q_lit = LoadLiteralFixed | 0x84000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, b, 0x04000000), \
+ V(ST, R, h, 0x44000000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(ST, R, q, 0x04800000), \
+ V(LD, R, b, 0x04400000), \
+ V(LD, R, h, 0x44400000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000), \
+ V(LD, R, q, 0x04C00000)
+
+// Load/store (post-index, pre-index, offset and unsigned offset).
+enum LoadStoreOp {
+ LoadStoreMask = 0xC4C00000,
+ LoadStoreVMask = 0x04000000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
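+
+// Illustrative expansion (not part of the original header): with the
+// LOAD_STORE definition above, LOAD_STORE_OP_LIST produces entries such as
+//
+//   STRB_w = 0x00000000,  LDRB_w = 0x00400000,
+//   STR_x  = 0xC0000000,  LDR_x  = 0xC0400000,
+//
+// and the addressing-mode enums that follow combine the same raw values with
+// their class pattern, e.g.
+//
+//   LDR_x_post = LoadStorePostIndexFixed | 0xC0400000 = 0xF8400400.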
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+enum LoadStoreExclusive {
+ LoadStoreExclusiveFixed = 0x08000000,
+ LoadStoreExclusiveFMask = 0x3F000000,
+ LoadStoreExclusiveMask = 0xFFE08000,
+ STXRB_w = LoadStoreExclusiveFixed | 0x00000000,
+ STXRH_w = LoadStoreExclusiveFixed | 0x40000000,
+ STXR_w = LoadStoreExclusiveFixed | 0x80000000,
+ STXR_x = LoadStoreExclusiveFixed | 0xC0000000,
+ LDXRB_w = LoadStoreExclusiveFixed | 0x00400000,
+ LDXRH_w = LoadStoreExclusiveFixed | 0x40400000,
+ LDXR_w = LoadStoreExclusiveFixed | 0x80400000,
+ LDXR_x = LoadStoreExclusiveFixed | 0xC0400000,
+ STXP_w = LoadStoreExclusiveFixed | 0x80200000,
+ STXP_x = LoadStoreExclusiveFixed | 0xC0200000,
+ LDXP_w = LoadStoreExclusiveFixed | 0x80600000,
+ LDXP_x = LoadStoreExclusiveFixed | 0xC0600000,
+ STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
+ STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
+ STLXR_w = LoadStoreExclusiveFixed | 0x80008000,
+ STLXR_x = LoadStoreExclusiveFixed | 0xC0008000,
+ LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
+ LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
+ LDAXR_w = LoadStoreExclusiveFixed | 0x80408000,
+ LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000,
+ STLXP_w = LoadStoreExclusiveFixed | 0x80208000,
+ STLXP_x = LoadStoreExclusiveFixed | 0xC0208000,
+ LDAXP_w = LoadStoreExclusiveFixed | 0x80608000,
+ LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000,
+ STLRB_w = LoadStoreExclusiveFixed | 0x00808000,
+ STLRH_w = LoadStoreExclusiveFixed | 0x40808000,
+ STLR_w = LoadStoreExclusiveFixed | 0x80808000,
+ STLR_x = LoadStoreExclusiveFixed | 0xC0808000,
+ LDARB_w = LoadStoreExclusiveFixed | 0x00C08000,
+ LDARH_w = LoadStoreExclusiveFixed | 0x40C08000,
+ LDAR_w = LoadStoreExclusiveFixed | 0x80C08000,
+ LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE = FCMPE_s,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018,
+ FCMPE_zero = FCMPE_s_zero
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000,
+ FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000,
+ FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000,
+ FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
+ FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Crypto - two register SHA.
+enum Crypto2RegSHAOp {
+ Crypto2RegSHAFixed = 0x5E280800,
+ Crypto2RegSHAFMask = 0xFF3E0C00
+};
+
+// Crypto - three register SHA.
+enum Crypto3RegSHAOp {
+ Crypto3RegSHAFixed = 0x5E000000,
+ Crypto3RegSHAFMask = 0xFF208C00
+};
+
+// Crypto - AES.
+enum CryptoAESOp {
+ CryptoAESFixed = 0x4E280800,
+ CryptoAESFMask = 0xFF3E0C00
+};
+
+// NEON instructions with two register operands.
+enum NEON2RegMiscOp {
+ NEON2RegMiscFixed = 0x0E200800,
+ NEON2RegMiscFMask = 0x9F3E0C00,
+ NEON2RegMiscMask = 0xBF3FFC00,
+ NEON2RegMiscUBit = 0x20000000,
+ NEON_REV64 = NEON2RegMiscFixed | 0x00000000,
+ NEON_REV32 = NEON2RegMiscFixed | 0x20000000,
+ NEON_REV16 = NEON2RegMiscFixed | 0x00001000,
+ NEON_SADDLP = NEON2RegMiscFixed | 0x00002000,
+ NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit,
+ NEON_SUQADD = NEON2RegMiscFixed | 0x00003000,
+ NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit,
+ NEON_CLS = NEON2RegMiscFixed | 0x00004000,
+ NEON_CLZ = NEON2RegMiscFixed | 0x20004000,
+ NEON_CNT = NEON2RegMiscFixed | 0x00005000,
+ NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000,
+ NEON_SADALP = NEON2RegMiscFixed | 0x00006000,
+ NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit,
+ NEON_SQABS = NEON2RegMiscFixed | 0x00007000,
+ NEON_SQNEG = NEON2RegMiscFixed | 0x20007000,
+ NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000,
+ NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000,
+ NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000,
+ NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000,
+ NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000,
+ NEON_ABS = NEON2RegMiscFixed | 0x0000B000,
+ NEON_NEG = NEON2RegMiscFixed | 0x2000B000,
+ NEON_XTN = NEON2RegMiscFixed | 0x00012000,
+ NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000,
+ NEON_SHLL = NEON2RegMiscFixed | 0x20013000,
+ NEON_SQXTN = NEON2RegMiscFixed | 0x00014000,
+ NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit,
+
+ NEON2RegMiscOpcode = 0x0001F000,
+ NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode,
+ NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode,
+ NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode,
+ NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode,
+
+ // These instructions use only one bit of the size field. The other bit is
+ // used to distinguish between instructions.
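+ // For example (illustrative note, not from the original header): NEON_FRINTM
+ // (opcode bits 0x00019000) and NEON_FRINTZ (0x00819000) share the same
+ // opcode and differ only in bit 23 (0x00800000), which is why
+ // NEON2RegMiscFPMask adds that bit to the class mask while bit 22 is left
+ // free to select the element size.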
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000,
+ NEON_FABS = NEON2RegMiscFixed | 0x0080F000,
+ NEON_FNEG = NEON2RegMiscFixed | 0x2080F000,
+ NEON_FCVTN = NEON2RegMiscFixed | 0x00016000,
+ NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000,
+ NEON_FCVTL = NEON2RegMiscFixed | 0x00017000,
+ NEON_FRINTN = NEON2RegMiscFixed | 0x00018000,
+ NEON_FRINTA = NEON2RegMiscFixed | 0x20018000,
+ NEON_FRINTP = NEON2RegMiscFixed | 0x00818000,
+ NEON_FRINTM = NEON2RegMiscFixed | 0x00019000,
+ NEON_FRINTX = NEON2RegMiscFixed | 0x20019000,
+ NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000,
+ NEON_FRINTI = NEON2RegMiscFixed | 0x20819000,
+ NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000,
+ NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit,
+ NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000,
+ NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit,
+ NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000,
+ NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit,
+ NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000,
+ NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit,
+ NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000,
+ NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit,
+ NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000,
+ NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000,
+ NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit,
+ NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000,
+ NEON_URECPE = NEON2RegMiscFixed | 0x0081C000,
+ NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000,
+ NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000,
+ NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000,
+ NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000,
+ NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000,
+ NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000,
+ NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000,
+
+ NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode,
+ NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode
+};
+
+// NEON instructions with three same-type operands.
+enum NEON3SameOp {
+ NEON3SameFixed = 0x0E200400,
+ NEON3SameFMask = 0x9F200400,
+ NEON3SameMask = 0xBF20FC00,
+ NEON3SameUBit = 0x20000000,
+ NEON_ADD = NEON3SameFixed | 0x00008000,
+ NEON_ADDP = NEON3SameFixed | 0x0000B800,
+ NEON_SHADD = NEON3SameFixed | 0x00000000,
+ NEON_SHSUB = NEON3SameFixed | 0x00002000,
+ NEON_SRHADD = NEON3SameFixed | 0x00001000,
+ NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800,
+ NEON_CMGE = NEON3SameFixed | 0x00003800,
+ NEON_CMGT = NEON3SameFixed | 0x00003000,
+ NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT,
+ NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE,
+ NEON_CMTST = NEON3SameFixed | 0x00008800,
+ NEON_MLA = NEON3SameFixed | 0x00009000,
+ NEON_MLS = NEON3SameFixed | 0x20009000,
+ NEON_MUL = NEON3SameFixed | 0x00009800,
+ NEON_PMUL = NEON3SameFixed | 0x20009800,
+ NEON_SRSHL = NEON3SameFixed | 0x00005000,
+ NEON_SQSHL = NEON3SameFixed | 0x00004800,
+ NEON_SQRSHL = NEON3SameFixed | 0x00005800,
+ NEON_SSHL = NEON3SameFixed | 0x00004000,
+ NEON_SMAX = NEON3SameFixed | 0x00006000,
+ NEON_SMAXP = NEON3SameFixed | 0x0000A000,
+ NEON_SMIN = NEON3SameFixed | 0x00006800,
+ NEON_SMINP = NEON3SameFixed | 0x0000A800,
+ NEON_SABD = NEON3SameFixed | 0x00007000,
+ NEON_SABA = NEON3SameFixed | 0x00007800,
+ NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD,
+ NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA,
+ NEON_SQADD = NEON3SameFixed | 0x00000800,
+ NEON_SQSUB = NEON3SameFixed | 0x00002800,
+ NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000,
+ NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD,
+ NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB,
+ NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD,
+ NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX,
+ NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP,
+ NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN,
+ NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP,
+ NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL,
+ NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD,
+ NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL,
+ NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL,
+ NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB,
+ NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL,
+ NEON_SQDMULH = NEON3SameFixed | 0x0000B000,
+ NEON_SQRDMULH = NEON3SameFixed | 0x2000B000,
+
+ // NEON floating point instructions with three same-type operands.
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000,
+ NEON3SameFPFMask = NEON3SameFMask | 0x0000C000,
+ NEON3SameFPMask = NEON3SameMask | 0x00800000,
+ NEON_FADD = NEON3SameFixed | 0x0000D000,
+ NEON_FSUB = NEON3SameFixed | 0x0080D000,
+ NEON_FMUL = NEON3SameFixed | 0x2000D800,
+ NEON_FDIV = NEON3SameFixed | 0x2000F800,
+ NEON_FMAX = NEON3SameFixed | 0x0000F000,
+ NEON_FMAXNM = NEON3SameFixed | 0x0000C000,
+ NEON_FMAXP = NEON3SameFixed | 0x2000F000,
+ NEON_FMAXNMP = NEON3SameFixed | 0x2000C000,
+ NEON_FMIN = NEON3SameFixed | 0x0080F000,
+ NEON_FMINNM = NEON3SameFixed | 0x0080C000,
+ NEON_FMINP = NEON3SameFixed | 0x2080F000,
+ NEON_FMINNMP = NEON3SameFixed | 0x2080C000,
+ NEON_FMLA = NEON3SameFixed | 0x0000C800,
+ NEON_FMLS = NEON3SameFixed | 0x0080C800,
+ NEON_FMULX = NEON3SameFixed | 0x0000D800,
+ NEON_FRECPS = NEON3SameFixed | 0x0000F800,
+ NEON_FRSQRTS = NEON3SameFixed | 0x0080F800,
+ NEON_FABD = NEON3SameFixed | 0x2080D000,
+ NEON_FADDP = NEON3SameFixed | 0x2000D000,
+ NEON_FCMEQ = NEON3SameFixed | 0x0000E000,
+ NEON_FCMGE = NEON3SameFixed | 0x2000E000,
+ NEON_FCMGT = NEON3SameFixed | 0x2080E000,
+ NEON_FACGE = NEON3SameFixed | 0x2000E800,
+ NEON_FACGT = NEON3SameFixed | 0x2080E800,
+
+ // NEON logical instructions with three same-type operands.
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+ NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+ NEON3SameLogicalMask = 0xBFE0FC00,
+ NEON3SameLogicalFormatMask = NEON_Q,
+ NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+ NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+ NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+ NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+ NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+ NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+ NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000
+};
+
+// NEON instructions with three different-type operands.
+enum NEON3DifferentOp {
+ NEON3DifferentFixed = 0x0E200000,
+ NEON3DifferentFMask = 0x9F200C00,
+ NEON3DifferentMask = 0xFF20FC00,
+ NEON_ADDHN = NEON3DifferentFixed | 0x00004000,
+ NEON_ADDHN2 = NEON_ADDHN | NEON_Q,
+ NEON_PMULL = NEON3DifferentFixed | 0x0000E000,
+ NEON_PMULL2 = NEON_PMULL | NEON_Q,
+ NEON_RADDHN = NEON3DifferentFixed | 0x20004000,
+ NEON_RADDHN2 = NEON_RADDHN | NEON_Q,
+ NEON_RSUBHN = NEON3DifferentFixed | 0x20006000,
+ NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q,
+ NEON_SABAL = NEON3DifferentFixed | 0x00005000,
+ NEON_SABAL2 = NEON_SABAL | NEON_Q,
+ NEON_SABDL = NEON3DifferentFixed | 0x00007000,
+ NEON_SABDL2 = NEON_SABDL | NEON_Q,
+ NEON_SADDL = NEON3DifferentFixed | 0x00000000,
+ NEON_SADDL2 = NEON_SADDL | NEON_Q,
+ NEON_SADDW = NEON3DifferentFixed | 0x00001000,
+ NEON_SADDW2 = NEON_SADDW | NEON_Q,
+ NEON_SMLAL = NEON3DifferentFixed | 0x00008000,
+ NEON_SMLAL2 = NEON_SMLAL | NEON_Q,
+ NEON_SMLSL = NEON3DifferentFixed | 0x0000A000,
+ NEON_SMLSL2 = NEON_SMLSL | NEON_Q,
+ NEON_SMULL = NEON3DifferentFixed | 0x0000C000,
+ NEON_SMULL2 = NEON_SMULL | NEON_Q,
+ NEON_SSUBL = NEON3DifferentFixed | 0x00002000,
+ NEON_SSUBL2 = NEON_SSUBL | NEON_Q,
+ NEON_SSUBW = NEON3DifferentFixed | 0x00003000,
+ NEON_SSUBW2 = NEON_SSUBW | NEON_Q,
+ NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000,
+ NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q,
+ NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000,
+ NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q,
+ NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000,
+ NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q,
+ NEON_SUBHN = NEON3DifferentFixed | 0x00006000,
+ NEON_SUBHN2 = NEON_SUBHN | NEON_Q,
+ NEON_UABAL = NEON_SABAL | NEON3SameUBit,
+ NEON_UABAL2 = NEON_UABAL | NEON_Q,
+ NEON_UABDL = NEON_SABDL | NEON3SameUBit,
+ NEON_UABDL2 = NEON_UABDL | NEON_Q,
+ NEON_UADDL = NEON_SADDL | NEON3SameUBit,
+ NEON_UADDL2 = NEON_UADDL | NEON_Q,
+ NEON_UADDW = NEON_SADDW | NEON3SameUBit,
+ NEON_UADDW2 = NEON_UADDW | NEON_Q,
+ NEON_UMLAL = NEON_SMLAL | NEON3SameUBit,
+ NEON_UMLAL2 = NEON_UMLAL | NEON_Q,
+ NEON_UMLSL = NEON_SMLSL | NEON3SameUBit,
+ NEON_UMLSL2 = NEON_UMLSL | NEON_Q,
+ NEON_UMULL = NEON_SMULL | NEON3SameUBit,
+ NEON_UMULL2 = NEON_UMULL | NEON_Q,
+ NEON_USUBL = NEON_SSUBL | NEON3SameUBit,
+ NEON_USUBL2 = NEON_USUBL | NEON_Q,
+ NEON_USUBW = NEON_SSUBW | NEON3SameUBit,
+ NEON_USUBW2 = NEON_USUBW | NEON_Q
+};
+
+// NEON instructions operating across vectors.
+enum NEONAcrossLanesOp {
+ NEONAcrossLanesFixed = 0x0E300800,
+ NEONAcrossLanesFMask = 0x9F3E0C00,
+ NEONAcrossLanesMask = 0xBF3FFC00,
+ NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000,
+ NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000,
+ NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000,
+ NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000,
+ NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000,
+ NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000,
+ NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000,
+
+ // NEON floating point across-lanes instructions.
+ NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000,
+ NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000,
+ NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x00800000,
+
+ NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000,
+ NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000,
+ NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+ NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp {
+ NEONByIndexedElementFixed = 0x0F000000,
+ NEONByIndexedElementFMask = 0x9F000400,
+ NEONByIndexedElementMask = 0xBF00F400,
+ NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000,
+ NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000,
+ NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000,
+ NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+ NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+ NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+ NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+ NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+ NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+ NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+ NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+ NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+ NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000,
+ NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+
+ // Floating point instructions.
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000,
+ NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
+ NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
+ NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000
+};
+
+// NEON register copy.
+enum NEONCopyOp {
+ NEONCopyFixed = 0x0E000400,
+ NEONCopyFMask = 0x9FE08400,
+ NEONCopyMask = 0x3FE08400,
+ NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+ NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+ NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+ NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+ NEONCopyUmovMask = NEONCopyMask | 0x20007800,
+ NEONCopySmovMask = NEONCopyMask | 0x20007800,
+ NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000,
+ NEON_INS_GENERAL = NEONCopyFixed | 0x40001800,
+ NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000,
+ NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800,
+ NEON_SMOV = NEONCopyFixed | 0x00002800,
+ NEON_UMOV = NEONCopyFixed | 0x00003800
+};
+
+// NEON extract.
+enum NEONExtractOp {
+ NEONExtractFixed = 0x2E000000,
+ NEONExtractFMask = 0xBF208400,
+ NEONExtractMask = 0xBFE08400,
+ NEON_EXT = NEONExtractFixed | 0x00000000
+};
+
+enum NEONLoadStoreMultiOp {
+ NEONLoadStoreMultiL = 0x00400000,
+ NEONLoadStoreMulti1_1v = 0x00007000,
+ NEONLoadStoreMulti1_2v = 0x0000A000,
+ NEONLoadStoreMulti1_3v = 0x00006000,
+ NEONLoadStoreMulti1_4v = 0x00002000,
+ NEONLoadStoreMulti2 = 0x00008000,
+ NEONLoadStoreMulti3 = 0x00004000,
+ NEONLoadStoreMulti4 = 0x00000000
+};
+
+// NEON load/store multiple structures.
+enum NEONLoadStoreMultiStructOp {
+ NEONLoadStoreMultiStructFixed = 0x0C000000,
+ NEONLoadStoreMultiStructFMask = 0xBFBF0000,
+ NEONLoadStoreMultiStructMask = 0xBFFFF000,
+ NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed,
+ NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed |
+ NEONLoadStoreMultiL,
+ NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v,
+ NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v,
+ NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v,
+ NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v,
+ NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2,
+ NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3,
+ NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4,
+ NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v,
+ NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v,
+ NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v,
+ NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v,
+ NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2,
+ NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3,
+ NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4
+};
+
+// NEON load/store multiple structures with post-index addressing.
+enum NEONLoadStoreMultiStructPostIndexOp {
+ NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
+ NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
+ NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
+ NEONLoadStoreMultiStructPostIndex = 0x00800000,
+ NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
+};
+
+enum NEONLoadStoreSingleOp {
+ NEONLoadStoreSingle1 = 0x00000000,
+ NEONLoadStoreSingle2 = 0x00200000,
+ NEONLoadStoreSingle3 = 0x00002000,
+ NEONLoadStoreSingle4 = 0x00202000,
+ NEONLoadStoreSingleL = 0x00400000,
+ NEONLoadStoreSingle_b = 0x00000000,
+ NEONLoadStoreSingle_h = 0x00004000,
+ NEONLoadStoreSingle_s = 0x00008000,
+ NEONLoadStoreSingle_d = 0x00008400,
+ NEONLoadStoreSingleAllLanes = 0x0000C000,
+ NEONLoadStoreSingleLenMask = 0x00202000
+};
+
+// NEON load/store single structure.
+enum NEONLoadStoreSingleStructOp {
+ NEONLoadStoreSingleStructFixed = 0x0D000000,
+ NEONLoadStoreSingleStructFMask = 0xBF9F0000,
+ NEONLoadStoreSingleStructMask = 0xBFFFE000,
+ NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed |
+ NEONLoadStoreSingleL,
+ NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructFixed,
+ NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b,
+ NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h,
+ NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s,
+ NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d,
+ NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes,
+ NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b,
+ NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h,
+ NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s,
+ NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d,
+
+ NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b,
+ NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h,
+ NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s,
+ NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d,
+ NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes,
+ NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b,
+ NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h,
+ NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s,
+ NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d,
+
+ NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b,
+ NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h,
+ NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s,
+ NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d,
+ NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes,
+ NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b,
+ NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h,
+ NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s,
+ NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d,
+
+ NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b,
+ NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h,
+ NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s,
+ NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d,
+ NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes,
+ NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b,
+ NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h,
+ NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s,
+ NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d
+};
+
+// NEON load/store single structure with post-index addressing.
+enum NEONLoadStoreSingleStructPostIndexOp {
+ NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
+ NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
+ NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
+ NEONLoadStoreSingleStructPostIndex = 0x00800000,
+ NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex
+};
+
+// NEON modified immediate.
+enum NEONModifiedImmediateOp {
+ NEONModifiedImmediateFixed = 0x0F000400,
+ NEONModifiedImmediateFMask = 0x9FF80400,
+ NEONModifiedImmediateOpBit = 0x20000000,
+ NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000,
+ NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000,
+ NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000,
+ NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000
+};
+
+// NEON shift immediate.
+enum NEONShiftImmediateOp {
+ NEONShiftImmediateFixed = 0x0F000400,
+ NEONShiftImmediateFMask = 0x9F800400,
+ NEONShiftImmediateMask = 0xBF80FC00,
+ NEONShiftImmediateUBit = 0x20000000,
+ NEON_SHL = NEONShiftImmediateFixed | 0x00005000,
+ NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000,
+ NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000,
+ NEON_SLI = NEONShiftImmediateFixed | 0x20005000,
+ NEON_SRI = NEONShiftImmediateFixed | 0x20004000,
+ NEON_SHRN = NEONShiftImmediateFixed | 0x00008000,
+ NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800,
+ NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000,
+ NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800,
+ NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000,
+ NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800,
+ NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000,
+ NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800,
+ NEON_SSHR = NEONShiftImmediateFixed | 0x00000000,
+ NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000,
+ NEON_USHR = NEONShiftImmediateFixed | 0x20000000,
+ NEON_URSHR = NEONShiftImmediateFixed | 0x20002000,
+ NEON_SSRA = NEONShiftImmediateFixed | 0x00001000,
+ NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000,
+ NEON_USRA = NEONShiftImmediateFixed | 0x20001000,
+ NEON_URSRA = NEONShiftImmediateFixed | 0x20003000,
+ NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000,
+ NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000,
+ NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000,
+ NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800,
+ NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800,
+ NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000,
+ NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000
+};
+
+// NEON table.
+enum NEONTableOp {
+ NEONTableFixed = 0x0E000000,
+ NEONTableFMask = 0xBF208C00,
+ NEONTableExt = 0x00001000,
+ NEONTableMask = 0xBF20FC00,
+ NEON_TBL_1v = NEONTableFixed | 0x00000000,
+ NEON_TBL_2v = NEONTableFixed | 0x00002000,
+ NEON_TBL_3v = NEONTableFixed | 0x00004000,
+ NEON_TBL_4v = NEONTableFixed | 0x00006000,
+ NEON_TBX_1v = NEON_TBL_1v | NEONTableExt,
+ NEON_TBX_2v = NEON_TBL_2v | NEONTableExt,
+ NEON_TBX_3v = NEON_TBL_3v | NEONTableExt,
+ NEON_TBX_4v = NEON_TBL_4v | NEONTableExt
+};
+
+// NEON perm.
+enum NEONPermOp {
+ NEONPermFixed = 0x0E000800,
+ NEONPermFMask = 0xBF208C00,
+ NEONPermMask = 0x3F20FC00,
+ NEON_UZP1 = NEONPermFixed | 0x00001000,
+ NEON_TRN1 = NEONPermFixed | 0x00002000,
+ NEON_ZIP1 = NEONPermFixed | 0x00003000,
+ NEON_UZP2 = NEONPermFixed | 0x00005000,
+ NEON_TRN2 = NEONPermFixed | 0x00006000,
+ NEON_ZIP2 = NEONPermFixed | 0x00007000
+};
+
+// NEON scalar instructions with two register operands.
+enum NEONScalar2RegMiscOp {
+ NEONScalar2RegMiscFixed = 0x5E200800,
+ NEONScalar2RegMiscFMask = 0xDF3E0C00,
+ NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
+ NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero,
+ NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero,
+ NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero,
+ NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero,
+ NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero,
+ NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS,
+ NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS,
+ NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG,
+ NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG,
+ NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN,
+ NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN,
+ NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN,
+ NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD,
+ NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD,
+
+ NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode,
+ NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode,
+
+ NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000,
+ NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE,
+ NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE,
+ NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF,
+ NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF,
+ NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero,
+ NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero,
+ NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero,
+ NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero,
+ NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero,
+ NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000,
+ NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS,
+ NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU,
+ NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS,
+ NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU,
+ NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS,
+ NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU,
+ NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS,
+ NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU,
+ NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS,
+ NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU,
+ NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN
+};
+
+// NEON scalar instructions with three same-type operands.
+enum NEONScalar3SameOp {
+ NEONScalar3SameFixed = 0x5E200400,
+ NEONScalar3SameFMask = 0xDF200400,
+ NEONScalar3SameMask = 0xFF20FC00,
+ NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD,
+ NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ,
+ NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE,
+ NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT,
+ NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI,
+ NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS,
+ NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST,
+ NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB,
+ NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD,
+ NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD,
+ NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB,
+ NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB,
+ NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL,
+ NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL,
+ NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL,
+ NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL,
+ NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL,
+ NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL,
+ NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL,
+ NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL,
+ NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH,
+ NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH,
+
+ // NEON floating point scalar instructions with three same-type operands.
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000,
+ NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000,
+ NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000,
+ NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE,
+ NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT,
+ NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ,
+ NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE,
+ NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT,
+ NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX,
+ NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS,
+ NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS,
+ NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD
+};
+
+// NEON scalar instructions with three different-type operands.
+enum NEONScalar3DiffOp {
+ NEONScalar3DiffFixed = 0x5E200000,
+ NEONScalar3DiffFMask = 0xDF200C00,
+ NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
+ NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL,
+ NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL,
+ NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL
+};
+
+// NEON scalar instructions with indexed element operand.
+enum NEONScalarByIndexedElementOp {
+ NEONScalarByIndexedElementFixed = 0x5F000000,
+ NEONScalarByIndexedElementFMask = 0xDF000400,
+ NEONScalarByIndexedElementMask = 0xFF00F400,
+ NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement,
+ NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement,
+ NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement,
+ NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement,
+ NEON_SQRDMULH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement,
+
+ // Floating point instructions.
+ NEONScalarByIndexedElementFPFixed
+ = NEONScalarByIndexedElementFixed | 0x00800000,
+ NEONScalarByIndexedElementFPMask
+ = NEONScalarByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement,
+ NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement,
+ NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement,
+ NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement
+};
+
+// NEON scalar register copy.
+enum NEONScalarCopyOp {
+ NEONScalarCopyFixed = 0x5E000400,
+ NEONScalarCopyFMask = 0xDFE08400,
+ NEONScalarCopyMask = 0xFFE0FC00,
+ NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT
+};
+
+// NEON scalar pairwise instructions.
+enum NEONScalarPairwiseOp {
+ NEONScalarPairwiseFixed = 0x5E300800,
+ NEONScalarPairwiseFMask = 0xDF3E0C00,
+ NEONScalarPairwiseMask = 0xFFB1F800,
+ NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000,
+ NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000,
+ NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000,
+ NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000,
+ NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000,
+ NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000
+};
+
+// NEON scalar shift immediate.
+enum NEONScalarShiftImmediateOp {
+ NEONScalarShiftImmediateFixed = 0x5F000400,
+ NEONScalarShiftImmediateFMask = 0xDF800400,
+ NEONScalarShiftImmediateMask = 0xFF80FC00,
+ NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL,
+ NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI,
+ NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI,
+ NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR,
+ NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR,
+ NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR,
+ NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR,
+ NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA,
+ NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA,
+ NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA,
+ NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA,
+ NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN,
+ NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN,
+ NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN,
+ NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN,
+ NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN,
+ NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN,
+ NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU,
+ NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm,
+ NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm,
+ NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm,
+ NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm,
+ NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm,
+ NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
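+
+// Note on the naming convention used above: for an instruction group G,
+// G##Fixed holds the value of the group's fixed bits, G##FMask selects which
+// bits are fixed, and G##Mask selects the bits that identify a particular
+// instruction within the group. An instruction word 'instr' is treated as
+// belonging to G when (instr & G##FMask) == G##Fixed.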
+
+} // namespace vixl
+
+#endif // VIXL_A64_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
new file mode 100644
index 000000000..804f0cad1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
@@ -0,0 +1,170 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+// Initialise to the smallest possible cache line size.
+unsigned CPU::dcache_line_size_ = 1;
+unsigned CPU::icache_line_size_ = 1;
+
+
+// Currently computes I and D cache line size.
+void CPU::SetUp() {
+ uint32_t cache_type_register = GetCacheType();
+
+  // The cache type register holds information about the caches, including the
+  // I and D cache line sizes.
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+  // The cache type register holds the I and D cache line sizes in words
+  // (4-byte units), encoded as a power of two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 4 << dcache_line_size_power_of_two;
+ icache_line_size_ = 4 << icache_line_size_power_of_two;
+}
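+
+// Worked example (hypothetical register value, for illustration only): if
+// CTR_EL0 reports a DminLine field of 4 and an IminLine field of 2, SetUp()
+// computes dcache_line_size_ = 4 << 4 = 64 bytes and
+// icache_line_size_ = 4 << 2 = 16 bytes; the fields encode line sizes in
+// 4-byte words as a power of two.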
+
+
+uint32_t CPU::GetCacheType() {
+#ifdef __aarch64__
+ uint64_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ VIXL_ASSERT(is_uint32(cache_type_register));
+ return cache_type_register;
+#else
+  // Returning 0 makes SetUp() assume the smallest possible (4-byte) cache
+  // lines, which is fine since neither EnsureIAndDCacheCoherency nor the
+  // simulator needs this information when the host is not AArch64.
+ return 0;
+#endif
+}
+
+
+void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
+#ifdef __aarch64__
+ // Implement the cache synchronisation for all targets where AArch64 is the
+  // host, even if we're building the simulator for an AArch64 host. This
+ // allows for cases where the user wants to simulate code as well as run it
+ // natively.
+
+ if (length == 0) {
+ return;
+ }
+
+ // The code below assumes user space cache operations are allowed.
+
+ // Work out the line sizes for each cache, and use them to determine the
+ // start addresses.
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
+ uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
+ uintptr_t dline = start & ~(dsize - 1);
+ uintptr_t iline = start & ~(isize - 1);
+
+ // Cache line sizes are always a power of 2.
+ VIXL_ASSERT(IsPowerOf2(dsize));
+ VIXL_ASSERT(IsPowerOf2(isize));
+ uintptr_t end = start + length;
+
+ do {
+ __asm__ __volatile__ (
+ // Clean each line of the D cache containing the target data.
+ //
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ " dc cvau, %[dline]\n"
+ :
+ : [dline] "r" (dline)
+ // This code does not write to memory, but the "memory" dependency
+ // prevents GCC from reordering the code.
+ : "memory");
+ dline += dsize;
+ } while (dline < end);
+
+ __asm__ __volatile__ (
+ // Make sure that the data cache operations (above) complete before the
+ // instruction cache operations (below).
+ //
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ //
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ " dsb ish\n"
+ : : : "memory");
+
+ do {
+ __asm__ __volatile__ (
+ // Invalidate each line of the I cache containing the target data.
+ //
+ // ic : Instruction Cache maintenance
+ // i : Invalidate
+ // va : by Address
+ // u : to the point of Unification
+ " ic ivau, %[iline]\n"
+ :
+ : [iline] "r" (iline)
+ : "memory");
+ iline += isize;
+ } while (iline < end);
+
+ __asm__ __volatile__ (
+ // Make sure that the instruction cache operations (above) take effect
+ // before the isb (below).
+ " dsb ish\n"
+
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the new data.
+ // isb : Instruction Synchronisation Barrier
+ " isb\n"
+ : : : "memory");
+#else
+ // If the host isn't AArch64, we must be using the simulator, so this function
+ // doesn't have to do anything.
+ USE(address, length);
+#endif
+}
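+
+// Minimal usage sketch (assumed caller code, not part of VIXL): after writing
+// freshly generated instructions into a buffer, the range must be flushed
+// before it is executed, e.g.
+//
+//   void* code = ...;        // start of the newly written code
+//   size_t length = ...;     // size of the written region in bytes
+//   CPU::EnsureIAndDCacheCoherency(code, length);
+//   // Only after this call is it safe to branch to 'code'.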
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.h b/js/src/jit/arm64/vixl/Cpu-vixl.h
new file mode 100644
index 000000000..57ac65f61
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.h
@@ -0,0 +1,83 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_A64_H
+#define VIXL_CPU_A64_H
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+namespace vixl {
+
+class CPU {
+ public:
+ // Initialise CPU support.
+ static void SetUp();
+
+ // Ensures the data at a given address and with a given size is the same for
+ // the I and D caches. I and D caches are not automatically coherent on ARM
+ // so this operation is required before any dynamically generated code can
+ // safely run.
+ static void EnsureIAndDCacheCoherency(void *address, size_t length);
+
+ // Handle tagged pointers.
+ template <typename T>
+ static T SetPointerTag(T pointer, uint64_t tag) {
+ VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));
+
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+ return (T)raw;
+ }
+
+ template <typename T>
+ static uint64_t GetPointerTag(T pointer) {
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ return (raw & kAddressTagMask) >> kAddressTagOffset;
+ }
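+
+  // Illustrative round trip, assuming the usual top-byte-ignore layout
+  // (kAddressTagOffset = 56, kAddressTagWidth = 8): SetPointerTag(p, 0x2a)
+  // places 0x2a in the top byte of the pointer, and
+  // GetPointerTag(SetPointerTag(p, 0x2a)) returns 0x2a.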
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_CPU_A64_H
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.cpp b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
new file mode 100644
index 000000000..85097ed5a
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
@@ -0,0 +1,1535 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL ARM LIMITED BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "js-config.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+
+#include "mozilla/Vector.h"
+
+#include "jsalloc.h"
+
+namespace vixl {
+
+// List of commands supported by the debugger.
+#define DEBUG_COMMAND_LIST(C) \
+C(HelpCommand) \
+C(ContinueCommand) \
+C(StepCommand) \
+C(DisasmCommand) \
+C(PrintCommand) \
+C(ExamineCommand)
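+
+// DEBUG_COMMAND_LIST is an X-macro: applying it to a one-argument macro C
+// expands C once per command class. For example, DEBUG_COMMAND_LIST(PRINT_HELP)
+// in HelpCommand::Run prints the help text of every command, and
+// DEBUG_COMMAND_LIST(RETURN_IF_MATCH) in DebugCommand::Parse tries each
+// command's aliases in turn.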
+
+// Debugger command lines are broken up into tokens of different types to make
+// processing easier later on.
+class Token {
+ public:
+ virtual ~Token() {}
+
+ // Token type.
+ virtual bool IsRegister() const { return false; }
+ virtual bool IsFPRegister() const { return false; }
+ virtual bool IsIdentifier() const { return false; }
+ virtual bool IsAddress() const { return false; }
+ virtual bool IsInteger() const { return false; }
+ virtual bool IsFormat() const { return false; }
+ virtual bool IsUnknown() const { return false; }
+ // Token properties.
+ virtual bool CanAddressMemory() const { return false; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const = 0;
+ virtual void Print(FILE* out = stdout) const = 0;
+
+ static Token* Tokenize(const char* arg);
+};
+
+typedef mozilla::Vector<Token*, 0, js::SystemAllocPolicy> TokenVector;
+
+// Tokens often hold one value.
+template<typename T> class ValueToken : public Token {
+ public:
+ explicit ValueToken(T value) : value_(value) {}
+ ValueToken() {}
+
+ T value() const { return value_; }
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ protected:
+ T value_;
+};
+
+// Integer registers (X or W) and their aliases.
+// Format: wn or xn with 0 <= n < 32 or a name in the aliases list.
+class RegisterToken : public ValueToken<const Register> {
+ public:
+ explicit RegisterToken(const Register reg)
+ : ValueToken<const Register>(reg) {}
+
+ virtual bool IsRegister() const { return true; }
+ virtual bool CanAddressMemory() const { return value().Is64Bits(); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const;
+  virtual void Print(FILE* out = stdout) const;
+ const char* Name() const;
+
+ static Token* Tokenize(const char* arg);
+ static RegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsRegister());
+ return reinterpret_cast<RegisterToken*>(tok);
+ }
+
+ private:
+ static const int kMaxAliasNumber = 4;
+ static const char* kXAliases[kNumberOfRegisters][kMaxAliasNumber];
+ static const char* kWAliases[kNumberOfRegisters][kMaxAliasNumber];
+};
+
+// Floating point registers (D or S).
+// Format: sn or dn with 0 <= n < 32.
+class FPRegisterToken : public ValueToken<const FPRegister> {
+ public:
+ explicit FPRegisterToken(const FPRegister fpreg)
+ : ValueToken<const FPRegister>(fpreg) {}
+
+ virtual bool IsFPRegister() const { return true; }
+  virtual void Print(FILE* out = stdout) const;
+
+ static Token* Tokenize(const char* arg);
+ static FPRegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFPRegister());
+ return reinterpret_cast<FPRegisterToken*>(tok);
+ }
+};
+
+
+// Non-register identifiers.
+// Format: Alphanumeric string starting with a letter.
+class IdentifierToken : public ValueToken<char*> {
+ public:
+ explicit IdentifierToken(const char* name) {
+ size_t size = strlen(name) + 1;
+ value_ = (char*)js_malloc(size);
+ strncpy(value_, name, size);
+ }
+ virtual ~IdentifierToken() { js_free(value_); }
+
+ virtual bool IsIdentifier() const { return true; }
+ virtual bool CanAddressMemory() const { return strcmp(value(), "pc") == 0; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const;
+ virtual void Print(FILE* out = stdout) const;
+
+ static Token* Tokenize(const char* arg);
+ static IdentifierToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsIdentifier());
+ return reinterpret_cast<IdentifierToken*>(tok);
+ }
+};
+
+// 64-bit address literal.
+// Format: 0x... with up to 16 hexadecimal digits.
+class AddressToken : public ValueToken<uint8_t*> {
+ public:
+ explicit AddressToken(uint8_t* address) : ValueToken<uint8_t*>(address) {}
+
+ virtual bool IsAddress() const { return true; }
+ virtual bool CanAddressMemory() const { return true; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const;
+  virtual void Print(FILE* out = stdout) const;
+
+ static Token* Tokenize(const char* arg);
+ static AddressToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsAddress());
+ return reinterpret_cast<AddressToken*>(tok);
+ }
+};
+
+
+// 64-bit decimal integer literal.
+// Format: n.
+class IntegerToken : public ValueToken<int64_t> {
+ public:
+ explicit IntegerToken(int64_t value) : ValueToken<int64_t>(value) {}
+
+ virtual bool IsInteger() const { return true; }
+ virtual void Print(FILE* out = stdout) const;
+
+ static Token* Tokenize(const char* arg);
+ static IntegerToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsInteger());
+ return reinterpret_cast<IntegerToken*>(tok);
+ }
+};
+
+// Literal describing how to print a chunk of data (up to 64 bits).
+// Format: .ln
+// where l (letter) is one of
+// * x: hexadecimal
+// * s: signed integer
+// * u: unsigned integer
+// * f: floating point
+// * i: instruction
+// and n (size) is one of 8, 16, 32 and 64. n should be omitted for
+// instructions.
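+// For example, ".x64" prints a 64-bit value in hexadecimal, ".s8" prints a
+// signed byte, and ".i" selects instruction output (as used by the examine
+// and disasm commands).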
+class FormatToken : public Token {
+ public:
+ FormatToken() {}
+
+ virtual bool IsFormat() const { return true; }
+ virtual int SizeOf() const = 0;
+ virtual char type_code() const = 0;
+ virtual void PrintData(void* data, FILE* out = stdout) const = 0;
+ virtual void Print(FILE* out = stdout) const = 0;
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ static Token* Tokenize(const char* arg);
+ static FormatToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFormat());
+ return reinterpret_cast<FormatToken*>(tok);
+ }
+};
+
+
+template<typename T> class Format : public FormatToken {
+ public:
+ Format(const char* fmt, char type_code) : fmt_(fmt), type_code_(type_code) {}
+
+ virtual int SizeOf() const { return sizeof(T); }
+ virtual char type_code() const { return type_code_; }
+ virtual void PrintData(void* data, FILE* out = stdout) const {
+ T value;
+ memcpy(&value, data, sizeof(value));
+ fprintf(out, fmt_, value);
+ }
+ virtual void Print(FILE* out = stdout) const;
+
+ private:
+ const char* fmt_;
+ char type_code_;
+};
+
+// Tokens which don't fit any of the above.
+class UnknownToken : public Token {
+ public:
+ explicit UnknownToken(const char* arg) {
+ size_t size = strlen(arg) + 1;
+ unknown_ = (char*)js_malloc(size);
+ strncpy(unknown_, arg, size);
+ }
+ virtual ~UnknownToken() { js_free(unknown_); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ virtual bool IsUnknown() const { return true; }
+ virtual void Print(FILE* out = stdout) const;
+
+ private:
+ char* unknown_;
+};
+
+
+// All debugger commands must subclass DebugCommand and implement Run, Print
+// and Build. Commands must also define kHelp, kAliases and kArguments.
+class DebugCommand {
+ public:
+ explicit DebugCommand(Token* name) : name_(IdentifierToken::Cast(name)) {}
+ DebugCommand() : name_(NULL) {}
+ virtual ~DebugCommand() { js_delete(name_); }
+
+ const char* name() { return name_->value(); }
+ // Run the command on the given debugger. The command returns true if
+ // execution should move to the next instruction.
+ virtual bool Run(Debugger * debugger) = 0;
+ virtual void Print(FILE* out = stdout);
+
+ static bool Match(const char* name, const char** aliases);
+ static DebugCommand* Parse(char* line);
+ static void PrintHelp(const char** aliases,
+ const char* args,
+ const char* help);
+
+ private:
+ IdentifierToken* name_;
+};
+
+// For all commands below, see their respective kHelp, kAliases and kArguments
+// defined later in this file.
+class HelpCommand : public DebugCommand {
+ public:
+ explicit HelpCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger);
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class ContinueCommand : public DebugCommand {
+ public:
+ explicit ContinueCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger);
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class StepCommand : public DebugCommand {
+ public:
+ StepCommand(Token* name, IntegerToken* count)
+ : DebugCommand(name), count_(count) {}
+ virtual ~StepCommand() { js_delete(count_); }
+
+ int64_t count() { return count_->value(); }
+ virtual bool Run(Debugger* debugger);
+ virtual void Print(FILE* out = stdout);
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ IntegerToken* count_;
+};
+
+class DisasmCommand : public DebugCommand {
+ public:
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class PrintCommand : public DebugCommand {
+ public:
+ PrintCommand(Token* name, Token* target, FormatToken* format)
+ : DebugCommand(name), target_(target), format_(format) {}
+ virtual ~PrintCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ virtual bool Run(Debugger* debugger);
+ virtual void Print(FILE* out = stdout);
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+};
+
+class ExamineCommand : public DebugCommand {
+ public:
+ ExamineCommand(Token* name,
+ Token* target,
+ FormatToken* format,
+ IntegerToken* count)
+ : DebugCommand(name), target_(target), format_(format), count_(count) {}
+ virtual ~ExamineCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ js_delete(count_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ IntegerToken* count() { return count_; }
+ virtual bool Run(Debugger* debugger);
+ virtual void Print(FILE* out = stdout);
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+ IntegerToken* count_;
+};
+
+// Commands whose name does not match any of the known commands.
+class UnknownCommand : public DebugCommand {
+ public:
+ explicit UnknownCommand(TokenVector&& args) : args_(Move(args)) {}
+ virtual ~UnknownCommand();
+
+ virtual bool Run(Debugger* debugger);
+
+ private:
+ TokenVector args_;
+};
+
+// Commands whose name matches a known command but whose syntax is invalid.
+class InvalidCommand : public DebugCommand {
+ public:
+ InvalidCommand(TokenVector&& args, int index, const char* cause)
+ : args_(Move(args)), index_(index), cause_(cause) {}
+ virtual ~InvalidCommand();
+
+ virtual bool Run(Debugger* debugger);
+
+ private:
+ TokenVector args_;
+ int index_;
+ const char* cause_;
+};
+
+const char* HelpCommand::kAliases[] = { "help", NULL };
+const char* HelpCommand::kArguments = NULL;
+const char* HelpCommand::kHelp = " Print this help.";
+
+const char* ContinueCommand::kAliases[] = { "continue", "c", NULL };
+const char* ContinueCommand::kArguments = NULL;
+const char* ContinueCommand::kHelp = " Resume execution.";
+
+const char* StepCommand::kAliases[] = { "stepi", "si", NULL };
+const char* StepCommand::kArguments = "[n = 1]";
+const char* StepCommand::kHelp = "  Execute the next n instruction(s).";
+
+const char* DisasmCommand::kAliases[] = { "disasm", "di", NULL };
+const char* DisasmCommand::kArguments = "[n = 10]";
+const char* DisasmCommand::kHelp =
+ " Disassemble n instruction(s) at pc.\n"
+ " This command is equivalent to x pc.i [n = 10]."
+;
+
+const char* PrintCommand::kAliases[] = { "print", "p", NULL };
+const char* PrintCommand::kArguments = "<entity>[.format]";
+const char* PrintCommand::kHelp =
+ " Print the given entity according to the given format.\n"
+ " The format parameter only affects individual registers; it is ignored\n"
+ " for other entities.\n"
+ " <entity> can be one of the following:\n"
+ " * A register name (such as x0, s1, ...).\n"
+ " * 'regs', to print all integer (W and X) registers.\n"
+ " * 'fpregs' to print all floating-point (S and D) registers.\n"
+ " * 'sysregs' to print all system registers (including NZCV).\n"
+ " * 'pc' to print the current program counter.\n"
+;
+
+const char* ExamineCommand::kAliases[] = { "m", "mem", "x", NULL };
+const char* ExamineCommand::kArguments = "<addr>[.format] [n = 10]";
+const char* ExamineCommand::kHelp =
+ " Examine memory. Print n items of memory at address <addr> according to\n"
+ " the given [.format].\n"
+ " Addr can be an immediate address, a register name or pc.\n"
+ " Format is made of a type letter: 'x' (hexadecimal), 's' (signed), 'u'\n"
+ " (unsigned), 'f' (floating point), i (instruction) and a size in bits\n"
+ " when appropriate (8, 16, 32, 64)\n"
+ " E.g 'x sp.x64' will print 10 64-bit words from the stack in\n"
+ " hexadecimal format."
+;
+
+const char* RegisterToken::kXAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "x0", NULL },
+ { "x1", NULL },
+ { "x2", NULL },
+ { "x3", NULL },
+ { "x4", NULL },
+ { "x5", NULL },
+ { "x6", NULL },
+ { "x7", NULL },
+ { "x8", NULL },
+ { "x9", NULL },
+ { "x10", NULL },
+ { "x11", NULL },
+ { "x12", NULL },
+ { "x13", NULL },
+ { "x14", NULL },
+ { "x15", NULL },
+ { "ip0", "x16", NULL },
+ { "ip1", "x17", NULL },
+ { "x18", "pr", NULL },
+ { "x19", NULL },
+ { "x20", NULL },
+ { "x21", NULL },
+ { "x22", NULL },
+ { "x23", NULL },
+ { "x24", NULL },
+ { "x25", NULL },
+ { "x26", NULL },
+ { "x27", NULL },
+ { "x28", NULL },
+ { "fp", "x29", NULL },
+ { "lr", "x30", NULL },
+ { "sp", NULL}
+};
+
+const char* RegisterToken::kWAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "w0", NULL },
+ { "w1", NULL },
+ { "w2", NULL },
+ { "w3", NULL },
+ { "w4", NULL },
+ { "w5", NULL },
+ { "w6", NULL },
+ { "w7", NULL },
+ { "w8", NULL },
+ { "w9", NULL },
+ { "w10", NULL },
+ { "w11", NULL },
+ { "w12", NULL },
+ { "w13", NULL },
+ { "w14", NULL },
+ { "w15", NULL },
+ { "w16", NULL },
+ { "w17", NULL },
+ { "w18", NULL },
+ { "w19", NULL },
+ { "w20", NULL },
+ { "w21", NULL },
+ { "w22", NULL },
+ { "w23", NULL },
+ { "w24", NULL },
+ { "w25", NULL },
+ { "w26", NULL },
+ { "w27", NULL },
+ { "w28", NULL },
+ { "w29", NULL },
+ { "w30", NULL },
+ { "wsp", NULL }
+};
+
+
+Debugger::Debugger(Decoder* decoder, FILE* stream)
+ : Simulator(decoder, stream),
+ debug_parameters_(DBG_INACTIVE),
+ pending_request_(false),
+ steps_(0),
+ last_command_(NULL) {
+ disasm_ = js_new<PrintDisassembler>(stdout);
+ printer_ = js_new<Decoder>();
+ printer_->AppendVisitor(disasm_);
+}
+
+
+Debugger::~Debugger() {
+ js_delete(disasm_);
+ js_delete(printer_);
+}
+
+
+void Debugger::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ if (pending_request()) RunDebuggerShell();
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
+
+
+void Debugger::PrintInstructions(const void* address, int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const Instruction* from = Instruction::CastConst(address);
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * kInstructionSize;
+ }
+ const Instruction* to = from + count * kInstructionSize;
+
+ for (const Instruction* current = from;
+ current < to;
+ current = current->NextInstruction()) {
+ printer_->Decode(current);
+ }
+}
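+
+// Worked example (illustrative): PrintInstructions(pc, -3) decodes the two
+// instructions preceding pc and then pc itself, since a negative count
+// extends the printed range backwards so that it ends at the given address.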
+
+
+void Debugger::PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const uint8_t* from = address;
+ int size = format->SizeOf();
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * size;
+ }
+ const uint8_t* to = from + count * size;
+
+ for (const uint8_t* current = from; current < to; current += size) {
+ if (((current - from) % 8) == 0) {
+ printf("\n%p: ", current);
+ }
+
+ uint64_t data = Memory::Read<uint64_t>(current);
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n\n");
+}
+
+
+void Debugger::PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format) {
+ const uint64_t reg_size = target_reg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = reg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t reg_value = reg<uint64_t>(target_reg.code(),
+ Reg31IsStackPointer);
+ VIXL_ASSERT(count > 0);
+
+ printf("%s = ", name);
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = reg_value >> (reg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
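+
+// Worked example (illustrative): printing a 64-bit register with the ".x16"
+// format yields count = 64 / 16 = 4 chunks, emitted most-significant first,
+// because each chunk is extracted as reg_value >> (reg_size - i * format_size).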
+
+
+// TODO(all): fix this for vector registers.
+void Debugger::PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format) {
+ const unsigned fpreg_size = target_fpreg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = fpreg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t fpreg_value = vreg<uint64_t>(fpreg_size, target_fpreg.code());
+ VIXL_ASSERT(count > 0);
+
+ if (target_fpreg.Is32Bits()) {
+ printf("s%u = ", target_fpreg.code());
+ } else {
+ printf("d%u = ", target_fpreg.code());
+ }
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = fpreg_value >> (fpreg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
+
+
+void Debugger::VisitException(const Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK:
+ DoBreakpoint(instr);
+ return;
+ case HLT:
+ VIXL_FALLTHROUGH();
+ default: Simulator::VisitException(instr);
+ }
+}
+
+
+// Read a command. A command will be at most kMaxDebugShellLine chars long and
+// ends with '\n\0'.
+// TODO: Should this be a utility function?
+char* Debugger::ReadCommandLine(const char* prompt, char* buffer, int length) {
+ int fgets_calls = 0;
+ char* end = NULL;
+
+ printf("%s", prompt);
+ fflush(stdout);
+
+ do {
+ if (fgets(buffer, length, stdin) == NULL) {
+ printf(" ** Error while reading command. **\n");
+ return NULL;
+ }
+
+ fgets_calls++;
+ end = strchr(buffer, '\n');
+ } while (end == NULL);
+
+ if (fgets_calls != 1) {
+ printf(" ** Command too long. **\n");
+ return NULL;
+ }
+
+ // Remove the newline from the end of the command.
+ VIXL_ASSERT(end[1] == '\0');
+ VIXL_ASSERT((end - buffer) < (length - 1));
+ end[0] = '\0';
+
+ return buffer;
+}
+
+
+void Debugger::RunDebuggerShell() {
+ if (IsDebuggerRunning()) {
+ if (steps_ > 0) {
+ // Finish stepping first.
+ --steps_;
+ return;
+ }
+
+ printf("Next: ");
+ PrintInstructions(pc());
+ bool done = false;
+ while (!done) {
+ char buffer[kMaxDebugShellLine];
+ char* line = ReadCommandLine("vixl> ", buffer, kMaxDebugShellLine);
+
+ if (line == NULL) continue; // An error occurred.
+
+ DebugCommand* command = DebugCommand::Parse(line);
+ if (command != NULL) {
+ last_command_ = command;
+ }
+
+ if (last_command_ != NULL) {
+ done = last_command_->Run(this);
+ } else {
+ printf("No previous command to run!\n");
+ }
+ }
+
+ if ((debug_parameters_ & DBG_BREAK) != 0) {
+ // The break request has now been handled, move to next instruction.
+ debug_parameters_ &= ~DBG_BREAK;
+ increment_pc();
+ }
+ }
+}
+
+
+void Debugger::DoBreakpoint(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == BRK);
+
+ printf("Hit breakpoint at pc=%p.\n", reinterpret_cast<const void*>(instr));
+ set_debug_parameters(debug_parameters() | DBG_BREAK | DBG_ACTIVE);
+ // Make the shell point to the brk instruction.
+ set_pc(instr);
+}
+
+
+static bool StringToUInt64(uint64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t parsed = strtoul(line, &endptr, base);
+
+ if (errno == ERANGE) {
+ // Overflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+static bool StringToInt64(int64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ int64_t parsed = strtol(line, &endptr, base);
+
+ if (errno == ERANGE) {
+    // Overflow or underflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+Token* Token::Tokenize(const char* arg) {
+ if ((arg == NULL) || (*arg == '\0')) {
+ return NULL;
+ }
+
+  // The order is important. For example, IdentifierToken::Tokenize would
+  // consider any register name to be a valid identifier.
+
+ Token* token = RegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = FPRegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IdentifierToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = AddressToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IntegerToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ return js_new<UnknownToken>(arg);
+}
+
+
+uint8_t* RegisterToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ uint64_t reg_value = debugger->xreg(value().code(), Reg31IsStackPointer);
+ uint8_t* address = NULL;
+ memcpy(&address, &reg_value, sizeof(address));
+ return address;
+}
+
+
+void RegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ fprintf(out, "[Register %s]", Name());
+}
+
+
+const char* RegisterToken::Name() const {
+ if (value().Is32Bits()) {
+ return kWAliases[value().code()][0];
+ } else {
+ return kXAliases[value().code()][0];
+ }
+}
+
+
+Token* RegisterToken::Tokenize(const char* arg) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ // Is it a X register or alias?
+ for (const char** current = kXAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::XRegFromCode(i));
+ }
+ }
+
+ // Is it a W register or alias?
+ for (const char** current = kWAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::WRegFromCode(i));
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+void FPRegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ char prefix = value().Is32Bits() ? 's' : 'd';
+ fprintf(out, "[FPRegister %c%" PRIu32 "]", prefix, value().code());
+}
+
+
+Token* FPRegisterToken::Tokenize(const char* arg) {
+ if (strlen(arg) < 2) {
+ return NULL;
+ }
+
+ switch (*arg) {
+ case 's':
+ case 'd':
+ const char* cursor = arg + 1;
+ uint64_t code = 0;
+ if (!StringToUInt64(&code, cursor)) {
+ return NULL;
+ }
+
+      if (code >= kNumberOfFPRegisters) {
+ return NULL;
+ }
+
+ VRegister fpreg = NoVReg;
+ switch (*arg) {
+ case 's':
+ fpreg = VRegister::SRegFromCode(static_cast<unsigned>(code));
+ break;
+ case 'd':
+ fpreg = VRegister::DRegFromCode(static_cast<unsigned>(code));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ return js_new<FPRegisterToken>(fpreg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* IdentifierToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ const Instruction* pc_value = debugger->pc();
+ uint8_t* address = NULL;
+ memcpy(&address, &pc_value, sizeof(address));
+ return address;
+}
+
+void IdentifierToken::Print(FILE* out) const {
+ fprintf(out, "[Identifier %s]", value());
+}
+
+
+Token* IdentifierToken::Tokenize(const char* arg) {
+ if (!isalpha(arg[0])) {
+ return NULL;
+ }
+
+ const char* cursor = arg + 1;
+ while ((*cursor != '\0') && isalnum(*cursor)) {
+ ++cursor;
+ }
+
+ if (*cursor == '\0') {
+ return js_new<IdentifierToken>(arg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* AddressToken::ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ return value();
+}
+
+
+void AddressToken::Print(FILE* out) const {
+ fprintf(out, "[Address %p]", value());
+}
+
+
+Token* AddressToken::Tokenize(const char* arg) {
+ if ((strlen(arg) < 3) || (arg[0] != '0') || (arg[1] != 'x')) {
+ return NULL;
+ }
+
+ uint64_t ptr = 0;
+ if (!StringToUInt64(&ptr, arg, 16)) {
+ return NULL;
+ }
+
+ uint8_t* address = reinterpret_cast<uint8_t*>(ptr);
+ return js_new<AddressToken>(address);
+}
+
+
+void IntegerToken::Print(FILE* out) const {
+ fprintf(out, "[Integer %" PRId64 "]", value());
+}
+
+
+Token* IntegerToken::Tokenize(const char* arg) {
+ int64_t value = 0;
+ if (!StringToInt64(&value, arg)) {
+ return NULL;
+ }
+
+ return js_new<IntegerToken>(value);
+}
+
+
+Token* FormatToken::Tokenize(const char* arg) {
+ size_t length = strlen(arg);
+ switch (arg[0]) {
+ case 'x':
+ case 's':
+ case 'u':
+ case 'f':
+ if (length == 1) return NULL;
+ break;
+ case 'i':
+ if (length == 1) return js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ VIXL_FALLTHROUGH();
+ default: return NULL;
+ }
+
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t count = strtoul(arg + 1, &endptr, 10);
+
+ if (errno != 0) {
+ // Overflow, etc.
+ return NULL;
+ }
+
+ if (endptr == arg) {
+ // No digits were parsed.
+ return NULL;
+ }
+
+ if (*endptr != '\0') {
+ // There are unexpected (non-digit) characters after the number.
+ return NULL;
+ }
+
+ switch (arg[0]) {
+ case 'x':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%02" PRIx8, 'x');
+ case 16: return js_new<Format<uint16_t>>("%04" PRIx16, 'x');
+ case 32: return js_new<Format<uint32_t>>("%08" PRIx32, 'x');
+ case 64: return js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ default: return NULL;
+ }
+ case 's':
+ switch (count) {
+ case 8: return js_new<Format<int8_t>>("%4" PRId8, 's');
+ case 16: return js_new<Format<int16_t>>("%6" PRId16, 's');
+ case 32: return js_new<Format<int32_t>>("%11" PRId32, 's');
+ case 64: return js_new<Format<int64_t>>("%20" PRId64, 's');
+ default: return NULL;
+ }
+ case 'u':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%3" PRIu8, 'u');
+ case 16: return js_new<Format<uint16_t>>("%5" PRIu16, 'u');
+ case 32: return js_new<Format<uint32_t>>("%10" PRIu32, 'u');
+ case 64: return js_new<Format<uint64_t>>("%20" PRIu64, 'u');
+ default: return NULL;
+ }
+ case 'f':
+ switch (count) {
+ case 32: return js_new<Format<float>>("%13g", 'f');
+ case 64: return js_new<Format<double>>("%13g", 'f');
+ default: return NULL;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+template<typename T>
+void Format<T>::Print(FILE* out) const {
+ unsigned size = sizeof(T) * 8;
+ fprintf(out, "[Format %c%u - %s]", type_code_, size, fmt_);
+}
+
+
+void UnknownToken::Print(FILE* out) const {
+ fprintf(out, "[Unknown %s]", unknown_);
+}
+
+
+void DebugCommand::Print(FILE* out) {
+ fprintf(out, "%s", name());
+}
+
+
+bool DebugCommand::Match(const char* name, const char** aliases) {
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (strcmp(name, *current) == 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+DebugCommand* DebugCommand::Parse(char* line) {
+ TokenVector args;
+
+ for (char* chunk = strtok(line, " \t");
+ chunk != NULL;
+ chunk = strtok(NULL, " \t")) {
+ char* dot = strchr(chunk, '.');
+ if (dot != NULL) {
+ // 'Token.format'.
+ Token* format = FormatToken::Tokenize(dot + 1);
+ if (format != NULL) {
+ *dot = '\0';
+ args.append(Token::Tokenize(chunk));
+ args.append(format);
+ } else {
+ // Error while parsing the format, push the UnknownToken so an error
+ // can be accurately reported.
+ args.append(Token::Tokenize(chunk));
+ }
+ } else {
+ args.append(Token::Tokenize(chunk));
+ }
+ }
+
+ if (args.empty()) {
+ return NULL;
+ }
+
+ if (!args[0]->IsIdentifier()) {
+ return js_new<InvalidCommand>(Move(args), 0, "command name is not valid");
+ }
+
+ const char* name = IdentifierToken::Cast(args[0])->value();
+ #define RETURN_IF_MATCH(Command) \
+ if (Match(name, Command::kAliases)) { \
+ return Command::Build(Move(args)); \
+ }
+ DEBUG_COMMAND_LIST(RETURN_IF_MATCH);
+ #undef RETURN_IF_MATCH
+
+ return js_new<UnknownCommand>(Move(args));
+}
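+
+// Parsing example (illustrative): the line "x sp.x64 4" is tokenized as
+// [Identifier x] [Register sp] [Format x64] [Integer 4]; "x" matches
+// ExamineCommand::kAliases, so ExamineCommand::Build receives these tokens and
+// builds a command that dumps four 64-bit words starting at the address in sp.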
+
+
+void DebugCommand::PrintHelp(const char** aliases,
+ const char* args,
+ const char* help) {
+ VIXL_ASSERT(aliases[0] != NULL);
+ VIXL_ASSERT(help != NULL);
+
+ printf("\n----\n\n");
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (args != NULL) {
+ printf("%s %s\n", *current, args);
+ } else {
+ printf("%s\n", *current);
+ }
+ }
+ printf("\n%s\n", help);
+}
+
+
+bool HelpCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ #define PRINT_HELP(Command) \
+ DebugCommand::PrintHelp(Command::kAliases, \
+ Command::kArguments, \
+ Command::kHelp);
+ DEBUG_COMMAND_LIST(PRINT_HELP);
+ #undef PRINT_HELP
+ printf("\n----\n\n");
+
+ return false;
+}
+
+
+DebugCommand* HelpCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ return js_new<HelpCommand>(args[0]);
+}
+
+
+bool ContinueCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ debugger->set_debug_parameters(debugger->debug_parameters() & ~DBG_ACTIVE);
+ return true;
+}
+
+
+DebugCommand* ContinueCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ return js_new<ContinueCommand>(args[0]);
+}
+
+
+bool StepCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ int64_t steps = count();
+ if (steps < 0) {
+ printf(" ** invalid value for steps: %" PRId64 " (<0) **\n", steps);
+ } else if (steps > 1) {
+ debugger->set_steps(steps - 1);
+ }
+
+ return true;
+}
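+
+// Worked example (illustrative): "si 5" returns true so the current
+// instruction executes, and sets steps_ to 4 so RunDebuggerShell skips the
+// prompt for the next four instructions; five instructions run in total
+// before the shell regains control.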
+
+
+void StepCommand::Print(FILE* out) {
+ fprintf(out, "%s %" PRId64 "", name(), count());
+}
+
+
+DebugCommand* StepCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // step [1]
+ count = js_new<IntegerToken>(1);
+ break;
+ }
+ case 2: { // step n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(Move(args), 1, "expects int");
+ }
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ return js_new<StepCommand>(args[0], count);
+}
+
+
+DebugCommand* DisasmCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // disasm [10]
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 2: { // disasm n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(Move(args), 1, "expects int");
+ }
+
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ Token* target = js_new<IdentifierToken>("pc");
+ FormatToken* format = js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+void PrintCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ target()->Print(out);
+ if (format() != NULL) format()->Print(out);
+}
+
+
+bool PrintCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ Token* tok = target();
+ if (tok->IsIdentifier()) {
+ char* identifier = IdentifierToken::Cast(tok)->value();
+ if (strcmp(identifier, "regs") == 0) {
+ debugger->PrintRegisters();
+ } else if (strcmp(identifier, "fpregs") == 0) {
+ debugger->PrintVRegisters();
+ } else if (strcmp(identifier, "sysregs") == 0) {
+ debugger->PrintSystemRegisters();
+ } else if (strcmp(identifier, "pc") == 0) {
+ printf("pc = %16p\n", reinterpret_cast<const void*>(debugger->pc()));
+ } else {
+ printf(" ** Unknown identifier to print: %s **\n", identifier);
+ }
+
+ return false;
+ }
+
+ FormatToken* format_tok = format();
+ VIXL_ASSERT(format_tok != NULL);
+ if (format_tok->type_code() == 'i') {
+ // TODO(all): Add support for instruction disassembly.
+ printf(" ** unsupported format: instructions **\n");
+ return false;
+ }
+
+ if (tok->IsRegister()) {
+ RegisterToken* reg_tok = RegisterToken::Cast(tok);
+ Register reg = reg_tok->value();
+ debugger->PrintRegister(reg, reg_tok->Name(), format_tok);
+ return false;
+ }
+
+ if (tok->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(tok)->value();
+ debugger->PrintFPRegister(fpreg, format_tok);
+ return false;
+ }
+
+ VIXL_UNREACHABLE();
+ return false;
+}
+
+
+DebugCommand* PrintCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(Move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->IsRegister() &&
+ !target->IsFPRegister() &&
+ !target->IsIdentifier()) {
+ return js_new<InvalidCommand>(Move(args), 1, "expects reg or identifier");
+ }
+
+ FormatToken* format = NULL;
+ int target_size = 0;
+ if (target->IsRegister()) {
+ Register reg = RegisterToken::Cast(target)->value();
+ target_size = reg.SizeInBytes();
+ } else if (target->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(target)->value();
+ target_size = fpreg.SizeInBytes();
+ }
+ // If the target is an identifier there must be no format. This is checked
+ // in the switch statement below.
+
+ switch (args.length()) {
+ case 2: {
+ if (target->IsRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<uint32_t>>("%08" PRIx32, 'x'); break;
+ case 8: format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ } else if (target->IsFPRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<float>>("%8g", 'f'); break;
+ case 8: format = js_new<Format<double>>("%8g", 'f'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ break;
+ }
+ case 3: {
+ if (target->IsIdentifier()) {
+ return js_new<InvalidCommand>(Move(args), 2,
+ "format is only allowed with registers");
+ }
+
+ Token* second = args[2];
+ if (!second->IsFormat()) {
+ return js_new<InvalidCommand>(Move(args), 2, "expects format");
+ }
+ format = FormatToken::Cast(second);
+
+ if (format->SizeOf() > target_size) {
+ return js_new<InvalidCommand>(Move(args), 2, "format too wide");
+ }
+
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ return js_new<PrintCommand>(args[0], target, format);
+}
+
+
+bool ExamineCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ uint8_t* address = target()->ToAddress(debugger);
+ int64_t amount = count()->value();
+ if (format()->type_code() == 'i') {
+ debugger->PrintInstructions(address, amount);
+ } else {
+ debugger->PrintMemory(address, format(), amount);
+ }
+
+ return false;
+}
+
+
+void ExamineCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ format()->Print(out);
+ target()->Print(out);
+}
+
+
+DebugCommand* ExamineCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(Move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->CanAddressMemory()) {
+ return js_new<InvalidCommand>(Move(args), 1, "expects address");
+ }
+
+ FormatToken* format = NULL;
+ IntegerToken* count = NULL;
+
+ switch (args.length()) {
+ case 2: { // mem addr[.x64] [10]
+ format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 3: { // mem addr.format [10]
+ // mem addr[.x64] n
+ Token* second = args[2];
+ if (second->IsFormat()) {
+ format = FormatToken::Cast(second);
+ count = js_new<IntegerToken>(10);
+ break;
+      } else if (second->IsInteger()) {
+        format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+        count = IntegerToken::Cast(second);
+        // Valid "mem addr n" form: keep the default format and stop here so
+        // we do not fall through to VIXL_UNREACHABLE() below.
+        break;
+      } else {
+        return js_new<InvalidCommand>(Move(args), 2, "expects format or integer");
+      }
+      VIXL_UNREACHABLE();
+      break;
+ }
+ case 4: { // mem addr.format n
+ Token* second = args[2];
+ Token* third = args[3];
+ if (!second->IsFormat() || !third->IsInteger()) {
+ return js_new<InvalidCommand>(Move(args), -1, "expects addr[.format] [n]");
+ }
+ format = FormatToken::Cast(second);
+ count = IntegerToken::Cast(third);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(Move(args), -1, "too many arguments");
+ }
+
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+UnknownCommand::~UnknownCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool UnknownCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Unknown Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ args_[i]->Print(stdout);
+ }
+ printf(" **\n");
+
+ return false;
+}
+
+
+InvalidCommand::~InvalidCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool InvalidCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Invalid Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ if (i == static_cast<size_t>(index_)) {
+ printf(">>");
+ args_[i]->Print(stdout);
+ printf("<<");
+ } else {
+ args_[i]->Print(stdout);
+ }
+ }
+ printf(" **\n");
+ printf(" ** %s\n", cause_);
+
+ return false;
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.h b/js/src/jit/arm64/vixl/Debugger-vixl.h
new file mode 100644
index 000000000..be2b3d9cf
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.h
@@ -0,0 +1,117 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#ifndef VIXL_A64_DEBUGGER_A64_H_
+#define VIXL_A64_DEBUGGER_A64_H_
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+// Flags that represent the debugger state.
+enum DebugParameters {
+ DBG_INACTIVE = 0,
+ DBG_ACTIVE = 1 << 0, // The debugger is active.
+ DBG_BREAK = 1 << 1 // The debugger is at a breakpoint.
+};
+
+// Forward declarations.
+class DebugCommand;
+class Token;
+class FormatToken;
+
+class Debugger : public Simulator {
+ public:
+ explicit Debugger(Decoder* decoder, FILE* stream = stdout);
+ ~Debugger();
+
+ virtual void Run();
+ virtual void VisitException(const Instruction* instr);
+
+ int debug_parameters() const { return debug_parameters_; }
+ void set_debug_parameters(int parameters) {
+ debug_parameters_ = parameters;
+
+ update_pending_request();
+ }
+
+  // Number of instructions to execute before the debugger shell is given
+  // back control.
+ int64_t steps() const { return steps_; }
+ void set_steps(int64_t value) {
+ VIXL_ASSERT(value > 1);
+ steps_ = value;
+ }
+
+ bool IsDebuggerRunning() const {
+ return (debug_parameters_ & DBG_ACTIVE) != 0;
+ }
+
+ bool pending_request() const { return pending_request_; }
+ void update_pending_request() {
+ pending_request_ = IsDebuggerRunning();
+ }
+
+ void PrintInstructions(const void* address, int64_t count = 1);
+ void PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count = 1);
+ void PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format);
+ void PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format);
+
+ private:
+ char* ReadCommandLine(const char* prompt, char* buffer, int length);
+ void RunDebuggerShell();
+ void DoBreakpoint(const Instruction* instr);
+
+ int debug_parameters_;
+ bool pending_request_;
+ int64_t steps_;
+ DebugCommand* last_command_;
+ PrintDisassembler* disasm_;
+ Decoder* printer_;
+
+ // Length of the biggest command line accepted by the debugger shell.
+ static const int kMaxDebugShellLine = 256;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_DEBUGGER_A64_H_
+
+#endif // JS_SIMULATOR_ARM64
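For orientation, here is a minimal usage sketch of the interface declared above. It is an editorial addition, not part of the patch; it assumes a JS_SIMULATOR_ARM64 build and that the simulated code and PC have already been set up elsewhere before Run() is called.

#include <cstdio>

#include "jit/arm64/vixl/Debugger-vixl.h"
#include "jit/arm64/vixl/Decoder-vixl.h"

// Hypothetical helper: hand control to the interactive debugger shell.
static void RunUnderVixlDebugger() {
  vixl::Decoder decoder;
  vixl::Debugger debugger(&decoder, stdout);  // Debugger is a Simulator subclass.
  debugger.set_debug_parameters(vixl::DBG_ACTIVE);
  debugger.set_steps(2);                      // set_steps() asserts value > 1.
  debugger.Run();                             // Assumes the PC was installed beforehand.
}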
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.cpp b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
new file mode 100644
index 000000000..5865689ae
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
@@ -0,0 +1,874 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Decoder-vixl.h"
+
+#include <algorithm>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+void Decoder::DecodeInstruction(const Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // Load/store exclusive.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+ visitors_.append(new_visitor);
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+ visitors_.insert(visitors_.begin(), new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ visitors_.append(new_visitor);
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ visitors_.append(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.erase(std::remove(visitors_.begin(), visitors_.end(), visitor),
+ visitors_.end());
+}
+
+
+void Decoder::DecodePCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ VIXL_ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+  }
+}
+
+
+void Decoder::DecodeLoadStore(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+ // TODO(all): rearrange the tree to integrate this branch.
+ if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
+ DecodeNEONLoadStore(instr);
+ return;
+ }
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ VisitLoadStoreExclusive(instr);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO: VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+    }
+  }
+}
+
+
+void Decoder::DecodeLogical(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB));
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ VIXL_FALLTHROUGH();
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+            break;
+          }
+        }
+        case 1:
+        case 3:
+        case 5:
+        case 7: VisitUnallocated(instr); break;
+      }
+    }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeFP(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF));
+ if (instr->Bit(28) == 0) {
+ DecodeNEONVectorDataProcessing(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeNEONScalarDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ }
+          }
+        } else {
+          // Bit 30 == 1 has been handled earlier.
+          VIXL_ASSERT(instr->Bit(30) == 0);
+          if (instr->Mask(0xA0800000) != 0) {
+            VisitUnallocated(instr);
+          } else {
+            VisitFPDataProcessing3Source(instr);
+          }
+        }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
+ if (instr->Bit(31) == 0) {
+ if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
+ VisitUnallocated(instr);
+ return;
+ }
+
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(20, 16) == 0) {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStruct(instr);
+ } else {
+ VisitNEONLoadStoreSingleStruct(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStructPostIndex(instr);
+ } else {
+ VisitNEONLoadStoreSingleStructPostIndex(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
+ if (instr->Bit(31) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONTable(instr);
+ } else {
+ VisitNEONPerm(instr);
+ }
+ } else {
+ VisitNEONExtract(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEON3Different(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEON2RegMisc(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x2) {
+ VisitCryptoAES(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONAcrossLanes(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEON3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(22, 19) == 0) {
+ VisitNEONModifiedImmediate(instr);
+ } else {
+ VisitNEONShiftImmediate(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitCrypto3RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONScalarCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONScalar3Diff(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalar2RegMisc(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitCrypto2RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalarPairwise(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEONScalar3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONScalarByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitNEONScalarShiftImmediate(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void Decoder::Visit##A(const Instruction *instr) { \
+ VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ for (auto visitor : visitors_) { \
+ visitor->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.h b/js/src/jit/arm64/vixl/Decoder-vixl.h
new file mode 100644
index 000000000..95dd589e8
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.h
@@ -0,0 +1,274 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DECODER_A64_H_
+#define VIXL_A64_DECODER_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jsalloc.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST_THAT_RETURN(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LoadStoreExclusive) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Crypto2RegSHA) \
+ V(Crypto3RegSHA) \
+ V(CryptoAES) \
+ V(NEON2RegMisc) \
+ V(NEON3Different) \
+ V(NEON3Same) \
+ V(NEONAcrossLanes) \
+ V(NEONByIndexedElement) \
+ V(NEONCopy) \
+ V(NEONExtract) \
+ V(NEONLoadStoreMultiStruct) \
+ V(NEONLoadStoreMultiStructPostIndex) \
+ V(NEONLoadStoreSingleStruct) \
+ V(NEONLoadStoreSingleStructPostIndex) \
+ V(NEONModifiedImmediate) \
+ V(NEONScalar2RegMisc) \
+ V(NEONScalar3Diff) \
+ V(NEONScalar3Same) \
+ V(NEONScalarByIndexedElement) \
+ V(NEONScalarCopy) \
+ V(NEONScalarPairwise) \
+ V(NEONScalarShiftImmediate) \
+ V(NEONShiftImmediate) \
+ V(NEONTable) \
+ V(NEONPerm) \
+
+#define VISITOR_LIST_THAT_DONT_RETURN(V) \
+ V(Unallocated) \
+ V(Unimplemented) \
+
+#define VISITOR_LIST(V) \
+ VISITOR_LIST_THAT_RETURN(V) \
+ VISITOR_LIST_THAT_DONT_RETURN(V) \
+
+namespace vixl {
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ enum VisitorConstness {
+ kConstVisitor,
+ kNonConstVisitor
+ };
+ explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
+ : constness_(constness) {}
+
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ bool IsConstVisitor() const { return constness_ == kConstVisitor; }
+ Instruction* MutableInstruction(const Instruction* instr) {
+ VIXL_ASSERT(!IsConstVisitor());
+ return const_cast<Instruction*>(instr);
+ }
+
+ private:
+ const VisitorConstness constness_;
+};
+
+
+class Decoder {
+ public:
+ Decoder() {}
+
+ // Top-level wrappers around the actual decoding function.
+ void Decode(const Instruction* instr) {
+ for (auto visitor : visitors_) {
+ VIXL_ASSERT(visitor->IsConstVisitor());
+ }
+ DecodeInstruction(instr);
+ }
+ void Decode(Instruction* instr) {
+ DecodeInstruction(const_cast<const Instruction*>(instr));
+ }
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+ // Visitors are called in order.
+ // A visitor can be registered multiple times.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+ // d.PrependVisitor(V2);
+ // d.AppendVisitor(V3);
+ //
+ // d.Decode(i);
+ //
+ // will call in order visitor methods in V2, V1, V2, V3.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ // These helpers register `new_visitor` before or after the first instance of
+  // `registered_visitor` in the list.
+ // So if
+ // V1, V2, V1, V2
+ // are registered in this order in the decoder, calls to
+ // d.InsertVisitorAfter(V3, V1);
+ // d.InsertVisitorBefore(V4, V2);
+ // will yield the order
+ // V1, V3, V4, V2, V1, V2
+ //
+ // For more complex modifications of the order of registered visitors, one can
+ // directly access and modify the list of visitors via the `visitors()'
+ // accessor.
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove all instances of a previously registered visitor class from the list
+ // of visitors stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(const Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+
+ private:
+ // Decodes an instruction and calls the visitor functions registered with the
+ // Decoder class.
+ void DecodeInstruction(const Instruction* instr);
+
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(const Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(const Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(const Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(const Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(const Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(const Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0xA, 0xB}.
+ void DecodeDataProcessing(const Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeNEONLoadStore(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) vector data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0x7.
+ void DecodeNEONVectorDataProcessing(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) scalar data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0xF.
+ void DecodeNEONScalarDataProcessing(const Instruction* instr);
+
+ private:
+ // Visitors are registered in a list.
+ mozilla::Vector<DecoderVisitor*, 8, js::SystemAllocPolicy> visitors_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_DECODER_A64_H_
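The decoder/visitor split above is easiest to see with a concrete visitor. The sketch below is an editorial addition, not part of the patch; it relies only on the DecoderVisitor interface and the VISITOR_LIST macro declared in this header, and simply counts every instruction that reaches a leaf of the decode tree.

#include <stdint.h>

#include "jit/arm64/vixl/Decoder-vixl.h"

namespace vixl {

// Hypothetical visitor: bump one counter for every decoded instruction.
class CountingVisitor : public DecoderVisitor {
 public:
  CountingVisitor() : count_(0) {}

  // VISITOR_LIST expands to one Visit##A override per leaf of the decode tree.
  #define DECLARE(A) \
    virtual void Visit##A(const Instruction* instr) { (void)instr; count_++; }
  VISITOR_LIST(DECLARE)
  #undef DECLARE

  uint64_t count() const { return count_; }

 private:
  uint64_t count_;
};

}  // namespace vixl

// Usage sketch:
//   vixl::Decoder decoder;
//   vixl::CountingVisitor counter;
//   decoder.AppendVisitor(&counter);
//   decoder.Decode(instr);   // instr: a const vixl::Instruction*
//   // counter.count() is now 1.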
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.cpp b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
new file mode 100644
index 000000000..365ef597b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
@@ -0,0 +1,3488 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Disasm-vixl.h"
+
+#include <cstdlib>
+
+namespace vixl {
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+ code_address_offset_ = 0;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+ code_address_offset_ = 0;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
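// [Editorial note, not part of the patch.] The Disassembler above is driven
// through the visitor interface from Decoder-vixl.h: Disassembler derives from
// DecoderVisitor (declared in Disasm-vixl.h, not shown here), so a typical use
// looks like:
//
//   vixl::Decoder decoder;
//   vixl::Disassembler disasm;
//   decoder.AppendVisitor(&disasm);
//   decoder.Decode(instr);               // instr: a const vixl::Instruction*
//   printf("%s\n", disasm.GetOutput());  // text of the last decoded instruction
//
// GetOutput() returns the buffer allocated by the default constructor above,
// or the caller-supplied buffer when the two-argument constructor is used.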
+
+void Disassembler::VisitAddSubImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+ (instr->ImmAddSub() == 0) ? true : false;
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NDP";
+ const char *form_cmp = "'Rn, 'Rm'NDP";
+ const char *form_neg = "'Rd, 'Rm'NDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(const Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ VIXL_ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+ // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
+ if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSize) &&
+ (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
+ return true;
+ }
+ if ((reg_size == kWRegSize) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
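// [Editorial note, not part of the patch.] Worked example for IsMovzMovnImm():
// with a 64-bit register, 0x00000000ffff0000 has all of its set bits in the
// halfword at position 16, so the second movz test above matches and the
// function returns true; 0x00ff00ff fails every movz and movn test and returns
// false. VisitLogicalImmediate() above uses the false case to print
// "orr wd, wzr, #0xff00ff" under its "mov" alias, while an immediate that a
// single movz or movn could have produced keeps the explicit "orr" spelling.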
+
+
+void Disassembler::VisitLogicalShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'NLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'NLo";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(const Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(const Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(const Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'TImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_wwx = "'Wd, 'Wn, 'Xm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ case CRC32B: mnemonic = "crc32b"; break;
+ case CRC32H: mnemonic = "crc32h"; break;
+ case CRC32W: mnemonic = "crc32w"; break;
+ case CRC32X: mnemonic = "crc32x"; form = form_wwx; break;
+ case CRC32CB: mnemonic = "crc32cb"; break;
+ case CRC32CH: mnemonic = "crc32ch"; break;
+ case CRC32CW: mnemonic = "crc32cw"; break;
+ case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break;
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(const Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'TImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'TImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+ if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+ mnemonic = "movn";
+ } else {
+ mnemonic = "mov";
+ form = "'Rd, 'IMoveNeg";
+ }
+ } else {
+ mnemonic = "movn";
+ }
+ break;
+ case MOVZ_w:
+ case MOVZ_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
+ mnemonic = "mov";
+ else
+ mnemonic = "movz";
+ break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_b, "str", "'Bt") \
+ V(STR_h, "str", "'Ht") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_b, "ldr", "'Bt") \
+ V(LDR_h, "ldr", "'Ht") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt") \
+ V(STR_q, "str", "'Qt") \
+ V(LDR_q, "ldr", "'Qt")
+
+void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_b = "'Bt, ['Xns'ILS]";
+ const char *form_h = "'Ht, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+ const char *form_q = "'Qt, ['Xns'ILS]";
+ const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_b: mnemonic = "stur"; form = form_b; break;
+ case STUR_h: mnemonic = "stur"; form = form_h; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case STUR_q: mnemonic = "stur"; form = form_q; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_b: mnemonic = "ldur"; form = form_b; break;
+ case LDUR_h: mnemonic = "ldur"; form = form_h; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDUR_q: mnemonic = "ldur"; form = form_q; break;
+ case LDURSB_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(const Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break;
+ case LDRSW_x_lit: {
+ mnemonic = "ldrsw";
+ form = "'Xt, 'ILLiteral 'LValue";
+ break;
+ }
+ case PRFM_lit: {
+ mnemonic = "prfm";
+ form = "'PrefOp, 'ILLiteral 'LValue";
+ break;
+ }
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "2") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "3") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \
+ V(STP_s, "stp", "'St, 'St2", "2") \
+ V(LDP_s, "ldp", "'St, 'St2", "2") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "3") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \
+ V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \
+ V(STP_q, "stp", "'Qt, 'Qt2", "4")
+
+void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStoreExclusiveMask)) {
+ case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+ case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
+ case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
+ case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
+ case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
+ case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+ case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+ case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+ case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+ case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+ case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
+ case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
+ case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
+ case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
+ case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+ case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+ case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+ case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+ case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
+ case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
+ case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
+ case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
+ case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
+ case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
+ case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
+ case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
+ default: form = "(LoadStoreExclusive)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ case FCMPE_s_zero:
+ case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMPE_s:
+ case FCMPE_d: mnemonic = "fcmpe"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(const Instruction* instr) {
+ const char *mnemonic = "unmplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
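+  // Single-source FP operations share the 'Fd, 'Fn template; the fcvt cases
+  // below override it because source and destination precisions differ.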
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break;
+ case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break;
+ case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break;
+ case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break;
+ case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case FCVTPU_xd:
+ case FCVTPU_ws:
+ case FCVTPU_wd:
+ case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break;
+ case FCVTPS_xd:
+ case FCVTPS_wd:
+ case FCVTPS_xs:
+ case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
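+  // The system encodings are matched one sub-group at a time: exclusive
+  // monitor (clrex), system register moves (mrs/msr), hints (nop), memory
+  // barriers (dmb/dsb/isb), and finally the sys/ic/dc maintenance operations.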
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ mnemonic = "clrex";
+ form = (instr->CRm() == 0xf) ? NULL : "'IX";
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) {
+ switch (instr->SysOp()) {
+ case IVAU:
+ mnemonic = "ic";
+ form = "ivau, 'Xt";
+ break;
+ case CVAC:
+ mnemonic = "dc";
+ form = "cvac, 'Xt";
+ break;
+ case CVAU:
+ mnemonic = "dc";
+ form = "cvau, 'Xt";
+ break;
+ case CIVAC:
+ mnemonic = "dc";
+ form = "civac, 'Xt";
+ break;
+ case ZVA:
+ mnemonic = "dc";
+ form = "zva, 'Xt";
+ break;
+ default:
+ mnemonic = "sys";
+ if (instr->Rt() == 31) {
+ form = "'G1, 'Kn, 'Km, 'G2";
+ } else {
+ form = "'G1, 'Kn, 'Km, 'G2, 'Xt";
+ }
+ break;
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitNEON2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s";
+ const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0";
+ const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0";
+ NEONFormatDecoder nfd(instr);
+
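+  // A NEONFormatMap pairs the instruction bits to sample (here the size
+  // field, bits 23:22, and Q, bit 30) with the vector arrangement selected by
+  // each sampled value.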
+ static const NEONFormatMap map_lp_ta = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_ta = {
+ {22}, {NF_4S, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_tb = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+    // These instructions all use a two-bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: mnemonic = "rev64"; break;
+ case NEON_REV32: mnemonic = "rev32"; break;
+ case NEON_REV16: mnemonic = "rev16"; break;
+ case NEON_SADDLP:
+ mnemonic = "saddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADDLP:
+ mnemonic = "uaddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SUQADD: mnemonic = "suqadd"; break;
+ case NEON_USQADD: mnemonic = "usqadd"; break;
+ case NEON_CLS: mnemonic = "cls"; break;
+ case NEON_CLZ: mnemonic = "clz"; break;
+ case NEON_CNT: mnemonic = "cnt"; break;
+ case NEON_SADALP:
+ mnemonic = "sadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADALP:
+ mnemonic = "uadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SQABS: mnemonic = "sqabs"; break;
+ case NEON_SQNEG: mnemonic = "sqneg"; break;
+ case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break;
+ case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break;
+ case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break;
+ case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break;
+ case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break;
+ case NEON_ABS: mnemonic = "abs"; break;
+ case NEON_NEG: mnemonic = "neg"; break;
+ case NEON_RBIT_NOT:
+ switch (instr->FPType()) {
+ case 0: mnemonic = "mvn"; break;
+ case 1: mnemonic = "rbit"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ break;
+ }
+ } else {
+    // These instructions all use a one-bit size field, except XTN, SQXTUN,
+    // SHLL, SQXTN and UQXTN, which use a two-bit size field.
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: mnemonic = "fabs"; break;
+ case NEON_FNEG: mnemonic = "fneg"; break;
+ case NEON_FCVTN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTXN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTL:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl";
+ nfd.SetFormatMap(0, &map_cvt_ta);
+ nfd.SetFormatMap(1, &map_cvt_tb);
+ break;
+ case NEON_FRINTN: mnemonic = "frintn"; break;
+ case NEON_FRINTA: mnemonic = "frinta"; break;
+ case NEON_FRINTP: mnemonic = "frintp"; break;
+ case NEON_FRINTM: mnemonic = "frintm"; break;
+ case NEON_FRINTX: mnemonic = "frintx"; break;
+ case NEON_FRINTZ: mnemonic = "frintz"; break;
+ case NEON_FRINTI: mnemonic = "frinti"; break;
+ case NEON_FCVTNS: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU: mnemonic = "fcvtau"; break;
+ case NEON_FSQRT: mnemonic = "fsqrt"; break;
+ case NEON_SCVTF: mnemonic = "scvtf"; break;
+ case NEON_UCVTF: mnemonic = "ucvtf"; break;
+ case NEON_URSQRTE: mnemonic = "ursqrte"; break;
+ case NEON_URECPE: mnemonic = "urecpe"; break;
+ case NEON_FRSQRTE: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE: mnemonic = "frecpe"; break;
+ case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break;
+ case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break;
+ case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break;
+ case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break;
+ case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: mnemonic = "xtn"; break;
+ case NEON_SQXTN: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN: mnemonic = "sqxtun"; break;
+ case NEON_SHLL:
+ mnemonic = "shll";
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.IntegerFormatMap());
+ switch (instr->NEONSize()) {
+ case 0: form = "'Vd.%s, 'Vn.%s, #8"; break;
+ case 1: form = "'Vd.%s, 'Vn.%s, #16"; break;
+ case 2: form = "'Vd.%s, 'Vn.%s, #32"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ return;
+ } else {
+ form = "(NEON2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: mnemonic = "and"; break;
+ case NEON_ORR:
+ mnemonic = "orr";
+ if (instr->Rm() == instr->Rn()) {
+ mnemonic = "mov";
+ form = "'Vd.%s, 'Vn.%s";
+ }
+ break;
+ case NEON_ORN: mnemonic = "orn"; break;
+ case NEON_EOR: mnemonic = "eor"; break;
+ case NEON_BIC: mnemonic = "bic"; break;
+ case NEON_BIF: mnemonic = "bif"; break;
+ case NEON_BIT: mnemonic = "bit"; break;
+ case NEON_BSL: mnemonic = "bsl"; break;
+ default: form = "(NEON3Same)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ } else {
+ static const char *mnemonics[] = {
+ "shadd", "uhadd", "shadd", "uhadd",
+ "sqadd", "uqadd", "sqadd", "uqadd",
+ "srhadd", "urhadd", "srhadd", "urhadd",
+ NULL, NULL, NULL, NULL, // Handled by logical cases above.
+ "shsub", "uhsub", "shsub", "uhsub",
+ "sqsub", "uqsub", "sqsub", "uqsub",
+ "cmgt", "cmhi", "cmgt", "cmhi",
+ "cmge", "cmhs", "cmge", "cmhs",
+ "sshl", "ushl", "sshl", "ushl",
+ "sqshl", "uqshl", "sqshl", "uqshl",
+ "srshl", "urshl", "srshl", "urshl",
+ "sqrshl", "uqrshl", "sqrshl", "uqrshl",
+ "smax", "umax", "smax", "umax",
+ "smin", "umin", "smin", "umin",
+ "sabd", "uabd", "sabd", "uabd",
+ "saba", "uaba", "saba", "uaba",
+ "add", "sub", "add", "sub",
+ "cmtst", "cmeq", "cmtst", "cmeq",
+ "mla", "mls", "mla", "mls",
+ "mul", "pmul", "mul", "pmul",
+ "smaxp", "umaxp", "smaxp", "umaxp",
+ "sminp", "uminp", "sminp", "uminp",
+ "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh",
+ "addp", "unallocated", "addp", "unallocated",
+ "fmaxnm", "fmaxnmp", "fminnm", "fminnmp",
+ "fmla", "unallocated", "fmls", "unallocated",
+ "fadd", "faddp", "fsub", "fabd",
+ "fmulx", "fmul", "unallocated", "unallocated",
+ "fcmeq", "fcmge", "unallocated", "fcmgt",
+ "unallocated", "facge", "unallocated", "facgt",
+ "fmax", "fmaxp", "fmin", "fminp",
+ "frecps", "fdiv", "frsqrts", "unallocated"};
+
+ // Operation is determined by the opcode bits (15-11), the top bit of
+ // size (23) and the U bit (29).
+ unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) |
+ instr->Bit(29);
+ VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0])));
+ mnemonic = mnemonics[index];
+ // Assert that index is not one of the previously handled logical
+ // instructions.
+ VIXL_ASSERT(mnemonic != NULL);
+
+ if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Different(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+
+ NEONFormatDecoder nfd(instr);
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
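+  // The destination defaults to the long (widening) arrangement; the wide
+  // (saddw/ssubw/uaddw/usubw) and narrowing (addhn/subhn and friends) cases
+  // below adjust the source and destination maps accordingly.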
+
+  // Ignore the Q bit. Appending a "2" suffix for the Q forms is handled by
+  // nfd.Mnemonic() when formatting below.
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) {
+ case NEON_PMULL: mnemonic = "pmull"; break;
+ case NEON_SABAL: mnemonic = "sabal"; break;
+ case NEON_SABDL: mnemonic = "sabdl"; break;
+ case NEON_SADDL: mnemonic = "saddl"; break;
+ case NEON_SMLAL: mnemonic = "smlal"; break;
+ case NEON_SMLSL: mnemonic = "smlsl"; break;
+ case NEON_SMULL: mnemonic = "smull"; break;
+ case NEON_SSUBL: mnemonic = "ssubl"; break;
+ case NEON_SQDMLAL: mnemonic = "sqdmlal"; break;
+ case NEON_SQDMLSL: mnemonic = "sqdmlsl"; break;
+ case NEON_SQDMULL: mnemonic = "sqdmull"; break;
+ case NEON_UABAL: mnemonic = "uabal"; break;
+ case NEON_UABDL: mnemonic = "uabdl"; break;
+ case NEON_UADDL: mnemonic = "uaddl"; break;
+ case NEON_UMLAL: mnemonic = "umlal"; break;
+ case NEON_UMLSL: mnemonic = "umlsl"; break;
+ case NEON_UMULL: mnemonic = "umull"; break;
+ case NEON_USUBL: mnemonic = "usubl"; break;
+ case NEON_SADDW:
+ mnemonic = "saddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_SSUBW:
+ mnemonic = "ssubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_UADDW:
+ mnemonic = "uaddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_USUBW:
+ mnemonic = "usubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_ADDHN:
+ mnemonic = "addhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RADDHN:
+ mnemonic = "raddhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RSUBHN:
+ mnemonic = "rsubhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_SUBHN:
+ mnemonic = "subhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ default: form = "(NEON3Different)";
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(),
+ NEONFormatDecoder::IntegerFormatMap());
+
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.FPFormatMap());
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: mnemonic = "fmaxv"; break;
+ case NEON_FMINV: mnemonic = "fminv"; break;
+ case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break;
+ case NEON_FMINNMV: mnemonic = "fminnmv"; break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) {
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: mnemonic = "addv"; break;
+ case NEON_SMAXV: mnemonic = "smaxv"; break;
+ case NEON_SMINV: mnemonic = "sminv"; break;
+ case NEON_UMAXV: mnemonic = "umaxv"; break;
+ case NEON_UMINV: mnemonic = "uminv"; break;
+ case NEON_SADDLV:
+ mnemonic = "saddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ case NEON_UADDLV:
+ mnemonic = "uaddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ bool l_instr = false;
+ bool fp_instr = false;
+
+ const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
+
+ static const NEONFormatMap map_ta = {
+ {23, 22}, {NF_UNDEF, NF_4S, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map_ta,
+ NEONFormatDecoder::IntegerFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_SMULL_byelement: mnemonic = "smull"; l_instr = true; break;
+ case NEON_UMULL_byelement: mnemonic = "umull"; l_instr = true; break;
+ case NEON_SMLAL_byelement: mnemonic = "smlal"; l_instr = true; break;
+ case NEON_UMLAL_byelement: mnemonic = "umlal"; l_instr = true; break;
+ case NEON_SMLSL_byelement: mnemonic = "smlsl"; l_instr = true; break;
+ case NEON_UMLSL_byelement: mnemonic = "umlsl"; l_instr = true; break;
+ case NEON_SQDMULL_byelement: mnemonic = "sqdmull"; l_instr = true; break;
+ case NEON_SQDMLAL_byelement: mnemonic = "sqdmlal"; l_instr = true; break;
+ case NEON_SQDMLSL_byelement: mnemonic = "sqdmlsl"; l_instr = true; break;
+ case NEON_MUL_byelement: mnemonic = "mul"; break;
+ case NEON_MLA_byelement: mnemonic = "mla"; break;
+ case NEON_MLS_byelement: mnemonic = "mls"; break;
+ case NEON_SQDMULH_byelement: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break;
+ default:
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: mnemonic = "fmul"; fp_instr = true; break;
+ case NEON_FMLA_byelement: mnemonic = "fmla"; fp_instr = true; break;
+ case NEON_FMLS_byelement: mnemonic = "fmls"; fp_instr = true; break;
+ case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break;
+ }
+ }
+
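+  // Long (widening) forms keep the 4S/2D destination map and may gain a "2"
+  // suffix via nfd.Mnemonic(); FP and plain integer forms switch the
+  // destination map to match the element size.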
+ if (l_instr) {
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ } else if (fp_instr) {
+ nfd.SetFormatMap(0, nfd.FPFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ } else {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ }
+}
+
+
+void Disassembler::VisitNEONCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(),
+ NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]";
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Vd.%s['IVInsIndex1], 'Xn";
+ } else {
+ form = "'Vd.%s['IVInsIndex1], 'Wn";
+ }
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
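+    // umov of a 32-bit lane to a W register, or of a 64-bit lane to an X
+    // register, is disassembled using its preferred 'mov' alias.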
+ if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) {
+ mnemonic = "mov";
+ } else {
+ mnemonic = "umov";
+ }
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Xd, 'Vn.%s['IVInsIndex1]";
+ } else {
+ form = "'Wd, 'Vn.%s['IVInsIndex1]";
+ }
+ } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) {
+ mnemonic = "smov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Rdq, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ mnemonic = "dup";
+ form = "'Vd.%s, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ mnemonic = "dup";
+ if (nfd.GetVectorFormat() == kFormat2D) {
+ form = "'Vd.%s, 'Xn";
+ } else {
+ form = "'Vd.%s, 'Wn";
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONExtract(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONExtract)";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ mnemonic = "ext";
+ form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStruct)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns]";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
+ const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+ case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStructPostIndex)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
+ const char *form_4v =
+ "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStruct)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
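+  // The S and D lane variants share an encoding and differ only in the low
+  // bit of the size field, hence the NEONLSSize() checks (and the static
+  // asserts) below.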
+ switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+ case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns]";
+ break;
+ case NEON_LD2_b:
+ case NEON_ST2_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD2_h:
+ case NEON_ST2_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD2_s:
+ case NEON_ST2_s:
+ VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d);
+ VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD2R:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+ break;
+ case NEON_LD3_b:
+ case NEON_ST3_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD3_h:
+ case NEON_ST3_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD3_s:
+ case NEON_ST3_s:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD3R:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+ break;
+ case NEON_LD4_b:
+ case NEON_ST4_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD4_h:
+ case NEON_ST4_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD4_s:
+ case NEON_ST4_s:
+ VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d);
+ VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD4R:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStructPostIndex)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s_post:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s_post:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R_post:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns], 'Xmz1";
+ break;
+ case NEON_LD2_b_post:
+ case NEON_ST2_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2";
+ break;
+ case NEON_ST2_h_post:
+ case NEON_LD2_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD2_s_post:
+ case NEON_ST2_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16";
+ break;
+ case NEON_LD2R_post:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2";
+ break;
+ case NEON_LD3_b_post:
+ case NEON_ST3_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3";
+ break;
+ case NEON_LD3_h_post:
+ case NEON_ST3_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6";
+ break;
+ case NEON_LD3_s_post:
+ case NEON_ST3_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmr3";
+ break;
+ case NEON_LD3R_post:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3";
+ break;
+ case NEON_LD4_b_post:
+ case NEON_ST4_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD4_h_post:
+ case NEON_ST4_h_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8";
+ break;
+ case NEON_LD4_s_post:
+ case NEON_ST4_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32";
+ break;
+ case NEON_LD4R_post:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1";
+
+ int cmode = instr->NEONCmode();
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op = instr->NEONModImmOp();
+
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} };
+ static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
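+  // cmode<3> == 0 selects the 32-bit shifted-immediate forms, 10xx the
+  // 16-bit forms, 110x the 32-bit "msl" form, and 111x the byte, 64-bit and
+  // floating-point immediates.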
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<3> == '1'.
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_h);
+ } else { // cmode<2> == '1'.
+ if (cmode_1 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2";
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<1> == '1'.
+ if (cmode_0 == 0) {
+ mnemonic = "movi";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImm8";
+ } else {
+ form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm";
+ }
+ } else { // cmode<0> == '1'
+ mnemonic = "fmov";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImmFPSingle";
+ nfd.SetFormatMap(0, &map_s);
+ } else {
+ if (q == 1) {
+ form = "'Vt.2d, 'IVMIImmFPDouble";
+ }
+ }
+ }
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn";
+ const char *form_0 = "%sd, %sn, #0";
+ const char *form_fp0 = "%sd, %sn, #0.0";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+    // These instructions all use a two-bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break;
+ case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break;
+ case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break;
+ case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break;
+ case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break;
+ case NEON_NEG_scalar: mnemonic = "neg"; break;
+ case NEON_SQNEG_scalar: mnemonic = "sqneg"; break;
+ case NEON_ABS_scalar: mnemonic = "abs"; break;
+ case NEON_SQABS_scalar: mnemonic = "sqabs"; break;
+ case NEON_SUQADD_scalar: mnemonic = "suqadd"; break;
+ case NEON_USQADD_scalar: mnemonic = "usqadd"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ } else {
+    // These instructions all use a one-bit size field, except SQXTUN, SQXTN
+    // and UQXTN, which use a two-bit size field.
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRSQRTE_scalar: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE_scalar: mnemonic = "frecpe"; break;
+ case NEON_SCVTF_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_scalar: mnemonic = "ucvtf"; break;
+ case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break;
+ case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break;
+ case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break;
+ case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break;
+ case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break;
+ case NEON_FRECPX_scalar: mnemonic = "frecpx"; break;
+ case NEON_FCVTNS_scalar: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU_scalar: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS_scalar: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU_scalar: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS_scalar: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU_scalar: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS_scalar: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU_scalar: mnemonic = "fcvtau"; break;
+ case NEON_FCVTXN_scalar:
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ mnemonic = "fcvtxn";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.ScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.LongScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN_scalar: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar : mnemonic = "sqdmlal"; break;
+ case NEON_SQDMLSL_scalar : mnemonic = "sqdmlsl"; break;
+ case NEON_SQDMULL_scalar : mnemonic = "sqdmull"; break;
+ default: form = "(NEONScalar3Diff)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FACGE_scalar: mnemonic = "facge"; break;
+ case NEON_FACGT_scalar: mnemonic = "facgt"; break;
+ case NEON_FCMEQ_scalar: mnemonic = "fcmeq"; break;
+ case NEON_FCMGE_scalar: mnemonic = "fcmge"; break;
+ case NEON_FCMGT_scalar: mnemonic = "fcmgt"; break;
+ case NEON_FMULX_scalar: mnemonic = "fmulx"; break;
+ case NEON_FRECPS_scalar: mnemonic = "frecps"; break;
+ case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break;
+ case NEON_FABD_scalar: mnemonic = "fabd"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: mnemonic = "add"; break;
+ case NEON_SUB_scalar: mnemonic = "sub"; break;
+ case NEON_CMEQ_scalar: mnemonic = "cmeq"; break;
+ case NEON_CMGE_scalar: mnemonic = "cmge"; break;
+ case NEON_CMGT_scalar: mnemonic = "cmgt"; break;
+ case NEON_CMHI_scalar: mnemonic = "cmhi"; break;
+ case NEON_CMHS_scalar: mnemonic = "cmhs"; break;
+ case NEON_CMTST_scalar: mnemonic = "cmtst"; break;
+ case NEON_UQADD_scalar: mnemonic = "uqadd"; break;
+ case NEON_SQADD_scalar: mnemonic = "sqadd"; break;
+ case NEON_UQSUB_scalar: mnemonic = "uqsub"; break;
+ case NEON_SQSUB_scalar: mnemonic = "sqsub"; break;
+ case NEON_USHL_scalar: mnemonic = "ushl"; break;
+ case NEON_SSHL_scalar: mnemonic = "sshl"; break;
+ case NEON_UQSHL_scalar: mnemonic = "uqshl"; break;
+ case NEON_SQSHL_scalar: mnemonic = "sqshl"; break;
+ case NEON_URSHL_scalar: mnemonic = "urshl"; break;
+ case NEON_SRSHL_scalar: mnemonic = "srshl"; break;
+ case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break;
+ case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break;
+ case NEON_SQDMULH_scalar: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ bool long_instr = false;
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar:
+ mnemonic = "sqdmull";
+ long_instr = true;
+ break;
+ case NEON_SQDMLAL_byelement_scalar:
+ mnemonic = "sqdmlal";
+ long_instr = true;
+ break;
+ case NEON_SQDMLSL_byelement_scalar:
+ mnemonic = "sqdmlsl";
+ long_instr = true;
+ break;
+ case NEON_SQDMULH_byelement_scalar:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break;
+ case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break;
+ case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break;
+ case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break;
+ default: form = "(NEONScalarByIndexedElement)";
+ }
+ }
+
+ if (long_instr) {
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(
+ form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONScalarCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ mnemonic = "mov";
+ form = "%sd, 'Vn.%s['IVInsIndex1]";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+ NEONFormatMap map = { {22}, {NF_2S, NF_2D} };
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map);
+
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: mnemonic = "addp"; break;
+ case NEON_FADDP_scalar: mnemonic = "faddp"; break;
+ case NEON_FMAXP_scalar: mnemonic = "fmaxp"; break;
+ case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break;
+ case NEON_FMINP_scalar: mnemonic = "fminp"; break;
+ case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break;
+ default: form = "(NEONScalarPairwise)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Is1";
+ const char *form_2 = "%sd, %sn, 'Is2";
+
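+  // immh (bits 22-19) selects the element size: 0001 -> B, 001x -> H,
+  // 01xx -> S, 1xxx -> D; immh == 0 is reserved and rejected below.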
+ static const NEONFormatMap map_shift = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
+ static const NEONFormatMap map_shift_narrow = {
+ {21, 20, 19},
+ {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map_shift);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_SCVTF_imm_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm_scalar: mnemonic = "ucvtf"; break;
+ case NEON_SRI_scalar: mnemonic = "sri"; break;
+ case NEON_SSHR_scalar: mnemonic = "sshr"; break;
+ case NEON_USHR_scalar: mnemonic = "ushr"; break;
+ case NEON_SRSHR_scalar: mnemonic = "srshr"; break;
+ case NEON_URSHR_scalar: mnemonic = "urshr"; break;
+ case NEON_SSRA_scalar: mnemonic = "ssra"; break;
+ case NEON_USRA_scalar: mnemonic = "usra"; break;
+ case NEON_SRSRA_scalar: mnemonic = "srsra"; break;
+ case NEON_URSRA_scalar: mnemonic = "ursra"; break;
+ case NEON_SHL_scalar: mnemonic = "shl"; form = form_2; break;
+ case NEON_SLI_scalar: mnemonic = "sli"; form = form_2; break;
+ case NEON_SQSHLU_scalar: mnemonic = "sqshlu"; form = form_2; break;
+ case NEON_SQSHL_imm_scalar: mnemonic = "sqshl"; form = form_2; break;
+ case NEON_UQSHL_imm_scalar: mnemonic = "uqshl"; form = form_2; break;
+ case NEON_UQSHRN_scalar:
+ mnemonic = "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_UQRSHRN_scalar:
+ mnemonic = "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRN_scalar:
+ mnemonic = "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRN_scalar:
+ mnemonic = "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRUN_scalar:
+ mnemonic = "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRUN_scalar:
+ mnemonic = "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ default:
+ form = "(NEONScalarShiftImmediate)";
+ }
+ } else {
+ form = "(NEONScalarShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Is1";
+ const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2";
+ const char *form_xtl = "'Vd.%s, 'Vn.%s";
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_shift_ta = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map_shift_tb = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+
+ NEONFormatDecoder nfd(instr, &map_shift_tb);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SQSHLU: mnemonic = "sqshlu"; form = form_shift_2; break;
+ case NEON_SQSHL_imm: mnemonic = "sqshl"; form = form_shift_2; break;
+ case NEON_UQSHL_imm: mnemonic = "uqshl"; form = form_shift_2; break;
+ case NEON_SHL: mnemonic = "shl"; form = form_shift_2; break;
+ case NEON_SLI: mnemonic = "sli"; form = form_shift_2; break;
+ case NEON_SCVTF_imm: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm: mnemonic = "ucvtf"; break;
+ case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break;
+ case NEON_SRI: mnemonic = "sri"; break;
+ case NEON_SSHR: mnemonic = "sshr"; break;
+ case NEON_USHR: mnemonic = "ushr"; break;
+ case NEON_SRSHR: mnemonic = "srshr"; break;
+ case NEON_URSHR: mnemonic = "urshr"; break;
+ case NEON_SSRA: mnemonic = "ssra"; break;
+ case NEON_USRA: mnemonic = "usra"; break;
+ case NEON_SRSRA: mnemonic = "srsra"; break;
+ case NEON_URSRA: mnemonic = "ursra"; break;
+ case NEON_SHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_RSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SSHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl";
+ } else { // sshll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll";
+ }
+ break;
+ case NEON_USHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl";
+ } else { // ushll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll";
+ }
+ break;
+ default: form = "(NEONShiftImmediate)";
+ }
+ } else {
+ form = "(NEONShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONTable(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONTable)";
+ const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s";
+ const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s";
+ const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ const char form_4v[] =
+ "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break;
+ case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break;
+ case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break;
+ case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break;
+ case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break;
+ case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break;
+ case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break;
+ case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break;
+ default: break;
+ }
+
+ char re_form[sizeof(form_4v) + 6];
+ int reg_num = instr->Rn();
+ snprintf(re_form, sizeof(re_form), form,
+ (reg_num + 1) % kNumberOfVRegisters,
+ (reg_num + 2) % kNumberOfVRegisters,
+ (reg_num + 3) % kNumberOfVRegisters);
+
+ Format(instr, mnemonic, nfd.Substitute(re_form));
+}
+
+
+void Disassembler::VisitNEONPerm(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: mnemonic = "trn1"; break;
+ case NEON_TRN2: mnemonic = "trn2"; break;
+ case NEON_UZP1: mnemonic = "uzp1"; break;
+ case NEON_UZP2: mnemonic = "uzp2"; break;
+ case NEON_ZIP1: mnemonic = "zip1"; break;
+ case NEON_ZIP2: mnemonic = "zip2"; break;
+ default: form = "(NEONPerm)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitUnimplemented(const Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(const Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(const Instruction* /*instr*/) {
+  // The base disassembler does nothing more than disassemble into a buffer.
+}
+
+
+void Disassembler::AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg) {
+ USE(instr);
+ VIXL_ASSERT(reg.IsValid());
+ char reg_char;
+
+ if (reg.IsRegister()) {
+ reg_char = reg.Is64Bits() ? 'x' : 'w';
+ } else {
+ VIXL_ASSERT(reg.IsVRegister());
+ switch (reg.SizeInBits()) {
+ case kBRegSize: reg_char = 'b'; break;
+ case kHRegSize: reg_char = 'h'; break;
+ case kSRegSize: reg_char = 's'; break;
+ case kDRegSize: reg_char = 'd'; break;
+ default:
+ VIXL_ASSERT(reg.Is128Bits());
+ reg_char = 'q';
+ }
+ }
+
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
+ // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
+ AppendToOutput("%c%d", reg_char, reg.code());
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_char);
+ }
+}
+
+
+void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset) {
+ USE(instr);
+ char sign = (offset < 0) ? '-' : '+';
+ AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
+}
+
+
+void Disassembler::AppendAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
+}
+
+
+void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ int64_t rel_addr = CodeRelativeAddress(addr);
+ if (rel_addr >= 0) {
+ AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+ } else {
+ AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+ }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+ const Instruction* instr_address) {
+ set_code_address_offset(
+ base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+ return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
+void Disassembler::Format(const Instruction* instr, const char* mnemonic,
+ const char* format) {
+ VIXL_ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(const Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
+int Disassembler::SubstituteField(const Instruction* instr,
+ const char* format) {
+ switch (format[0]) {
+    // NB. The remaining (unused) substitution prefix characters are: JUYZ.
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP register. S or D, selected by type field.
+ case 'V': // Vector register, V, vector format.
+ case 'W':
+ case 'X':
+ case 'B':
+ case 'H':
+ case 'S':
+ case 'D':
+ case 'Q': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'N': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'T': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ case 'K': return SubstituteCrField(instr, format);
+ case 'G': return SubstituteSysOpField(instr, format);
+ default: {
+ VIXL_UNREACHABLE();
+ return 1;
+ }
+ }
+}
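+
+// An illustration of the substitution mechanism above (the format string is a
+// simplified sketch, not taken from a particular visitor): for "'Rd, 'Rn, 'Rm",
+// Substitute() copies the literal ", " separators and dispatches each 'R token
+// (via SubstituteField()) to SubstituteRegisterField(), so a 64-bit
+// instruction with Rd=0, Rn=1 and Rm=2 is printed as "x0, x1, x2".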
+
+
+int Disassembler::SubstituteRegisterField(const Instruction* instr,
+ const char* format) {
+ char reg_prefix = format[0];
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+
+ switch (format[1]) {
+ case 'd':
+ reg_num = instr->Rd();
+ if (format[2] == 'q') {
+ reg_prefix = instr->NEONQ() ? 'X' : 'W';
+ field_len = 3;
+ }
+ break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm':
+ reg_num = instr->Rm();
+ switch (format[2]) {
+        // Handle registers tagged with b, z or r. The tag selects how the
+        // post-index amount is scaled when Rm is 31 (i.e. an immediate
+        // update): b = bytes, z = scaled by the element size, r = scaled by
+        // the vector register size. Used in NEON load/store instructions.
+ case 'r':
+ case 'b':
+ case 'z': {
+ field_len = 3;
+ char* eimm;
+ int imm = static_cast<int>(strtol(&format[3], &eimm, 10));
+ field_len += eimm - &format[3];
+ if (reg_num == 31) {
+ switch (format[2]) {
+ case 'z':
+ imm *= (1 << instr->NEONLSSize());
+ break;
+ case 'r':
+ imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes
+ : kQRegSizeInBytes;
+ break;
+ case 'b':
+ break;
+ }
+ AppendToOutput("#%d", imm);
+ return field_len;
+ }
+ break;
+ }
+ }
+ break;
+ case 'e':
+ // This is register Rm, but using a 4-bit specifier. Used in NEON
+ // by-element instructions.
+ reg_num = (instr->Rm() & 0xf);
+ break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 's': reg_num = instr->Rs(); break;
+ case 't':
+ reg_num = instr->Rt();
+ if (format[0] == 'V') {
+ if ((format[2] >= '2') && (format[2] <= '4')) {
+ // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4.
+ reg_num = (reg_num + format[2] - '1') % 32;
+ field_len = 3;
+ }
+ } else {
+ if (format[2] == '2') {
+ // Handle register specifier Rt2.
+ reg_num = instr->Rt2();
+ field_len = 3;
+ }
+ }
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ CPURegister::RegisterType reg_type = CPURegister::kRegister;
+ unsigned reg_size = kXRegSize;
+
+ if (reg_prefix == 'R') {
+ reg_prefix = instr->SixtyFourBits() ? 'X' : 'W';
+ } else if (reg_prefix == 'F') {
+ reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D';
+ }
+
+ switch (reg_prefix) {
+ case 'W':
+ reg_type = CPURegister::kRegister; reg_size = kWRegSize; break;
+ case 'X':
+ reg_type = CPURegister::kRegister; reg_size = kXRegSize; break;
+ case 'B':
+ reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break;
+ case 'H':
+ reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break;
+ case 'S':
+ reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break;
+ case 'D':
+ reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break;
+ case 'Q':
+ reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break;
+ case 'V':
+ AppendToOutput("v%d", reg_num);
+ return field_len;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ if ((reg_type == CPURegister::kRegister) &&
+ (reg_num == kZeroRegCode) && (format[2] == 's')) {
+ reg_num = kSPRegInternalCode;
+ }
+
+ AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type));
+
+ return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
+ if (format[5] == 'L') {
+ AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide());
+ }
+ } else {
+ VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+ uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide()) <<
+ (16 * instr->ShiftMoveWide());
+ if (format[5] == 'N')
+ imm = ~imm;
+ if (!instr->SixtyFourBits())
+ imm &= UINT64_C(0xffffffff);
+ AppendToOutput("#0x%" PRIx64, imm);
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId32,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId32, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = 1 << (format[3] - '0');
+ AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ int shift = instr->SizeLS();
+ AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift);
+ }
+ return 3;
+ }
+ }
+ }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ VIXL_ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFbits.
+ AppendToOutput("#%" PRId32, 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%" PRId32, instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%" PRId32, instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 's': { // Is - Shift (immediate).
+ switch (format[2]) {
+ case '1': { // Is1 - SSHR.
+ int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh());
+ shift -= instr->ImmNEONImmhImmb();
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ case '2': { // Is2 - SLI.
+ int shift = instr->ImmNEONImmhImmb();
+ shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh());
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%" PRIx32, instr->ImmException());
+ return 6;
+ }
+ case 'V': { // Immediate Vector.
+ switch (format[2]) {
+ case 'E': { // IVExtract.
+ AppendToOutput("#%" PRId32, instr->ImmNEONExt());
+ return 9;
+ }
+ case 'B': { // IVByElemIndex.
+ int vm_index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ vm_index = (vm_index << 1) | instr->NEONM();
+ }
+ AppendToOutput("%d", vm_index);
+ return strlen("IVByElemIndex");
+ }
+ case 'I': { // INS element.
+ if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
+ int rd_index, rn_index;
+ int imm5 = instr->ImmNEON5();
+ int imm4 = instr->ImmNEON4();
+ int tz = CountTrailingZeros(imm5, 32);
+ rd_index = imm5 >> (tz + 1);
+ rn_index = imm4 >> tz;
+ if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
+ AppendToOutput("%d", rd_index);
+ return strlen("IVInsIndex1");
+ } else if (strncmp(format, "IVInsIndex2",
+ strlen("IVInsIndex2")) == 0) {
+ AppendToOutput("%d", rn_index);
+ return strlen("IVInsIndex2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ VIXL_FALLTHROUGH();
+ }
+ case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
+ AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
+ return 9;
+ }
+ case 'M': { // Modified Immediate cases.
+ if (strncmp(format,
+ "IVMIImmFPSingle",
+ strlen("IVMIImmFPSingle")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP32());
+ return strlen("IVMIImmFPSingle");
+ } else if (strncmp(format,
+ "IVMIImmFPDouble",
+ strlen("IVMIImmFPDouble")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP64());
+ return strlen("IVMIImmFPDouble");
+ } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ AppendToOutput("#0x%" PRIx64, imm8);
+ return strlen("IVMIImm8");
+ } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ uint64_t imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1 << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ AppendToOutput("#0x%" PRIx64, imm);
+ return strlen("IVMIImm");
+ } else if (strncmp(format, "IVMIShiftAmt1",
+ strlen("IVMIShiftAmt1")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 * ((cmode >> 1) & 3);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt1");
+ } else if (strncmp(format, "IVMIShiftAmt2",
+ strlen("IVMIShiftAmt2")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 << (cmode & 1);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'X': { // IX - CLREX instruction.
+ AppendToOutput("#0x%" PRIx32, instr->CRm());
+ return 2;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ VIXL_ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ const void * address = instr->LiteralAddress<const void *>();
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDRSW_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit:
+ case LDR_q_lit:
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case PRFM_lit: {
+ // Use the prefetch hint to decide how to print the address.
+ switch (instr->PrefetchHint()) {
+ case 0x0: // PLD: prefetch for load.
+ case 0x2: // PST: prepare for store.
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case 0x1: // PLI: preload instructions.
+ AppendCodeRelativeCodeAddressToOutput(instr, address);
+ break;
+ case 0x3: // Unallocated hint.
+ AppendCodeRelativeAddressToOutput(instr, address);
+ break;
+ }
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'N');
+ VIXL_ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+    case 'D': { // NDP.
+ VIXL_ASSERT(instr->ShiftDP() != ROR);
+ VIXL_FALLTHROUGH();
+ }
+    case 'L': { // NLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`.
+ (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
+
+ int64_t offset = instr->ImmPCRel();
+
+ // Compute the target address based on the effective address (after applying
+ // code_address_offset). This is required for correct behaviour of adrp.
+ const Instruction* base = instr + code_address_offset();
+ if (format[9] == 'P') {
+ offset *= kPageSize;
+ base = AlignDown(base, kPageSize);
+ }
+ // Strip code_address_offset before printing, so we can use the
+ // semantically-correct AppendCodeRelativeAddressToOutput.
+ const void* target =
+ reinterpret_cast<const void*>(base + offset - code_address_offset());
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeAddressToOutput(instr, target);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "TImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+    // TImmUncn - unconditional branch immediate.
+    case 'n': offset = instr->ImmUncondBranch(); break;
+    // TImmCond - conditional branch immediate.
+    case 'o': offset = instr->ImmCondBranch(); break;
+    // TImmCmpa - compare and branch immediate.
+    case 'm': offset = instr->ImmCmpBranch(); break;
+    // TImmTest - test and branch immediate.
+    case 'e': offset = instr->ImmTestBranch(); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ offset <<= kInstructionSizeLog2;
+ const void* target_address = reinterpret_cast<const void*>(instr + offset);
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeCodeAddressToOutput(instr, target_address);
+
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+ VIXL_ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+ // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+ // registers becomes lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%" PRId32, instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'P');
+ USE(format);
+
+ static const char* hints[] = {"ld", "li", "st"};
+ static const char* stream_options[] = {"keep", "strm"};
+
+ unsigned hint = instr->PrefetchHint();
+ unsigned target = instr->PrefetchTarget() + 1;
+ unsigned stream = instr->PrefetchStream();
+
+ if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+ // Unallocated prefetch operations.
+ int prefetch_mode = instr->ImmPrefetchOperation();
+ AppendToOutput("#0b%c%c%c%c%c",
+ (prefetch_mode & (1 << 4)) ? '1' : '0',
+ (prefetch_mode & (1 << 3)) ? '1' : '0',
+ (prefetch_mode & (1 << 2)) ? '1' : '0',
+ (prefetch_mode & (1 << 1)) ? '1' : '0',
+ (prefetch_mode & (1 << 0)) ? '1' : '0');
+ } else {
+ VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+ AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+ }
+ return 6;
+}
+
+int Disassembler::SubstituteBarrierField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
+
+int Disassembler::SubstituteSysOpField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'G');
+ int op = -1;
+ switch (format[1]) {
+ case '1': op = instr->SysOp1(); break;
+ case '2': op = instr->SysOp2(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("#%d", op);
+ return 2;
+}
+
+int Disassembler::SubstituteCrField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'K');
+ int cr = -1;
+ switch (format[1]) {
+ case 'n': cr = instr->CRn(); break;
+ case 'm': cr = instr->CRm(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("C%d", cr);
+ return 2;
+}
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+ format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(const Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr),
+ instr->InstructionBits(),
+ GetOutput());
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.h b/js/src/jit/arm64/vixl/Disasm-vixl.h
new file mode 100644
index 000000000..1f2cd81fe
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.h
@@ -0,0 +1,177 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DISASM_A64_H
+#define VIXL_A64_DISASM_A64_H
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr);
+
+ // Default output functions. The functions below implement a default way of
+ // printing elements in the disassembly. A sub-class can override these to
+ // customize the disassembly output.
+
+ // Prints the name of a register.
+ // TODO: This currently doesn't allow renaming of V registers.
+ virtual void AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg);
+
+ // Prints a PC-relative offset. This is used for example when disassembling
+ // branches to immediate offsets.
+ virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset);
+
+ // Prints an address, in the general case. It can be code or data. This is
+ // used for example to print the target address of an ADR instruction.
+ virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some code.
+ // This is used for example to print the target address of a branch to an
+ // immediate offset.
+ // A sub-class can for example override this method to lookup the address and
+ // print an appropriate name.
+ virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some data.
+ // This is used for example to print the source address of a load literal
+ // instruction.
+ virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Same as the above, but for addresses that are not relative to the code
+ // buffer. They are currently not used by VIXL.
+ virtual void AppendAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ public:
+ // Get/Set the offset that should be added to code addresses when printing
+ // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
+ // helpers.
+ // Below is an example of how a branch immediate instruction in memory at
+ // address 0xb010200 would disassemble with different offsets.
+ // Base address | Disassembly
+ // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
+ // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
+ // 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
+ void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
+ int64_t CodeRelativeAddress(const void* instr);
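+  //
+  // A usage sketch (disasm, buffer_start and final_base are illustrative
+  // names, not part of this API): if code is assembled into a local buffer at
+  // buffer_start but will run at final_base, calling
+  //   disasm.MapCodeAddress(final_base, buffer_start);
+  // before decoding makes the "(addr ...)" annotations show run-time
+  // addresses rather than addresses inside the local buffer.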
+
+ private:
+ void Format(
+ const Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(const Instruction* instr, const char* string);
+ int SubstituteField(const Instruction* instr, const char* format);
+ int SubstituteRegisterField(const Instruction* instr, const char* format);
+ int SubstituteImmediateField(const Instruction* instr, const char* format);
+ int SubstituteLiteralField(const Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(
+ const Instruction* instr, const char* format);
+ int SubstituteShiftField(const Instruction* instr, const char* format);
+ int SubstituteExtendField(const Instruction* instr, const char* format);
+ int SubstituteConditionField(const Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(const Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
+ int SubstitutePrefetchField(const Instruction* instr, const char* format);
+ int SubstituteBarrierField(const Instruction* instr, const char* format);
+ int SubstituteSysOpField(const Instruction* instr, const char* format);
+ int SubstituteCrField(const Instruction* instr, const char* format);
+ bool RdIsZROrSP(const Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(const Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(const Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(const Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ int64_t code_address_offset() const { return code_address_offset_; }
+
+ protected:
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
+
+ void set_code_address_offset(int64_t code_address_offset) {
+ code_address_offset_ = code_address_offset;
+ }
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+
+ int64_t code_address_offset_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+} // namespace vixl
+
+#endif // VIXL_A64_DISASM_A64_H
diff --git a/js/src/jit/arm64/vixl/Globals-vixl.h b/js/src/jit/arm64/vixl/Globals-vixl.h
new file mode 100644
index 000000000..8a7418eb8
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Globals-vixl.h
@@ -0,0 +1,122 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_GLOBALS_H
+#define VIXL_GLOBALS_H
+
+// Get standard C99 macros for integer types.
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "mozilla/Assertions.h"
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "js-config.h"
+
+#include "jit/arm64/vixl/Platform-vixl.h"
+#include "js/Utility.h"
+
+
+typedef uint8_t byte;
+
+// Type for half-precision (16 bit) floating point numbers.
+typedef uint16_t float16;
+
+const int KBytes = 1024;
+const int MBytes = 1024 * KBytes;
+
+#define VIXL_ABORT() \
+ do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false)
+#ifdef DEBUG
+ #define VIXL_ASSERT(condition) MOZ_ASSERT(condition)
+ #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
+ #define VIXL_UNIMPLEMENTED() \
+ do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false)
+ #define VIXL_UNREACHABLE() \
+ do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false)
+#else
+ #define VIXL_ASSERT(condition) ((void) 0)
+ #define VIXL_CHECK(condition) ((void) 0)
+ #define VIXL_UNIMPLEMENTED() ((void) 0)
+ #define VIXL_UNREACHABLE() ((void) 0)
+#endif
+// This is not as powerful as template-based assertions, but it is simple.
+// It relies on assertion lines being unique within each file. If this starts
+// being a problem, we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b
+#define VIXL_STATIC_ASSERT_LINE(line, condition) \
+ typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+ __attribute__((unused))
+#define VIXL_STATIC_ASSERT(condition) \
+ VIXL_STATIC_ASSERT_LINE(__LINE__, condition)
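+
+// For example, VIXL_STATIC_ASSERT(sizeof(Instr) == 4) expands to a typedef of
+// a one-element char array when the condition holds, and of a negative-sized
+// array (a compile error) when it does not; the __LINE__ concatenation keeps
+// the typedef names unique.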
+
+template <typename T1>
+inline void USE(T1) {}
+
+template <typename T1, typename T2>
+inline void USE(T1, T2) {}
+
+template <typename T1, typename T2, typename T3>
+inline void USE(T1, T2, T3) {}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline void USE(T1, T2, T3, T4) {}
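+
+// USE() marks deliberately unused values so the compiler does not warn about
+// them; for example, the disassembler's default Append*ToOutput()
+// implementations call USE(instr) on parameters they ignore.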
+
+#define VIXL_ALIGNMENT_EXCEPTION() \
+ do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0)
+
+// The clang::fallthrough attribute is used along with the
+// -Wimplicit-fallthrough warning flag to annotate intentional fall-through
+// between switch labels. For more information, please refer to:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+#ifndef __has_warning
+ #define __has_warning(x) 0
+#endif
+
+// Note: This attribute is only available in Clang, and is only enabled when
+// compiling as C++11 (201103L) or later.
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+ #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT
+#else
+ #define VIXL_FALLTHROUGH() do {} while (0)
+#endif
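+//
+// Typical use (see, e.g., SubstituteShiftField() in Disasm-vixl.cpp): place
+// VIXL_FALLTHROUGH(); as the last statement of a case body that is meant to
+// fall into the next label, silencing the warning without disabling it
+// globally.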
+
+#endif // VIXL_GLOBALS_H
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.cpp b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
new file mode 100644
index 000000000..25b8e307d
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
@@ -0,0 +1,670 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+namespace vixl {
+
+
+// Floating-point infinity values.
+const float16 kFP16PositiveInfinity = 0x7c00;
+const float16 kFP16NegativeInfinity = 0xfc00;
+const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
+const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
+const double kFP64PositiveInfinity =
+ rawbits_to_double(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+ rawbits_to_double(UINT64_C(0xfff0000000000000));
+
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
+const float16 kFP16DefaultNaN = 0x7e00;
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ VIXL_ASSERT(width <= 64);
+ rotate &= 63;
+ return ((value & ((UINT64_C(1) << rotate) - 1)) <<
+ (width - rotate)) | (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((UINT64_C(1) << width) - 1);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_b:
+ case LDR_h:
+ case LDR_s:
+ case LDR_d:
+ case LDR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_b:
+ case STR_h:
+ case STR_s:
+ case STR_d:
+ case STR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case. Specifically, where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() const {
+ unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+ int32_t n = BitN();
+ int32_t imm_s = ImmSetBits();
+ int32_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
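+  // As a worked example (derived from the table above rather than from any
+  // particular caller): n = 0, imm_s = 0b110000, imm_r = 0b000001 selects an
+  // 8-bit element with S + 1 = 1 bit set (0b00000001), rotated right by 1 to
+  // give 0b10000000, then repeated across the register: 0x80808080 for a W
+  // destination.
+  //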
+
+ if (n == 1) {
+ if (imm_s == 0x3f) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1f) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+}
+
+
+uint32_t Instruction::ImmNEONabcdefgh() const {
+ return ImmNEONabc() << 5 | ImmNEONdefgh();
+}
+
+
+float Instruction::Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
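+  // For example (worked from the mapping above), imm8 = 0x70 expands to
+  // 0x3f800000, i.e. 1.0f.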
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+float Instruction::ImmFP32() const {
+ return Imm8ToFP32(ImmFP());
+}
+
+
+double Instruction::Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
+
+
+double Instruction::ImmFP64() const {
+ return Imm8ToFP64(ImmFP());
+}
+
+
+float Instruction::ImmNEONFP32() const {
+ return Imm8ToFP32(ImmNEONabcdefgh());
+}
+
+
+double Instruction::ImmNEONFP64() const {
+ return Imm8ToFP64(ImmNEONabcdefgh());
+}
+
+
+unsigned CalcLSDataSize(LoadStoreOp op) {
+ VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ unsigned size = static_cast<Instr>(op) >> LSSize_offset;
+ if ((op & LSVector_mask) != 0) {
+ // Vector register memory operations encode the access size in the "size"
+ // and "opc" fields.
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+ size = kQRegSizeInBytesLog2;
+ }
+ }
+ return size;
+}
+
+
+unsigned CalcLSPairDataSize(LoadStorePairOp op) {
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ switch (op) {
+ case STP_q:
+ case LDP_q: return kQRegSizeInBytesLog2;
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return kXRegSizeInBytesLog2;
+ default: return kWRegSizeInBytesLog2;
+ }
+}
+
+
+int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
+ int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
+ return encoded_max * kInstructionSize;
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int64_t offset) {
+ return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
+}
+
+ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
+{
+ switch (branch_type) {
+ case UncondBranchType:
+ return UncondBranchRangeType;
+ case CondBranchType:
+ case CompareBranchType:
+ return CondBranchRangeType;
+ case TestBranchType:
+ return TestBranchRangeType;
+ default:
+ return UnknownBranchRangeType;
+ }
+}
+
+int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)
+{
+ // Branches encode a pc-relative two's complement number of 32-bit
+ // instructions. Compute the number of bytes corresponding to the largest
+ // positive number of instructions that can be encoded.
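+  // For example, conditional branches use a 19-bit immediate
+  // (ImmCondBranch_width, per the A64 encoding), so the largest forward
+  // offset is ((1 << 19) - 1) / 2 * 4 = 1048572 bytes, just under 1 MiB.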
+ switch(range_type) {
+ case TestBranchRangeType:
+ return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize;
+ case CondBranchRangeType:
+ return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize;
+ case UncondBranchRangeType:
+ return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)
+{
+ switch(range_type) {
+ case TestBranchRangeType:
+ return -int32_t(1 << ImmTestBranch_width) / 2 * kInstructionSize;
+ case CondBranchRangeType:
+ return -int32_t(1 << ImmCondBranch_width) / 2 * kInstructionSize;
+ case UncondBranchRangeType:
+ return -int32_t(1 << ImmUncondBranch_width) / 2 * kInstructionSize;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+const Instruction* Instruction::ImmPCOffsetTarget() const {
+ const Instruction * base = this;
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ if (Mask(PCRelAddressingMask) == ADRP) {
+ base = AlignDown(base, kPageSize);
+ offset *= kPageSize;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
+ }
+ } else {
+ // All PC-relative branches.
+ VIXL_ASSERT(BranchType() != UnknownBranchType);
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ }
+ return base + offset;
+}
+
+
+int Instruction::ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: VIXL_UNREACHABLE();
+ }
+ return 0;
+}
+
+
+void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else {
+ SetBranchImmTarget(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(const Instruction* target) {
+ ptrdiff_t imm21;
+ if ((Mask(PCRelAddressingMask) == ADR)) {
+ imm21 = target - this;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
+ uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
+ uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
+ imm21 = target_page - this_page;
+ }
+ Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(const Instruction* target) {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(const Instruction* source) {
+ VIXL_ASSERT(IsWordAligned(source));
+ ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+ vform == kFormatH || vform == kFormatS || vform == kFormatD);
+ switch (vform) {
+ case kFormat8H: return kFormat8B;
+ case kFormat4S: return kFormat4H;
+ case kFormat2D: return kFormat2S;
+ case kFormatH: return kFormatB;
+ case kFormatS: return kFormatH;
+ case kFormatD: return kFormatS;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+ vform == kFormatB || vform == kFormatH || vform == kFormatS);
+ switch (vform) {
+ case kFormat8B: return kFormat8H;
+ case kFormat4H: return kFormat4S;
+ case kFormat2S: return kFormat2D;
+ case kFormatB: return kFormatH;
+ case kFormatH: return kFormatS;
+ case kFormatS: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatFillQ(const VectorFormat vform) {
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return kFormat16B;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return kFormat8H;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return kFormat4S;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return kFormat2D;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
+ switch (vform) {
+ case kFormat4H: return kFormat8B;
+ case kFormat8H: return kFormat16B;
+ case kFormat2S: return kFormat4H;
+ case kFormat4S: return kFormat8H;
+ case kFormat1D: return kFormat2S;
+ case kFormat2D: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+ switch (vform) {
+ case kFormat8B: return kFormat16B;
+ case kFormat4H: return kFormat8H;
+ case kFormat2S: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+ switch (vform) {
+ case kFormat16B: return kFormat8B;
+ case kFormat8H: return kFormat4H;
+ case kFormat4S: return kFormat2S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+ switch (laneSize) {
+ case 8: return kFormatB;
+ case 16: return kFormatH;
+ case 32: return kFormatS;
+ case 64: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB: return kBRegSize;
+ case kFormatH: return kHRegSize;
+ case kFormatS: return kSRegSize;
+ case kFormatD: return kDRegSize;
+ case kFormat8B:
+ case kFormat4H:
+ case kFormat2S:
+ case kFormat1D: return kDRegSize;
+ default: return kQRegSize;
+ }
+}
+
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+ return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 8;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 16;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 32;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 64;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+ return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 0;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 1;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 2;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 3;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormat16B: return 16;
+ case kFormat8B:
+ case kFormat8H: return 8;
+ case kFormat4H:
+ case kFormat4S: return 4;
+ case kFormat2S:
+ case kFormat2D: return 2;
+ case kFormat1D:
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return 1;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 16;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 8;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 4;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 2;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return false;
+ default: return true;
+ }
+}
+
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+ return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+ return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+ return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+} // namespace vixl
+
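The lane-range helpers immediately above (MaxIntFromFormat, MinIntFromFormat, MaxUintFromFormat) derive their limits purely from the lane width by shifting the 64-bit extremes right by (64 - lane bits). A minimal standalone sketch of the same arithmetic, illustrative only and not part of the patch (it relies on the sign-extending right shift of negative values that the VIXL code above also assumes):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const unsigned lane_bits = 8;                          // a B-sized lane
  int64_t max_int   = INT64_MAX  >> (64 - lane_bits);    // 127
  int64_t min_int   = INT64_MIN  >> (64 - lane_bits);    // -128 (arithmetic shift)
  uint64_t max_uint = UINT64_MAX >> (64 - lane_bits);    // 255
  printf("%" PRId64 " %" PRId64 " %" PRIu64 "\n", max_int, min_int, max_uint);
  return 0;
}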
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.h b/js/src/jit/arm64/vixl/Instructions-vixl.h
new file mode 100644
index 000000000..d55b6d75d
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.h
@@ -0,0 +1,830 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
+#define VIXL_A64_INSTRUCTIONS_A64_H_
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MBytes;
+
+// This is the nominal page size (as used by the adrp instruction); the actual
+// size of the memory pages allocated by the kernel is likely to differ.
+const unsigned kPageSize = 4 * KBytes;
+const unsigned kPageSizeLog2 = 12;
+
+const unsigned kBRegSize = 8;
+const unsigned kBRegSizeLog2 = 3;
+const unsigned kBRegSizeInBytes = kBRegSize / 8;
+const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
+const unsigned kHRegSize = 16;
+const unsigned kHRegSizeLog2 = 4;
+const unsigned kHRegSizeInBytes = kHRegSize / 8;
+const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize / 8;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const unsigned kQRegSize = 128;
+const unsigned kQRegSizeLog2 = 7;
+const unsigned kQRegSizeInBytes = kQRegSize / 8;
+const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
+const uint64_t kWRegMask = UINT64_C(0xffffffff);
+const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSRegMask = UINT64_C(0xffffffff);
+const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSSignMask = UINT64_C(0x80000000);
+const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kWSignMask = UINT64_C(0x80000000);
+const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kByteMask = UINT64_C(0xff);
+const uint64_t kHalfWordMask = UINT64_C(0xffff);
+const uint64_t kWordMask = UINT64_C(0xffffffff);
+const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
+const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
+const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
+const int64_t kXMinInt = INT64_C(0x8000000000000000);
+const int32_t kWMaxInt = INT32_C(0x7fffffff);
+const int32_t kWMinInt = INT32_C(0x80000000);
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask =
+ ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
+VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+// Floating-point infinity values.
+extern const float16 kFP16PositiveInfinity;
+extern const float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// The default NaN values (for FPCR.DN=1).
+extern const float16 kFP16DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const double kFP64DefaultNaN;
+
+unsigned CalcLSDataSize(LoadStoreOp op);
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+// The classes of immediate branch ranges, in order of increasing range.
+// Note that CondBranchType and CompareBranchType have the same range.
+enum ImmBranchRangeType {
+ TestBranchRangeType, // tbz/tbnz: imm14 = +/- 32KB.
+ CondBranchRangeType, // b.cond/cbz/cbnz: imm19 = +/- 1MB.
+ UncondBranchRangeType, // b/bl: imm26 = +/- 128MB.
+ UnknownBranchRangeType,
+
+ // Number of 'short-range' branch range types.
+ // We don't consider unconditional branches 'short-range'.
+ NumShortBranchRangeTypes = UncondBranchRangeType
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+ FPTieAway,
+ FPRoundOdd
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ Instr InstructionBits() const {
+ return *(reinterpret_cast<const Instr*>(this));
+ }
+
+ void SetInstructionBits(Instr new_instr) {
+ *(reinterpret_cast<Instr*>(this)) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int32_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ #define DEFINE_SETTER(Name, HighBit, LowBit, Func) \
+ inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_SETTER)
+ #undef DEFINE_SETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int offset =
+ static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width - 1, 0, offset);
+ }
+
+ uint64_t ImmLogical() const;
+ unsigned ImmNEONabcdefgh() const;
+ float ImmFP32() const;
+ double ImmFP64() const;
+ float ImmNEONFP32() const;
+ double ImmNEONFP64() const;
+
+ unsigned SizeLS() const {
+ return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+ }
+
+ unsigned SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ int NEONLSIndex(int access_size_shift) const {
+ int64_t q = NEONQ();
+ int64_t s = NEONS();
+ int64_t size = NEONLSSize();
+ int64_t index = (q << 3) | (s << 2) | size;
+ return static_cast<int>(index >> access_size_shift);
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsImmBranch() const {
+ return BranchType() != UnknownBranchType;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ bool IsLoad() const;
+ bool IsStore() const;
+
+ bool IsLoadLiteral() const {
+ // This includes PRFM_lit.
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ // Mozilla modifications.
+ bool IsUncondB() const;
+ bool IsCondB() const;
+ bool IsBL() const;
+ bool IsBR() const;
+ bool IsBLR() const;
+ bool IsTBZ() const;
+ bool IsTBNZ() const;
+ bool IsCBZ() const;
+ bool IsCBNZ() const;
+ bool IsLDR() const;
+ bool IsNOP() const;
+ bool IsADR() const;
+ bool IsADRP() const;
+ bool IsBranchLinkImm() const;
+ bool IsTargetReachable(Instruction* target) const;
+ ptrdiff_t ImmPCRawOffset() const;
+ void SetImmPCRawOffset(ptrdiff_t offset);
+ void SetBits32(int msb, int lsb, unsigned value);
+
+ // Is this a stack pointer synchronization instruction as inserted by
+ // MacroAssembler::syncStackPtr()?
+ bool IsStackPtrSync() const;
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
+ static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
+
+ // Check if offset can be encoded as a RAW offset in a branch_type
+  // instruction. The offset must be encodable directly as the immediate field
+  // in the instruction; it is not scaled by kInstructionSize first.
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
+
+ // Get the range type corresponding to a branch type.
+ static ImmBranchRangeType ImmBranchTypeToRange(ImmBranchType);
+
+ // Get the maximum realizable forward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the largest positive multiple of kInstructionSize, offset, such
+ // that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType range_type);
+
+  // Get the minimum realizable backward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the smallest (i.e., largest in magnitude) negative multiple of
+ // kInstructionSize, offset, such that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMinBackwardOffset(ImmBranchRangeType range_type);
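+  // For example, TestBranchRangeType covers a signed 14-bit instruction
+  // offset, so the forward limit is (2^13 - 1) * kInstructionSize = 32764
+  // bytes and the backward limit is -2^13 * kInstructionSize = -32768 bytes.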
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use sp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into sp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use sp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ const Instruction* ImmPCOffsetTarget() const;
+
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(const Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(const Instruction* source);
+
+ // The range of a load literal instruction, expressed as 'instr +- range'.
+  // The range is actually the 'positive' range; the load instruction can
+ // target [instr - range - kInstructionSize, instr + range].
+ static const int kLoadLiteralImmBitwidth = 19;
+ static const int kLoadLiteralRange =
+ (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
+
+ // Calculate the address of a literal referred to by a load-literal
+ // instruction, and return it as the specified type.
+ //
+ // The literal itself is safely mutable only if the backing buffer is safely
+ // mutable.
+ template <typename T>
+ T LiteralAddress() const {
+ uint64_t base_raw = reinterpret_cast<uint64_t>(this);
+ int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ uint64_t address_raw = base_raw + offset;
+
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ T address = (T)(address_raw);
+
+ // Assert that the address can be represented by the specified type.
+ VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+ return address;
+ }
+
+ uint32_t Literal32() const {
+ uint32_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ uint64_t Literal64() const {
+ uint64_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ float LiteralFP32() const {
+ return rawbits_to_float(Literal32());
+ }
+
+ double LiteralFP64() const {
+ return rawbits_to_double(Literal64());
+ }
+
+ const Instruction* NextInstruction() const {
+ return this + kInstructionSize;
+ }
+
+ // Skip any constant pools with artificial guards at this point.
+ // Return either |this| or the first instruction after the pool.
+ const Instruction* skipPool() const;
+
+ const Instruction* InstructionAtOffset(int64_t offset) const {
+ VIXL_ASSERT(IsWordAligned(this + offset));
+ return this + offset;
+ }
+
+ template<typename T> static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ template<typename T> static const Instruction* CastConst(T src) {
+ return reinterpret_cast<const Instruction*>(src);
+ }
+
+ private:
+ int ImmBranch() const;
+
+ static float Imm8ToFP32(uint32_t imm8);
+ static double Imm8ToFP64(uint32_t imm8);
+
+ void SetPCRelImmTarget(const Instruction* target);
+ void SetBranchImmTarget(const Instruction* target);
+};
+
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+ kFormatUndefined = 0xffffffff,
+ kFormat8B = NEON_8B,
+ kFormat16B = NEON_16B,
+ kFormat4H = NEON_4H,
+ kFormat8H = NEON_8H,
+ kFormat2S = NEON_2S,
+ kFormat4S = NEON_4S,
+ kFormat1D = NEON_1D,
+ kFormat2D = NEON_2D,
+
+ // Scalar formats. We add the scalar bit to distinguish between scalar and
+ // vector enumerations; the bit is always set in the encoding of scalar ops
+ // and always clear for vector ops. Although kFormatD and kFormat1D appear
+ // to be the same, their meaning is subtly different. The first is a scalar
+ // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar,
+ kFormatH = NEON_H | NEONScalar,
+ kFormatS = NEON_S | NEONScalar,
+ kFormatD = NEON_D | NEONScalar
+};
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatFillQ(const VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+// TODO: Make the return types of these functions consistent.
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
+
+enum NEONFormat {
+ NF_UNDEF = 0,
+ NF_8B = 1,
+ NF_16B = 2,
+ NF_4H = 3,
+ NF_8H = 4,
+ NF_2S = 5,
+ NF_4S = 6,
+ NF_1D = 7,
+ NF_2D = 8,
+ NF_B = 9,
+ NF_H = 10,
+ NF_S = 11,
+ NF_D = 12
+};
+
+static const unsigned kNEONFormatMaxBits = 6;
+
+struct NEONFormatMap {
+ // The bit positions in the instruction to consider.
+ uint8_t bits[kNEONFormatMaxBits];
+
+ // Mapping from concatenated bits to format.
+ NEONFormat map[1 << kNEONFormatMaxBits];
+};
+
+class NEONFormatDecoder {
+ public:
+ enum SubstitutionMode {
+ kPlaceholder,
+ kFormat
+ };
+
+ // Construct a format decoder with increasingly specific format maps for each
+  // substitution. If no format map is specified, the default is the integer
+ // format map.
+ explicit NEONFormatDecoder(const Instruction* instr) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(IntegerFormatMap());
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1,
+ const NEONFormatMap* format2) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1, format2);
+ }
+
+ // Set the format mapping for all or individual substitutions.
+ void SetFormatMaps(const NEONFormatMap* format0,
+ const NEONFormatMap* format1 = NULL,
+ const NEONFormatMap* format2 = NULL) {
+ VIXL_ASSERT(format0 != NULL);
+ formats_[0] = format0;
+ formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+ formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+ }
+ void SetFormatMap(unsigned index, const NEONFormatMap* format) {
+ VIXL_ASSERT(index <= (sizeof(formats_) / sizeof(formats_[0])));
+ VIXL_ASSERT(format != NULL);
+ formats_[index] = format;
+ }
+
+ // Substitute %s in the input string with the placeholder string for each
+  // register, i.e. "'B", "'H", etc.
+ const char* SubstitutePlaceholders(const char* string) {
+ return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+ }
+
+ // Substitute %s in the input string with a new string based on the
+ // substitution mode.
+ const char* Substitute(const char* string,
+ SubstitutionMode mode0 = kFormat,
+ SubstitutionMode mode1 = kFormat,
+ SubstitutionMode mode2 = kFormat) {
+ snprintf(form_buffer_, sizeof(form_buffer_), string,
+ GetSubstitute(0, mode0),
+ GetSubstitute(1, mode1),
+ GetSubstitute(2, mode2));
+ return form_buffer_;
+ }
+
+  // Append a "2" to a mnemonic string based on the state of the Q bit.
+ const char* Mnemonic(const char* mnemonic) {
+ if ((instrbits_ & NEON_Q) != 0) {
+ snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+ return mne_buffer_;
+ }
+ return mnemonic;
+ }
+
+ VectorFormat GetVectorFormat(int format_index = 0) {
+ return GetVectorFormat(formats_[format_index]);
+ }
+
+ VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
+ static const VectorFormat vform[] = {
+ kFormatUndefined,
+ kFormat8B, kFormat16B, kFormat4H, kFormat8H,
+ kFormat2S, kFormat4S, kFormat1D, kFormat2D,
+ kFormatB, kFormatH, kFormatS, kFormatD
+ };
+ VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
+ return vform[GetNEONFormat(format_map)];
+ }
+
+ // Built in mappings for common cases.
+
+ // The integer format map uses three bits (Q, size<1:0>) to encode the
+ // "standard" set of NEON integer vector formats.
+ static const NEONFormatMap* IntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
+
+ // The long integer format map uses two bits (size<1:0>) to encode the
+ // long set of NEON integer vector formats. These are used in narrow, wide
+ // and long operations.
+ static const NEONFormatMap* LongIntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_8H, NF_4S, NF_2D}
+ };
+ return &map;
+ }
+
+ // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+ // formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap* FPFormatMap() {
+ // The FP format map assumes two bits (Q, size<0>) are used to encode the
+ // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap map = {
+ {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
+
+ // The load/store format map uses three bits (Q, 11, 10) to encode the
+ // set of NEON vector formats.
+ static const NEONFormatMap* LoadStoreFormatMap() {
+ static const NEONFormatMap map = {
+ {11, 10, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ return &map;
+ }
+
+ // The logical format map uses one bit (Q) to encode the NEON vector format:
+ // NF_8B, NF_16B.
+ static const NEONFormatMap* LogicalFormatMap() {
+ static const NEONFormatMap map = {
+ {30}, {NF_8B, NF_16B}
+ };
+ return &map;
+ }
+
+ // The triangular format map uses between two and five bits to encode the NEON
+ // vector format:
+ // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+ // x1000->2S, x1001->4S, 10001->2D, all others undefined.
+ static const NEONFormatMap* TriangularFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
+ NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
+ NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
+ NF_4H, NF_8H, NF_8B, NF_16B}
+ };
+ return &map;
+ }
+
+ // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+ // formats: NF_B, NF_H, NF_S, NF_D.
+ static const NEONFormatMap* ScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_B, NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The long scalar format map uses two bits (size<1:0>) to encode the longer
+ // NEON scalar formats: NF_H, NF_S, NF_D.
+ static const NEONFormatMap* LongScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The FP scalar format map assumes one bit (size<0>) is used to encode the
+ // NEON FP scalar formats: NF_S, NF_D.
+ static const NEONFormatMap* FPScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {22}, {NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The triangular scalar format map uses between one and four bits to encode
+ // the NEON FP scalar formats:
+ // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+ static const NEONFormatMap* TriangularScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16},
+ {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
+ NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
+ };
+ return &map;
+ }
+
+ private:
+ // Get a pointer to a string that represents the format or placeholder for
+ // the specified substitution index, based on the format map and instruction.
+ const char* GetSubstitute(int index, SubstitutionMode mode) {
+ if (mode == kFormat) {
+ return NEONFormatAsString(GetNEONFormat(formats_[index]));
+ }
+ VIXL_ASSERT(mode == kPlaceholder);
+ return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+ }
+
+ // Get the NEONFormat enumerated value for bits obtained from the
+ // instruction based on the specified format mapping.
+ NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
+ return format_map->map[PickBits(format_map->bits)];
+ }
+
+ // Convert a NEONFormat into a string.
+ static const char* NEONFormatAsString(NEONFormat format) {
+ static const char* formats[] = {
+ "undefined",
+ "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
+ "b", "h", "s", "d"
+ };
+ VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
+ return formats[format];
+ }
+
+ // Convert a NEONFormat into a register placeholder string.
+ static const char* NEONFormatAsPlaceholder(NEONFormat format) {
+ VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
+ (format == NF_S) || (format == NF_D) ||
+ (format == NF_UNDEF));
+ static const char* formats[] = {
+ "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "'B", "'H", "'S", "'D"
+ };
+ return formats[format];
+ }
+
+ // Select bits from instrbits_ defined by the bits array, concatenate them,
+ // and return the value.
+ uint8_t PickBits(const uint8_t bits[]) {
+ uint8_t result = 0;
+ for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
+ if (bits[b] == 0) break;
+ result <<= 1;
+ result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
+ }
+ return result;
+ }
+
+ Instr instrbits_;
+ const NEONFormatMap* formats_[3];
+ char form_buffer_[64];
+ char mne_buffer_[16];
+};
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUCTIONS_A64_H_
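The NEONFormatDecoder declared above resolves a format by concatenating selected instruction bits (PickBits) and indexing a NEONFormatMap's map array with the result. A rough standalone sketch of that lookup for the integer format map (bits {23, 22, 30}, i.e. size<1:0> and Q), using a made-up instruction value; illustrative only, not part of the patch:

#include <cstdint>
#include <cstdio>

int main() {
  // Trimmed-down equivalent of PickBits() + GetNEONFormat() for
  // IntegerFormatMap(): index = size<1>:size<0>:Q.
  const uint8_t bits[3] = {23, 22, 30};
  const char* map[8] = {"8b", "16b", "4h", "8h", "2s", "4s", "undef", "2d"};

  uint32_t instrbits = UINT32_C(1) << 30;   // hypothetical encoding: Q=1, size=00
  unsigned index = 0;
  for (unsigned b = 0; b < 3; b++) {
    index = (index << 1) | ((instrbits >> bits[b]) & 1);
  }
  printf("format: %s\n", map[index]);       // prints "format: 16b"
  return 0;
}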
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.cpp b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
new file mode 100644
index 000000000..7653e0856
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
@@ -0,0 +1,844 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instrument-vixl.h"
+
+namespace vixl {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ VIXL_ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+struct CounterDescriptor {
+ const char* name;
+ CounterType type;
+};
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"NEON", Gauge},
+ {"Crypto", Gauge}
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stdout), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stdout.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ printf("Can't open output file %s. Using stdout.\n", datafile);
+ output_stream_ = stdout;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ if (Counter* counter = js_new<Counter>(kCounterList[i].name, kCounterList[i].type))
+ counters_.append(counter);
+ }
+
+ DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ for (auto counter : counters_) {
+ js_delete(counter);
+ }
+
+ if (output_stream_ != stdout) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ VIXL_ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%" PRIu64 ",", counter->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%s,", counter->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted comment
+ // line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ for (auto counter : counters_) {
+ if (strcmp(counter->name(), name) == 0) {
+ return counter;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ for (auto counter : counters_) {
+ counter->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ for (auto counter : counters_) {
+ counter->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitLogicalImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
+
+
+void Instrument::VisitBitfield(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitLoadLiteral(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(const Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreMask)) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ VIXL_FALLTHROUGH();
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s:
+ VIXL_FALLTHROUGH();
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_x:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDRSB_w:
+ VIXL_FALLTHROUGH();
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s:
+ VIXL_FALLTHROUGH();
+ case LDR_d: load_fp_counter->Increment(); break;
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
+ USE(instr);
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCryptoAES(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Different(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONTable(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONPerm(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} // namespace vixl
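In VisitMoveWideImmediate above, a movn that targets the zero register is treated as an instrumentation event rather than a counted instruction, and DumpEventMarker prints the low two bytes of that immediate as a two-character marker. A small illustrative sketch of the packing this implies (the 'OK' marker value here is hypothetical, not taken from the patch):

#include <cstdio>

int main() {
  // First character in the low byte, second character in the next byte,
  // matching how DumpEventMarker unpacks the value.
  unsigned marker = 'O' | ('K' << 8);
  printf("# %c%c\n", marker & 0xff, (marker >> 8) & 0xff);   // prints "# OK"
  return 0;
}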
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.h b/js/src/jit/arm64/vixl/Instrument-vixl.h
new file mode 100644
index 000000000..68b04c60c
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.h
@@ -0,0 +1,110 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUMENT_A64_H_
+#define VIXL_A64_INSTRUMENT_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jsalloc.h"
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ explicit Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ void Enable();
+ void Disable();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(const Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(const Instruction* instr);
+ void InstrumentLoadStorePair(const Instruction* instr);
+
+ mozilla::Vector<Counter*, 8, js::SystemAllocPolicy> counters_;
+
+ FILE *output_stream_;
+ uint64_t sample_period_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUMENT_A64_H_
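A rough usage sketch of the Counter class declared above, showing the Gauge-versus-Cumulative read semantics implemented in Instrument-vixl.cpp; illustrative only, and it assumes the header is reachable on the include path exactly as spelled in the patch:

#include <cinttypes>
#include <cstdio>

#include "jit/arm64/vixl/Instrument-vixl.h"

int main() {
  vixl::Counter gauge("Load Integer", vixl::Gauge);
  vixl::Counter total("Instruction", vixl::Cumulative);
  gauge.Enable();
  total.Enable();
  for (int i = 0; i < 3; i++) {
    gauge.Increment();
    total.Increment();
  }
  printf("%" PRIu64 " %" PRIu64 "\n", gauge.count(), total.count());  // 3 3
  printf("%" PRIu64 " %" PRIu64 "\n", gauge.count(), total.count());  // 0 3: the Gauge resets on read
  return 0;
}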
diff --git a/js/src/jit/arm64/vixl/Logic-vixl.cpp b/js/src/jit/arm64/vixl/Logic-vixl.cpp
new file mode 100644
index 000000000..539e145ec
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Logic-vixl.cpp
@@ -0,0 +1,4878 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include <cmath>
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+namespace vixl {
+
+template<> double Simulator::FPDefaultNaN<double>() {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() {
+ return kFP32DefaultNaN;
+}
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int64_t bits =
+ FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_double(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int32_t bits =
+ FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_float(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float16 FPRoundToFloat16(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ return FPRound<float16, kFloat16ExponentBits, kFloat16MantissaBits>(
+ sign, exponent, mantissa, round_mode);
+}
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
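+
+// Worked example: UFixedToDouble(3, 1) finds the highest significant bit at
+// position 1, so exponent = 1 - 1 = 0 and the mantissa 0b11 rounds to 1.5,
+// i.e. the fixed-point value 3 scaled down by its one fractional bit.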
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
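+      // For example (with FPCR.DN clear), the signalling float NaN 0x7f800001
+      // becomes the quiet double NaN 0x7ff8000020000000: payload bit 0 moves
+      // up to bit 29 and bit 51 is forced to '1'.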
+ uint32_t raw = float_to_rawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (UINT64_C(1) << 51); // Force a quiet NaN.
+
+ return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return static_cast<double>(value);
+}
+
+
+float Simulator::FPToFloat(float16 value) {
+ uint32_t sign = value >> 15;
+ uint32_t exponent = unsigned_bitextract_32(
+ kFloat16MantissaBits + kFloat16ExponentBits - 1, kFloat16MantissaBits,
+ value);
+ uint32_t mantissa = unsigned_bitextract_32(
+ kFloat16MantissaBits - 1, 0, value);
+
+ switch (float16classify(value)) {
+ case FP_ZERO:
+ return (sign == 0) ? 0.0f : -0.0f;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+ case FP_SUBNORMAL: {
+ // Calculate shift required to put mantissa into the most-significant bits
+ // of the destination mantissa.
+ int shift = CountLeadingZeros(mantissa << (32 - 10));
+
+ // Shift mantissa and discard implicit '1'.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+ mantissa &= (1 << kFloatMantissaBits) - 1;
+
+ // Adjust the exponent for the shift applied, and rebias.
+ exponent = exponent - shift + (-15 + 127);
+ break;
+ }
+
+ case FP_NAN:
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ exponent = (1 << kFloatExponentBits) - 1;
+
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+ mantissa |= 1 << 22; // Force a quiet NaN.
+ break;
+
+ case FP_NORMAL:
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+ // Change exponent bias.
+ exponent += (-15 + 127);
+ break;
+
+ default: VIXL_UNREACHABLE();
+ }
+ return rawbits_to_float((sign << 31) |
+ (exponent << kFloatMantissaBits) |
+ mantissa);
+}
+
+
+float16 Simulator::FPToFloat16(float value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint32_t raw = float_to_rawbits(value);
+ int32_t sign = raw >> 31;
+ int32_t exponent = unsigned_bitextract_32(30, 23, raw) - 127;
+ uint32_t mantissa = unsigned_bitextract_32(22, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ float16 result = (sign == 0) ? kFP16PositiveInfinity
+ : kFP16NegativeInfinity;
+ result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+ return result;
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? 0 : 0x8000;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert float-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (1 << 23);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return 0;
+}
+
+
+float16 Simulator::FPToFloat16(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint64_t raw = double_to_rawbits(value);
+ int32_t sign = raw >> 63;
+ int64_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ float16 result = (sign == 0) ? kFP16PositiveInfinity
+ : kFP16NegativeInfinity;
+ result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+ return result;
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? 0 : 0x8000;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (UINT64_C(1) << 52);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return 0;
+}
+
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = double_to_rawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload =
+ static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = double_to_rawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent =
+ static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (UINT64_C(1) << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return value;
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr) {
+ dst.ReadUintFromMem(vform, index, addr);
+}
+
+
+void Simulator::ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ dst.ClearForWrite(vform);
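+  // Load a single element from 'addr' and replicate it into every lane.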
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ }
+}
+
+
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
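+  // LD2 de-interleaves: each consecutive pair of elements in memory is split
+  // across the same lane index of dst1 and dst2, so both addresses advance by
+  // two elements per iteration.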
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ addr1 += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+}
+
+
+void Simulator::ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ addr1 += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+}
+
+
+void Simulator::ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ addr1 += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+ dst4.ReadUintFromMem(vform, index, addr4);
+}
+
+
+void Simulator::ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ src.WriteUintToMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr) {
+ src.WriteUintToMem(vform, index, addr);
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ addr += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ addr += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ dst4.WriteUintToMem(vform, i, addr4);
+ addr += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+ dst4.WriteUintToMem(vform, index, addr + 3 * esize);
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = src1.Int(vform, i);
+ int64_t sb = src2.Int(vform, i);
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ bool result = false;
+ switch (cond) {
+ case eq: result = (ua == ub); break;
+ case ge: result = (sa >= sb); break;
+ case gt: result = (sa > sb) ; break;
+ case hi: result = (ua > ub) ; break;
+ case hs: result = (ua >= ub); break;
+ case lt: result = (sa < sb) ; break;
+ case le: result = (sa <= sb); break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond) {
+ SimVRegister temp;
+ LogicVRegister imm_reg = dup_immediate(vform, temp, imm);
+ return cmp(vform, dst, src1, imm_reg, cond);
+}
+
+
+LogicVRegister Simulator::cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ // TODO(all): consider assigning the result of LaneCountFromFormat to a local.
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ uint64_t ua = src1.UintLeftJustified(vform, i);
+ uint64_t ub = src2.UintLeftJustified(vform, i);
+ uint64_t ur = ua + ub;
+ if (ur < ua) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa + sb;
+    // If the operands have the same sign but the result's sign differs, the
+    // addition overflowed.
+ if (((sa >= 0) == (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sa >= 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) + src2.Int(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uzp1(vform, temp1, src1, src2);
+ uzp2(vform, temp2, src1, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ sub(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mul(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mla(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mls(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
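+// Carry-less (GF(2)) 8x8 -> 16 bit multiplication: for each set bit of op1,
+// XOR-accumulate op2 shifted left by that bit's position. Used by pmul,
+// pmull and pmull2 below.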
+uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) {
+ uint16_t result = 0;
+ uint16_t extended_op2 = op2;
+ for (int i = 0; i < 8; ++i) {
+ if ((op1 >> i) & 1) {
+ result = result ^ (extended_op2 << i);
+ }
+ }
+ return result;
+}
+
+
+LogicVRegister Simulator::pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i,
+ PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidth(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, i),
+ src2.Uint(vform_src, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform);
+ dst.ClearForWrite(vform);
+ int lane_count = LaneCountFromFormat(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, lane_count + i),
+ src2.Uint(vform_src, lane_count + i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ if (src2.Uint(vform, i) > src1.Uint(vform, i)) {
+ dst.SetUnsignedSat(i, false);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa - sb;
+    // If the operands have different signs and the result's sign doesn't
+    // match the first operand's, the subtraction overflowed.
+ if (((sa >= 0) != (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sr < 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) - src2.Int(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) & ~imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = ~src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
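+    // Bitwise multiplexer: where a bit of operand2 is set, the result takes
+    // that bit from operand3, otherwise from operand1. Here operand2 is
+    // ~src2, so dst bits are kept wherever src2 is set.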
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = src2.Uint(vform, i);
+ uint64_t operand2 = dst.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src1_val = src1.Int(vform, i);
+ int64_t src2_val = src2.Int(vform, i);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ int64_t src1_val = src.Int(vform, i);
+ int64_t src2_val = src.Int(vform, i + 1);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, true);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, false);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(vform == kFormatD);
+
+ int64_t dst_val = src.Int(kFormat2D, 0) + src.Int(kFormat2D, 1);
+ dst.ClearForWrite(vform);
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ uint64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetUint(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ int64_t dst_val = max ? INT64_MIN : INT64_MAX;
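+  // All destination lanes are zeroed; only lane 0 receives the reduction.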
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, 0);
+ int64_t src_val = src.Int(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src1_val = src1.Uint(vform, i);
+ uint64_t src2_val = src2.Uint(vform, i);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ uint64_t src1_val = src.Uint(vform, i);
+ uint64_t src2_val = src.Uint(vform, i + 1);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, true);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, false);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ uint64_t dst_val = max ? 0 : UINT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, 0);
+ uint64_t src_val = src.Uint(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ dst.SetUint(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl2(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll2(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl2(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted = src_lane << shift;
+ uint64_t mask = MaxUintFromFormat(vform) << shift;
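+    // SLI (shift left and insert): the shifted source overwrites the high bits
+    // of the lane while the low 'shift' bits of the existing lane are kept.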
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ VIXL_ASSERT((shift > 0) &&
+ (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform))));
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted;
+ uint64_t mask;
+ if (shift == 64) {
+ shifted = 0;
+ mask = 0;
+ } else {
+ shifted = src_lane >> shift;
+ mask = MaxUintFromFormat(vform) >> shift;
+ }
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return sshl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t value = src.Uint(vform, i);
+ result[i] = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ result[i] += (value & 1);
+ value >>= 1;
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ int64_t lj_src_val = src1.IntLeftJustified(vform, i);
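+    // The left-justified copy places the lane value in the top bits of a
+    // 64-bit word, so the leading-bit counts below measure shift headroom
+    // independently of the lane size.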
+
+ // Set signed saturation state.
+ if ((shift_val > CountLeadingSignBits(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetSignedSat(i, lj_src_val >= 0);
+ }
+
+ // Set unsigned saturation state.
+ if (lj_src_val < 0) {
+ dst.SetUnsignedSat(i, false);
+ } else if ((shift_val > CountLeadingZeros(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ int64_t src_val = src1.Int(vform, i);
+ if (shift_val > 63) {
+ dst.SetInt(vform, i, 0);
+ } else if (shift_val < -63) {
+ dst.SetRounding(i, src_val < 0);
+ dst.SetInt(vform, i, (src_val < 0) ? -1 : 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+ src_val >>= -shift_val;
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetInt(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ uint64_t lj_src_val = src1.UintLeftJustified(vform, i);
+
+ // Set saturation state.
+ if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ uint64_t src_val = src1.Uint(vform, i);
+ if ((shift_val > 63) || (shift_val < -64)) {
+ dst.SetUint(vform, i, 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+
+ if (shift_val == -64) {
+ src_val = 0;
+ } else {
+ src_val >>= -shift_val;
+ }
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetUint(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ dst.SetInt(vform, i, -sa);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = dst.IntLeftJustified(vform, i);
+ uint64_t ub = src.UintLeftJustified(vform, i);
+ int64_t sr = sa + ub;
+
+ if (sr < sa) { // Test for signed positive saturation.
+ dst.SetInt(vform, i, MaxIntFromFormat(vform));
+ } else {
+ dst.SetInt(vform, i, dst.Int(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = dst.UintLeftJustified(vform, i);
+ int64_t sb = src.IntLeftJustified(vform, i);
+ uint64_t ur = ua + sb;
+
+ if ((sb > 0) && (ur <= ua)) {
+ dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation.
+ } else if ((sb < 0) && (ur >= ua)) {
+ dst.SetUint(vform, i, 0); // Negative saturation.
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ if (sa < 0) {
+ dst.SetInt(vform, i, -sa);
+ } else {
+ dst.SetInt(vform, i, sa);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::extractnarrow(VectorFormat dstform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned) {
+ bool upperhalf = false;
+ VectorFormat srcform = kFormatUndefined;
+ int64_t ssrc[8];
+ uint64_t usrc[8];
+
+ switch (dstform) {
+ case kFormat8B : upperhalf = false; srcform = kFormat8H; break;
+ case kFormat16B: upperhalf = true; srcform = kFormat8H; break;
+ case kFormat4H : upperhalf = false; srcform = kFormat4S; break;
+ case kFormat8H : upperhalf = true; srcform = kFormat4S; break;
+ case kFormat2S : upperhalf = false; srcform = kFormat2D; break;
+ case kFormat4S : upperhalf = true; srcform = kFormat2D; break;
+ case kFormatB : upperhalf = false; srcform = kFormatH; break;
+ case kFormatH : upperhalf = false; srcform = kFormatS; break;
+ case kFormatS : upperhalf = false; srcform = kFormatD; break;
+    default: VIXL_UNIMPLEMENTED();
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ ssrc[i] = src.Int(srcform, i);
+ usrc[i] = src.Uint(srcform, i);
+ }
+
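+  // The upper-half ('2') narrowing forms write into the top half of the
+  // destination and leave the lower half untouched, so ClearForWrite is
+  // skipped for them.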
+ int offset;
+ if (upperhalf) {
+ offset = LaneCountFromFormat(dstform) / 2;
+ } else {
+ offset = 0;
+ dst.ClearForWrite(dstform);
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ // Test for signed saturation
+ if (ssrc[i] > MaxIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, true);
+ } else if (ssrc[i] < MinIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, false);
+ }
+
+ // Test for unsigned saturation
+ if (srcIsSigned) {
+ if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) {
+ dst.SetUnsignedSat(offset + i, true);
+ } else if (ssrc[i] < 0) {
+ dst.SetUnsignedSat(offset + i, false);
+ }
+ } else {
+ if (usrc[i] > MaxUintFromFormat(dstform)) {
+ dst.SetUnsignedSat(offset + i, true);
+ }
+ }
+
+ int64_t result;
+ if (srcIsSigned) {
+ result = ssrc[i] & MaxUintFromFormat(dstform);
+ } else {
+ result = usrc[i] & MaxUintFromFormat(dstform);
+ }
+
+ if (dstIsSigned) {
+ dst.SetInt(dstform, offset + i, result);
+ } else {
+ dst.SetUint(dstform, offset + i, result);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true);
+}
+
+
+LogicVRegister Simulator::sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (issigned) {
+ int64_t sr = src1.Int(vform, i) - src2.Int(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetInt(vform, i, sr);
+ } else {
+ int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetUint(vform, i, sr);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, true);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, false);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, ~src.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ uint64_t reversed_value;
+ uint64_t value;
+ for (int i = 0; i < laneCount; i++) {
+ value = src.Uint(vform, i);
+ reversed_value = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ reversed_value = (reversed_value << 1) | (value & 1);
+ value >>= 1;
+ }
+ result[i] = reversed_value;
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSize = LaneSizeInBytesFromFormat(vform);
+ int lanesPerLoop = revSize / laneSize;
+ for (int i = 0; i < laneCount; i += lanesPerLoop) {
+ for (int j = 0; j < lanesPerLoop; j++) {
+ result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j);
+ }
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 2);
+}
+
+
+LogicVRegister Simulator::rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 4);
+}
+
+
+LogicVRegister Simulator::rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 8);
+}
+
+
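+// Pairwise add long: each destination lane is the sum of a pair of adjacent
+// half-width source lanes, optionally accumulated into dst (the *adalp forms).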
+LogicVRegister Simulator::addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate) {
+ VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform);
+
+ int64_t sr[16];
+ uint64_t ur[16];
+
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (is_signed) {
+ sr[i] = src.Int(vformsrc, 2 * i) + src.Int(vformsrc, 2 * i + 1);
+ } else {
+ ur[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1);
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (do_accumulate) {
+ if (is_signed) {
+ dst.SetInt(vform, i, dst.Int(vform, i) + sr[i]);
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + ur[i]);
+ }
+ } else {
+ if (is_signed) {
+ dst.SetInt(vform, i, sr[i]);
+ } else {
+ dst.SetUint(vform, i, ur[i]);
+ }
+ }
+ }
+
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, false);
+}
+
+
+LogicVRegister Simulator::uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, false);
+}
+
+
+LogicVRegister Simulator::sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, true);
+}
+
+
+LogicVRegister Simulator::uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, true);
+}
+
+
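+// EXT: the low lanes of the result are taken from src1 starting at lane
+// `index`; the remaining high lanes are filled from the low lanes of src2.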
+LogicVRegister Simulator::ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ uint8_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount - index; ++i) {
+ result[i] = src1.Uint(vform, i + index);
+ }
+ for (int i = 0; i < index; ++i) {
+ result[laneCount - index + i] = src2.Uint(vform, i);
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = src.Uint(vform, src_index);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index) {
+ dst.SetUint(vform, dst_index, src.Uint(vform, src_index));
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm) {
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.SetUint(vform, dst_index, value);
+ return dst;
+}
+
+
+LogicVRegister Simulator::movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, ~imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) | imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vform_src = VectorFormatDoubleWidth(vform);
+ VectorFormat vform_dst = vform;
+ LogicVRegister shifted_src = ushr(vform_src, temp, src, shift);
+ return extractnarrow(vform_dst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
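+// Table lookups. tbl zeroes destination lanes whose index is out of range (by
+// clearing dst with movi before delegating to tbx), whereas tbx leaves such
+// lanes unchanged.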
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ movi(vform, dst, 0);
+ return tbx(vform, dst, tab, ind);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ movi(vform, dst, 0);
+ return tbx(vform, dst, tab, tab2, ind);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ movi(vform, dst, 0);
+ return tbx(vform, dst, tab, tab2, tab3, ind);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ movi(vform, dst, 0);
+ return tbx(vform, dst, tab, tab2, tab3, tab4, ind);
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ case 3: dst.SetUint(vform, i, tab4.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
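+// Signed saturating doubling multiply long: the doubling is implemented by
+// adding the widened product to itself before saturating.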
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull2(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round) {
+  // 2 * INT32_MIN * INT32_MIN causes int64_t to overflow.
+  // To avoid this, we use (src1 * src2 + (1 << (esize - 2))) >> (esize - 1),
+  // which is the same as ((2 * src1 * src2) + (1 << (esize - 1))) >> esize.
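+  // For example, with esize == 32, round == true and both lanes equal to
+  // INT32_MIN, the naive form 2 * INT32_MIN * INT32_MIN == 2^63 would
+  // overflow int64_t, while the rewritten form computes
+  // (2^62 + 2^30) >> 31 == 2^31, which the clamp below saturates to INT32_MAX.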
+
+ int esize = LaneSizeInBitsFromFormat(vform);
+ int round_const = round ? (1 << (esize - 2)) : 0;
+ int64_t product;
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ product = src1.Int(vform, i) * src2.Int(vform, i);
+ product += round_const;
+ product = product >> (esize - 1);
+
+ if (product > MaxIntFromFormat(vform)) {
+ product = MaxIntFromFormat(vform);
+ } else if (product < MinIntFromFormat(vform)) {
+ product = MinIntFromFormat(vform);
+ }
+ dst.SetInt(vform, i, product);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sqrdmulh(vform, dst, src1, src2, false);
+}
+
+
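+// Add/subtract returning the high, narrow half: the double-width result is
+// shifted right by the destination lane size before narrowing, with the
+// raddhn/rsubhn variants using a rounding shift.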
+LogicVRegister Simulator::addhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::addhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, 2 * i);
+ result[(2 * i) + 1] = src2.Uint(vform, 2 * i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, (2 * i) + 1);
+ result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, i);
+ result[(2 * i) + 1] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, pairs + i);
+ result[(2 * i) + 1] = src2.Uint(vform, pairs + i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[2 * i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+    dst.SetUint(vform, i, result[(2 * i) + 1]);
+ }
+ return dst;
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ T result = FPProcessNaNs(op1, op2);
+ if (std::isnan(result)) return result;
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulx(T op1, T op2) {
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns +/-2.0.
+ T two = 2.0;
+ return copysign(1.0, op1) * copysign(1.0, op2) * two;
+ }
+ return FPMul(op1, op2);
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
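+  // For example, a == +0.0, op1 == -0.0, op2 == 1.0 must yield +0.0, whereas
+  // a == -0.0 with the same product must yield -0.0.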
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ VIXL_ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ if (op2 == 0.0) FPProcessException();
+
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
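+// Reciprocal step (FRECPS). Callers negate the first operand (see frecps
+// below), so the fused multiply-add evaluates 2.0 - src1 * src2 with a single
+// rounding.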
+template <typename T>
+T Simulator::FPRecipStepFused(T op1, T op2) {
+ const T two = 2.0;
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return two;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ return FusedMultiplyAdd(op1, op2, two);
+ }
+}
+
+
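+// Reciprocal square root step (FRSQRTS). Callers negate the first operand
+// (see frsqrts below), so this evaluates (3.0 - src1 * src2) / 2.0 without
+// rounding the intermediate product.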
+template <typename T>
+T Simulator::FPRSqrtStepFused(T op1, T op2) {
+ const T one_point_five = 1.5;
+ const T two = 2.0;
+
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return one_point_five;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ // The multiply-add-halve operation must be fully fused, so avoid interim
+ // rounding by checking which operand can be losslessly divided by two
+ // before doing the multiply-add.
+ if (std::isnormal(op1 / two)) {
+ return FusedMultiplyAdd(op1 / two, op2, one_point_five);
+ } else if (std::isnormal(op2 / two)) {
+ return FusedMultiplyAdd(op1, op2 / two, one_point_five);
+ } else {
+ // Neither operand is normal after halving: the result is dominated by
+ // the addition term, so just return that.
+ return one_point_five;
+ }
+ }
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = std::floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // Take care of correctly handling the range ]-0.5, -0.0], which must
+ // yield -0.0.
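+      // For example, -0.25 rounds to -0.0, while 2.5 rounds to 3.0 and -2.5
+      // rounds to -3.0 (ties round away from zero).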
+ if ((-0.5 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+        // If the error is greater than 0.5, or is equal to 0.5 and the
+        // integer result is non-negative, round up.
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
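+      // For example, -0.5 rounds to -0.0, while 2.5 rounds to 2.0 and 3.5
+      // rounds to 4.0 (ties round to even).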
+ if ((-0.5 <= value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+      // If value > 0 then we take floor(value); otherwise, ceil(value).
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ case FPPositiveInfinity: {
+ // Take care of correctly handling the range ]-1.0, -0.0], which must
+ // yield -0.0.
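+      // For example, -0.5 rounds to -0.0 and 1.25 rounds to 2.0.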
+ if ((-1.0 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is non-zero, round up.
+ } else if (error > 0.0) {
+ int_result++;
+ }
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+ return int_result;
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+template <typename T> \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ dst.ClearForWrite(vform); \
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) { \
+ T op1 = src1.Float<T>(i); \
+ T op2 = src2.Float<T>(i); \
+ T result; \
+ if (PROCNAN) { \
+ result = FPProcessNaNs(op1, op2); \
+ if (!std::isnan(result)) { \
+ result = OP(op1, op2); \
+ } \
+ } else { \
+ result = OP(op1, op2); \
+ } \
+ dst.SetFloat(i, result); \
+ } \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \
+ FN<float>(vform, dst, src1, src2); \
+ } else { \
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \
+ FN<double>(vform, dst, src1, src2); \
+ } \
+ return dst; \
+}
+NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP)
+#undef DEFINE_NEON_FP_VECTOR_OP
+
+
+LogicVRegister Simulator::fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = fmul(vform, temp, src1, src2);
+ return fneg(vform, dst, product);
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRecipStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecps<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecps<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRSqrtStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frsqrts<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frsqrts<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ bool result = false;
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T nan_result = FPProcessNaNs(op1, op2);
+ if (!std::isnan(nan_result)) {
+ switch (cond) {
+ case eq: result = (op1 == op2); break;
+ case ge: result = (op1 >= op2); break;
+ case gt: result = (op1 > op2) ; break;
+ case le: result = (op1 <= op2); break;
+ case lt: result = (op1 < op2) ; break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fcmp<float>(vform, dst, src1, src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fcmp<double>(vform, dst, src1, src2, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond) {
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister zero_reg = dup_immediate(vform, temp, float_to_rawbits(0.0));
+ fcmp<float>(vform, dst, src, zero_reg, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister zero_reg = dup_immediate(vform, temp,
+ double_to_rawbits(0.0));
+ fcmp<double>(vform, dst, src, zero_reg, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ SimVRegister temp1, temp2;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister abs_src1 = fabs_<float>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<float>(vform, temp2, src2);
+ fcmp<float>(vform, dst, abs_src1, abs_src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister abs_src1 = fabs_<double>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<double>(vform, temp2, src2);
+ fcmp<double>(vform, dst, abs_src1, abs_src2, cond);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmla<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmla<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmls<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmls<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ op = -op;
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fneg<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fneg<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ if (copysign(1.0, op) < 0.0) {
+ op = -op;
+ }
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fabs_<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fabs_<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ fsub(vform, temp, src1, src2);
+ fabs_(vform, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float result = FPSqrt(src.Float<float>(i));
+ dst.SetFloat(i, result);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double result = FPSqrt(src.Float<double>(i));
+ dst.SetFloat(i, result);
+ }
+ }
+ return dst;
+}
+
+
+#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ SimVRegister temp1, temp2; \
+ uzp1(vform, temp1, src1, src2); \
+ uzp2(vform, temp2, src1, src2); \
+ FN(vform, dst, temp1, temp2); \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src) { \
+ if (vform == kFormatS) { \
+ float result = OP(src.Float<float>(0), src.Float<float>(1)); \
+ dst.SetFloat(0, result); \
+ } else { \
+ VIXL_ASSERT(vform == kFormatD); \
+ double result = OP(src.Float<double>(0), src.Float<double>(1)); \
+ dst.SetFloat(0, result); \
+ } \
+ dst.ClearForWrite(vform); \
+ return dst; \
+}
+NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP)
+#undef DEFINE_NEON_FP_PAIR_OP
+
+
+LogicVRegister Simulator::fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op) {
+ VIXL_ASSERT(vform == kFormat4S);
+ USE(vform);
+ float result1 = (this->*Op)(src.Float<float>(0), src.Float<float>(1));
+ float result2 = (this->*Op)(src.Float<float>(2), src.Float<float>(3));
+ float result = (this->*Op)(result1, result2);
+ dst.ClearForWrite(kFormatS);
+ dst.SetFloat<float>(0, result);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMax);
+}
+
+
+LogicVRegister Simulator::fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMin);
+}
+
+
+LogicVRegister Simulator::fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMaxNM);
+}
+
+
+LogicVRegister Simulator::fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMinNM);
+}
+
+
+LogicVRegister Simulator::fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmul<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmul<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmla<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmla<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmls<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmls<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmulx<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmulx<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ float rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<float>(i, rounded);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ double rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<double>(i, rounded);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetInt(vform, i, FPToInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetInt(vform, i, FPToInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetUint(vform, i, FPToUInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetUint(vform, i, FPToUInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToFloat(src.Float<float16>(i)));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i)));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<float16>(i + lane_count)));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count)));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat16(src.Float<float>(i), FPTieEven));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat16(src.Float<float>(i), FPTieEven));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPTieEven));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPRoundOdd));
+ }
+ return dst;
+}
+
+
+// Based on reference C function recip_sqrt_estimate from ARM ARM.
+double Simulator::recip_sqrt_estimate(double a) {
+ int q0, q1, s;
+ double r;
+ if (a < 0.5) {
+ q0 = static_cast<int>(a * 512.0);
+ r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
+ } else {
+ q1 = static_cast<int>(a * 256.0);
+ r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
+ }
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
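+
+// Illustrative example (not from the original patch): for a pre-scaled input
+// such as a = 0.5, the a >= 0.5 branch is taken, giving q1 = 128 and
+// r = 1.0 / sqrt(128.5 / 256.0) ~= 1.4115. Rounding yields s = 361, so the
+// returned estimate is 361.0 / 256.0 = 1.41015625, close to the exact
+// 1/sqrt(0.5) ~= 1.4142.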
+
+
+static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
+ return unsigned_bitextract_64(start_bit, end_bit, val);
+}
+
+
+template <typename T>
+T Simulator::FPRecipSqrtEstimate(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op == 0.0) {
+ if (copysign(1.0, op) < 0.0) {
+ return kFP64NegativeInfinity;
+ } else {
+ return kFP64PositiveInfinity;
+ }
+ } else if (copysign(1.0, op) < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else if (std::isinf(op)) {
+ return 0.0;
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ exp = float_exp(op);
+ fraction = float_mantissa(op);
+ fraction <<= 29;
+ } else {
+ exp = double_exp(op);
+ fraction = double_mantissa(op);
+ }
+
+ if (exp == 0) {
+ while (Bits(fraction, 51, 51) == 0) {
+ fraction = Bits(fraction, 50, 0) << 1;
+ exp -= 1;
+ }
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+
+ double scaled;
+ if (Bits(exp, 0, 0) == 0) {
+ scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+ } else {
+ scaled = double_pack(0, 1021, Bits(fraction, 51, 44) << 44);
+ }
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (380 - exp) / 2;
+ } else {
+ result_exp = (3068 - exp) / 2;
+ }
+
+ uint64_t estimate = double_to_rawbits(recip_sqrt_estimate(scaled));
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
+ return float_pack(0, exp_bits, est_bits);
+ } else {
+ return double_pack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<float>(input));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<double>(input));
+ }
+ }
+ return dst;
+}
+
+template <typename T>
+T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = float_sign(op);
+ } else {
+ sign = double_sign(op);
+ }
+
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (std::isinf(op)) {
+ return (sign == 1) ? -0.0 : 0.0;
+ } else if (op == 0.0) {
+ FPProcessException(); // FPExc_DivideByZero exception.
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else if (((sizeof(T) == sizeof(float)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -128.0))) ||
+ ((sizeof(T) == sizeof(double)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -1024.0)))) {
+ bool overflow_to_inf = false;
+ switch (rounding) {
+ case FPTieEven: overflow_to_inf = true; break;
+ case FPPositiveInfinity: overflow_to_inf = (sign == 0); break;
+ case FPNegativeInfinity: overflow_to_inf = (sign == 1); break;
+ case FPZero: overflow_to_inf = false; break;
+ default: break;
+ }
+ FPProcessException(); // FPExc_Overflow and FPExc_Inexact.
+ if (overflow_to_inf) {
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else {
+ // Return FPMaxNormal(sign).
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ return float_pack(sign, 0xfe, 0x07fffff);
+ } else {
+ return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
+ }
+ }
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = float_sign(op);
+ exp = float_exp(op);
+ fraction = float_mantissa(op);
+ fraction <<= 29;
+ } else {
+ sign = double_sign(op);
+ exp = double_exp(op);
+ fraction = double_mantissa(op);
+ }
+
+ if (exp == 0) {
+ if (Bits(fraction, 51, 51) == 0) {
+ exp -= 1;
+ fraction = Bits(fraction, 49, 0) << 2;
+ } else {
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+ }
+
+ double scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254.
+ } else {
+ result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046.
+ }
+
+ double estimate = recip_estimate(scaled);
+
+ fraction = double_mantissa(estimate);
+ if (result_exp == 0) {
+ fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1);
+ } else if (result_exp == -1) {
+ fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2);
+ result_exp = 0;
+ }
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29));
+ return float_pack(sign, exp_bits, frac_bits);
+ } else {
+ return double_pack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding round) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipEstimate<float>(input, round));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipEstimate<double>(input, round));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x3FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+// Based on reference C function recip_estimate from ARM ARM.
+double Simulator::recip_estimate(double a) {
+ int q, s;
+ double r;
+ q = static_cast<int>(a * 512.0);
+ r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
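+
+// Illustrative example (not from the original patch): for a scaled input
+// a = 0.5, q = 256 and r = 512.0 / 256.5 ~= 1.9961, so s = 511 and the
+// returned estimate is 511.0 / 256.0 = 1.99609375, close to the exact
+// reciprocal 2.0.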
+
+
+LogicVRegister Simulator::urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x7FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ T result;
+ if (std::isnan(op)) {
+ result = FPProcessNaN(op);
+ } else {
+ int exp;
+ uint32_t sign;
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = float_sign(op);
+ exp = float_exp(op);
+ exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0));
+ result = float_pack(sign, exp, 0);
+ } else {
+ sign = double_sign(op);
+ exp = double_exp(op);
+ exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0));
+ result = double_pack(sign, exp, 0);
+ }
+ }
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecpx<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecpx<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = FixedToFloat(src.Int(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = FixedToDouble(src.Int(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
new file mode 100644
index 000000000..02c62ecdb
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
@@ -0,0 +1,2007 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+#include <ctype.h>
+
+namespace vixl {
+
+MacroAssembler::MacroAssembler()
+ : js::jit::Assembler(),
+ sp_(x28),
+ tmp_list_(ip0, ip1),
+ fptmp_list_(d31)
+{
+}
+
+
+void MacroAssembler::FinalizeCode() {
+ Assembler::FinalizeCode();
+}
+
+
+int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm) {
+ bool emit_code = (masm != NULL);
+ VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ // The worst case for size is mov 64-bit immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(masm);
+
+  // Immediates on AArch64 can be produced using an initial value, and zero to
+  // three move-keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
+
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (OneInstrMoveImmediateHelper(masm, rd, imm)) {
+ return 1;
+ } else {
+ int instruction_count = 0;
+ unsigned reg_size = rd.size();
+
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffff;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move values into the stack pointer, so set up a
+ // temporary register, if needed.
+ UseScratchRegisterScope temps;
+ Register temp;
+ if (emit_code) {
+ temps.Open(masm);
+ temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+ }
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ VIXL_ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (temp.size() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i);
+ instruction_count++;
+ } else {
+ if (emit_code) masm->movz(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ if (emit_code) masm->movk(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ }
+ }
+
+ VIXL_ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ if (emit_code) masm->mov(rd, temp);
+ instruction_count++;
+ }
+ return instruction_count;
+ }
+}
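+
+// Illustrative example (not from the original patch): a value such as
+// 0x0000cafe00001234 has no single-instruction encoding, so the generic path
+// above emits one movz for the first non-zero halfword and one movk for the
+// other, roughly (with x0 standing in for the destination):
+//   movz x0, #0x1234
+//   movk x0, #0xcafe, lsl #32
+// and MoveImmediateHelper returns an instruction count of 2.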
+
+
+bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm) {
+ bool emit_code = masm != NULL;
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.size();
+
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ if (emit_code) {
+ masm->movz(dst, imm);
+ }
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move negative instruction. Movn can't
+ // write to the stack pointer.
+ if (emit_code) {
+ masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ }
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ VIXL_ASSERT(!dst.IsZero());
+ if (emit_code) {
+ masm->LogicalImmediate(
+ dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ }
+ return true;
+ }
+ return false;
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
+ ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ b(label);
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ b(&done, InvertCondition(cond));
+ b(label);
+ bind(&done);
+ } else {
+ // TODO: Need to register a slot in a literal pool, so that we can
+ // write a branch instruction there and use that to branch in case
+ // the unbound label winds up being out of range.
+ b(label, cond);
+ }
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+ // TODO: Need to register a slot in a literal pool, so that we can
+ // write a branch instruction there and use that to branch in case
+ // the unbound label winds up being out of range.
+ cbnz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbnz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+ // write a branch instruction there and use that to branch in case
+ // the unbound label winds up being out of range.
+ cbz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+ // write a branch instruction there and use that to branch in case
+ // the unbound label winds up being out of range.
+ tbnz(rt, bit_pos, label);
+ }
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbnz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+ // write a branch instruction there and use that to branch in case
+ // the unbound label winds up being out of range.
+ tbz(rt, bit_pos, label);
+ }
+}
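+
+// Illustrative note (not from the original patch): the expansion used above
+// for bound but out-of-range labels inverts the branch around an
+// unconditional b. For example, an out-of-range Tbz(x0, 3, &far) becomes:
+//   tbnz x0, #3, done
+//   b    far
+// done:
+// trading one extra instruction for the much larger range of b.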
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ Ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ // The worst case for size is logical immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to do the operation
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ }
+
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
+ VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
+ ((immediate >> kWRegSize) == -1));
+ immediate &= kWRegMask;
+ }
+
+ VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR:
+ VIXL_FALLTHROUGH();
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffff))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ if (rd.Is(sp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, imm_operand, op);
+ Mov(sp, temp);
+ } else {
+ Logical(rd, rn, imm_operand, op);
+ }
+ }
+ } else if (operand.IsExtendedRegister()) {
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, Operand(temp), op);
+ } else {
+ // The operand can be encoded in the instruction.
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
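+
+// Illustrative example (not from the original patch): And(x0, x1, 0xffff)
+// satisfies IsImmLogical() and is emitted as a single "and x0, x1, #0xffff",
+// whereas an immediate such as 0x1234567890 is not a valid logical immediate,
+// so it is first materialised into a scratch register via
+// MoveImmediateForShiftedOp() and then combined using the register form.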
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ // The worst case for size is mov immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, operand.immediate());
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ mov(rd, operand.reg());
+ }
+ }
+}
+
+
+void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(is_uint16(imm));
+ int byte1 = (imm & 0xff);
+ int byte2 = ((imm >> 8) & 0xff);
+ if (byte1 == byte2) {
+ movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
+ } else if (byte1 == 0) {
+ movi(vd, byte2, LSL, 8);
+ } else if (byte2 == 0) {
+ movi(vd, byte1);
+ } else if (byte1 == 0xff) {
+ mvni(vd, ~byte2 & 0xff, LSL, 8);
+ } else if (byte2 == 0xff) {
+ mvni(vd, ~byte1 & 0xff);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ movz(temp, imm);
+ dup(vd, temp);
+ }
+}
+
+
+void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(is_uint32(imm));
+
+ uint8_t bytes[sizeof(imm)];
+ memcpy(bytes, &imm, sizeof(imm));
+
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 4; ++i) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ all0orff = false;
+ break;
+ }
+ }
+
+ if (all0orff == true) {
+ movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is non-zero.
+ for (int i = 0; i < 4; i++) {
+ if ((imm & (0xff << (i * 8))) == imm) {
+ movi(vd, bytes[i], LSL, i * 8);
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is not 0xff.
+ for (int i = 0; i < 4; i++) {
+ uint32_t mask = ~(0xff << (i * 8));
+ if ((imm & mask) == mask) {
+ mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ return;
+ }
+ }
+
+ // Immediate is of the form 0x00MMFFFF.
+ if ((imm & 0xff00ffff) == 0x0000ffff) {
+ movi(vd, bytes[2], MSL, 16);
+ return;
+ }
+
+ // Immediate is of the form 0x0000MMFF.
+ if ((imm & 0xffff00ff) == 0x000000ff) {
+ movi(vd, bytes[1], MSL, 8);
+ return;
+ }
+
+ // Immediate is of the form 0xFFMM0000.
+ if ((imm & 0xff00ffff) == 0xff000000) {
+ mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ return;
+ }
+ // Immediate is of the form 0xFFFFMM00.
+ if ((imm & 0xffff00ff) == 0xffff0000) {
+ mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ return;
+ }
+
+ // Top and bottom 16-bits are equal.
+ if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ Mov(temp, imm);
+ dup(vd, temp);
+ }
+}
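+
+// Illustrative example (not from the original patch): for imm = 0x00ab0000
+// only one byte is non-zero, so the helper above emits
+// "movi vd, #0xab, lsl #16"; for imm = 0x0042ffff the 0x00MMFFFF pattern
+// matches and "movi vd, #0x42, msl #16" is emitted instead.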
+
+
+void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 8; ++i) {
+ int byteval = (imm >> (i * 8)) & 0xff;
+ if (byteval != 0 && byteval != 0xff) {
+ all0orff = false;
+ break;
+ }
+ }
+ if (all0orff == true) {
+ movi(vd, imm);
+ return;
+ }
+ }
+
+ // Top and bottom 32-bits are equal.
+ if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, imm);
+ if (vd.Is1D()) {
+ mov(vd.D(), 0, temp);
+ } else {
+ dup(vd.V2D(), temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift,
+ int shift_amount) {
+ MacroEmissionCheckScope guard(this);
+ if (shift_amount != 0 || shift != LSL) {
+ movi(vd, imm, shift, shift_amount);
+ } else if (vd.Is8B() || vd.Is16B()) {
+ // 8-bit immediate.
+ VIXL_ASSERT(is_uint8(imm));
+ movi(vd, imm);
+ } else if (vd.Is4H() || vd.Is8H()) {
+ // 16-bit immediate.
+ Movi16bitHelper(vd, imm);
+ } else if (vd.Is2S() || vd.Is4S()) {
+ // 32-bit immediate.
+ Movi32bitHelper(vd, imm);
+ } else {
+ // 64-bit immediate.
+ Movi64bitHelper(vd, imm);
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t hi,
+ uint64_t lo) {
+ // TODO: Move 128-bit values in a more efficient way.
+ VIXL_ASSERT(vd.Is128Bits());
+ UseScratchRegisterScope temps(this);
+ Movi(vd.V2D(), lo);
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ // The worst case for size is mvn immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mvn(rd, operand.immediate());
+ } else if (operand.IsExtendedRegister()) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(operand.reg());
+
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ Register temp = temps.AcquireSameSizeAs(rd);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, Operand(temp));
+ } else {
+ // Otherwise, register and shifted register cases can be handled by the
+ // assembler directly, using orn.
+ mvn(rd, operand);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ MoveImmediateHelper(this, rd, imm);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is ccmp immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for ccmp
+ MacroEmissionCheckScope guard(this);
+
+ if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+ } else {
+ UseScratchRegisterScope temps(this);
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is csel immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for csel
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
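+
+// Illustrative example (not from the original patch): Csel(x0, x1, 1, eq)
+// hits the imm == 1 special case above and is emitted as
+// "csinc x0, x1, xzr, eq", avoiding a separate immediate move.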
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, S, ADD);
+ }
+}
+
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Add(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, S, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Sub(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value,
+ FPTrapFlags trap) {
+ // The worst case for size is:
+ // * 1 to materialise the constant, using literal pool if necessary
+ // * 1 instruction for fcmp{e}
+ MacroEmissionCheckScope guard(this);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ VIXL_ASSERT(!tmp.Is(fn));
+ Fmov(tmp, value);
+ FPCompareMacro(fn, tmp, trap);
+ } else {
+ FPCompareMacro(fn, value, trap);
+ }
+}
+
+
+void MacroAssembler::Fcmpe(const FPRegister& fn, double value) {
+ Fcmp(fn, value, EnableTrap);
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, double imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+ Fmov(vd, static_cast<float>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1D() || vd.Is2D());
+ if (IsImmFP64(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint64_t rawbits = double_to_rawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, xzr);
+ } else {
+ Assembler::fImmPool64(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, float imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1D() || vd.Is2D()) {
+ Fmov(vd, static_cast<double>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
+ if (IsImmFP32(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint32_t rawbits = float_to_rawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, wzr);
+ } else {
+ Assembler::fImmPool32(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ return OneInstrMoveImmediateHelper(this, dst, imm);
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm) {
+ int reg_size = dst.size();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ int64_t imm_low = imm >> shift_low;
+
+ // Pre-shift the immediate to the most-significant bits of the register,
+ // inserting set bits in the least-significant bits.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+ if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
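+
+// Illustrative example (not from the original patch): for imm = 0xabcd0
+// (and a destination that is not the stack pointer) no single mov/movn/orr
+// encoding exists, but the pre-shifted value 0xabcd is a valid movz
+// immediate, so the helper emits "movz dst, #0xabcd" and returns
+// Operand(dst, LSL, 4) for the caller to fold into its shifted-register form.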
+
+
+void MacroAssembler::ComputeAddress(const Register& dst,
+ const MemOperand& mem_op) {
+ // We cannot handle pre-indexing or post-indexing.
+ VIXL_ASSERT(mem_op.addrmode() == Offset);
+ Register base = mem_op.base();
+ if (mem_op.IsImmediateOffset()) {
+ Add(dst, base, mem_op.offset());
+ } else {
+ VIXL_ASSERT(mem_op.IsRegisterOffset());
+ Register reg_offset = mem_op.regoffset();
+ Shift shift = mem_op.shift();
+ Extend extend = mem_op.extend();
+ if (shift == NO_SHIFT) {
+ VIXL_ASSERT(extend != NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, extend, mem_op.shift_amount()));
+ } else {
+ VIXL_ASSERT(extend == NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, shift, mem_op.shift_amount()));
+ }
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ // Worst case is add/sub immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ if (operand.IsImmediate()) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.immediate());
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ // Worst case is addc/subc immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register.)
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+ VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
+ operand.shift_amount()));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
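+
+// Illustrative example (not from the original patch): Adc(x0, x1, 42) cannot
+// encode the immediate directly (adc has no immediate form), so the macro
+// above materialises it into a scratch register first, roughly:
+//   mov temp, #42
+//   adc x0, x1, temp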
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ // Worst case is ldr/str pre/post index:
+ // * 1 instruction for ldr/str
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ VIXL_ASSERT(!temp.Is(rt));
+ VIXL_ASSERT(!temp.Is(addr.base()) && !temp.Is(addr.regoffset()));
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ Add(addr.base(), addr.base(), Operand(offset));
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ Add(addr.base(), addr.base(), Operand(offset));
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
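+
+// Illustrative example (not from the original patch): an offset such as
+// 0x123457 fits neither the scaled unsigned nor the unscaled signed immediate
+// field, so Ldr(x0, MemOperand(x1, 0x123457)) expands into a scratch-register
+// move followed by a register-offset load, roughly:
+//   mov temp, #0x123457   // materialised with movz/movk
+//   ldr x0, [x1, temp]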
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+void MacroAssembler::FN(const REGTYPE REG, \
+ const REGTYPE REG2, \
+ const MemOperand& addr) { \
+ LoadStorePairMacro(REG, REG2, addr, OP); \
+}
+LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ VIXL_ASSERT(!addr.IsRegisterOffset());
+ // Worst case is ldp/stp immediate:
+ // * 1 instruction for ldp/stp
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
+ if (IsImmLSPair(offset, access_size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ VIXL_ASSERT(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
+
+void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) {
+ MacroEmissionCheckScope guard(this);
+
+ // There are no pre- or post-index modes for prfm.
+ VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset());
+
+ // The access size is implicitly 8 bytes for all prefetch operations.
+ unsigned size = kXRegSizeInBytesLog2;
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) &&
+ !IsImmLSUnscaled(addr.offset())) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ Prefetch(op, MemOperand(addr.base(), temp));
+ } else {
+ // Simple register-offsets are encodable in one instruction.
+ Prefetch(op, addr);
+ }
+}
+
+
+void MacroAssembler::PushStackPointer() {
+ PrepareForPush(1, 8);
+
+ // Pushing a stack pointer leads to implementation-defined
+ // behavior, which may be surprising. In particular,
+ // str x28, [x28, #-8]!
+ // pre-decrements the stack pointer, storing the decremented value.
+ // Additionally, sp is read as xzr in this context, so it cannot be pushed.
+ // So we must use a scratch register.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ Mov(scratch, GetStackPointer64());
+ str(scratch, MemOperand(GetStackPointer64(), -8, PreIndex));
+}
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(src0.IsValid());
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ if (src0.Is(GetStackPointer64())) {
+ VIXL_ASSERT(count == 1);
+ VIXL_ASSERT(size == 8);
+ PushStackPointer();
+ return;
+ }
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPush(registers.Count(), reg_size);
+
+ // Bump the stack pointer and store two registers at the bottom.
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Stp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), -size, PreIndex));
+ } else if (bottom_0.IsValid()) {
+ Str(bottom_0, MemOperand(GetStackPointer64(), -size, PreIndex));
+ }
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& src0 = registers.PopLowestIndex();
+ const CPURegister& src1 = registers.PopLowestIndex();
+ if (src1.IsValid()) {
+ Stp(src0, src1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Str(src0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPop(registers.Count(), reg_size);
+
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (dst1.IsValid()) {
+ Ldp(dst0, dst1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Ldr(dst0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+
+ // Load the two registers at the bottom and drop the stack pointer.
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Ldp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), size, PostIndex));
+ } else if (bottom_0.IsValid()) {
+ Ldr(bottom_0, MemOperand(GetStackPointer64(), size, PostIndex));
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(int count, Register src) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is sp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for sp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ VIXL_ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 stp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(size == src0.SizeInBytes());
+
+ // Pushing the stack pointer has unexpected behavior. See PushStackPointer().
+ VIXL_ASSERT(!src0.Is(GetStackPointer64()) && !src0.Is(sp));
+ VIXL_ASSERT(!src1.Is(GetStackPointer64()) && !src1.Is(sp));
+ VIXL_ASSERT(!src2.Is(GetStackPointer64()) && !src2.Is(sp));
+ VIXL_ASSERT(!src3.Is(GetStackPointer64()) && !src3.Is(sp));
+
+ // The JS engine should never push 4 bytes.
+ VIXL_ASSERT(size >= 8);
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(GetStackPointer64(), -1 * size, PreIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(GetStackPointer64(), -2 * size, PreIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(GetStackPointer64(), -3 * size, PreIndex));
+ str(src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
+ stp(src3, src2, MemOperand(GetStackPointer64(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 ldp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(GetStackPointer64(), 1 * size, PostIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 2 * size, PostIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and skip
+ // the whole block in the second instruction. This allows four W registers
+ // to be popped using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
+ ldp(dst2, dst3, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 4 * size, PostIndex));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(int count, int size) {
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(count * size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(int count, int size) {
+ USE(count, size);
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ }
+}
+
+void MacroAssembler::Poke(const Register& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Str(src, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Ldr(dst, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Claim(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Sub(GetStackPointer64(), GetStackPointer64(), size);
+
+ // Make sure the real stack pointer reflects the claimed stack space.
+ // We can't use stack memory below the stack pointer, because it could be
+ // clobbered by interrupts and signal handlers.
+ if (!sp.Is(GetStackPointer64())) {
+ Mov(sp, GetStackPointer64());
+ }
+}
+
+
+void MacroAssembler::Drop(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Add(GetStackPointer64(), GetStackPointer64(), size);
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 stp will be emitted.
+ // TODO(all): Should we use GetCalleeSaved and SavedFP?
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos);
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 ldp will be emitted.
+ // TODO(all): Should we use GetCalleeSaved and SavedFP?
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos);
+ ldp(x29, x30, tos);
+}
+
+void MacroAssembler::LoadCPURegList(CPURegList registers,
+ const MemOperand& src) {
+ LoadStoreCPURegListHelper(kLoad, registers, src);
+}
+
+void MacroAssembler::StoreCPURegList(CPURegList registers,
+ const MemOperand& dst) {
+ LoadStoreCPURegListHelper(kStore, registers, dst);
+}
+
+
+void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op,
+ CPURegList registers,
+ const MemOperand& mem) {
+ // We do not handle pre-indexing or post-indexing.
+ VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex()));
+ VIXL_ASSERT(!registers.Overlaps(tmp_list_));
+ VIXL_ASSERT(!registers.Overlaps(fptmp_list_));
+ VIXL_ASSERT(!registers.IncludesAliasOf(sp));
+
+ UseScratchRegisterScope temps(this);
+
+ MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers,
+ mem,
+ &temps);
+
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (op == kStore) {
+ Stp(dst0, dst1, loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldp(dst0, dst1, loc);
+ }
+ loc.AddOffset(2 * registers.RegisterSizeInBytes());
+ }
+ if (!registers.IsEmpty()) {
+ if (op == kStore) {
+ Str(registers.PopLowestIndex(), loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldr(registers.PopLowestIndex(), loc);
+ }
+ }
+}
+
+MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope) {
+ // If necessary, pre-compute the base address for the accesses.
+ if (mem.IsRegisterOffset()) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+
+ } else if (mem.IsImmediateOffset()) {
+ int reg_size = registers.RegisterSizeInBytes();
+ int total_size = registers.TotalSizeInBytes();
+ int64_t min_offset = mem.offset();
+ int64_t max_offset = mem.offset() + std::max(0, total_size - 2 * reg_size);
+ if ((registers.Count() >= 2) &&
+ (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) ||
+ !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+ }
+ }
+
+ return mem;
+}
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ VIXL_ASSERT(!sp.Is(GetStackPointer64()));
+ // TODO: Several callers rely on this not using scratch registers, so we use
+ // the assembler directly here. However, this means that large immediate
+ // values of 'space' cannot be handled.
+ InstructionAccurateScope scope(this, 1);
+ sub(sp, GetStackPointer64(), space);
+}
+
+
+void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the trace pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kTraceOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
+ dc32(parameters);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
+ dc32(command);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters, command);
+#endif
+}
+
+
+void MacroAssembler::Log(TraceParameters parameters) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the log pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kLogLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kLogOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
+ dc32(parameters);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters);
+#endif
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateEnable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateDisable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ VIXL_ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+
+void UseScratchRegisterScope::Open(MacroAssembler* masm) {
+ VIXL_ASSERT(!initialised_);
+ available_ = masm->TmpList();
+ availablefp_ = masm->FPTmpList();
+ old_available_ = available_->list();
+ old_availablefp_ = availablefp_->list();
+ VIXL_ASSERT(available_->type() == CPURegister::kRegister);
+ VIXL_ASSERT(availablefp_->type() == CPURegister::kVRegister);
+#ifdef DEBUG
+ initialised_ = true;
+#endif
+}
+
+
+void UseScratchRegisterScope::Close() {
+ if (available_) {
+ available_->set_list(old_available_);
+ available_ = NULL;
+ }
+ if (availablefp_) {
+ availablefp_->set_list(old_availablefp_);
+ availablefp_ = NULL;
+ }
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+
+UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+ Open(masm);
+}
+
+// This allows deferred (and optional) initialisation of the scope.
+UseScratchRegisterScope::UseScratchRegisterScope()
+ : available_(NULL), availablefp_(NULL),
+ old_available_(0), old_availablefp_(0) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ Close();
+}
+
+
+bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
+ return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register(code, reg.size());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister(code, reg.size());
+}
+
+
+void UseScratchRegisterScope::Release(const CPURegister& reg) {
+ VIXL_ASSERT(initialised_);
+ if (reg.IsRegister()) {
+ ReleaseByCode(available_, reg.code());
+ } else if (reg.IsFPRegister()) {
+ ReleaseByCode(availablefp_, reg.code());
+ } else {
+ VIXL_ASSERT(reg.IsNone());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const CPURegList& list) {
+ VIXL_ASSERT(initialised_);
+ if (list.type() == CPURegister::kRegister) {
+ // Make sure that neither sp nor xzr is included in the list.
+ IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ IncludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ VIXL_ASSERT(initialised_);
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ // Make sure that neither sp nor xzr is included in the list.
+ include &= ~(xzr.Bit() | sp.Bit());
+
+ IncludeByRegList(available_, include);
+}
+
+
+void UseScratchRegisterScope::Include(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ IncludeByRegList(availablefp_, include);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegList& list) {
+ if (list.type() == CPURegister::kRegister) {
+ ExcludeByRegList(available_, list.list());
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ ExcludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Exclude(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(available_, exclude);
+}
+
+
+void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4) {
+ RegList exclude = 0;
+ RegList excludefp = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4};
+
+ for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
+ if (regs[i].IsRegister()) {
+ exclude |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ excludefp |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(regs[i].IsNone());
+ }
+ }
+
+ ExcludeByRegList(available_, exclude);
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::ExcludeAll() {
+ ExcludeByRegList(available_, available_->list());
+ ExcludeByRegList(availablefp_, availablefp_->list());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ VIXL_CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ VIXL_ASSERT(!AreAliased(result, xzr, sp));
+ return result;
+}
+
+
+void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
+ ReleaseByRegList(available, static_cast<RegList>(1) << code);
+}
+
+
+void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
+ RegList exclude) {
+ available->set_list(available->list() & ~exclude);
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
new file mode 100644
index 000000000..352794432
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
@@ -0,0 +1,2494 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
+#define VIXL_A64_MACRO_ASSEMBLER_A64_H_
+
+#include <algorithm>
+#include <limits>
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+#define LSPAIR_MACRO_LIST(V) \
+ V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
+ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
+ V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+
+namespace vixl {
+
+// Forward declaration
+class MacroAssembler;
+class UseScratchRegisterScope;
+
+// This scope has the following purposes:
+// * Acquire/Release the underlying assembler's code buffer.
+// * This is mandatory before emitting.
+// * Emit the literal or veneer pools if necessary before emitting the
+// macro-instruction.
+// * Ensure there is enough space to emit the macro-instruction.
+class EmissionCheckScope {
+ public:
+ EmissionCheckScope(MacroAssembler* masm, size_t size)
+ : masm_(masm)
+ { }
+
+ protected:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ Label start_;
+ size_t size_;
+#endif
+};
+
+
+// Helper for common Emission checks.
+// The macro-instruction maps to a single instruction.
+class SingleEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit SingleEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kInstructionSize) {}
+};
+
+
+// The macro-instruction is a "typical" macro-instruction. Typical macro-
+// instructions only emit a few instructions, "a few" being defined as 8 here.
+class MacroEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit MacroEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
+
+ private:
+ static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
+};
+
+
+enum BranchType {
+ // Copies of architectural conditions.
+ // The associated conditions can be used in place of those; the code will
+ // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+ // 'never' is used to not generate a branch (generally as the inverse
+ // branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
+
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+
+
+class MacroAssembler : public js::jit::Assembler {
+ public:
+ MacroAssembler();
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+
+ // Constant generation helpers.
+ // These functions return the number of instructions required to move the
+ // immediate into the destination register. In addition, if the masm pointer
+ // is non-null, they generate the code to do so.
+ // Both features are implemented in a single function to avoid duplicating
+ // the logic.
+ // The functions can be used to evaluate the cost of synthesizing an
+ // immediate with 'mov immediate' instructions. A user might prefer loading
+ // a constant from the literal pool instead of using multiple 'mov immediate'
+ // instructions.
+ static int MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm);
+ static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm);
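+
+ // Illustrative sketch (an assumption, not part of the original source):
+ // passing a null masm pointer queries the cost without emitting any code.
+ //   int cost = MoveImmediateHelper(nullptr, x0, UINT64_C(0x0123456789abcdef));
+ //   // 'cost' is the number of instructions a Mov of this immediate would take.
+ // The register x0 and the immediate value are placeholders for illustration.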
+
+
+ // Logical macros.
+ void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Cmn(const Register& rn, const Operand& operand);
+ void Cmp(const Register& rn, const Operand& operand);
+ void Neg(const Register& rd,
+ const Operand& operand);
+ void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ngc(const Register& rd,
+ const Operand& operand);
+ void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd, uint64_t imm);
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mvn(const Register& rd, uint64_t imm) {
+ Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask));
+ }
+ void Mvn(const Register& rd, const Operand& operand);
+
+ // Try to move an immediate into the destination register in a single
+ // instruction. Returns true on success and updates the contents of dst;
+ // returns false otherwise.
+ bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+
+ // Move an immediate into register dst, and return an Operand object for
+ // use with a subsequent instruction that accepts a shift. The value moved
+ // into dst is not necessarily equal to imm; it may have had a shifting
+ // operation applied to it that will be subsequently undone by the shift
+ // applied in the Operand.
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
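+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   if (!TryOneInstrMoveImmediate(scratch, imm)) {
+ //     Mov(scratch, imm);  // fall back to a multi-instruction move
+ //   }
+ // 'scratch' and 'imm' are placeholder names used only for this example.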
+
+ // Synthesises the address represented by a MemOperand into a register.
+ void ComputeAddress(const Register& dst, const MemOperand& mem_op);
+
+ // Conditional macros.
+ void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+ LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+
+ void Prfm(PrefetchOperation op, const MemOperand& addr);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void PushStackPointer();
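+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   Push(x0, x1);  // equivalent to Push(x0); Push(x1); x0 gets the higher address
+ //   Pop(x1, x0);   // equivalent to Pop(x1); Pop(x0); restores both registers
+ // Two X registers occupy 16 bytes, satisfying the alignment rule above when
+ // sp is the current stack pointer.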
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
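+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   PushXRegList(x19.Bit() | x20.Bit());  // x20 is stored at the higher address
+ //   PopXRegList(x19.Bit() | x20.Bit());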
+
+ void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSize);
+ }
+ void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSize);
+ }
+ void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSize);
+ }
+ void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSize);
+ }
+ void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+ void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(int count, Register src);
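+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   PushMultipleTimes(4, xzr);  // pushes four zeroed 8-byte slots (32 bytes)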
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Poke(const Register& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Peek(const Register& dst, const Operand& offset);
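+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   Poke(x0, Operand(0));  // store x0 at [stack pointer + 0]
+ //   Poke(x1, Operand(8));  // store x1 at [stack pointer + 8]
+ //   Peek(x2, Operand(0));  // reload the first slot into x2
+ // The byte offsets address stack space that is assumed to have been claimed
+ // beforehand (see Claim below).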
+
+ // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses.
+ //
+ // (Peek|Poke)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
+ void PeekCPURegList(CPURegList registers, int64_t offset) {
+ LoadCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+ void PokeCPURegList(CPURegList registers, int64_t offset) {
+ StoreCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+
+ void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PeekCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PokeCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PeekXRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kXRegSize);
+ }
+ void PokeXRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kXRegSize);
+ }
+ void PeekWRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kWRegSize);
+ }
+ void PokeWRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kWRegSize);
+ }
+ void PeekDRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PokeDRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PeekSRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
+ void PokeSRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
+
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ void Claim(const Operand& size);
+ void Drop(const Operand& size);
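+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   Claim(Operand(32));  // reserve 32 bytes (a multiple of 16, as required)
+ //   ...                  // access the space with Poke/Peek
+ //   Drop(Operand(32));   // release it again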
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PopCalleeSavedRegisters();
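+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   PushCalleeSavedRegisters();  // typical prologue: saves x19-x30 and d8-d15
+ //   ...                          // body may clobber the callee-saved registers
+ //   PopCalleeSavedRegisters();   // matching epilogue before Ret()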
+
+ void LoadCPURegList(CPURegList registers, const MemOperand& src);
+ void StoreCPURegList(CPURegList registers, const MemOperand& dst);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ void Adr(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adr(rd, label);
+ }
+ void Adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adrp(rd, label);
+ }
+ void Asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asr(rd, rn, shift);
+ }
+ void Asr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asrv(rd, rn, rm);
+ }
+
+ // Branch type inversion relies on these relations.
+ VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
+ BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ InvertCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+ }
+
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+
+ void B(Label* label);
+ void B(Label* label, Condition cond);
+ void B(Condition cond, Label* label) {
+ B(label, cond);
+ }
+ void Bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfm(rd, rn, immr, imms);
+ }
+ void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfi(rd, rn, lsb, width);
+ }
+ void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfxil(rd, rn, lsb, width);
+ }
+ void Bind(Label* label);
+ // Bind a label to a specified offset from the start of the buffer.
+ void BindToOffset(Label* label, ptrdiff_t offset);
+ void Bl(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ bl(label);
+ }
+ void Blr(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ blr(xn);
+ }
+ void Br(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ br(xn);
+ }
+ void Brk(int code = 0) {
+ SingleEmissionCheckScope guard(this);
+ brk(code);
+ }
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ void Cinc(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinc(rd, rn, cond);
+ }
+ void Cinv(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinv(rd, rn, cond);
+ }
+ void Clrex() {
+ SingleEmissionCheckScope guard(this);
+ clrex();
+ }
+ void Cls(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cls(rd, rn);
+ }
+ void Clz(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ clz(rd, rn);
+ }
+ void Cneg(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cneg(rd, rn, cond);
+ }
+ void Cset(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cset(rd, cond);
+ }
+ void Csetm(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ csetm(rd, cond);
+ }
+ void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinc(rd, rn, rm, cond);
+ }
+ void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinv(rd, rn, rm, cond);
+ }
+ void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csneg(rd, rn, rm, cond);
+ }
+ void Dmb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dmb(domain, type);
+ }
+ void Dsb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dsb(domain, type);
+ }
+ void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ extr(rd, rn, rm, lsb);
+ }
+ void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fadd(vd, vn, vm);
+ }
+ void Fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap = DisableTrap) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ FPCCompareMacro(vn, vm, nzcv, cond, trap);
+ }
+ void Fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ Fccmp(vn, vm, nzcv, cond, EnableTrap);
+ }
+ void Fcmp(const VRegister& vn, const VRegister& vm,
+ FPTrapFlags trap = DisableTrap) {
+ SingleEmissionCheckScope guard(this);
+ FPCompareMacro(vn, vm, trap);
+ }
+ void Fcmp(const VRegister& vn, double value,
+ FPTrapFlags trap = DisableTrap);
+ void Fcmpe(const VRegister& vn, double value);
+ void Fcmpe(const VRegister& vn, const VRegister& vm) {
+ Fcmp(vn, vm, EnableTrap);
+ }
+ void Fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ fcsel(vd, vn, vm, cond);
+ }
+ void Fcvt(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvt(vd, vn);
+ }
+ void Fcvtl(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl(vd, vn);
+ }
+ void Fcvtl2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl2(vd, vn);
+ }
+ void Fcvtn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn(vd, vn);
+ }
+ void Fcvtn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn2(vd, vn);
+ }
+ void Fcvtxn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn(vd, vn);
+ }
+ void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn2(vd, vn);
+ }
+ void Fcvtas(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtas(rd, vn);
+ }
+ void Fcvtau(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtau(rd, vn);
+ }
+ void Fcvtms(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtms(rd, vn);
+ }
+ void Fcvtmu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtmu(rd, vn);
+ }
+ void Fcvtns(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtns(rd, vn);
+ }
+ void Fcvtnu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtnu(rd, vn);
+ }
+ void Fcvtps(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtps(rd, vn);
+ }
+ void Fcvtpu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtpu(rd, vn);
+ }
+ void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(rd, vn, fbits);
+ }
+ void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(rd, vn, fbits);
+ }
+ void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fdiv(vd, vn, vm);
+ }
+ void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmax(vd, vn, vm);
+ }
+ void Fmaxnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmaxnm(vd, vn, vm);
+ }
+ void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmin(vd, vn, vm);
+ }
+ void Fminnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fminnm(vd, vn, vm);
+ }
+ void Fmov(VRegister vd, VRegister vn) {
+ SingleEmissionCheckScope guard(this);
+ // Only elide the instruction if vd and vn are the same D register; in every
+ // other case an fmov must be emitted. fmov(s0, s0) is not a no-op because it
+ // clears the top word of d0. Technically, fmov(d0, d0) is not a no-op either
+ // because it clears the top of q0, but VRegister does not currently support
+ // Q registers.
+ if (!vd.Is(vn) || !vd.Is64Bits()) {
+ fmov(vd, vn);
+ }
+ }
+ void Fmov(VRegister vd, Register rn) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, rn);
+ }
+ void Fmov(const VRegister& vd, int index, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, index, rn);
+ }
+ void Fmov(const Register& rd, const VRegister& vn, int index) {
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn, index);
+ }
+
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of vd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ void Fmov(VRegister vd, double imm);
+ void Fmov(VRegister vd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(VRegister vd, T imm) {
+ Fmov(vd, static_cast<double>(imm));
+ }
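+
+ // Illustrative sketch (an assumption, not part of the original source):
+ //   Fmov(d0, 1.0);   // double immediate into a D register
+ //   Fmov(s1, 1.0f);  // float immediate; no float<->double conversion happens,
+ //                    // so a signalling NaN payload would be preserved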
+ void Fmov(Register rd, VRegister vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn);
+ }
+ void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmul(vd, vn, vm);
+ }
+ void Fnmul(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fnmul(vd, vn, vm);
+ }
+ void Fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmadd(vd, vn, vm, va);
+ }
+ void Fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmsub(vd, vn, vm, va);
+ }
+ void Fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmadd(vd, vn, vm, va);
+ }
+ void Fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmsub(vd, vn, vm, va);
+ }
+ void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fsub(vd, vn, vm);
+ }
+ void Hint(SystemHint code) {
+ SingleEmissionCheckScope guard(this);
+ hint(code);
+ }
+ void Hlt(int code) {
+ SingleEmissionCheckScope guard(this);
+ hlt(code);
+ }
+ void Isb() {
+ SingleEmissionCheckScope guard(this);
+ isb();
+ }
+ void Ldar(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldar(rt, src);
+ }
+ void Ldarb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarb(rt, src);
+ }
+ void Ldarh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarh(rt, src);
+ }
+ void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldaxp(rt, rt2, src);
+ }
+ void Ldaxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxr(rt, src);
+ }
+ void Ldaxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrb(rt, src);
+ }
+ void Ldaxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrh(rt, src);
+ }
+ void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldnp(rt, rt2, src);
+ }
+ // Provide both double and float interfaces for FP immediate loads, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of vt. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ void Ldr(const VRegister& vt, double imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is64Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<float>(imm));
+ }
+ }
+ void Ldr(const VRegister& vt, float imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is32Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<double>(imm));
+ }
+ }
+ /*
+ void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
+ VIXL_ASSERT(vt.IsQ());
+ SingleEmissionCheckScope guard(this);
+ ldr(vt, new Literal<uint64_t>(high64, low64,
+ &literal_pool_,
+ RawLiteral::kDeletedOnPlacementByPool));
+ }
+ */
+ void Ldr(const Register& rt, uint64_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldr(rt, imm);
+ }
+ void Ldrsw(const Register& rt, uint32_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldrsw(rt, imm);
+ }
+ void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldxp(rt, rt2, src);
+ }
+ void Ldxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxr(rt, src);
+ }
+ void Ldxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrb(rt, src);
+ }
+ void Ldxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrh(rt, src);
+ }
+ void Lsl(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsl(rd, rn, shift);
+ }
+ void Lsl(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lslv(rd, rn, rm);
+ }
+ void Lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsr(rd, rn, shift);
+ }
+ void Lsr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsrv(rd, rn, rm);
+ }
+ void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ madd(rd, rn, rm, ra);
+ }
+ void Mneg(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mneg(rd, rn, rm);
+ }
+ void Mov(const Register& rd, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, rn);
+ }
+ void Movk(const Register& rd, uint64_t imm, int shift = -1) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ movk(rd, imm, shift);
+ }
+ void Mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mrs(rt, sysreg);
+ }
+ void Msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msr(sysreg, rt);
+ }
+ void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
+ SingleEmissionCheckScope guard(this);
+ sys(op1, crn, crm, op2, rt);
+ }
+ void Dc(DataCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ dc(op, rt);
+ }
+ void Ic(InstructionCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ ic(op, rt);
+ }
+ void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msub(rd, rn, rm, ra);
+ }
+ void Mul(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mul(rd, rn, rm);
+ }
+ void Nop() {
+ SingleEmissionCheckScope guard(this);
+ nop();
+ }
+ void Rbit(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rbit(rd, rn);
+ }
+ void Ret(const Register& xn = lr) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ret(xn);
+ }
+ void Rev(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev(rd, rn);
+ }
+ void Rev16(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev16(rd, rn);
+ }
+ void Rev32(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev32(rd, rn);
+ }
+ void Ror(const Register& rd, const Register& rs, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rs.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ror(rd, rs, shift);
+ }
+ void Ror(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rorv(rd, rn, rm);
+ }
+ void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfiz(rd, rn, lsb, width);
+ }
+ void Sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfm(rd, rn, immr, imms);
+ }
+ void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfx(rd, rn, lsb, width);
+ }
+ void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, rn, fbits);
+ }
+ void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sdiv(rd, rn, rm);
+ }
+ void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smaddl(rd, rn, rm, ra);
+ }
+ void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smsubl(rd, rn, rm, ra);
+ }
+ void Smull(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smull(rd, rn, rm);
+ }
+ void Smulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smulh(xd, xn, xm);
+ }
+ void Stlr(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlr(rt, dst);
+ }
+ void Stlrb(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrb(rt, dst);
+ }
+ void Stlrh(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrh(rt, dst);
+ }
+ void Stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stlxp(rs, rt, rt2, dst);
+ }
+ void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxr(rs, rt, dst);
+ }
+ void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrb(rs, rt, dst);
+ }
+ void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrh(rs, rt, dst);
+ }
+ void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stnp(rt, rt2, dst);
+ }
+ void Stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stxp(rs, rt, rt2, dst);
+ }
+ void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxr(rs, rt, dst);
+ }
+ void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrb(rs, rt, dst);
+ }
+ void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrh(rs, rt, dst);
+ }
+ void Svc(int code) {
+ SingleEmissionCheckScope guard(this);
+ svc(code);
+ }
+ void Sxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtb(rd, rn);
+ }
+ void Sxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxth(rd, rn);
+ }
+ void Sxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtw(rd, rn);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfiz(rd, rn, lsb, width);
+ }
+ void Ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfm(rd, rn, immr, imms);
+ }
+ void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfx(rd, rn, lsb, width);
+ }
+ void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, rn, fbits);
+ }
+ void Udiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ udiv(rd, rn, rm);
+ }
+ void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umaddl(rd, rn, rm, ra);
+ }
+ void Umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umull(rd, rn, rm);
+ }
+ void Umulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umulh(xd, xn, xm);
+ }
+ void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umsubl(rd, rn, rm, ra);
+ }
+ void Unreachable() {
+ SingleEmissionCheckScope guard(this);
+#ifdef JS_SIMULATOR_ARM64
+ hlt(kUnreachableOpcode);
+#else
+ // Branch to 0 to generate a segfault.
+ // lr - kInstructionSize is the address of the offending instruction.
+ blr(xzr);
+#endif
+ }
+ void Uxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtb(rd, rn);
+ }
+ void Uxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxth(rd, rn);
+ }
+ void Uxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtw(rd, rn);
+ }
+
+ // NEON 3 vector register instructions.
+ #define NEON_3VREG_MACRO_LIST(V) \
+ V(add, Add) \
+ V(addhn, Addhn) \
+ V(addhn2, Addhn2) \
+ V(addp, Addp) \
+ V(and_, And) \
+ V(bic, Bic) \
+ V(bif, Bif) \
+ V(bit, Bit) \
+ V(bsl, Bsl) \
+ V(cmeq, Cmeq) \
+ V(cmge, Cmge) \
+ V(cmgt, Cmgt) \
+ V(cmhi, Cmhi) \
+ V(cmhs, Cmhs) \
+ V(cmtst, Cmtst) \
+ V(eor, Eor) \
+ V(fabd, Fabd) \
+ V(facge, Facge) \
+ V(facgt, Facgt) \
+ V(faddp, Faddp) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxp, Fmaxp) \
+ V(fminnmp, Fminnmp) \
+ V(fminp, Fminp) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(frecps, Frecps) \
+ V(frsqrts, Frsqrts) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(mul, Mul) \
+ V(orn, Orn) \
+ V(orr, Orr) \
+ V(pmul, Pmul) \
+ V(pmull, Pmull) \
+ V(pmull2, Pmull2) \
+ V(raddhn, Raddhn) \
+ V(raddhn2, Raddhn2) \
+ V(rsubhn, Rsubhn) \
+ V(rsubhn2, Rsubhn2) \
+ V(saba, Saba) \
+ V(sabal, Sabal) \
+ V(sabal2, Sabal2) \
+ V(sabd, Sabd) \
+ V(sabdl, Sabdl) \
+ V(sabdl2, Sabdl2) \
+ V(saddl, Saddl) \
+ V(saddl2, Saddl2) \
+ V(saddw, Saddw) \
+ V(saddw2, Saddw2) \
+ V(shadd, Shadd) \
+ V(shsub, Shsub) \
+ V(smax, Smax) \
+ V(smaxp, Smaxp) \
+ V(smin, Smin) \
+ V(sminp, Sminp) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(sqadd, Sqadd) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqrshl, Sqrshl) \
+ V(sqshl, Sqshl) \
+ V(sqsub, Sqsub) \
+ V(srhadd, Srhadd) \
+ V(srshl, Srshl) \
+ V(sshl, Sshl) \
+ V(ssubl, Ssubl) \
+ V(ssubl2, Ssubl2) \
+ V(ssubw, Ssubw) \
+ V(ssubw2, Ssubw2) \
+ V(sub, Sub) \
+ V(subhn, Subhn) \
+ V(subhn2, Subhn2) \
+ V(trn1, Trn1) \
+ V(trn2, Trn2) \
+ V(uaba, Uaba) \
+ V(uabal, Uabal) \
+ V(uabal2, Uabal2) \
+ V(uabd, Uabd) \
+ V(uabdl, Uabdl) \
+ V(uabdl2, Uabdl2) \
+ V(uaddl, Uaddl) \
+ V(uaddl2, Uaddl2) \
+ V(uaddw, Uaddw) \
+ V(uaddw2, Uaddw2) \
+ V(uhadd, Uhadd) \
+ V(uhsub, Uhsub) \
+ V(umax, Umax) \
+ V(umaxp, Umaxp) \
+ V(umin, Umin) \
+ V(uminp, Uminp) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(uqadd, Uqadd) \
+ V(uqrshl, Uqrshl) \
+ V(uqshl, Uqshl) \
+ V(uqsub, Uqsub) \
+ V(urhadd, Urhadd) \
+ V(urshl, Urshl) \
+ V(ushl, Ushl) \
+ V(usubl, Usubl) \
+ V(usubl2, Usubl2) \
+ V(usubw, Usubw) \
+ V(usubw2, Usubw2) \
+ V(uzp1, Uzp1) \
+ V(uzp2, Uzp2) \
+ V(zip1, Zip1) \
+ V(zip2, Zip2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm); \
+ }
+ NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
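+  // For reference, DEFINE_MACRO_ASM_FUNC above expands each entry of
+  // NEON_3VREG_MACRO_LIST into a wrapper around the corresponding assembler
+  // instruction; for example, V(add, Add) produces:
+  //
+  //   void Add(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+  //     SingleEmissionCheckScope guard(this);
+  //     add(vd, vn, vm);
+  //   }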
+
+ // NEON 2 vector register instructions.
+ #define NEON_2VREG_MACRO_LIST(V) \
+ V(abs, Abs) \
+ V(addp, Addp) \
+ V(addv, Addv) \
+ V(cls, Cls) \
+ V(clz, Clz) \
+ V(cnt, Cnt) \
+ V(fabs, Fabs) \
+ V(faddp, Faddp) \
+ V(fcvtas, Fcvtas) \
+ V(fcvtau, Fcvtau) \
+ V(fcvtms, Fcvtms) \
+ V(fcvtmu, Fcvtmu) \
+ V(fcvtns, Fcvtns) \
+ V(fcvtnu, Fcvtnu) \
+ V(fcvtps, Fcvtps) \
+ V(fcvtpu, Fcvtpu) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxnmv, Fmaxnmv) \
+ V(fmaxp, Fmaxp) \
+ V(fmaxv, Fmaxv) \
+ V(fminnmp, Fminnmp) \
+ V(fminnmv, Fminnmv) \
+ V(fminp, Fminp) \
+ V(fminv, Fminv) \
+ V(fneg, Fneg) \
+ V(frecpe, Frecpe) \
+ V(frecpx, Frecpx) \
+ V(frinta, Frinta) \
+ V(frinti, Frinti) \
+ V(frintm, Frintm) \
+ V(frintn, Frintn) \
+ V(frintp, Frintp) \
+ V(frintx, Frintx) \
+ V(frintz, Frintz) \
+ V(frsqrte, Frsqrte) \
+ V(fsqrt, Fsqrt) \
+ V(mov, Mov) \
+ V(mvn, Mvn) \
+ V(neg, Neg) \
+ V(not_, Not) \
+ V(rbit, Rbit) \
+ V(rev16, Rev16) \
+ V(rev32, Rev32) \
+ V(rev64, Rev64) \
+ V(sadalp, Sadalp) \
+ V(saddlp, Saddlp) \
+ V(saddlv, Saddlv) \
+ V(smaxv, Smaxv) \
+ V(sminv, Sminv) \
+ V(sqabs, Sqabs) \
+ V(sqneg, Sqneg) \
+ V(sqxtn, Sqxtn) \
+ V(sqxtn2, Sqxtn2) \
+ V(sqxtun, Sqxtun) \
+ V(sqxtun2, Sqxtun2) \
+ V(suqadd, Suqadd) \
+ V(sxtl, Sxtl) \
+ V(sxtl2, Sxtl2) \
+ V(uadalp, Uadalp) \
+ V(uaddlp, Uaddlp) \
+ V(uaddlv, Uaddlv) \
+ V(umaxv, Umaxv) \
+ V(uminv, Uminv) \
+ V(uqxtn, Uqxtn) \
+ V(uqxtn2, Uqxtn2) \
+ V(urecpe, Urecpe) \
+ V(ursqrte, Ursqrte) \
+ V(usqadd, Usqadd) \
+ V(uxtl, Uxtl) \
+ V(uxtl2, Uxtl2) \
+ V(xtn, Xtn) \
+ V(xtn2, Xtn2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn); \
+ }
+ NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON 2 vector register with immediate instructions.
+ #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fcmle, Fcmle) \
+ V(fcmlt, Fcmlt)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ double imm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, imm); \
+ }
+ NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON by element instructions.
+ #define NEON_BYELEMENT_MACRO_LIST(V) \
+ V(fmul, Fmul) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(mul, Mul) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index \
+ ) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm, vm_index); \
+ }
+ NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
+ V(rshrn, Rshrn) \
+ V(rshrn2, Rshrn2) \
+ V(shl, Shl) \
+ V(shll, Shll) \
+ V(shll2, Shll2) \
+ V(shrn, Shrn) \
+ V(shrn2, Shrn2) \
+ V(sli, Sli) \
+ V(sqrshrn, Sqrshrn) \
+ V(sqrshrn2, Sqrshrn2) \
+ V(sqrshrun, Sqrshrun) \
+ V(sqrshrun2, Sqrshrun2) \
+ V(sqshl, Sqshl) \
+ V(sqshlu, Sqshlu) \
+ V(sqshrn, Sqshrn) \
+ V(sqshrn2, Sqshrn2) \
+ V(sqshrun, Sqshrun) \
+ V(sqshrun2, Sqshrun2) \
+ V(sri, Sri) \
+ V(srshr, Srshr) \
+ V(srsra, Srsra) \
+ V(sshll, Sshll) \
+ V(sshll2, Sshll2) \
+ V(sshr, Sshr) \
+ V(ssra, Ssra) \
+ V(uqrshrn, Uqrshrn) \
+ V(uqrshrn2, Uqrshrn2) \
+ V(uqshl, Uqshl) \
+ V(uqshrn, Uqshrn) \
+ V(uqshrn2, Uqshrn2) \
+ V(urshr, Urshr) \
+ V(ursra, Ursra) \
+ V(ushll, Ushll) \
+ V(ushll2, Ushll2) \
+ V(ushr, Ushr) \
+    V(usra, Usra)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ int shift) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, shift); \
+ }
+ NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ void Bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ bic(vd, imm8, left_shift);
+ }
+ void Cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmeq(vd, vn, imm);
+ }
+ void Cmge(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmge(vd, vn, imm);
+ }
+ void Cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmgt(vd, vn, imm);
+ }
+ void Cmle(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmle(vd, vn, imm);
+ }
+ void Cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmlt(vd, vn, imm);
+ }
+ void Dup(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, vn, index);
+ }
+ void Dup(const VRegister& vd,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, rn);
+ }
+ void Ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ ext(vd, vn, vm, index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, vn, vn_index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, rn);
+ }
+ void Ld1(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, vt4, src);
+ }
+ void Ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, lane, src);
+ }
+ void Ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1r(vt, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, lane, src);
+ }
+ void Ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2r(vt, vt2, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, lane, src);
+ }
+ void Ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3r(vt, vt2, vt3, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, lane, src);
+ }
+ void Ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4r(vt, vt2, vt3, vt4, src);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, vn, vn_index);
+ }
+ void Mov(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vn, index);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, rn);
+ }
+ void Mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, vn, vn_index);
+ }
+ void Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift = LSL,
+ int shift_amount = 0);
+ void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
+ void Mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0) {
+ SingleEmissionCheckScope guard(this);
+ mvni(vd, imm8, shift, shift_amount);
+ }
+ void Orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ orr(vd, imm8, left_shift);
+ }
+ void Scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, vn, fbits);
+ }
+ void Ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, vn, fbits);
+ }
+ void Fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(vd, vn, fbits);
+ }
+ void Fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(vd, vn, fbits);
+ }
+ void St1(const VRegister& vt,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, lane, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, lane, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, lane, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, lane, dst);
+ }
+ void Smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ smov(rd, vn, vn_index);
+ }
+ void Umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ umov(rd, vn, vn_index);
+ }
+ void Crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32b(rd, rn, rm);
+ }
+ void Crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32h(rd, rn, rm);
+ }
+ void Crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32w(rd, rn, rm);
+ }
+ void Crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32x(rd, rn, rm);
+ }
+ void Crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cb(rd, rn, rm);
+ }
+ void Crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32ch(rd, rn, rm);
+ }
+ void Crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cw(rd, rn, rm);
+ }
+ void Crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cx(rd, rn, rm);
+ }
+
+  // Lower the system stack pointer (sp) so that the current stack pointer
+  // (according to StackPointer()) can be lowered by the same amount and the
+  // memory in between accessed. This must be called _before_ accessing that
+  // memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not sp, since the call does
+ // not make sense in that context.
+ //
+ // TODO: This method can only accept values of 'space' that can be encoded in
+ // one instruction. Refer to the implementation for details.
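+  //
+  // For example (illustrative, assuming StackPointer() has been redirected to
+  // a pseudo stack pointer such as x28 via SetStackPointer64()):
+  //
+  //   __ BumpSystemStackPointer(Operand(0x20));
+  //
+  // After this, the 0x20 bytes below the current stack pointer can be accessed
+  // without dropping below sp.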
+ void BumpSystemStackPointer(const Operand& space);
+
+ // Set the current stack pointer, but don't generate any code.
+ void SetStackPointer64(const Register& stack_pointer) {
+ VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+  // Return the current stack pointer, as set by SetStackPointer64().
+ const Register& StackPointer() const {
+ return sp_;
+ }
+
+ const Register& GetStackPointer64() const {
+ return sp_;
+ }
+
+ const js::jit::Register getStackPointer() const {
+ int code = sp_.code();
+ if (code == kSPRegInternalCode) {
+ code = 31;
+ }
+ return js::jit::Register::FromCode(code);
+ }
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ // Trace control when running the debug simulator.
+ //
+ // For example:
+ //
+ // __ Trace(LOG_REGS, TRACE_ENABLE);
+  //    Will start logging registers. It has no effect if registers were
+  //    already being logged.
+ //
+ // __ Trace(LOG_DISASM, TRACE_DISABLE);
+ // Will stop logging disassembly. It has no effect if the disassembly wasn't
+ // already being logged.
+ void Trace(TraceParameters parameters, TraceCommand command);
+
+ // Log the requested data independently of what is being traced.
+ //
+ // For example:
+ //
+ // __ Log(LOG_FLAGS)
+ // Will output the flags.
+ void Log(TraceParameters parameters);
+
+ // Enable or disable instrumentation when an Instrument visitor is attached to
+ // the simulator.
+ void EnableInstrumentation();
+ void DisableInstrumentation();
+
+ // Add a marker to the instrumentation data produced by an Instrument visitor.
+ // The name is a two character string that will be attached to the marker in
+ // the output data.
+ void AnnotateInstrumentation(const char* marker_name);
+
+ private:
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together setup code for a large block of
+ // registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PrepareForPush(int count, int size);
+ void PrepareForPop(int count, int size);
+
+ // The actual implementation of load and store operations for CPURegList.
+ enum LoadStoreCPURegListAction {
+ kLoad,
+ kStore
+ };
+ void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
+ CPURegList registers,
+ const MemOperand& mem);
+  // Returns a MemOperand suitable for loading or storing a CPURegList at `mem`.
+ // This helper may allocate registers from `scratch_scope` and generate code
+ // to compute an intermediate address. The resulting MemOperand is only valid
+ // as long as `scratch_scope` remains valid.
+ MemOperand BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope);
+
+ bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
+ return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().getOffset() - label->offset());
+ }
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ ptrdiff_t checkpoint_;
+ ptrdiff_t recommended_checkpoint_;
+};
+
+
+// All Assembler emits MUST acquire/release the underlying code buffer. The
+// helper scope below will do so and optionally ensure the buffer is big enough
+// to receive the emit. It is possible to request that the scope perform no
+// checks (kNoCheck) if, for example, it is known in advance that the buffer
+// size is adequate or some other size-checking mechanism is in place.
+class CodeBufferCheckScope {
+ public:
+ // Tell whether or not the scope needs to ensure the associated CodeBuffer
+ // has enough space for the requested size.
+ enum CheckPolicy {
+ kNoCheck,
+ kCheck
+ };
+
+ // Tell whether or not the scope should assert the amount of code emitted
+ // within the scope is consistent with the requested amount.
+ enum AssertPolicy {
+ kNoAssert, // No assert required.
+ kExactSize, // The code emitted must be exactly size bytes.
+ kMaximumSize // The code emitted must be at most size bytes.
+ };
+
+ CodeBufferCheckScope(Assembler* assm,
+ size_t size,
+ CheckPolicy check_policy = kCheck,
+ AssertPolicy assert_policy = kMaximumSize)
+ { }
+
+ // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
+ explicit CodeBufferCheckScope(Assembler* assm) {}
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+// FIXME: Because of the disabled calls below, this class asserts nothing.
+class InstructionAccurateScope : public CodeBufferCheckScope {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm,
+ int64_t count,
+ AssertPolicy policy = kExactSize)
+ : CodeBufferCheckScope(masm,
+ (count * kInstructionSize),
+ kCheck,
+ policy) {
+ }
+};
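+
+// Illustrative use of InstructionAccurateScope (a sketch; note that in this
+// port the underlying checks are disabled, as the FIXME above explains):
+//
+//   {
+//     InstructionAccurateScope scope(&masm, 2);  // expect exactly 2 instructions
+//     masm.adr(x0, &label);
+//     masm.br(x0);
+//   }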
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
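+//
+// Typical use (illustrative):
+//
+//   {
+//     UseScratchRegisterScope temps(&masm);
+//     const Register scratch = temps.AcquireX();
+//     __ Add(scratch, x0, Operand(x1));
+//   }  // scratch is returned to the MacroAssembler's TmpList() here.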
+class UseScratchRegisterScope {
+ public:
+ // This constructor implicitly calls the `Open` function to initialise the
+ // scope, so it is ready to use immediately after it has been constructed.
+ explicit UseScratchRegisterScope(MacroAssembler* masm);
+ // This constructor allows deferred and optional initialisation of the scope.
+ // The user is required to explicitly call the `Open` function before using
+ // the scope.
+ UseScratchRegisterScope();
+ // This function performs the actual initialisation work.
+ void Open(MacroAssembler* masm);
+
+ // The destructor always implicitly calls the `Close` function.
+ ~UseScratchRegisterScope();
+ // This function performs the cleaning-up work. It must succeed even if the
+ // scope has not been opened. It is safe to call multiple times.
+ void Close();
+
+
+ bool IsAvailable(const CPURegister& reg) const;
+
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+
+
+ Register AcquireSameSizeAs(const Register& reg);
+ VRegister AcquireSameSizeAs(const VRegister& reg);
+
+
+ // Explicitly release an acquired (or excluded) register, putting it back in
+ // the appropriate temps list.
+ void Release(const CPURegister& reg);
+
+
+ // Make the specified registers available as scratch registers for the
+ // duration of this scope.
+ void Include(const CPURegList& list);
+ void Include(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Include(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+ // Make sure that the specified registers are not available in this scope.
+ // This can be used to prevent helper functions from using sensitive
+ // registers, for example.
+ void Exclude(const CPURegList& list);
+ void Exclude(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Exclude(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+ void Exclude(const CPURegister& reg1,
+ const CPURegister& reg2 = NoCPUReg,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg);
+
+
+ // Prevent any scratch registers from being used in this scope.
+ void ExcludeAll();
+
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+
+ static void ReleaseByCode(CPURegList* available, int code);
+
+ static void ReleaseByRegList(CPURegList* available,
+ RegList regs);
+
+ static void IncludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ static void ExcludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kVRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kVRegister
+#ifdef DEBUG
+ bool initialised_;
+#endif
+
+ // Disallow copy constructor and operator=.
+ UseScratchRegisterScope(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+ void operator=(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+};
+
+
+} // namespace vixl
+
+#endif // VIXL_A64_MACRO_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
new file mode 100644
index 000000000..3b2e0a8bc
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -0,0 +1,712 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jsutil.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/Label.h"
+
+namespace vixl {
+
+
+// Assembler
+void Assembler::FinalizeCode() {
+#ifdef DEBUG
+ finalized_ = true;
+#endif
+}
+
+// Unbound Label Representation.
+//
+// We can have multiple branches using the same label before it is bound.
+// Assembler::bind() must then be able to enumerate all the branches and patch
+// them to target the final label location.
+//
+// When a Label is unbound but has uses, its offset points to the head of a
+// linked list of uses. The uses can be branches or adr/adrp instructions. In
+// the case of branches, the next member in the linked list is simply encoded
+// as the branch target. For adr/adrp, the relative pc offset is encoded in the
+// immediate field as a signed instruction offset.
+//
+// In both cases, the end of the list is encoded as a 0 pc offset, i.e. the
+// tail is pointing to itself.
+
+static const ptrdiff_t kEndOfLabelUseList = 0;
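+
+// For example (illustrative): if three branches at buffer offsets 0x00, 0x10
+// and 0x20 all use the same unbound label, the label's offset points at 0x20,
+// the branch at 0x20 stores -4 (instructions) to reach 0x10, the branch at
+// 0x10 stores -4 to reach 0x00, and the branch at 0x00 stores 0 to terminate
+// the list.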
+
+BufferOffset
+MozBaseAssembler::NextLink(BufferOffset cur)
+{
+ Instruction* link = getInstructionAt(cur);
+ // Raw encoded offset.
+ ptrdiff_t offset = link->ImmPCRawOffset();
+ // End of the list is encoded as 0.
+ if (offset == kEndOfLabelUseList)
+ return BufferOffset();
+ // The encoded offset is the number of instructions to move.
+ return BufferOffset(cur.getOffset() + offset * kInstructionSize);
+}
+
+static ptrdiff_t
+EncodeOffset(BufferOffset cur, BufferOffset next)
+{
+ MOZ_ASSERT(next.assigned() && cur.assigned());
+ ptrdiff_t offset = next.getOffset() - cur.getOffset();
+ MOZ_ASSERT(offset % kInstructionSize == 0);
+ return offset / kInstructionSize;
+}
+
+void
+MozBaseAssembler::SetNextLink(BufferOffset cur, BufferOffset next)
+{
+ Instruction* link = getInstructionAt(cur);
+ link->SetImmPCRawOffset(EncodeOffset(cur, next));
+}
+
+// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+//
+// If the label is bound, returns the offset in units of 1 << elementShift.
+// Otherwise, links the instruction to the label and returns the raw offset to
+// encode. (This will be an instruction count.)
+//
+// The offset is calculated by aligning the PC and label addresses down to a
+// multiple of 1 << elementShift, then calculating the (scaled) offset between
+// them. This matches the semantics of adrp, for example. (Assuming that the
+// assembler buffer is page-aligned, which it probably isn't.)
+//
+// For an unbound label, the returned offset will be encodable in the provided
+// branch range. If the label is already bound, the caller is expected to make
+// sure that it is in range, and emit the necessary branch instructions if it
+// isn't.
+//
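+// For example (illustrative), LinkAndGetPageOffsetTo() below calls this with
+// elementShift == kPageSizeLog2, so for a bound label the returned value is
+// the page delta between the branch and the label, matching what adrp encodes.
+//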
+ptrdiff_t
+MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementShift, Label* label)
+{
+ if (armbuffer_.oom())
+ return kEndOfLabelUseList;
+
+ if (label->bound()) {
+ // The label is bound: all uses are already linked.
+ ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift);
+ ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift);
+ return label_offset - branch_offset;
+ }
+
+ // Keep track of short-range branches targeting unbound labels. We may need
+ // to insert veneers in PatchShortRangeBranchToVeneer() below.
+ if (branchRange < NumShortBranchRangeTypes) {
+ // This is the last possible branch target.
+ BufferOffset deadline(branch.getOffset() +
+ Instruction::ImmBranchMaxForwardOffset(branchRange));
+ armbuffer_.registerBranchDeadline(branchRange, deadline);
+ }
+
+ // The label is unbound and previously unused: Store the offset in the label
+ // itself for patching by bind().
+ if (!label->used()) {
+ label->use(branch.getOffset());
+ return kEndOfLabelUseList;
+ }
+
+ // The label is unbound and has multiple users. Create a linked list between
+ // the branches, and update the linked list head in the label struct. This is
+ // not always trivial since the branches in the linked list have limited
+ // ranges.
+
+ // What is the earliest buffer offset that would be reachable by the branch
+ // we're about to add?
+ ptrdiff_t earliestReachable =
+ branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange);
+
+ // If the existing instruction at the head of the list is within reach of the
+ // new branch, we can simply insert the new branch at the front of the list.
+ if (label->offset() >= earliestReachable) {
+ ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label));
+ label->use(branch.getOffset());
+ MOZ_ASSERT(offset != kEndOfLabelUseList);
+ return offset;
+ }
+
+ // The label already has a linked list of uses, but we can't reach the head
+ // of the list with the allowed branch range. Insert this branch at a
+ // different position in the list.
+ //
+ // Find an existing branch, exbr, such that:
+ //
+ // 1. The new branch can be reached by exbr, and either
+ // 2a. The new branch can reach exbr's target, or
+ // 2b. The exbr branch is at the end of the list.
+ //
+ // Then the new branch can be inserted after exbr in the linked list.
+ //
+ // We know that it is always possible to find an exbr branch satisfying these
+ // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All
+ // branches are guaranteed to either be able to reach the end of the
+ // assembler buffer, or they will be pointing to an unconditional branch that
+ // can.
+ //
+ // In particular, the end of the list is always a viable candidate, so we'll
+ // just get that.
+ BufferOffset next(label);
+ BufferOffset exbr;
+ do {
+ exbr = next;
+ next = NextLink(next);
+ } while (next.assigned());
+ SetNextLink(exbr, branch);
+
+ // This branch becomes the new end of the list.
+ return kEndOfLabelUseList;
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, 0, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetInstructionOffsetTo(BufferOffset branch,
+ ImmBranchRangeType branchRange,
+ Label* label) {
+ return LinkAndGetOffsetTo(branch, branchRange, kInstructionSizeLog2, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, kPageSizeLog2, label);
+}
+
+BufferOffset Assembler::b(int imm26) {
+ return EmitBranch(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Instruction* at, int imm26) {
+ return EmitBranch(at, B | ImmUncondBranch(imm26));
+}
+
+
+BufferOffset Assembler::b(int imm19, Condition cond) {
+ return EmitBranch(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Instruction* at, int imm19, Condition cond) {
+ EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+BufferOffset Assembler::b(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label));
+}
+
+
+BufferOffset Assembler::b(Label* label, Condition cond) {
+ // Encode the relative offset from the inserted branch to the label.
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), cond);
+}
+
+void Assembler::br(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BR | Rn(xn));
+}
+
+
+void Assembler::blr(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BLR | Rn(xn));
+}
+
+
+void Assembler::bl(int imm26) {
+ EmitBranch(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Instruction* at, int imm26) {
+ EmitBranch(at, BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label));
+}
+
+
+void Assembler::cbz(const Register& rt, int imm19) {
+ EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label));
+}
+
+
+void Assembler::cbnz(const Register& rt, int imm19) {
+ EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label));
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label));
+}
+
+
+void Assembler::adr(const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ // Encode the relative offset from the inserted adr to the label.
+ return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label));
+}
+
+
+void Assembler::adrp(const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(AllowPageOffsetDependentCode());
+ // Encode the relative offset from the inserted adr to the label.
+ return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label));
+}
+
+
+BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
+ return Logical(rd, rn, operand, ANDS);
+}
+
+
+BufferOffset Assembler::tst(const Register& rn, const Operand& operand) {
+ return ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+BufferOffset Assembler::hint(SystemHint code) {
+ return Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::hint(Instruction* at, SystemHint code) {
+ Emit(at, HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::svc(Instruction* at, int code) {
+ VIXL_ASSERT(is_uint16(code));
+ Emit(at, SVC | ImmException(code));
+}
+
+
+void Assembler::nop(Instruction* at) {
+ hint(at, NOP);
+}
+
+
+BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
+ const Operand operand, LogicalOp op)
+{
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ VIXL_ASSERT(immediate != 0);
+ VIXL_ASSERT(immediate != -1);
+ VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn,
+ unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
+{
+ unsigned reg_size = rd.size();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
+}
+
+
+BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, Instr op)
+{
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ return Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
+ // Store the js::jit::PoolEntry index into the instruction.
+ // finishPool() will walk over all literal load instructions
+ // and use PatchConstantPoolLoad() to patch to the final relative offset.
+ *((uint32_t*)load) |= Assembler::ImmLLiteral(index);
+}
+
+
+bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ Instruction* load = reinterpret_cast<Instruction*>(loadAddr);
+
+ // The load currently contains the js::jit::PoolEntry's index,
+ // as written by InsertIndexIntoTag().
+ uint32_t index = load->ImmLLiteral();
+
+ // Each entry in the literal pool is uint32_t-sized,
+ // but literals may use multiple entries.
+ uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
+ Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);
+
+ load->SetImmLLiteral(source);
+ return false; // Nothing uses the return value.
+}
+
+void
+MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx,
+ BufferOffset deadline, BufferOffset veneer)
+{
+ // Reconstruct the position of the branch from (rangeIdx, deadline).
+ vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx);
+ BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange));
+ Instruction *branchInst = buffer->getInst(branch);
+ Instruction *veneerInst = buffer->getInst(veneer);
+
+ // Verify that the branch range matches what's encoded.
+ MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange);
+
+  // We want to insert the veneer after the branch in the linked list of
+  // instructions that use the same unbound label.
+ // The veneer should be an unconditional branch.
+ ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset();
+
+ // If offset is 0, this is the end of the linked list.
+ if (nextElemOffset != kEndOfLabelUseList) {
+ // Make the offset relative to veneer so it targets the same instruction
+ // as branchInst.
+ nextElemOffset *= kInstructionSize;
+ nextElemOffset += branch.getOffset() - veneer.getOffset();
+ nextElemOffset /= kInstructionSize;
+ }
+ Assembler::b(veneerInst, nextElemOffset);
+
+ // Now point branchInst at veneer. See also SetNextLink() above.
+ branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer));
+}
+
+struct PoolHeader {
+ uint32_t data;
+
+ struct Header {
+ // The size should take into account the pool header.
+    // The size is in units of Instruction (4 bytes), not bytes.
+ union {
+ struct {
+ uint32_t size : 15;
+
+ // "Natural" guards are part of the normal instruction stream,
+ // while "non-natural" guards are inserted for the sole purpose
+ // of skipping around a pool.
+ bool isNatural : 1;
+ uint32_t ONES : 16;
+ };
+ uint32_t data;
+ };
+
+ Header(int size_, bool isNatural_)
+ : size(size_),
+ isNatural(isNatural_),
+ ONES(0xffff)
+ { }
+
+ Header(uint32_t data)
+ : data(data)
+ {
+ JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ VIXL_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ return data;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : data(Header(size_, isNatural_).raw())
+ { }
+
+ uint32_t size() const {
+ Header tmp(data);
+ return tmp.size;
+ }
+
+ uint32_t isNatural() const {
+ Header tmp(data);
+ return tmp.isNatural;
+ }
+};
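+
+// Illustrative encoding (assuming the LSB-first bitfield layout used by the
+// supported compilers): a natural pool occupying 8 Instruction-sized slots,
+// i.e. PoolHeader(8, true), has data == 0xffff8008 (size in bits 0-14,
+// isNatural in bit 15, and the 0xffff marker in bits 16-31).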
+
+
+void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ JS_STATIC_ASSERT(sizeof(PoolHeader) == 4);
+
+ // Get the total size of the pool.
+ const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
+ const uintptr_t totalPoolInstructions = totalPoolSize / sizeof(Instruction);
+
+ VIXL_ASSERT((totalPoolSize & 0x3) == 0);
+ VIXL_ASSERT(totalPoolInstructions < (1 << 15));
+
+ PoolHeader header(totalPoolInstructions, isNatural);
+ *(PoolHeader*)start = header;
+}
+
+
+void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ return;
+}
+
+
+void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) {
+ int byteOffset = dest.getOffset() - branch.getOffset();
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+
+ int instOffset = byteOffset >> kInstructionSizeLog2;
+ Assembler::b(inst, instOffset);
+}
+
+
+ptrdiff_t MozBaseAssembler::GetBranchOffset(const Instruction* ins) {
+ // Branch instructions use an instruction offset.
+ if (ins->BranchType() != UnknownBranchType)
+ return ins->ImmPCRawOffset() * kInstructionSize;
+
+ // ADR and ADRP encode relative offsets and therefore require patching as if they were branches.
+ // ADR uses a byte offset.
+ if (ins->IsADR())
+ return ins->ImmPCRawOffset();
+
+ // ADRP uses a page offset.
+ if (ins->IsADRP())
+ return ins->ImmPCRawOffset() * kPageSize;
+
+ MOZ_CRASH("Unsupported branch type");
+}
+
+
+void MozBaseAssembler::RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final) {
+ if (i->IsCondBranchImm()) {
+ VIXL_ASSERT(i->IsCondB());
+ Assembler::b(i, offset, cond);
+ return;
+ }
+ MOZ_CRASH("Unsupported branch type");
+}
+
+
+void MozBaseAssembler::RetargetNearBranch(Instruction* i, int byteOffset, bool final) {
+ const int instOffset = byteOffset >> kInstructionSizeLog2;
+
+ // The only valid conditional instruction is B.
+ if (i->IsCondBranchImm()) {
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+ VIXL_ASSERT(i->IsCondB());
+ Condition cond = static_cast<Condition>(i->ConditionBranch());
+ Assembler::b(i, instOffset, cond);
+ return;
+ }
+
+ // Valid unconditional branches are B and BL.
+ if (i->IsUncondBranchImm()) {
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+ if (i->IsUncondB()) {
+ Assembler::b(i, instOffset);
+ } else {
+ VIXL_ASSERT(i->IsBL());
+ Assembler::bl(i, instOffset);
+ }
+
+ VIXL_ASSERT(i->ImmUncondBranch() == instOffset);
+ return;
+ }
+
+ // Valid compare branches are CBZ and CBNZ.
+ if (i->IsCompareBranch()) {
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+ Register rt = i->SixtyFourBits() ? Register::XRegFromCode(i->Rt())
+ : Register::WRegFromCode(i->Rt());
+
+ if (i->IsCBZ()) {
+ Assembler::cbz(i, rt, instOffset);
+ } else {
+ VIXL_ASSERT(i->IsCBNZ());
+ Assembler::cbnz(i, rt, instOffset);
+ }
+
+ VIXL_ASSERT(i->ImmCmpBranch() == instOffset);
+ return;
+ }
+
+ // Valid test branches are TBZ and TBNZ.
+ if (i->IsTestBranch()) {
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+    // Inverse of ImmTestBranchBit(): bit 5 of bit_pos comes from the b5 field,
+    // bits 4:0 from the b40 field.
+ unsigned bit_pos = (i->ImmTestBranchBit5() << 5) | (i->ImmTestBranchBit40());
+ VIXL_ASSERT(is_uint6(bit_pos));
+
+ // Register size doesn't matter for the encoding.
+ Register rt = Register::XRegFromCode(i->Rt());
+
+ if (i->IsTBZ()) {
+ Assembler::tbz(i, rt, bit_pos, instOffset);
+ } else {
+ VIXL_ASSERT(i->IsTBNZ());
+ Assembler::tbnz(i, rt, bit_pos, instOffset);
+ }
+
+ VIXL_ASSERT(i->ImmTestBranch() == instOffset);
+ return;
+ }
+
+ if (i->IsADR()) {
+ Register rd = Register::XRegFromCode(i->Rd());
+ Assembler::adr(i, rd, byteOffset);
+ return;
+ }
+
+ if (i->IsADRP()) {
+ const int pageOffset = byteOffset >> kPageSizeLog2;
+ Register rd = Register::XRegFromCode(i->Rd());
+ Assembler::adrp(i, rd, pageOffset);
+ return;
+ }
+
+ MOZ_CRASH("Unsupported branch type");
+}
+
+
+void MozBaseAssembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond) {
+ MOZ_CRASH("RetargetFarBranch()");
+}
+
+
+} // namespace vixl
+
diff --git a/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
new file mode 100644
index 000000000..d079340fc
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
@@ -0,0 +1,216 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_arm64_vixl_MozBaseAssembler_vixl_h
+#define jit_arm64_vixl_MozBaseAssembler_vixl_h
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+
+namespace vixl {
+
+
+using js::jit::BufferOffset;
+
+
+class MozBaseAssembler;
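+// The template arguments below appear to be <SliceSize, InstSize, Inst, Asm,
+// NumShortBranchRanges>: 1024-byte buffer slices of 4-byte AArch64
+// instructions, with veneer tracking for every short-branch range type.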
+typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, MozBaseAssembler,
+ NumShortBranchRangeTypes> ARMBuffer;
+
+// Base class for vixl::Assembler, for isolating Moz-specific changes to VIXL.
+class MozBaseAssembler : public js::jit::AssemblerShared {
+ // Buffer initialization constants.
+ static const unsigned BufferGuardSize = 1;
+ static const unsigned BufferHeaderSize = 1;
+ static const size_t BufferCodeAlignment = 8;
+ static const size_t BufferMaxPoolOffset = 1024;
+ static const unsigned BufferPCBias = 0;
+ static const uint32_t BufferAlignmentFillInstruction = BRK | (0xdead << ImmException_offset);
+ static const uint32_t BufferNopFillInstruction = HINT | (31 << Rt_offset);
+ static const unsigned BufferNumDebugNopsToInsert = 0;
+
+ public:
+ MozBaseAssembler()
+ : armbuffer_(BufferGuardSize,
+ BufferHeaderSize,
+ BufferCodeAlignment,
+ BufferMaxPoolOffset,
+ BufferPCBias,
+ BufferAlignmentFillInstruction,
+ BufferNopFillInstruction,
+ BufferNumDebugNopsToInsert)
+ { }
+
+ public:
+ // Helper function for use with the ARMBuffer.
+ // The MacroAssembler must create an AutoJitContextAlloc before initializing the buffer.
+ void initWithAllocator() {
+ armbuffer_.initWithAllocator();
+ }
+
+ // Return the Instruction at a given byte offset.
+ Instruction* getInstructionAt(BufferOffset offset) {
+ return armbuffer_.getInst(offset);
+ }
+
+ // Return the byte offset of a bound label.
+ template <typename T>
+ inline T GetLabelByteOffset(const js::jit::Label* label) {
+ VIXL_ASSERT(label->bound());
+ JS_STATIC_ASSERT(sizeof(T) >= sizeof(uint32_t));
+ return reinterpret_cast<T>(label->offset());
+ }
+
+ protected:
+ // Get the buffer offset of the next inserted instruction. This may flush
+ // constant pools.
+ BufferOffset nextInstrOffset() {
+ return armbuffer_.nextInstrOffset();
+ }
+
+ // Get the next usable buffer offset. Note that a constant pool may be placed
+ // here before the next instruction is emitted.
+ BufferOffset nextOffset() const {
+ return armbuffer_.nextOffset();
+ }
+
+ // Allocate memory in the buffer by forwarding to armbuffer_.
+ // Propagate OOM errors.
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data,
+ ARMBuffer::PoolEntry* pe = nullptr,
+ bool markAsBranch = false)
+ {
+ BufferOffset offset = armbuffer_.allocEntry(numInst, numPoolEntries, inst,
+ data, pe, markAsBranch);
+ propagateOOM(offset.assigned());
+ return offset;
+ }
+
+ // Emit the instruction, returning its offset.
+ BufferOffset Emit(Instr instruction, bool isBranch = false) {
+ JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ return armbuffer_.putInt(*(uint32_t*)(&instruction), isBranch);
+ }
+
+ BufferOffset EmitBranch(Instr instruction) {
+ return Emit(instruction, true);
+ }
+
+ public:
+ // Emit the instruction at |at|.
+ static void Emit(Instruction* at, Instr instruction) {
+ JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ memcpy(at, &instruction, sizeof(instruction));
+ }
+
+ static void EmitBranch(Instruction* at, Instr instruction) {
+ // TODO: Assert that the buffer already has the instruction marked as a branch.
+ Emit(at, instruction);
+ }
+
+ // Emit data inline in the instruction stream.
+ BufferOffset EmitData(void const * data, unsigned size) {
+ VIXL_ASSERT(size % 4 == 0);
+ return armbuffer_.allocEntry(size / sizeof(uint32_t), 0, (uint8_t*)(data), nullptr);
+ }
+
+ public:
+ // Size of the code generated in bytes, including pools.
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ // Move the pool into the instruction stream.
+ void flushBuffer() {
+ armbuffer_.flushPool();
+ }
+
+ // Inhibit pool flushing for the given number of instructions.
+ // Generating more than |maxInst| instructions in a no-pool region
+ // triggers an assertion within the ARMBuffer.
+ // Does not nest.
+ void enterNoPool(size_t maxInst) {
+ armbuffer_.enterNoPool(maxInst);
+ }
+
+ // Marks the end of a no-pool region.
+ void leaveNoPool() {
+ armbuffer_.leaveNoPool();
+ }
+
+ public:
+ // Static interface used by IonAssemblerBufferWithConstantPools.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+ static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+ static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx, BufferOffset deadline,
+ BufferOffset veneer);
+ static uint32_t PlaceConstantPoolBarrier(int offset);
+
+ static void WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest);
+
+ static ptrdiff_t GetBranchOffset(const Instruction* i);
+ static void RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final = true);
+ static void RetargetNearBranch(Instruction* i, int offset, bool final = true);
+ static void RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond);
+
+ protected:
+ // Functions for managing Labels and linked lists of Label uses.
+
+ // Get the next Label user in the linked list of Label uses.
+ // Return an unassigned BufferOffset when the end of the list is reached.
+ BufferOffset NextLink(BufferOffset cur);
+
+ // Patch the instruction at cur to link to the instruction at next.
+ void SetNextLink(BufferOffset cur, BufferOffset next);
+
+ // Link the current (not-yet-emitted) instruction to the specified label,
+ // then return a raw offset to be encoded in the instruction.
+ ptrdiff_t LinkAndGetByteOffsetTo(BufferOffset branch, js::jit::Label* label);
+ ptrdiff_t LinkAndGetInstructionOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ js::jit::Label* label);
+ ptrdiff_t LinkAndGetPageOffsetTo(BufferOffset branch, js::jit::Label* label);
+
+ // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+ ptrdiff_t LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementSizeBits, js::jit::Label* label);
+
+ protected:
+ // The buffer into which code and relocation info are generated.
+ ARMBuffer armbuffer_;
+};
+
+
+} // namespace vixl
+
+
+#endif // jit_arm64_vixl_MozBaseAssembler_vixl_h
+
diff --git a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
new file mode 100644
index 000000000..fcae1ba47
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
@@ -0,0 +1,195 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/Architecture-arm64.h"
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+namespace vixl {
+
+bool Instruction::IsUncondB() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B);
+}
+
+
+bool Instruction::IsCondB() const {
+ return Mask(ConditionalBranchMask) == (ConditionalBranchFixed | B_cond);
+}
+
+
+bool Instruction::IsBL() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsBR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BR);
+}
+
+
+bool Instruction::IsBLR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BLR);
+}
+
+
+bool Instruction::IsTBZ() const {
+ return Mask(TestBranchMask) == TBZ;
+}
+
+
+bool Instruction::IsTBNZ() const {
+ return Mask(TestBranchMask) == TBNZ;
+}
+
+
+bool Instruction::IsCBZ() const {
+ return Mask(CompareBranchMask) == CBZ_w || Mask(CompareBranchMask) == CBZ_x;
+}
+
+
+bool Instruction::IsCBNZ() const {
+ return Mask(CompareBranchMask) == CBNZ_w || Mask(CompareBranchMask) == CBNZ_x;
+}
+
+
+bool Instruction::IsLDR() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+}
+
+
+bool Instruction::IsNOP() const {
+ return Mask(SystemHintMask) == HINT && ImmHint() == NOP;
+}
+
+
+bool Instruction::IsADR() const {
+ return Mask(PCRelAddressingMask) == ADR;
+}
+
+
+bool Instruction::IsADRP() const {
+ return Mask(PCRelAddressingMask) == ADRP;
+}
+
+
+bool Instruction::IsBranchLinkImm() const {
+ return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsTargetReachable(Instruction* target) const {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ int offset = (target - this) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType:
+ return is_int19(offset);
+ case UncondBranchType:
+ return is_int26(offset);
+ case CompareBranchType:
+ return is_int19(offset);
+ case TestBranchType:
+ return is_int14(offset);
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
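+// The immediate widths checked above correspond to the usual AArch64 branch
+// ranges (each immediate counts 4-byte instructions): 19 bits gives roughly
+// +/-1 MB for conditional and compare branches, 26 bits roughly +/-128 MB for
+// B/BL, and 14 bits roughly +/-32 KB for TBZ/TBNZ.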
+
+
+ptrdiff_t Instruction::ImmPCRawOffset() const {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ } else if (BranchType() == UnknownBranchType) {
+ offset = ImmLLiteral();
+ } else {
+ offset = ImmBranch();
+ }
+ return offset;
+}
+
+void
+Instruction::SetImmPCRawOffset(ptrdiff_t offset)
+{
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP. We're encoding a raw offset here.
+ // See also SetPCRelImmTarget().
+ Instr imm = vixl::Assembler::ImmPCRelAddress(offset);
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+ } else {
+ SetBranchImmTarget(this + (offset << kInstructionSizeLog2));
+ }
+}
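+// Note the symmetry with ImmPCRawOffset(): ADR/ADRP keep their raw immediate,
+// while branch offsets count instructions and are scaled back to bytes
+// (<< kInstructionSizeLog2) before being handed to SetBranchImmTarget().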
+
+// Is this a stack pointer synchronization instruction as inserted by
+// MacroAssembler::syncStackPtr()?
+bool
+Instruction::IsStackPtrSync() const
+{
+ // The stack pointer sync is a move to the stack pointer.
+ // This is encoded as 'add sp, Rs, #0'.
+ return IsAddSubImmediate() && Rd() == js::jit::Registers::sp && ImmAddSub() == 0;
+}
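+// For reference: on AArch64 a move to sp is an alias of "add sp, <Xn>, #0",
+// which is why the sync emitted by syncStackPtr() matches the pattern above.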
+
+// Skip over a constant pool at |this| if there is one.
+//
+// If |this| is pointing to the artificial guard branch around a constant pool,
+// return the instruction after the pool. Otherwise return |this| itself.
+//
+// This function does not skip constant pools with a natural guard branch. It
+// is assumed that anyone inspecting the instruction stream knows about
+// branches that were inserted naturally.
+const Instruction*
+Instruction::skipPool() const
+{
+ // Artificial pool guards can only be B (rather than BR), and they must be
+ // forward branches.
+ if (!IsUncondB() || ImmUncondBranch() <= 0)
+ return this;
+
+ // Check for a constant pool header which has the high 16 bits set. See
+  // struct PoolHeader. Bit 15 indicates a natural pool guard when set; here
+  // it must be clear, indicating an artificial pool guard.
+ const Instruction *header = InstructionAtOffset(kInstructionSize);
+ if (header->Mask(0xffff8000) != 0xffff0000)
+ return this;
+
+ // OK, this is an artificial jump around a constant pool.
+ return ImmPCOffsetTarget();
+}
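+// Sketch of the layout recognized above (illustrative only):
+//   b    after_pool          ; artificial guard, forward unconditional branch
+//   .word 0xffffNNNN         ; PoolHeader, bits 31:16 set, bit 15 clear
+//   ...pool entries...
+// after_pool:                ; ImmPCOffsetTarget() of the guard branch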
+
+
+void Instruction::SetBits32(int msb, int lsb, unsigned value) {
+ uint32_t me;
+ memcpy(&me, this, sizeof(me));
+ uint32_t new_mask = (1 << (msb+1)) - (1 << lsb);
+ uint32_t keep_mask = ~new_mask;
+ me = (me & keep_mask) | ((value << lsb) & new_mask);
+ memcpy(this, &me, sizeof(me));
+}
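+// Example: SetBits32(15, 8, 0xab) computes new_mask = (1 << 16) - (1 << 8) =
+// 0x0000ff00, so only bits 15:8 of the instruction word are rewritten and the
+// remaining bits are preserved.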
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
new file mode 100644
index 000000000..7447b4d2a
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -0,0 +1,708 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/IonTypes.h"
+#include "threading/LockGuard.h"
+#include "vm/Runtime.h"
+
+namespace vixl {
+
+
+using mozilla::DebugOnly;
+using js::jit::ABIFunctionType;
+
+Simulator::Simulator(Decoder* decoder, FILE* stream)
+ : stream_(nullptr)
+ , print_disasm_(nullptr)
+ , instrumentation_(nullptr)
+ , stack_(nullptr)
+ , stack_limit_(nullptr)
+ , decoder_(nullptr)
+ , oom_(false)
+ , lock_(js::mutexid::Arm64SimulatorLock)
+{
+ this->init(decoder, stream);
+}
+
+
+Simulator::~Simulator() {
+ js_free(stack_);
+ stack_ = nullptr;
+
+ // The decoder may outlive the simulator.
+ if (print_disasm_) {
+ decoder_->RemoveVisitor(print_disasm_);
+ js_delete(print_disasm_);
+ print_disasm_ = nullptr;
+ }
+
+ if (instrumentation_) {
+ decoder_->RemoveVisitor(instrumentation_);
+ js_delete(instrumentation_);
+ instrumentation_ = nullptr;
+ }
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = nullptr;
+ pc_modified_ = false;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
+ uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
+ VIXL_ASSERT(IsSignallingNaN(rawbits_to_double(nan_bits & kDRegMask)));
+ VIXL_ASSERT(IsSignallingNaN(rawbits_to_float(nan_bits & kSRegMask)));
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ set_dreg_bits(i, nan_bits);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+ set_resume_pc(nullptr);
+}
+
+
+void Simulator::init(Decoder* decoder, FILE* stream) {
+ // Ensure that shift operations act as the simulator expects.
+ VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+ VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+ instruction_stats_ = false;
+
+ // Set up the decoder.
+ decoder_ = decoder;
+ decoder_->AppendVisitor(this);
+
+ stream_ = stream;
+ print_disasm_ = js_new<PrintDisassembler>(stream_);
+ if (!print_disasm_) {
+ oom_ = true;
+ return;
+ }
+ set_coloured_trace(false);
+ trace_parameters_ = LOG_NONE;
+
+ ResetState();
+
+ // Allocate and set up the simulator stack.
+ stack_ = (byte*)js_malloc(stack_size_);
+ if (!stack_) {
+ oom_ = true;
+ return;
+ }
+ stack_limit_ = stack_ + stack_protection_size_;
+ // Configure the starting stack pointer.
+ // - Find the top of the stack.
+ byte * tos = stack_ + stack_size_;
+ // - There's a protection region at both ends of the stack.
+ tos -= stack_protection_size_;
+ // - The stack pointer must be 16-byte aligned.
+ tos = AlignDown(tos, 16);
+ set_sp(tos);
+
+ // Set the sample period to 10, as the VIXL examples and tests are short.
+ instrumentation_ = js_new<Instrument>("vixl_stats.csv", 10);
+ if (!instrumentation_) {
+ oom_ = true;
+ return;
+ }
+
+ // Print a warning about exclusive-access instructions, but only the first
+ // time they are encountered. This warning can be silenced using
+ // SilenceExclusiveAccessWarning().
+ print_exclusive_access_warning_ = true;
+
+ redirection_ = nullptr;
+}
+
+
+Simulator* Simulator::Current() {
+ return js::TlsPerThreadData.get()->simulator();
+}
+
+
+Simulator* Simulator::Create(JSContext* cx) {
+ Decoder *decoder = js_new<vixl::Decoder>();
+ if (!decoder)
+ return nullptr;
+
+ // FIXME: This just leaks the Decoder object for now, which is probably OK.
+ // FIXME: We should free it at some point.
+ // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
+ Simulator *sim;
+ if (getenv("USE_DEBUGGER") != nullptr)
+ sim = js_new<Debugger>(decoder, stdout);
+ else
+ sim = js_new<Simulator>(decoder, stdout);
+
+ // Check if Simulator:init ran out of memory.
+ if (sim && sim->oom()) {
+ js_delete(sim);
+ return nullptr;
+ }
+
+ return sim;
+}
+
+
+void Simulator::Destroy(Simulator* sim) {
+ js_delete(sim);
+}
+
+
+void Simulator::ExecuteInstruction() {
+ // The program counter should always be aligned.
+ VIXL_ASSERT(IsWordAligned(pc_));
+ decoder_->Decode(pc_);
+ const Instruction* rpc = resume_pc_;
+ increment_pc();
+
+ if (MOZ_UNLIKELY(rpc)) {
+ JSRuntime::innermostWasmActivation()->setResumePC((void*)pc());
+ set_pc(rpc);
+ // Just calling set_pc turns the pc_modified_ flag on, which means it doesn't
+ // auto-step after executing the next instruction. Force that to off so it
+ // will auto-step after executing the first instruction of the handler.
+ pc_modified_ = false;
+ resume_pc_ = nullptr;
+ }
+}
+
+
+uintptr_t Simulator::stackLimit() const {
+ return reinterpret_cast<uintptr_t>(stack_limit_);
+}
+
+
+uintptr_t* Simulator::addressOfStackLimit() {
+ return (uintptr_t*)&stack_limit_;
+}
+
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+  if (newsp == 0)
+ newsp = xreg(31, Reg31IsStackPointer);
+ return newsp <= stackLimit();
+}
+
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = xreg(31, Reg31IsStackPointer) - extra;
+ return newsp <= stackLimit();
+}
+
+
+void Simulator::set_resume_pc(void* new_resume_pc) {
+ resume_pc_ = AddressUntag(reinterpret_cast<Instruction*>(new_resume_pc));
+}
+
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ // First eight arguments passed in registers.
+ VIXL_ASSERT(argument_count <= 8);
+ // This code should use the type of the called function
+ // (with templates, like the callVM machinery), but since the
+  // number of called functions is minuscule, their types have been
+ // divined from the number of arguments.
+ if (argument_count == 8) {
+ // EnterJitData::jitcode.
+ set_xreg(0, va_arg(parameters, int64_t));
+ // EnterJitData::maxArgc.
+ set_xreg(1, va_arg(parameters, unsigned));
+ // EnterJitData::maxArgv.
+ set_xreg(2, va_arg(parameters, int64_t));
+ // EnterJitData::osrFrame.
+ set_xreg(3, va_arg(parameters, int64_t));
+ // EnterJitData::calleeToken.
+ set_xreg(4, va_arg(parameters, int64_t));
+ // EnterJitData::scopeChain.
+ set_xreg(5, va_arg(parameters, int64_t));
+ // EnterJitData::osrNumStackValues.
+ set_xreg(6, va_arg(parameters, unsigned));
+ // Address of EnterJitData::result.
+ set_xreg(7, va_arg(parameters, int64_t));
+ } else if (argument_count == 2) {
+ // EntryArg* args
+ set_xreg(0, va_arg(parameters, int64_t));
+ // uint8_t* GlobalData
+ set_xreg(1, va_arg(parameters, int64_t));
+ } else if (argument_count == 1) { // irregexp
+ // InputOutputData& data
+ set_xreg(0, va_arg(parameters, int64_t));
+ } else {
+ MOZ_CRASH("Unknown number of arguments");
+ }
+
+ va_end(parameters);
+
+ // Call must transition back to native code on exit.
+ VIXL_ASSERT(xreg(30) == int64_t(kEndOfSimAddress));
+
+ // Execute the simulation.
+ DebugOnly<int64_t> entryStack = xreg(31, Reg31IsStackPointer);
+ RunFrom((Instruction*)entry);
+ DebugOnly<int64_t> exitStack = xreg(31, Reg31IsStackPointer);
+ VIXL_ASSERT(entryStack == exitStack);
+
+ int64_t result = xreg(0);
+ if (getenv("USE_DEBUGGER"))
+ printf("LEAVE\n");
+ return result;
+}
+
+
+// Protects the icache and redirection properties of the simulator.
+class AutoLockSimulatorCache : public js::LockGuard<js::Mutex>
+{
+ friend class Simulator;
+ using Base = js::LockGuard<js::Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache(Simulator* sim)
+ : Base(sim->lock_)
+ {
+ }
+};
+
+
+// When the generated code calls a VM function (masm.callWithABI) we need to
+// call that function instead of trying to execute it with the simulator
+// (because it's x64 code instead of AArch64 code). We do that by redirecting the VM
+// call to a svc (Supervisor Call) instruction that is handled by the
+// simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
+ : nativeFunction_(nativeFunction),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = sim->redirection();
+ // TODO: Flush ICache?
+ sim->setRedirection(this);
+
+ Instruction* instr = (Instruction*)(&svcInstruction_);
+ vixl::Assembler::svc(instr, kCallRtRedirected);
+ }
+
+ public:
+ void* addressOfSvcInstruction() { return &svcInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ Simulator* sim = Simulator::Current();
+ AutoLockSimulatorCache alsr(sim);
+
+ // TODO: Store srt_ in the simulator for this assertion.
+ // VIXL_ASSERT_IF(pt->simulator(), pt->simulator()->srt_ == srt);
+
+ Redirection* current = sim->redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ VIXL_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
+ if (!redir)
+ oomUnsafe.crash("Simulator redirection");
+ new(redir) Redirection(nativeFunction, type, sim);
+ return redir;
+ }
+
+ static const Redirection* FromSvcInstruction(const Instruction* svcInstruction) {
+ const uint8_t* addrOfSvc = reinterpret_cast<const uint8_t*>(svcInstruction);
+ const uint8_t* addrOfRedirection = addrOfSvc - offsetof(Redirection, svcInstruction_);
+ return reinterpret_cast<const Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t svcInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
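+// Sketch of the redirection flow (see VisitCallRedirection() below):
+//  1. RedirectNativeFunction(fn, type) hands back the address of the
+//     svcInstruction_ field of the Redirection record for fn.
+//  2. Generated code calls that address, so the simulator decodes
+//     "svc #kCallRtRedirected".
+//  3. VisitException() forwards to VisitCallRedirection(), which recovers the
+//     Redirection via FromSvcInstruction(), invokes fn natively with the
+//     simulated argument registers, and stores the result in x0/d0/s0.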
+
+
+void Simulator::setRedirection(Redirection* redirection) {
+ redirection_ = redirection;
+}
+
+
+Redirection* Simulator::redirection() const {
+ return redirection_;
+}
+
+
+void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSvcInstruction();
+}
+
+
+void Simulator::VisitException(const Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK: {
+ int lowbit = ImmException_offset;
+ int highbit = ImmException_offset + ImmException_width - 1;
+ HostBreakpoint(instr->Bits(highbit, lowbit));
+ break;
+ }
+ case HLT:
+ switch (instr->ImmException()) {
+ case kUnreachableOpcode:
+ DoUnreachable(instr);
+ return;
+ case kTraceOpcode:
+ DoTrace(instr);
+ return;
+ case kLogOpcode:
+ DoLog(instr);
+ return;
+ case kPrintfOpcode:
+ DoPrintf(instr);
+ return;
+ default:
+ HostBreakpoint();
+ return;
+ }
+ case SVC:
+ // The SVC instruction is hijacked by the JIT as a pseudo-instruction
+ // causing the Simulator to execute host-native code for callWithABI.
+ switch (instr->ImmException()) {
+ case kCallRtRedirected:
+ VisitCallRedirection(instr);
+ return;
+ case kMarkStackPointer:
+ spStack_.append(xreg(31, Reg31IsStackPointer));
+ return;
+ case kCheckStackPointer: {
+ int64_t current = xreg(31, Reg31IsStackPointer);
+ int64_t expected = spStack_.popCopy();
+ VIXL_ASSERT(current == expected);
+ return;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::setGPR32Result(int32_t result) {
+ set_wreg(0, result);
+}
+
+
+void Simulator::setGPR64Result(int64_t result) {
+ set_xreg(0, result);
+}
+
+
+void Simulator::setFP32Result(float result) {
+ set_sreg(0, result);
+}
+
+
+void Simulator::setFP64Result(double result) {
+ set_dreg(0, result);
+}
+
+
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, uint64_t arg1, uint64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(uint64_t arg0, double arg1,
+ uint64_t arg2, uint64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
+
+
+// Simulator support for callWithABI().
+void
+Simulator::VisitCallRedirection(const Instruction* instr)
+{
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == SVC);
+ VIXL_ASSERT(instr->ImmException() == kCallRtRedirected);
+
+ const Redirection* redir = Redirection::FromSvcInstruction(instr);
+ uintptr_t nativeFn = reinterpret_cast<uintptr_t>(redir->nativeFunction());
+
+ // Stack must be aligned prior to the call.
+ // FIXME: It's actually our job to perform the alignment...
+ //VIXL_ASSERT((xreg(31, Reg31IsStackPointer) & (StackAlignment - 1)) == 0);
+
+ // Used to assert that callee-saved registers are preserved.
+ DebugOnly<int64_t> x19 = xreg(19);
+ DebugOnly<int64_t> x20 = xreg(20);
+ DebugOnly<int64_t> x21 = xreg(21);
+ DebugOnly<int64_t> x22 = xreg(22);
+ DebugOnly<int64_t> x23 = xreg(23);
+ DebugOnly<int64_t> x24 = xreg(24);
+ DebugOnly<int64_t> x25 = xreg(25);
+ DebugOnly<int64_t> x26 = xreg(26);
+ DebugOnly<int64_t> x27 = xreg(27);
+ DebugOnly<int64_t> x28 = xreg(28);
+ DebugOnly<int64_t> x29 = xreg(29);
+ DebugOnly<int64_t> savedSP = xreg(31, Reg31IsStackPointer);
+
+ // Remember LR for returning from the "call".
+ int64_t savedLR = xreg(30);
+
+ // Allow recursive Simulator calls: returning from the call must stop
+ // the simulation and transition back to native Simulator code.
+ set_xreg(30, int64_t(kEndOfSimAddress));
+
+ // Store argument register values in local variables for ease of use below.
+ int64_t x0 = xreg(0);
+ int64_t x1 = xreg(1);
+ int64_t x2 = xreg(2);
+ int64_t x3 = xreg(3);
+ int64_t x4 = xreg(4);
+ int64_t x5 = xreg(5);
+ int64_t x6 = xreg(6);
+ int64_t x7 = xreg(7);
+ double d0 = dreg(0);
+ double d1 = dreg(1);
+ double d2 = dreg(2);
+ double d3 = dreg(3);
+ float s0 = sreg(0);
+
+ // Dispatch the call and set the return value.
+ switch (redir->type()) {
+ // Cases with int64_t return type.
+ case js::jit::Args_General0: {
+ int64_t ret = reinterpret_cast<Prototype_General0>(nativeFn)();
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General1: {
+ int64_t ret = reinterpret_cast<Prototype_General1>(nativeFn)(x0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General2: {
+ int64_t ret = reinterpret_cast<Prototype_General2>(nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General3: {
+ int64_t ret = reinterpret_cast<Prototype_General3>(nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General4: {
+ int64_t ret = reinterpret_cast<Prototype_General4>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General5: {
+ int64_t ret = reinterpret_cast<Prototype_General5>(nativeFn)(x0, x1, x2, x3, x4);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General6: {
+ int64_t ret = reinterpret_cast<Prototype_General6>(nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General7: {
+ int64_t ret = reinterpret_cast<Prototype_General7>(nativeFn)(x0, x1, x2, x3, x4, x5, x6);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General8: {
+ int64_t ret = reinterpret_cast<Prototype_General8>(nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption.
+ case js::jit::Args_Int_Double: {
+ int64_t ret = reinterpret_cast<Prototype_Int_Double>(nativeFn)(d0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_IntDouble: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDouble>(nativeFn)(x0, d0);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_IntDoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(nativeFn)(x0, d0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_DoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_DoubleIntInt>(nativeFn)(d0, x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with float return type.
+ case js::jit::Args_Float32_Float32: {
+ float ret = reinterpret_cast<Prototype_Float32_Float32>(nativeFn)(s0);
+ setFP32Result(ret);
+ break;
+ }
+
+ // Cases with double return type.
+ case js::jit::Args_Double_None: {
+ double ret = reinterpret_cast<Prototype_Double_None>(nativeFn)();
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Double: {
+ double ret = reinterpret_cast<Prototype_Double_Double>(nativeFn)(d0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Int: {
+ double ret = reinterpret_cast<Prototype_Double_Int>(nativeFn)(x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleInt: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleInt>(nativeFn)(d0, x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDouble>(nativeFn)(d0, d1);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(nativeFn)(d0, d1, d2);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(nativeFn)(d0, d1, d2, d3);
+ setFP64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Double_IntDouble: {
+ double ret = reinterpret_cast<Prototype_Double_IntDouble>(nativeFn)(x0, d0);
+ setFP64Result(ret);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ // TODO: Nuke the volatile registers.
+
+ // Assert that callee-saved registers are unchanged.
+ VIXL_ASSERT(xreg(19) == x19);
+ VIXL_ASSERT(xreg(20) == x20);
+ VIXL_ASSERT(xreg(21) == x21);
+ VIXL_ASSERT(xreg(22) == x22);
+ VIXL_ASSERT(xreg(23) == x23);
+ VIXL_ASSERT(xreg(24) == x24);
+ VIXL_ASSERT(xreg(25) == x25);
+ VIXL_ASSERT(xreg(26) == x26);
+ VIXL_ASSERT(xreg(27) == x27);
+ VIXL_ASSERT(xreg(28) == x28);
+ VIXL_ASSERT(xreg(29) == x29);
+
+ // Assert that the stack is unchanged.
+ VIXL_ASSERT(savedSP == xreg(31, Reg31IsStackPointer));
+
+ // Simulate a return.
+ set_lr(savedLR);
+ set_pc((Instruction*)savedLR);
+ if (getenv("USE_DEBUGGER"))
+ printf("SVCRET\n");
+}
+
+
+} // namespace vixl
+
+
+vixl::Simulator* js::PerThreadData::simulator() const {
+ return runtime_->simulator();
+}
+
+
+vixl::Simulator* JSRuntime::simulator() const {
+ return simulator_;
+}
+
+
+uintptr_t* JSRuntime::addressOfSimulatorStackLimit() {
+ return simulator_->addressOfStackLimit();
+}
diff --git a/js/src/jit/arm64/vixl/Platform-vixl.h b/js/src/jit/arm64/vixl/Platform-vixl.h
new file mode 100644
index 000000000..df0420867
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Platform-vixl.h
@@ -0,0 +1,39 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_PLATFORM_H
+#define VIXL_PLATFORM_H
+
+// Define platform specific functionalities.
+#include <signal.h>
+
+#include "js-config.h"
+
+namespace vixl {
+inline void HostBreakpoint(int64_t code = 0) { raise(SIGINT); }
+} // namespace vixl
+
+#endif
diff --git a/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
new file mode 100644
index 000000000..9a66ab023
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
@@ -0,0 +1,141 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+#define VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+
+namespace vixl {
+
+// Debug instructions.
+//
+// VIXL's macro-assembler and simulator support a few pseudo instructions to
+// make debugging easier. These pseudo instructions do not exist on real
+// hardware.
+//
+// TODO: Also consider allowing these pseudo-instructions to be disabled in the
+// simulator, so that users can check that the input is valid native code.
+// (This isn't possible in all cases. Printf won't work, for example.)
+//
+// Each debug pseudo instruction is represented by a HLT instruction. The HLT
+// immediate field is used to identify the type of debug pseudo instruction.
+
+enum DebugHltOpcodes {
+ kUnreachableOpcode = 0xdeb0,
+ kPrintfOpcode,
+ kTraceOpcode,
+ kLogOpcode,
+ // Aliases.
+ kDebugHltFirstOpcode = kUnreachableOpcode,
+ kDebugHltLastOpcode = kLogOpcode
+};
+
+// Each pseudo instruction uses a custom encoding for additional arguments, as
+// described below.
+
+// Unreachable - kUnreachableOpcode
+//
+// Instruction which should never be executed. This is used as a guard in parts
+// of the code that should not be reachable, such as in data encoded inline in
+// the instructions.
+
+// Printf - kPrintfOpcode
+// - arg_count: The number of arguments.
+// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
+//
+// Simulate a call to printf.
+//
+// Floating-point and integer arguments are passed in separate sets of registers
+// in AAPCS64 (even for varargs functions), so it is not possible to determine
+// the type of each argument without some information about the values that were
+// passed in. This information could be retrieved from the printf format string,
+// but the format string is not trivial to parse so we encode the relevant
+// information with the HLT instruction.
+//
+// Also, the following registers are populated (as if for a native A64 call):
+// x0: The format string
+// x1-x7: Optional arguments, if type == CPURegister::kRegister
+// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
+const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
+const unsigned kPrintfLength = 3 * kInstructionSize;
+
+const unsigned kPrintfMaxArgCount = 4;
+
+// The argument pattern is a set of two-bit-fields, each with one of the
+// following values:
+enum PrintfArgPattern {
+ kPrintfArgW = 1,
+ kPrintfArgX = 2,
+ // There is no kPrintfArgS because floats are always converted to doubles in C
+ // varargs calls.
+ kPrintfArgD = 3
+};
+static const unsigned kPrintfArgPatternBits = 2;
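+// For example, a simulated printf("%s: %d, %g\n", name, count, ratio) would
+// use arg_count = 3 and an arg_pattern of
+// kPrintfArgX | (kPrintfArgW << 2) | (kPrintfArgD << 4), assuming the string
+// pointer and the int travel in x/w registers and the double in a d register.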
+
+// Trace - kTraceOpcode
+// - parameter: TraceParameter stored as a uint32_t
+// - command: TraceCommand stored as a uint32_t
+//
+// Allow for trace management in the generated code. This enables or disables
+// automatic tracing of the specified information for every simulated
+// instruction.
+const unsigned kTraceParamsOffset = 1 * kInstructionSize;
+const unsigned kTraceCommandOffset = 2 * kInstructionSize;
+const unsigned kTraceLength = 3 * kInstructionSize;
+
+// Trace parameters.
+enum TraceParameters {
+ LOG_DISASM = 1 << 0, // Log disassembly.
+ LOG_REGS = 1 << 1, // Log general purpose registers.
+ LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
+ LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
+ LOG_WRITE = 1 << 4, // Log writes to memory.
+
+ LOG_NONE = 0,
+ LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
+};
+
+// Trace commands.
+enum TraceCommand {
+ TRACE_ENABLE = 1,
+ TRACE_DISABLE = 2
+};
+
+// Log - kLogOpcode
+// - parameter: TraceParameter stored as a uint32_t
+//
+// Print the specified information once. This mechanism is separate from Trace.
+// In particular, _all_ of the specified registers are printed, rather than just
+// the registers that the instruction writes.
+//
+// Any combination of the TraceParameters values can be used, except that
+// LOG_DISASM is not supported for Log.
+const unsigned kLogParamsOffset = 1 * kInstructionSize;
+const unsigned kLogLength = 2 * kInstructionSize;
+} // namespace vixl
+
+#endif // VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.cpp b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
new file mode 100644
index 000000000..9d95a2bb4
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
@@ -0,0 +1,3949 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "js-config.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+#include <cmath>
+#include <string.h>
+
+namespace vixl {
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ VIXL_ASSERT(is_uintn(width, bits) || is_intn(width, bits));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ VIXL_ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ VIXL_UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
+
+
+void Simulator::RunFrom(const Instruction* first) {
+ set_pc(first);
+ Run();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "x27", "x28", "x29", "lr", "xzr", "sp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "w27", "w28", "w29", "w30", "wzr", "wsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vreg_names[code];
+}
+
+
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
+#define MAGENTA "35"
+#define CYAN "36"
+#define WHITE "37"
+void Simulator::set_coloured_trace(bool value) {
+ coloured_trace_ = value;
+
+ clr_normal = value ? COLOUR(NORMAL) : "";
+ clr_flag_name = value ? COLOUR_BOLD(WHITE) : "";
+ clr_flag_value = value ? COLOUR(NORMAL) : "";
+ clr_reg_name = value ? COLOUR_BOLD(CYAN) : "";
+ clr_reg_value = value ? COLOUR(CYAN) : "";
+ clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : "";
+ clr_vreg_value = value ? COLOUR(MAGENTA) : "";
+ clr_memory_address = value ? COLOUR_BOLD(BLUE) : "";
+ clr_warning = value ? COLOUR_BOLD(YELLOW) : "";
+ clr_warning_message = value ? COLOUR(YELLOW) : "";
+ clr_printf = value ? COLOUR(GREEN) : "";
+}
+#undef COLOUR
+#undef COLOUR_BOLD
+#undef NORMAL
+#undef GREY
+#undef RED
+#undef GREEN
+#undef YELLOW
+#undef BLUE
+#undef MAGENTA
+#undef CYAN
+#undef WHITE
+
+
+void Simulator::set_trace_parameters(int parameters) {
+ bool disasm_before = trace_parameters_ & LOG_DISASM;
+ trace_parameters_ = parameters;
+ bool disasm_after = trace_parameters_ & LOG_DISASM;
+
+ if (disasm_before != disasm_after) {
+ if (disasm_after) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
+}
+
+
+void Simulator::set_instruction_stats(bool value) {
+ if (value != instruction_stats_) {
+ if (value) {
+ decoder_->AppendVisitor(instrumentation_);
+ } else {
+ decoder_->RemoveVisitor(instrumentation_);
+ }
+ instruction_stats_ = value;
+ }
+}
+
+// Helpers ---------------------------------------------------------------------
+uint64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in) {
+ VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+
+ uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
+ uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
+ uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;
+
+ left &= reg_mask;
+ right &= reg_mask;
+ uint64_t result = (left + right + carry_in) & reg_mask;
+
+ if (set_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+
+ // Compute the C flag by comparing the result to the max unsigned integer.
+ uint64_t max_uint_2op = max_uint - carry_in;
+ bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
+ nzcv().SetC(C ? 1 : 0);
+
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ uint64_t left_sign = left & sign_mask;
+ uint64_t right_sign = right & sign_mask;
+ uint64_t result_sign = result & sign_mask;
+ bool V = (left_sign == right_sign) && (left_sign != result_sign);
+ nzcv().SetV(V ? 1 : 0);
+
+ LogSystemRegister(NZCV);
+ }
+ return result;
+}
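+// Worked example (32-bit): AddWithCarry(kWRegSize, true, 0x7fffffff, 1, 0)
+// yields 0x80000000 with N=1, Z=0, C=0, V=1: the two positive inputs overflow
+// into a negative result while producing no carry out of bit 31.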
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSize - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSize) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((INT64_C(1) << amount) - 1)) <<
+ (reg_size - amount));
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
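+// Example: ShiftOperand(kWRegSize, 1, ROR, 1) masks the value to 32 bits and
+// rotates it right by one bit, producing 0x80000000.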
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
+
+
+void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
+ AssertSupportedFPCR();
+
+ // TODO: This assumes that the C++ implementation handles comparisons in the
+ // way that we expect (as per AssertSupportedFPCR()).
+ bool process_exception = false;
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
+ (trap == EnableTrap)) {
+ process_exception = true;
+ }
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ LogSystemRegister(NZCV);
+ if (process_exception) FPProcessException();
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
+ unsigned reg_size, unsigned lane_size) {
+ VIXL_ASSERT(reg_size >= lane_size);
+
+ uint32_t format = 0;
+ if (reg_size != lane_size) {
+ switch (reg_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format = kPrintRegAsQVector; break;
+ case kDRegSizeInBytes: format = kPrintRegAsDVector; break;
+ }
+ }
+
+ switch (lane_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format |= kPrintReg1Q; break;
+ case kDRegSizeInBytes: format |= kPrintReg1D; break;
+ case kSRegSizeInBytes: format |= kPrintReg1S; break;
+ case kHRegSizeInBytes: format |= kPrintReg1H; break;
+ case kBRegSizeInBytes: format |= kPrintReg1B; break;
+ }
+ // These sizes would be duplicate case labels.
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
+ VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
+
+ return static_cast<PrintRegisterFormat>(format);
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
+ VectorFormat vform) {
+ switch (vform) {
+ default: VIXL_UNREACHABLE(); return kPrintReg16B;
+ case kFormat16B: return kPrintReg16B;
+ case kFormat8B: return kPrintReg8B;
+ case kFormat8H: return kPrintReg8H;
+ case kFormat4H: return kPrintReg4H;
+ case kFormat4S: return kPrintReg4S;
+ case kFormat2S: return kPrintReg2S;
+ case kFormat2D: return kPrintReg2D;
+ case kFormat1D: return kPrintReg1D;
+ }
+}
+
+
+void Simulator::PrintWrittenRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintWrittenVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters() {
+ PrintSystemRegister(NZCV);
+ PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+// Print a register's name and raw value.
+//
+// Only the least-significant `size_in_bytes` bytes of the register are printed,
+// but the value is aligned as if the whole register had been printed.
+//
+// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes
+// -- the default -- so that the whole register is printed. Other values of
+// size_in_bytes are intended for use when the register hasn't actually been
+// updated (such as in PrintWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes) {
+ // The template for all supported sizes.
+ // "# x{code}: 0xffeeddccbbaa9988"
+ // "# w{code}: 0xbbaa9988"
+ // "# w{code}<15:0>: 0x9988"
+ // "# w{code}<7:0>: 0x88"
+ unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2;
+
+ const char * name = "";
+ const char * suffix = "";
+ switch (size_in_bytes) {
+ case kXRegSizeInBytes: name = XRegNameForCode(code, r31mode); break;
+ case kWRegSizeInBytes: name = WRegNameForCode(code, r31mode); break;
+ case 2:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<15:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ case 1:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<7:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
+
+ // Print leading padding spaces.
+ VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2));
+ for (unsigned i = 0; i < padding_chars; i++) {
+ putc(' ', stream_);
+ }
+
+ // Print the specified bits in hexadecimal format.
+ uint64_t bits = reg<uint64_t>(code, r31mode);
+ bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8);
+ VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes);
+
+ int chars = size_in_bytes * 2;
+ fprintf(stream_, "%s0x%0*" PRIx64 "%s",
+ clr_reg_value, chars, bits, clr_normal);
+}
+
+
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ registers_[code].NotifyRegisterLogged();
+
+ // Don't print writes into xzr.
+ if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ // The template for all x and w registers:
+ // "# x{code}: 0x{value}"
+ // "# w{code}: 0x{value}"
+
+ PrintRegisterRawHelper(code, r31mode);
+ fprintf(stream_, "\n");
+}
+
+
+// Print a register's name and raw value.
+//
+// The `bytes` and `lsb` arguments can be used to limit the bytes that are
+// printed. These arguments are intended for use in cases where the register
+// hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a floating-point interpretation or a memory access annotation).
+void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
+ // The template for vector types:
+ // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // An example with bytes=4 and lsb=8:
+ // "# v{code}: 0xbbaa9988 ".
+ fprintf(stream_, "# %s%5s: %s",
+ clr_vreg_name, VRegNameForCode(code), clr_vreg_value);
+
+ int msb = lsb + bytes - 1;
+ int byte = kQRegSizeInBytes - 1;
+
+ // Print leading padding spaces. (Two spaces per byte.)
+ while (byte > msb) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+
+ // Print the specified part of the value, byte by byte.
+ qreg_t rawbits = qreg(code);
+ fprintf(stream_, "0x");
+ while (byte >= lsb) {
+ fprintf(stream_, "%02x", rawbits.val[byte]);
+ byte--;
+ }
+
+ // Print trailing padding spaces.
+ while (byte >= 0) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+ fprintf(stream_, "%s", clr_normal);
+}
+
+
+// Print each of the specified lanes of a register as a float or double value.
+//
+// The `lane_count` and `rightmost_lane` arguments can be used to limit the
+// lanes that are printed. These arguments are intended for use in cases where
+// the register hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintVRegisterFPHelper(unsigned code,
+ unsigned lane_size_in_bytes,
+ int lane_count,
+ int rightmost_lane) {
+ VIXL_ASSERT((lane_size_in_bytes == kSRegSizeInBytes) ||
+ (lane_size_in_bytes == kDRegSizeInBytes));
+
+ unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes);
+ VIXL_ASSERT(msb <= kQRegSizeInBytes);
+
+ // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
+ // name is used:
+ // " (s{code}: {value})"
+ // " (d{code}: {value})"
+ // For vector types, "..." is used to represent one or more omitted lanes.
+ // " (..., {value}, {value}, ...)"
+ if ((lane_count == 1) && (rightmost_lane == 0)) {
+ const char * name =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? SRegNameForCode(code)
+ : DRegNameForCode(code);
+ fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
+ } else {
+ if (msb < (kQRegSizeInBytes - 1)) {
+ fprintf(stream_, " (..., ");
+ } else {
+ fprintf(stream_, " (");
+ }
+ }
+
+ // Print the list of values.
+ const char * separator = "";
+ int leftmost_lane = rightmost_lane + lane_count - 1;
+ for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
+ double value =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? vreg(code).Get<float>(lane)
+ : vreg(code).Get<double>(lane);
+ fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
+ separator = ", ";
+ }
+
+ if (rightmost_lane > 0) {
+ fprintf(stream_, ", ...");
+ }
+ fprintf(stream_, ")");
+}
+
+
+void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
+ vregisters_[code].NotifyRegisterLogged();
+
+ int lane_size_log2 = format & kPrintRegLaneSizeMask;
+
+ int reg_size_log2;
+ if (format & kPrintRegAsQVector) {
+ reg_size_log2 = kQRegSizeInBytesLog2;
+ } else if (format & kPrintRegAsDVector) {
+ reg_size_log2 = kDRegSizeInBytesLog2;
+ } else {
+ // Scalar types.
+ reg_size_log2 = lane_size_log2;
+ }
+
+ int lane_count = 1 << (reg_size_log2 - lane_size_log2);
+ int lane_size = 1 << lane_size_log2;
+
+ // The template for vector types:
+ // "# v{code}: 0x{rawbits} (..., {value}, ...)".
+ // The template for scalar types:
+ // "# v{code}: 0x{rawbits} ({reg}:{value})".
+ // The values in parentheses after the bit representations are floating-point
+ // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
+
+ PrintVRegisterRawHelper(code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(code, lane_size, lane_count);
+ }
+
+ fprintf(stream_, "\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name, clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ break;
+ case FPCR: {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ VIXL_ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_,
+ "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::PrintRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ registers_[reg_code].NotifyRegisterLogged();
+
+ USE(format);
+
+ // The template is "# {reg}: 0x{value} <- {address}".
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ vregisters_[reg_code].NotifyRegisterLogged();
+
+ // The template is "# v{code}: 0x{rawbits} <- {address}".
+ PrintVRegisterRawHelper(reg_code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
+ GetPrintRegLaneCount(format), lane);
+ }
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
+
+ // The template is "# {reg}: 0x{value} -> {address}". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
+ GetPrintRegSizeInBytes(format));
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ // The templates:
+ // "# v{code}: 0x{rawbits} -> {address}"
+ // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
+ // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
+ // Because this trace doesn't represent a change to the source register's
+ // value, only the relevant part of the value is printed. To keep the trace
+ // tidy and readable, the raw value is aligned with the other values in the
+ // register trace.
+ int lane_count = GetPrintRegLaneCount(format);
+ int lane_size = GetPrintRegLaneSizeInBytes(format);
+ int reg_size = GetPrintRegSizeInBytes(format);
+ PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
+ }
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+// Visitors ---------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(const Instruction* instr) {
+ printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(const Instruction* instr) {
+ printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
+ (instr->Mask(PCRelAddressingMask) == ADRP));
+
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+}
+
+
+void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(instr->ConditionBranch())) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ const Instruction* target = Instruction::Cast(xreg(instr->Rn()));
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case BR:
+ case RET: set_pc(target); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitTestBranch(const Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool bit_zero = ((xreg(instr->Rt()) >> bit_pos) & 1) == 0;
+ bool take_branch = false;
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: take_branch = bit_zero; break;
+ case TBNZ: take_branch = !bit_zero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
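+ // Note that SUB and SUBS are evaluated as Rn + ~op2 + 1, i.e. two's-complement
+ // subtraction expressed through AddWithCarry.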
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, LogRegWrites, instr->RdMode());
+}
+
+
+void Simulator::VisitAddSubShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(const Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == SUBS)) {
+ op2 = ~op2;
+ }
+
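+ // ADC computes Rn + op2 + C. For SBC and SBCS, op2 was complemented above, so
+ // the same expression yields Rn - Rm - 1 + C, as required.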
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(const Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; VIXL_FALLTHROUGH();
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ LogSystemRegister(NZCV);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, LogRegWrites, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(const Instruction* instr,
+ int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(instr->Condition())) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ set_wreg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break;
+ case LDRH_w:
+ set_wreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break;
+ case LDR_w:
+ set_wreg(srcdst, Memory::Read<uint32_t>(address), NoRegLog); break;
+ case LDR_x:
+ set_xreg(srcdst, Memory::Read<uint64_t>(address), NoRegLog); break;
+ case LDRSB_w:
+ set_wreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_w:
+ set_wreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break;
+ case LDRSB_x:
+ set_xreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_x:
+ set_xreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break;
+ case LDRSW_x:
+ set_xreg(srcdst, Memory::Read<int32_t>(address), NoRegLog); break;
+ case LDR_b:
+ set_breg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break;
+ case LDR_h:
+ set_hreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break;
+ case LDR_s:
+ set_sreg(srcdst, Memory::Read<float>(address), NoRegLog); break;
+ case LDR_d:
+ set_dreg(srcdst, Memory::Read<double>(address), NoRegLog); break;
+ case LDR_q:
+ set_qreg(srcdst, Memory::Read<qreg_t>(address), NoRegLog); break;
+
+ case STRB_w: Memory::Write<uint8_t>(address, wreg(srcdst)); break;
+ case STRH_w: Memory::Write<uint16_t>(address, wreg(srcdst)); break;
+ case STR_w: Memory::Write<uint32_t>(address, wreg(srcdst)); break;
+ case STR_x: Memory::Write<uint64_t>(address, xreg(srcdst)); break;
+ case STR_b: Memory::Write<uint8_t>(address, breg(srcdst)); break;
+ case STR_h: Memory::Write<uint16_t>(address, hreg(srcdst)); break;
+ case STR_s: Memory::Write<float>(address, sreg(srcdst)); break;
+ case STR_d: Memory::Write<double>(address, dreg(srcdst)); break;
+ case STR_q: Memory::Write<qreg_t>(address, qreg(srcdst)); break;
+
+ // Ignore prfm hint instructions.
+ case PRFM: break;
+
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ unsigned access_size = 1 << instr->SizeLS();
+ if (instr->IsLoad()) {
+ if ((op == LDR_s) || (op == LDR_d)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ } else {
+ if ((op == STR_s) || (op == STR_d)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(const Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ int element_size = 1 << instr->SizeLSPair();
+ int64_t offset = instr->ImmLSPair() * element_size;
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+ uintptr_t address2 = address + element_size;
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
+ // will print a more detailed log.
+ case LDP_w: {
+ set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Memory::Read<uint32_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, Memory::Read<float>(address), NoRegLog);
+ set_sreg(rt2, Memory::Read<float>(address2), NoRegLog);
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Memory::Read<uint64_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, Memory::Read<double>(address), NoRegLog);
+ set_dreg(rt2, Memory::Read<double>(address2), NoRegLog);
+ break;
+ }
+ case LDP_q: {
+ set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog);
+ set_qreg(rt2, Memory::Read<qreg_t>(address2), NoRegLog);
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog);
+ set_xreg(rt2, Memory::Read<int32_t>(address2), NoRegLog);
+ break;
+ }
+ case STP_w: {
+ Memory::Write<uint32_t>(address, wreg(rt));
+ Memory::Write<uint32_t>(address2, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ Memory::Write<float>(address, sreg(rt));
+ Memory::Write<float>(address2, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ Memory::Write<uint64_t>(address, xreg(rt));
+ Memory::Write<uint64_t>(address2, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ Memory::Write<double>(address, dreg(rt));
+ Memory::Write<double>(address2, dreg(rt2));
+ break;
+ }
+ case STP_q: {
+ Memory::Write<qreg_t>(address, qreg(rt));
+ Memory::Write<qreg_t>(address2, qreg(rt2));
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ if (instr->IsLoad()) {
+ if ((op == LDP_s) || (op == LDP_d)) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == LDP_q) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if ((op == STP_s) || (op == STP_d)) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == STP_q) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::PrintExclusiveAccessWarning() {
+ if (print_exclusive_access_warning_) {
+ fprintf(
+ stderr,
+ "%sWARNING:%s VIXL simulator support for load-/store-/clear-exclusive "
+ "instructions is limited. Refer to the README for details.%s\n",
+ clr_warning, clr_warning_message, clr_normal);
+ print_exclusive_access_warning_ = false;
+ }
+}
+
+
+void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
+ PrintExclusiveAccessWarning();
+
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned rn = instr->Rn();
+
+ LoadStoreExclusive op =
+ static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
+
+ bool is_acquire_release = instr->LdStXAcquireRelease();
+ bool is_exclusive = !instr->LdStXNotExclusive();
+ bool is_load = instr->LdStXLoad();
+ bool is_pair = instr->LdStXPair();
+
+ unsigned element_size = 1 << instr->LdStXSizeLog2();
+ unsigned access_size = is_pair ? element_size * 2 : element_size;
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ // Check the alignment of `address`.
+ if (AlignDown(address, access_size) != address) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ // The sp must be aligned to 16 bytes when it is accessed.
+ if ((rn == 31) && (AlignDown(address, 16) != address)) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if (is_load) {
+ if (is_exclusive) {
+ local_monitor_.MarkExclusive(address, access_size);
+ } else {
+ // Any non-exclusive load can clear the local monitor as a side effect. We
+ // don't need to do this, but it is useful to stress the simulated code.
+ local_monitor_.Clear();
+ }
+
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
+ // will print a more detailed log.
+ switch (op) {
+ case LDXRB_w:
+ case LDAXRB_w:
+ case LDARB_w:
+ set_wreg(rt, Memory::Read<uint8_t>(address), NoRegLog);
+ break;
+ case LDXRH_w:
+ case LDAXRH_w:
+ case LDARH_w:
+ set_wreg(rt, Memory::Read<uint16_t>(address), NoRegLog);
+ break;
+ case LDXR_w:
+ case LDAXR_w:
+ case LDAR_w:
+ set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
+ break;
+ case LDXR_x:
+ case LDAXR_x:
+ case LDAR_x:
+ set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
+ break;
+ case LDXP_w:
+ case LDAXP_w:
+ set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Memory::Read<uint32_t>(address + element_size), NoRegLog);
+ break;
+ case LDXP_x:
+ case LDAXP_x:
+ set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Memory::Read<uint64_t>(address + element_size), NoRegLog);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ if (is_acquire_release) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogRead(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if (is_acquire_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ bool do_store = true;
+ if (is_exclusive) {
+ do_store = local_monitor_.IsExclusive(address, access_size) &&
+ global_monitor_.IsExclusive(address, access_size);
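+ // The status result is 0 if the exclusive store succeeded and 1 if it
+ // failed, matching the STXR/STLXR definition.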
+ set_wreg(rs, do_store ? 0 : 1);
+
+ // - All exclusive stores explicitly clear the local monitor.
+ local_monitor_.Clear();
+ } else {
+ // - Any other store can clear the local monitor as a side effect.
+ local_monitor_.MaybeClear();
+ }
+
+ if (do_store) {
+ switch (op) {
+ case STXRB_w:
+ case STLXRB_w:
+ case STLRB_w:
+ Memory::Write<uint8_t>(address, wreg(rt));
+ break;
+ case STXRH_w:
+ case STLXRH_w:
+ case STLRH_w:
+ Memory::Write<uint16_t>(address, wreg(rt));
+ break;
+ case STXR_w:
+ case STLXR_w:
+ case STLR_w:
+ Memory::Write<uint32_t>(address, wreg(rt));
+ break;
+ case STXR_x:
+ case STLXR_x:
+ case STLR_x:
+ Memory::Write<uint64_t>(address, xreg(rt));
+ break;
+ case STXP_w:
+ case STLXP_w:
+ Memory::Write<uint32_t>(address, wreg(rt));
+ Memory::Write<uint32_t>(address + element_size, wreg(rt2));
+ break;
+ case STXP_x:
+ case STLXP_x:
+ Memory::Write<uint64_t>(address, xreg(rt));
+ Memory::Write<uint64_t>(address + element_size, xreg(rt2));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogWrite(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+ }
+}
+
+
+void Simulator::VisitLoadLiteral(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ uint64_t address = instr->LiteralAddress<uint64_t>();
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
+ // print a more detailed log.
+ case LDR_w_lit:
+ set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+ case LDR_x_lit:
+ set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintXReg);
+ break;
+ case LDR_s_lit:
+ set_sreg(rt, Memory::Read<float>(address), NoRegLog);
+ LogVRead(address, rt, kPrintSReg);
+ break;
+ case LDR_d_lit:
+ set_dreg(rt, Memory::Read<double>(address), NoRegLog);
+ LogVRead(address, rt, kPrintDReg);
+ break;
+ case LDR_q_lit:
+ set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog);
+ LogVRead(address, rt, kPrintReg1Q);
+ break;
+ case LDRSW_x_lit:
+ set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+
+ // Ignore prfm hint instructions.
+ case PRFM_lit: break;
+
+ default: VIXL_UNREACHABLE();
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+
+ if ((addr_reg == 31) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ VIXL_ASSERT(offset != 0);
+ // Only pre-index should log the register update here. For post-index, the
+ // update will be printed automatically by LogWrittenRegisters _after_ the
+ // memory access itself is logged.
+ RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog;
+ set_xreg(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ return static_cast<uintptr_t>(address);
+}
+
+
+void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ VIXL_ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val =
+ (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
+
+
+void Simulator::VisitConditionalSelect(const Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src))); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src))); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), 1)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), 1)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), 2)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), 2)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), 3)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src))); break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src))); break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src)));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src)));
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
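+// Reduce an n-bit value (32 < n <= 64) modulo the CRC polynomial described by
+// `poly` (with its implicit leading bit), using carry-less (GF(2)) arithmetic.
+// The 32-bit remainder is returned.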
+uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
+ VIXL_ASSERT((n > 32) && (n <= 64));
+ for (unsigned i = (n - 1); i >= 32; i--) {
+ if (((data >> i) & 1) != 0) {
+ uint64_t polysh32 = (uint64_t)poly << (i - 32);
+ uint64_t mask = (UINT64_C(1) << i) - 1;
+ data = ((data & mask) ^ polysh32);
+ }
+ }
+ return data & 0xffffffff;
+}
+
+
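+// Update a reflected (bit-reversed) CRC-32 accumulator with an 8-, 16- or
+// 32-bit value, using the generator polynomial `poly`. This is the update step
+// used to model the CRC32* and CRC32C* instructions.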
+template <typename T>
+uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
+ unsigned size = sizeof(val) * 8; // Number of bits in type T.
+ VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
+ uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
+ uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
+ return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
+}
+
+
+uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
+ // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute
+ // the CRC of each 32-bit word sequentially.
+ acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
+ return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
+}
+
+
+void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ case CRC32B: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32H: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32W: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32X: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ case CRC32CB: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CH: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CW: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CX: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (shift_op != NO_SHIFT) {
+ // The shift distance is encoded in the least-significant five (W) or six (X)
+ // bits of the shift register, Rm.
+ int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is adapted from the one described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+// Type T must be either uint64_t or int64_t.
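+// In other words, with u = (u1 << 32) + u0 and v = (v1 << 32) + v0:
+//   u * v = (u1 * v1 << 64) + ((u1 * v0 + u0 * v1) << 32) + u0 * v0,
+// and the function returns the top 64 bits of that 128-bit product, carrying
+// the intermediate 32-bit partial products as it goes.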
+template <typename T>
+static T MultiplyHigh(T u, T v) {
+ uint64_t u0, v0, w0;
+ T u1, v1, w1, w2, t;
+
+ VIXL_ASSERT(sizeof(u) == sizeof(u0));
+
+ u0 = u & 0xffffffff;
+ u1 = u >> 32;
+ v0 = v & 0xffffffff;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffff;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+
+void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case UMULH_x:
+ result = MultiplyHigh(reg<uint64_t>(instr->Rn()),
+ reg<uint64_t>(instr->Rm()));
+ break;
+ case SMULH_x:
+ result = MultiplyHigh(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
+ if (diff >= 0) {
+ mask = (diff < (reg_size - 1)) ? (INT64_C(1) << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = (INT64_C(1) << (S + 1)) - 1;
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+ // `inzero` indicates whether the extracted bitfield is inserted into the
+ // existing destination register value or into zero.
+ // If `extend` is true, the sign of the extracted bitfield is extended.
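+ // For example, "ubfm w0, w1, #8, #15" (UBFX) gives R = 8, S = 15, diff = 7
+ // and mask = 0xff: bits <15:8> of w1 are rotated down to <7:0> and inserted
+ // into zero, with no sign extension.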
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((INT64_C(1) << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
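+// EXTR: extract a register-sized field from the concatenation Rn:Rm, starting
+// at bit `lsb` of Rm. The lsb == 0 case is handled separately to avoid an
+// undefined shift by the full register width.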
+void Simulator::VisitExtract(const Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ uint64_t low_res = static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb;
+ uint64_t high_res =
+ (lsb == 0) ? 0 : reg(reg_size, instr->Rn()) << (reg_size - lsb);
+ set_reg(reg_size, instr->Rd(), low_res | high_res);
+}
+
+
+void Simulator::VisitFPImmediate(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTPS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+ case FMOV_d1_x:
+ LogicVRegister(vreg(dst)).SetUint(kFormatD, 1, xreg(src));
+ break;
+ case FMOV_x_d1:
+ set_xreg(dst, LogicVRegister(vreg(src)).Uint(kFormatD, 1));
+ break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
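+ // `fbits` is the number of fractional bits: the integer register value
+ // represents the real value multiplied by 2^fbits.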
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case FCVTZS_xd_fixed:
+ set_xreg(dst, FPToInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_wd_fixed:
+ set_wreg(dst, FPToInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_xd_fixed:
+ set_xreg(dst, FPToUInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_wd_fixed:
+ set_wreg(dst, FPToUInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_xs_fixed:
+ set_xreg(dst, FPToInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZS_ws_fixed:
+ set_wreg(dst, FPToInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_xs_fixed:
+ set_xreg(dst, FPToUInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_ws_fixed:
+ set_wreg(dst, FPToUInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMPE_s: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s: FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap); break;
+ case FCMPE_d: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d: FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap); break;
+ case FCMPE_s_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s_zero: FPCompare(sreg(instr->Rn()), 0.0f, trap); break;
+ case FCMPE_d_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d_zero: FPCompare(dreg(instr->Rn()), 0.0, trap); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMPE_s: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_s:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ case FCCMPE_d: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_d:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(instr->Condition())) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ bool inexact_exception = false;
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); return;
+ case FMOV_d: set_dreg(fd, dreg(fn)); return;
+ case FABS_s: fabs_(kFormatS, vreg(fd), vreg(fn)); return;
+ case FABS_d: fabs_(kFormatD, vreg(fd), vreg(fn)); return;
+ case FNEG_s: fneg(kFormatS, vreg(fd), vreg(fn)); return;
+ case FNEG_d: fneg(kFormatD, vreg(fd), vreg(fn)); return;
+ case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); return;
+ case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); return;
+ case FCVT_hs: set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven)); return;
+ case FCVT_sh: set_sreg(fd, FPToFloat(hreg(fn))); return;
+ case FCVT_dh: set_dreg(fd, FPToDouble(FPToFloat(hreg(fn)))); return;
+ case FCVT_hd: set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven)); return;
+ case FSQRT_s:
+ case FSQRT_d: fsqrt(vform, rd, rn); return;
+ case FRINTI_s:
+ case FRINTI_d: break; // Use FPCR rounding mode.
+ case FRINTX_s:
+ case FRINTX_d: inexact_exception = true; break;
+ case FRINTA_s:
+ case FRINTA_d: fpcr_rounding = FPTieAway; break;
+ case FRINTM_s:
+ case FRINTM_d: fpcr_rounding = FPNegativeInfinity; break;
+ case FRINTN_s:
+ case FRINTN_d: fpcr_rounding = FPTieEven; break;
+ case FRINTP_s:
+ case FRINTP_d: fpcr_rounding = FPPositiveInfinity; break;
+ case FRINTZ_s:
+ case FRINTZ_d: fpcr_rounding = FPZero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(vform, rd, rn, fpcr_rounding, inexact_exception);
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s:
+ case FADD_d: fadd(vform, rd, rn, rm); break;
+ case FSUB_s:
+ case FSUB_d: fsub(vform, rd, rn, rm); break;
+ case FMUL_s:
+ case FMUL_d: fmul(vform, rd, rn, rm); break;
+ case FNMUL_s:
+ case FNMUL_d: fnmul(vform, rd, rn, rm); break;
+ case FDIV_s:
+ case FDIV_d: fdiv(vform, rd, rn, rm); break;
+ case FMAX_s:
+ case FMAX_d: fmax(vform, rd, rn, rm); break;
+ case FMIN_s:
+ case FMIN_d: fmin(vform, rd, rn, rm); break;
+ case FMAXNM_s:
+ case FMAXNM_d: fmaxnm(vform, rd, rn, rm); break;
+ case FMINNM_s:
+ case FMINNM_d: fminnm(vform, rd, rn, rm); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+bool Simulator::FPProcessNaNs(const Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::SysOp_W(int op, int64_t val) {
+ switch (op) {
+ case IVAU:
+ case CVAC:
+ case CVAU:
+ case CIVAC: {
+ // Perform a dummy memory access to ensure that we have read access
+ // to the specified address.
+ volatile uint8_t y = Memory::Read<uint8_t>(val);
+ USE(y);
+ // TODO: Implement "case ZVA:".
+ break;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX);
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ PrintExclusiveAccessWarning();
+ ClearLocalMonitor();
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV:
+ nzcv().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(NZCV);
+ break;
+ case FPCR:
+ fpcr().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(FPCR);
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ __sync_synchronize();
+ } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) {
+ switch (instr->Mask(SystemSysMask)) {
+ case SYS: SysOp_W(instr->SysOp(), xreg(instr->Rt())); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitNEON2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
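+  // The long pairwise instructions (SADDLP, UADDLP, SADALP and UADALP) write
+  // lanes twice as wide as their source, so they use a dedicated format map.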
+ static const NEONFormatMap map_lp = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
+
+ static const NEONFormatMap map_fcvtl = {
+ {22}, {NF_4S, NF_2D}
+ };
+ VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
+
+ static const NEONFormatMap map_fcvtn = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+ VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: rev64(vf, rd, rn); break;
+ case NEON_REV32: rev32(vf, rd, rn); break;
+ case NEON_REV16: rev16(vf, rd, rn); break;
+ case NEON_SUQADD: suqadd(vf, rd, rn); break;
+ case NEON_USQADD: usqadd(vf, rd, rn); break;
+ case NEON_CLS: cls(vf, rd, rn); break;
+ case NEON_CLZ: clz(vf, rd, rn); break;
+ case NEON_CNT: cnt(vf, rd, rn); break;
+ case NEON_SQABS: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SQNEG: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_CMGT_zero: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMGE_zero: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMEQ_zero: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMLE_zero: cmp(vf, rd, rn, 0, le); break;
+ case NEON_CMLT_zero: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_ABS: abs(vf, rd, rn); break;
+ case NEON_NEG: neg(vf, rd, rn); break;
+ case NEON_SADDLP: saddlp(vf_lp, rd, rn); break;
+ case NEON_UADDLP: uaddlp(vf_lp, rd, rn); break;
+ case NEON_SADALP: sadalp(vf_lp, rd, rn); break;
+ case NEON_UADALP: uadalp(vf_lp, rd, rn); break;
+ case NEON_RBIT_NOT:
+ vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->FPType()) {
+ case 0: not_(vf, rd, rn); break;
+        case 1: rbit(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ bool inexact_exception = false;
+
+ // These instructions all use a one bit size field, except XTN, SQXTUN,
+ // SHLL, SQXTN and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: fabs_(fpf, rd, rn); return;
+ case NEON_FNEG: fneg(fpf, rd, rn); return;
+ case NEON_FSQRT: fsqrt(fpf, rd, rn); return;
+ case NEON_FCVTL:
+ if (instr->Mask(NEON_Q)) {
+ fcvtl2(vf_fcvtl, rd, rn);
+ } else {
+ fcvtl(vf_fcvtl, rd, rn);
+ }
+ return;
+ case NEON_FCVTN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtn(vf_fcvtn, rd, rn);
+ }
+ return;
+ case NEON_FCVTXN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtxn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtxn(vf_fcvtn, rd, rn);
+ }
+ return;
+
+ // The following instructions break from the switch statement, rather
+ // than return.
+ case NEON_FRINTI: break; // Use FPCR rounding mode.
+ case NEON_FRINTX: inexact_exception = true; break;
+ case NEON_FRINTA: fpcr_rounding = FPTieAway; break;
+ case NEON_FRINTM: fpcr_rounding = FPNegativeInfinity; break;
+ case NEON_FRINTN: fpcr_rounding = FPTieEven; break;
+ case NEON_FRINTP: fpcr_rounding = FPPositiveInfinity; break;
+ case NEON_FRINTZ: fpcr_rounding = FPZero; break;
+
+ case NEON_FCVTNS: fcvts(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTNU: fcvtu(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTPS: fcvts(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTPU: fcvtu(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTMS: fcvts(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTMU: fcvtu(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTZS: fcvts(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTZU: fcvtu(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTAS: fcvts(fpf, rd, rn, FPTieAway); return;
+ case NEON_FCVTAU: fcvtu(fpf, rd, rn, FPTieAway); return;
+ case NEON_SCVTF: scvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_UCVTF: ucvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_URSQRTE: ursqrte(fpf, rd, rn); return;
+ case NEON_URECPE: urecpe(fpf, rd, rn); return;
+ case NEON_FRSQRTE: frsqrte(fpf, rd, rn); return;
+ case NEON_FRECPE: frecpe(fpf, rd, rn, fpcr_rounding); return;
+ case NEON_FCMGT_zero: fcmp_zero(fpf, rd, rn, gt); return;
+ case NEON_FCMGE_zero: fcmp_zero(fpf, rd, rn, ge); return;
+ case NEON_FCMEQ_zero: fcmp_zero(fpf, rd, rn, eq); return;
+ case NEON_FCMLE_zero: fcmp_zero(fpf, rd, rn, le); return;
+ case NEON_FCMLT_zero: fcmp_zero(fpf, rd, rn, lt); return;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: xtn(vf, rd, rn); return;
+ case NEON_SQXTN: sqxtn(vf, rd, rn); return;
+ case NEON_UQXTN: uqxtn(vf, rd, rn); return;
+ case NEON_SQXTUN: sqxtun(vf, rd, rn); return;
+ case NEON_SHLL:
+ vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+ if (instr->Mask(NEON_Q)) {
+ shll2(vf, rd, rn);
+ } else {
+ shll(vf, rd, rn);
+ }
+ return;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
+ }
+}
+
+
+void Simulator::VisitNEON3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: and_(vf, rd, rn, rm); break;
+ case NEON_ORR: orr(vf, rd, rn, rm); break;
+ case NEON_ORN: orn(vf, rd, rn, rm); break;
+ case NEON_EOR: eor(vf, rd, rn, rm); break;
+ case NEON_BIC: bic(vf, rd, rn, rm); break;
+ case NEON_BIF: bif(vf, rd, rn, rm); break;
+ case NEON_BIT: bit(vf, rd, rn, rm); break;
+ case NEON_BSL: bsl(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ switch (instr->Mask(NEON3SameFPMask)) {
+ case NEON_FADD: fadd(vf, rd, rn, rm); break;
+ case NEON_FSUB: fsub(vf, rd, rn, rm); break;
+ case NEON_FMUL: fmul(vf, rd, rn, rm); break;
+ case NEON_FDIV: fdiv(vf, rd, rn, rm); break;
+ case NEON_FMAX: fmax(vf, rd, rn, rm); break;
+ case NEON_FMIN: fmin(vf, rd, rn, rm); break;
+ case NEON_FMAXNM: fmaxnm(vf, rd, rn, rm); break;
+ case NEON_FMINNM: fminnm(vf, rd, rn, rm); break;
+ case NEON_FMLA: fmla(vf, rd, rn, rm); break;
+ case NEON_FMLS: fmls(vf, rd, rn, rm); break;
+ case NEON_FMULX: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD: fabd(vf, rd, rn, rm); break;
+ case NEON_FADDP: faddp(vf, rd, rn, rm); break;
+ case NEON_FMAXP: fmaxp(vf, rd, rn, rm); break;
+ case NEON_FMAXNMP: fmaxnmp(vf, rd, rn, rm); break;
+ case NEON_FMINP: fminp(vf, rd, rn, rm); break;
+ case NEON_FMINNMP: fminnmp(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+ switch (instr->Mask(NEON3SameMask)) {
+ case NEON_ADD: add(vf, rd, rn, rm); break;
+ case NEON_ADDP: addp(vf, rd, rn, rm); break;
+ case NEON_CMEQ: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST: cmptst(vf, rd, rn, rm); break;
+ case NEON_MLS: mls(vf, rd, rn, rm); break;
+ case NEON_MLA: mla(vf, rd, rn, rm); break;
+ case NEON_MUL: mul(vf, rd, rn, rm); break;
+ case NEON_PMUL: pmul(vf, rd, rn, rm); break;
+ case NEON_SMAX: smax(vf, rd, rn, rm); break;
+ case NEON_SMAXP: smaxp(vf, rd, rn, rm); break;
+ case NEON_SMIN: smin(vf, rd, rn, rm); break;
+ case NEON_SMINP: sminp(vf, rd, rn, rm); break;
+ case NEON_SUB: sub(vf, rd, rn, rm); break;
+ case NEON_UMAX: umax(vf, rd, rn, rm); break;
+ case NEON_UMAXP: umaxp(vf, rd, rn, rm); break;
+ case NEON_UMIN: umin(vf, rd, rn, rm); break;
+ case NEON_UMINP: uminp(vf, rd, rn, rm); break;
+ case NEON_SSHL: sshl(vf, rd, rn, rm); break;
+ case NEON_USHL: ushl(vf, rd, rn, rm); break;
+ case NEON_SABD: absdiff(vf, rd, rn, rm, true); break;
+ case NEON_UABD: absdiff(vf, rd, rn, rm, false); break;
+ case NEON_SABA: saba(vf, rd, rn, rm); break;
+ case NEON_UABA: uaba(vf, rd, rn, rm); break;
+ case NEON_UQADD: add(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQADD: add(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_UQSUB: sub(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSUB: sub(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_SQDMULH: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQSHL: ushl(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSHL: sshl(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_URSHL: ushl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_SRSHL: sshl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_UQRSHL:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ case NEON_UHADD:
+ add(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_URHADD:
+ add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
+ break;
+ case NEON_SHADD:
+ add(vf, rd, rn, rm).Halve(vf);
+ break;
+ case NEON_SRHADD:
+ add(vf, rd, rn, rm).Halve(vf).Round(vf);
+ break;
+ case NEON_UHSUB:
+ sub(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_SHSUB:
+ sub(vf, rd, rn, rm).Halve(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEON3Different(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEON3DifferentMask)) {
+ case NEON_PMULL: pmull(vf_l, rd, rn, rm); break;
+ case NEON_PMULL2: pmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDL: uaddl(vf_l, rd, rn, rm); break;
+ case NEON_UADDL2: uaddl2(vf_l, rd, rn, rm); break;
+ case NEON_SADDL: saddl(vf_l, rd, rn, rm); break;
+ case NEON_SADDL2: saddl2(vf_l, rd, rn, rm); break;
+ case NEON_USUBL: usubl(vf_l, rd, rn, rm); break;
+ case NEON_USUBL2: usubl2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL: ssubl(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL2: ssubl2(vf_l, rd, rn, rm); break;
+ case NEON_SABAL: sabal(vf_l, rd, rn, rm); break;
+ case NEON_SABAL2: sabal2(vf_l, rd, rn, rm); break;
+ case NEON_UABAL: uabal(vf_l, rd, rn, rm); break;
+ case NEON_UABAL2: uabal2(vf_l, rd, rn, rm); break;
+ case NEON_SABDL: sabdl(vf_l, rd, rn, rm); break;
+ case NEON_SABDL2: sabdl2(vf_l, rd, rn, rm); break;
+ case NEON_UABDL: uabdl(vf_l, rd, rn, rm); break;
+ case NEON_UABDL2: uabdl2(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL: smlal(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL2: smlal2(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL: umlal(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL2: umlal2(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL: smlsl(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL2: smlsl2(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL: umlsl(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL2: umlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SMULL: smull(vf_l, rd, rn, rm); break;
+ case NEON_SMULL2: smull2(vf_l, rd, rn, rm); break;
+ case NEON_UMULL: umull(vf_l, rd, rn, rm); break;
+ case NEON_UMULL2: umull2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL: sqdmlal(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL2: sqdmlal2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL: sqdmlsl(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL2: sqdmlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL: sqdmull(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL2: sqdmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDW: uaddw(vf_l, rd, rn, rm); break;
+ case NEON_UADDW2: uaddw2(vf_l, rd, rn, rm); break;
+ case NEON_SADDW: saddw(vf_l, rd, rn, rm); break;
+ case NEON_SADDW2: saddw2(vf_l, rd, rn, rm); break;
+ case NEON_USUBW: usubw(vf_l, rd, rn, rm); break;
+ case NEON_USUBW2: usubw2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW: ssubw(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW2: ssubw2(vf_l, rd, rn, rm); break;
+ case NEON_ADDHN: addhn(vf, rd, rn, rm); break;
+ case NEON_ADDHN2: addhn2(vf, rd, rn, rm); break;
+ case NEON_RADDHN: raddhn(vf, rd, rn, rm); break;
+ case NEON_RADDHN2: raddhn2(vf, rd, rn, rm); break;
+ case NEON_SUBHN: subhn(vf, rd, rn, rm); break;
+ case NEON_SUBHN2: subhn2(vf, rd, rn, rm); break;
+ case NEON_RSUBHN: rsubhn(vf, rd, rn, rm); break;
+ case NEON_RSUBHN2: rsubhn2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ // The input operand's VectorFormat is passed for these instructions.
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: fmaxv(vf, rd, rn); break;
+ case NEON_FMINV: fminv(vf, rd, rn); break;
+ case NEON_FMAXNMV: fmaxnmv(vf, rd, rn); break;
+ case NEON_FMINNMV: fminnmv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: addv(vf, rd, rn); break;
+ case NEON_SMAXV: smaxv(vf, rd, rn); break;
+ case NEON_SMINV: sminv(vf, rd, rn); break;
+ case NEON_UMAXV: umaxv(vf, rd, rn); break;
+ case NEON_UMINV: uminv(vf, rd, rn); break;
+ case NEON_SADDLV: saddlv(vf, rd, rn); break;
+ case NEON_UADDLV: uaddlv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf_r = nfd.GetVectorFormat();
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ ByElementOp Op = NULL;
+
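+  // The element index is built from the H, L and M bits. For 16-bit lanes,
+  // Rm is restricted to V0-V15, so the M bit is folded into the index rather
+  // than the register number.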
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_MUL_byelement: Op = &Simulator::mul; vf = vf_r; break;
+ case NEON_MLA_byelement: Op = &Simulator::mla; vf = vf_r; break;
+ case NEON_MLS_byelement: Op = &Simulator::mls; vf = vf_r; break;
+ case NEON_SQDMULH_byelement: Op = &Simulator::sqdmulh; vf = vf_r; break;
+ case NEON_SQRDMULH_byelement: Op = &Simulator::sqrdmulh; vf = vf_r; break;
+ case NEON_SMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smull2;
+ } else {
+ Op = &Simulator::smull;
+ }
+ break;
+ case NEON_UMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umull2;
+ } else {
+ Op = &Simulator::umull;
+ }
+ break;
+ case NEON_SMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlal2;
+ } else {
+ Op = &Simulator::smlal;
+ }
+ break;
+ case NEON_UMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlal2;
+ } else {
+ Op = &Simulator::umlal;
+ }
+ break;
+ case NEON_SMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlsl2;
+ } else {
+ Op = &Simulator::smlsl;
+ }
+ break;
+ case NEON_UMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlsl2;
+ } else {
+ Op = &Simulator::umlsl;
+ }
+ break;
+ case NEON_SQDMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmull2;
+ } else {
+ Op = &Simulator::sqdmull;
+ }
+ break;
+ case NEON_SQDMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlal2;
+ } else {
+ Op = &Simulator::sqdmlal;
+ }
+ break;
+ case NEON_SQDMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlsl2;
+ } else {
+ Op = &Simulator::sqdmlsl;
+ }
+ break;
+ default:
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+
+ vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
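+  // imm5 encodes both the element size and the element index: the position of
+  // the lowest set bit selects the size (B, H, S or D), and the bits above it
+  // give the lane index.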
+ int imm5 = instr->ImmNEON5();
+ int tz = CountTrailingZeros(imm5, 32);
+ int reg_index = imm5 >> (tz + 1);
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ int imm4 = instr->ImmNEON4();
+ int rn_index = imm4 >> tz;
+ ins_element(vf, rd, reg_index, rn, rn_index);
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
+ uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
+ value &= MaxUintFromFormat(vf);
+ set_xreg(instr->Rd(), value);
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
+ int64_t value = LogicVRegister(rn).Int(vf, reg_index);
+ if (instr->NEONQ()) {
+ set_xreg(instr->Rd(), value);
+ } else {
+ set_wreg(instr->Rd(), (int32_t)value);
+ }
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ dup_element(vf, rd, rn, reg_index);
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ dup_immediate(vf, rd, xreg(instr->Rn()));
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONExtract(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ int index = instr->ImmNEONExt();
+ ext(vf, rd, rn, rm, index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
+ int reg_size = RegisterSizeInBytesFromFormat(vf);
+
+ int reg[4];
+ uint64_t addr[4];
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
+ addr[i] = addr_base + (i * reg_size);
+ }
+ int count = 1;
+ bool log_read = true;
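+  // count records how many registers are transferred so that each one can be
+  // logged after the access.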
+
+ Instr itype = instr->Mask(NEONLoadStoreMultiStructMask);
+ if (((itype == NEON_LD1_1v) || (itype == NEON_LD1_2v) ||
+ (itype == NEON_LD1_3v) || (itype == NEON_LD1_4v) ||
+ (itype == NEON_ST1_1v) || (itype == NEON_ST1_2v) ||
+ (itype == NEON_ST1_3v) || (itype == NEON_ST1_4v)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_4v:
+ case NEON_LD1_4v_post: ld1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_3v:
+ case NEON_LD1_3v_post: ld1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_2v:
+ case NEON_LD1_2v_post: ld1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_1v:
+ case NEON_LD1_1v_post:
+ ld1(vf, vreg(reg[0]), addr[0]);
+ log_read = true;
+ break;
+ case NEON_ST1_4v:
+ case NEON_ST1_4v_post: st1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_3v:
+ case NEON_ST1_3v_post: st1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_2v:
+ case NEON_ST1_2v_post: st1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_1v:
+ case NEON_ST1_1v_post:
+ st1(vf, vreg(reg[0]), addr[0]);
+ log_read = false;
+ break;
+ case NEON_LD2_post:
+ case NEON_LD2:
+ ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_ST2:
+ case NEON_ST2_post:
+ st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_LD3_post:
+ case NEON_LD3:
+ ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST3:
+ case NEON_ST3_post:
+ st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST4:
+ case NEON_ST4_post:
+ st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ case NEON_LD4_post:
+ case NEON_LD4:
+ ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Explicitly log the register update whilst we have type information.
+ for (int i = 0; i < count; i++) {
+ // For de-interleaving loads, only print the base address.
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
+ GetPrintRegisterFormatForSize(reg_size, lane_size));
+ if (log_read) {
+ LogVRead(addr_base, reg[i], format);
+ } else {
+ LogVWrite(addr_base, reg[i], format);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_base += (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count
+ : xreg(rm);
+ set_xreg(instr->Rn(), addr_base);
+ } else {
+ VIXL_ASSERT(addr_mode == Offset);
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
+ int rt = instr->Rt();
+
+ Instr itype = instr->Mask(NEONLoadStoreSingleStructMask);
+ if (((itype == NEON_LD1_b) || (itype == NEON_LD1_h) ||
+ (itype == NEON_LD1_s) || (itype == NEON_LD1_d)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ bool do_load = false;
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf_t = nfd.GetVectorFormat();
+
+ VectorFormat vf = kFormat16B;
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b:
+ case NEON_LD1_b_post:
+ case NEON_LD2_b:
+ case NEON_LD2_b_post:
+ case NEON_LD3_b:
+ case NEON_LD3_b_post:
+ case NEON_LD4_b:
+ case NEON_LD4_b_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_b:
+ case NEON_ST1_b_post:
+ case NEON_ST2_b:
+ case NEON_ST2_b_post:
+ case NEON_ST3_b:
+ case NEON_ST3_b_post:
+ case NEON_ST4_b:
+ case NEON_ST4_b_post: break;
+
+ case NEON_LD1_h:
+ case NEON_LD1_h_post:
+ case NEON_LD2_h:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h:
+ case NEON_LD4_h_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_h:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h:
+ case NEON_ST4_h_post: vf = kFormat8H; break;
+ case NEON_LD1_s:
+ case NEON_LD1_s_post:
+ case NEON_LD2_s:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s:
+ case NEON_LD4_s_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_s:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s:
+ case NEON_ST4_s_post: {
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post);
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post);
+ vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
+ break;
+ }
+
+ case NEON_LD1R:
+ case NEON_LD1R_post: {
+ vf = vf_t;
+ ld1r(vf, vreg(rt), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD2R:
+ case NEON_LD2R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ ld2r(vf, vreg(rt), vreg(rt2), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD3R:
+ case NEON_LD3R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD4R:
+ case NEON_LD4R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
+ do_load = true;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ PrintRegisterFormat print_format =
+ GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
+ // Make sure that the print_format only includes a single lane.
+ print_format =
+ static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
+
+ int esize = LaneSizeInBytesFromFormat(vf);
+ int index_shift = LaneSizeInBytesLog2FromFormat(vf);
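+  // The lane index is encoded across the Q, S and size fields; index_shift
+  // drops the low bits that overlap with the element size.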
+ int lane = instr->NEONLSIndex(index_shift);
+ int scale = 0;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
+ case NEONLoadStoreSingle1:
+ scale = 1;
+ if (do_load) {
+ ld1(vf, vreg(rt), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ } else {
+ st1(vf, vreg(rt), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle2:
+ scale = 2;
+ if (do_load) {
+ ld2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ } else {
+ st2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle3:
+ scale = 3;
+ if (do_load) {
+ ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ } else {
+ st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle4:
+ scale = 4;
+ if (do_load) {
+ ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ LogVRead(addr + (3 * esize), rt4, print_format, lane);
+ } else {
+ st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ LogVWrite(addr + (3 * esize), rt4, print_format, lane);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ int cmode = instr->NEONCmode();
+ int cmode_3_1 = (cmode >> 1) & 7;
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op_bit = instr->NEONModImmOp();
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+
+ // Find the format and immediate value
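+  // For the shifted-immediate forms, imm8 is placed in the byte selected by
+  // cmode (cmode<3:1> for 32-bit lanes, cmode<1> for 16-bit lanes). cmode 0x6
+  // is the "shifting ones" variant, where the bytes below imm8 are set to 0xff.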
+ uint64_t imm = 0;
+ VectorFormat vform = kFormatUndefined;
+ switch (cmode_3_1) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ imm = imm8 << (8 * cmode_3_1);
+ break;
+ case 0x4:
+ case 0x5:
+ vform = (q == 1) ? kFormat8H : kFormat4H;
+ imm = imm8 << (8 * cmode_1);
+ break;
+ case 0x6:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ if (cmode_0 == 0) {
+ imm = imm8 << 8 | 0x000000ff;
+ } else {
+ imm = imm8 << 16 | 0x0000ffff;
+ }
+ break;
+ case 0x7:
+ if (cmode_0 == 0 && op_bit == 0) {
+ vform = q ? kFormat16B : kFormat8B;
+ imm = imm8;
+ } else if (cmode_0 == 0 && op_bit == 1) {
+ vform = q ? kFormat2D : kFormat1D;
+ imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1 << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ } else { // cmode_0 == 1, cmode == 0xf.
+ if (op_bit == 0) {
+ vform = q ? kFormat4S : kFormat2S;
+ imm = float_to_rawbits(instr->ImmNEONFP32());
+ } else if (q == 1) {
+ vform = kFormat2D;
+ imm = double_to_rawbits(instr->ImmNEONFP64());
+ } else {
+ VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ VisitUnallocated(instr);
+ }
+ }
+ break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+
+ // Find the operation
+ NEONModifiedImmediateOp op;
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<3> == '1'
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<2> == '1'
+ if (cmode_1 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<1> == '1'
+ if (cmode_0 == 0) {
+ op = NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = NEONModifiedImmediate_MOVI;
+ }
+ }
+ }
+ }
+
+ // Call the logic function
+ if (op == NEONModifiedImmediate_ORR) {
+ orr(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_BIC) {
+ bic(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_MOVI) {
+ movi(vform, rd, imm);
+ } else if (op == NEONModifiedImmediate_MVNI) {
+ mvni(vform, rd, imm);
+ } else {
+ VisitUnimplemented(instr);
+ }
+}
+
+
+void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMEQ_zero_scalar: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMGE_zero_scalar: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMGT_zero_scalar: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMLT_zero_scalar: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_CMLE_zero_scalar: cmp(vf, rd, rn, 0, le); break;
+ case NEON_ABS_scalar: abs(vf, rd, rn); break;
+ case NEON_SQABS_scalar: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_NEG_scalar: neg(vf, rd, rn); break;
+ case NEON_SQNEG_scalar: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SUQADD_scalar: suqadd(vf, rd, rn); break;
+ case NEON_USQADD_scalar: usqadd(vf, rd, rn); break;
+ default: VIXL_UNIMPLEMENTED(); break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRECPE_scalar: frecpe(fpf, rd, rn, fpcr_rounding); break;
+ case NEON_FRECPX_scalar: frecpx(fpf, rd, rn); break;
+ case NEON_FRSQRTE_scalar: frsqrte(fpf, rd, rn); break;
+ case NEON_FCMGT_zero_scalar: fcmp_zero(fpf, rd, rn, gt); break;
+ case NEON_FCMGE_zero_scalar: fcmp_zero(fpf, rd, rn, ge); break;
+ case NEON_FCMEQ_zero_scalar: fcmp_zero(fpf, rd, rn, eq); break;
+ case NEON_FCMLE_zero_scalar: fcmp_zero(fpf, rd, rn, le); break;
+ case NEON_FCMLT_zero_scalar: fcmp_zero(fpf, rd, rn, lt); break;
+ case NEON_SCVTF_scalar: scvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_UCVTF_scalar: ucvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_FCVTNS_scalar: fcvts(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTNU_scalar: fcvtu(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTPS_scalar: fcvts(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTPU_scalar: fcvtu(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTMS_scalar: fcvts(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTMU_scalar: fcvtu(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTZS_scalar: fcvts(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTZU_scalar: fcvtu(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTAS_scalar: fcvts(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTAU_scalar: fcvtu(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTXN_scalar:
+ // Unlike all of the other FP instructions above, fcvtxn encodes dest
+ // size S as size<0>=1. There's only one case, so we ignore the form.
+ VIXL_ASSERT(instr->Bit(22) == 1);
+ fcvtxn(kFormatS, rd, rn);
+ break;
+ default:
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: sqxtn(vf, rd, rn); break;
+ case NEON_UQXTN_scalar: uqxtn(vf, rd, rn); break;
+ case NEON_SQXTUN_scalar: sqxtun(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar: sqdmlal(vf, rd, rn, rm); break;
+ case NEON_SQDMLSL_scalar: sqdmlsl(vf, rd, rn, rm); break;
+ case NEON_SQDMULL_scalar: sqdmull(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FMULX_scalar: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE_scalar: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT_scalar: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ_scalar: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE_scalar: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT_scalar: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS_scalar: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS_scalar: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD_scalar: fabd(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: add(vf, rd, rn, rm); break;
+ case NEON_SUB_scalar: sub(vf, rd, rn, rm); break;
+ case NEON_CMEQ_scalar: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE_scalar: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT_scalar: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI_scalar: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS_scalar: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST_scalar: cmptst(vf, rd, rn, rm); break;
+ case NEON_USHL_scalar: ushl(vf, rd, rn, rm); break;
+ case NEON_SSHL_scalar: sshl(vf, rd, rn, rm); break;
+ case NEON_SQDMULH_scalar: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH_scalar: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQADD_scalar:
+ add(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQADD_scalar:
+ add(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSUB_scalar:
+ sub(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSUB_scalar:
+ sub(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSHL_scalar:
+ ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSHL_scalar:
+ sshl(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_URSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_SRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_UQRSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar: Op = &Simulator::sqdmull; break;
+ case NEON_SQDMLAL_byelement_scalar: Op = &Simulator::sqdmlal; break;
+ case NEON_SQDMLSL_byelement_scalar: Op = &Simulator::sqdmlsl; break;
+ case NEON_SQDMULH_byelement_scalar:
+ Op = &Simulator::sqdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ Op = &Simulator::sqrdmulh;
+ vf = vf_r;
+ break;
+ default:
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement_scalar: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement_scalar: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement_scalar: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ int imm5 = instr->ImmNEON5();
+ int tz = CountTrailingZeros(imm5, 32);
+ int rn_index = imm5 >> (tz + 1);
+ dup_element(vf, rd, rn, rn_index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: addp(vf, rd, rn); break;
+ case NEON_FADDP_scalar: faddp(vf, rd, rn); break;
+ case NEON_FMAXP_scalar: fmaxp(vf, rd, rn); break;
+ case NEON_FMAXNMP_scalar: fmaxnmp(vf, rd, rn); break;
+ case NEON_FMINP_scalar: fminp(vf, rd, rn); break;
+ case NEON_FMINNMP_scalar: fminnmp(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
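+  // The lane size in bits is (8 << HighestSetBit(immh)). Right shifts are
+  // encoded as (2 * lane_size) - shift and left shifts as lane_size + shift,
+  // so both can be recovered from immh:immb.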
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_SHL_scalar: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI_scalar: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHL_imm_scalar: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm_scalar: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU_scalar: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI_scalar: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR_scalar: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR_scalar: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR_scalar: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR_scalar: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA_scalar: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA_scalar: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA_scalar: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA_scalar: ursra(vf, rd, rn, right_shift); break;
+ case NEON_UQSHRN_scalar: uqshrn(vf, rd, rn, right_shift); break;
+ case NEON_UQRSHRN_scalar: uqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRN_scalar: sqshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRN_scalar: sqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRUN_scalar: sqshrun(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRUN_scalar: sqrshrun(vf, rd, rn, right_shift); break;
+ case NEON_FCVTZS_imm_scalar: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm_scalar: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SCVTF_imm_scalar:
+ scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_UCVTF_imm_scalar:
+ ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_l = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+ VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
+
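+  // immh:immb encode shifts as in the scalar case above: the lane size in bits
+  // is (8 << HighestSetBit(immh)), right shifts are (2 * lane_size) - shift
+  // and left shifts are lane_size + shift.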
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SHL: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA: ursra(vf, rd, rn, right_shift); break;
+ case NEON_SQSHL_imm: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SCVTF_imm: scvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_UCVTF_imm: ucvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_FCVTZS_imm: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SSHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ sshll2(vf, rd, rn, left_shift);
+ } else {
+ sshll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_USHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ ushll2(vf, rd, rn, left_shift);
+ } else {
+ ushll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_SHRN:
+ if (instr->Mask(NEON_Q)) {
+ shrn2(vf, rd, rn, right_shift);
+ } else {
+ shrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_RSHRN:
+ if (instr->Mask(NEON_Q)) {
+ rshrn2(vf, rd, rn, right_shift);
+ } else {
+ rshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONTable(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
+ SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
+ SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: tbl(vf, rd, rn, rm); break;
+ case NEON_TBL_2v: tbl(vf, rd, rn, rn2, rm); break;
+ case NEON_TBL_3v: tbl(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBL_4v: tbl(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ case NEON_TBX_1v: tbx(vf, rd, rn, rm); break;
+ case NEON_TBX_2v: tbx(vf, rd, rn, rn2, rm); break;
+ case NEON_TBX_3v: tbx(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBX_4v: tbx(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONPerm(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: trn1(vf, rd, rn, rm); break;
+ case NEON_TRN2: trn2(vf, rd, rn, rm); break;
+ case NEON_UZP1: uzp1(vf, rd, rn, rm); break;
+ case NEON_UZP2: uzp2(vf, rd, rn, rm); break;
+ case NEON_ZIP1: zip1(vf, rd, rn, rm); break;
+ case NEON_ZIP2: zip2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::DoUnreachable(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kUnreachableOpcode));
+
+ fprintf(stream_, "Hit UNREACHABLE marker at pc=%p.\n",
+ reinterpret_cast<const void*>(instr));
+ abort();
+}
+
+
+void Simulator::DoTrace(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kTraceOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+ uint32_t command;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+ memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
+
+ switch (command) {
+ case TRACE_ENABLE:
+ set_trace_parameters(trace_parameters() | parameters);
+ break;
+ case TRACE_DISABLE:
+ set_trace_parameters(trace_parameters() & ~parameters);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ set_pc(instr->InstructionAtOffset(kTraceLength));
+}
+
+
+void Simulator::DoLog(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kLogOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+
+ // We don't support a one-shot LOG_DISASM.
+ VIXL_ASSERT((parameters & LOG_DISASM) == 0);
+ // Print the requested information.
+ if (parameters & LOG_SYSREGS) PrintSystemRegisters();
+ if (parameters & LOG_REGS) PrintRegisters();
+ if (parameters & LOG_VREGS) PrintVRegisters();
+
+ set_pc(instr->InstructionAtOffset(kLogLength));
+}
+
+
+void Simulator::DoPrintf(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kPrintfOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t arg_count;
+ uint32_t arg_pattern_list;
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&arg_count,
+ instr + kPrintfArgCountOffset,
+ sizeof(arg_count));
+ memcpy(&arg_pattern_list,
+ instr + kPrintfArgPatternListOffset,
+ sizeof(arg_pattern_list));
+
+ VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+ VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+ // We need to call the host printf function with a set of arguments defined by
+ // arg_pattern_list. Because we don't know the types and sizes of the
+ // arguments, this is very difficult to do in a robust and portable way. To
+ // work around the problem, we pick apart the format string, and print one
+ // format placeholder at a time.
+
+ // Allocate space for the format string. We take a copy, so we can modify it.
+ // Leave enough space for one extra character per expected argument (plus the
+ // '\0' termination).
+ const char * format_base = reg<const char *>(0);
+ VIXL_ASSERT(format_base != NULL);
+ size_t length = strlen(format_base) + 1;
+ char * const format = (char *)js_calloc(length + arg_count);
+
+ // A list of chunks, each with exactly one format placeholder.
+ const char * chunks[kPrintfMaxArgCount];
+
+ // Copy the format string and search for format placeholders.
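+  // For example, "x=%d y=%f\n" is split into the literal prefix "x=" plus the
+  // chunks "%d y=" and "%f\n", each starting with exactly one placeholder.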
+ uint32_t placeholder_count = 0;
+ char * format_scratch = format;
+ for (size_t i = 0; i < length; i++) {
+ if (format_base[i] != '%') {
+ *format_scratch++ = format_base[i];
+ } else {
+ if (format_base[i + 1] == '%') {
+ // Ignore explicit "%%" sequences.
+ *format_scratch++ = format_base[i];
+ i++;
+ // Chunks after the first are passed as format strings to printf, so we
+ // need to escape '%' characters in those chunks.
+ if (placeholder_count > 0) *format_scratch++ = format_base[i];
+ } else {
+ VIXL_CHECK(placeholder_count < arg_count);
+ // Insert '\0' before placeholders, and store their locations.
+ *format_scratch++ = '\0';
+ chunks[placeholder_count++] = format_scratch;
+ *format_scratch++ = format_base[i];
+ }
+ }
+ }
+ VIXL_CHECK(placeholder_count == arg_count);
+
+ // Finally, call printf with each chunk, passing the appropriate register
+ // argument. Normally, printf returns the number of bytes transmitted, so we
+ // can emulate a single printf call by adding the result from each chunk. If
+ // any call returns a negative (error) value, though, just return that value.
+
+ printf("%s", clr_printf);
+
+ // Because '\0' is inserted before each placeholder, the first string in
+ // 'format' contains no format placeholders and should be printed literally.
+ int result = printf("%s", format);
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
+ if (result >= 0) {
+ for (uint32_t i = 0; i < placeholder_count; i++) {
+ int part_result = -1;
+
+ uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+ arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+ switch (arg_pattern) {
+ case kPrintfArgW: part_result = printf(chunks[i], wreg(pcs_r++)); break;
+ case kPrintfArgX: part_result = printf(chunks[i], xreg(pcs_r++)); break;
+ case kPrintfArgD: part_result = printf(chunks[i], dreg(pcs_f++)); break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ if (part_result < 0) {
+ // Handle error values.
+ result = part_result;
+ break;
+ }
+
+ result += part_result;
+ }
+ }
+
+ printf("%s", clr_normal);
+
+ // Printf returns its result in x0 (just like the C library's printf).
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(instr->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ js_free(format);
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.h b/js/src/jit/arm64/vixl/Simulator-vixl.h
new file mode 100644
index 000000000..8755ad671
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -0,0 +1,2677 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_A64_H_
+#define VIXL_A64_SIMULATOR_A64_H_
+
+#include "js-config.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "mozilla/Vector.h"
+
+#include "jsalloc.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Disasm-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "jit/IonTypes.h"
+#include "vm/MutexIDs.h"
+#include "vm/PosixNSPR.h"
+
+#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
+ JS_BEGIN_MACRO \
+ if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \
+ js::ReportOverRecursed(cx); \
+ onerror; \
+ } \
+ JS_END_MACRO
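+
+// A typical (hypothetical) use, assuming a JSContext* cx is in scope:
+//
+//   JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, 4096, return false);
+//
+// reports over-recursion and runs 'return false' when the simulated stack
+// cannot accommodate 4096 extra bytes.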
+
+namespace vixl {
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ VIXL_ASSERT((sign == 0) || (sign == 1));
+
+ // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ //            These bits fit into the result.
+ //               |---------------------|
+ //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ //                                     ||
+ //                                    / |
+ //                                   /  halfbit
+ //                             onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ //  In the table below, the first column is the input fraction 'frac' (its
+ //  'onebit' is the digit immediately left of the point and its 'halfbit' the
+ //  digit immediately right of it), the middle column is the 'adjusted'
+ //  value, and the last column is the rounded result:
+ //
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+ //                 ...
+ //
+ //  where the middle column is computed as
+ //      adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
+ //  and the rounded result in the last column as
+ //      mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return static_cast<T>(sign << sign_offset);
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: the input is too large for the result type to represent.
+ if (round_mode == FPTieEven) {
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ // FPRoundOdd rounding mode handles overflows using the largest magnitude
+ // normal number.
+ exponent = max_normal_exponent;
+ mantissa = (UINT64_C(1) << exponent_offset) - 1;
+ }
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset));
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ if (round_mode == FPTieEven) {
+ // The result will always be +/-0.0.
+ return static_cast<T>(sign << sign_offset);
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ VIXL_ASSERT(mantissa != 0);
+ // For FPRoundOdd, if the mantissa is too small to represent and
+ // non-zero, return the next "odd" value.
+ return static_cast<T>((sign << sign_offset) | 1);
+ }
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ if (round_mode == FPTieEven) {
+ // We have to shift the mantissa to the right. Some precision is lost, so
+ // we need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+ uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+ uint64_t adjusted = mantissa - adjustment;
+ T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+ T result = static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+
+ // A very large mantissa can overflow during rounding. If this happens,
+ // the exponent should be incremented and the mantissa set to 1.0
+ // (encoded as 0). Applying halfbit_adjusted after assembling the float
+ // has the nice side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ // If any bits at position halfbit or below are set, onebit (i.e. the
+ // bottom bit of the resulting mantissa) must be set.
+ uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+ if (fractional_bits != 0) {
+ mantissa |= UINT64_C(1) << shift;
+ }
+
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+ }
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset));
+ }
+}
+
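+// A small worked illustration of the FPTieEven adjustment above (not part of
+// the original VIXL comments): with shift == 2, a mantissa of 0b1010 (2.5) has
+// onebit == 0 and halfbit == 1, so adjusted == 0b1001 and halfbit(adjusted)
+// == 0, and the kept bits 0b10 are returned unchanged (ties go to the even
+// value). A mantissa of 0b1110 (3.5) has onebit == 1, so adjusted == 0b1110
+// and halfbit(adjusted) == 1, and the result becomes 0b11 + 1 == 0b100, with
+// the carry propagating into the exponent as described above.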
+
+// Representation of memory, with typed getters and setters for access.
+class Memory {
+ public:
+ template <typename T>
+ static T AddressUntag(T address) {
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ uint64_t bits = (uint64_t)address;
+ return (T)(bits & ~kAddressTagMask);
+ }
+
+ template <typename T, typename A>
+ static T Read(A address) {
+ T value;
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(&value, reinterpret_cast<const char *>(address), sizeof(value));
+ return value;
+ }
+
+ template <typename T, typename A>
+ static void Write(A address, T value) {
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(reinterpret_cast<char *>(address), &value, sizeof(value));
+ }
+};
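+
+// Usage sketch (illustrative, not part of the original code): the simulator's
+// load/store handlers build on these accessors, e.g.
+//
+//   uint32_t w = Memory::Read<uint32_t>(addr);  // 32-bit load
+//   Memory::Write<uint64_t>(addr + 8, w + 1);   // 64-bit store
+//
+// Both calls untag 'addr' first, so tagged addresses are accepted.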
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ SimRegisterBase() : written_since_last_log_(false) {}
+
+ // Write the specified value. The value is zero-extended if necessary.
+ template<typename T>
+ void Set(T new_value) {
+ VIXL_STATIC_ASSERT(sizeof(new_value) <= kSizeInBytes);
+ if (sizeof(new_value) < kSizeInBytes) {
+ // All AArch64 registers are zero-extending.
+ memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
+ }
+ memcpy(value_, &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Insert a typed value into a register, leaving the rest of the register
+ // unchanged. The lane parameter indicates where in the register the value
+ // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
+ // 0 represents the least significant bits.
+ template<typename T>
+ void Insert(int lane, T new_value) {
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(new_value) +
+ (lane * sizeof(new_value))) <= kSizeInBytes);
+ memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Read the value as the specified type. The value is truncated if necessary.
+ template<typename T>
+ T Get(int lane = 0) const {
+ T result;
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(result) + (lane * sizeof(result))) <= kSizeInBytes);
+ memcpy(&result, &value_[lane * sizeof(result)], sizeof(result));
+ return result;
+ }
+
+ // TODO: Make this return a map of updated bytes, so that we can highlight
+ // updated lanes for load-and-insert. (That never happens for scalar code, but
+ // NEON has some instructions that can update individual lanes.)
+ bool WrittenSinceLastLog() const {
+ return written_since_last_log_;
+ }
+
+ void NotifyRegisterLogged() {
+ written_since_last_log_ = false;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+
+ // Helpers to aid with register tracing.
+ bool written_since_last_log_;
+
+ void NotifyRegisterWrite() {
+ written_since_last_log_ = true;
+ }
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
+typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister; // v0-v31
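+
+// Illustrative use of the accessors above (not part of the original code):
+//
+//   SimRegister x;
+//   x.Set<uint32_t>(0xffffffff);    // W-sized write, zero-extended to X size
+//   uint64_t q = x.Get<uint64_t>(); // reads back 0x00000000ffffffff
+//   x.Insert<uint16_t>(1, 0xabcd);  // patches bytes 2-3 only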
+
+// Representation of a vector register, with typed getters and setters for lanes
+// and additional information to represent lane state.
+class LogicVRegister {
+ public:
+ inline LogicVRegister(SimVRegister& other) // NOLINT
+ : register_(other) {
+ for (unsigned i = 0; i < sizeof(saturated_) / sizeof(saturated_[0]); i++) {
+ saturated_[i] = kNotSaturated;
+ }
+ for (unsigned i = 0; i < sizeof(round_) / sizeof(round_[0]); i++) {
+ round_[i] = 0;
+ }
+ }
+
+ int64_t Int(VectorFormat vform, int index) const {
+ int64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<int8_t>(index); break;
+ case 16: element = register_.Get<int16_t>(index); break;
+ case 32: element = register_.Get<int32_t>(index); break;
+ case 64: element = register_.Get<int64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ uint64_t Uint(VectorFormat vform, int index) const {
+ uint64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<uint8_t>(index); break;
+ case 16: element = register_.Get<uint16_t>(index); break;
+ case 32: element = register_.Get<uint32_t>(index); break;
+ case 64: element = register_.Get<uint64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ int64_t IntLeftJustified(VectorFormat vform, int index) const {
+ return Int(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ uint64_t UintLeftJustified(VectorFormat vform, int index) const {
+ return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ void SetInt(VectorFormat vform, int index, int64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<int8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<int16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<int32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<int64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void SetUint(VectorFormat vform, int index, uint64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<uint8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<uint16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<uint32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<uint64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, Memory::Read<uint8_t>(addr)); break;
+ case 16: register_.Insert(index, Memory::Read<uint16_t>(addr)); break;
+ case 32: register_.Insert(index, Memory::Read<uint32_t>(addr)); break;
+ case 64: register_.Insert(index, Memory::Read<uint64_t>(addr)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const {
+ uint64_t value = Uint(vform, index);
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: Memory::Write(addr, static_cast<uint8_t>(value)); break;
+ case 16: Memory::Write(addr, static_cast<uint16_t>(value)); break;
+ case 32: Memory::Write(addr, static_cast<uint32_t>(value)); break;
+ case 64: Memory::Write(addr, value); break;
+ }
+ }
+
+ template <typename T>
+ T Float(int index) const {
+ return register_.Get<T>(index);
+ }
+
+ template <typename T>
+ void SetFloat(int index, T value) const {
+ register_.Insert(index, value);
+ }
+
+ // When setting a result in a register of size less than Q, the top bits of
+ // the Q register must be cleared.
+ void ClearForWrite(VectorFormat vform) const {
+ unsigned size = RegisterSizeInBytesFromFormat(vform);
+ for (unsigned i = size; i < kQRegSizeInBytes; i++) {
+ SetUint(kFormat16B, i, 0);
+ }
+ }
+
+ // Saturation state for each lane of a vector.
+ enum Saturation {
+ kNotSaturated = 0,
+ kSignedSatPositive = 1 << 0,
+ kSignedSatNegative = 1 << 1,
+ kSignedSatMask = kSignedSatPositive | kSignedSatNegative,
+ kSignedSatUndefined = kSignedSatMask,
+ kUnsignedSatPositive = 1 << 2,
+ kUnsignedSatNegative = 1 << 3,
+ kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative,
+ kUnsignedSatUndefined = kUnsignedSatMask
+ };
+
+ // Getters for saturation state.
+ Saturation GetSignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kSignedSatMask);
+ }
+
+ Saturation GetUnsignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask);
+ }
+
+ // Setters for saturation state.
+ void ClearSat(int index) {
+ saturated_[index] = kNotSaturated;
+ }
+
+ void SetSignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative);
+ }
+
+ void SetUnsignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative);
+ }
+
+ void SetSatFlag(int index, Saturation sat) {
+ saturated_[index] = static_cast<Saturation>(saturated_[index] | sat);
+ VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined);
+ VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined);
+ }
+
+ // Saturate lanes of a vector based on saturation state.
+ LogicVRegister& SignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetSignedSaturation(i);
+ if (sat == kSignedSatPositive) {
+ SetInt(vform, i, MaxIntFromFormat(vform));
+ } else if (sat == kSignedSatNegative) {
+ SetInt(vform, i, MinIntFromFormat(vform));
+ }
+ }
+ return *this;
+ }
+
+ LogicVRegister& UnsignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetUnsignedSaturation(i);
+ if (sat == kUnsignedSatPositive) {
+ SetUint(vform, i, MaxUintFromFormat(vform));
+ } else if (sat == kUnsignedSatNegative) {
+ SetUint(vform, i, 0);
+ }
+ }
+ return *this;
+ }
+
+ // Getter for rounding state.
+ bool GetRounding(int index) {
+ return round_[index];
+ }
+
+ // Setter for rounding state.
+ void SetRounding(int index, bool round) {
+ round_[index] = round;
+ }
+
+ // Round lanes of a vector based on rounding state.
+ LogicVRegister& Round(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetInt(vform, i, Int(vform, i) + (GetRounding(i) ? 1 : 0));
+ }
+ return *this;
+ }
+
+ // Unsigned halve lanes of a vector, and use the saturation state to set the
+ // top bit.
+ LogicVRegister& Uhalve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t val = Uint(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetUnsignedSaturation(i) != kNotSaturated) {
+ // If the operation causes unsigned saturation, the bit shifted into the
+ // most significant bit must be set.
+ val |= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ // Signed halve lanes of a vector, and use the saturation state to set the
+ // top bit.
+ LogicVRegister& Halve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t val = Int(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetSignedSaturation(i) != kNotSaturated) {
+ // If the operation causes signed saturation, the sign bit must be
+ // inverted.
+ val ^= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ private:
+ SimVRegister& register_;
+
+ // Allocate one saturation state entry per lane; largest register is type Q,
+ // and lanes can be a minimum of one byte wide.
+ Saturation saturated_[kQRegSizeInBytes];
+
+ // Allocate one rounding state entry per lane.
+ bool round_[kQRegSizeInBytes];
+};
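+
+// Illustrative use of LogicVRegister (not part of the original code): the NEON
+// helpers wrap a SimVRegister and then address it one lane at a time, e.g.
+//
+//   SimVRegister raw;
+//   LogicVRegister v(raw);
+//   v.SetUint(kFormat4S, 0, 0xffffffff);  // lane 0 of a 4 x 32-bit vector
+//   uint64_t lane0 = v.Uint(kFormat4S, 0);
+//
+// Saturating helpers record per-lane state with SetSignedSat() or
+// SetUnsignedSat() and then clamp the result with SignedSaturate() or
+// UnsignedSaturate().
+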
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ uint32_t Name() const { return Func(HighBit, LowBit); } \
+ void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
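+
+// Illustrative example (not part of the original code): writes through
+// SetRawValue() only affect bits outside the write-ignore mask, so
+//
+//   SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+//   nzcv.SetRawValue(0xffffffff);
+//
+// leaves the read-as-zero bits at zero and only sets the register's writable
+// flag bits.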
+
+
+class SimExclusiveLocalMonitor {
+ public:
+ SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) {
+ Clear();
+ }
+
+ // Clear the exclusive monitor (like clrex).
+ void Clear() {
+ address_ = 0;
+ size_ = 0;
+ }
+
+ // Clear the exclusive monitor most of the time.
+ void MaybeClear() {
+ if ((seed_ % kSkipClearProbability) != 0) {
+ Clear();
+ }
+
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ }
+
+ // Mark the address range for exclusive access (like load-exclusive).
+ void MarkExclusive(uint64_t address, size_t size) {
+ address_ = address;
+ size_ = size;
+ }
+
+ // Return true if the address range is marked (like store-exclusive).
+ // This helper doesn't implicitly clear the monitor.
+ bool IsExclusive(uint64_t address, size_t size) {
+ VIXL_ASSERT(size > 0);
+ // Be pedantic: Require both the address and the size to match.
+ return (size == size_) && (address == address_);
+ }
+
+ private:
+ uint64_t address_;
+ size_t size_;
+
+ const int kSkipClearProbability;
+ uint32_t seed_;
+};
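+
+// Illustrative sequence (not part of the original code) showing how a
+// load-exclusive / store-exclusive pair maps onto this monitor:
+//
+//   monitor.MarkExclusive(addr, 8);          // ldxr x0, [addr]
+//   bool ok = monitor.IsExclusive(addr, 8);  // stxr succeeds only if marked
+//   monitor.Clear();                         // the store-exclusive then clears it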
+
+
+// We can't accurately simulate the global monitor since it depends on external
+// influences. Instead, this implementation occasionally causes accesses to
+// fail, according to kPassProbability.
+class SimExclusiveGlobalMonitor {
+ public:
+ SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {}
+
+ bool IsExclusive(uint64_t address, size_t size) {
+ USE(address, size);
+
+ bool pass = (seed_ % kPassProbability) != 0;
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ return pass;
+ }
+
+ private:
+ const int kPassProbability;
+ uint32_t seed_;
+};
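+
+// With kPassProbability == 8, the check above reports success for roughly
+// seven out of every eight attempts; the Park-Miller-style LCG
+// (seed * 48271 mod 2^31 - 1) keeps the failure pattern deterministic and
+// reproducible between runs.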
+
+class Redirection;
+
+class Simulator : public DecoderVisitor {
+ friend class AutoLockSimulatorCache;
+
+ public:
+ explicit Simulator(Decoder* decoder, FILE* stream = stdout);
+ ~Simulator();
+
+ // Moz changes.
+ void init(Decoder* decoder, FILE* stream);
+ static Simulator* Current();
+ static Simulator* Create(JSContext* cx);
+ static void Destroy(Simulator* sim);
+ uintptr_t stackLimit() const;
+ uintptr_t* addressOfStackLimit();
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+ int64_t call(uint8_t* entry, int argument_count, ...);
+ void setRedirection(Redirection* redirection);
+ Redirection* redirection() const;
+ static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type);
+ void setGPR32Result(int32_t result);
+ void setGPR64Result(int64_t result);
+ void setFP32Result(float result);
+ void setFP64Result(double result);
+ void VisitCallRedirection(const Instruction* instr);
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ void ResetState();
+
+ // Run the simulator.
+ virtual void Run();
+ void RunFrom(const Instruction* first);
+
+ // Simulation helpers.
+ const Instruction* pc() const { return pc_; }
+ const Instruction* get_pc() const { return pc_; }
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
+
+ void set_pc(const Instruction* new_pc) {
+ pc_ = Memory::AddressUntag(new_pc);
+ pc_modified_ = true;
+ }
+
+ void set_resume_pc(void* new_resume_pc);
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->NextInstruction();
+ }
+
+ pc_modified_ = false;
+ }
+
+ void ExecuteInstruction();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr);
+ VISITOR_LIST_THAT_RETURN(DECLARE)
+ VISITOR_LIST_THAT_DONT_RETURN(DECLARE)
+ #undef DECLARE
+
+
+ // Integer register accessors.
+
+ // Basic accessor: Read the register as the specified type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ uint64_t raw;
+ switch (size) {
+ case kWRegSize: raw = reg<uint32_t>(code, r31mode); break;
+ case kXRegSize: raw = reg<uint64_t>(code, r31mode); break;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ T result;
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ // Use int64_t by default if T is not specified.
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
+
+ enum RegLogMode {
+ LogRegWrites,
+ NoRegLog
+ };
+
+ // Write 'value' into an integer register. The value is zero-extended. This
+ // behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ VIXL_STATIC_ASSERT((sizeof(T) == kWRegSizeInBytes) ||
+ (sizeof(T) == kXRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ registers_[code].Set(value);
+
+ if (log_mode == LogRegWrites) LogRegister(code, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ // As above, with parameterized size and type. The value is either
+ // zero-extended or truncated to fit, as required.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ // Zero-extend the input.
+ uint64_t raw = 0;
+ VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw));
+ memcpy(&raw, &value, sizeof(value));
+
+ // Write (and possibly truncate) the value.
+ switch (size) {
+ case kWRegSize:
+ set_reg(code, static_cast<uint32_t>(raw), log_mode, r31mode);
+ break;
+ case kXRegSize:
+ set_reg(code, raw, log_mode, r31mode);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ return;
+ }
+ }
+
+ // Common specialized accessors for the set_reg() template.
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ set_reg(31, value, LogRegWrites, Reg31IsStackPointer);
+ }
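+
+ // Illustrative use of the integer accessors above (not part of the original
+ // code): writes are always zero-extended to 64 bits, and x31 reads as zero
+ // unless Reg31IsStackPointer is requested:
+ //
+ //   set_wreg(0, -1);                  // x0 becomes 0x00000000ffffffff
+ //   int64_t x0 = xreg(0);
+ //   int64_t sp = xreg(31, Reg31IsStackPointer);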
+
+ // Vector register accessors.
+ // These are equivalent to the integer register accessors, but for vector
+ // registers.
+
+ // A structure for representing a 128-bit Q register.
+ struct qreg_t { uint8_t val[kQRegSizeInBytes]; };
+
+ // Basic accessor: read the register as the specified type.
+ template<typename T>
+ T vreg(unsigned code) const {
+ VIXL_STATIC_ASSERT((sizeof(T) == kBRegSizeInBytes) ||
+ (sizeof(T) == kHRegSizeInBytes) ||
+ (sizeof(T) == kSRegSizeInBytes) ||
+ (sizeof(T) == kDRegSizeInBytes) ||
+ (sizeof(T) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+
+ return vregisters_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the vreg() template.
+ int8_t breg(unsigned code) const {
+ return vreg<int8_t>(code);
+ }
+
+ int16_t hreg(unsigned code) const {
+ return vreg<int16_t>(code);
+ }
+
+ float sreg(unsigned code) const {
+ return vreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return vreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return vreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return vreg<uint64_t>(code);
+ }
+
+ qreg_t qreg(unsigned code) const {
+ return vreg<qreg_t>(code);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T vreg(unsigned size, unsigned code) const {
+ uint64_t raw = 0;
+ T result;
+
+ switch (size) {
+ case kSRegSize: raw = vreg<uint32_t>(code); break;
+ case kDRegSize: raw = vreg<uint64_t>(code); break;
+ default:
+ VIXL_UNREACHABLE();
+ break;
+ }
+
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ inline SimVRegister& vreg(unsigned code) {
+ return vregisters_[code];
+ }
+
+ // Basic accessor: Write the specified value.
+ template<typename T>
+ void set_vreg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites) {
+ VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) ||
+ (sizeof(value) == kHRegSizeInBytes) ||
+ (sizeof(value) == kSRegSizeInBytes) ||
+ (sizeof(value) == kDRegSizeInBytes) ||
+ (sizeof(value) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ vregisters_[code].Set(value);
+
+ if (log_mode == LogRegWrites) {
+ LogVRegister(code, GetPrintRegisterFormat(value));
+ }
+ }
+
+ // Common specialized accessors for the set_vreg() template.
+ void set_breg(unsigned code, int8_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_hreg(unsigned code, int16_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg(unsigned code, float value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg(unsigned code, double value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_qreg(unsigned code, qreg_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ bool N() const { return nzcv_.N() != 0; }
+ bool Z() const { return nzcv_.Z() != 0; }
+ bool C() const { return nzcv_.C() != 0; }
+ bool V() const { return nzcv_.V() != 0; }
+ SimSystemRegister& nzcv() { return nzcv_; }
+
+ // TODO: Find a way to make the fpcr_ members return the proper types, so
+ // these accessors are not necessary.
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ bool DN() { return fpcr_.DN() != 0; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Specify relevant register formats for Print(V)Register and related helpers.
+ enum PrintRegisterFormat {
+ // The lane size.
+ kPrintRegLaneSizeB = 0 << 0,
+ kPrintRegLaneSizeH = 1 << 0,
+ kPrintRegLaneSizeS = 2 << 0,
+ kPrintRegLaneSizeW = kPrintRegLaneSizeS,
+ kPrintRegLaneSizeD = 3 << 0,
+ kPrintRegLaneSizeX = kPrintRegLaneSizeD,
+ kPrintRegLaneSizeQ = 4 << 0,
+
+ kPrintRegLaneSizeOffset = 0,
+ kPrintRegLaneSizeMask = 7 << 0,
+
+ // The lane count.
+ kPrintRegAsScalar = 0,
+ kPrintRegAsDVector = 1 << 3,
+ kPrintRegAsQVector = 2 << 3,
+
+ kPrintRegAsVectorMask = 3 << 3,
+
+ // Indicate floating-point format lanes. (This flag is only supported for S-
+ // and D-sized lanes.)
+ kPrintRegAsFP = 1 << 5,
+
+ // Supported combinations.
+
+ kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar,
+ kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar,
+ kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+
+ kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar,
+ kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector,
+ kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector,
+ kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar,
+ kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector,
+ kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector,
+ kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar,
+ kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector,
+ kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector,
+ kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP,
+ kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar,
+ kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector,
+ kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar
+ };
+
+ unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) {
+ return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset;
+ }
+
+ unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) {
+ if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2;
+ if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2;
+
+ // Scalar types.
+ return GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegLaneCount(PrintRegisterFormat format) {
+ unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format);
+ unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format);
+ VIXL_ASSERT(reg_size_log2 >= lane_size_log2);
+ return 1 << (reg_size_log2 - lane_size_log2);
+ }
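+
+ // For example (illustrative): kPrintReg4S has a 4-byte lane size and the
+ // kPrintRegAsQVector bit set, so GetPrintRegSizeInBytes() returns 16 and
+ // GetPrintRegLaneCount() returns 4; kPrintDReg is a scalar format, so its
+ // register and lane sizes are both 8 bytes and its lane count is 1.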
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size,
+ unsigned lane_size);
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) {
+ return GetPrintRegisterFormatForSize(size, size);
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) {
+ switch (size) {
+ default: VIXL_UNREACHABLE(); return kPrintDReg;
+ case kDRegSizeInBytes: return kPrintDReg;
+ case kSRegSizeInBytes: return kPrintSReg;
+ }
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) {
+ if ((GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) ||
+ (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) {
+ return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP);
+ }
+ return format;
+ }
+
+ template<typename T>
+ PrintRegisterFormat GetPrintRegisterFormat(T value) {
+ return GetPrintRegisterFormatForSize(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(double value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(float value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform);
+
+ // Print all registers of the specified types.
+ void PrintRegisters();
+ void PrintVRegisters();
+ void PrintSystemRegisters();
+
+ // As above, but only print the registers that have been updated.
+ void PrintWrittenRegisters();
+ void PrintWrittenVRegisters();
+
+ // As above, but respect LOG_REG and LOG_VREG.
+ void LogWrittenRegisters() {
+ if (trace_parameters() & LOG_REGS) PrintWrittenRegisters();
+ }
+ void LogWrittenVRegisters() {
+ if (trace_parameters() & LOG_VREGS) PrintWrittenVRegisters();
+ }
+ void LogAllWrittenRegisters() {
+ LogWrittenRegisters();
+ LogWrittenVRegisters();
+ }
+
+ // Print individual register values (after update).
+ void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+ void PrintVRegister(unsigned code, PrintRegisterFormat format);
+ void PrintSystemRegister(SystemRegister id);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+ if (trace_parameters() & LOG_REGS) PrintRegister(code, r31mode);
+ }
+ void LogVRegister(unsigned code, PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_VREGS) PrintVRegister(code, format);
+ }
+ void LogSystemRegister(SystemRegister id) {
+ if (trace_parameters() & LOG_SYSREGS) PrintSystemRegister(id);
+ }
+
+ // Print memory accesses.
+ void PrintRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+ void PrintVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_REGS) PrintRead(address, reg_code, format);
+ }
+ void LogWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format);
+ }
+ void LogVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_VREGS) {
+ PrintVRead(address, reg_code, format, lane);
+ }
+ }
+ void LogVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_WRITE) {
+ PrintVWrite(address, reg_code, format, lane);
+ }
+ }
+
+ // Helper functions for register tracing.
+ void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes = kXRegSizeInBytes);
+ void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSizeInBytes,
+ int lsb = 0);
+ void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes,
+ int lane_count = 1, int rightmost_lane = 0);
+
+ void DoUnreachable(const Instruction* instr);
+ void DoTrace(const Instruction* instr);
+ void DoLog(const Instruction* instr);
+
+ static const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* SRegNameForCode(unsigned code);
+ static const char* DRegNameForCode(unsigned code);
+ static const char* VRegNameForCode(unsigned code);
+
+ bool coloured_trace() const { return coloured_trace_; }
+ void set_coloured_trace(bool value);
+
+ int trace_parameters() const { return trace_parameters_; }
+ void set_trace_parameters(int parameters);
+
+ void set_instruction_stats(bool value);
+
+ // Clear the simulated local monitor to force the next store-exclusive
+ // instruction to fail.
+ void ClearLocalMonitor() {
+ local_monitor_.Clear();
+ }
+
+ void SilenceExclusiveAccessWarning() {
+ print_exclusive_access_warning_ = false;
+ }
+
+ protected:
+ const char* clr_normal;
+ const char* clr_flag_name;
+ const char* clr_flag_value;
+ const char* clr_reg_name;
+ const char* clr_reg_value;
+ const char* clr_vreg_name;
+ const char* clr_vreg_value;
+ const char* clr_memory_address;
+ const char* clr_warning;
+ const char* clr_warning_message;
+ const char* clr_printf;
+
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ switch (cond) {
+ case eq:
+ return Z();
+ case ne:
+ return !Z();
+ case hs:
+ return C();
+ case lo:
+ return !C();
+ case mi:
+ return N();
+ case pl:
+ return !N();
+ case vs:
+ return V();
+ case vc:
+ return !V();
+ case hi:
+ return C() && !Z();
+ case ls:
+ return !(C() && !Z());
+ case ge:
+ return N() == V();
+ case lt:
+ return N() != V();
+ case gt:
+ return !Z() && (N() == V());
+ case le:
+ return !(!Z() && (N() == V()));
+ case nv:
+ VIXL_FALLTHROUGH();
+ case al:
+ return true;
+ default:
+ VIXL_UNREACHABLE();
+ return false;
+ }
+ }
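+
+ // For example, 'hi' (unsigned higher) passes only when C is set and Z is
+ // clear, and 'ls' is exactly its negation, matching the AArch64 condition
+ // code definitions.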
+
+ bool ConditionPassed(Instr cond) {
+ return ConditionPassed(static_cast<Condition>(cond));
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(const Instruction* instr, int64_t op2);
+ uint64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in = 0);
+ void LogicalHelper(const Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(const Instruction* instr, int64_t op2);
+ void LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode);
+ uintptr_t AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+ void NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+
+ uint64_t AddressUntag(uint64_t address) {
+ return address & ~kAddressTagMask;
+ }
+
+ template <typename T>
+ T* AddressUntag(T* address) {
+ uintptr_t address_raw = reinterpret_cast<uintptr_t>(address);
+ return reinterpret_cast<T*>(AddressUntag(address_raw));
+ }
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+ uint16_t PolynomialMult(uint8_t op1, uint8_t op2);
+
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr);
+ void ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr);
+ void ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr);
+ void ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr);
+ void ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ int index,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ int index,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ int index,
+ uint64_t addr);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond);
+ LogicVRegister cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize);
+ LogicVRegister rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate);
+ LogicVRegister saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm);
+ LogicVRegister dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister extractnarrow(VectorFormat vform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned);
+ LogicVRegister xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned);
+ LogicVRegister saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round = true);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ #define NEON_3VREG_LOGIC_LIST(V) \
+ V(addhn) \
+ V(addhn2) \
+ V(raddhn) \
+ V(raddhn2) \
+ V(subhn) \
+ V(subhn2) \
+ V(rsubhn) \
+ V(rsubhn2) \
+ V(pmull) \
+ V(pmull2) \
+ V(sabal) \
+ V(sabal2) \
+ V(uabal) \
+ V(uabal2) \
+ V(sabdl) \
+ V(sabdl2) \
+ V(uabdl) \
+ V(uabdl2) \
+ V(smull) \
+ V(smull2) \
+ V(umull) \
+ V(umull2) \
+ V(smlal) \
+ V(smlal2) \
+ V(umlal) \
+ V(umlal2) \
+ V(smlsl) \
+ V(smlsl2) \
+ V(umlsl) \
+ V(umlsl2) \
+ V(sqdmlal) \
+ V(sqdmlal2) \
+ V(sqdmlsl) \
+ V(sqdmlsl2) \
+ V(sqdmull) \
+ V(sqdmull2)
+
+ #define DEFINE_LOGIC_FUNC(FXN) \
+ LogicVRegister FXN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC)
+ #undef DEFINE_LOGIC_FUNC
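+
+ // For reference, each V(op) entry above expands through DEFINE_LOGIC_FUNC
+ // into a three-operand declaration; V(addhn), for example, declares:
+ //   LogicVRegister addhn(VectorFormat vform,
+ //                        LogicVRegister dst,
+ //                        const LogicVRegister& src1,
+ //                        const LogicVRegister& src2);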
+
+ #define NEON_FP3SAME_LIST(V) \
+ V(fadd, FPAdd, false) \
+ V(fsub, FPSub, true) \
+ V(fmul, FPMul, true) \
+ V(fmulx, FPMulx, true) \
+ V(fdiv, FPDiv, true) \
+ V(fmax, FPMax, false) \
+ V(fmin, FPMin, false) \
+ V(fmaxnm, FPMaxNM, false) \
+ V(fminnm, FPMinNM, false)
+
+ #define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+ template <typename T> \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP)
+ #undef DECLARE_NEON_FP_VECTOR_OP
+
+ #define NEON_FPPAIRWISE_LIST(V) \
+ V(faddp, fadd, FPAdd) \
+ V(fmaxp, fmax, FPMax) \
+ V(fmaxnmp, fmaxnm, FPMaxNM) \
+ V(fminp, fmin, FPMin) \
+ V(fminnmp, fminnm, FPMinNM)
+
+ #define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src);
+ NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP)
+ #undef DECLARE_NEON_FP_PAIR_OP
+
+ template <typename T>
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ template <typename T>
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond);
+
+ template <typename T>
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception = false);
+ LogicVRegister fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding);
+ LogicVRegister ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
+ typedef float (Simulator::*FPMinMaxOp)(float a, float b);
+
+ LogicVRegister fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op);
+
+ LogicVRegister fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
+ static const uint32_t CRC32_POLY = 0x04C11DB7;
+ static const uint32_t CRC32C_POLY = 0x1EDC6F41;
+ uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly);
+ template <typename T>
+ uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly);
+ uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly);
+
+ void SysOp_W(int op, int64_t val);
+
+ template <typename T>
+ T FPRecipSqrtEstimate(T op);
+ template <typename T>
+ T FPRecipEstimate(T op, FPRounding rounding);
+ template <typename T, typename R>
+ R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding);
+
+ void FPCompare(double val0, double val1, FPTrapFlags trap);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ float FPToFloat(float16 value);
+ float16 FPToFloat16(float value, FPRounding round_mode);
+ float16 FPToFloat16(double value, FPRounding round_mode);
+ double recip_sqrt_estimate(double a);
+ double recip_estimate(double a);
+ double FPRecipSqrtEstimate(double a);
+ double FPRecipEstimate(double a);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulx(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ template <typename T>
+ T FPRecipStepFused(T op1, T op2);
+
+ template <typename T>
+ T FPRSqrtStepFused(T op1, T op2);
+
+ // This doesn't do anything at the moment. We'll need it if we want support
+ // for cumulative exception bits or floating-point exceptions.
+ void FPProcessException() { }
+
+ bool FPProcessNaNs(const Instruction* instr);
+
+ // Pseudo Printf instruction
+ void DoPrintf(const Instruction* instr);
+
+ // Processor state ---------------------------------------
+
+ // Simulated monitors for exclusive access instructions.
+ SimExclusiveLocalMonitor local_monitor_;
+ SimExclusiveGlobalMonitor global_monitor_;
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instruction statistics instrumentation.
+ Instrument* instrumentation_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Vector registers
+ SimVRegister vregisters_[kNumberOfVRegisters];
+
+ // Program Status Register.
+ // bits[31, 27]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ VIXL_ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ VIXL_ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return (result == 0) ? 1 : 0;
+ }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const int stack_protection_size_ = 128 * KBytes;
+ static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
+ byte* stack_limit_;
+
+ Decoder* decoder_;
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ const Instruction* pc_;
+ const Instruction* resume_pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ static const Instruction* kEndOfSimAddress;
+
+ private:
+ template <typename T>
+ static T FPDefaultNaN();
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op) {
+ VIXL_ASSERT(std::isnan(op));
+ if (IsSignallingNaN(op)) {
+ FPProcessException();
+ }
+ return DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+ }
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+ }
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ VIXL_ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+ }
+
+ bool coloured_trace_;
+
+ // A set of TraceParameters flags.
+ int trace_parameters_;
+
+ // Indicates whether the instruction instrumentation is active.
+ bool instruction_stats_;
+
+ // Indicates whether the exclusive-access warning has been printed.
+ bool print_exclusive_access_warning_;
+ void PrintExclusiveAccessWarning();
+
+ // Indicates that the simulator ran out of memory at some point.
+ // Data structures may not be fully allocated.
+ bool oom_;
+
+ public:
+ // True if the simulator ran out of memory during or after construction.
+ bool oom() const { return oom_; }
+
+ protected:
+ // Moz: Synchronizes access between main thread and compilation threads.
+ js::Mutex lock_;
+ Redirection* redirection_;
+ mozilla::Vector<int64_t, 0, js::SystemAllocPolicy> spStack_;
+};
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
+#endif // VIXL_A64_SIMULATOR_A64_H_
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.cpp b/js/src/jit/arm64/vixl/Utils-vixl.cpp
new file mode 100644
index 000000000..7af311bee
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.cpp
@@ -0,0 +1,145 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <stdio.h>
+
+namespace vixl {
+
+uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+uint32_t float_sign(float val) {
+ uint32_t rawbits = float_to_rawbits(val);
+ return unsigned_bitextract_32(31, 31, rawbits);
+}
+
+
+uint32_t float_exp(float val) {
+ uint32_t rawbits = float_to_rawbits(val);
+ return unsigned_bitextract_32(30, 23, rawbits);
+}
+
+
+uint32_t float_mantissa(float val) {
+ uint32_t rawbits = float_to_rawbits(val);
+ return unsigned_bitextract_32(22, 0, rawbits);
+}
+
+
+uint32_t double_sign(double val) {
+ uint64_t rawbits = double_to_rawbits(val);
+ return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits));
+}
+
+
+uint32_t double_exp(double val) {
+ uint64_t rawbits = double_to_rawbits(val);
+ return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits));
+}
+
+
+uint64_t double_mantissa(double val) {
+ uint64_t rawbits = double_to_rawbits(val);
+ return unsigned_bitextract_64(51, 0, rawbits);
+}
+
+
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+ uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+ return rawbits_to_float(bits);
+}
+
+
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+ uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+ return rawbits_to_double(bits);
+}
+
+
+int float16classify(float16 value) {
+ uint16_t exponent_max = (1 << 5) - 1;
+ uint16_t exponent_mask = exponent_max << 10;
+ uint16_t mantissa_mask = (1 << 10) - 1;
+
+ uint16_t exponent = (value & exponent_mask) >> 10;
+ uint16_t mantissa = value & mantissa_mask;
+ if (exponent == 0) {
+ if (mantissa == 0) {
+ return FP_ZERO;
+ }
+ return FP_SUBNORMAL;
+ } else if (exponent == exponent_max) {
+ if (mantissa == 0) {
+ return FP_INFINITE;
+ }
+ return FP_NAN;
+ }
+ return FP_NORMAL;
+}
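+
+// Worked examples (illustrative): with the half-precision layout assumed
+// above (5 exponent bits, 10 mantissa bits),
+//   float16classify(0x0000) == FP_ZERO
+//   float16classify(0x0001) == FP_SUBNORMAL
+//   float16classify(0x3c00) == FP_NORMAL    (1.0)
+//   float16classify(0x7c00) == FP_INFINITE  (+infinity)
+//   float16classify(0x7e00) == FP_NAN       (quiet NaN)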
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.h b/js/src/jit/arm64/vixl/Utils-vixl.h
new file mode 100644
index 000000000..738cfb085
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.h
@@ -0,0 +1,286 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+#include "mozilla/FloatingPoint.h"
+
+#include "jit/arm64/vixl/CompilerIntrinsics-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Macros for compile-time format checking.
+#if defined(__GNUC__)
+#define PRINTF_CHECK(format_index, varargs_index) \
+ __attribute__((format(printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
+// Check number width.
+inline bool is_intn(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ int64_t limit = INT64_C(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return !(x >> n);
+}
+
+inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
+}
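+
+// Worked example (illustrative): is_intn(8, 127) and is_uintn(8, 255) hold,
+// is_intn(8, 128) does not, and truncate_to_intn(8, 0x1ff) yields 0xff.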
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(N, x); }
+#define DECLARE_IS_UINT_N(N) \
+inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
+
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
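+
+// Worked example (illustrative): unsigned_bitextract_32(30, 23, bits)
+// returns the 8-bit exponent field of a single-precision float; for 1.0f
+// (raw bits 0x3f800000) it yields 0x7f.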
+
+// Floating point representation.
+uint32_t float_to_rawbits(float value);
+uint64_t double_to_rawbits(double value);
+float rawbits_to_float(uint32_t bits);
+double rawbits_to_double(uint64_t bits);
+
+uint32_t float_sign(float val);
+uint32_t float_exp(float val);
+uint32_t float_mantissa(float val);
+uint32_t double_sign(double val);
+uint32_t double_exp(double val);
+uint64_t double_mantissa(double val);
+
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+
+// An fpclassify() function for 16-bit half-precision floats.
+int float16classify(float16 value);
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ uint64_t raw = double_to_rawbits(num);
+ if (mozilla::IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ uint32_t raw = float_to_rawbits(num);
+ if (mozilla::IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ return (float16classify(num) == FP_NAN) &&
+ ((num & kFP16QuietNaNMask) == 0);
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return mozilla::IsNaN(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ VIXL_ASSERT(mozilla::IsNaN(num));
+ return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ VIXL_ASSERT(mozilla::IsNaN(num));
+ return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+
+inline uint64_t LowestSetBit(uint64_t value) {
+ return value & -value;
+}
+
+
+template<typename T>
+inline int HighestSetBitPosition(T value) {
+ VIXL_ASSERT(value != 0);
+ return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
+}
+
+
+template<typename V>
+inline int WhichPowerOf2(V value) {
+ VIXL_ASSERT(IsPowerOf2(value));
+ return CountTrailingZeros(value);
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+
+template <typename T>
+T ReverseBits(T value) {
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
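+
+// Worked example (illustrative): ReverseBits<uint8_t>(0xb0) mirrors the bit
+// order and returns 0x0d.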
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+ VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
+ VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
+ // Split the value into a byte array, where bytes[0] is the least
+ // significant byte and bytes[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[0] is used by REV16_x, REV16_w
+ // permute_table[1] is used by REV32_x, REV_w
+ // permute_table[2] is used by REV_x
+ VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+ static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7} };
+ T result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+ }
+ return result;
+}
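+
+// Worked example (illustrative): with block_bytes_log2 == 2 (the REV32
+// pattern), ReverseBytes<uint64_t>(0x0123456789abcdef, 2) reverses the bytes
+// within each 32-bit half and returns 0x67452301efcdab89.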
+
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template<typename T>
+bool IsWordAligned(T pointer) {
+ VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
+ return ((intptr_t)(pointer) & 3) == 0;
+}
+
+// Increment a pointer (up to 64 bits) until it has the specified alignment.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ uint64_t pointer_raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t align_step = (alignment - pointer_raw) % alignment;
+ VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
+
+ return (T)(pointer_raw + align_step);
+}
+
+// Decrement a pointer (up to 64 bits) until it has the specified alignment.
+template<class T>
+T AlignDown(T pointer, size_t alignment) {
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ uint64_t pointer_raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t align_step = pointer_raw % alignment;
+ VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
+
+ return (T)(pointer_raw - align_step);
+}
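+
+// Worked example (illustrative): AlignUp(uintptr_t(0x1003), 16) moves the
+// address forward to 0x1010, while AlignDown(uintptr_t(0x1003), 16) moves it
+// back to 0x1000.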
+
+} // namespace vixl
+
+#endif // VIXL_UTILS_H
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.cpp b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
new file mode 100644
index 000000000..4fcf2c90e
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "jit/RegisterSets.h"
+
+#define HWCAP_MIPS (1 << 28)
+#define HWCAP_LOONGSON (1 << 27)
+#define HWCAP_FPU (1 << 0)
+
+namespace js {
+namespace jit {
+
+static uint32_t
+get_mips_flags()
+{
+ uint32_t flags = HWCAP_MIPS;
+
+#if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ flags |= HWCAP_FPU;
+#else
+# ifdef __linux__
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ return flags;
+
+ char buf[1024];
+ memset(buf, 0, sizeof(buf));
+ fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ if (strstr(buf, "FPU"))
+ flags |= HWCAP_FPU;
+ if (strstr(buf, "Loongson"))
+ flags |= HWCAP_LOONGSON;
+# endif
+#endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
+ return flags;
+}
+
+static bool check_fpu()
+{
+ return mips_private::Flags & HWCAP_FPU;
+}
+
+static bool check_loongson()
+{
+ return mips_private::Flags & HWCAP_LOONGSON;
+}
+
+namespace mips_private {
+ // Cache a local copy so we only have to read /proc/cpuinfo once.
+ uint32_t Flags = get_mips_flags();
+ bool hasFPU = check_fpu();
+ bool isLoongson = check_loongson();
+}
+
+Registers::Code
+Registers::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+} // namespace jit
+} // namespace js
+
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.h b/js/src/jit/mips-shared/Architecture-mips-shared.h
new file mode 100644
index 000000000..7afe30594
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Architecture_mips_shared_h
+#define jit_mips_shared_Architecture_mips_shared_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "js/Utility.h"
+
+// gcc appears to use _mips_hard_float to denote
+// that the target is a hard-float target.
+#ifdef _mips_hard_float
+#define JS_CODEGEN_MIPS_HARDFP
+#endif
+
+#if (defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32)) || defined(JS_SIMULATOR_MIPS32)
+#define USES_O32_ABI
+#elif (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64)) || defined(JS_SIMULATOR_MIPS64)
+#define USES_N64_ABI
+#else
+#error "Unsupported ABI"
+#endif
+
+namespace js {
+namespace jit {
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
+static const uint32_t JumpImmediateRange = UINT32_MAX;
+
+class Registers
+{
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+#if defined(USES_O32_ABI)
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ ta0 = t4,
+ ta1 = t5,
+ ta2 = t6,
+ ta3 = t7,
+#elif defined(USES_N64_ABI)
+ a4 = r8,
+ a5 = r9,
+ a6 = r10,
+ a7 = r11,
+ t0 = r12,
+ t1 = r13,
+ t2 = r14,
+ t3 = r15,
+ ta0 = a4,
+ ta1 = a5,
+ ta2 = a6,
+ ta3 = a7,
+#endif
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static const char * const RegNames[];
+ static const char* GetName(Code code) {
+ MOZ_ASSERT(code < Total);
+ return RegNames[code];
+ }
+ static const char* GetName(Encoding i) {
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char* name);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ static const uint32_t Total = 32;
+ static const uint32_t Allocatable;
+
+ typedef uint32_t SetType;
+ static const SetType AllMask = 0xffffffff;
+ static const SetType SharedArgRegMask = (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3);
+ static const SetType ArgRegMask;
+
+ static const SetType VolatileMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1) |
+ (1 << Registers::a0) |
+ (1 << Registers::a1) |
+ (1 << Registers::a2) |
+ (1 << Registers::a3) |
+ (1 << Registers::t0) |
+ (1 << Registers::t1) |
+ (1 << Registers::t2) |
+ (1 << Registers::t3) |
+ (1 << Registers::ta0) |
+ (1 << Registers::ta1) |
+ (1 << Registers::ta2) |
+ (1 << Registers::ta3);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::s0) |
+ (1 << Registers::s1) |
+ (1 << Registers::s2) |
+ (1 << Registers::s3) |
+ (1 << Registers::s4) |
+ (1 << Registers::s5) |
+ (1 << Registers::s6) |
+ (1 << Registers::s7) |
+ (1 << Registers::ra);
+
+ static const SetType WrapperMask =
+ VolatileMask | // = arguments
+ (1 << Registers::t0) | // = outReg
+ (1 << Registers::t1); // = argBase
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::zero) |
+ (1 << Registers::at) | // at = scratch
+ (1 << Registers::t8) | // t8 = scratch
+ (1 << Registers::t9) | // t9 = scratch
+ (1 << Registers::k0) |
+ (1 << Registers::k1) |
+ (1 << Registers::gp) |
+ (1 << Registers::sp) |
+ (1 << Registers::fp) |
+ (1 << Registers::ra);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask;
+
+ // Registers returned from a JS -> C call.
+ static const SetType SharedCallMask = (1 << Registers::v0);
+ static const SetType CallMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+class FloatRegistersMIPSShared
+{
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ invalid_freg
+ };
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ double d;
+ };
+
+ static const char* GetName(Code code) {
+ static const char * const Names[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19",
+ "f20", "f21", "f22", "f23", "f24", "f25",
+ "f26", "f27", "f28", "f29", "f30", "f31"};
+ return Names[code];
+ }
+
+ static const Code Invalid = invalid_freg;
+
+ typedef uint64_t SetType;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisterMIPSShared
+{
+ public:
+ bool isSimd128() const { return false; }
+
+ typedef FloatRegistersMIPSShared::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+};
+
+namespace mips_private {
+ extern uint32_t Flags;
+ extern bool hasFPU;
+ extern bool isLoongson;
+}
+
+inline uint32_t GetMIPSFlags() { return mips_private::Flags; }
+inline bool hasFPU() { return mips_private::hasFPU; }
+inline bool isLoongson() { return mips_private::isLoongson; }
+
+// MIPS has no double registers that cannot also be treated as float32.
+inline bool
+hasUnaliasedDouble() {
+ return false;
+}
+
+// On MIPS, fn-double aliases both fn-float32 and fn+1-float32, so if you need
+// to convert a float32 to a double as a temporary, you need a temporary
+// double register.
+inline bool
+hasMultiAlias() {
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Architecture_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
new file mode 100644
index 000000000..f813eb946
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -0,0 +1,1746 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+#include "jsutil.h"
+
+#include "gc/Marking.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCompartment.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Encode a standard register when it is being used as rd, as rs, or as
+// the extra register (rt). These helpers should never be called with an InvalidReg.
+uint32_t
+js::jit::RS(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RSShift;
+}
+
+uint32_t
+js::jit::RT(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RTShift;
+}
+
+uint32_t
+js::jit::RD(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RDShift;
+}
+
+uint32_t
+js::jit::RZ(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RZShift;
+}
+
+uint32_t
+js::jit::SA(uint32_t value)
+{
+ MOZ_ASSERT(value < 32);
+ return value << SAShift;
+}
+
+Register
+js::jit::toRS(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RSMask ) >> RSShift);
+}
+
+Register
+js::jit::toRT(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RTMask ) >> RTShift);
+}
+
+Register
+js::jit::toRD(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RDMask ) >> RDShift);
+}
+
+Register
+js::jit::toR(Instruction& i)
+{
+ return Register::FromCode(i.encode() & RegMask);
+}
+
+void
+InstImm::extractImm16(BOffImm16* dest)
+{
+ *dest = BOffImm16(*this);
+}
+
+void
+AssemblerMIPSShared::finish()
+{
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool
+AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
+{
+ if (!AssemblerShared::asmMergeWith(size(), other))
+ return false;
+ for (size_t i = 0; i < other.numLongJumps(); i++) {
+ size_t off = other.longJumps_[i];
+ addLongJump(BufferOffset(size() + off));
+ }
+ return m_buffer.appendBuffer(other.m_buffer);
+}
+
+uint32_t
+AssemblerMIPSShared::actualIndex(uint32_t idx_) const
+{
+ return idx_;
+}
+
+uint8_t*
+AssemblerMIPSShared::PatchableJumpAddress(JitCode* code, uint32_t pe_)
+{
+ return code->raw() + pe_;
+}
+
+void
+AssemblerMIPSShared::copyJumpRelocationTable(uint8_t* dest)
+{
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+}
+
+void
+AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest)
+{
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+}
+
+void
+AssemblerMIPSShared::copyPreBarrierTable(uint8_t* dest)
+{
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+}
+
+void
+AssemblerMIPSShared::processCodeLabels(uint8_t* rawCode)
+{
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
+ }
+}
+
+AssemblerMIPSShared::Condition
+AssemblerMIPSShared::InvertCondition(Condition cond)
+{
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerMIPSShared::DoubleCondition
+AssemblerMIPSShared::InvertCondition(DoubleCondition cond)
+{
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+BOffImm16::BOffImm16(InstImm inst)
+ : data(inst.encode() & Imm16Mask)
+{
+}
+
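+// The shift pair below sign-extends the 16-bit branch offset; the "+ 1"
+// reflects that MIPS branch offsets are relative to the instruction that
+// follows the branch (its delay slot).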
+Instruction*
+BOffImm16::getDest(Instruction* src) const
+{
+ return &src[(((int32_t)data << 16) >> 16) + 1];
+}
+
+bool
+AssemblerMIPSShared::oom() const
+{
+ return AssemblerShared::oom() ||
+ m_buffer.oom() ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+}
+
+// Size of the instruction stream, in bytes.
+size_t
+AssemblerMIPSShared::size() const
+{
+ return m_buffer.size();
+}
+
+// Size of the relocation table, in bytes.
+size_t
+AssemblerMIPSShared::jumpRelocationTableBytes() const
+{
+ return jumpRelocations_.length();
+}
+
+size_t
+AssemblerMIPSShared::dataRelocationTableBytes() const
+{
+ return dataRelocations_.length();
+}
+
+size_t
+AssemblerMIPSShared::preBarrierTableBytes() const
+{
+ return preBarriers_.length();
+}
+
+// Total size, in bytes, of the code plus all relocation tables.
+size_t
+AssemblerMIPSShared::bytesNeeded() const
+{
+ return size() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+}
+
+// Write a blob of binary data into the instruction stream.
+BufferOffset
+AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest)
+{
+ if (dest == nullptr)
+ return m_buffer.putInt(x);
+
+ WriteInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void
+AssemblerMIPSShared::WriteInstStatic(uint32_t x, uint32_t* dest)
+{
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset
+AssemblerMIPSShared::haltingAlign(int alignment)
+{
+ // TODO: Implement a proper halting align.
+ return nopAlign(alignment);
+}
+
+BufferOffset
+AssemblerMIPSShared::nopAlign(int alignment)
+{
+ BufferOffset ret;
+ MOZ_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ } else {
+ MOZ_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ }
+ return ret;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_nop()
+{
+ return writeInst(op_special | ff_sll);
+}
+
+// Logical operations.
+BufferOffset
+AssemblerMIPSShared::as_and(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_or(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_xor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_nor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_andi(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ori(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_xori(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lui(Register rd, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
+}
+
+// Branch and jump instructions
+BufferOffset
+AssemblerMIPSShared::as_bal(BOffImm16 off)
+{
+ BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
+ return bo;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_b(BOffImm16 off)
+{
+ BufferOffset bo = writeInst(InstImm(op_beq, zero, zero, off).encode());
+ return bo;
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(JumpOrCall jumpOrCall)
+{
+ if (jumpOrCall == BranchIsCall)
+ return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+
+ return InstImm(op_beq, zero, zero, BOffImm16(0));
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(Register s, Register t, Condition c)
+{
+ MOZ_ASSERT(c == AssemblerMIPSShared::Equal || c == AssemblerMIPSShared::NotEqual);
+ return InstImm(c == AssemblerMIPSShared::Equal ? op_beq : op_bne, s, t, BOffImm16(0));
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(Register s, Condition c)
+{
+ switch (c) {
+ case AssemblerMIPSShared::Equal:
+ case AssemblerMIPSShared::Zero:
+ case AssemblerMIPSShared::BelowOrEqual:
+ return InstImm(op_beq, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::NotEqual:
+ case AssemblerMIPSShared::NonZero:
+ case AssemblerMIPSShared::Above:
+ return InstImm(op_bne, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThan:
+ return InstImm(op_bgtz, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThanOrEqual:
+ case AssemblerMIPSShared::NotSigned:
+ return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
+ case AssemblerMIPSShared::LessThan:
+ case AssemblerMIPSShared::Signed:
+ return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
+ case AssemblerMIPSShared::LessThanOrEqual:
+ return InstImm(op_blez, s, zero, BOffImm16(0));
+ default:
+ MOZ_CRASH("Condition not supported.");
+ }
+}
+
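+// Builds the rt field of a bc1f/bc1t instruction: bit 16 (FBtrueShift)
+// selects branch-on-true vs. branch-on-false and bits 18..20 (FBccShift)
+// carry the FP condition code.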
+InstImm
+AssemblerMIPSShared::getBranchCode(FloatTestKind testKind, FPConditionBit fcc)
+{
+ MOZ_ASSERT(!(fcc & ~FccMask));
+ uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;
+
+ return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
+}
+
+BufferOffset
+AssemblerMIPSShared::as_j(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_j, off).encode());
+ return bo;
+}
+BufferOffset
+AssemblerMIPSShared::as_jal(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
+ return bo;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_jr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
+ return bo;
+}
+BufferOffset
+AssemblerMIPSShared::as_jalr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
+ return bo;
+}
+
+
+// Arithmetic instructions
+BufferOffset
+AssemblerMIPSShared::as_addu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_addiu(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_daddu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_daddu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_daddiu(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_daddiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsubu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsubu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mult(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_multu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmult(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_dmult).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmultu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_dmultu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_div(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ddiv(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_ddiv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ddivu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_ddivu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mul(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
+}
+
+// Shift instructions
+BufferOffset
+AssemblerMIPSShared::as_sll(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsll(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsll).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsll32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsll32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sllv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsllv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsllv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srl(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrl(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrl32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srlv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrlv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sra(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsra(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsra).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsra32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsra32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srav(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrav(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrav).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_rotr(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotr(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotr32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_rotrv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotrv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode());
+}
+
+// Load and store instructions
+BufferOffset
+AssemblerMIPSShared::as_lb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lbu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lhu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ld, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ldl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ldl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ldr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ldr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_swl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_swr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sd, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sdl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sdl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sdr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sdr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslbx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssbx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslhx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsshx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslwx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsswx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslq(Register rh, Register rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssq(Register rh, Register rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+// Move from HI/LO register.
+BufferOffset
+AssemblerMIPSShared::as_mfhi(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mflo(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mflo).encode());
+}
+
+// Set on less than.
+BufferOffset
+AssemblerMIPSShared::as_slt(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sltu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_slti(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sltiu(Register rd, Register rs, uint32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
+}
+
+// Conditional move.
+BufferOffset
+AssemblerMIPSShared::as_movz(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movn(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movt(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 1);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movf(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 0);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+// Bit twiddling.
+BufferOffset
+AssemblerMIPSShared::as_clz(Register rd, Register rs)
+{
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_clz).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dclz(Register rd, Register rs)
+{
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_dclz).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ins(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dins(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size >= 2 && size <= 64 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size >= 1 && size <= 32 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
+}
+
+// Sign extend
+BufferOffset
+AssemblerMIPSShared::as_seb(Register rd, Register rt)
+{
+ return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_seh(Register rd, Register rt)
+{
+ return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dext(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 63);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size > 32 && size <= 64 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size != 0 && size <= 32 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode());
+}
+
+// FP instructions
+BufferOffset
+AssemblerMIPSShared::as_ld(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sd(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ls(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ss(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ctc1(Register rt, FPControl fc)
+{
+ return writeInst(InstReg(op_cop1, rs_ctc1, rt, FloatRegister(fc)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cfc1(Register rt, FPControl fc)
+{
+ return writeInst(InstReg(op_cop1, rs_cfc1, rt, FloatRegister(fc)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mtc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mfc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mthc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mthc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mfhc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mfhc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmtc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_dmtc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmfc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_dmfc1, rt, fs).encode());
+}
+
+// FP convert instructions
+BufferOffset
+AssemblerMIPSShared::as_ceilws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_floorws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_roundws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncls(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ceilwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_floorwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_roundwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncld(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtds(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtdw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+// FP arithmetic instructions
+BufferOffset
+AssemblerMIPSShared::as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_abss(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_absd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_negs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_negd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sqrts(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sqrtd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+// FP compare instructions
+BufferOffset
+AssemblerMIPSShared::as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+}
+
+// FP conditional move.
+BufferOffset
+AssemblerMIPSShared::as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ Register rt = Register::FromCode(fcc << 2 | 1);
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movf_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ Register rt = Register::FromCode(fcc << 2 | 0);
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movf_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movz_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movn_fmt).encode());
+}
+
+void
+AssemblerMIPSShared::bind(Label* label, BufferOffset boff)
+{
+ // If our caller didn't give us an explicit target to bind to,
+ // then we want to bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+ // A used label holds a link to the branch that uses it.
+ BufferOffset b(label);
+ do {
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom())
+ return;
+
+ Instruction* inst = editSrc(b);
+
+ // Second word holds a pointer to the next branch in label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void
+AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
+{
+ if (label->used()) {
+ int32_t next;
+
+ BufferOffset b(label);
+ do {
+ Instruction* inst = editSrc(b);
+
+ append(wasm::TrapSite(target, b.getOffset()));
+ next = inst[1].encode();
+ inst[1].makeNop();
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->reset();
+}
+
+void
+AssemblerMIPSShared::retarget(Label* label, Label* target)
+{
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction* inst = editSrc(labelBranchOffset);
+
+ // Second word holds a pointer to the next branch in chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction* inst = editSrc(labelBranchOffset);
+ int32_t prev = target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ DebugOnly<uint32_t> prev = target->use(label->offset());
+ MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+void
+AssemblerMIPSShared::as_break(uint32_t code)
+{
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ writeInst(op_special | code << FunctionBits | ff_break);
+}
+
+void
+AssemblerMIPSShared::as_sync(uint32_t stype)
+{
+ MOZ_ASSERT(stype <= 31);
+ writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. It is
+// only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void
+AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
+{
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t*
+AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
+{
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr)
+ *count += sizeof(Instruction);
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+// Since there are no pools in the MIPS implementation, this should be simple.
+Instruction*
+Instruction::next()
+{
+ return this + 1;
+}
+
+InstImm AssemblerMIPSShared::invertBranch(InstImm branch, BOffImm16 skipOffset)
+{
+ uint32_t rt = 0;
+ Opcode op = (Opcode) (branch.extractOpcode() << OpcodeShift);
+ switch(op) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq);
+ return branch;
+ case op_bgtz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blez);
+ return branch;
+ case op_blez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgtz);
+ return branch;
+ case op_regimm:
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt == (rt_bltz >> RTShift)) {
+ branch.setRT(rt_bgez);
+ return branch;
+ }
+ if (rt == (rt_bgez >> RTShift)) {
+ branch.setRT(rt_bltz);
+ return branch;
+ }
+
+ MOZ_CRASH("Error creating long branch.");
+
+ case op_cop1:
+ MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt & 0x1)
+ branch.setRT((RTField) ((rt & ~0x1) << RTShift));
+ else
+ branch.setRT((RTField) ((rt | 0x1) << RTShift));
+ return branch;
+ default:
+ MOZ_CRASH("Error creating long branch.");
+ }
+}
+
+void
+AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm*)inst_.raw();
+
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
+ // We converted beq to andi, so now we restore it.
+ inst->setOpcode(op_beq);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+void
+AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm*)inst_.raw();
+
+ // toggledJump is always used for short jumps.
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
+ // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
+ inst->setOpcode(op_andi);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
new file mode 100644
index 000000000..a619fa0e0
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -0,0 +1,1522 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Assembler_mips_shared_h
+#define jit_mips_shared_Assembler_mips_shared_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/Architecture-mips-shared.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero = { Registers::zero };
+static constexpr Register at = { Registers::at };
+static constexpr Register v0 = { Registers::v0 };
+static constexpr Register v1 = { Registers::v1 };
+static constexpr Register a0 = { Registers::a0 };
+static constexpr Register a1 = { Registers::a1 };
+static constexpr Register a2 = { Registers::a2 };
+static constexpr Register a3 = { Registers::a3 };
+static constexpr Register a4 = { Registers::ta0 };
+static constexpr Register a5 = { Registers::ta1 };
+static constexpr Register a6 = { Registers::ta2 };
+static constexpr Register a7 = { Registers::ta3 };
+static constexpr Register t0 = { Registers::t0 };
+static constexpr Register t1 = { Registers::t1 };
+static constexpr Register t2 = { Registers::t2 };
+static constexpr Register t3 = { Registers::t3 };
+static constexpr Register t4 = { Registers::ta0 };
+static constexpr Register t5 = { Registers::ta1 };
+static constexpr Register t6 = { Registers::ta2 };
+static constexpr Register t7 = { Registers::ta3 };
+static constexpr Register s0 = { Registers::s0 };
+static constexpr Register s1 = { Registers::s1 };
+static constexpr Register s2 = { Registers::s2 };
+static constexpr Register s3 = { Registers::s3 };
+static constexpr Register s4 = { Registers::s4 };
+static constexpr Register s5 = { Registers::s5 };
+static constexpr Register s6 = { Registers::s6 };
+static constexpr Register s7 = { Registers::s7 };
+static constexpr Register t8 = { Registers::t8 };
+static constexpr Register t9 = { Registers::t9 };
+static constexpr Register k0 = { Registers::k0 };
+static constexpr Register k1 = { Registers::k1 };
+static constexpr Register gp = { Registers::gp };
+static constexpr Register sp = { Registers::sp };
+static constexpr Register fp = { Registers::fp };
+static constexpr Register ra = { Registers::ra };
+
+static constexpr Register ScratchRegister = at;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope
+{
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister)
+ { }
+};
+struct SecondScratchRegisterScope : public AutoRegisterScope
+{
+ explicit SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, SecondScratchReg)
+ { }
+};
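+// Typical usage (illustrative sketch; assumes the scope object is usable
+// wherever a Register is expected):
+//   ScratchRegisterScope scratch(masm);
+//   masm.as_or(dest, src, scratch);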
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register ArgumentsRectifierReg = s3;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register GlobalReg = s6; // used by Odin
+static constexpr Register HeapReg = s7; // used by Odin
+
+static constexpr Register PreBarrierReg = a1;
+
+static constexpr Register InvalidReg = { Registers::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = InvalidReg;
+static constexpr Register ReturnReg = v0;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+// A bias applied to the GlobalReg to allow the use of instructions with small
+// negative immediate offsets, which doubles the range of global data that can be
+// accessed with a single instruction.
+static const int32_t WasmGlobalRegBias = 32768;
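+// With signed 16-bit offsets in [-32768, 32767], biasing the base register by
+// 32768 lets a single load or store reach 64 KB of global data instead of
+// 32 KB.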
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = t0;
+static constexpr Register WasmIonExitRegE0 = a0;
+static constexpr Register WasmIonExitRegE1 = a1;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static constexpr Register WasmIonExitRegD0 = a0;
+static constexpr Register WasmIonExitRegD1 = a1;
+static constexpr Register WasmIonExitRegD2 = t0;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg1;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
+
+static constexpr uint32_t CodeAlignment = 4;
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than a method in the LIRGenerator, it is
+// here such that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = false;
+
+// MIPS instruction types
+//                 +---------------------------------------------------------------+
+//                 |     6     |    5    |    5    |    5    |    5    |     6     |
+//                 +---------------------------------------------------------------+
+// Register type   |   Opcode  |    Rs   |    Rt   |    Rd   |    Sa   | Function  |
+//                 +---------------------------------------------------------------+
+//                 |     6     |    5    |    5    |              16               |
+//                 +---------------------------------------------------------------+
+// Immediate type  |   Opcode  |    Rs   |    Rt   |    2's complement constant    |
+//                 +---------------------------------------------------------------+
+//                 |     6     |                        26                         |
+//                 +---------------------------------------------------------------+
+// Jump type       |   Opcode  |                    jump_target                    |
+//                 +---------------------------------------------------------------+
+//                 bit 31                                                      bit 0
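+//
+// For example, the register-type instruction "addu $v0, $a0, $a1" has
+// opcode 0 (special), rs = 4 ($a0), rt = 5 ($a1), rd = 2 ($v0), sa = 0 and
+// function 0x21 (addu), and assembles to the word 0x00851021.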
+
+// MIPS instruction encoding constants.
+static const uint32_t OpcodeShift = 26;
+static const uint32_t OpcodeBits = 6;
+static const uint32_t RSShift = 21;
+static const uint32_t RSBits = 5;
+static const uint32_t RTShift = 16;
+static const uint32_t RTBits = 5;
+static const uint32_t RDShift = 11;
+static const uint32_t RDBits = 5;
+static const uint32_t RZShift = 0;
+static const uint32_t RZBits = 5;
+static const uint32_t SAShift = 6;
+static const uint32_t SABits = 5;
+static const uint32_t FunctionShift = 0;
+static const uint32_t FunctionBits = 6;
+static const uint32_t Imm16Shift = 0;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t Imm28Shift = 0;
+static const uint32_t Imm28Bits = 28;
+static const uint32_t ImmFieldShift = 2;
+static const uint32_t FRBits = 5;
+static const uint32_t FRShift = 21;
+static const uint32_t FSShift = 11;
+static const uint32_t FSBits = 5;
+static const uint32_t FTShift = 16;
+static const uint32_t FTBits = 5;
+static const uint32_t FDShift = 6;
+static const uint32_t FDBits = 5;
+static const uint32_t FCccShift = 8;
+static const uint32_t FCccBits = 3;
+static const uint32_t FBccShift = 18;
+static const uint32_t FBccBits = 3;
+static const uint32_t FBtrueShift = 16;
+static const uint32_t FBtrueBits = 1;
+static const uint32_t FccMask = 0x7;
+static const uint32_t FccShift = 2;
+
+
+// MIPS instruction field bit masks.
+static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift;
+static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
+static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
+static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
+static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
+static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
+static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
+static const uint32_t RegMask = Registers::Total - 1;
+
+static const uint32_t BREAK_STACK_UNALIGNED = 1;
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RS(Register r);
+uint32_t RT(Register r);
+uint32_t RT(uint32_t regCode);
+uint32_t RT(FloatRegister r);
+uint32_t RD(Register r);
+uint32_t RD(FloatRegister r);
+uint32_t RD(uint32_t regCode);
+uint32_t RZ(Register r);
+uint32_t RZ(FloatRegister r);
+uint32_t SA(uint32_t value);
+uint32_t SA(FloatRegister r);
+
+Register toRS (Instruction& i);
+Register toRT (Instruction& i);
+Register toRD (Instruction& i);
+Register toR (Instruction& i);
+
+// MIPS enums for instruction fields
+enum Opcode {
+ op_special = 0 << OpcodeShift,
+ op_regimm = 1 << OpcodeShift,
+
+ op_j = 2 << OpcodeShift,
+ op_jal = 3 << OpcodeShift,
+ op_beq = 4 << OpcodeShift,
+ op_bne = 5 << OpcodeShift,
+ op_blez = 6 << OpcodeShift,
+ op_bgtz = 7 << OpcodeShift,
+
+ op_addi = 8 << OpcodeShift,
+ op_addiu = 9 << OpcodeShift,
+ op_slti = 10 << OpcodeShift,
+ op_sltiu = 11 << OpcodeShift,
+ op_andi = 12 << OpcodeShift,
+ op_ori = 13 << OpcodeShift,
+ op_xori = 14 << OpcodeShift,
+ op_lui = 15 << OpcodeShift,
+
+ op_cop1 = 17 << OpcodeShift,
+ op_cop1x = 19 << OpcodeShift,
+
+ op_beql = 20 << OpcodeShift,
+ op_bnel = 21 << OpcodeShift,
+ op_blezl = 22 << OpcodeShift,
+ op_bgtzl = 23 << OpcodeShift,
+
+ op_daddi = 24 << OpcodeShift,
+ op_daddiu = 25 << OpcodeShift,
+
+ op_ldl = 26 << OpcodeShift,
+ op_ldr = 27 << OpcodeShift,
+
+ op_special2 = 28 << OpcodeShift,
+ op_special3 = 31 << OpcodeShift,
+
+ op_lb = 32 << OpcodeShift,
+ op_lh = 33 << OpcodeShift,
+ op_lwl = 34 << OpcodeShift,
+ op_lw = 35 << OpcodeShift,
+ op_lbu = 36 << OpcodeShift,
+ op_lhu = 37 << OpcodeShift,
+ op_lwr = 38 << OpcodeShift,
+ op_lwu = 39 << OpcodeShift,
+ op_sb = 40 << OpcodeShift,
+ op_sh = 41 << OpcodeShift,
+ op_swl = 42 << OpcodeShift,
+ op_sw = 43 << OpcodeShift,
+ op_sdl = 44 << OpcodeShift,
+ op_sdr = 45 << OpcodeShift,
+ op_swr = 46 << OpcodeShift,
+
+ op_ll = 48 << OpcodeShift,
+ op_lwc1 = 49 << OpcodeShift,
+ op_lwc2 = 50 << OpcodeShift,
+ op_ldc1 = 53 << OpcodeShift,
+ op_ldc2 = 54 << OpcodeShift,
+ op_ld = 55 << OpcodeShift,
+
+ op_sc = 56 << OpcodeShift,
+ op_swc1 = 57 << OpcodeShift,
+ op_swc2 = 58 << OpcodeShift,
+ op_sdc1 = 61 << OpcodeShift,
+ op_sdc2 = 62 << OpcodeShift,
+ op_sd = 63 << OpcodeShift,
+};
+
+enum RSField {
+ rs_zero = 0 << RSShift,
+ // cop1 encoding of RS field.
+ rs_mfc1 = 0 << RSShift,
+ rs_one = 1 << RSShift,
+ rs_dmfc1 = 1 << RSShift,
+ rs_cfc1 = 2 << RSShift,
+ rs_mfhc1 = 3 << RSShift,
+ rs_mtc1 = 4 << RSShift,
+ rs_dmtc1 = 5 << RSShift,
+ rs_ctc1 = 6 << RSShift,
+ rs_mthc1 = 7 << RSShift,
+ rs_bc1 = 8 << RSShift,
+ rs_s = 16 << RSShift,
+ rs_d = 17 << RSShift,
+ rs_w = 20 << RSShift,
+ rs_l = 21 << RSShift,
+ rs_ps = 22 << RSShift
+};
+
+enum RTField {
+ rt_zero = 0 << RTShift,
+ // regimm encoding of RT field.
+ rt_bltz = 0 << RTShift,
+ rt_bgez = 1 << RTShift,
+ rt_bltzal = 16 << RTShift,
+ rt_bgezal = 17 << RTShift
+};
+
+enum FunctionField {
+ // special encoding of function field.
+ ff_sll = 0,
+ ff_movci = 1,
+ ff_srl = 2,
+ ff_sra = 3,
+ ff_sllv = 4,
+ ff_srlv = 6,
+ ff_srav = 7,
+
+ ff_jr = 8,
+ ff_jalr = 9,
+ ff_movz = 10,
+ ff_movn = 11,
+ ff_break = 13,
+ ff_sync = 15,
+
+ ff_mfhi = 16,
+ ff_mflo = 18,
+
+ ff_dsllv = 20,
+ ff_dsrlv = 22,
+ ff_dsrav = 23,
+
+ ff_mult = 24,
+ ff_multu = 25,
+ ff_div = 26,
+ ff_divu = 27,
+ ff_dmult = 28,
+ ff_dmultu = 29,
+ ff_ddiv = 30,
+ ff_ddivu = 31,
+
+ ff_add = 32,
+ ff_addu = 33,
+ ff_sub = 34,
+ ff_subu = 35,
+ ff_and = 36,
+ ff_or = 37,
+ ff_xor = 38,
+ ff_nor = 39,
+
+ ff_slt = 42,
+ ff_sltu = 43,
+ ff_dadd = 44,
+ ff_daddu = 45,
+ ff_dsub = 46,
+ ff_dsubu = 47,
+
+ ff_tge = 48,
+ ff_tgeu = 49,
+ ff_tlt = 50,
+ ff_tltu = 51,
+ ff_teq = 52,
+ ff_tne = 54,
+ ff_dsll = 56,
+ ff_dsrl = 58,
+ ff_dsra = 59,
+ ff_dsll32 = 60,
+ ff_dsrl32 = 62,
+ ff_dsra32 = 63,
+
+ // special2 encoding of function field.
+ ff_mul = 2,
+ ff_clz = 32,
+ ff_clo = 33,
+ ff_dclz = 36,
+
+ // special3 encoding of function field.
+ ff_ext = 0,
+ ff_dextm = 1,
+ ff_dextu = 2,
+ ff_dext = 3,
+ ff_ins = 4,
+ ff_dinsm = 5,
+ ff_dinsu = 6,
+ ff_dins = 7,
+ ff_bshfl = 32,
+
+ // cop1 encoding of function field.
+ ff_add_fmt = 0,
+ ff_sub_fmt = 1,
+ ff_mul_fmt = 2,
+ ff_div_fmt = 3,
+ ff_sqrt_fmt = 4,
+ ff_abs_fmt = 5,
+ ff_mov_fmt = 6,
+ ff_neg_fmt = 7,
+
+ ff_round_l_fmt = 8,
+ ff_trunc_l_fmt = 9,
+ ff_ceil_l_fmt = 10,
+ ff_floor_l_fmt = 11,
+
+ ff_round_w_fmt = 12,
+ ff_trunc_w_fmt = 13,
+ ff_ceil_w_fmt = 14,
+ ff_floor_w_fmt = 15,
+
+ ff_movf_fmt = 17,
+ ff_movz_fmt = 18,
+ ff_movn_fmt = 19,
+
+ ff_cvt_s_fmt = 32,
+ ff_cvt_d_fmt = 33,
+ ff_cvt_w_fmt = 36,
+ ff_cvt_l_fmt = 37,
+ ff_cvt_ps_s = 38,
+
+ ff_c_f_fmt = 48,
+ ff_c_un_fmt = 49,
+ ff_c_eq_fmt = 50,
+ ff_c_ueq_fmt = 51,
+ ff_c_olt_fmt = 52,
+ ff_c_ult_fmt = 53,
+ ff_c_ole_fmt = 54,
+ ff_c_ule_fmt = 55,
+
+ ff_madd_s = 32,
+ ff_madd_d = 33,
+
+ // Loongson encoding of function field.
+ ff_gsxbx = 0,
+ ff_gsxhx = 1,
+ ff_gsxwx = 2,
+ ff_gsxdx = 3,
+ ff_gsxwlc1 = 4,
+ ff_gsxwrc1 = 5,
+ ff_gsxdlc1 = 6,
+ ff_gsxdrc1 = 7,
+ ff_gsxwxc1 = 6,
+ ff_gsxdxc1 = 7,
+ ff_gsxq = 0x20,
+ ff_gsxqc1 = 0x8020,
+
+ ff_null = 0
+};
+
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
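+// The stored value is the branch offset in words relative to the instruction
+// in the branch delay slot, so e.g. a branch 8 bytes forward is encoded as
+// (8 - 4) >> 2 == 1.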
+class BOffImm16
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 18) >> 16) + 4;
+ }
+
+ explicit BOffImm16(int offset)
+ : data ((offset - 4) >> 2 & Imm16Mask)
+ {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < int(unsigned(INT16_MIN) << 2))
+ return false;
+ if ((offset - 4) > (INT16_MAX << 2))
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction* getDest(Instruction* src) const;
+
+ BOffImm16(InstImm inst);
+};
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 8) >> 6) + 4;
+ }
+
+ explicit JOffImm26(int offset)
+ : data ((offset - 4) >> 2 & Imm26Mask)
+ {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < -536870912)
+ return false;
+ if ((offset - 4) > 536870908)
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction* getDest(Instruction* src);
+
+};
+
+class Imm16
+{
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm)
+ : value(imm)
+ { }
+ uint32_t encode() {
+ return value;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) {
+ return imm <= UINT16_MAX ;
+ }
+ static Imm16 Lower (Imm32 imm) {
+ return Imm16(imm.value & 0xffff);
+ }
+ static Imm16 Upper (Imm32 imm) {
+ return Imm16((imm.value >> 16) & 0xffff);
+ }
+};
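+// Imm16::Upper and Imm16::Lower split a 32-bit immediate so that it can be
+// materialized with a lui/ori pair, roughly (illustrative sketch, where imm
+// is an Imm32 and dest a Register):
+//   as_lui(dest, Imm16::Upper(imm).encode());
+//   as_ori(dest, dest, Imm16::Lower(imm).encode());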
+
+class Imm8
+{
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm)
+ : value(imm)
+ { }
+ uint32_t encode(uint32_t shift) {
+ return value << shift;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) {
+ return imm <= UINT8_MAX ;
+ }
+ static Imm8 Lower (Imm16 imm) {
+ return Imm8(imm.decodeSigned() & 0xff);
+ }
+ static Imm8 Upper (Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
+
+class GSImm13
+{
+ uint16_t value;
+
+ public:
+ GSImm13();
+ GSImm13(uint32_t imm)
+ : value(imm & ~0xf)
+ { }
+ uint32_t encode(uint32_t shift) {
+ return ((value >> 4) & 0x1f) << shift;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInRange(int32_t imm) {
+ return imm >= int32_t(uint32_t(-256) << 4) && imm <= (255 << 4);
+ }
+};
+
+class Operand
+{
+ public:
+ enum Tag {
+ REG,
+ FREG,
+ MEM
+ };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand (Register reg_)
+ : tag(REG), reg(reg_.code())
+ { }
+
+ Operand (FloatRegister freg)
+ : tag(FREG), reg(freg.code())
+ { }
+
+ Operand (Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value)
+ { }
+
+ Operand (Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off)
+ { }
+
+ Operand (const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset)
+ { }
+
+ Tag getTag() const {
+ return tag;
+ }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
+
+inline Imm32
+Imm64::firstHalf() const
+{
+ return low();
+}
+
+inline Imm32
+Imm64::secondHalf() const
+{
+ return hi();
+}
+
+void
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+ ReprotectCode reprotect = DontReprotect);
+
+void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target);
+
+typedef js::jit::AssemblerBuffer<1024, Instruction> MIPSBuffer;
+
+class MIPSBufferWithExecutableCopy : public MIPSBuffer
+{
+ public:
+ void executableCopy(uint8_t* buffer) {
+ if (this->oom())
+ return;
+
+ for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
+ memcpy(buffer, &cur->instructions, cur->length());
+ buffer += cur->length();
+ }
+ }
+
+ bool appendBuffer(const MIPSBufferWithExecutableCopy& other) {
+ if (this->oom())
+ return false;
+
+ for (Slice* cur = other.head; cur != nullptr; cur = cur->getNext()) {
+ this->putBytes(cur->length(), &cur->instructions);
+ if (this->oom())
+ return false;
+ }
+ return true;
+ }
+};
+
+class AssemblerMIPSShared : public AssemblerShared
+{
+ public:
+
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPConditionBit {
+ FCC0 = 0,
+ FCC1,
+ FCC2,
+ FCC3,
+ FCC4,
+ FCC5,
+ FCC6,
+ FCC7
+ };
+
+ enum FPControl {
+ FIR = 0,
+ UFR,
+ UNFR = 4,
+ FCCR = 25,
+ FEXR,
+ FENR = 28,
+ FCSR = 31
+ };
+
+ enum FloatFormat {
+ SingleFloat,
+ DoubleFloat
+ };
+
+ enum JumpOrCall {
+ BranchIsJump,
+ BranchIsCall
+ };
+
+ enum FloatTestKind {
+ TestForTrue,
+ TestForFalse
+ };
+
+ // :( this should be protected, but since CodeGenerator
+ // wants to use it, it needs to go out here :(
+
+ BufferOffset nextOffset() {
+ return m_buffer.nextOffset();
+ }
+
+ protected:
+ Instruction * editSrc (BufferOffset bo) {
+ return m_buffer.getInst(bo);
+ }
+ public:
+ uint32_t actualIndex(uint32_t) const;
+ static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index);
+ protected:
+ // Structure for fixing up pc-relative loads/jumps when the machine code
+ // gets moved (executable copy, gc, etc.).
+ struct RelativePatch
+ {
+ // The offset within the code buffer where the value that we want to
+ // fix up is loaded.
+ BufferOffset offset;
+ void* target;
+ Relocation::Kind kind;
+
+ RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
+ : offset(offset),
+ target(target),
+ kind(kind)
+ { }
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+ js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter preBarriers_;
+
+ MIPSBufferWithExecutableCopy m_buffer;
+
+ public:
+ AssemblerMIPSShared()
+ : m_buffer(),
+ isFinished(false)
+ { }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ void writeRelocation(BufferOffset src) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+
+ // Unlike the x86/x64 version, the data relocation has to be executed
+ // before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+ void writePrebarrierOffset(CodeOffset label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ public:
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+ }
+
+ static const Register getStackPointer() {
+ return StackPointer;
+ }
+
+ protected:
+ bool isFinished;
+ public:
+ void finish();
+ bool asmMergeWith(const AssemblerMIPSShared& other);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+ void copyPreBarrierTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+ size_t preBarrierTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a blob of binary into the instruction stream *OR*
+ // into a destination address. If dest is nullptr (the default), then the
+ // instruction gets written into the instruction stream. If dest is not
+ // null, it is interpreted as a pointer to the location where we want the
+ // instruction to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ BufferOffset haltingAlign(int alignment);
+ BufferOffset nopAlign(int alignment);
+ BufferOffset as_nop();
+
+ // Branch and jump instructions
+ BufferOffset as_bal(BOffImm16 off);
+ BufferOffset as_b(BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall);
+ InstImm getBranchCode(Register s, Register t, Condition c);
+ InstImm getBranchCode(Register s, Condition c);
+ InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);
+
+ BufferOffset as_j(JOffImm26 off);
+ BufferOffset as_jal(JOffImm26 off);
+
+ BufferOffset as_jr(Register rs);
+ BufferOffset as_jalr(Register rs);
+
+ // Arithmetic instructions
+ BufferOffset as_addu(Register rd, Register rs, Register rt);
+ BufferOffset as_addiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_daddu(Register rd, Register rs, Register rt);
+ BufferOffset as_daddiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_subu(Register rd, Register rs, Register rt);
+ BufferOffset as_dsubu(Register rd, Register rs, Register rt);
+ BufferOffset as_mult(Register rs, Register rt);
+ BufferOffset as_multu(Register rs, Register rt);
+ BufferOffset as_dmult(Register rs, Register rt);
+ BufferOffset as_dmultu(Register rs, Register rt);
+ BufferOffset as_div(Register rs, Register rt);
+ BufferOffset as_divu(Register rs, Register rt);
+ BufferOffset as_mul(Register rd, Register rs, Register rt);
+ BufferOffset as_ddiv(Register rs, Register rt);
+ BufferOffset as_ddivu(Register rs, Register rt);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rs, Register rt);
+ BufferOffset as_or(Register rd, Register rs, Register rt);
+ BufferOffset as_xor(Register rd, Register rs, Register rt);
+ BufferOffset as_nor(Register rd, Register rs, Register rt);
+
+ BufferOffset as_andi(Register rd, Register rs, int32_t j);
+ BufferOffset as_ori(Register rd, Register rs, int32_t j);
+ BufferOffset as_xori(Register rd, Register rs, int32_t j);
+ BufferOffset as_lui(Register rd, int32_t j);
+
+ // Shift instructions
+ // as_sll(zero, zero, x) instructions are reserved as nop
+ BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_sllv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsllv(Register rd, Register rt, Register rs);
+ BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srlv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrlv(Register rd, Register rt, Register rs);
+ BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srav(Register rd, Register rt, Register rs);
+ BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_rotrv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrav(Register rd, Register rt, Register rs);
+ BufferOffset as_drotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotr32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotrv(Register rd, Register rt, Register rs);
+
+ // Load and store instructions
+ BufferOffset as_lb(Register rd, Register rs, int16_t off);
+ BufferOffset as_lbu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lh(Register rd, Register rs, int16_t off);
+ BufferOffset as_lhu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lw(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwl(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwr(Register rd, Register rs, int16_t off);
+ BufferOffset as_ll(Register rd, Register rs, int16_t off);
+ BufferOffset as_ld(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldl(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sb(Register rd, Register rs, int16_t off);
+ BufferOffset as_sh(Register rd, Register rs, int16_t off);
+ BufferOffset as_sw(Register rd, Register rs, int16_t off);
+ BufferOffset as_swl(Register rd, Register rs, int16_t off);
+ BufferOffset as_swr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sc(Register rd, Register rs, int16_t off);
+ BufferOffset as_sd(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdl(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdr(Register rd, Register rs, int16_t off);
+
+ // Loongson-specific load and store instructions
+ BufferOffset as_gslbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslhx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsshx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslwx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsswx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsldx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssdx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslq(Register rh, Register rl, Register rs, int16_t off);
+ BufferOffset as_gssq(Register rh, Register rl, Register rs, int16_t off);
+
+ // Move from HI/LO register.
+ BufferOffset as_mfhi(Register rd);
+ BufferOffset as_mflo(Register rd);
+
+ // Set on less than.
+ BufferOffset as_slt(Register rd, Register rs, Register rt);
+ BufferOffset as_sltu(Register rd, Register rs, Register rt);
+ BufferOffset as_slti(Register rd, Register rs, int32_t j);
+ BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);
+
+ // Conditional move.
+ BufferOffset as_movz(Register rd, Register rs, Register rt);
+ BufferOffset as_movn(Register rd, Register rs, Register rt);
+ BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
+ BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ BufferOffset as_clz(Register rd, Register rs);
+ BufferOffset as_dclz(Register rd, Register rs);
+ BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Sign extend
+ BufferOffset as_seb(Register rd, Register rt);
+ BufferOffset as_seh(Register rd, Register rt);
+
+ // FP instructions
+
+ // Use these two functions only when you are sure the address is aligned.
+ // Otherwise, use ma_ld and ma_sd.
+ BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);
+
+ BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);
+
+ // Loongson-specific FP load and store instructions
+ BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsldr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsssx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsldx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssdx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off);
+ BufferOffset as_gssq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off);
+
+ BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_movd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ctc1(Register rt, FPControl fc);
+ BufferOffset as_cfc1(Register rt, FPControl fc);
+
+ BufferOffset as_mtc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfc1(Register rt, FloatRegister fs);
+
+ BufferOffset as_mthc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfhc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmtc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmfc1(Register rt, FloatRegister fs);
+
+ public:
+ // FP convert instructions
+ BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncls(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncld(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);
+
+ // FP arithmetic instructions
+ BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+
+ BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
+
+ // FP compare instructions
+ BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+
+ // FP conditional move.
+ BufferOffset as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt);
+ BufferOffset as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt);
+
+ // label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ void bindLater(Label* label, wasm::TrapDesc target);
+ virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ virtual void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) = 0;
+ void bind(CodeOffset* label) {
+ label->bind(currentOffset());
+ }
+ uint32_t currentOffset() {
+ return nextOffset().getOffset();
+ }
+ void retarget(Label* label, Label* target);
+
+ // See Bind
+ size_t labelToPatchOffset(CodeOffset label) { return label.offset(); }
+
+ void call(Label* label);
+ void call(void* target);
+
+ void as_break(uint32_t code);
+ void as_sync(uint32_t stype = 0);
+
+ public:
+ static bool SupportsFloatingPoint() {
+#if (defined(__mips_hard_float) && !defined(__mips_single_float)) || \
+ defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool SupportsUnalignedAccesses() {
+ return true;
+ }
+ static bool SupportsSimd() {
+ return js::jit::SupportsSimd;
+ }
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ void addLongJump(BufferOffset src) {
+ enoughMemory_ &= longJumps_.append(src.getOffset());
+ }
+
+ public:
+ size_t numLongJumps() const {
+ return longJumps_.length();
+ }
+ uint32_t longJump(size_t i) {
+ return longJumps_[i];
+ }
+
+ void flushBuffer() {
+ }
+
+ void comment(const char* msg) {
+ // This is not implemented because setPrinter() is not implemented.
+ // TODO spew("; %s", msg);
+ }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
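+ // Round an argument slot count up to an even value (e.g. 3 -> 4, 4 -> 4),
+ // keeping 8-byte alignment for double arguments.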
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ return (offset + 1U) &~ 1U;
+ }
+
+ static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ bool bailed() {
+ return m_buffer.bail();
+ }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+ {
+ // Implement this if we implement a disassembler.
+ }
+}; // AssemblerMIPSShared
+
+// sll zero, zero, 0
+const uint32_t NopInst = 0x00000000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// MIPS instructions.
+class Instruction
+{
+ protected:
+ uint32_t data;
+
+ // Standard constructor
+ Instruction (uint32_t data_) : data(data_) { }
+
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+ public:
+ uint32_t encode() const {
+ return data;
+ }
+
+ void makeNop() {
+ data = NopInst;
+ }
+
+ void setData(uint32_t data) {
+ this->data = data;
+ }
+
+ const Instruction & operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract a single bit.
+ uint32_t extractBit(uint32_t bit) {
+ return (encode() >> bit) & 1;
+ }
+ // Extract a bit field out of the instruction
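+ // hi and lo are inclusive bit positions; for example, extractBitField(25, 21)
+ // returns the 5-bit rs register field.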
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+ // Since all MIPS instructions have an opcode, the opcode
+ // extractor resides in the base class.
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ // Return the fields at their original place in the instruction encoding.
+ Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(encode() & OpcodeMask);
+ }
+
+ // Get the next instruction in the instruction stream.
+ // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+ // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+ // an instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// make sure that it is the right size
+static_assert(sizeof(Instruction) == 4, "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction
+{
+ public:
+ InstNOP()
+ : Instruction(NopInst)
+ { }
+
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction
+{
+ public:
+ InstReg(Opcode op, Register rd, FunctionField ff)
+ : Instruction(op | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, RTField rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, uint32_t cc, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, uint32_t code, FunctionField ff)
+ : Instruction(op | code | ff)
+ { }
+ // For floating point.
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd)
+ : Instruction(op | rs | RT(rt) | RD(rd))
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff)
+ { }
+
+ uint32_t extractRS () {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT () {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ uint32_t extractRD () {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA () {
+ return extractBitField(SAShift + SABits - 1, SAShift);
+ }
+ uint32_t extractFunctionField () {
+ return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction
+{
+ public:
+ void extractImm16(BOffImm16* dest);
+
+ InstImm(Opcode op, Register rs, Register rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, RTField rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | rt | off.encode())
+ { }
+ InstImm(Opcode op, RSField rs, uint32_t cc, BOffImm16 off)
+ : Instruction(op | rs | cc | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, Register rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(uint32_t raw)
+ : Instruction(raw)
+ { }
+ // For floating-point loads and stores.
+ InstImm(Opcode op, Register rs, FloatRegister rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ void setOpcode(Opcode op) {
+ data = (data & ~OpcodeMask) | op;
+ }
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ void setRT(RTField rt) {
+ data = (data & ~RTMask) | rt;
+ }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+ void setImm16(Imm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction
+{
+ public:
+ InstJump(Opcode op, JOffImm26 off)
+ : Instruction(op | off.encode())
+ { }
+
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+// Class for Loongson-specific instructions
+class InstGS : public Instruction
+{
+ public:
+ // For indexed loads and stores.
+ InstGS(Opcode op, Register rs, Register rt, Register rd, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff)
+ { }
+ InstGS(Opcode op, Register rs, FloatRegister rt, Register rd, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff)
+ { }
+ // For quad-word loads and stores.
+ InstGS(Opcode op, Register rs, Register rt, Register rz, GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff)
+ { }
+ InstGS(Opcode op, Register rs, FloatRegister rt, FloatRegister rz, GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff)
+ { }
+ InstGS(uint32_t raw)
+ : Instruction(raw)
+ { }
+ // For floating-point unaligned loads and stores.
+ InstGS(Opcode op, Register rs, FloatRegister rt, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff)
+ { }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Assembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
new file mode 100644
index 000000000..31e221ab2
--- /dev/null
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -0,0 +1,241 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
+#define jit_mips_shared_AtomicOperations_mips_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
+// compatible option for older compilers: enable this to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
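+//
+// For example, fetchAddSeqCst() below compiles to
+// __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST) by default, and to
+// __sync_fetch_and_add(addr, val) when the macro is defined.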
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+# if _MIPS_SIM == _ABI64
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+# endif
+ return true;
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ ;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1);
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"
+
+#endif
+
+#endif // jit_mips_shared_AtomicOperations_mips_shared_h
diff --git a/js/src/jit/mips-shared/Bailouts-mips-shared.cpp b/js/src/jit/mips-shared/Bailouts-mips-shared.cpp
new file mode 100644
index 000000000..7d8c2a76d
--- /dev/null
+++ b/js/src/jit/mips-shared/Bailouts-mips-shared.cpp
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp
new file mode 100644
index 000000000..b8d8017a2
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/BaselineCompiler-mips-shared.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPSShared::BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h
new file mode 100644
index 000000000..43f32f997
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_BaselineCompiler_mips_shared_h
+#define jit_mips_shared_BaselineCompiler_mips_shared_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPSShared : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_BaselineCompiler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
new file mode 100644
index 000000000..dc4fcab1a
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+ masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
new file mode 100644
index 000000000..f3c776f42
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -0,0 +1,2931 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using JS::GenericNaN;
+using JS::ToInt32;
+
+// shared
+CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LAllocation& a)
+{
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ if (a.isFloatReg())
+ return Operand(a.toFloatReg()->reg());
+ return Operand(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LAllocation* a)
+{
+ return ToOperand(*a);
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LDefinition* def)
+{
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand
+CodeGeneratorMIPSShared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToOperand(input.value());
+}
+#else
+Register64
+CodeGeneratorMIPSShared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToRegister64(input);
+}
+#endif
+
+void
+CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ Label* label = mir->lir()->label();
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+
+ CodeOffsetJump backedge;
+ Label skip;
+ if (fmt == Assembler::DoubleFloat)
+ masm.ma_bc1d(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+ else
+ masm.ma_bc1s(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+
+ backedge = masm.backedgeJump(&rejoin);
+ masm.bind(&rejoin);
+ masm.bind(&skip);
+
+ if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+ MOZ_CRASH();
+ } else {
+ if (fmt == Assembler::DoubleFloat)
+ masm.branchDouble(cond, lhs, rhs, mir->lir()->label());
+ else
+ masm.branchFloat(cond, lhs, rhs, mir->lir()->label());
+ }
+}
+
+void
+OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen)
+{
+ codegen->visitOutOfLineBailout(this);
+}
+
+void
+CodeGeneratorMIPSShared::visitTestIAndBranch(LTestIAndBranch* test)
+{
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompare(LCompare* comp)
+{
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object) {
+ if (right->isGeneralReg())
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ else
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ return;
+ }
+#endif
+
+ if (right->isConstant())
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)), ToRegister(def));
+ else if (right->isGeneralReg())
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ else
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareAndBranch(LCompareAndBranch* comp)
+{
+ MCompare* mir = comp->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object) {
+ if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+ return;
+ }
+#endif
+
+ if (comp->right()->isConstant()) {
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.load32(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+}
+
+bool
+CodeGeneratorMIPSShared::generateOutOfLineCode()
+{
+ if (!CodeGeneratorShared::generateOutOfLineCode())
+ return false;
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ // The frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
+ // We have to use 'ra' because generateBailoutTable will implicitly do
+ // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ JitCode* handler = gen->jitRuntime()->getGenericBailoutHandler();
+
+ masm.branch(handler);
+ }
+
+ return !masm.oom();
+}
+
+void
+CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot)
+{
+ if (masm.bailed())
+ return;
+
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ // Though the assembler doesn't track all frame pushes, at least make sure
+ // the known value makes sense. We can't use bailout tables if the stack
+ // isn't properly aligned to the static frame size.
+ MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+ frameClass_.frameSize() == masm.framePushed());
+
+ // We don't use table bailouts because retargeting is easier this way.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+ addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void
+CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot)
+{
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void
+CodeGeneratorMIPSShared::visitMinMaxD(LMinMaxD* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxDouble(second, first, true);
+ else
+ masm.minDouble(second, first, true);
+}
+
+void
+CodeGeneratorMIPSShared::visitMinMaxF(LMinMaxF* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxFloat32(second, first, true);
+ else
+ masm.minFloat32(second, first, true);
+}
+
+void
+CodeGeneratorMIPSShared::visitAbsD(LAbsD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.as_absd(input, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitAbsF(LAbsF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.as_abss(input, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitSqrtD(LSqrtD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.as_sqrtd(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitSqrtF(LSqrtF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.as_sqrts(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitAddI(LAddI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant())
+ masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant())
+ masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+ else
+ masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitAddI64(LAddI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorMIPSShared::visitSubI(LSubI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant())
+ masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant())
+ masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+ else
+ masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitSubI64(LSubI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorMIPSShared::visitMulI(LMulI* ins)
+{
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+ // Bailout on -0.0
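+ // If the constant is 0, a negative lhs gives -0; if the constant is
+ // negative, lhs == 0 gives -0. Hence the condition below: test lhs < 0
+ // when the constant is 0, and lhs == 0 otherwise.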
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow())
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot());
+
+ masm.ma_negu(dest, src);
+ break;
+ case 0:
+ masm.move32(Imm32(0), dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.as_addu(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.ma_sll(dest, src, Imm32(shift));
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
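+ // For example, constant == 10 == (1<<3) | (1<<1): shift == 3, rest == 2,
+ // shift_rest == 1, so we emit dest = src << 2; dest += src; dest <<= 1,
+ // i.e. src * 10.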
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.ma_sll(dest, src, Imm32(shift - shift_rest));
+ masm.add32(src, dest);
+ if (shift_rest != 0)
+ masm.ma_sll(dest, dest, Imm32(shift_rest));
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ // dest = lhs * pow(2, shift)
+ masm.ma_sll(dest, src, Imm32(shift));
+ // At runtime, check that (lhs == dest >> shift); if this does
+ // not hold, some bits were lost due to overflow, and the
+ // computation should be resumed as a double.
+ masm.ma_sra(ScratchRegister, dest, Imm32(shift));
+ bailoutCmp32(Assembler::NotEqual, src, ScratchRegister, ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul_branch_overflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs), &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+ // The result is -0 if lhs or rhs is negative.
+ // In that case the result must be a double value, so bail out.
+ Register scratch = SecondScratchReg;
+ masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitMulI64(LMulI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitDivI(LDivI* ins)
+{
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, temp, trap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+ // Handle negative 0. (0/-Y)
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+    // Note: the safety checks above could not be verified, as Ion seems to
+    // be smarter and requires double arithmetic in such cases.
+
+    // All the special cases are handled. Let's call div.
+ if (mir->canTruncateRemainder()) {
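+        // div leaves the quotient in LO and the remainder in HI; mflo reads
+        // the quotient.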
+ masm.as_div(lhs, rhs);
+ masm.as_mflo(dest);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitDivPowTwoI(LDivPowTwoI* ins)
+{
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+ // If the remainder is going to be != 0, bailout since this must
+ // be a double.
+ masm.ma_sll(tmp, lhs, Imm32(32 - shift));
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+            // The numerator cannot be negative, so it needs no adjusting.
+            // Do the shift.
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
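+        // For example, lhs == -7 with shift == 2: the sign mask is -1, the
+        // logical shift by 30 gives a bias of 3, and (-7 + 3) >> 2 == -1,
+        // matching the truncating division -7 / 4 == -1.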
+ if (shift > 1) {
+ masm.ma_sra(tmp, lhs, Imm32(31));
+ masm.ma_srl(tmp, tmp, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ } else {
+ masm.ma_srl(tmp, lhs, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.ma_sra(dest, tmp, Imm32(shift));
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitModI(LModI* ins)
+{
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
+ masm.move32(lhs, callTemp);
+
+ // Prevent INT_MIN % -1;
+ // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+ // 0/X (with X < 0) is bad because both of these values *should* be
+ // doubles, and the result should be -0.0, which cannot be represented in
+ // integers. X/0 is bad because it will give garbage (or abort), when it
+ // should give either \infty, -\infty or NAN.
+
+    // Prevent 0 / X (with X < 0) and X / 0.
+    // We are testing X / Y here; compare Y with 0.
+    // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
+    // If (Y < 0), then we compare X with 0, and bail if X == 0.
+    // If (Y == 0), then we simply want to bail.
+    // If (Y > 0), we don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+
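+    // div leaves the quotient in LO and the remainder in HI; mfhi reads the
+    // remainder, which is the result of the modulus.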
+ masm.as_div(lhs, rhs);
+ masm.as_mfhi(dest);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+            // If the remainder is nonzero, the result is not -0; otherwise
+            // bail out if X < 0.
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitModPowTwoI(LModPowTwoI* ins)
+{
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+ // Switch based on sign of the lhs.
+ // Positive numbers are just a bitmask
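+    // For example, 13 % 8 == 13 & 7 == 5.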
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_b(&done, ShortJump);
+ }
+
+ // Negative numbers need a negate, bitmask, negate
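+    // For example, -13 % 8 == -(13 & 7) == -5; the result keeps the sign of
+    // the lhs.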
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitModMaskI(LModMaskI* ins)
+{
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitNotI(LBitNotI* ins)
+{
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.ma_not(ToRegister(dest), ToRegister(input));
+}
+
+void
+CodeGeneratorMIPSShared::visitBitOpI(LBitOpI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+    // All of these bitops should be either imm32s or integer registers.
+ switch (ins->bitop()) {
+ case JSOP_BITOR:
+ if (rhs->isConstant())
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ case JSOP_BITXOR:
+ if (rhs->isConstant())
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ case JSOP_BITAND:
+ if (rhs->isConstant())
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitOpI64(LBitOpI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOP_BITOR:
+ if (IsConstant(rhs))
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITXOR:
+ if (IsConstant(rhs))
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITAND:
+ if (IsConstant(rhs))
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitShiftI(LShiftI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.ma_sll(dest, lhs, Imm32(shift));
+ else
+ masm.move32(lhs, dest);
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ else
+ masm.move32(lhs, dest);
+ break;
+ case JSOP_URSH:
+ if (shift) {
+ masm.ma_srl(dest, lhs, Imm32(shift));
+ } else {
+                // x >>> 0 can overflow: the unsigned result may not fit in
+                // an int32.
+ if (ins->mir()->toUrsh()->fallible())
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ // The shift amounts should be AND'ed into the 0-31 range
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ masm.ma_sll(dest, lhs, dest);
+ break;
+ case JSOP_RSH:
+ masm.ma_sra(dest, lhs, dest);
+ break;
+ case JSOP_URSH:
+ masm.ma_srl(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+                // x >>> 0 can overflow: the unsigned result may not fit in
+                // an int32.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitShiftI64(LShiftI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ if (shift)
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitRotateI64(LRotateI64* lir)
+{
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c)
+ return;
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ else
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ } else {
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ else
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitUrshD(LUrshD* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
+ } else {
+ masm.ma_srl(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void
+CodeGeneratorMIPSShared::visitClzI(LClzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_clz(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitCtzI(LCtzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_ctz(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitPopcntI(LPopcntI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void
+CodeGeneratorMIPSShared::visitPopcntI64(LPopcntI64* ins)
+{
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.popcnt64(input, output, tmp);
+}
+
+void
+CodeGeneratorMIPSShared::visitPowHalfD(LPowHalfD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ Label done, skip;
+
+    // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
+ masm.ma_bc1d(input, ScratchDoubleReg, &skip, Assembler::DoubleNotEqualOrUnordered, ShortJump);
+ masm.as_negd(output, ScratchDoubleReg);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.as_addd(output, input, ScratchDoubleReg);
+ masm.as_sqrtd(output, output);
+
+ masm.bind(&done);
+}
+
+MoveOperand
+CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const
+{
+ if (a.isGeneralReg())
+ return MoveOperand(ToRegister(a));
+    if (a.isFloatReg())
+        return MoveOperand(ToFloatRegister(a));
+ int32_t offset = ToStackOffset(a);
+ MOZ_ASSERT((offset & 3) == 0);
+
+ return MoveOperand(StackPointer, offset);
+}
+
+void
+CodeGeneratorMIPSShared::visitMathD(LMathD* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.as_addd(output, src1, src2);
+ break;
+ case JSOP_SUB:
+ masm.as_subd(output, src1, src2);
+ break;
+ case JSOP_MUL:
+ masm.as_muld(output, src1, src2);
+ break;
+ case JSOP_DIV:
+ masm.as_divd(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitMathF(LMathF* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.as_adds(output, src1, src2);
+ break;
+ case JSOP_SUB:
+ masm.as_subs(output, src1, src2);
+ break;
+ case JSOP_MUL:
+ masm.as_muls(output, src1, src2);
+ break;
+ case JSOP_DIV:
+ masm.as_divs(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitFloor(LFloor* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label skipCheck, done;
+
+    // If NaN, 0 or -0, check for bailout.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_floorwd(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitFloorF(LFloorF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label skipCheck, done;
+
+    // If NaN, 0 or -0, check for bailout.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleLo(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_floorws(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitCeil(LCeil* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label performCeil, done;
+
+ // If x < -1 or x > 0 then perform ceil.
+ masm.loadConstantDouble(0, scratch);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
+ masm.loadConstantDouble(-1, scratch);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch, &performCeil);
+
+ // If high part is not zero, the input was not 0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&performCeil);
+ masm.as_ceilwd(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitCeilF(LCeilF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label performCeil, done;
+
+ // If x < -1 or x > 0 then perform ceil.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.branchFloat(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
+ masm.loadConstantFloat32(-1.0f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch, &performCeil);
+
+ // If binary value is not zero, the input was not 0, so we bail.
+ masm.moveFromFloat32(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&performCeil);
+ masm.as_ceilws(scratch, input);
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitRound(LRound* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label bail, negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
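+    // Adding 0.5 itself would round some inputs just below 0.5 the wrong way:
+    // 0.49999999999999994 + 0.5 evaluates to exactly 1.0 in double
+    // arithmetic, which would make round(0.49999999999999994) return 1
+    // instead of 0.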
+ masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If NaN, 0 or -0, check for bailout.
+ masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&end, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_addd(scratch, input, temp);
+ masm.as_floorwd(scratch, scratch);
+
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
+    // the biggest double less than 0.5 added to them.
+ Label loadJoin;
+ masm.loadConstantDouble(-0.5, scratch);
+ masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantDouble(0.5, temp);
+ masm.bind(&loadJoin);
+
+ masm.addDouble(input, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ masm.as_floorwd(scratch, temp);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorMIPSShared::visitRoundF(LRoundF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label bail, negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
+ masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.ma_bc1s(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If NaN, 0 or -0, check for bailout.
+ masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ masm.moveFromFloat32(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&end, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_adds(scratch, input, temp);
+ masm.as_floorws(scratch, scratch);
+
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
+    // the biggest double less than 0.5 added to them.
+ Label loadJoin;
+ masm.loadConstantFloat32(-0.5f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantFloat32(0.5f, temp);
+ masm.bind(&loadJoin);
+
+ masm.as_adds(temp, input, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ masm.as_floorws(scratch, temp);
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorMIPSShared::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void
+CodeGeneratorMIPSShared::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
+{
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (mir->isUnsigned()) {
+ // When the input value is Infinity, NaN, or rounds to an integer outside the
+ // range [INT64_MIN; INT64_MAX + 1[, the Invalid Operation flag is set in the FCSR.
+ if (fromType == MIRType::Double)
+ masm.as_truncld(ScratchDoubleReg, input);
+ else if (fromType == MIRType::Float32)
+ masm.as_truncls(ScratchDoubleReg, input);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ // Check that the result is in the uint32_t range.
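+        // The truncation fits in a uint32 iff the high word of the 64-bit
+        // result is zero and the FCSR Invalid Operation cause bit (bit 16,
+        // extracted below) is clear; OR the two together and test for zero.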
+ masm.moveFromDoubleHi(ScratchDoubleReg, output);
+ masm.as_cfc1(ScratchRegister, Assembler::FCSR);
+ masm.as_ext(ScratchRegister, ScratchRegister, 16, 1);
+ masm.ma_or(output, ScratchRegister);
+ masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.moveFromFloat32(ScratchDoubleReg, output);
+ return;
+ }
+
+ // When the input value is Infinity, NaN, or rounds to an integer outside the
+ // range [INT32_MIN; INT32_MAX + 1[, the Invalid Operation flag is set in the FCSR.
+ if (fromType == MIRType::Double)
+ masm.as_truncwd(ScratchFloat32Reg, input);
+ else if (fromType == MIRType::Float32)
+ masm.as_truncws(ScratchFloat32Reg, input);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ // Check that the result is in the int32_t range.
+ masm.as_cfc1(output, Assembler::FCSR);
+ masm.as_ext(output, output, 16, 1);
+ masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.bind(ool->rejoin());
+ masm.moveFromFloat32(ScratchFloat32Reg, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool)
+{
+ FloatRegister input = ool->input();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+
+ // Eagerly take care of NaNs.
+ Label inputIsNaN;
+ if (fromType == MIRType::Double)
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else if (fromType == MIRType::Float32)
+ masm.branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+
+ Label fail;
+
+ // Handle special values (not needed for unsigned values).
+ if (!ool->isUnsigned()) {
+ if (toType == MIRType::Int32) {
+ // MWasmTruncateToInt32
+ if (fromType == MIRType::Double) {
+                // We've used trunc.w.d. The only valid double values that
+                // can truncate to INT32_MIN are in ]INT32_MIN - 1; INT32_MIN].
+ masm.loadConstantDouble(double(INT32_MIN) - 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(INT32_MIN), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &fail);
+
+ masm.as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
+ masm.jump(ool->rejoin());
+ }
+ } else if (toType == MIRType::Int64) {
+ if (fromType == MIRType::Double) {
+ masm.loadConstantDouble(double(INT64_MIN), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThan, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(INT64_MAX) + 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input,
+ ScratchDoubleReg, &fail);
+ masm.jump(ool->rejoin());
+ }
+ }
+ } else {
+ if (toType == MIRType::Int64) {
+ if (fromType == MIRType::Double) {
+ masm.loadConstantDouble(double(-1), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(UINT64_MAX) + 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input,
+ ScratchDoubleReg, &fail);
+ masm.jump(ool->rejoin());
+ }
+ }
+ }
+
+ // Handle errors.
+ masm.bind(&fail);
+ masm.jump(trap(ool, wasm::Trap::IntegerOverflow));
+
+ masm.bind(&inputIsNaN);
+ masm.jump(trap(ool, wasm::Trap::InvalidConversionToInteger));
+}
+
+void
+CodeGeneratorMIPSShared::visitCopySignF(LCopySignF* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.moveFromFloat32(lhs, lhsi);
+ masm.moveFromFloat32(rhs, rhsi);
+
+ // Combine.
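+    // ins copies the low 31 bits (the magnitude) of lhsi into rhsi while
+    // keeping rhsi's sign bit, giving a value with lhs's magnitude and rhs's
+    // sign.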
+ masm.as_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToFloat32(rhsi, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitCopySignD(LCopySignD* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.moveFromDoubleHi(lhs, lhsi);
+ masm.moveFromDoubleHi(rhs, rhsi);
+
+ // Combine.
+ masm.as_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToDoubleHi(rhsi, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitValue(LValue* value)
+{
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void
+CodeGeneratorMIPSShared::visitDouble(LDouble* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorMIPSShared::visitFloat32(LFloat32* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorMIPSShared::visitTestDAndBranch(LTestDAndBranch* test)
+{
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitTestFAndBranch(LTestFAndBranch* test)
+{
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareD(LCompareD* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareF(LCompareF* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareDAndBranch(LCompareDAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareFAndBranch(LCompareFAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitAndAndBranch(LBitAndAndBranch* lir)
+{
+ if (lir->right()->isConstant())
+ masm.ma_and(ScratchRegister, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+ else
+ masm.as_and(ScratchRegister, ToRegister(lir->left()), ToRegister(lir->right()));
+ emitBranch(ScratchRegister, ScratchRegister, Assembler::NonZero, lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitNotI(LNotI* ins)
+{
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitNotD(LNotD* ins)
+{
+    // Since this operation is a logical NOT, we want to set the output if
+    // the double is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.ma_cmp_set_double(dest, in, ScratchDoubleReg, Assembler::DoubleEqualOrUnordered);
+}
+
+void
+CodeGeneratorMIPSShared::visitNotF(LNotF* ins)
+{
+    // Since this operation is a logical NOT, we want to set the output if
+    // the float32 is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg, Assembler::DoubleEqualOrUnordered);
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardShape(LGuardShape* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ masm.loadPtr(Address(obj, ShapedObject::offsetOfShape()), tmp);
+ bailoutCmpPtr(Assembler::NotEqual, tmp, ImmGCPtr(guard->mir()->shape()),
+ guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardObjectGroup(LGuardObjectGroup* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+ MOZ_ASSERT(obj != tmp);
+
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), tmp);
+ Assembler::Condition cond = guard->mir()->bailOnEquality()
+ ? Assembler::Equal
+ : Assembler::NotEqual;
+ bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->group()), guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardClass(LGuardClass* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ masm.loadObjClass(obj, tmp);
+ bailoutCmpPtr(Assembler::NotEqual, tmp, ImmPtr(guard->mir()->getClass()),
+ guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitMemoryBarrier(LMemoryBarrier* ins)
+{
+ masm.memoryBarrier(ins->type());
+}
+
+void
+CodeGeneratorMIPSShared::generateInvalidateEpilogue()
+{
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+ // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+ masm.nop();
+
+ masm.bind(&invalidate_);
+
+    // Push the return address of the point we bailed out at onto the stack.
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+ JitCode* thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.branch(thunk);
+
+ // We should never reach this point in JIT code -- the invalidation thunk
+ // should pop the invalidated JS frame and return directly to its caller.
+ masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
+}
+
+void
+CodeGeneratorMIPSShared::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorMIPSShared::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset <= INT32_MAX);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Float64: isFloat = true; break;
+ case Scalar::Float32: isFloat = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ BaseIndex address(HeapReg, ptr, TimesOne);
+
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
+ else
+ masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
+ } else {
+ masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+ return;
+ }
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.loadFloat32(address, ToFloatRegister(lir->output()));
+ else
+ masm.loadDouble(address, ToFloatRegister(lir->output()));
+ } else {
+ masm.ma_load(ToRegister(lir->output()), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
+{
+ emitWasmLoad(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
+{
+ emitWasmLoad(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPSShared::emitWasmStore(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset <= INT32_MAX);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ case Scalar::Float64: isFloat = true; break;
+ case Scalar::Float32: isFloat = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ BaseIndex address(HeapReg, ptr, TimesOne);
+
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
+ else
+ masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
+ } else {
+ masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+ return;
+ }
+
+ if (isFloat) {
+        if (byteSize == 4)
+            masm.storeFloat32(ToFloatRegister(lir->value()), address);
+        else
+            masm.storeDouble(ToFloatRegister(lir->value()), address);
+ } else {
+ masm.ma_store(ToRegister(lir->value()), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
+{
+ emitWasmStore(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
+{
+ emitWasmStore(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; size = 8; break;
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16: isSigned = true; size = 16; break;
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32: isSigned = true; size = 32; break;
+ case Scalar::Uint32: isSigned = false; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+ Label done, outOfRange;
+ masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
+ // Offset is ok, let's load value.
+ if (isFloat) {
+ if (size == 32)
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ else
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&outOfRange);
+ // Offset is out of range. Load default values.
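+    // Out-of-bounds asm.js loads are not errors: float loads produce NaN and
+    // integer loads produce 0.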
+ if (isFloat) {
+ if (size == 32)
+ masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - WasmGlobalRegBias),
+ ToFloatRegister(out));
+ else
+ masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - WasmGlobalRegBias),
+ ToFloatRegister(out));
+ } else {
+ masm.move32(Imm32(0), ToRegister(out));
+ }
+ masm.bind(&done);
+
+ masm.append(wasm::BoundsCheck(bo.getOffset()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; size = 8; break;
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16: isSigned = true; size = 16; break;
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32: isSigned = true; size = 32; break;
+ case Scalar::Uint32: isSigned = false; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ Address addr(HeapReg, ptrImm);
+ if (size == 32)
+ masm.storeFloat32(freg, addr);
+ else
+ masm.storeDouble(freg, addr);
+ } else {
+ masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+ Address dstAddr(ptrReg, 0);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ BaseIndex bi(HeapReg, ptrReg, TimesOne);
+ if (size == 32)
+ masm.storeFloat32(freg, bi);
+ else
+ masm.storeDouble(freg, bi);
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+ Label outOfRange;
+ masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
+
+ // Offset is ok, let's store value.
+ if (isFloat) {
+        if (size == 32)
+            masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+        else
+            masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.bind(&outOfRange);
+ masm.append(wasm::BoundsCheck(bo.getOffset()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ const LAllocation* ptr = ins->ptr();
+ Register ptrReg = ToRegister(ptr);
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+
+ masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, oldval, newval, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+
+ masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, value, InvalidReg, valueTemp,
+ offsetTemp, maskTemp, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+ else
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ ToRegister(value), srcAddr, flagTemp, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
+ valueTemp, offsetTemp, maskTemp);
+ else
+ atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStackArg(LWasmStackArg* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStackArgI64(LWasmStackArgI64* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg()))
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ else
+ masm.store64(ToRegister64(ins->arg()), dst);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmSelect(LWasmSelect* ins)
+{
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
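+        // movz copies falseExpr into out only when cond is zero, i.e. when
+        // the condition is false; otherwise out keeps the true value.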
+ masm.as_movz(out, ToRegister(falseExpr), cond);
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32)
+ masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr), cond);
+ else if (mirType == MIRType::Double)
+ masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr), cond);
+ else
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32)
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ else if (mirType == MIRType::Double)
+ masm.loadDouble(ToAddress(falseExpr), out);
+ else
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmReinterpret(LWasmReinterpret* lir)
+{
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitUDivOrMod(LUDivOrMod* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+                masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else {
+ // Infinity|0 == 0
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
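+    // divu leaves the quotient in LO and the remainder in HI; read the
+    // remainder first.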
+ masm.as_divu(lhs, rhs);
+ masm.as_mfhi(output);
+
+ // If the remainder is > 0, bailout since this must be a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder())
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ // Get quotient
+ masm.as_mflo(output);
+ }
+
+ if (!ins->mir()->isTruncated())
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitEffectiveAddress(LEffectiveAddress* ins)
+{
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ if (mir->type() == MIRType::Int32)
+ masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
+ else if (mir->type() == MIRType::Float32)
+ masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+ else
+ masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(IsNumberType(mir->value()->type()));
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ if (mir->value()->type() == MIRType::Int32)
+ masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
+ else if (mir->value()->type() == MIRType::Float32)
+ masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+ else
+ masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+}
+
+void
+CodeGeneratorMIPSShared::visitNegI(LNegI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_negu(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitNegD(LNegD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negd(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitNegF(LNegF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negs(output, input);
+}
+
+template<typename S, typename T>
+void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem, Register flagTemp,
+ Register outTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+ MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ masm.convertUInt32ToDouble(outTemp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
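+// Explicitly instantiate the value/memory operand combinations used by the
+// helpers below (immediate or register value, Address or BaseIndex memory).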
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+
+void
+CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
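+    // If adding the constant offset carries out of 32 bits, the access is out
+    // of bounds, so route the carry case to the OutOfBounds trap.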
+ masm.ma_addTestCarry(out, base, Imm32(mir->offset()), trap(mir, wasm::Trap::OutOfBounds));
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op,
+ Scalar::Type arrayType, const LAllocation* value, const T& mem,
+ Register flagTemp, Register outTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, AnyRegister output)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->temp1());
+ Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op, Scalar::Type arrayType,
+ const LAllocation* value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->flagTemp());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+ const LAllocation* value = lir->value();
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
new file mode 100644
index 000000000..ff5cca196
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_CodeGenerator_mips_shared_h
+#define jit_mips_shared_CodeGenerator_mips_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorMIPSShared : public CodeGeneratorShared
+{
+ friend class MoveResolverMIPS;
+
+ CodeGeneratorMIPSShared* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template<typename T>
+ void bailoutTest32(Assembler::Condition c, Register lhs, T rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTestPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
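+        // Only the low byte of |reg| holds a well-defined boolean value.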
+ masm.branchTest32(Assembler::Zero, reg, Imm32(0xFF), &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ protected:
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir, Assembler::Condition cond)
+ {
+ mir = skipTrivialBlocks(mir);
+
+ Label* label = mir->lir()->label();
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge;
+ Label skip;
+
+ masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+ backedge = masm.backedgeJump(&rejoin);
+ masm.bind(&rejoin);
+ masm.bind(&skip);
+
+ if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+ MOZ_CRASH();
+ } else {
+ masm.ma_b(lhs, rhs, label, cond);
+ }
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse)
+ {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ public:
+ // Instruction visitors.
+ virtual void visitMinMaxD(LMinMaxD* ins);
+ virtual void visitMinMaxF(LMinMaxF* ins);
+ virtual void visitAbsD(LAbsD* ins);
+ virtual void visitAbsF(LAbsF* ins);
+ virtual void visitSqrtD(LSqrtD* ins);
+ virtual void visitSqrtF(LSqrtF* ins);
+ virtual void visitAddI(LAddI* ins);
+ virtual void visitAddI64(LAddI64* ins);
+ virtual void visitSubI(LSubI* ins);
+ virtual void visitSubI64(LSubI64* ins);
+ virtual void visitBitNotI(LBitNotI* ins);
+ virtual void visitBitOpI(LBitOpI* ins);
+ virtual void visitBitOpI64(LBitOpI64* ins);
+
+ virtual void visitMulI(LMulI* ins);
+ virtual void visitMulI64(LMulI64* ins);
+
+ virtual void visitDivI(LDivI* ins);
+ virtual void visitDivPowTwoI(LDivPowTwoI* ins);
+ virtual void visitModI(LModI* ins);
+ virtual void visitModPowTwoI(LModPowTwoI* ins);
+ virtual void visitModMaskI(LModMaskI* ins);
+ virtual void visitPowHalfD(LPowHalfD* ins);
+ virtual void visitShiftI(LShiftI* ins);
+ virtual void visitShiftI64(LShiftI64* ins);
+ virtual void visitRotateI64(LRotateI64* lir);
+ virtual void visitUrshD(LUrshD* ins);
+
+ virtual void visitClzI(LClzI* ins);
+ virtual void visitCtzI(LCtzI* ins);
+ virtual void visitPopcntI(LPopcntI* ins);
+ virtual void visitPopcntI64(LPopcntI64* lir);
+
+ virtual void visitTestIAndBranch(LTestIAndBranch* test);
+ virtual void visitCompare(LCompare* comp);
+ virtual void visitCompareAndBranch(LCompareAndBranch* comp);
+ virtual void visitTestDAndBranch(LTestDAndBranch* test);
+ virtual void visitTestFAndBranch(LTestFAndBranch* test);
+ virtual void visitCompareD(LCompareD* comp);
+ virtual void visitCompareF(LCompareF* comp);
+ virtual void visitCompareDAndBranch(LCompareDAndBranch* comp);
+ virtual void visitCompareFAndBranch(LCompareFAndBranch* comp);
+ virtual void visitBitAndAndBranch(LBitAndAndBranch* lir);
+ virtual void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ virtual void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ virtual void visitNotI(LNotI* ins);
+ virtual void visitNotD(LNotD* ins);
+ virtual void visitNotF(LNotF* ins);
+
+ virtual void visitMathD(LMathD* math);
+ virtual void visitMathF(LMathF* math);
+ virtual void visitFloor(LFloor* lir);
+ virtual void visitFloorF(LFloorF* lir);
+ virtual void visitCeil(LCeil* lir);
+ virtual void visitCeilF(LCeilF* lir);
+ virtual void visitRound(LRound* lir);
+ virtual void visitRoundF(LRoundF* lir);
+ virtual void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ virtual void visitTruncateFToInt32(LTruncateFToInt32* ins);
+
+ void visitWasmTruncateToInt32(LWasmTruncateToInt32* lir);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+
+ // Out of line visitors.
+ virtual void visitOutOfLineBailout(OutOfLineBailout* ool) = 0;
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+ void visitCopySignD(LCopySignD* ins);
+ void visitCopySignF(LCopySignF* ins);
+
+ protected:
+ virtual ValueOperand ToOutValue(LInstruction* ins) = 0;
+
+ public:
+ CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ void visitValue(LValue* value);
+ void visitDouble(LDouble* ins);
+ void visitFloat32(LFloat32* ins);
+
+ void visitGuardShape(LGuardShape* guard);
+ void visitGuardObjectGroup(LGuardObjectGroup* guard);
+ void visitGuardClass(LGuardClass* guard);
+
+ void visitNegI(LNegI* lir);
+ void visitNegD(LNegD* lir);
+ void visitNegF(LNegF* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
+ void visitWasmAddOffset(LWasmAddOffset* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+
+ void visitWasmStackArg(LWasmStackArg* ins);
+ void visitWasmStackArgI64(LWasmStackArgI64* ins);
+ void visitWasmSelect(LWasmSelect* ins);
+ void visitWasmReinterpret(LWasmReinterpret* ins);
+
+ void visitMemoryBarrier(LMemoryBarrier* ins);
+ void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
+ void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
+ void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ protected:
+ void visitEffectiveAddress(LEffectiveAddress* ins);
+ void visitUDivOrMod(LUDivOrMod* ins);
+
+ public:
+ // Unimplemented SIMD instructions
+ void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); }
+ void visitSimd128Float(LSimd128Float* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdReinterpretCast(LSimdReinterpretCast* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementI(LSimdExtractElementI* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementF(LSimdExtractElementF* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleI(LSimdGeneralShuffleI* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleF(LSimdGeneralShuffleF* lir) { MOZ_CRASH("NYI"); }
+};
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorMIPSShared>
+{
+ LSnapshot* snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+ : snapshot_(snapshot),
+ frameSize_(frameSize)
+ { }
+
+ void accept(CodeGeneratorMIPSShared* codegen);
+
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_CodeGenerator_mips_shared_h */
diff --git a/js/src/jit/mips-shared/LIR-mips-shared.h b/js/src/jit/mips-shared/LIR-mips-shared.h
new file mode 100644
index 000000000..466965e84
--- /dev/null
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -0,0 +1,408 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_LIR_mips_shared_h
+#define jit_mips_shared_LIR_mips_shared_h
+
+namespace js {
+namespace jit {
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+
+ int32_t shift() {
+ return shift_;
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(0);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift()
+ {
+ return shift_;
+ }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0, const LDefinition& temp1,
+ int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitch : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() {
+ return getTemp(1);
+ }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, const LDefinition& jumpTablePointer,
+ MTableSwitch* ins)
+ {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition* tempPointer() {
+ return getTemp(2);
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardShape);
+
+ LGuardShape(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardShape* mir() const {
+ return mir_->toGuardShape();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardObjectGroup);
+
+ LGuardObjectGroup(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardObjectGroup* mir() const {
+ return mir_->toGuardObjectGroup();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LMulI : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(MulI);
+
+ MMul* mir() {
+ return mir_->toMul();
+ }
+};
+
+class LUDivOrMod : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOnError();
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const {
+ return mir_->toInt64ToFloatingPoint();
+ }
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2>
+{
+ public:
+ typedef LWasmLoadBase<NumDefs, 2> Base;
+
+ explicit LWasmUnalignedLoadBase(const LAllocation& ptr, const LDefinition& valueHelper)
+ : Base(ptr)
+ {
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(0);
+ }
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1>
+{
+ public:
+ explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(ptr, valueHelper)
+ {}
+ LIR_HEADER(WasmUnalignedLoad);
+};
+
+class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase<INT64_PIECES>
+{
+ public:
+ explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(ptr, valueHelper)
+ {}
+ LIR_HEADER(WasmUnalignedLoadI64);
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2>
+{
+ public:
+ typedef LInstructionHelper<0, NumOps, 2> Base;
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmUnalignedStoreBase(const LAllocation& ptr, const LDefinition& valueHelper)
+ {
+ Base::setOperand(0, ptr);
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+ MWasmStore* mir() const {
+ return Base::mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStore);
+ LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, valueHelper)
+ {
+ setOperand(1, value);
+ }
+ const LAllocation* value() {
+ return Base::getOperand(ValueIndex);
+ }
+};
+
+class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStoreI64);
+ LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, valueHelper)
+ {
+ setInt64Operand(1, value);
+ }
+ const LInt64Allocation value() {
+ return getInt64Operand(ValueIndex);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_LIR_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
new file mode 100644
index 000000000..f328d16f7
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -0,0 +1,753 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
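+// MIPS has no x86-style byte-register constraints, so the byte-op helpers can
+// simply hand out ordinary registers.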
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition
+LIRGeneratorMIPSShared::tempByteOpRegister()
+{
+ return temp();
+}
+
+// x = !y
+void
+LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input)
+{
+ ins->setOperand(0, useRegister(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void
+LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void
+LIRGeneratorMIPSShared::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
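+    // The lhs operand is reused as the output (defineInt64ReuseInput below),
+    // so it must be allocated at-start.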
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ bool needsTemp = false;
+
+#ifdef JS_CODEGEN_MIPS32
+ needsTemp = true;
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorMIPSShared::visitMulI64
+ if (constant >= -1 && constant <= 2)
+ needsTemp = false;
+ if (int64_t(1) << shift == constant)
+ needsTemp = false;
+ }
+#endif
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ if (needsTemp)
+ ins->setTemp(0, temp());
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template<size_t Temps>
+void
+LIRGeneratorMIPSShared::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#if defined(JS_NUNBOX32)
+ if (mir->isRotate())
+ ins->setTemp(0, temp());
+#endif
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES, "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES, "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input)
+{
+ ins->setOperand(0, useRegister(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template<size_t Temps>
+void
+LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void
+LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+void
+LIRGeneratorMIPSShared::lowerDivI(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+}
+
+void
+LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
+{
+ LMulI* lir = new(alloc()) LMulI;
+ if (mul->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void
+LIRGeneratorMIPSShared::lowerModI(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
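+            // rhs + 1 is a power of two, so this modulus can be handled by the
+            // mask-based LModMaskI path.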
+ LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
+ temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL),
+ shift + 1);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+}
+
+void
+LIRGeneratorMIPSShared::visitPowHalf(MPowHalf* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+LTableSwitch*
+LIRGeneratorMIPSShared::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV*
+LIRGeneratorMIPSShared::newLTableSwitchV(MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)),
+ temp(), tempDouble(), temp(), tableswitch);
+}
+
+void
+LIRGeneratorMIPSShared::visitGuardShape(MGuardShape* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorMIPSShared::visitGuardObjectGroup(MGuardObjectGroup* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir)
+{
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSNeg(MAsmJSNeg* ins)
+{
+ if (ins->type() == MIRType::Int32) {
+ define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation ptr = useRegisterAtStart(base);
+
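+    // When the access carries a constant offset, the code generator needs a
+    // scratch copy of the base register to fold the offset into; that is what
+    // the tempCopy(base) temps below provide.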
+ if (ins->access().isUnaligned()) {
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ define(lir, ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmLoad(ptr);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* value = ins->value();
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ if (ins->access().isUnaligned()) {
+        if (value->type() == MIRType::Int64) {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+    if (value->type() == MIRType::Int64) {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmSelect(MWasmSelect* ins)
+{
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
+ useInt64(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmSelect(useRegisterAtStart(ins->trueExpr()),
+ use(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
+}
+
+void
+LIRGeneratorMIPSShared::lowerUDiv(MDiv* div)
+{
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ define(lir, div);
+}
+
+void
+LIRGeneratorMIPSShared::lowerUMod(MMod* mod)
+{
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ define(lir, mod);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+
+ // For MIPS it is best to keep the 'base' in a register if a bounds check
+ // is needed.
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ // A bounds check is only skipped for a positive index.
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else
+ baseAlloc = useRegisterAtStart(base);
+
+ define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else
+ baseAlloc = useRegisterAtStart(base);
+
+ add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitSubstr(MSubstr* ins)
+{
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ temp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+ LDefinition uint32Temp = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ uint32Temp = temp();
+
+ LCompareExchangeTypedArrayElement* lir =
+ new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, uint32Temp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ const LAllocation value = useRegister(ins->value());
+ LDefinition uint32Temp = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ uint32Temp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, uint32Temp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
+ useRegister(ins->oldValue()),
+ useRegister(ins->newValue()),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ // The output may not be used but will be clobbered regardless,
+ // so ignore the case where we're not using the value and just
+ // use the output register as a temp.
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value,
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (!ins->hasUses()) {
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()),
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
+ useRegister(ins->value()),
+ /* temp= */ LDefinition::BogusTemp(),
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+ const LAllocation value = useRegister(ins->value());
+
+ if (!ins->hasUses()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition flagTemp = temp();
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ outTemp = temp();
+
+ // On MIPS, map flagTemp to temp1 and outTemp to temp2, at least for now.
+
+ LAtomicTypedArrayElementBinop* lir =
+ new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitCopySign(MCopySign* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double)
+ lir = new(alloc()) LCopySignD();
+ else
+ lir = new(alloc()) LCopySignF();
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void
+LIRGeneratorMIPSShared::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
new file mode 100644
index 000000000..a92addfe3
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Lowering_mips_shared_h
+#define jit_mips_shared_Lowering_mips_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPSShared : public LIRGeneratorShared
+{
+ protected:
+ LIRGeneratorMIPSShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // x86 has constraints on which registers can be used for 1-byte
+ // stores and loads; on MIPS all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ bool needTempForPostBarrier() { return false; }
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+ template<size_t Temps>
+ void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template<size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void visitPowHalf(MPowHalf* ins);
+ void visitAsmJSNeg(MAsmJSNeg* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitWasmSelect(MWasmSelect* ins);
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ public:
+ void lowerPhi(MPhi* phi);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardObjectGroup(MGuardObjectGroup* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitCopySign(MCopySign* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Lowering_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
new file mode 100644
index 000000000..f2eb0c9b2
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -0,0 +1,1030 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_inl_h
+#define jit_mips_shared_MacroAssembler_mips_shared_inl_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
+{
+ moveFromFloat32(src, dest);
+}
+
+void
+MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
+{
+ moveToFloat32(src, dest);
+}
+
+void
+MacroAssembler::move8SignExtend(Register src, Register dest)
+{
+ as_seb(dest, src);
+}
+
+void
+MacroAssembler::move16SignExtend(Register src, Register dest)
+{
+ as_seh(dest, src);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::not32(Register reg)
+{
+ ma_not(reg, reg);
+}
+
+void
+MacroAssembler::and32(Register src, Register dest)
+{
+ as_and(dest, dest, src);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_and(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::and32(const Address& src, Register dest)
+{
+ load32(src, SecondScratchReg);
+ ma_and(dest, SecondScratchReg);
+}
+
+void
+MacroAssembler::or32(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_or(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::xor32(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xor32(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Arithmetic instructions
+
+void
+MacroAssembler::add32(Register src, Register dest)
+{
+ as_addu(dest, dest, src);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, Register dest)
+{
+ ma_addu(dest, dest, imm);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_addu(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ loadPtr(dest, ScratchRegister);
+ addPtr(imm, ScratchRegister);
+ storePtr(ScratchRegister, dest);
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ loadPtr(src, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void
+MacroAssembler::addDouble(FloatRegister src, FloatRegister dest)
+{
+ as_addd(dest, dest, src);
+}
+
+void
+MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_adds(dest, dest, src);
+}
+
+void
+MacroAssembler::sub32(Register src, Register dest)
+{
+ as_subu(dest, dest, src);
+}
+
+void
+MacroAssembler::sub32(Imm32 imm, Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssembler::sub32(const Address& src, Register dest)
+{
+ load32(src, SecondScratchReg);
+ as_subu(dest, dest, SecondScratchReg);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ loadPtr(dest, SecondScratchReg);
+ subPtr(src, SecondScratchReg);
+ storePtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ loadPtr(addr, SecondScratchReg);
+ subPtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::subDouble(FloatRegister src, FloatRegister dest)
+{
+ as_subd(dest, dest, src);
+}
+
+void
+MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_subs(dest, dest, src);
+}
+
+void
+MacroAssembler::mul32(Register rhs, Register srcDest)
+{
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void
+MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_muls(dest, dest, src);
+}
+
+void
+MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ as_muld(dest, dest, src);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movePtr(imm, ScratchRegister);
+ loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
+ mulDouble(ScratchDoubleReg, dest);
+}
+
+void
+MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ if (isUnsigned)
+ as_divu(srcDest, rhs);
+ else
+ as_div(srcDest, rhs);
+ as_mflo(srcDest);
+}
+
+void
+MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ if (isUnsigned)
+ as_divu(srcDest, rhs);
+ else
+ as_div(srcDest, rhs);
+ as_mfhi(srcDest);
+}
+
+void
+MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_divs(dest, dest, src);
+}
+
+void
+MacroAssembler::divDouble(FloatRegister src, FloatRegister dest)
+{
+ as_divd(dest, dest, src);
+}
+
+void
+MacroAssembler::neg32(Register reg)
+{
+ ma_negu(reg, reg);
+}
+
+void
+MacroAssembler::negateDouble(FloatRegister reg)
+{
+ as_negd(reg, reg);
+}
+
+void
+MacroAssembler::negateFloat(FloatRegister reg)
+{
+ as_negs(reg, reg);
+}
+
+void
+MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_abss(dest, src);
+}
+
+void
+MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
+{
+ as_absd(dest, src);
+}
+
+void
+MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_sqrts(dest, src);
+}
+
+void
+MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
+{
+ as_sqrtd(dest, src);
+}
+
+void
+MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void
+MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshift32(Register src, Register dest)
+{
+ ma_sll(dest, dest, src);
+}
+
+void
+MacroAssembler::lshift32(Imm32 imm, Register dest)
+{
+ ma_sll(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift32(Register src, Register dest)
+{
+ ma_srl(dest, dest, src);
+}
+
+void
+MacroAssembler::rshift32(Imm32 imm, Register dest)
+{
+ ma_srl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Register src, Register dest)
+{
+ ma_sra(dest, dest, src);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest)
+{
+ ma_sra(dest, dest, imm);
+}
+
+// ===============================================================
+// Rotation functions
+void
+MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_rol(dest, input, count);
+ else
+ ma_move(dest, input);
+}
+void
+MacroAssembler::rotateLeft(Register count, Register input, Register dest)
+{
+ ma_rol(dest, input, count);
+}
+void
+MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_ror(dest, input, count);
+ else
+ ma_move(dest, input);
+}
+void
+MacroAssembler::rotateRight(Register count, Register input, Register dest)
+{
+ ma_ror(dest, input, count);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
+{
+ as_clz(dest, src);
+}
+
+void
+MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
+{
+ ma_ctz(dest, src);
+}
+
+void
+MacroAssembler::popcnt32(Register input, Register output, Register tmp)
+{
+ // Equivalent to GCC output of mozilla::CountPopulation32()
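+ // Classic SWAR reduction: form 2-bit pair counts, then 4-bit nibble
+ // counts, fold nibbles into per-byte counts, accumulate the bytes into
+ // the top byte, and shift the total down.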
+ ma_move(output, input);
+ ma_sra(tmp, input, Imm32(1));
+ ma_and(tmp, Imm32(0x55555555));
+ ma_subu(output, tmp);
+ ma_sra(tmp, output, Imm32(2));
+ ma_and(output, Imm32(0x33333333));
+ ma_and(tmp, Imm32(0x33333333));
+ ma_addu(output, tmp);
+ ma_srl(tmp, output, Imm32(4));
+ ma_addu(output, tmp);
+ ma_and(output, Imm32(0xF0F0F0F));
+ ma_sll(tmp, output, Imm32(8));
+ ma_addu(output, tmp);
+ ma_sll(tmp, output, Imm32(16));
+ ma_addu(output, tmp);
+ ma_sra(output, output, Imm32(24));
+}
+
+// ===============================================================
+// Branch functions
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm, L label)
+{
+ ma_b(lhs, imm, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr, Imm32 imm, Label* label)
+{
+ load32(addr, SecondScratchReg);
+ ma_b(SecondScratchReg, imm, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
+{
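+ // Emit an inverted short branch over a patchable jump; the returned
+ // CodeOffsetJump addresses the patchable jump so it can be retargeted
+ // later.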
+ movePtr(rhs, ScratchRegister);
+ Label skipJump;
+ ma_b(lhs, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ movePtr(rhs, ScratchRegister);
+ Label skipJump;
+ ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+}
+
+void
+MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ ma_bc1s(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ as_truncws(ScratchFloat32Reg, src);
+ as_mfc1(dest, ScratchFloat32Reg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ ma_bc1d(lhs, rhs, label, cond);
+}
+
+// Convert the floating-point value to an integer; if it did not fit, it was
+// clamped to INT32_MIN/INT32_MAX, which we can then test for.
+// NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
+// will be wrong.
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ as_truncwd(ScratchDoubleReg, src);
+ as_mfc1(dest, ScratchDoubleReg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertDoubleToInt32(src, dest, fail);
+}
+
+template <typename T, typename L>
+void
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L overflow)
+{
+ switch (cond) {
+ case Overflow:
+ ma_addTestOverflow(dest, dest, src, overflow);
+ break;
+ case CarrySet:
+ ma_addTestCarry(dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void
+MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* overflow)
+{
+ switch (cond) {
+ case Overflow:
+ ma_subTestOverflow(dest, dest, src, overflow);
+ break;
+ case NonZero:
+ case Zero:
+ ma_subu(dest, src);
+ ma_b(dest, dest, overflow, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+void
+MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchTestPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value, Label* label)
+{
+ ma_lid(ScratchDoubleReg, 0.0);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc1d(value, ScratchDoubleReg, label, cond);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label, actual);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+void
+MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestMagic(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestMagic(cond, scratch2, label);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
+{
+ MOZ_CRASH("NYI");
+}
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
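+ // SYNC stype selection (per the MIPS32 encoding): 0x13 orders loads
+ // against loads, 0x4 orders stores against stores, 0x10 orders both,
+ // and the argumentless form emits stype 0, a full completion barrier
+ // for synchronizing barriers.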
+ if (barrier == MembarLoadLoad)
+ as_sync(19);
+ else if (barrier == MembarStoreStore)
+ as_sync(4);
+ else if (barrier & MembarSynchronizing)
+ as_sync();
+ else if (barrier)
+ as_sync(16);
+}
+
+// ===============================================================
+// Clamping functions.
+
+void
+MacroAssembler::clampIntToUint8(Register reg)
+{
+ // If reg is < 0, then we want to clamp to 0.
+ as_slti(ScratchRegister, reg, 0);
+ as_movn(reg, zero, ScratchRegister);
+
+ // If reg is >= 255, then we want to clamp to 255.
+ ma_li(SecondScratchReg, Imm32(255));
+ as_slti(ScratchRegister, reg, 255);
+ as_movz(reg, SecondScratchReg, ScratchRegister);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_inl_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
new file mode 100644
index 000000000..18997e542
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -0,0 +1,1728 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+#include "jit/MacroAssembler.h"
+
+using namespace js;
+using namespace jit;
+
+void
+MacroAssemblerMIPSShared::ma_move(Register rd, Register rs)
+{
+ as_or(rd, rs, zero);
+}
+
+void
+MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr)
+{
+ writeDataRelocation(ptr);
+ asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+void
+MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm)
+{
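+ // Materialize the immediate in at most two instructions: a single
+ // addiu, ori or lui when the value allows it, otherwise lui of the
+ // upper half followed by ori of the lower half.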
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(dest, zero, imm.value);
+ } else if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(dest, zero, Imm16::Lower(imm).encode());
+ } else if (Imm16::Lower(imm).encode() == 0) {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ } else {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+ }
+}
+
+// Shifts
+void
+MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift)
+{
+ as_sll(rd, rt, shift.value % 32);
+}
+void
+MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift)
+{
+ as_srl(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift)
+{
+ as_sra(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, 32 - (shift.value % 32));
+}
+
+void
+MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Register shift)
+{
+ as_sllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Register shift)
+{
+ as_srlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Register shift)
+{
+ as_srav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Register shift)
+{
+ as_rotrv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Register shift)
+{
+ ma_negu(ScratchRegister, shift);
+ as_rotrv(rd, rt, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs)
+{
+ as_subu(rd, zero, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_not(Register rd, Register rs)
+{
+ as_nor(rd, rs, zero);
+}
+
+// And.
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Register rs)
+{
+ as_and(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm)
+{
+ ma_and(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_andi(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_and(rd, rs, ScratchRegister);
+ }
+}
+
+// Or.
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Register rs)
+{
+ as_or(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm)
+{
+ ma_or(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_or(rd, rs, ScratchRegister);
+ }
+}
+
+// Xor.
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs)
+{
+ as_xor(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm)
+{
+ ma_xor(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_xori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_xor(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs)
+{
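+ // rs & -rs isolates the lowest set bit, so 31 - clz(rs & -rs) equals
+ // ctz(rs) for non-zero rs. The movn only overwrites rd when rs is
+ // non-zero; for rs == 0 the clz result (32) is kept.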
+ ma_negu(ScratchRegister, rs);
+ as_and(rd, ScratchRegister, rs);
+ as_clz(rd, rd);
+ ma_negu(SecondScratchReg, rd);
+ ma_addu(SecondScratchReg, Imm32(0x1f));
+ as_movn(rd, SecondScratchReg, ScratchRegister);
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_addu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs)
+{
+ as_addu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm)
+{
+ ma_addu(rd, rd, imm);
+}
+
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
+{
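+ // With wrap-around arithmetic, a carry out of the addition means the
+ // unsigned result is smaller than either operand, which the sltu
+ // detects.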
+ as_addu(rd, rs, rt);
+ as_sltu(SecondScratchReg, rd, rs);
+ ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+ Register rt, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Register rt,
+ wasm::TrapDesc overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_addTestCarry(rd, rs, ScratchRegister, overflow);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+ Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
+ wasm::TrapDesc overflow);
+
+// Subtract.
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_addiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_subu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm)
+{
+ ma_subu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs)
+{
+ as_subu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
+ } else {
+ ma_li(ScratchRegister, Imm32(imm.value));
+ asMasm().ma_subTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ as_mul(rd, rs, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow)
+{
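+ // mult leaves the full 64-bit product in hi:lo; the 32-bit multiply
+ // overflowed iff hi differs from the sign extension of lo (lo >> 31).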
+ as_mult(rs, rt);
+ as_mflo(rd);
+ as_sra(ScratchRegister, rd, 31);
+ as_mfhi(SecondScratchReg);
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ as_div(rs, rt);
+ as_mfhi(ScratchRegister);
+ ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+ as_mflo(rd);
+}
+
+void
+MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero)
+{
+ // MATH:
+ // We wish to compute x % ((1<<y) - 1) for a known constant, y.
+ // First, let b = (1<<y) and C = (1<<y) - 1, then think of the 32-bit
+ // dividend as a number in base b, namely
+ // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+ // Now, since both addition and multiplication commute with modulus,
+ // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+ // (c_0 % C) + (c_1 % C) * (b % C) + (c_2 % C) * (b^2 % C) ...
+ // and since b == C + 1, b % C == 1 and b^n % C == 1, the whole thing
+ // simplifies to:
+ // (c_0 + c_1 + c_2 ... + c_n) % C
+ // Each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting C whenever the running sum
+ // exceeds C.
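+ // For example, with shift == 2 (so C == 3 and b == 4) and x == 29:
+ // 29 == 1 + 3*4 + 1*16, the digit sum is 1 + 3 + 1 == 5, and
+ // 5 % 3 == 2 == 29 % 3.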
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+ // hold holds -1 if the value was negative, 1 otherwise.
+ // remain holds the remaining bits that have not been processed.
+ // SecondScratchReg serves as a temporary location to store extracted bits
+ // into, as well as holding the trial subtraction as a temp value.
+ // dest is the accumulator (and holds the final result).
+
+ // move the whole value into the remain.
+ ma_move(remain, src);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ ma_negu(remain, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ // Extract the bottom bits into SecondScratchReg.
+ ma_and(SecondScratchReg, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_addu(dest, dest, SecondScratchReg);
+ // Do a trial subtraction
+ ma_subu(SecondScratchReg, dest, Imm32(mask));
+ // If (sum - C) >= 0, store sum - C back into sum, thus performing a
+ // modulus.
+ ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+ ma_move(dest, SecondScratchReg);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srl(remain, remain, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+ // The hold was negative, so negate the result to be in line with
+ // what JS wants.
+ if (negZero != nullptr) {
+ // Jump out in case of negative zero.
+ ma_b(hold, hold, negZero, Zero);
+ ma_negu(dest, dest);
+ } else {
+ ma_negu(dest, dest);
+ }
+
+ bind(&done);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && ZeroExtend != extension && Imm8::IsInSignedRange(src.offset)) {
+ Register index = src.index;
+
+ if (src.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(src.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != src.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, src.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, src.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, src.base, index, src.offset);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, src.base, index, src.offset);
+ break;
+ case SizeWord:
+ as_gslwx(dest, src.base, index, src.offset);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, src.base, index, src.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
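+ // Unaligned word and double-word loads are assembled from the lwl/lwr
+ // (ldl/ldr) pairs, which together read the misaligned datum without
+ // trapping; half-word loads use two byte loads and an ins to merge them.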
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(src.offset).encode();
+ hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ as_lbu(dest, base, lowOffset);
+ if (extension != ZeroExtend)
+ as_lbu(temp, base, hiOffset);
+ else
+ as_lb(temp, base, hiOffset);
+ as_ins(dest, temp, 8, 24);
+ break;
+ case SizeWord:
+ as_lwl(dest, base, hiOffset);
+ as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+ if (extension != ZeroExtend)
+ as_dext(dest, dest, 0, 32);
+#endif
+ break;
+ case SizeDouble:
+ as_ldl(dest, base, hiOffset);
+ as_ldr(dest, base, lowOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register index = dest.index;
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(dest, SecondScratchReg);
+ asMasm().ma_store(data, Address(SecondScratchReg, dest.offset), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register data = zero;
+ Register index = dest.index;
+
+ if (imm.value) {
+ MOZ_ASSERT(ScratchRegister != dest.base);
+ MOZ_ASSERT(ScratchRegister != dest.index);
+ data = ScratchRegister;
+ ma_li(data, imm);
+ }
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ // Make sure that SecondScratchReg contains the absolute address so that
+ // the offset is 0.
+ asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+ // The scratch register is free now, use it for loading the imm value.
+ ma_li(ScratchRegister, imm);
+
+ // With offset=0 ScratchRegister will not be used in ma_store(),
+ // so we can use it as a parameter here.
+ asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(dest.offset).encode();
+ hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ as_sb(data, base, lowOffset);
+ as_ext(temp, data, 8, 8);
+ as_sb(temp, base, hiOffset);
+ break;
+ case SizeWord:
+ as_swl(data, base, hiOffset);
+ as_swr(data, base, lowOffset);
+ break;
+ case SizeDouble:
+ as_sdl(data, base, hiOffset);
+ as_sdr(data, base, lowOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Condition c, JumpKind jumpKind)
+{
+ switch (c) {
+ case Equal :
+ case NotEqual:
+ asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+ break;
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ if (c == Always || c == AboveOrEqual)
+ ma_b(label, jumpKind);
+ else if (c == Below)
+ ; // This condition is always false. No branch required.
+ else
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind)
+{
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind)
+{
+ Label label;
+ ma_b(lhs, rhs, &label, c, jumpKind);
+ bindLater(&label, target);
+}
+
+template void MacroAssemblerMIPSShared::ma_b<Register>(Register lhs, Register rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+template void MacroAssemblerMIPSShared::ma_b<Imm32>(Register lhs, Imm32 rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+template void MacroAssemblerMIPSShared::ma_b<ImmTag>(Register lhs, ImmTag rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+
+void
+MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind)
+{
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(wasm::TrapDesc target, JumpKind jumpKind)
+{
+ Label label;
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), &label, jumpKind);
+ bindLater(&label, target);
+}
+
+Assembler::Condition
+MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
+{
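+ // The relational conditions have no direct branch encoding here, so
+ // materialize the comparison into scratch with slt/sltu and return the
+ // equality condition to branch on against zero.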
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return Equal;
+ case Equal :
+ case NotEqual:
+ case Zero:
+ case NonZero:
+ case Always:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("There is a better way to compare for equality.");
+ break;
+ case Overflow:
+ MOZ_CRASH("Overflow condition not supported for MIPS.");
+ break;
+ default:
+ MOZ_CRASH("Invalid condition for branch.");
+ }
+ return Always;
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Condition c)
+{
+ switch (c) {
+ case Equal :
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rs, rt);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rs, rt);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rt, rs);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rs, rt);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rt, rs);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rs, rt);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rs == rt);
+ // seq d,s,$zero =>
+ // xor d,s,$zero
+ // sltiu d,d,1
+ as_xor(rd, rs, zero);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NonZero:
+ // sne d,s,$zero =>
+ // xor d,s,$zero
+ // sltu d,$zero,d
+ as_xor(rd, rs, zero);
+ as_sltu(rd, zero, rd);
+ break;
+ case Signed:
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition for ma_cmp_set.");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind* testKind,
+ FPConditionBit fcc)
+{
+ switch (c) {
+ case DoubleOrdered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleEqual:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqual:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThan:
+ as_colt(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_cole(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThan:
+ as_colt(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqual:
+ as_cole(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleUnordered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleEqualOrUnordered:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_cult(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_cule(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrUnordered:
+ as_cult(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_cule(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ default:
+ MOZ_CRASH("Invalid DoubleCondition.");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
+{
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value)
+{
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, wasm::RawF32 value)
+{
+ Imm32 imm(value.bits());
+
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_liNegZero(FloatRegister dest)
+{
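+ // -0.0 is all zero bits except the sign bit, so zero the low word and set
+ // the high word to 0x80000000 (INT_MIN).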
+ moveToDoubleLo(zero, dest);
+ ma_li(ScratchRegister, Imm32(INT_MIN));
+ asMasm().moveToDoubleHi(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gssdx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_sd(ft, Address(SecondScratchReg, address.offset));
+}
+
+void
+MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gsssx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
+}
+
+void
+MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest, FloatRegister second,
+ bool handleNaN, bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax
+ ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ // Make sure we handle -0 and 0 right.
+ ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(DoubleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_addd(ScratchDoubleReg, first, second);
+ } else {
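+ // For min, -((-first) - second) yields -0 whenever either operand is -0,
+ // and +0 only when both operands are +0.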
+ as_negd(ScratchDoubleReg, first);
+ as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
+ as_negd(ScratchDoubleReg, ScratchDoubleReg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(DoubleFloat, first, ScratchDoubleReg);
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest, FloatRegister second,
+ bool handleNaN, bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax
+ ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ // Make sure we handle -0 and 0 right.
+ ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(SingleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_adds(ScratchFloat32Reg, first, second);
+ } else {
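+ // Same -0 trick as in minMaxDouble: -((-first) - second) is -0 whenever
+ // either operand is -0.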
+ as_negs(ScratchFloat32Reg, first);
+ as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
+ as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(SingleFloat, first, ScratchFloat32Reg);
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSShared::ma_call(ImmPtr dest)
+{
+ asMasm().ma_liPatchable(CallReg, dest);
+ as_jalr(CallReg);
+ as_nop();
+}
+
+void
+MacroAssemblerMIPSShared::ma_jump(ImmPtr dest)
+{
+ asMasm().ma_liPatchable(ScratchRegister, dest);
+ as_jr(ScratchRegister);
+ as_nop();
+}
+
+MacroAssembler&
+MacroAssemblerMIPSShared::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerMIPSShared::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOpMIPSr2(int nbytes, AtomicOp op,
+ const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp)
+{
+ atomicFetchOpMIPSr2(nbytes, false, op, value, addr, flagTemp,
+ valueTemp, offsetTemp, maskTemp, InvalidReg);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Register& addr, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
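+ // The operation is emulated on the aligned word containing the field:
+ // align the address down, compute the field's bit offset and mask, then
+ // retry an LL/SC loop that applies the operation to that field only.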
+ Label again;
+
+ as_andi(offsetTemp, addr, 3);
+ asMasm().subPtr(offsetTemp, addr);
+ as_sll(offsetTemp, offsetTemp, 3);
+ ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ as_sllv(maskTemp, maskTemp, offsetTemp);
+
+ bind(&again);
+
+ as_sync(16);
+
+ as_ll(flagTemp, addr, 0);
+
+ as_sllv(valueTemp, value, offsetTemp);
+ if (output != InvalidReg) {
+ as_and(output, flagTemp, maskTemp);
+ as_srlv(output, output, offsetTemp);
+ if (signExtend) {
+ switch (nbytes) {
+ case 1:
+ as_seb(output, output);
+ break;
+ case 2:
+ as_seh(output, output);
+ break;
+ case 4:
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ }
+ }
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ as_addu(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchSubOp:
+ as_subu(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchAndOp:
+ as_and(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchOrOp:
+ as_or(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchXorOp:
+ as_xor(valueTemp, flagTemp, valueTemp);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+
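+ // Clear the field in the loaded word ((x | mask) ^ mask == x & ~mask) and
+ // insert the updated value before attempting the store-conditional.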
+ as_and(valueTemp, valueTemp, maskTemp);
+ as_or(flagTemp, flagTemp, maskTemp);
+ as_xor(flagTemp, flagTemp, maskTemp);
+ as_or(flagTemp, flagTemp, valueTemp);
+
+ as_sc(flagTemp, addr, 0);
+
+ ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
+
+ as_sync(0);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr,
+ Register oldval, Register newval, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output)
+{
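+ // Same word-aligned LL/SC scheme as atomicFetchOpMIPSr2: the old field
+ // value is extracted into output, optionally compared against oldval, and
+ // the new value is merged back into the word before the SC.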
+ Label again, end;
+
+ as_andi(offsetTemp, addr, 3);
+ asMasm().subPtr(offsetTemp, addr);
+ as_sll(offsetTemp, offsetTemp, 3);
+ ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ as_sllv(maskTemp, maskTemp, offsetTemp);
+
+ bind(&again);
+
+ as_sync(16);
+
+ as_ll(flagTemp, addr, 0);
+
+ as_and(output, flagTemp, maskTemp);
+ // If oldval is a valid register this is a compareExchange; otherwise
+ // (InvalidReg) it is an unconditional exchange.
+ if (InvalidReg != oldval) {
+ as_sllv(valueTemp, oldval, offsetTemp);
+ as_and(valueTemp, valueTemp, maskTemp);
+ ma_b(output, valueTemp, &end, NotEqual, ShortJump);
+ }
+
+ as_sllv(valueTemp, newval, offsetTemp);
+ as_and(valueTemp, valueTemp, maskTemp);
+ as_or(flagTemp, flagTemp, maskTemp);
+ as_xor(flagTemp, flagTemp, maskTemp);
+ as_or(flagTemp, flagTemp, valueTemp);
+
+ as_sc(flagTemp, addr, 0);
+
+ ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
+
+ as_sync(0);
+
+ bind(&end);
+
+ as_srlv(output, output, offsetTemp);
+ if (signExtend) {
+ switch (nbytes) {
+ case 1:
+ as_seb(output, output);
+ break;
+ case 2:
+ as_seh(output, output);
+ break;
+ case 4:
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ }
+}
+
+void
+MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const Address& address,
+ Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const BaseIndex& address,
+ Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const Address& address,
+ Register value, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const BaseIndex& address,
+ Register value, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void
+MacroAssembler::flush()
+{
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::Push(Register reg)
+{
+ ma_push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmWord imm)
+{
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmPtr imm)
+{
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssembler::Push(const ImmGCPtr ptr)
+{
+ ma_li(ScratchRegister, ptr);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(FloatRegister f)
+{
+ ma_push(f);
+ adjustFrame(sizeof(double));
+}
+
+void
+MacroAssembler::Pop(Register reg)
+{
+ ma_pop(reg);
+ adjustFrame(-sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Pop(FloatRegister f)
+{
+ ma_pop(f);
+ adjustFrame(-sizeof(double));
+}
+
+void
+MacroAssembler::Pop(const ValueOperand& val)
+{
+ popValue(val);
+ framePushed_ -= sizeof(Value);
+}
+
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset
+MacroAssembler::call(Register reg)
+{
+ as_jalr(reg);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::call(Label* label)
+{
+ ma_bal(label);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::callWithPatch()
+{
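+ // Emit a 7-word sequence: bal skips over a patchable offset word while its
+ // delay slot advances ra past the sequence (the return address); the offset
+ // word, filled in by patchCall(), is then loaded relative to ra, added to
+ // ra, and jumped to.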
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ addPtr(Imm32(5 * sizeof(uint32_t)), ra);
+ // Allocate space which will be patched by patchCall().
+ writeInst(UINT32_MAX);
+ as_lw(ScratchRegister, ra, -(int32_t)(5 * sizeof(uint32_t)));
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+void
+MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
+{
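+ // callerOffset is the end of the sequence emitted by callWithPatch. Patch
+ // the bal directly when the callee is within its 16-bit branch range;
+ // otherwise store the relative offset into the placeholder word.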
+ BufferOffset call(callerOffset - 7 * sizeof(uint32_t));
+
+ BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
+ if (!offset.isInvalid()) {
+ InstImm* bal = (InstImm*)editSrc(call);
+ bal->setBOffImm16(offset);
+ } else {
+ uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
+ *u32 = calleeOffset - callerOffset;
+ }
+}
+
+CodeOffset
+MacroAssembler::farJumpWithPatch()
+{
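+ // ra is saved first; bal's delay slot loads the patchable offset word (the
+ // return address left in ra points at that word), and the target address
+ // is formed as ra plus the loaded offset before ra is restored.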
+ ma_move(SecondScratchReg, ra);
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ as_lw(ScratchRegister, ra, 0);
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ writeInst(UINT32_MAX);
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ ma_move(ra, SecondScratchReg);
+ return farJump;
+}
+
+void
+MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+void
+MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(code + farJumpOffset);
+ *u32 = targetOffset - farJumpOffset;
+}
+
+CodeOffset
+MacroAssembler::nopPatchableToNearJump()
+{
+ CodeOffset offset(currentOffset());
+ as_nop();
+ as_nop();
+ return offset;
+}
+
+void
+MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
+{
+ new (jump) InstImm(op_beq, zero, zero, BOffImm16(target - jump));
+}
+
+void
+MacroAssembler::patchNearJumpToNop(uint8_t* jump)
+{
+ new (jump) InstNOP();
+}
+
+void
+MacroAssembler::call(wasm::SymbolicAddress target)
+{
+ movePtr(target, CallReg);
+ call(CallReg);
+}
+
+void
+MacroAssembler::call(ImmWord target)
+{
+ call(ImmPtr((void*)target.value));
+}
+
+void
+MacroAssembler::call(ImmPtr target)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, Relocation::HARDCODED);
+ ma_call(target);
+}
+
+void
+MacroAssembler::call(JitCode* c)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ callJitNoProfiler(ScratchRegister);
+}
+
+void
+MacroAssembler::pushReturnAddress()
+{
+ push(ra);
+}
+
+void
+MacroAssembler::popReturnAddress()
+{
+ pop(ra);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::pushFakeReturnAddress(Register scratch)
+{
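+ // Load the address of the label bound just after the push (via a CodeLabel
+ // patched when the code is finalized) and push it, so the frame appears to
+ // have a return address pointing at the next instruction.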
+ CodeLabel cl;
+
+ ma_li(scratch, cl.patchAt());
+ Push(scratch);
+ bind(cl.target());
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != SecondScratchReg);
+
+ movePtr(ptr, SecondScratchReg);
+ orPtr(Imm32(gc::ChunkMask), SecondScratchReg);
+ branch32(cond, Address(SecondScratchReg, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::comment(const char* msg)
+{
+ Assembler::comment(msg);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
new file mode 100644
index 000000000..c9bd4a4d9
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
+#define jit_mips_shared_MacroAssembler_mips_shared_h
+
+#if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+
+#include "jit/AtomicOp.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize
+{
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension
+{
+ ZeroExtend = 0,
+ SignExtend = 1
+};
+
+enum JumpKind
+{
+ LongJump = 0,
+ ShortJump = 1
+};
+
+enum DelaySlotFill
+{
+ DontFillDelaySlot = 0,
+ FillDelaySlot = 1
+};
+
+static Register CallReg = t9;
+
+class MacroAssemblerMIPSShared : public Assembler
+{
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind* testKind,
+ FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_move(Register rd, Register rs);
+
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+
+ // Shift operations
+ void ma_sll(Register rd, Register rt, Imm32 shift);
+ void ma_srl(Register rd, Register rt, Imm32 shift);
+ void ma_sra(Register rd, Register rt, Imm32 shift);
+ void ma_ror(Register rd, Register rt, Imm32 shift);
+ void ma_rol(Register rd, Register rt, Imm32 shift);
+
+ void ma_sll(Register rd, Register rt, Register shift);
+ void ma_srl(Register rd, Register rt, Register shift);
+ void ma_sra(Register rd, Register rt, Register shift);
+ void ma_ror(Register rd, Register rt, Register shift);
+ void ma_rol(Register rd, Register rt, Register shift);
+
+ // Negate
+ void ma_negu(Register rd, Register rs);
+
+ void ma_not(Register rd, Register rs);
+
+ // and
+ void ma_and(Register rd, Register rs);
+ void ma_and(Register rd, Imm32 imm);
+ void ma_and(Register rd, Register rs, Imm32 imm);
+
+ // or
+ void ma_or(Register rd, Register rs);
+ void ma_or(Register rd, Imm32 imm);
+ void ma_or(Register rd, Register rs, Imm32 imm);
+
+ // xor
+ void ma_xor(Register rd, Register rs);
+ void ma_xor(Register rd, Imm32 imm);
+ void ma_xor(Register rd, Register rs, Imm32 imm);
+
+ void ma_ctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // store
+ void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // arithmetic based ops
+ // add
+ void ma_addu(Register rd, Register rs, Imm32 imm);
+ void ma_addu(Register rd, Register rs);
+ void ma_addu(Register rd, Imm32 imm);
+ template <typename L>
+ void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
+ template <typename L>
+ void ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow);
+
+ // subtract
+ void ma_subu(Register rd, Register rs, Imm32 imm);
+ void ma_subu(Register rd, Register rs);
+ void ma_subu(Register rd, Imm32 imm);
+ void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+ // Multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rs, Imm32 imm);
+ void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
+ void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+ // Fast mod; uses scratch registers and thus needs to be in the assembler.
+ // Implicitly assumes that we can overwrite dest at the beginning of the sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+ template <typename T>
+ void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind = LongJump);
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+ void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+ void ma_lis(FloatRegister dest, wasm::RawF32 value);
+ void ma_liNegZero(FloatRegister dest);
+
+ void ma_sd(FloatRegister fd, BaseIndex address);
+ void ma_ss(FloatRegister fd, BaseIndex address);
+
+ // FP branches
+ void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromFloat32(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
+
+ private:
+ void atomicEffectOpMIPSr2(int nbytes, AtomicOp op, const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr, Register oldval,
+ Register newval, Register flagTemp, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output);
+
+ protected:
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const Address& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const BaseIndex& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const Address& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const BaseIndex& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+
+ void compareExchange(int nbytes, bool signExtend, const Address& address, Register oldval,
+ Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void compareExchange(int nbytes, bool signExtend, const BaseIndex& address, Register oldval,
+ Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+
+ void atomicExchange(int nbytes, bool signExtend, const Address& address, Register value,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void atomicExchange(int nbytes, bool signExtend, const BaseIndex& address, Register value,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+
+ public:
+ struct AutoPrepareForPatching {
+ explicit AutoPrepareForPatching(MacroAssemblerMIPSShared&) {}
+ };
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
new file mode 100644
index 000000000..f1e1fd514
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
@@ -0,0 +1,223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MoveEmitterMIPSShared::emit(const MoveResolver& moves)
+{
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ masm.reserveStack(moves.numCycles() * sizeof(double));
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++)
+ emit(moves.getMove(i));
+}
+
+Address
+MoveEmitterMIPSShared::cycleSlot(uint32_t slot, uint32_t subslot) const
+{
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t
+MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand)
+{
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer)
+ return operand.disp();
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address
+MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand)
+{
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+
+Register
+MoveEmitterMIPSShared::tempReg()
+{
+ spilledReg_ = SecondScratchReg;
+ return SecondScratchReg;
+}
+
+void
+MoveEmitterMIPSShared::emitMove(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg())
+ masm.movePtr(from.reg(), to.reg());
+ else if (to.isMemory())
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ else
+ MOZ_CRASH("Invalid emitMove arguments.");
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.loadPtr(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void
+MoveEmitterMIPSShared::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
+{
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg())
+ masm.move32(from.reg(), to.reg());
+ else if (to.isMemory())
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ else
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.load32(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void
+MoveEmitterMIPSShared::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ // Ensure that we can use ScratchFloat32Reg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ // This should only be used when passing a float parameter in a1, a2 or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ // This should only be used when passing a float parameter in a1, a2 or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to));
+ }
+}
+
+void
+MoveEmitterMIPSShared::emit(const MoveOp& move)
+{
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPSShared::assertDone()
+{
+ MOZ_ASSERT(inCycle_ == 0);
+}
+
+void
+MoveEmitterMIPSShared::finish()
+{
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.h b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
new file mode 100644
index 000000000..b7f794c53
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MoveEmitter_mips_shared_h
+#define jit_mips_shared_MoveEmitter_mips_shared_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPSShared
+{
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ virtual void emitDoubleMove(const MoveOperand& from, const MoveOperand& to) = 0;
+ virtual void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ virtual void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterMIPSShared(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg)
+ { }
+ ~MoveEmitterMIPSShared() {
+ assertDone();
+ }
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MoveEmitter_mips_shared_h */
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
new file mode 100644
index 000000000..e665c92dd
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -0,0 +1,382 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg.
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Jump to the stubcode.
+ masm.branch(R2.scratchReg());
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.branch(ra);
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.movePtr(reg, ra);
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subPtr(BaselineStackReg, scratch);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.subPtr(Imm32(argSize), scratch);
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+ masm.addPtr(Imm32(argSize), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.subPtr(Imm32(sizeof(CommonFrameLayout)), StackPointer);
+ masm.storePtr(scratch, Address(StackPointer, CommonFrameLayout::offsetOfDescriptor()));
+ masm.storePtr(ra, Address(StackPointer, CommonFrameLayout::offsetOfReturnAddress()));
+
+ masm.branch(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ Register scratch = R2.scratchReg();
+
+ masm.loadPtr(Address(sp, stackSize), scratch);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addPtr(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.branch(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and
+ // previous frame pointer pushed by EmitEnterStubFrame.
+ masm.movePtr(BaselineFrameReg, reg);
+ masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg);
+ masm.subPtr(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ Register scratch = R2.scratchReg();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.callJit(target);
+
+ // Remove the rest of the frame left on the stack, excluding the return
+ // address, which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subPtr(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+ masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor)));
+ masm.storePtr(ICTailCallReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, returnAddress)));
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.storePtr(ICStubReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedStub)));
+ masm.storePtr(BaselineFrameReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedFrame)));
+ masm.movePtr(BaselineStackReg, BaselineFrameReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(ICTailCallReg == ra);
+
+ // On MIPS the ra register holds the return address, but jit frames expect
+ // it to be on the stack. Push the link register here; since it is logically
+ // part of the previous frame, we use push instead of Push.
+ masm.push(ICTailCallReg);
+
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ masm.pop(ScratchRegister);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister);
+ masm.addPtr(ScratchRegister, BaselineStackReg);
+ } else {
+ masm.movePtr(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)),
+ BaselineFrameReg);
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)),
+ ICStubReg);
+
+ // Load the return address.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)),
+ ICTailCallReg);
+
+ // Discard the frame descriptor.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister);
+ masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+ masm.pop(ICTailCallReg); // See EmitIonEnterStubFrame for explanation on pop/Pop.
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch (values) {
+ case 1:
+ // Stow R0.
+ masm.Push(R0);
+ break;
+ case 2:
+ // Stow R0 and R1.
+ masm.Push(R0);
+ masm.Push(R1);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch (values) {
+ case 1:
+ // Unstow R0.
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ break;
+ case 2:
+ // Unstow R0 and R1.
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from $sp, excluding the return address.
+
+ // Save the current ICStubReg to stack, as well as the TailCallReg,
+ // since on mips, the $ra is live.
+ masm.subPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+ masm.storePtr(ICStubReg, Address(StackPointer, sizeof(intptr_t)));
+ masm.storePtr(ICTailCallReg, Address(StackPointer, 0));
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Load stubcode pointer from ICStubReg into ICTailCallReg.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode.
+ masm.call(R2.scratchReg());
+
+ // Restore the old stub reg and tailcall reg.
+ masm.loadPtr(Address(StackPointer, 0), ICTailCallReg);
+ masm.loadPtr(Address(StackPointer, sizeof(intptr_t)), ICStubReg);
+ masm.addPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+
+ // The update IC will store 0 or 1 in R1.scratchReg(), reflecting whether
+ // the value in R0 type-checked properly.
+ Label success;
+ masm.ma_b(R1.scratchReg(), Imm32(1), &success, Assembler::Equal, ShortJump);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ // On MIPS, $ra is clobbered by patchableCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.patchableCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in
+ // the same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into scratch register.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.branch(R2.scratchReg());
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_h */
diff --git a/js/src/jit/mips32/Architecture-mips32.cpp b/js/src/jit/mips32/Architecture-mips32.cpp
new file mode 100644
index 000000000..9aca3f831
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.cpp
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Architecture-mips32.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char * const Registers::RegNames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" };
+
+const uint32_t Allocatable = 14;
+
+const Registers::SetType Registers::ArgRegMask = Registers::SharedArgRegMask;
+
+const Registers::SetType Registers::JSCallMask =
+ (1 << Registers::a2) |
+ (1 << Registers::a3);
+
+const Registers::SetType Registers::CallMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1); // used for double-size returns
+
+FloatRegisters::Code
+FloatRegisters::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+FloatRegister
+FloatRegister::doubleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Double)
+ return FloatRegister(code_ & ~1, Double);
+ return *this;
+}
+
+FloatRegister
+FloatRegister::singleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Double) {
+ // Only even-numbered registers are doubles.
+ MOZ_ASSERT(code_ % 2 == 0);
+ MOZ_ASSERT(which < 2);
+ return FloatRegister(code_ + which, Single);
+ }
+ MOZ_ASSERT(which == 0);
+ return FloatRegister(code_, Single);
+}
+
+FloatRegisterSet
+FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Even for single-precision registers, save the complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t
+FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xffffffff) == 0);
+ uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
+
+uint32_t
+FloatRegister::getRegisterDumpOffsetInBytes()
+{
+ if (isSingle())
+ return id() * sizeof(float);
+ if (isDouble())
+ return id() * sizeof(double);
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
new file mode 100644
index 000000000..9e5f3ca28
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Architecture_mips32_h
+#define jit_mips32_Architecture_mips32_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// The O32 ABI requires the caller to reserve four argument slots (16 bytes)
+// of shadow stack space for the callee.
+static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+// Size of MIPS32 general purpose registers is 32 bits.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
+// Size of each bailout table entry.
+// For MIPS this is a two-instruction relative call.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 2 * sizeof(void*);
+
+// MIPS32 can have two types of floating-point coprocessors:
+// - 32-bit floating-point coprocessor - In this case, there are 32 single
+// precision registers, and pairs of even and odd float registers are used as
+// double precision registers. Example: f0 (double) is composed of
+// f0 and f1 (single).
+// - 64-bit floating-point coprocessor - In this case, there are 32 double
+// precision registers which can also be used as single precision registers.
+
+// When using the O32 ABI, the floating-point coprocessor is 32-bit.
+// When using the N32 ABI, the floating-point coprocessor is 64-bit.
+class FloatRegisters : public FloatRegistersMIPSShared
+{
+ public:
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < Total);
+ return FloatRegistersMIPSShared::GetName(Code(i % 32));
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t Total = 64;
+ static const uint32_t TotalDouble = 16;
+ static const uint32_t RegisterIdLimit = 32;
+ // Workaround: on Loongson CPUs the odd FP registers behave differently
+ // in fp32 mode than on standard MIPS.
+#if defined(_MIPS_ARCH_LOONGSON3A)
+ static const uint32_t TotalSingle = 16;
+ static const uint32_t Allocatable = 28;
+ static const SetType AllSingleMask = 0x55555555ULL;
+#else
+ static const uint32_t TotalSingle = 32;
+ static const uint32_t Allocatable = 42;
+ static const SetType AllSingleMask = (1ULL << 32) - 1;
+#endif
+ // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 16;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ static const SetType AllDoubleMask = 0x55555555ULL << 32;
+ static const SetType AllMask = AllDoubleMask | AllSingleMask;
+
+ static const SetType NonVolatileDoubleMask =
+ ((1ULL << FloatRegisters::f20) |
+ (1ULL << FloatRegisters::f22) |
+ (1ULL << FloatRegisters::f24) |
+ (1ULL << FloatRegisters::f26) |
+ (1ULL << FloatRegisters::f28) |
+ (1ULL << FloatRegisters::f30)) << 32;
+
+ // f20-single and f21-single alias f20-double ...
+ static const SetType NonVolatileMask =
+ NonVolatileDoubleMask |
+ (1ULL << FloatRegisters::f20) |
+ (1ULL << FloatRegisters::f21) |
+ (1ULL << FloatRegisters::f22) |
+ (1ULL << FloatRegisters::f23) |
+ (1ULL << FloatRegisters::f24) |
+ (1ULL << FloatRegisters::f25) |
+ (1ULL << FloatRegisters::f26) |
+ (1ULL << FloatRegisters::f27) |
+ (1ULL << FloatRegisters::f28) |
+ (1ULL << FloatRegisters::f29) |
+ (1ULL << FloatRegisters::f30) |
+ (1ULL << FloatRegisters::f31);
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+ static const SetType VolatileDoubleMask = AllDoubleMask & ~NonVolatileDoubleMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType NonAllocatableDoubleMask =
+ ((1ULL << FloatRegisters::f16) |
+ (1ULL << FloatRegisters::f18)) << 32;
+ // f16-single and f17-single alias f16-double ...
+ static const SetType NonAllocatableMask =
+ NonAllocatableDoubleMask |
+ (1ULL << FloatRegisters::f16) |
+ (1ULL << FloatRegisters::f17) |
+ (1ULL << FloatRegisters::f18) |
+ (1ULL << FloatRegisters::f19);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+class FloatRegister : public FloatRegisterMIPSShared
+{
+ public:
+ enum RegType {
+ Single = 0x0,
+ Double = 0x1,
+ };
+
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+
+ uint32_t code_ : 6;
+ protected:
+ RegType kind_ : 1;
+
+ public:
+ constexpr FloatRegister(uint32_t code, RegType kind = Double)
+ : code_ (Code(code)), kind_(kind)
+ { }
+ constexpr FloatRegister()
+ : code_(Code(FloatRegisters::invalid_freg)), kind_(Double)
+ { }
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && code_ == other.code_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+ size_t size() const { return (kind_ == Double) ? 8 : 4; }
+ bool isInvalid() const {
+ return code_ == FloatRegisters::invalid_freg;
+ }
+
+ bool isSingle() const { return kind_ == Single; }
+ bool isDouble() const { return kind_ == Double; }
+
+ FloatRegister doubleOverlay(unsigned int which = 0) const;
+ FloatRegister singleOverlay(unsigned int which = 0) const;
+ FloatRegister sintOverlay(unsigned int which = 0) const;
+ FloatRegister uintOverlay(unsigned int which = 0) const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ return Code(code_ | (kind_ << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ return Encoding(code_);
+ }
+ uint32_t id() const {
+ return code_;
+ }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 31;
+ uint32_t kind = i >> 5;
+ return FloatRegister(code, RegType(kind));
+ }
+ // This is similar to FromCode except for double registers on O32.
+ static FloatRegister FromIndex(uint32_t index, RegType kind) {
+#if defined(USES_O32_ABI)
+ // Only even FP registers are available for Loongson on O32.
+# if defined(_MIPS_ARCH_LOONGSON3A)
+ return FloatRegister(index * 2, kind);
+# else
+ if (kind == Double)
+ return FloatRegister(index * 2, kind);
+# endif
+#endif
+ return FloatRegister(index, kind);
+ }
+
+ bool volatile_() const {
+ if (isDouble())
+ return !!((1ULL << code_) & FloatRegisters::VolatileMask);
+ return !!((1ULL << (code_ & ~1)) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const {
+ return FloatRegisters::GetName(code_);
+ }
+ bool operator != (const FloatRegister& other) const {
+ return other.kind_ != kind_ || code_ != other.code_;
+ }
+ bool aliases(const FloatRegister& other) {
+ if (kind_ == other.kind_)
+ return code_ == other.code_;
+ return doubleOverlay() == other.doubleOverlay();
+ }
+ uint32_t numAliased() const {
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ return 3;
+ }
+ return 2;
+ }
+ void aliased(uint32_t aliasIdx, FloatRegister* ret) {
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ MOZ_ASSERT(aliasIdx <= 2);
+ *ret = singleOverlay(aliasIdx - 1);
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = doubleOverlay(aliasIdx - 1);
+ }
+ uint32_t numAlignedAliased() const {
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ return 2;
+ }
+ // f1-float32 has 0 other aligned aliases, 1 total.
+ // f0-float32 has 1 other aligned alias, 2 total.
+ return 2 - (code_ & 1);
+ }
+ // | f0-double |
+ // | f0-float32 | f1-float32 |
+ // We only push double registers on MIPS. So, if we've stored f0-double,
+ // we know that f0-float32 is stored there as well.
+ void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) {
+ MOZ_ASSERT(isDouble());
+ MOZ_ASSERT((code_ & 1) == 0);
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = singleOverlay(aliasIdx - 1);
+ }
+
+ SetType alignedOrDominatedAliasedSet() const {
+ if (isSingle())
+ return SetType(1) << code_;
+
+ MOZ_ASSERT(isDouble());
+ return SetType(0b11) << code_;
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
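A minimal sketch (not part of the patch) of the Code packing that code() and FromCode() agree on: bits 0-4 carry the register id and bit 5 carries the Single/Double kind. The constants below just mirror the shift and mask used above.

#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t kindShift = 5;   // mirrors the shift used by code()/FromCode()
    uint32_t id = 14;               // e.g. the encoding of f14
    uint32_t kind = 1;              // Double
    uint32_t code = id | (kind << kindShift);  // what code() returns
    uint32_t backId = code & 31;               // what FromCode() extracts
    uint32_t backKind = code >> kindShift;
    printf("code=%u id=%u kind=%u\n", code, backId, backKind);  // 46 14 1
    return 0;
}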
+
+// In order to handle functions such as int(*)(int, double), where the first
+// argument is passed in a general purpose register and the second in a
+// floating point register, we have to store the double content into two
+// general purpose registers, namely a2 and a3.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Architecture_mips32_h */
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
new file mode 100644
index 000000000..6283c1d5a
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -0,0 +1,545 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : usedArgSlots_(0),
+ firstArgFloatSize_(0),
+ useGPRForFloats_(false),
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ Register destReg;
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ if (GetIntArgReg(usedArgSlots_, &destReg))
+ current_ = ABIArg(destReg);
+ else
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_++;
+ break;
+ case MIRType::Int64:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(a0, a1);
+ usedArgSlots_ = 2;
+ } else if (usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ case MIRType::Float32:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12.asSingle());
+ firstArgFloatSize_ = 1;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14.asSingle());
+ } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) {
+ current_ = ABIArg(destReg);
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ }
+ usedArgSlots_++;
+ break;
+ case MIRType::Double:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12);
+ usedArgSlots_ = 2;
+ firstArgFloatSize_ = 2;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14);
+ usedArgSlots_ = 4;
+ } else if (useGPRForFloats_ && usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
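A hand-traced sketch of the slot accounting in next() for Int32 and Double arguments (not part of the patch; the lambdas and printed register names are illustrative stand-ins for the ABIArg values the real code returns). It shows why enforceO32ABI() exists: without it, a double following an int goes to the stack rather than the a2/a3 pair.

#include <cstdio>

int main()
{
    unsigned usedArgSlots = 0;
    unsigned firstArgFloatSize = 0;
    bool useGPRForFloats = false;   // flipped by enforceO32ABI() in the real code

    auto nextInt32 = [&]() {
        if (usedArgSlots < 4)
            printf("Int32  -> a%u\n", usedArgSlots);
        else
            printf("Int32  -> stack offset %u\n", usedArgSlots * 4);
        usedArgSlots++;
    };
    auto nextDouble = [&]() {
        if (!usedArgSlots) {
            printf("Double -> f12\n");
            usedArgSlots = 2;
            firstArgFloatSize = 2;
        } else if (usedArgSlots == firstArgFloatSize) {
            printf("Double -> f14\n");
            usedArgSlots = 4;
        } else if (useGPRForFloats && usedArgSlots <= 2) {
            printf("Double -> a2/a3 pair\n");
            usedArgSlots = 4;
        } else {
            if (usedArgSlots < 4)
                usedArgSlots = 4;
            usedArgSlots += usedArgSlots % 2;
            printf("Double -> stack offset %u\n", usedArgSlots * 4);
            usedArgSlots += 2;
        }
    };

    nextInt32();   // a0
    nextDouble();  // stack offset 16, or the a2/a3 pair after enforceO32ABI()
    return 0;
}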
+
+uint32_t
+js::jit::RT(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RTShift;
+}
+
+uint32_t
+js::jit::RD(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RDShift;
+}
+
+uint32_t
+js::jit::RZ(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RZShift;
+}
+
+uint32_t
+js::jit::SA(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << SAShift;
+}
+
+// Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch.
+void
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+{
+ Instruction* inst1 = (Instruction*)jump_.raw();
+ Instruction* inst2 = inst1->next();
+
+ MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
+ Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
+
+ AutoFlushICache::flush(uintptr_t(inst1), 8);
+}
+
+// For more information about backedges, see the comment in
+// MacroAssemblerMIPSCompat::backedgeJump().
+void
+jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
+ JitRuntime::BackedgeTarget target)
+{
+ uint32_t sourceAddr = (uint32_t)jump.raw();
+ uint32_t targetAddr = (uint32_t)label.raw();
+ InstImm* branch = (InstImm*)jump.raw();
+
+ MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));
+
+ if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
+ branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
+ } else {
+ if (target == JitRuntime::BackedgeLoopHeader) {
+ Instruction* lui = &branch[1];
+ Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+ // Jump to the ori. The lui will be executed in the delay slot.
+ branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
+ } else {
+ Instruction* lui = &branch[4];
+ Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+ branch->setBOffImm16(BOffImm16(4 * sizeof(uint32_t)));
+ }
+ }
+}
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all long jumps during code copy.
+ for (size_t i = 0; i < longJumps_.length(); i++) {
+ Instruction* inst1 = (Instruction*) ((uint32_t)buffer + longJumps_[i]);
+
+ uint32_t value = Assembler::ExtractLuiOriValue(inst1, inst1->next());
+ Assembler::UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
+ }
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
+uintptr_t
+Assembler::GetPointer(uint8_t* instPtr)
+{
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+static JitCode*
+CodeFromJump(Instruction* jump)
+{
+ uint8_t* target = (uint8_t*)Assembler::ExtractLuiOriValue(jump, jump->next());
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ JitCode* child = CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void
+TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
+{
+ void* ptr = (void*)Assembler::ExtractLuiOriValue(inst, inst->next());
+ void* prior = ptr;
+
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
+ "ion-masm-ptr");
+ if (ptr != prior) {
+ Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
+ AutoFlushICache::flush(uintptr_t(inst), 8);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(buffer + offset);
+ TraceOneDataRelocation(trc, inst);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, MIPSBuffer* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ BufferOffset bo (reader.readUnsigned());
+ MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
+ TraceOneDataRelocation(trc, iter.cur());
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+Assembler::Condition
+Assembler::UnsignedCondition(Condition cond)
+{
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::Condition
+Assembler::ConditionWithoutEqual(Condition cond)
+{
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void
+Assembler::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
+ }
+ }
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, &m_buffer, reader);
+ }
+}
+
+void
+Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
+{
+ if (label->bound()) {
+ intptr_t offset = label->offset();
+ Instruction* inst = (Instruction*) (rawCode + offset);
+ Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
+ }
+}
+
+void
+Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
+{
+ int32_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // If encoded offset is 4, then the jump must be short
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+ // Generate the long jump for calls because the return address has to be
+ // the address after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ // Skip the trailing nops in conditional branches.
+ if (conditional) {
+ inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*))).encode();
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(void*)));
+ Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ }
+}
+
+void
+Assembler::bind(RepatchLabel* label)
+{
+ BufferOffset dest = nextOffset();
+ if (label->used() && !oom()) {
+ // If the label has a use, then change this use to refer to
+ // the bound label.
+ BufferOffset b(label->offset());
+ InstImm* inst = (InstImm*)editSrc(b);
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ uint32_t offset = dest.getOffset() - label->offset();
+
+ // If first instruction is lui, then this is a long jump.
+ // If second instruction is lui, then this is a loop backedge.
+ if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
+ // For unconditional long branches generated by ma_liPatchable,
+ // such as under:
+ // jumpWithPatch
+ Assembler::UpdateLuiOriValue(inst, inst->next(), dest.getOffset());
+ } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
+ BOffImm16::IsInRange(offset))
+ {
+ // Handle code produced by:
+ // backedgeJump
+ // branchWithCode
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ } else if (inst[0].encode() == inst_beq.encode()) {
+ // Handle open long unconditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset()));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, dest.getOffset());
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ } else {
+ // Handle open long conditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ // No need for a "nop" here because we can clobber scratch.
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ MOZ_ASSERT(inst[4].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset() + sizeof(void*)));
+ Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, dest.getOffset());
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ }
+ }
+ label->bind(dest.getOffset());
+}
+
+uint32_t
+Assembler::PatchWrite_NearCallSize()
+{
+ return 4 * sizeof(uint32_t);
+}
+
+void
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction* inst = (Instruction*) start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use a long jump, for two reasons:
+ // - The jump has to be the same size because of PatchWrite_NearCallSize.
+ // - The return address has to be at the end of the replaced block.
+ // A short jump wouldn't be any more efficient.
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[3] = InstNOP();
+
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
+}
+
+uint32_t
+Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
+{
+ InstImm* i0 = (InstImm*) inst0;
+ InstImm* i1 = (InstImm*) inst1;
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ uint32_t value = i0->extractImm16Value() << 16;
+ value = value | i1->extractImm16Value();
+ return value;
+}
+
+void
+Assembler::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
+{
+ MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ ((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
+ ((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+void
+Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
+ Register reg, uint32_t value)
+{
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
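A small standalone sketch (not part of the patch) of the 16/16 immediate split these three helpers manipulate: lui carries the upper halfword, ori the lower, and extraction simply recombines them.

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t value = 0xDEADBEEF;
    uint16_t upper = uint16_t(value >> 16);     // imm16 field of the lui
    uint16_t lower = uint16_t(value & 0xffff);  // imm16 field of the ori
    uint32_t rebuilt = (uint32_t(upper) << 16) | lower;  // what ExtractLuiOriValue recomputes
    printf("lui 0x%04x, ori 0x%04x -> 0x%08x\n", unsigned(upper), unsigned(lower), rebuilt);
    return 0;
}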
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue)
+{
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue)
+{
+ Instruction* inst = (Instruction*) label.raw();
+
+ // Extract old Value
+ DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
+ MOZ_ASSERT(value == uint32_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
+
+ AutoFlushICache::flush(uintptr_t(inst), 8);
+}
+
+void
+Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
+{
+ InstImm* inst = (InstImm*)code;
+ Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
+}
+
+uint32_t
+Assembler::ExtractInstructionImmediate(uint8_t* code)
+{
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+ Instruction* i2 = (Instruction*) i1->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i2 = jalr;
+ } else {
+ InstNOP nop;
+ *i2 = nop;
+ }
+
+ AutoFlushICache::flush(uintptr_t(i2), 4);
+}
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
new file mode 100644
index 000000000..9fdbcda98
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Assembler_mips32_h
+#define jit_mips32_Assembler_mips32_h
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips32/Architecture-mips32.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+
+static constexpr Register CallTempNonArgRegs[] = { t0, t1, t2, t3, t4 };
+static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ unsigned usedArgSlots_;
+ unsigned firstArgFloatSize_;
+ // Note: This is not compliant with the system ABI. The Lowering phase
+ // expects to lower an MWasmParameter to only one register.
+ bool useGPRForFloats_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ void enforceO32ABI() {
+ useGPRForFloats_ = true;
+ }
+
+ uint32_t stackBytesConsumedSoFar() const {
+ if (usedArgSlots_ <= 4)
+ return ShadowStackSpace;
+
+ return usedArgSlots_ * sizeof(intptr_t);
+ }
+};
+
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = s5;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register64 ReturnReg64(InvalidReg, InvalidReg);
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::f0, FloatRegister::Single };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegister::Double };
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f18, FloatRegister::Single };
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f18, FloatRegister::Double };
+static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f16, FloatRegister::Single };
+static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, FloatRegister::Double };
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
+static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
+
+static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegister::Double };
+static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegister::Double };
+static constexpr FloatRegister f4 = { FloatRegisters::f4, FloatRegister::Double };
+static constexpr FloatRegister f6 = { FloatRegisters::f6, FloatRegister::Double };
+static constexpr FloatRegister f8 = { FloatRegisters::f8, FloatRegister::Double };
+static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegister::Double };
+static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegister::Double };
+static constexpr FloatRegister f14 = { FloatRegisters::f14, FloatRegister::Double };
+static constexpr FloatRegister f16 = { FloatRegisters::f16, FloatRegister::Double };
+static constexpr FloatRegister f18 = { FloatRegisters::f18, FloatRegister::Double };
+static constexpr FloatRegister f20 = { FloatRegisters::f20, FloatRegister::Double };
+static constexpr FloatRegister f22 = { FloatRegisters::f22, FloatRegister::Double };
+static constexpr FloatRegister f24 = { FloatRegisters::f24, FloatRegister::Double };
+static constexpr FloatRegister f26 = { FloatRegisters::f26, FloatRegister::Double };
+static constexpr FloatRegister f28 = { FloatRegisters::f28, FloatRegister::Double };
+static constexpr FloatRegister f30 = { FloatRegisters::f30, FloatRegister::Double };
+
+// MIPS CPUs can only load multibyte data that is "naturally"
+// four-byte-aligned; the sp register should be eight-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 8;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+static constexpr Scale ScalePointer = TimesFour;
+
+class Assembler : public AssemblerMIPSShared
+{
+ public:
+ Assembler()
+ : AssemblerMIPSShared()
+ { }
+
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer* trc);
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ protected:
+ // This is used to access the odd register from the pair of single
+ // precision registers that make up one double register.
+ FloatRegister getOddPair(FloatRegister reg) {
+ MOZ_ASSERT(reg.isDouble());
+ return reg.singleOverlay(1);
+ }
+
+ public:
+ using AssemblerMIPSShared::bind;
+
+ void bind(RepatchLabel* label);
+ void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
+ static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
+ static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
+ Register reg, uint32_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
+ static uint32_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 4;
+
+static inline bool
+GetIntArgReg(uint32_t usedArgSlots, Register* out)
+{
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
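A sketch of the resulting fallback order (not part of the patch; the register-name strings just mirror a0-a3 and CallTempNonArgRegs above): slots 0-3 map to the argument registers, the next five map to the temp fallbacks, and anything beyond that fails.

#include <cstdio>

int main()
{
    const char* argRegs[] = { "a0", "a1", "a2", "a3" };
    const char* tempRegs[] = { "t0", "t1", "t2", "t3", "t4" };  // CallTempNonArgRegs
    for (unsigned usedIntArgs = 0; usedIntArgs < 10; usedIntArgs++) {
        if (usedIntArgs < 4)
            printf("arg %u -> %s\n", usedIntArgs, argRegs[usedIntArgs]);
        else if (usedIntArgs - 4 < 5)
            printf("arg %u -> %s (temp fallback)\n", usedIntArgs, tempRegs[usedIntArgs - 4]);
        else
            printf("arg %u -> no register available\n", usedIntArgs);
    }
    return 0;
}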
+
+static inline uint32_t
+GetArgStackDisp(uint32_t usedArgSlots)
+{
+ MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
+ // Even register arguments have space reserved on the stack.
+ return usedArgSlots * sizeof(intptr_t);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Assembler_mips32_h */
diff --git a/js/src/jit/mips32/Bailouts-mips32.cpp b/js/src/jit/mips32/Bailouts-mips32.cpp
new file mode 100644
index 000000000..1b92d729c
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.cpp
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Bailouts-mips32.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
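An arithmetic sketch of the bailout-id recovery above (not part of the patch; the table base address is made up): the stored tableOffset is the return address just past an entry, so dividing by the entry size and subtracting one yields that entry's index.

#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t entrySize = 2 * sizeof(void*);         // BAILOUT_TABLE_ENTRY_SIZE (8 on mips32)
    uintptr_t tableStart = 0x1000;                        // made-up table base
    uintptr_t tableOffset = tableStart + 3 * entrySize;   // return address just past entry #2
    uint32_t bailoutId = uint32_t((tableOffset - tableStart) / entrySize) - 1;
    printf("bailoutId = %u\n", bailoutId);  // 2
    return 0;
}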
diff --git a/js/src/jit/mips32/Bailouts-mips32.h b/js/src/jit/mips32/Bailouts-mips32.h
new file mode 100644
index 000000000..0c4d7f313
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Bailouts_mips32_h
+#define jit_mips32_Bailouts_mips32_h
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+ // This is pushed in the bailout handler. Both entry points into the
+ // handler insert their own value into lr, which is then placed onto the
+ // stack along with frameClassId_ above. This should be migrated to ip.
+ public:
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+
+ protected:
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+
+ uintptr_t snapshotOffset_;
+ uintptr_t padding_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t*)this + sizeof(BailoutStack);
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+ static size_t offsetOfFrameClass() {
+ return offsetof(BailoutStack, frameClassId_);
+ }
+ static size_t offsetOfFrameSize() {
+ return offsetof(BailoutStack, frameSize_);
+ }
+ static size_t offsetOfFpRegs() {
+ return offsetof(BailoutStack, fpregs_);
+ }
+ static size_t offsetOfRegs() {
+ return offsetof(BailoutStack, regs_);
+ }
+};
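An offset sketch of parentStackPointer() (not part of the patch): FakeBailoutStack is a hypothetical stand-in with the same member order, so the printed numbers only illustrate the two cases rather than the real RegisterDump sizes.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in with the same member order as BailoutStack.
struct FakeBailoutStack
{
    uintptr_t frameClassId_;
    uintptr_t frameSizeOrTableOffset_;
    double fpregs_[16];
    uintptr_t regs_[32];
    uintptr_t snapshotOffset_;
    uintptr_t padding_;
};

int main()
{
    printf("table bailout:     parent sp at this + %zu\n",
           offsetof(FakeBailoutStack, snapshotOffset_));
    printf("explicit snapshot: parent sp at this + %zu\n", sizeof(FakeBailoutStack));
    return 0;
}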
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Bailouts_mips32_h */
diff --git a/js/src/jit/mips32/BaselineCompiler-mips32.cpp b/js/src/jit/mips32/BaselineCompiler-mips32.cpp
new file mode 100644
index 000000000..acbc67ff0
--- /dev/null
+++ b/js/src/jit/mips32/BaselineCompiler-mips32.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/BaselineCompiler-mips32.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPS::BaselineCompilerMIPS(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCompilerMIPSShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/mips32/BaselineCompiler-mips32.h b/js/src/jit/mips32/BaselineCompiler-mips32.h
new file mode 100644
index 000000000..cd6fe41ee
--- /dev/null
+++ b/js/src/jit/mips32/BaselineCompiler-mips32.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_BaselineCompiler_mips32_h
+#define jit_mips32_BaselineCompiler_mips32_h
+
+#include "jit/mips-shared/BaselineCompiler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPS : public BaselineCompilerMIPSShared
+{
+ protected:
+ BaselineCompilerMIPS(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerMIPS BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_BaselineCompiler_mips32_h */
diff --git a/js/src/jit/mips32/BaselineIC-mips32.cpp b/js/src/jit/mips32/BaselineIC-mips32.cpp
new file mode 100644
index 000000000..e41ecf774
--- /dev/null
+++ b/js/src/jit/mips32/BaselineIC-mips32.cpp
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ Label conditionTrue;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.ma_cmp_set(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
new file mode 100644
index 000000000..b947c14aa
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -0,0 +1,832 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/CodeGenerator-mips32.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS>
+{
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorMIPS* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir)
+ : mir_(mir)
+ {}
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ CodeLabel* jumpLabel() {
+ return &jumpLabel_;
+ }
+};
+
+void
+CodeGeneratorMIPS::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ // Push snapshotOffset and make sure stack is aligned.
+ masm.subPtr(Imm32(2 * sizeof(void*)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()), Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void
+CodeGeneratorMIPS::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel()->target());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.ma_li(ScratchRegister, cl.patchAt());
+ masm.branch(ScratchRegister);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void
+CodeGeneratorMIPS::emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register address)
+{
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ // Subtract the lower bound so the index is relative to the first case.
+ if (mir->low() != 0)
+ masm.subPtr(Imm32(mir->low()), index);
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+ // Compute the address of the jump table entry for this case.
+ masm.ma_li(address, ool->jumpLabel()->patchAt());
+ masm.lshiftPtr(Imm32(4), index);
+ masm.addPtr(index, address);
+
+ masm.branch(address);
+}
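An arithmetic sketch (not part of the patch) of why the index is shifted left by 4: each out-of-line table entry is assumed to expand to four 4-byte instructions (the patchable lui/ori pair emitted by ma_li plus a jump and its delay-slot nop in visitOutOfLineTableSwitch), i.e. 16 bytes per case.

#include <cstdio>

int main()
{
    const unsigned instrSize = 4;       // every MIPS32 instruction is 4 bytes
    const unsigned instrsPerEntry = 4;  // lui, ori, jr, delay-slot nop (assumed expansion)
    unsigned index = 3;
    unsigned byteOffset = index << 4;   // what lshiftPtr(Imm32(4), index) computes
    printf("entry %u at byte offset %u (expected %u)\n",
           index, byteOffset, index * instrSize * instrsPerEntry);
    return 0;
}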
+
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+ if (frameDepth < FrameSizes[i])
+ return FrameSizeClass(i);
+ }
+
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+ return FrameSizes[class_];
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToOutValue(LInstruction* ins)
+{
+ Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void
+CodeGeneratorMIPS::visitBox(LBox* box)
+{
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // For NUNBOX32, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void
+CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint* box)
+{
+ const LDefinition* payload = box->getDef(PAYLOAD_INDEX);
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+ const LAllocation* in = box->getOperand(0);
+
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
+ reg = ScratchDoubleReg;
+ }
+ masm.ma_mv(reg, ValueOperand(ToRegister(type), ToRegister(payload)));
+}
+
+void
+CodeGeneratorMIPS::visitUnbox(LUnbox* unbox)
+{
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+
+ if (mir->fallible()) {
+ bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
+ unbox->snapshot());
+ }
+}
+
+Register
+CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value)
+{
+ return value.typeReg();
+}
+
+void
+CodeGeneratorMIPS::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+
+ Label notBoolean, done;
+ masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+ {
+ if (rhs->isConstant())
+ masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output);
+ else
+ masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&notBoolean);
+ {
+ masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
+ branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual);
+
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ if (rhs->isConstant())
+ emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(),
+ lir->ifFalse());
+ else
+ emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ Label notEqual, done;
+ masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual, ShortJump);
+ {
+ masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
+ masm.ma_b(&done, ShortJump);
+ }
+ masm.bind(&notEqual);
+ {
+ masm.move32(Imm32(cond == Assembler::NotEqual), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+ branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual);
+ emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // All inputs are useAtStart for a call instruction. As a result we cannot
+ // ask for a non-aliasing temp, so we use the following to get one.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (lir->mir()->isMod()) {
+ masm.xor64(output, output);
+ } else {
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ }
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::ModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::DivI64);
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+ // All inputs are useAtStart for a call instruction. As a result we cannot
+ // ask for a non-aliasing temp, so we use the following to get one.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::UModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::UDivI64);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+ Register64 output = ToOutRegister64(lir);
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
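+    // The 64-bit result is assembled from 32-bit halves: accesses of four
+    // bytes or less fill the low word and sign- or zero-extend into the high
+    // word, while a full 8-byte access loads the two words separately (the
+    // low word lives at offset 0).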
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (byteSize <= 4) {
+ masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned)
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ if (byteSize <= 4) {
+ masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned)
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+ Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
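+    // Stores of four bytes or less only use the low word; a full 8-byte
+    // store writes the low and high words separately, low word at offset 0.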
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (byteSize <= 4) {
+ masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ if (byteSize <= 4) {
+ masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ Register64 output = ToOutRegister64(ins);
+
+ masm.load32(Address(GlobalReg, addr + INT64LOW_OFFSET), output.low);
+ masm.load32(Address(GlobalReg, addr + INT64HIGH_OFFSET), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+    MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(ins->value());
+
+ masm.store32(input.low, Address(GlobalReg, addr + INT64LOW_OFFSET));
+ masm.store32(input.high, Address(GlobalReg, addr + INT64HIGH_OFFSET));
+}
+
+void
+CodeGeneratorMIPS::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation trueExpr = lir->trueExpr();
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 output = ToOutRegister64(lir);
+
+ masm.move64(ToRegister64(trueExpr), output);
+
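+    // movz copies its source only when |cond| is zero, so the output already
+    // holds the true value and is conditionally overwritten with the false
+    // value.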
+ if (falseExpr.low().isRegister()) {
+ masm.as_movz(output.low, ToRegister(falseExpr.low()), cond);
+ masm.as_movz(output.high, ToRegister(falseExpr.high()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.low()), output.low);
+ masm.loadPtr(ToAddress(falseExpr.high()), output.high);
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.moveToDoubleLo(input.low, output);
+ masm.moveToDoubleHi(input.high, output);
+}
+
+void
+CodeGeneratorMIPS::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.moveFromDoubleLo(input, output.low);
+ masm.moveFromDoubleHi(input, output.high);
+}
+
+void
+CodeGeneratorMIPS::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ if (input != output.low)
+ masm.move32(input, output.low);
+ if (lir->mir()->isUnsigned())
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+}
+
+void
+CodeGeneratorMIPS::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.move32(ToRegister(input.low()), output);
+ else
+ masm.move32(ToRegister(input.high()), output);
+}
+
+void
+CodeGeneratorMIPS::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
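+    // A 64-bit value is zero only if both halves are zero, so OR the halves
+    // together and test the result against zero.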
+ masm.as_or(output, input.low, input.high);
+ masm.cmp32Set(Assembler::Equal, output, Imm32(0), output);
+}
+
+void
+CodeGeneratorMIPS::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = input;
+ Register64 output = ToOutRegister64(lir);
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (fromType == MIRType::Double) {
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
+ } else if (fromType == MIRType::Float32) {
+ masm.branchFloat(Assembler::DoubleUnordered, input, input, ool->entry());
+ scratch = ScratchDoubleReg;
+ masm.convertFloat32ToDouble(input, scratch);
+ } else {
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+ }
+
+ masm.setupUnalignedABICall(output.high);
+ masm.passABIArg(scratch, MoveOp::DOUBLE);
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToUint64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToInt64);
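+    // The truncation builtins report failure by returning INT64_MIN
+    // (0x8000000000000000); re-enter the out-of-line check in that case,
+    // otherwise rejoin.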
+ masm.ma_b(output.high, Imm32(0x80000000), ool->rejoin(), Assembler::NotEqual);
+ masm.ma_b(output.low, Imm32(0x00000000), ool->rejoin(), Assembler::NotEqual);
+ masm.ma_b(ool->entry());
+
+ masm.bind(ool->rejoin());
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void
+CodeGeneratorMIPS::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input.low);
+ regs.take(input.high);
+ Register temp = regs.takeAny();
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::Uint64ToFloatingPoint, MoveOp::DOUBLE);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::Int64ToFloatingPoint, MoveOp::DOUBLE);
+
+ MOZ_ASSERT_IF(toType == MIRType::Double, output == ReturnDoubleReg);
+ if (toType == MIRType::Float32) {
+ MOZ_ASSERT(output == ReturnFloat32Reg);
+ masm.convertDoubleToFloat32(ReturnDoubleReg, output);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
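+    // The value is truthy if either half is non-zero: branch on the high
+    // word first, then decide on the low word.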
+ branchToBlock(input.high, Imm32(0), lir->ifTrue(), Assembler::NonZero);
+ emitBranch(input.low, Imm32(0), Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.code_ == ReturnDoubleReg.code_);
+ regs->add(ReturnFloat32Reg);
+ regs->add(ReturnDoubleReg.singleOverlay(1));
+ regs->add(ReturnDoubleReg);
+}
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.h b/js/src/jit/mips32/CodeGenerator-mips32.h
new file mode 100644
index 000000000..fc4394b65
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_CodeGenerator_mips32_h
+#define jit_mips32_CodeGenerator_mips32_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS : public CodeGeneratorMIPSShared
+{
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_OBJECT), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ public:
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitWasmSelectI64(LWasmSelectI64* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitClzI64(LClzI64* ins);
+ void visitCtzI64(LCtzI64* ins);
+ void visitNotI64(LNotI64* ins);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand& value);
+
+ public:
+ CodeGeneratorMIPS(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm)
+ { }
+
+ public:
+ void visitBox(LBox* box);
+ void visitBoxFloatingPoint(LBoxFloatingPoint* box);
+ void visitUnbox(LUnbox* unbox);
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+};
+
+typedef CodeGeneratorMIPS CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_CodeGenerator_mips32_h */
diff --git a/js/src/jit/mips32/LIR-mips32.h b/js/src/jit/mips32/LIR-mips32.h
new file mode 100644
index 000000000..8c0fa9a95
--- /dev/null
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -0,0 +1,169 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LIR_mips32_h
+#define jit_mips32_LIR_mips32_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* payload() {
+ return getOperand(0);
+ }
+ const LAllocation* type() {
+ return getOperand(1);
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : type_(type)
+ {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
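+// 64-bit division and modulus are lowered to calls on MIPS32, so these LIR
+// nodes are call instructions carrying each operand as INT64_PIECES (two)
+// 32-bit pieces.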
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_LIR_mips32_h */
diff --git a/js/src/jit/mips32/LOpcodes-mips32.h b/js/src/jit/mips32/LOpcodes-mips32.h
new file mode 100644
index 000000000..8e39737c7
--- /dev/null
+++ b/js/src/jit/mips32/LOpcodes-mips32.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LOpcodes_mips32_h__
+#define jit_mips32_LOpcodes_mips32_h__
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(BoxFloatingPoint) \
+ _(ModMaskI) \
+ _(UDivOrMod) \
+ _(DivOrModI64) \
+ _(UDivOrModI64) \
+ _(WasmUnalignedLoad) \
+ _(WasmUnalignedStore) \
+ _(WasmUnalignedLoadI64) \
+ _(WasmUnalignedStoreI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint)
+
+#endif // jit_mips32_LOpcodes_mips32_h__
diff --git a/js/src/jit/mips32/Lowering-mips32.cpp b/js/src/jit/mips32/Lowering-mips32.cpp
new file mode 100644
index 000000000..650694823
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.cpp
@@ -0,0 +1,258 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Lowering-mips32.h"
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+void
+LIRGeneratorMIPS::visitBox(MBox* box)
+{
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
+ tempCopy(inner, 0), inner->type()), box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new(alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+    // Note that because we're using BogusTemp() for the payload half, we do
+    // not change the type of the definition, and we do not define the first
+    // output as "TYPE": it has no corresponding payload at (vreg + 1).
+    // BogusTemp() definitions are ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorMIPS::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* inner = unbox->getOperand(0);
+
+ if (inner->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ // An unbox on mips reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload
+ // register.
+ LUnbox* lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void
+LIRGeneratorMIPS::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void
+LIRGeneratorMIPS::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+
+void
+LIRGeneratorMIPS::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET,
+ LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void
+LIRGeneratorMIPS::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
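+    // The two halves use consecutive virtual registers, so that
+    // vreg + INT64LOW_INDEX / INT64HIGH_INDEX address the matching pieces.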
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void
+LIRGeneratorMIPS::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void
+LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorMIPS::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorMIPS::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorMIPS::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorMIPS::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorMIPS::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
diff --git a/js/src/jit/mips32/Lowering-mips32.h b/js/src/jit/mips32/Lowering-mips32.h
new file mode 100644
index 000000000..2deb268a8
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Lowering_mips32_h
+#define jit_mips32_Lowering_mips32_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS : public LIRGeneratorMIPSShared
+{
+ protected:
+ LIRGeneratorMIPS(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitRandom(MRandom* ins);
+};
+
+typedef LIRGeneratorMIPS LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Lowering_mips32_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32-inl.h b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
new file mode 100644
index 000000000..2dae8fb87
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -0,0 +1,1077 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_inl_h
+#define jit_mips32_MacroAssembler_mips32_inl_h
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ move32(src.low, dest.low);
+ move32(src.high, dest.high);
+}
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ ma_and(dest, src);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != int32_t(0xFFFFFFFF))
+ and32(imm.low(), dest.low);
+ if (imm.hi().value != int32_t(0xFFFFFFFF))
+ and32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ and32(src.low, dest.low);
+ and32(src.high, dest.high);
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ or32(imm.low(), dest.low);
+ if (imm.hi().value)
+ or32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ xor32(imm.low(), dest.low);
+ if (imm.hi().value)
+ xor32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ or32(src.low, dest.low);
+ or32(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ ma_xor(dest.low, src.low);
+ ma_xor(dest.high, src.high);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ ma_addu(dest, src);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ ma_addu(dest, imm);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ addPtr(Imm32(imm.value), dest);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
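+    // Add the low words first; sltu then yields the carry (1 when the sum is
+    // below the addend, i.e. the low word overflowed), which is added into
+    // the high-word sum.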
+ as_addu(dest.low, dest.low, src.low);
+ as_sltu(ScratchRegister, dest.low, src.low);
+ as_addu(dest.high, dest.high, src.high);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, imm);
+ as_addu(dest.low, dest.low, ScratchRegister);
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ add64(imm.low(), dest);
+ ma_addu(dest.high, dest.high, imm.hi());
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ as_subu(dest, dest, src);
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
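+    // Compute the borrow out of the low word first (sltu is 1 when
+    // dest.low < src.low), subtract it from the high word, then subtract the
+    // low and high words.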
+ as_sltu(ScratchRegister, dest.low, src.low);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ as_subu(dest.low, dest.low, src.low);
+ as_subu(dest.high, dest.high, src.high);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, imm.low());
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ ma_subu(dest.low, dest.low, imm.low());
+ ma_subu(dest.high, dest.high, imm.hi());
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ ma_li(ScratchRegister, Imm32(imm.value & LOW_32_MASK));
+ as_multu(dest.high, ScratchRegister);
+ as_mflo(dest.high);
+
+ // mfhi:mflo = LOW(dest) * LOW(imm);
+ as_multu(dest.low, ScratchRegister);
+
+ // HIGH(dest) += mfhi;
+ as_mfhi(ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ if (((imm.value >> 32) & LOW_32_MASK) == 5) {
+ // Optimized case for Math.random().
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ as_sll(ScratchRegister, dest.low, 2);
+ as_addu(ScratchRegister, ScratchRegister, dest.low);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ // LOW(dest) = mflo;
+ as_mflo(dest.low);
+ } else {
+ // tmp = mflo
+ as_mflo(SecondScratchReg);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ ma_li(ScratchRegister, Imm32((imm.value >> 32) & LOW_32_MASK));
+ as_multu(dest.low, ScratchRegister);
+ as_mflo(ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ // LOW(dest) = tmp;
+ ma_move(dest.low, SecondScratchReg);
+ }
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+ ma_li(ScratchRegister, imm.firstHalf());
+ as_multu(dest.high, ScratchRegister);
+ as_mflo(dest.high);
+
+ ma_li(ScratchRegister, imm.secondHalf());
+ as_multu(dest.low, ScratchRegister);
+ as_mflo(temp);
+ as_addu(temp, dest.high, temp);
+
+ ma_li(ScratchRegister, imm.firstHalf());
+ as_multu(dest.low, ScratchRegister);
+ as_mfhi(dest.high);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, temp);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+    // LOW32  = LOW(LOW(dest) * LOW(src));                                  (1)
+    // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]   (2)
+    //        + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]  (3)
+    //        + HIGH(LOW(dest) * LOW(src)) [carry]                          (4)
+
+    // HIGH(dest) = LOW(HIGH(dest) * LOW(src));
+ MOZ_ASSERT(dest != src);
+ MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+ as_multu(dest.high, src.low); // (2)
+ as_mflo(dest.high);
+ as_multu(dest.low, src.high); // (3)
+ as_mflo(temp);
+ as_addu(temp, dest.high, temp);
+ as_multu(dest.low, src.low); // (4) + (1)
+ as_mfhi(dest.high);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, temp);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
+ ma_li(ScratchRegister, Imm32(1));
+ as_movz(ScratchRegister, zero, reg.low);
+ ma_negu(reg.low, reg.low);
+ as_addu(reg.high, reg.high, ScratchRegister);
+ ma_negu(reg.high, reg.high);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ as_addu(dest, src, src);
+ as_addu(dest, dest, src);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_lw(SecondScratchReg, ScratchRegister, 0);
+
+ as_addiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sw(SecondScratchReg, ScratchRegister, 0);
+
+ as_sltiu(SecondScratchReg, SecondScratchReg, 1);
+ as_lw(ScratchRegister, ScratchRegister, 4);
+
+ as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
+
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_sw(SecondScratchReg, ScratchRegister, 4);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sll(dest, dest, imm);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_sll(dest.high, dest.high, imm.value);
+ as_srl(scratch, dest.low, 32 - imm.value);
+ as_or(dest.high, dest.high, scratch);
+ as_sll(dest.low, dest.low, imm.value);
+ } else {
+ as_sll(dest.high, dest.low, imm.value - 32);
+ move32(Imm32(0), dest.low);
+ }
+}
+
+void
+MacroAssembler::lshift64(Register unmaskedShift, Register64 dest)
+{
+ Label done, less;
+ ScratchRegisterScope shift(*this);
+
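+    // Mask the shift amount to 0..63. A shift of 32 or more moves the low
+    // word into the high word and clears the low word; a smaller shift also
+    // ORs the bits shifted out of the low word into the high word.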
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_b(shift, Imm32(0), &done, Equal);
+
+ ma_sll(dest.high, dest.high, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_sll(dest.high, dest.low, shift);
+ move32(Imm32(0), dest.low);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_srl(SecondScratchReg, dest.low, shift);
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_sll(dest.low, dest.low, shift);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_srl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sra(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, 32 - imm.value);
+ as_or(dest.low, dest.low, scratch);
+ as_srl(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ move32(Imm32(0), dest.high);
+ } else {
+ ma_srl(dest.low, dest.high, Imm32(imm.value - 32));
+ move32(Imm32(0), dest.high);
+ }
+}
+
+void
+MacroAssembler::rshift64(Register unmaskedShift, Register64 dest)
+{
+ Label done, less;
+ ScratchRegisterScope shift(*this);
+
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.low, dest.low, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_srl(dest.low, dest.high, shift);
+ move32(Imm32(0), dest.high);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_sll(SecondScratchReg, dest.high, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.high, dest.high, shift);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, 32 - imm.value);
+ as_or(dest.low, dest.low, scratch);
+ as_sra(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ as_sra(dest.high, dest.high, 31);
+ } else {
+ as_sra(dest.low, dest.high, imm.value - 32);
+ as_sra(dest.high, dest.high, 31);
+ }
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register unmaskedShift, Register64 dest)
+{
+ Label done, less;
+
+ ScratchRegisterScope shift(*this);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+
+ ma_srl(dest.low, dest.low, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_sra(dest.low, dest.high, shift);
+ as_sra(dest.high, dest.high, 31);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_sll(SecondScratchReg, dest.high, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_sra(dest.high, dest.high, shift);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateRight64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_sll(dest.high, input.high, Imm32(amount));
+ ma_srl(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_sll(dest.low, input.low, Imm32(amount));
+ ma_srl(SecondScratchReg, scratch, Imm32(32 - amount));
+ as_or(dest.low, dest.low, SecondScratchReg);
+        }
+ }
+}
+
+void
+MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done, zero;
+
+ ma_and(temp, shift, Imm32(0x3f));
+ ma_b(temp, Imm32(32), &high, GreaterThanOrEqual);
+
+ // high = high << shift | low >> 32 - shift
+ // low = low << shift | high >> 32 - shift
+ ma_sll(dest.high, src.high, temp);
+ ma_b(temp, Imm32(0), &zero, Equal);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_srl(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_sll(dest.low, src.low, temp);
+ ma_srl(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_b(&done);
+
+ bind(&zero);
+ ma_move(dest.low, src.low);
+ ma_move(dest.high, src.high);
+ ma_b(&done);
+
+    // A rotate by 32..63 is the same as a rotate by (64 - amount) in the
+    // other direction.
+ bind(&high);
+ ma_and(shift, shift, Imm32(0x3f));
+ ma_li(SecondScratchReg, Imm32(64));
+ as_subu(temp, SecondScratchReg, shift);
+
+ ma_srl(dest.high, src.high, temp);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+ ma_sll(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_srl(dest.low, src.low, temp);
+ ma_sll(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateLeft64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_srl(dest.high, input.high, Imm32(amount));
+ ma_sll(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_srl(dest.low, input.low, Imm32(amount));
+ ma_sll(SecondScratchReg, scratch, Imm32(32 - amount));
+ as_or(dest.low, dest.low, SecondScratchReg);
+ }
+ }
+}
+
+void
+MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done, zero;
+
+ ma_and(temp, shift, Imm32(0x3f));
+ ma_b(temp, Imm32(32), &high, GreaterThanOrEqual);
+
+ // high = high >> shift | low << 32 - shift
+ // low = low >> shift | high << 32 - shift
+ ma_srl(dest.high, src.high, temp);
+ ma_b(temp, Imm32(0), &zero, Equal);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_sll(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_srl(dest.low, src.low, temp);
+
+ ma_sll(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+
+ ma_b(&done);
+
+ bind(&zero);
+ ma_move(dest.low, src.low);
+ ma_move(dest.high, src.high);
+ ma_b(&done);
+
+    // A rotate by 32..63 is the same as a rotate by (64 - amount) in the
+    // other direction.
+ bind(&high);
+ ma_and(shift, shift, Imm32(0x3f));
+ ma_li(SecondScratchReg, Imm32(64));
+ as_subu(temp, SecondScratchReg, shift);
+
+ ma_sll(dest.high, src.high, temp);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_srl(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_sll(dest.low, src.low, temp);
+ ma_srl(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+
+ bind(&done);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ Label done, low;
+
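+    // Count leading zeros of the high word; if it is zero, count the low
+    // word instead and add 32.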
+ ma_b(src.high, Imm32(0), &low, Equal);
+ as_clz(dest, src.high);
+ ma_b(&done);
+
+ bind(&low);
+ as_clz(dest, src.low);
+ ma_addu(dest, Imm32(32));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ Label done, high;
+
+ ma_b(src.low, Imm32(0), &high, Equal);
+
+ ma_ctz(dest, src.low);
+ ma_b(&done);
+
+ bind(&high);
+ ma_ctz(dest, src.high);
+ ma_addu(dest, Imm32(32));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp)
+{
+ MOZ_ASSERT(dest.low != tmp);
+ MOZ_ASSERT(dest.high != tmp);
+ MOZ_ASSERT(dest.low != dest.high);
+
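+    // Count each 32-bit half separately, ordering the two counts so neither
+    // source half is clobbered before it is read, then sum them into the low
+    // word.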
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
+
+ ma_addu(dest.low, dest.high);
+ move32(Imm32(0), dest.high);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+
+ branch32(cond, lhs, val.firstHalf(), label);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ load32(rhs, scratch);
+ branch32(cond, lhs, scratch, label);
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
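+    // Ordered conditions take three branches: decide on the high words with
+    // the strict form of the condition, fail on the strictly opposite form,
+    // and settle equal high words with an unsigned comparison of the low
+    // words.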
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition invert_cond = Assembler::InvertCondition(cond);
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(invert_cond);
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ ma_b(lhs.high, val.hi(), success, cond1);
+ ma_b(lhs.high, val.hi(), fail, cond2);
+ ma_b(lhs.low, val.low(), success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition invert_cond = Assembler::InvertCondition(cond);
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(invert_cond);
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ ma_b(lhs.high, rhs.high, success, cond1);
+ ma_b(lhs.high, rhs.high, fail, cond2);
+ ma_b(lhs.low, rhs.low, success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ if (cond == Assembler::Zero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ as_or(ScratchRegister, lhs.low, lhs.high);
+ branchTestPtr(cond, ScratchRegister, ScratchRegister, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestUndefined(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestInt32(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, value.payloadReg(), value.payloadReg());
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestDouble(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNumber(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ ma_b(value.payloadReg(), value.payloadReg(), label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestString(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ Register string = value.payloadReg();
+ SecondScratchRegisterScope scratch2(*this);
+ ma_lw(scratch2, Address(string, JSString::offsetOfLength()));
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestSymbol(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNull(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestObject(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestPrimitive(cond, value.typeReg(), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ ma_b(value.typeReg(), ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ branchTestMagic(cond, valaddr, label);
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
+{
+ ma_sd(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_sd(src, addr);
+}
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
+{
+ ma_ss(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_ss(src, addr);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ BufferOffset bo = ma_BoundsCheck(ScratchRegister);
+ append(wasm::BoundsCheck(bo.getOffset()));
+
+ ma_b(index, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ Instruction* inst = (Instruction*) patchAt;
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+
+    // Patch the immediate in the lui/ori pair with the new bounds-check limit.
+ Assembler::UpdateLuiOriValue(i0, i1, limit);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr)
+{
+ asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+void
+MacroAssemblerMIPSCompat::computeEffectiveAddress(const BaseIndex& address, Register dest)
+{
+ computeScaledAddress(address, dest);
+ if (address.offset)
+ asMasm().addPtr(Imm32(address.offset), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_inl_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
new file mode 100644
index 000000000..0d3e55e21
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -0,0 +1,2365 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+void
+MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest)
+{
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+    // Use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
+    // calls this with ScratchDoubleReg as dest.
+ MOZ_ASSERT(dest != SecondScratchDoubleReg);
+
+    // Bias by INT32_MIN: the result, read as a signed int32, equals
+    // src - 2^31 exactly.
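+    // Worked example: src = 0xFFFFFFFF biases to 0x7FFFFFFF, which converts
+    // exactly to 2147483647.0; adding 2147483648.0 below restores the
+    // original unsigned value 4294967295.0.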
+ ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+ // Convert value
+ as_mtc1(ScratchRegister, dest);
+ as_cvtdw(dest, dest);
+
+ // Add unsigned value of INT32_MIN
+ ma_lid(SecondScratchDoubleReg, 2147483648.0);
+ as_addd(dest, dest, SecondScratchDoubleReg);
+}
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool
+MacroAssemblerMIPSCompat::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+ convertUInt32ToDouble(src.high, dest);
+ loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
+ asMasm().mulDouble(ScratchDoubleReg, dest);
+ convertUInt32ToDouble(src.low, ScratchDoubleReg);
+ asMasm().addDouble(ScratchDoubleReg, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ // We cannot do the same as convertUInt32ToDouble because float32 doesn't
+ // have enough precision.
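+    // (float32 has only a 24-bit mantissa; e.g. 2147483648.0f + 1.0f rounds
+    // back to 2147483648.0f, so the bias trick used for doubles would drop
+    // the low bits.)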
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ convertInt32ToFloat32(src, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_cvtsd(dest, src);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
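+        // -0.0 has the bit pattern hi = 0x80000000 (== INT32_MIN), lo = 0.
+        // movn clears 'dest' whenever the low word is non-zero, so the
+        // compare below only matches an exact -0.0.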
+ moveFromDoubleHi(src, dest);
+ moveFromDoubleLo(src, ScratchRegister);
+ as_movn(dest, zero, ScratchRegister);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ // Convert double to int, then convert back and check if we have the
+ // same number.
+ as_cvtwd(ScratchDoubleReg, src);
+ as_mfc1(dest, ScratchDoubleReg);
+ as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
+ ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
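+        // -0.0f has the bit pattern 0x80000000 (== INT32_MIN), so a single
+        // word compare detects it.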
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ // Converting the floating point value to an integer and then converting it
+ // back to a float32 would not work, as float to int32 conversions are
+ // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+ // and then back to float(INT32_MAX + 1)). If this ever happens, we just
+ // bail out.
+ as_cvtws(ScratchFloat32Reg, src);
+ as_mfc1(dest, ScratchFloat32Reg);
+ as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
+ ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+ // Bail out in the clamped cases.
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
+{
+ as_cvtds(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, CodeOffset* label)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->bind(bo.getOffset());
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm)
+{
+ ma_li(dest, Imm32(uint32_t(imm.value)));
+}
+
+// This method generates a lui/ori instruction pair that can be modified by
+// UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
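+//
+// For example, ma_liPatchable(dest, Imm32(0x12345678)) emits
+//   lui dest, 0x1234
+//   ori dest, dest, 0x5678
+// and both 16-bit immediates can later be rewritten in place.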
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
+{
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+}
+
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
+{
+ ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm)
+{
+ ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Arithmetic-based ops.
+
+// Add.
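+// Signed addition overflows exactly when both operands have the same sign and
+// the result's sign differs from it. The xor of the operands tests the first
+// condition (sign bit clear means same sign) and the xor of an operand with
+// the result tests the second.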
+template <typename L>
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow)
+{
+ Label goodAddition;
+ as_addu(rd, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+}
+
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Register rt, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt,
+ wasm::TrapDesc overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
+{
+ // Check for signed range because of as_addiu
+ // Check for unsigned range because of as_xori
+ if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
+ Label goodAddition;
+ as_addiu(rd, rs, imm.value);
+
+ // If different sign, no overflow
+ as_xori(ScratchRegister, rs, imm.value);
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
+ wasm::TrapDesc overflow);
+
+// Subtract.
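+// Signed subtraction overflows exactly when the operands have different signs
+// and the result's sign differs from the minuend's; the two xor/branch pairs
+// below test those two conditions.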
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ Label goodSubtraction;
+ // Use second scratch. The instructions generated by ma_b don't use the
+ // second scratch register.
+ as_subu(rd, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodSubtraction);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPS::ma_load(Register dest, Address address,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset))
+ {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension)
+ as_lbu(dest, base, encodedOffset);
+ else
+ as_lb(dest, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension)
+ as_lhu(dest, base, encodedOffset);
+ else
+ as_lh(dest, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_lw(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_store(Register data, Address address, LoadStoreSize size,
+ LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address, Register dest)
+{
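+    // dest <- base + (index << scale); any constant offset in the BaseIndex
+    // is left for the caller to handle.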
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_sll(ScratchRegister, address.index, Imm32(shift));
+ as_addu(dest, address.base, ScratchRegister);
+ } else {
+ as_addu(dest, address.base, address.index);
+ }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void
+MacroAssemblerMIPS::ma_lw(Register data, Address address)
+{
+ ma_load(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, Address address)
+{
+ ma_store(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
+{
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sw(ScratchRegister, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(address.offset));
+ as_addu(SecondScratchReg, address.base, SecondScratchReg);
+ as_sw(ScratchRegister, SecondScratchReg, 0);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address)
+{
+ ma_store(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_pop(Register r)
+{
+ as_lw(r, StackPointer, 0);
+ as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void
+MacroAssemblerMIPS::ma_push(Register r)
+{
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+ as_sw(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+{
+ if (label->bound()) {
+        // Generate the long jump for calls because the return address has to
+        // be the address after the reserved block.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+ return;
+ }
+
+    // The second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for long jump.
+ as_nop();
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+ MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset))
+ jumpKind = ShortJump;
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Handle long conditional branch
+ writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+    // The second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+        // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ // Indicate that this is short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+    // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
+{
+ ma_lw(ScratchRegister, addr);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
+{
+ ma_lw(ScratchRegister, lhs);
+ ma_cmp_set(dst, ScratchRegister, rhs, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
+{
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+    };
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+
+    // Put the high part of the 64-bit value into the odd register.
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.hi));
+ moveToDoubleHi(ScratchRegister, dest);
+ }
+
+    // Put the low part of the 64-bit value into the even register.
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.lo));
+ moveToDoubleLo(ScratchRegister, dest);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
+{
+ moveToDoubleLo(src.payloadReg(), dest);
+ moveToDoubleHi(src.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ls(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
+{
+ // Use single precision load instructions so we don't have to worry about
+ // alignment.
+
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
+ as_ls(ft, address.base, address.offset);
+ as_ls(getOddPair(ft), address.base, off2);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
+{
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
+ as_ss(ft, address.base, address.offset);
+ as_ss(getOddPair(ft), address.base, off2);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ss(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_pop(FloatRegister fs)
+{
+ ma_ld(fs.doubleOverlay(0), Address(StackPointer, 0));
+ as_addiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void
+MacroAssemblerMIPS::ma_push(FloatRegister fs)
+{
+ as_addiu(StackPointer, StackPointer, -sizeof(double));
+ ma_sd(fs.doubleOverlay(0), Address(StackPointer, 0));
+}
+
+bool
+MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+ uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+
+ asMasm().Push(Imm32(descriptor)); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+void
+MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::move32(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
+{
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
+{
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
+{
+ ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const Address& address, FloatRegister dest)
+{
+ ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ld(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
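+    // Each 32-bit half of the double is assembled with an lwl/lwr pair, which
+    // together read the four (possibly unaligned) bytes at offset..offset+3.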
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
+ as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+ moveToDoubleLo(temp, dest);
+ as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+ moveToDoubleHi(temp, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+ moveToDoubleLo(temp, dest);
+ as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ moveToDoubleHi(temp, dest);
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
+{
+ loadFloat32(src, dest);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ls(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
+ as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+
+ moveToFloat32(temp, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Register src, const Address& address)
+{
+ ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Register src, const Address& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, const Address& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address)
+{
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address)
+{
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address)
+{
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
+{
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
+ as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
+ moveFromDoubleHi(src, temp);
+ as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
+ moveFromDoubleHi(src, temp);
+ as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ }
+}
+
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+ MOZ_ASSERT(input != ScratchDoubleReg);
+ Label positive, done;
+
+ // <= 0 or NaN --> 0
+ zeroDouble(ScratchDoubleReg);
+ branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
+ {
+ move32(Imm32(0), output);
+ jump(&done);
+ }
+
+ bind(&positive);
+
+ // Add 0.5 and truncate.
+ loadConstantDouble(0.5, ScratchDoubleReg);
+ addDouble(ScratchDoubleReg, input);
+
+ Label outOfRange;
+
+ branchTruncateDoubleMaybeModUint32(input, output, &outOfRange);
+ asMasm().branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+ {
+ // Check if we had a tie.
+ convertInt32ToDouble(output, ScratchDoubleReg);
+ branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);
+
+ // It was a tie. Mask out the ones bit to get an even value.
+ // See also js_TypedArray_uint8_clamp_double.
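+        // E.g. an input of 2.5 becomes 3.0 after adding 0.5; the compare
+        // above detects the tie and the mask below rounds it to the even
+        // value 2.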
+ and32(Imm32(~1), output);
+ jump(&done);
+ }
+
+ // > 255 --> 255
+ bind(&outOfRange);
+ {
+ move32(Imm32(255), output);
+ }
+
+ bind(&done);
+}
+
+// higher level tag testing code
+Operand
+MacroAssemblerMIPSCompat::ToPayload(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+Operand
+MacroAssemblerMIPSCompat::ToType(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
+}
+
+void
+MacroAssemblerMIPSCompat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
+}
+
+void
+MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
+}
+
+// unboxing code
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand, Register dest)
+{
+ if (operand.payloadReg() != dest)
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src, Register dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ moveToDoubleLo(operand.payloadReg(), dest);
+ moveToDoubleHi(operand.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const Address& src, FloatRegister dest)
+{
+ ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ moveToDoubleLo(ScratchRegister, dest);
+ ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+ moveToDoubleHi(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src, Register dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else if (src.payloadReg() != dest.gpr()) {
+ ma_move(dest.gpr(), src.payloadReg());
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand& src, Register dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxDouble(FloatRegister src, const ValueOperand& dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest)
+{
+ if (src != dest.payloadReg())
+ ma_move(dest.payloadReg(), src);
+ ma_li(dest.typeReg(), ImmType(type));
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(float f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ ma_ld(dest, src);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift)
+{
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ // First, recompute the offset that had been stored in the scratch register
+ // since the scratch register was overwritten loading in the type.
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ loadDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(double dp, FloatRegister dest)
+{
+ ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+    };
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(d.bits());
+
+    // Put the high part of the 64-bit value into the odd register.
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(intStruct.hi));
+ moveToDoubleHi(scratch, dest);
+ }
+
+    // Put the low part of the 64-bit value into the even register.
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(intStruct.lo));
+ moveToDoubleLo(scratch, dest);
+ }
+}
+
+Register
+MacroAssemblerMIPSCompat::extractObject(const Address& address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const Address& address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address, Register scratch)
+{
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+
+uint32_t
+MacroAssemblerMIPSCompat::getType(const Value& val)
+{
+ return val.toNunboxTag();
+}
+
+void
+MacroAssemblerMIPSCompat::moveData(const Value& val, Register data)
+{
+ if (val.isMarkable())
+ ma_li(data, ImmGCPtr(val.toMarkablePointer()));
+ else
+ ma_li(data, Imm32(val.toNunboxPayload()));
+}
+
+void
+MacroAssemblerMIPSCompat::moveValue(const Value& val, Register type, Register data)
+{
+ MOZ_ASSERT(type != data);
+ ma_li(type, Imm32(getType(val)));
+ moveData(val, data);
+}
+void
+MacroAssemblerMIPSCompat::moveValue(const Value& val, const ValueOperand& dest)
+{
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+}
+
+/* There are three paths through the backedge jump. They are listed here in
+ * the order in which instructions are executed.
+ * - The short jump is simple:
+ * b offset # Jumps directly to target.
+ * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to loop header:
+ * b label1
+ * lui at, addr1_hi # In delay slot. We use the value in 'at' later.
+ * label1:
+ * ori at, addr1_lo
+ * jr at
+ * lui at, addr2_hi # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to interrupt loop:
+ * b label2
+ * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
+ * label2:
+ * lui at, addr2_hi
+ * ori at, addr2_lo
+ * jr at
+ * nop # In delay slot.
+ *
+ * The backedge is done this way to avoid patching lui+ori pair while it is
+ * being executed. Look also at jit::PatchBackedge().
+ */
+CodeOffsetJump
+MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+
+ // Backedges are short jumps when bound, but can become long when patched.
+ m_buffer.ensureSpace(8 * sizeof(uint32_t));
+ if (label->bound()) {
+ int32_t offset = label->offset() - bo.getOffset();
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ as_b(BOffImm16(offset));
+ } else {
+ // Jump to "label1" by default to jump to the loop header.
+ as_b(BOffImm16(2 * sizeof(uint32_t)));
+ }
+ // No need for nop here. We can safely put next instruction in delay slot.
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 3 * sizeof(uint32_t));
+ as_jr(ScratchRegister);
+ // No need for nop here. We can safely put next instruction in delay slot.
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 8 * sizeof(uint32_t));
+ return CodeOffsetJump(bo.getOffset());
+}
+
+CodeOffsetJump
+MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+ addLongJump(bo);
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffsetJump(bo.getOffset());
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
+{
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address& dest)
+{
+ ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(getType(val)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
+{
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory.
+ if (src.base != val.payloadReg()) {
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ } else {
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+ MOZ_ASSERT(payload != dest.typeReg());
+ ma_li(dest.typeReg(), ImmType(type));
+ if (payload != dest.payloadReg())
+ ma_move(dest.payloadReg(), payload);
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
+{
+ // Allocate stack slots for type and payload. One for each.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store type and payload.
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(const Address& addr)
+{
+ // Allocate stack slots for type and payload. One for each.
+ ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ // Store type and payload.
+ ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+ ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::popValue(ValueOperand val)
+{
+ // Load payload and type.
+ as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+ as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+ // Free stack.
+ as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest)
+{
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
+{
+ ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value& val, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+
+ moveData(val, ScratchRegister);
+
+ as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
+{
+ ma_li(SecondScratchReg, tag);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ ma_li(ScratchRegister, tag);
+ as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::breakpoint()
+{
+ as_break(0);
+}
+
+void
+MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure)
+{
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSCompat::checkStackAlignment()
+{
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void
+MacroAssemblerMIPSCompat::alignStackPointer()
+{
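+    // The original StackPointer is saved in the newly reserved, aligned slot
+    // so that restoreStackPointer() can reload it.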
+ movePtr(StackPointer, SecondScratchReg);
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
+ storePtr(SecondScratchReg, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::restoreStackPointer()
+{
+ loadPtr(Address(StackPointer, 0), StackPointer);
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+ if (framePushed() % ABIStackAlignment != 0) {
+ aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
+ reserveStack(aic.alignmentPadding);
+ } else {
+ aic.alignmentPadding = 0;
+ }
+ MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
+ checkStackAlignment();
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+ if (aic.alignmentPadding != 0)
+ freeStack(aic.alignmentPadding);
+}
+
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+ ma_move(a0, StackPointer); // Use a0 since it is a first function argument
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+
+ // We're going to be returning by the ion calling convention
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1, a2);
+ loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jump(a0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the
+ // caller.
+ bind(&return_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ ma_move(StackPointer, BaselineFrameReg);
+ pop(BaselineFrameReg);
+
+ // If profiling is enabled, update lastProfilingFrame to refer to the caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
+ ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
+ jump(a1);
+}
+
+template<typename T>
+void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+CodeOffset
+MacroAssemblerMIPSCompat::toggledJump(Label* label)
+{
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset
+MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
+{
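+ // The patchable call site is always four instructions long: ma_liPatchable
+ // materializes the target with a lui/ori pair, followed by either
+ // jalr ScratchRegister + nop (enabled) or nop + nop (disabled), which is
+ // what ToggledCallSize() reports and the assertion below checks.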
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+}
+
+void
+MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerMIPSCompat::profilerExitFrame()
+{
+ branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value)
+ asMasm().subPtr(imm32, StackPointer);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ // Double values have to be aligned. We reserve extra space so that we can
+ // start writing from the first aligned location.
+ // We reserve a whole extra double so that the buffer has an even size.
+ ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
+ reserveStack(diffF + sizeof(double));
+
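+ // Only even-numbered (double-capable) registers are written; each as_sd
+ // spills the full 64-bit register relative to the aligned base computed
+ // above, matching the layout that PopRegsInMaskIgnore reads back.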
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+ if ((*iter).code() % 2 == 0)
+ as_sd(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ MOZ_ASSERT(diffF == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ // Read the buffer from the first aligned location.
+ ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
+ ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
+
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+ if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
+ // Use assembly l.d because we have aligned the stack.
+ as_ld(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ freeStack(reservedF + sizeof(double));
+ MOZ_ASSERT(diffF == 0);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ freeStack(reservedG);
+ MOZ_ASSERT(diffG == 0);
+}
+
+// ===============================================================
+// ABI function calls.
+
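+// setupUnalignedABICall() saves the caller's sp in `scratch` and spills it at
+// the new stack top; when dynamicAlignment_ is set, callWithABIPost() reloads
+// it from [sp + stackAdjust], so the two must stay in sync.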
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Save $ra because the call is going to clobber it. Restore it in
+ // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+ // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ // Load the callee into t9; no instruction between this move and the call
+ // should clobber it. We can't call through fun directly because it may be
+ // one of the IntArg registers clobbered before the call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
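+ // For Equal we must only take the label if the value is an object in the
+ // nursery, so a non-object short-circuits to done; for NotEqual a
+ // non-object already satisfies the condition and jumps straight to label.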
+ branchTestObject(Assembler::NotEqual, address, cond == Assembler::Equal ? &done : label);
+ loadPtr(address, temp);
+ branchPtrInNurseryChunk(cond, temp, InvalidReg, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ moveData(rhs, scratch);
+
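+ // A nunbox Value matches only if both the payload and the type tag match.
+ // For Equal, a payload mismatch falls through to done; for NotEqual, a
+ // mismatch in either word branches straight to the label.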
+ if (cond == Equal) {
+ Label done;
+ ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
+ {
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal);
+ }
+ bind(&done);
+ } else {
+ ma_b(lhs.payloadReg(), scratch, label, NotEqual);
+
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag if needed.
+ if (valueType != slotType)
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+ // Store the payload.
+ if (value.constant())
+ storePayload(value.value(), dest);
+ else
+ storePayload(value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
new file mode 100644
index 000000000..4c7618d08
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -0,0 +1,1021 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_h
+#define jit_mips32_MacroAssembler_mips32_h
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+struct ImmTag : public Imm32
+{
+ ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+struct ImmType : public ImmTag
+{
+ ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
+
+static const uint32_t LOW_32_MASK = (1LL << 32) - 1;
+static const int32_t LOW_32_OFFSET = 0;
+static const int32_t HIGH_32_OFFSET = 4;
+
+class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
+{
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+ void ma_li(Register dest, CodeOffset* label);
+
+ void ma_liPatchable(Register dest, Imm32 imm);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
+
+ // subtract
+ void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+
+ // memory
+ // shortcut for when we know we're transferring 32 bits of data
+ void ma_lw(Register data, Address address);
+
+ void ma_sw(Register data, Address address);
+ void ma_sw(Imm32 imm, Address address);
+ void ma_sw(Register data, BaseIndex& address);
+
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+ // Branches used when called from within MIPS-specific code.
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+ void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ {
+ ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeWord);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister fd, Address address);
+ void ma_ld(FloatRegister fd, Address address);
+ void ma_sd(FloatRegister fd, Address address);
+ void ma_ss(FloatRegister fd, Address address);
+
+ void ma_pop(FloatRegister fs);
+ void ma_push(FloatRegister fs);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
+ ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
+ void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+ ma_lw(ScratchRegister, lhs);
+ ma_li(SecondScratchReg, Imm32(uint32_t(imm.value)));
+ ma_cmp_set(dst, ScratchRegister, SecondScratchReg, c);
+ }
+
+ // These functions abstract access to the high part of a double precision
+ // float register. They are intended to work with both 32 bit and 64 bit
+ // floating point coprocessors.
+ // :TODO: (Bug 985881) Modify this for N32 ABI to use mthc1 and mfhc1
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mtc1(src, getOddPair(dest));
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfc1(dest, getOddPair(src));
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
+{
+ public:
+ using MacroAssemblerMIPS::call;
+
+ MacroAssemblerMIPSCompat()
+ { }
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_addu(dest, address.base, Imm32(address.offset));
+ }
+
+ inline void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) {
+ ma_b(dest);
+ }
+
+ void mov(Register src, Register dest) {
+ as_ori(dest, src, 0);
+ }
+ void mov(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(Register src, Address dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+ void mov(Address src, Register dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() {
+ as_nop();
+ }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) {
+ ma_push(reg);
+ }
+ void push(FloatRegister reg) {
+ ma_push(reg);
+ }
+ void pop(Register reg) {
+ ma_pop(reg);
+ }
+ void pop(FloatRegister reg) {
+ ma_pop(reg);
+ }
+
+ // Emit a branch that can be toggled to a non-operation. On MIPS we use an
+ // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset label = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void jump(Label* label) {
+ ma_b(label);
+ }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) {
+ branch(code);
+ }
+
+ void jump(wasm::TrapDesc target) {
+ ma_b(target);
+ }
+
+ void negl(Register reg) {
+ ma_negu(reg, reg);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ return value.typeReg();
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest);
+ void unboxNonDouble(const Address& src, Register dest);
+ void unboxNonDouble(const BaseIndex& src, Register dest);
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.payloadReg(), val.payloadReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch);
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address& address, Register scratch);
+ Register extractTag(const BaseIndex& address, Register scratch);
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& address, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest);
+
+ // higher level tag testing code
+ Operand ToPayload(Operand base);
+ Address ToPayload(Address base) {
+ return ToPayload(Operand(base)).toAddress();
+ }
+
+ protected:
+ Operand ToType(Operand base);
+ Address ToType(Address base) {
+ return ToType(Operand(base)).toAddress();
+ }
+
+ uint32_t getType(const Value& val);
+ void moveData(const Value& val, Register data);
+ public:
+ void moveValue(const Value& val, Register type, Register data);
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address, dest.fpu());
+ else
+ ma_lw(dest.gpr(), address);
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ else
+ load32(address, dest.gpr());
+ }
+
+ template <typename T>
+ void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
+ MIRType slotType);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 4:
+ store32(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(),
+ s1 = src.payloadReg(), d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ MOZ_ASSERT(d1 != ScratchRegister);
+ MOZ_ASSERT(d0 != ScratchRegister);
+ move32(d1, ScratchRegister);
+ move32(d0, d1);
+ move32(ScratchRegister, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ mozilla::Swap(s0, s1);
+ mozilla::Swap(d0, d1);
+ }
+
+ if (s0 != d0)
+ move32(s0, d0);
+ if (s1 != d1)
+ move32(s1, d1);
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isMarkable())
+ push(ImmGCPtr(val.toMarkablePointer()));
+ else
+ push(Imm32(val.toNunboxPayload()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+ void pushValue(const Address& addr);
+
+ void storePayload(const Value& val, Address dest);
+ void storePayload(Register src, Address dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, Address dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(void* handler);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
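+ // valueTemp, offsetTemp and maskTemp are scratch registers passed through to
+ // the shared MIPS implementation; sub-word accesses are expected to be
+ // emulated there with LL/SC on the enclosing aligned word, hence the extra
+ // shift/mask temporaries.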
+ template<typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange32(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAdd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicSub8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAnd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicOr8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicXor8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+ void load64(const Address& address, Register64 dest) {
+ load32(Address(address.base, address.offset + INT64LOW_OFFSET), dest.low);
+ int32_t highOffset = (address.offset < 0) ? -int32_t(INT64HIGH_OFFSET) : INT64HIGH_OFFSET;
+ load32(Address(address.base, address.offset + highOffset), dest.high);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ // NOTE: This will use the second scratch register on MIPS. Only ARM needs
+ // an implementation without the second scratch register.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ template <typename T> void storePtr(ImmWord imm, T address);
+ template <typename T> void storePtr(ImmPtr imm, T address);
+ template <typename T> void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_movd(dest, src);
+ }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDoubleLo(zero, reg);
+ moveToDoubleHi(zero, reg);
+ }
+
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ void alignStackPointer();
+ void restoreStackPointer();
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ public:
+ CodeOffset labelForPatch() {
+ return CodeOffset(nextOffset().getOffset());
+ }
+
+ void lea(Operand addr, Register dest) {
+ ma_addu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void ma_storeImm(Imm32 imm, const Address& addr) {
+ ma_sw(imm, addr);
+ }
+
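+ // Emits a patchable placeholder for a heap bound; callers are expected to
+ // patch the real bound in later via the returned BufferOffset.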
+ BufferOffset ma_BoundsCheck(Register bounded) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(bounded, ImmWord(0));
+ return bo;
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
+ }
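+ // Reload the pinned wasm registers from TlsData. GlobalReg is biased by
+ // WasmGlobalRegBias here so that the subtraction in loadWasmGlobalPtr()
+ // cancels it out, keeping more of the global area within reach of a signed
+ // 16-bit offset.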
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
+ ma_addu(GlobalReg, Imm32(WasmGlobalRegBias));
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_h */
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.cpp b/js/src/jit/mips32/MoveEmitter-mips32.cpp
new file mode 100644
index 000000000..7b5a8996f
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.cpp
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MoveEmitter-mips32.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+ // There is a cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ // Since it is uncertain whether the load will be aligned or not,
+ // just fill both halves of the cycle slot with the same value.
+ masm.storeFloat32(temp, cycleSlot(slotId, 0));
+ masm.storeFloat32(temp, cycleSlot(slotId, 4));
+ } else {
+ // Just always store the largest possible size.
+ masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId, 0));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0, 0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0, 0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+ // We are resolving a move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId, 0), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ uint32_t offset = 0;
+ if (from.floatReg().numAlignedAliased() == 1)
+ offset = sizeof(float);
+ masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId, 0), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0, 0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0, 0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ // Ensure that we can use ScratchDoubleReg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing a double parameter in the a2,a3 register pair.
+ // MacroAssembler::passABIArg() adds two moves for each double
+ // parameter.
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.moveFromDoubleLo(from.floatReg(), a2);
+ masm.moveFromDoubleHi(from.floatReg(), a3);
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing a double parameter in the a2,a3 register pair.
+ // MacroAssembler::passABIArg() adds two moves for each double
+ // parameter.
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.loadPtr(getAdjustedAddress(from), a2);
+ masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
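The breakCycle()/completeCycle() pair above implements the usual two-phase resolution of a move cycle (A -> B, B -> A): the destination of the first move is parked in a spill slot, the ordinary moves run, and the parked value is restored last. A minimal sketch of the idea, assuming a simplified slot layout rather than the MoveEmitter's real cycleSlot() interface:

    #include <cstdint>
    #include <cstring>

    struct CycleSlots { uint64_t slot[1]; };  // Hypothetical spill storage.

    // Resolve the swap A <-> B the way the emitter does for GENERAL moves.
    static void resolveSwap(uint32_t& A, uint32_t& B, CycleSlots& s) {
        std::memcpy(&s.slot[0], &B, sizeof(B));  // breakCycle: save B.
        B = A;                                   // The ordinary move (A -> B).
        std::memcpy(&A, &s.slot[0], sizeof(A));  // completeCycle: restore into A.
    }

For FLOAT32 values the emitter stores the value into both words of the slot, since it cannot tell in advance which word the completing load will read back.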
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.h b/js/src/jit/mips32/MoveEmitter-mips32.h
new file mode 100644
index 000000000..8d8d1c0c1
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MoveEmitter_mips32_h
+#define jit_mips32_MoveEmitter_mips32_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS : public MoveEmitterMIPSShared
+{
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS(MacroAssembler& masm)
+ : MoveEmitterMIPSShared(masm)
+ { }
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MoveEmitter_mips32_h */
diff --git a/js/src/jit/mips32/SharedIC-mips32.cpp b/js/src/jit/mips32/SharedIC-mips32.cpp
new file mode 100644
index 000000000..9a9c85ac8
--- /dev/null
+++ b/js/src/jit/mips32/SharedIC-mips32.cpp
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsiter.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jsboolinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compute the result into R0. No explicit unboxing is needed; R2's payloadReg is used as a scratch register.
+ Register scratchReg = R2.payloadReg();
+
+ // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+ AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
+ savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs.set());
+
+ Label goodMul, divTest1, divTest2;
+ switch (op_) {
+ case JSOP_ADD:
+ // We know R0.typeReg() already contains the integer tag. No boxing
+ // required.
+ masm.ma_addTestOverflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_SUB:
+ masm.ma_subTestOverflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_MUL: {
+ masm.ma_mul_branch_overflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+
+ masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);
+
+ // Result is -0 if operands have different signs.
+ masm.as_xor(t8, R0.payloadReg(), R1.payloadReg());
+ masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+
+ masm.bind(&goodMul);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ }
+ case JSOP_DIV:
+ case JSOP_MOD: {
+ // Check for INT_MIN / -1; it results in a double.
+ masm.ma_b(R0.payloadReg(), Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(-1), &failure, Assembler::Equal, ShortJump);
+ masm.bind(&divTest1);
+
+ // Check for division by zero
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::Equal, ShortJump);
+
+ // Check for 0 / X with X < 0 (results in -0).
+ masm.ma_b(R0.payloadReg(), Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&divTest2);
+
+ masm.as_div(R0.payloadReg(), R1.payloadReg());
+
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
+ masm.as_mflo(scratchReg);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ } else {
+ Label done;
+ // If X % Y == 0 and X < 0, the result is -0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R0.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ }
+ case JSOP_BITOR:
+ masm.as_or(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITXOR:
+ masm.as_xor(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITAND:
+ masm.as_and(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_LSH:
+ // MIPS uses only the 5 lowest bits of R1 as the shift amount.
+ masm.ma_sll(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_RSH:
+ masm.ma_sra(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_URSH:
+ masm.ma_srl(scratchReg, R0.payloadReg(), R1.payloadReg());
+ if (allowDouble_) {
+ Label toUint;
+ masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);
+
+ // Move result and box for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(scratchReg, FloatReg1);
+ masm.boxDouble(FloatReg1, R0);
+ } else {
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ // Move result for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.not32(R0.payloadReg());
+ break;
+ case JSOP_NEG:
+ // Guard against 0 and INT_MIN; both cases produce a double result.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(INT32_MAX), &failure);
+
+ masm.neg32(R0.payloadReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ return false;
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+
+} // namespace jit
+} // namespace js
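The JSOP_DIV/JSOP_MOD path above bails out of the int32 fast path for exactly the cases whose result cannot be represented as an int32. A host-side restatement of those guards, as a sketch rather than the stub's actual control flow:

    #include <climits>
    #include <cstdint>

    // Returns true if lhs / rhs (or lhs % rhs) may stay on the int32 fast path;
    // otherwise the stub falls through to &failure and the result is a double.
    static bool int32DivModFitsFastPath(int32_t lhs, int32_t rhs, bool isDiv) {
        if (lhs == INT_MIN && rhs == -1)
            return false;                        // Overflow: 2^31 is not an int32.
        if (rhs == 0)
            return false;                        // Division by zero.
        if (lhs == 0 && rhs < 0)
            return false;                        // 0 / negative would be -0.
        if (isDiv)
            return lhs % rhs == 0;               // Non-integer quotients are doubles.
        return !(lhs % rhs == 0 && lhs < 0);     // x % y == 0 with x < 0 is -0.
    }

The JSOP_URSH case is similar in spirit: a logical shift whose result has the sign bit set only stays on the fast path when boxing as a double is allowed (allowDouble_); otherwise it also jumps to &failure.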
diff --git a/js/src/jit/mips32/SharedICRegisters-mips32.h b/js/src/jit/mips32/SharedICRegisters-mips32.h
new file mode 100644
index 000000000..78c124d90
--- /dev/null
+++ b/js/src/jit/mips32/SharedICRegisters-mips32.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_SharedICRegisters_mips32_h
+#define jit_mips32_SharedICRegisters_mips32_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = s5;
+static constexpr Register BaselineStackReg = sp;
+
+static constexpr ValueOperand R0(a3, a2);
+static constexpr ValueOperand R1(s7, s6);
+static constexpr ValueOperand R2(t7, t6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t5;
+
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_SharedICRegisters_mips32_h */
diff --git a/js/src/jit/mips32/Simulator-mips32.cpp b/js/src/jit/mips32/Simulator-mips32.cpp
new file mode 100644
index 000000000..ae2e9d4f3
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -0,0 +1,3519 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips32/Simulator-mips32.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+
+#include "jit/mips32/Assembler-mips32.h"
+#include "vm/Runtime.h"
+
+namespace js {
+namespace jit {
+
+static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
+static bool
+HaveSameSign(int32_t a, int32_t b)
+{
+ return ((a ^ b) >= 0);
+}
+
+static uint32_t
+GetFCSRConditionBit(uint32_t cc)
+{
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+static const int32_t kRegisterskMaxValue = 0x7fffffff;
+static const int32_t kRegisterskMinValue = 0x80000000;
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+class SimInstruction
+{
+ public:
+ enum {
+ kInstrSize = 4,
+ // On MIPS the PC cannot be directly accessed. We behave as if it were
+ // always the address of the instruction currently being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const {
+ return (instructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kRegisterType,
+ kImmediateType,
+ kJumpType,
+ kUnsupported = -1
+ };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline Opcode opcodeValue() const {
+ return static_cast<Opcode>(bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const {
+ return bits(FDShift + FDBits - 1, FDShift);
+ }
+
+ inline int fsValue() const {
+ return bits(FSShift + FSBits - 1, FSShift);
+ }
+
+ inline int ftValue() const {
+ return bits(FTShift + FTBits - 1, FTShift);
+ }
+
+ inline int frValue() const {
+ return bits(FRShift + FRBits - 1, FRShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode opcodeFieldRaw() const {
+ return static_cast<Opcode>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const {
+ return instructionBits() & RSMask;
+ }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ Opcode op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Return true if the instruction must not be placed in a branch delay slot.
+ bool isForbiddenInBranchDelay() const;
+ // Return true if the instruction 'links', e.g. jal, bal.
+ bool isLinkingInstruction() const;
+ // Return true if the instruction is a break or a trap.
+ bool isTrap() const;
+
+ private:
+
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
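All of the field accessors above boil down to bits(hi, lo). A stand-alone sketch of the same extraction for a MIPS R-type word; the shift/width values are the standard MIPS field positions, written out here as assumptions rather than taken from the assembler headers:

    #include <cstdint>

    // Extract bits hi..lo (inclusive), exactly as SimInstruction::bits() does.
    static uint32_t extractBits(uint32_t insn, int hi, int lo) {
        return (insn >> lo) & ((2u << (hi - lo)) - 1);
    }

    static void decodeRType(uint32_t insn) {
        uint32_t op = extractBits(insn, 31, 26);  // Primary opcode.
        uint32_t rs = extractBits(insn, 25, 21);  // Source register.
        uint32_t rt = extractBits(insn, 20, 16);  // Target register.
        uint32_t rd = extractBits(insn, 15, 11);  // Destination register.
        uint32_t sa = extractBits(insn, 10, 6);   // Shift amount.
        uint32_t fn = extractBits(insn, 5, 0);    // Function field.
        (void)op; (void)rs; (void)rt; (void)rd; (void)sa; (void)fn;
    }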
+
+bool
+SimInstruction::isForbiddenInBranchDelay() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ }
+}
+
+bool
+SimInstruction::isLinkingInstruction() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool
+SimInstruction::isTrap() const
+{
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return true;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type
+SimInstruction::instructionType() const
+{
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_break:
+ case ff_sll:
+ case ff_srl:
+ case ff_sra:
+ case ff_sllv:
+ case ff_srlv:
+ case ff_srav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_multu:
+ case ff_div:
+ case ff_divu:
+ case ff_add:
+ case ff_addu:
+ case ff_sub:
+ case ff_subu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_clz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_ext:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+ // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_sb:
+ case op_sh:
+ case op_swl:
+ case op_sw:
+ case op_swr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ return kImmediateType;
+ // 26-bit immediate type instructions, e.g. j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ }
+ return kUnsupported;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * SimInstruction::kInstrSize;
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
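The simulated I-cache keys pages by their base address and tracks validity per 4-byte line. A small sketch of the address arithmetic used below in FlushOnePageLocked() and CheckICacheLocked(), using the same shift constants as CachePage:

    #include <cstdint>

    static const int kPageShift = 12;                       // 4 KB pages.
    static const uintptr_t kPageMask = (1u << kPageShift) - 1;
    static const int kLineShift = 2;                        // 4-byte lines.

    struct CacheCoords {
        uintptr_t pageBase;  // Hash-map key for the CachePage.
        int offset;          // Byte offset within the page.
        int lineIndex;       // Index into the per-page validity map.
    };

    static CacheCoords locate(uintptr_t addr) {
        CacheCoords c;
        c.pageBase = addr & ~kPageMask;
        c.offset = int(addr & kPageMask);
        c.lineIndex = c.offset >> kLineShift;
        return c;
    }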
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex>
+{
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache(Simulator* sim)
+ : Base(sim->cacheLock_)
+ , sim_(sim)
+ {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isNothing());
+#ifdef DEBUG
+ sim_->cacheLockHolder_ = mozilla::Some(ThisThread::GetId());
+#endif
+ }
+
+ ~AutoLockSimulatorCache() {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isSome());
+#ifdef DEBUG
+ sim_->cacheLockHolder_.reset();
+#endif
+ }
+
+ private:
+ Simulator* const sim_;
+};
+
+bool Simulator::ICacheCheckingEnabled = false;
+
+int Simulator::StopSimAt = -1;
+
+Simulator*
+Simulator::Create(JSContext* cx)
+{
+ Simulator* sim = js_new<Simulator>();
+ if (!sim)
+ return nullptr;
+
+ if (!sim->init()) {
+ js_delete(sim);
+ return nullptr;
+ }
+
+ if (getenv("MIPS_SIM_ICACHE_CHECKS"))
+ Simulator::ICacheCheckingEnabled = true;
+
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ int64_t stopAt;
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim;
+}
+
+void
+Simulator::Destroy(Simulator* sim)
+{
+ js_delete(sim);
+}
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger
+{
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ int32_t getRegisterValue(int regnum);
+ int32_t getFPURegisterValueInt(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void
+UNSUPPORTED()
+{
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void
+MipsDebugger::stop(SimInstruction* instr)
+{
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int32_t
+MipsDebugger::getRegisterValue(int regnum)
+{
+ if (regnum == kPCRegister)
+ return sim_->get_pc();
+ return sim_->getRegister(regnum);
+}
+
+int32_t
+MipsDebugger::getFPURegisterValueInt(int regnum)
+{
+ return sim_->getFpuRegister(regnum);
+}
+
+int64_t
+MipsDebugger::getFPURegisterValueLong(int regnum)
+{
+ return sim_->getFpuRegisterLong(regnum);
+}
+
+float
+MipsDebugger::getFPURegisterValueFloat(int regnum)
+{
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double
+MipsDebugger::getFPURegisterValueDouble(int regnum)
+{
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool
+MipsDebugger::getValue(const char* desc, int32_t* value)
+{
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%i", value) == 1;
+}
+
+bool
+MipsDebugger::setBreakpoint(SimInstruction* breakpc)
+{
+ // Check if a breakpoint can be set. If not, return without any side effects.
+ if (sim_->break_pc_ != nullptr)
+ return false;
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+ // The breakpoint instruction is not written into the code here; it will
+ // be set when the debugger shell continues.
+ return true;
+}
+
+bool
+MipsDebugger::deleteBreakpoint(SimInstruction* breakpc)
+{
+ if (sim_->break_pc_ != nullptr)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void
+MipsDebugger::undoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+}
+
+void
+MipsDebugger::redoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+}
+
+void
+MipsDebugger::printAllRegs()
+{
+ int32_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d ", Registers::GetName(i), value, value);
+
+ if (i % 2)
+ printf("\n");
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%08x %10d ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%08x %10d\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%08x\n", value);
+}
+
+void
+MipsDebugger::printAllRegsIncludingFPU()
+{
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::RegisterIdLimit; i++) {
+ if (i & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i),
+ getFPURegisterValueDouble(i));
+ }
+ }
+}
+
+static char*
+ReadLine(const char* prompt)
+{
+ char* result = nullptr;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ if (result)
+ js_free(result);
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a newline we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result = (char*)js_malloc(len + 1);
+ if (!result)
+ return nullptr;
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = (char*)js_malloc(new_len);
+ if (!new_result)
+ return nullptr;
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result, offset * sizeof(char));
+ js_free(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ memcpy(result + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result;
+}
+
+static void
+DisassembleInstruction(uint32_t pc)
+{
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mipsel -mcpu=mips32r2 | "
+ "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd))
+ printf("Cannot disassemble instruction.\n");
+}
+
+void
+MipsDebugger::debug()
+{
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!(instr->isTrap()) ||
+ instr->instructionBits() == kCallRedirInstr) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int32_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fCode = FloatRegister::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fCode != FloatRegisters::Invalid) {
+ if (fCode & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode),
+ getFPURegisterValueDouble(fCode));
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint32_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value)))
+ printf("setting breakpoint failed\n");
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on MIPS !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address =
+ reinterpret_cast<SimInstruction*>(stop_pc +
+ SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool
+AllOnOnePage(uintptr_t start, int size)
+{
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void
+Simulator::setLastDebuggerInput(char* input)
+{
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage*
+GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
+{
+ Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p)
+ return p->value();
+
+ CachePage* new_page = js_new<CachePage>();
+ if (!i_cache.add(p, page, new_page))
+ return nullptr;
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void
+FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size)
+{
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void
+FlushICacheLocked(Simulator::ICacheMap& i_cache, void* start_addr, size_t size)
+{
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
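FlushICacheLocked() first widens the requested range to whole cache lines and then peels off page-sized chunks so each page's validity map can be cleared separately. The rounding step in isolation, as a sketch using the same line size:

    #include <cstdint>
    #include <utility>

    static const int kLineMask = 3;        // 4-byte lines, as in CachePage.

    // Widen [start, start + size) to whole cache lines, as FlushICacheLocked does
    // before splitting the range at page boundaries.
    static std::pair<intptr_t, size_t> roundToLines(intptr_t start, size_t size) {
        int intraLine = start & kLineMask;
        start -= intraLine;                   // Align start down to a line.
        size += intraLine;
        size = ((size - 1) | kLineMask) + 1;  // Round size up to a line multiple.
        return std::make_pair(start, size);
    }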
+
+static void
+CheckICacheLocked(Simulator::ICacheMap& i_cache, SimInstruction* instr)
+{
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset),
+ SimInstruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber
+Simulator::ICacheHasher::hash(const Lookup& l)
+{
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool
+Simulator::ICacheHasher::match(const Key& k, const Lookup& l)
+{
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+void
+Simulator::FlushICache(void* start_addr, size_t size)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ Simulator* sim = Simulator::Current();
+ AutoLockSimulatorCache als(sim);
+ js::jit::FlushICacheLocked(sim->icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator()
+ : cacheLock_(mutexid::SimulatorCacheLock)
+{
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ resume_pc_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++)
+ exceptions[i] = 0;
+
+ lastDebuggerInput_ = nullptr;
+
+ redirection_ = nullptr;
+}
+
+bool
+Simulator::init()
+{
+ if (!icache_.init())
+ return false;
+
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = static_cast<char*>(js_malloc(stackSize));
+ if (!stack_)
+ return false;
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = sim->redirection();
+ if (Simulator::ICacheCheckingEnabled)
+ FlushICacheLocked(sim->icache(), addressOfSwiInstruction(), SimInstruction::kInstrSize);
+ sim->setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ Simulator* sim = Simulator::Current();
+
+ AutoLockSimulatorCache als(sim);
+
+ Redirection* current = sim->redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
+ if (!redir) {
+ MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection",
+ __FILE__, __LINE__);
+ MOZ_CRASH();
+ }
+ new(redir) Redirection(nativeFunction, type, sim);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
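Redirection::FromSwiInstruction() recovers the owning object purely from the address of its embedded swi word. The same offsetof trick in isolation (the struct here is a stand-in, not the real Redirection layout):

    #include <cstddef>
    #include <cstdint>

    struct FakeRedirection {
        void* nativeFunction;
        uint32_t swiInstruction;  // The word whose address is handed to generated code.
    };

    // Step back from the embedded instruction word to its containing object.
    static FakeRedirection* fromSwi(uint32_t* swi) {
        uint8_t* addr = reinterpret_cast<uint8_t*>(swi);
        return reinterpret_cast<FakeRedirection*>(
            addr - offsetof(FakeRedirection, swiInstruction));
    }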
+
+Simulator::~Simulator()
+{
+ js_free(stack_);
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */ void*
+Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type)
+{
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator*
+Simulator::Current()
+{
+ return TlsPerThreadData.get()->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void
+Simulator::setRegister(int reg, int32_t value)
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void
+Simulator::setFpuRegister(int fpureg, int32_t value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void
+Simulator::setFpuRegisterFloat(int fpureg, float value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterFloat(int fpureg, int64_t value)
+{
+ setFpuRegister(fpureg, value & 0xffffffff);
+ setFpuRegister(fpureg + 1, value >> 32);
+}
+
+void
+Simulator::setFpuRegisterDouble(int fpureg, double value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterDouble(int fpureg, int64_t value)
+{
+ setFpuRegister(fpureg, value & 0xffffffff);
+ setFpuRegister(fpureg + 1, value >> 32);
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t
+Simulator::getRegister(int reg) const
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double
+Simulator::getDoubleFromRegisterPair(int reg)
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ memcpy(&dm_val, &registers_[reg], sizeof(dm_val));
+ return dm_val;
+}
+
+int32_t
+Simulator::getFpuRegister(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int64_t
+Simulator::getFpuRegisterLong(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<int64_t*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+float
+Simulator::getFpuRegisterFloat(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+double
+Simulator::getFpuRegisterDouble(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from a0-a3 or f12 and f14.
+void
+Simulator::getFpArgs(double* x, double* y, int32_t* z)
+{
+ *x = getFpuRegisterDouble(12);
+ *y = getFpuRegisterDouble(14);
+ *z = getRegister(a2);
+}
+
+void
+Simulator::getFpFromStack(int32_t* stack, double* x)
+{
+ MOZ_ASSERT(stack);
+ MOZ_ASSERT(x);
+ memcpy(x, stack, sizeof(double));
+}
+
+void
+Simulator::setCallResultDouble(double result)
+{
+ setFpuRegisterDouble(f0, result);
+}
+
+void
+Simulator::setCallResultFloat(float result)
+{
+ setFpuRegisterFloat(f0, result);
+}
+
+void
+Simulator::setCallResult(int64_t res)
+{
+ setRegister(v0, static_cast<int32_t>(res));
+ setRegister(v1, static_cast<int32_t>(res >> 32));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void
+Simulator::setFCSRBit(uint32_t cc, bool value)
+{
+ if (value)
+ FCSR_ |= (1 << cc);
+ else
+ FCSR_ &= ~(1 << cc);
+}
+
+bool
+Simulator::testFCSRBit(uint32_t cc)
+{
+ return FCSR_ & (1 << cc);
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool
+Simulator::setFCSRRoundError(double original, double rounded)
+{
+ bool ret = false;
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ // The architecture reference is not entirely clear, but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
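For quick reference, setFCSRRoundError() classifies a rounding result into the four FCSR flag bits and reports whether the int32 conversion must be rejected. A host-side restatement of the same classification (the flag names here are descriptive stand-ins; the real code sets FCSR bits through setFCSRBit()):

    #include <cfloat>
    #include <climits>
    #include <cmath>

    struct RoundFlags {
        bool invalid, inexact, underflow, overflow;
    };

    // Mirrors setFCSRRoundError(); returns true when the conversion is invalid.
    static bool classifyRound(double original, double rounded, RoundFlags& f) {
        f = RoundFlags();
        bool invalidOp = false;
        if (!std::isfinite(original) || !std::isfinite(rounded)) {
            f.invalid = true;
            invalidOp = true;
        }
        if (original != rounded)
            f.inexact = true;
        if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
            f.underflow = true;
            invalidOp = true;
        }
        if (rounded > INT_MAX || rounded < INT_MIN) {
            f.overflow = true;
            f.invalid = true;
            invalidOp = true;
        }
        return invalidOp;
    }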
+
+// Raw access to the PC register.
+void
+Simulator::set_pc(int32_t value)
+{
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool
+Simulator::has_bad_pc() const
+{
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t
+Simulator::get_pc() const
+{
+ return registers_[pc];
+}
+
+// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
+// interrupt is caused. On others it does a funky rotation thing. For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour. Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator. Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+int
+Simulator::readW(uint32_t addr, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL dereference; report it and crash.
+ printf("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
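readW() and writeW() accept an access only when the address is word-aligned and outside the low guard page; everything else is treated as a fatal simulation error. The test in isolation (the mask value is an assumption corresponding to 4-byte pointers on MIPS32, standing in for kPointerAlignmentMask):

    #include <cstdint>

    static const uint32_t kAlignMask = 3;  // Assumed 4-byte pointer alignment.

    // True when Simulator::readW/writeW would perform the access rather than
    // reporting a bad or unaligned address and crashing.
    static bool wordAccessOk(uint32_t addr) {
        return addr >= 0x400 && (addr & kAlignMask) == 0;
    }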
+
+void
+Simulator::writeW(uint32_t addr, int value, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL dereference; report it and crash.
+ printf("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double
+Simulator::readD(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeD(uint32_t addr, double value, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint16_t
+Simulator::readHU(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t
+Simulator::readH(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeH(uint32_t addr, uint16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeH(uint32_t addr, int16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t
+Simulator::readBU(uint32_t addr)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int32_t
+Simulator::readB(uint32_t addr)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void
+Simulator::writeB(uint32_t addr, uint8_t value)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void
+Simulator::writeB(uint32_t addr, int8_t value)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uintptr_t
+Simulator::stackLimit() const
+{
+ return stackLimit_;
+}
+
+uintptr_t*
+Simulator::addressOfStackLimit()
+{
+ return &stackLimit_;
+}
+
+bool
+Simulator::overRecursed(uintptr_t newsp) const
+{
+ if (newsp == 0)
+ newsp = getRegister(sp);
+ return newsp <= stackLimit();
+}
+
+bool
+Simulator::overRecursedWithExtra(uint32_t extra) const
+{
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void
+Simulator::format(SimInstruction* instr, const char* format)
+{
+ printf("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int32_t arg0);
+typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
+typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
+typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3);
+typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4);
+typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5);
+typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6);
+typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6, int32_t arg7);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef int32_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int64_Double)(double arg0);
+typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1, int32_t arg2);
+typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1, int32_t arg2,
+ int32_t arg3);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+
+typedef double (*Prototype_DoubleInt)(double arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
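+
+// As an illustration, a redirected call of type Args_General2 is dispatched
+// by softwareInterrupt() below roughly as:
+// Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+// int64_t result = target(arg0, arg1);
+// setCallResult(result); // low word in v0, high word in v1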
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void
+Simulator::softwareInterrupt(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_O32_ABI)
+ MOZ_CRASH("Only O32 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = getRegister(a0);
+ int32_t arg1 = getRegister(a1);
+ int32_t arg2 = getRegister(a2);
+ int32_t arg3 = getRegister(a3);
+
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(getRegister(sp));
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ int32_t arg4 = stack_pointer[4];
+ int32_t arg5 = stack_pointer[5];
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_ra = getRegister(ra);
+
+ intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target = reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target = reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target = reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target = reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[6];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target = reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target = reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target = reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int32_t res = target(dval, arg2, arg3);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDoubleIntInt target = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int32_t res = target(arg0, dval, arg4, arg5);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target = reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target = reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target = reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_DoubleInt target = reinterpret_cast<Prototype_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Double_IntDouble target = reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDouble target = reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // the last argument is on stack
+ getFpFromStack(stack_pointer + 4, &dval2);
+ Prototype_Double_DoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // the two last arguments are on stack
+ getFpFromStack(stack_pointer + 4, &dval2);
+ getFpFromStack(stack_pointer + 6, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool
+Simulator::isWatchpoint(uint32_t code)
+{
+ return (code <= kMaxWatchpointCode);
+}
+
+void
+Simulator::printWatchpoint(uint32_t code)
+{
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void
+Simulator::handleStop(uint32_t code, SimInstruction* instr)
+{
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool
+Simulator::isStopInstruction(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+bool
+Simulator::isEnabledStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void
+Simulator::enableStop(uint32_t code)
+{
+ if (!isEnabledStop(code))
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+void
+Simulator::disableStop(uint32_t code)
+{
+ if (isEnabledStop(code))
+ watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+void
+Simulator::increaseStopCounter(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void
+Simulator::printStopInfo(uint32_t code)
+{
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+void
+Simulator::signalExceptions()
+{
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0)
+ MOZ_CRASH("Error: Exception raised.");
+ }
+}
+
+// Handle execution based on instruction types.
+void
+Simulator::configureTypeRegister(SimInstruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt)
+{
+ // Every local variable declared here should be const, so that changed
+ // values are passed back to decodeTypeRegister only through the reference
+ // parameters above.
+
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Handled in DecodeTypeImmed, should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = rt << sa;
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of bits.
+ // The RS field is always equal to 0.
+ alu_out = rt_u >> sa;
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This is
+ // a special case of the SRL instruction, added in MIPS32 Release 2.
+ // The RS field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
+ break;
+ case ff_sra:
+ alu_out = rt >> sa;
+ break;
+ case ff_sllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits. The SA field is always equal to 0.
+ alu_out = rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+ // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. The SA field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
+ break;
+ case ff_srav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ case ff_multu:
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case ff_add:
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - rt);
+ }
+ }
+ alu_out = rs + rt;
+ break;
+ case ff_addu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ if (!HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue + rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue + rt);
+ }
+ }
+ alu_out = rs - rt;
+ break;
+ case ff_subu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = rs_u < rt_u ? 1 : 0;
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = rs_u >= rt_u;
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = rs_u < rt_u;
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ case ff_divu:
+ // div and divu never raise exceptions.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = rs_u ? __builtin_clz(rs_u) : 32;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case ff_ext: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
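+
+// Worked example of the ins/ext mask arithmetic above (illustrative values):
+// for ext with msb = 7 and lsb = 4, size = msb + 1 = 8 and mask = 0xff, so
+// alu_out = (rs_u & (0xff << 4)) >> 4; for ins with the same fields,
+// size = msb - lsb + 1 = 4 and mask = 0xf, so bits 7..4 of rt_u are replaced
+// by the low four bits of rs_u.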
+
+void
+Simulator::decodeTypeRegister(SimInstruction* instr)
+{
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr,
+ alu_out,
+ i64hilo,
+ u64hilo,
+ next_pc,
+ return_addr_reg,
+ do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ FPUregisters_[fs_reg] = registers_[rt_reg];
+ break;
+ case rs_mthc1:
+ MOZ_CRASH();
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round float to word (round half to even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round float to word towards positive infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: { // Mips32r2: Truncate float to 64-bit long-word.
+ float rounded = truncf(fs_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips32r2 instruction.
+ float rounded =
+ fs_value > 0 ? std::floor(fs_value + 0.5) : std::ceil(fs_value - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips32r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::floor(fs_value));
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::ceil(fs_value));
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ dt_value = getFpuRegisterDouble(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round double to word (round half to even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
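+ // Worked example of the half-to-even adjustment above (illustrative
+ // values): for ds_value = 2.5, floor(2.5 + 0.5) = 3 and 3 - 2.5 == 0.5
+ // with the low bit of the result set, so the result is decremented to 2;
+ // for ds_value = 3.5 the rounded result 4 is already even and is kept.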
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards 0).
+ double rounded = trunc(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt: { // Mips32r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(ds_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips32r2 instruction.
+ double rounded = trunc(ds_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips32r2 instruction.
+ double rounded =
+ ds_value > 0 ? std::floor(ds_value + 0.5) : std::ceil(ds_value - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::floor(ds_value));
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::ceil(ds_value));
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(alu_out));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(alu_out));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt: // Mips32r2 instruction.
+ // Watch the signs here: we combine two 32-bit values
+ // into a signed 64-bit value.
+ i64 = static_cast<uint32_t>(getFpuRegister(fs_reg));
+ i64 |= static_cast<int64_t>(getFpuRegister(fs_reg + 1)) << 32;
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ case ff_cvt_s_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ setRegister(return_addr_reg, current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ case ff_multu:
+ setRegister(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ case ff_div:
+ // Divide by zero and overflow was not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ setRegister(LO, INT_MIN);
+ setRegister(HI, 0);
+ } else if (rt != 0) {
+ setRegister(LO, rs / rt);
+ setRegister(HI, rs % rt);
+ }
+ break;
+ case ff_divu:
+ if (rt_u != 0) {
+ setRegister(LO, rs_u / rt_u);
+ setRegister(HI, rs_u % rt_u);
+ }
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt) setRegister(rd_reg, rs);
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ } else {
+ if (!testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt) setRegister(rd_reg, rs);
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in common
+ // cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ }
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void
+Simulator::decodeTypeImmediate(SimInstruction* instr)
+{
+ // Instruction fields.
+ Opcode op = instr->opcodeFieldRaw();
+ int32_t rs = getRegister(instr->rsValue());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int32_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc.
+ int32_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int32_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ default:
+ break;
+ }
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we don't
+ // need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] =
+ rs < (kRegisterskMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case op_addiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (oe_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwl: {
+ // al_offset is the offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lwr: {
+ // al_offset is the offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
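+ // Illustrative trace of the lwl/lwr merging above: for an effective
+ // address with al_offset = 1, lwl shifts the aligned word left by
+ // byte_shift * 8 = 16 bits and keeps the low 16 bits of rt, while lwr
+ // shifts the word right by al_offset * 8 = 8 bits and keeps the bytes of
+ // rt selected by its mask (0xff000000 here), so the pair reassembles the
+ // unaligned word.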
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, static_cast<int8_t>(rt));
+ break;
+ case op_sh:
+ writeH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_sw:
+ writeW(addr, rt, instr);
+ break;
+ case op_swr:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegister(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ addr = rs + se_imm16;
+ writeW(addr, getFpuRegister(ft_reg), instr);
+ break;
+ case op_sdc1:
+ addr = rs + se_imm16;
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ }
+
+
+ if (execute_branch_delay_instruction) {
+ // Execute the branch delay slot.
+ // We don't check for end_sim_pc: first, it should not be reached while the
+ // current pc is valid, and second, a jump should always execute its branch
+ // delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra)
+ set_pc(next_pc);
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void
+Simulator::decodeTypeJump(SimInstruction* instr)
+{
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int32_t pc_high_bits = current_pc & 0xf0000000;
+ // Next pc.
+ int32_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
+
+ // Execute the branch delay slot.
+ // We don't check for end_sim_pc: first, it should not be reached while the
+ // current pc is valid, and second, a jump should always execute its branch
+ // delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction())
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
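+
+// For example (illustrative values): with current_pc = 0x1000f008 and
+// imm26 = 0x400, the target is (0x1000f008 & 0xf0000000) | (0x400 << 2) =
+// 0x10001000, i.e. the jump always stays within the current 256 MB segment.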
+
+// Executes the current instruction.
+void
+Simulator::instructionDecode(SimInstruction* instr)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ AutoLockSimulatorCache als(this);
+ CheckICacheLocked(icache(), instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_)
+ setRegister(pc, reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+}
+
+void
+Simulator::branchDelayInstructionDecode(SimInstruction* instr)
+{
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+ MOZ_CRASH("Eror:Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+template<bool enableStopSimAt>
+void
+Simulator::execute()
+{
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+ WasmActivation* activation = TlsPerThreadData.get()->runtimeFromMainThread()->wasmActivationStack();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+
+ int32_t rpc = resume_pc_;
+ if (MOZ_UNLIKELY(rpc != 0)) {
+ // wasm signal handler ran and we have to adjust the pc.
+ activation->setResumePC((void*)get_pc());
+ set_pc(rpc);
+ resume_pc_ = 0;
+ }
+ }
+ program_counter = get_pc();
+ }
+}
+
+void
+Simulator::callInternal(uint8_t* entry)
+{
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int32_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ int32_t s0_val = getRegister(s0);
+ int32_t s1_val = getRegister(s1);
+ int32_t s2_val = getRegister(s2);
+ int32_t s3_val = getRegister(s3);
+ int32_t s4_val = getRegister(s4);
+ int32_t s5_val = getRegister(s5);
+ int32_t s6_val = getRegister(s6);
+ int32_t s7_val = getRegister(s7);
+ int32_t gp_val = getRegister(gp);
+ int32_t sp_val = getRegister(sp);
+ int32_t fp_val = getRegister(fp);
+
+ // Set up the callee-saved registers with a known value, so that we can
+ // check that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1)
+ execute<true>();
+ else
+ execute<false>();
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int32_t
+Simulator::call(uint8_t* entry, int argument_count, ...)
+{
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount)
+ entry_stack = entry_stack - argument_count * sizeof(int32_t);
+ else
+ entry_stack = entry_stack - kCArgsSlotsSize;
+
+ entry_stack &= ~(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg))
+ setRegister(argReg.code(), va_arg(parameters, int32_t));
+ else
+ stack_argument[i] = va_arg(parameters, int32_t);
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int32_t result = getRegister(v0);
+ return result;
+}
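+
+// A hypothetical use of call() (illustrative only; 'sim', 'entry' and the
+// argument names are made up): simulating an entry trampoline that takes
+// three word-sized arguments could look like
+// int32_t rv = sim->call(entry, 3, arg0, arg1, arg2);
+// The leading arguments go to a0..a3 via GetIntArgReg(), any remaining ones
+// are written to the reserved stack slots, and the result is read from v0.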
+
+uintptr_t
+Simulator::pushAddress(uintptr_t address)
+{
+ int new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t
+Simulator::popAddress()
+{
+ int current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator*
+JSRuntime::simulator() const
+{
+ return simulator_;
+}
+
+js::jit::Simulator*
+js::PerThreadData::simulator() const
+{
+ return runtime_->simulator();
+}
+
+uintptr_t*
+JSRuntime::addressOfSimulatorStackLimit()
+{
+ return simulator_->addressOfStackLimit();
+}
diff --git a/js/src/jit/mips32/Simulator-mips32.h b/js/src/jit/mips32/Simulator-mips32.h
new file mode 100644
index 000000000..96986dd9b
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -0,0 +1,424 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips32_Simulator_mips32_h
+#define jit_mips32_Simulator_mips32_h
+
+#ifdef JS_SIMULATOR_MIPS32
+
+#include "jit/IonTypes.h"
+#include "threading/Thread.h"
+#include "vm/MutexIDs.h"
+
+namespace js {
+namespace jit {
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+const intptr_t kPointerAlignment = 4;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
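+// For reference: the five flag bits above occupy bits 2..6, so kFCSRFlagMask
+// is 0x7c and kFCSRExceptionFlagMask, which excludes the inexact bit, is 0x78.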
+
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
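+// For example, a break code of 7 is treated as a watchpoint (the registers
+// are printed and execution continues), a code of 64 is a stop that can be
+// enabled or disabled via the debugger, and a code of 200 always drops into
+// MipsDebugger.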
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+class Simulator {
+ friend class Redirection;
+ friend class MipsDebugger;
+ friend class AutoLockSimulatorCache;
+ public:
+
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create(JSContext* cx);
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int32_t value);
+ int32_t getRegister(int reg) const;
+ double getDoubleFromRegisterPair(int reg);
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterFloat(int fpureg, int64_t value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ void setFpuRegisterDouble(int fpureg, int64_t value);
+ int32_t getFpuRegister(int fpureg) const;
+ int64_t getFpuRegisterLong(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+ void set_resume_pc(void* value) {
+ resume_pc_ = int32_t(value);
+ }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template<bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int32_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+ // ICache checking.
+ static void FlushICache(void* start, size_t size);
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+ // Unsupported instructions use format() to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t readBU(uint32_t addr);
+ inline int32_t readB(uint32_t addr);
+ inline void writeB(uint32_t addr, uint8_t value);
+ inline void writeB(uint32_t addr, int8_t value);
+
+ inline uint16_t readHU(uint32_t addr, SimInstruction* instr);
+ inline int16_t readH(uint32_t addr, SimInstruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void writeH(uint32_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint32_t addr, int16_t value, SimInstruction* instr);
+
+ inline int readW(uint32_t addr, SimInstruction* instr);
+ inline void writeW(uint32_t addr, int value, SimInstruction* instr);
+
+ inline double readD(uint32_t addr, SimInstruction* instr);
+ inline void writeD(uint32_t addr, double value, SimInstruction* instr);
+
+ // Execution is handled based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static bool ICacheCheckingEnabled;
+
+ static int StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction, ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle arguments and return value for runtime FP functions.
+ void getFpArgs(double* x, double* y, int32_t* z);
+ void getFpFromStack(int32_t* stack, double* x);
+
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int32_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int icount_;
+ int break_count_;
+
+ int32_t resume_pc_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled, meaning the simulator will stop when it hits the
+ // instruction, if bit 31 of watchedStops_[code].count_ is unset.
+ // The value watchedStops_[code].count_ & ~(1 << 31) indicates how many
+ // times the breakpoint has been hit or stepped through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ private:
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+#ifdef DEBUG
+ mozilla::Maybe<Thread::Id> cacheLockHolder_;
+#endif
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return icache_;
+ }
+
+ Redirection* redirection() const {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return redirection_;
+ }
+
+ void setRedirection(js::jit::Redirection* redirection) {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ redirection_ = redirection;
+ }
+};
+
+#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
+ JS_BEGIN_MACRO \
+ if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \
+ js::ReportOverRecursed(cx); \
+ onerror; \
+ } \
+ JS_END_MACRO
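+
+// Hypothetical usage sketch (not taken from a real call site): report
+// over-recursion and run the error path when less than about 1 KiB of
+// simulator stack headroom remains:
+//
+//   JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, 1024, return false);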
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS32 */
+
+#endif /* jit_mips32_Simulator_mips32_h */
diff --git a/js/src/jit/mips32/Trampoline-mips32.cpp b/js/src/jit/mips32/Trampoline-mips32.cpp
new file mode 100644
index 000000000..d422ed757
--- /dev/null
+++ b/js/src/jit/mips32/Trampoline-mips32.cpp
@@ -0,0 +1,1418 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/mips32/Bailouts-mips32.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
+
+struct EnterJITRegs
+{
+ double f30;
+ double f28;
+ double f26;
+ double f24;
+ double f22;
+ double f20;
+
+ // empty slot for alignment
+ uintptr_t align;
+
+ // non-volatile registers.
+ uintptr_t ra;
+ uintptr_t s7;
+ uintptr_t s6;
+ uintptr_t s5;
+ uintptr_t s4;
+ uintptr_t s3;
+ uintptr_t s2;
+ uintptr_t s1;
+ uintptr_t s0;
+};
+
+struct EnterJITArgs
+{
+ // First 4 argument placeholders
+ void* jitcode; // <- sp points here when function is entered.
+ int maxArgc;
+ Value* maxArgv;
+ InterpreterFrame* fp;
+
+ // Arguments on stack
+ CalleeToken calleeToken;
+ JSObject* scopeChain;
+ size_t numStackValues;
+ Value* vp;
+};
+
+static void
+GenerateReturn(MacroAssembler& masm, int returnCode)
+{
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);
+
+ // Restore non-volatile floating point registers
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void
+GeneratePrologue(MacroAssembler& masm)
+{
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.reserveStack(sizeof(EnterJITRegs));
+ masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
+ masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
+ masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
+ masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
+ masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
+ masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
+ masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
+ masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
+ masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));
+
+ masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
+ masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
+ masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
+}
+
+
+/*
+ * This method generates a trampoline for a c++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ * CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
+ * ...using standard EABI calling convention
+ */
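+//
+// On this target the first four arguments (code, argc, argv, fp) arrive in
+// a0-a3; the remaining arguments (calleeToken, scopeChain, numStackValues, vp)
+// are read from the caller's stack through the EnterJITArgs layout above,
+// whose first four fields are the placeholder slots for the register
+// arguments.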
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ const Register reg_code = a0;
+ const Register reg_argc = a1;
+ const Register reg_argv = a2;
+ const mozilla::DebugOnly<Register> reg_frame = a3;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ MacroAssembler masm(cx);
+ GeneratePrologue(masm);
+
+ const Address slotToken(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
+ const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
+
+ // Save stack pointer into s4
+ masm.movePtr(StackPointer, s4);
+
+ // Load calleeToken into s2.
+ masm.loadPtr(slotToken, s2);
+
+ // Save stack pointer as baseline frame.
+ if (type == EnterJitBaseline)
+ masm.movePtr(StackPointer, BaselineFrameReg);
+
+ // Load the number of actual arguments into s3.
+ masm.loadPtr(slotVp, s3);
+ masm.unboxInt32(Address(s3, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // if we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, s2, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.as_sll(s0, reg_argc, 3); // s0 = argc * 8
+ masm.addPtr(reg_argv, s0); // s0 = argv + argc * 8
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6, s7);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments
+ masm.storePtr(s2, Address(StackPointer, 0)); // callee token
+
+ masm.subPtr(StackPointer, s4);
+ masm.makeFrameDescriptor(s4, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(s4); // descriptor
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(OsrFrameReg);
+ regs.take(BaselineFrameReg);
+ regs.take(reg_code);
+ regs.take(ReturnReg);
+
+ const Address slotNumStackValues(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, numStackValues));
+ const Address slotScopeChain(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, scopeChain));
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slotNumStackValues, numStackValues);
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, returnLabel.patchAt());
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = BaselineFrameReg;
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+ masm.movePtr(StackPointer, framePtr);
+
+ // Reserve space for locals and stack values.
+ masm.ma_sll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+
+ // Push frame descriptor and fake return address.
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
+
+ // No GC things to mark, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(BaselineFrameReg); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ regs.add(OsrFrameReg);
+ regs.take(JSReturnOperand);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.ma_addu(realFramePtr, framePtr, Imm32(sizeof(void*)));
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.ma_li(scratch, oomReturnLabel.patchAt());
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.loadPtr(slotScopeChain, R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.bind(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.bind(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments off the stack.
+ // s0 <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(s0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0);
+ masm.addPtr(s0, StackPointer);
+
+ // Store the returned value into the slotVp
+ masm.loadPtr(slotVp, s1);
+ masm.storeValue(JSReturnOperand, Address(s1, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+
+ Linker linker(masm);
+ AutoFlushICache afc("GenerateEnterJIT");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ // NOTE: Members ionScript_ and osiPointReturnAddress_ of
+ // InvalidationBailoutStack are already on the stack.
+ static const uint32_t STACK_DATA_SIZE = sizeof(InvalidationBailoutStack) -
+ 2 * sizeof(uintptr_t);
+
+ // The stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Make room for data on stack.
+ masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);
+
+ // Save general purpose registers
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ Address address = Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
+ i * sizeof(uintptr_t));
+ masm.storePtr(Register::FromCode(i), address);
+ }
+
+ // Save floating point registers
+ // We can use as_sd because the stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i ++)
+ masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+ InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve place for return value and BailoutInfo pointer
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to return value.
+ masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a2);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.passABIArg(a2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.loadPtr(Address(StackPointer, 0), a2);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+ // Remove the return address, the IonScript, the register state
+ // (InvalidationBailoutStack) and the space that was allocated for the
+ // return value.
+ masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer);
+ // remove the space that this frame was using before the bailout
+ // (computed by InvalidationBailout)
+ masm.addPtr(a1, StackPointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm(cx);
+ masm.pushReturnAddress();
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current
+ // frame. Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == s3);
+
+ Register numActArgsReg = t6;
+ Register calleeTokenReg = t7;
+ Register numArgsReg = t5;
+
+ // Copy number of actual arguments into numActArgsReg
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
+ numActArgsReg);
+
+ // Load the number of |undefined|s to push into t1.
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(CalleeTokenMask), numArgsReg);
+ masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg);
+
+ masm.as_subu(t1, numArgsReg, s3);
+
+ // Get the topmost argument.
+ masm.ma_sll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_addu(t2, sp, t0); // t2 <- sp + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t2);
+
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // Add sizeof(Value) to step over |this|.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ // Include the newly pushed newTarget value in the frame size
+ // calculated below.
+ masm.add32(Imm32(1), numArgsReg);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Push undefined.
+ masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
+ masm.sub32(Imm32(1), t1);
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop, initialSkip;
+
+ masm.ma_b(&initialSkip, ShortJump);
+
+ masm.bind(&copyLoopTop);
+ masm.subPtr(Imm32(sizeof(Value)), t2);
+ masm.sub32(Imm32(1), s3);
+
+ masm.bind(&initialSkip);
+
+ MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
+ // Read argument and push to stack.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // translate the framesize from values into bytes
+ masm.ma_addu(t0, numArgsReg, Imm32(1));
+ masm.lshiftPtr(Imm32(3), t0);
+
+ // Construct sizeDescriptor.
+ masm.makeFrameDescriptor(t0, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+ // Push actual arguments.
+ masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+ // Push callee token.
+ masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+ // Push frame descriptor.
+ masm.storePtr(t0, Address(StackPointer, 0));
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andPtr(Imm32(CalleeTokenMask), calleeTokenReg);
+ masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1);
+ masm.loadBaselineOrIonRaw(t1, t1, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(t1);
+
+ // arg1
+ // ...
+ // argN
+ // num actual args
+ // callee token
+ // sizeDescriptor <- sp now
+ // return address
+
+ // Remove the rectifier frame.
+ // t0 <- descriptor with FrameType.
+ masm.loadPtr(Address(StackPointer, 0), t0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t0); // t0 <- descriptor.
+
+ // Discard descriptor, calleeToken and number of actual arguments.
+ masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+
+ // arg1
+ // ...
+ // argN <- sp now; t0 <- frame descriptor
+ // num actual args
+ // callee token
+ // sizeDescriptor
+ // return address
+
+ // Discard pushed arguments.
+ masm.addPtr(t0, StackPointer);
+
+ masm.ret();
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ return code;
+}
+
+// NOTE: Members snapshotOffset_ and padding_ of BailoutStack
+// are not stored in PushBailoutFrame().
+static const uint32_t bailoutDataSize = sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
+static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);
+
+/* There are two different stack layouts when doing a bailout. They are
+ * represented by the class BailoutStack.
+ *
+ * - The first case is when the bailout is done through the bailout table. In
+ * this case the table offset is stored in $ra (see
+ * JitRuntime::generateBailoutTable()) and the thunk code should save it on
+ * the stack. Here frameClassId_ cannot be NO_FRAME_SIZE_CLASS_ID, and the
+ * members snapshotOffset_ and padding_ are not on the stack.
+ *
+ * - The other case is when the bailout is done via out-of-line code (lazy
+ * bailout). In this case the frame size is stored in $ra (see
+ * CodeGeneratorMIPS::generateOutOfLineCode()) and the thunk code should save
+ * it on the stack. The other difference is that the members snapshotOffset_
+ * and padding_ are pushed onto the stack by
+ * CodeGeneratorMIPS::visitOutOfLineBailout(), and the field frameClassId_ is
+ * forced to be NO_FRAME_SIZE_CLASS_ID (see JitRuntime::generateBailoutHandler).
+ */
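+//
+// In both cases the register state saved by PushBailoutFrame() starts at the
+// new stack pointer, so that address is what gets passed to Bailout() as the
+// BailoutStack* argument (in a0).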
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // Make sure that alignment is proper.
+ masm.checkStackAlignment();
+
+ // Make room for data.
+ masm.subPtr(Imm32(bailoutDataSize), StackPointer);
+
+ // Save general purpose registers.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
+ masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
+ }
+
+ // Save floating point registers
+ // We can use as_sd because the stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++)
+ masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+ BailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+ // Store the frameSize_ or tableOffset_ stored in ra
+ // See: JitRuntime::generateBailoutTable()
+ // See: CodeGeneratorMIPS::generateOutOfLineCode()
+ masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));
+
+ // Put frame class to stack
+ masm.storePtr(ImmWord(frameClass), Address(StackPointer, BailoutStack::offsetOfFrameClass()));
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, a0);
+
+ // Put pointer to BailoutInfo
+ masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
+ masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
+ masm.movePtr(StackPointer, a1);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // Load frameSize from stack
+ masm.loadPtr(Address(StackPointer,
+ bailoutInfoOutParamSize + BailoutStack::offsetOfFrameSize()), a1);
+
+ // Remove complete BailoutStack class and data after it
+ masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize), StackPointer);
+ // Remove the frame size from the stack.
+ masm.addPtr(a1, StackPointer);
+ } else {
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+ // Remove the data this function added and the frame size.
+ masm.addPtr(Imm32(bailoutDataSize + bailoutInfoOutParamSize + frameSize), StackPointer);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MacroAssembler masm(cx);
+
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
+ // Calculate offset to the end of table
+ int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;
+
+ // We use the 'ra' as table offset later in GenerateBailoutThunk
+ masm.as_bal(BOffImm16(offset));
+ masm.nop();
+ }
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTable");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTable");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ MacroAssembler masm(cx);
+
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall)
+ masm.pushReturnAddress();
+
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_addu(argsBase, StackPointer, Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ masm.alignStackPointer();
+
+ // Reserve space for the outparameter. Reserve sizeof(Value) for every
+ // case so that stack stays aligned.
+ uint32_t outParamSize = 0;
+ switch (f.outParam) {
+ case Type_Value:
+ outParamSize = sizeof(Value);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Handle:
+ {
+ uint32_t pushed = masm.framePushed();
+ masm.PushEmptyRooted(f.outParamRootType);
+ outParamSize = masm.framePushed() - pushed;
+ }
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ outParamSize = sizeof(uintptr_t);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Double:
+ outParamSize = sizeof(double);
+ masm.reserveStack(outParamSize);
+ break;
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ uint32_t outParamOffset = 0;
+ if (f.outParam != Type_Void) {
+ // Make sure that stack is double aligned after outParam.
+ MOZ_ASSERT(outParamSize <= sizeof(double));
+ outParamOffset += sizeof(double) - outParamSize;
+ }
+ // Reserve stack for double sized args that are copied to be aligned.
+ outParamOffset += f.doubleByRefArgs() * sizeof(double);
+
+ Register doubleArgs = t0;
+ masm.reserveStack(outParamOffset);
+ masm.movePtr(StackPointer, doubleArgs);
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+ size_t doubleArgDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByValue:
+ // Values should be passed by reference, not by value, so we
+ // assert that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByRef:
+ // Copy double sized argument to aligned place.
+ masm.ma_ld(ScratchDoubleReg, Address(argsBase, argDisp));
+ masm.as_sd(ScratchDoubleReg, doubleArgs, doubleArgDisp);
+ masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ doubleArgDisp += sizeof(double);
+ argDisp += sizeof(double);
+ break;
+ }
+ }
+
+ MOZ_ASSERT_IF(f.outParam != Type_Void,
+ doubleArgDisp + sizeof(double) == outParamOffset + outParamSize);
+
+ // Copy the implicit outparam, if any.
+ if (f.outParam != Type_Void) {
+ masm.passABIArg(MoveOperand(doubleArgs, outParamOffset, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ }
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ masm.freeStack(outParamOffset);
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.as_ld(ReturnDoubleReg, StackPointer, 0);
+ } else {
+ masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
+ }
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.restoreStackPointer();
+
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(uintptr_t) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
+ // to use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(IonMarkFunction(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ // Load BaselineFrame pointer in scratch1.
+ masm.movePtr(s5, scratch1);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t)));
+ masm.storePtr(scratch1, Address(StackPointer, 0));
+
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+
+ // ra was restored by EmitLeaveStubFrame
+ masm.branch(ra);
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.movePtr(s5, StackPointer);
+ masm.pop(s5);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(a1, a2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+ Register scratch4 = t3;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.ma_and(scratch2, scratch1, Imm32((1 << FRAMETYPE_BITS) - 1));
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+ // returning directly to an IonJS frame. Store return addr to frame
+ // in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.ma_addu(scratch2, scratch2, Imm32(JitFrameLayout::Size()));
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ masm.as_addu(scratch3, StackPointer, scratch1);
+ Address stubFrameReturnAddr(scratch3,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.ma_srl(scratch1, scratch3, Imm32(FRAMESIZE_SHIFT));
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.as_addu(scratch3, scratch2, scratch1);
+ masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.as_addu(scratch3, scratch2, scratch1);
+ Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.as_addu(scratch1, scratch2, scratch3);
+ masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("ProfilerExitFrameTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}
diff --git a/js/src/jit/mips64/Architecture-mips64.cpp b/js/src/jit/mips64/Architecture-mips64.cpp
new file mode 100644
index 000000000..d7b0a55a5
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.cpp
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Architecture-mips64.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char * const Registers::RegNames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" };
+
+const uint32_t Allocatable = 22;
+
+const Registers::SetType Registers::ArgRegMask =
+ Registers::SharedArgRegMask |
+ (1 << a4) | (1 << a5) | (1 << a6) | (1 << a7);
+
+const Registers::SetType Registers::JSCallMask =
+ (1 << Registers::v1);
+
+const Registers::SetType Registers::CallMask =
+ (1 << Registers::v0);
+
+FloatRegisters::Encoding
+FloatRegisters::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Encoding(i)), name) == 0)
+ return Encoding(i);
+ }
+
+ return Invalid;
+}
+
+FloatRegister
+FloatRegister::singleOverlay() const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Codes::Double)
+ return FloatRegister(reg_, Codes::Single);
+ return *this;
+}
+
+FloatRegister
+FloatRegister::doubleOverlay() const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Codes::Double)
+ return FloatRegister(reg_, Codes::Double);
+ return *this;
+}
+
+FloatRegisterSet
+FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Even for single-size registers, save the complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t
+FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xffffffff) == 0);
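+    // Double registers occupy bits 32..63 of the set, so count only those.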
+ uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
+
+uint32_t
+FloatRegister::getRegisterDumpOffsetInBytes()
+{
+ return id() * sizeof(double);
+}
+
+} // namespace jit
+} // namespace js
+
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
new file mode 100644
index 000000000..dde783442
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Architecture_mips64_h
+#define jit_mips64_Architecture_mips64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// Shadow stack space is not required on MIPS64.
+static const uint32_t ShadowStackSpace = 0;
+
+// MIPS64 has a 64-bit floating-point coprocessor. There are 32 double-
+// precision registers which can also be used as single-precision registers.
+class FloatRegisters : public FloatRegistersMIPSShared
+{
+ public:
+ enum ContentType {
+ Single,
+ Double,
+ NumTypes
+ };
+
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < TotalPhys);
+ return FloatRegistersMIPSShared::GetName(Encoding(i));
+ }
+
+ static Encoding FromName(const char* name);
+
+ static const uint32_t Total = 32 * NumTypes;
+ static const uint32_t Allocatable = 60;
+    // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 32;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+    // Magic values which are used to duplicate a mask of physical registers
+    // for a specific type of register. A multiplication is used to copy and
+    // shift the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1) << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1) << (uint32_t(Double) * TotalPhys);
+ static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+ static const SetType SpreadVector = 0;
+ static const SetType Spread = SpreadScalar | SpreadVector;
+
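+    // For example, AllPhysMask * SpreadScalar sets bits 0..31 (the Single
+    // lane) and bits 32..63 (the Double lane) of the set.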
+ static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+
+ static const SetType NonVolatileMask =
+ ( (1U << FloatRegisters::f24) |
+ (1U << FloatRegisters::f25) |
+ (1U << FloatRegisters::f26) |
+ (1U << FloatRegisters::f27) |
+ (1U << FloatRegisters::f28) |
+ (1U << FloatRegisters::f29) |
+ (1U << FloatRegisters::f30) |
+ (1U << FloatRegisters::f31)
+ ) * SpreadScalar
+ | AllPhysMask * SpreadVector;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType NonAllocatableMask =
+ ( // f21 and f23 are MIPS scratch float registers.
+ (1U << FloatRegisters::f21) |
+ (1U << FloatRegisters::f23)
+ ) * Spread;
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegister : public FloatRegisterMIPSShared
+{
+ public:
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::ContentType ContentType;
+
+ Encoding reg_: 6;
+ private:
+ ContentType kind_ : 3;
+
+ public:
+ constexpr FloatRegister(uint32_t r, ContentType kind = Codes::Double)
+ : reg_(Encoding(r)), kind_(kind)
+ { }
+ constexpr FloatRegister()
+ : reg_(Encoding(FloatRegisters::invalid_freg)), kind_(Codes::Double)
+ { }
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && reg_ == other.reg_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+    size_t size() const { return (kind_ == Codes::Double) ? sizeof(double) : sizeof(float); }
+ bool isInvalid() const {
+ return reg_ == FloatRegisters::invalid_freg;
+ }
+
+ bool isSingle() const { return kind_ == Codes::Single; }
+ bool isDouble() const { return kind_ == Codes::Double; }
+
+ FloatRegister singleOverlay() const;
+ FloatRegister doubleOverlay() const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
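+    // A Code packs the 5-bit physical register number in bits 0..4 and the
+    // content type (Single or Double) in bit 5; FromCode below reverses this.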
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ return Code(reg_ | (kind_ << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ return reg_;
+ }
+ uint32_t id() const {
+ return reg_;
+ }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 0x1f;
+ uint32_t kind = i >> 5;
+ return FloatRegister(Code(code), ContentType(kind));
+ }
+
+ bool volatile_() const {
+ return !!((1 << reg_) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const {
+ return FloatRegisters::GetName(reg_);
+ }
+ bool operator != (const FloatRegister& other) const {
+ return kind_ != other.kind_ || reg_ != other.reg_;
+ }
+ bool aliases(const FloatRegister& other) {
+ return reg_ == other.reg_;
+ }
+ uint32_t numAliased() const {
+ return 2;
+ }
+ void aliased(uint32_t aliasIdx, FloatRegister* ret) {
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble())
+ *ret = singleOverlay();
+ else
+ *ret = doubleOverlay();
+ }
+ uint32_t numAlignedAliased() const {
+ return 2;
+ }
+ void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) {
+ MOZ_ASSERT(isDouble());
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = singleOverlay();
+ }
+
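+    // Shifting Spread by the register number sets both the Single and the
+    // Double bit for this physical register.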
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::Spread << reg_;
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Architecture_mips64_h */
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
new file mode 100644
index 000000000..4d251f152
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -0,0 +1,529 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Assembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : usedArgSlots_(0),
+ firstArgFloat(false),
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
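+    // Under the n64 ABI every argument, integer or floating point, consumes
+    // one 64-bit slot; the slot index selects the integer register (a0..a7)
+    // or the float register (f12..f19), or a stack slot once the registers
+    // run out.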
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer: {
+ Register destReg;
+ if (GetIntArgReg(usedArgSlots_, &destReg))
+ current_ = ABIArg(destReg);
+ else
+ current_ = ABIArg(GetArgStackDisp(usedArgSlots_));
+ usedArgSlots_++;
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ FloatRegister destFReg;
+ FloatRegister::ContentType contentType;
+ if (!usedArgSlots_)
+ firstArgFloat = true;
+ contentType = (type == MIRType::Double) ?
+ FloatRegisters::Double : FloatRegisters::Single;
+ if (GetFloatArgReg(usedArgSlots_, &destFReg))
+ current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
+ else
+ current_ = ABIArg(GetArgStackDisp(usedArgSlots_));
+ usedArgSlots_++;
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+uint32_t
+js::jit::RT(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RTShift;
+}
+
+uint32_t
+js::jit::RD(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RDShift;
+}
+
+uint32_t
+js::jit::RZ(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RZShift;
+}
+
+uint32_t
+js::jit::SA(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << SAShift;
+}
+
+// Used to patch jumps created by MacroAssemblerMIPS64Compat::jumpWithPatch.
+void
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+{
+ Instruction* inst = (Instruction*)jump_.raw();
+
+    // Six instructions are used to load a 64-bit immediate.
+ MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)label.raw());
+
+ AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
+}
+
+// For more information about backedges, see the comment in
+// MacroAssemblerMIPS64Compat::backedgeJump()
+void
+jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
+ JitRuntime::BackedgeTarget target)
+{
+ uintptr_t sourceAddr = (uintptr_t)jump.raw();
+ uintptr_t targetAddr = (uintptr_t)label.raw();
+ InstImm* branch = (InstImm*)jump.raw();
+
+ MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));
+
+ if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
+ branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
+ } else {
+ if (target == JitRuntime::BackedgeLoopHeader) {
+ Instruction* inst = &branch[1];
+ Assembler::UpdateLoad64Value(inst, targetAddr);
+            // Jump to the first ori. The lui will be executed in the delay slot.
+ branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
+ } else {
+ Instruction* inst = &branch[6];
+ Assembler::UpdateLoad64Value(inst, targetAddr);
+            // Jump to the first ori of the interrupt loop.
+ branch->setBOffImm16(BOffImm16(6 * sizeof(uint32_t)));
+ }
+ }
+}
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all long jumps during code copy.
+ for (size_t i = 0; i < longJumps_.length(); i++) {
+ Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);
+
+ uint64_t value = Assembler::ExtractLoad64Value(inst);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
+ }
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
+uintptr_t
+Assembler::GetPointer(uint8_t* instPtr)
+{
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+static JitCode *
+CodeFromJump(Instruction* jump)
+{
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ JitCode* child = CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void
+TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
+{
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // All pointers on MIPS64 will have the top bits cleared. If those bits
+ // are not cleared, this must be a Value.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
+ "ion-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(buffer + offset);
+ TraceOneDataRelocation(trc, inst);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, MIPSBuffer* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ BufferOffset bo (reader.readUnsigned());
+ MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
+ TraceOneDataRelocation(trc, iter.cur());
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+void
+Assembler::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
+ }
+ }
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, &m_buffer, reader);
+ }
+}
+
+void
+Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
+{
+ if (label->bound()) {
+ intptr_t offset = label->offset();
+ Instruction* inst = (Instruction*) (rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)address);
+ }
+}
+
+void
+Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
+{
+ int64_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+    // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+    // Generate the long jump for calls because the return address has to be
+    // the address after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+        // Not skipping the trailing nops can improve performance
+        // on the Loongson3 platform.
+ bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ if (skipNops) {
+ inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
+            // There are 4 nops after this.
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(uint32_t)));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
+ inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ }
+}
+
+void
+Assembler::bind(RepatchLabel* label)
+{
+ BufferOffset dest = nextOffset();
+ if (label->used() && !oom()) {
+        // If the label has a use, then change this use to refer to
+        // the bound label.
+ BufferOffset b(label->offset());
+ InstImm* inst = (InstImm*)editSrc(b);
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ uint64_t offset = dest.getOffset() - label->offset();
+
+        // If the first instruction is lui, then this is a long jump.
+        // If the second instruction is lui, then this is a loop backedge.
+ if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
+            // For unconditional long branches generated by ma_liPatchable,
+            // such as under jumpWithPatch.
+ Assembler::UpdateLoad64Value(inst, dest.getOffset());
+ } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
+ BOffImm16::IsInRange(offset))
+ {
+ // Handle code produced by:
+ // backedgeJump
+ // branchWithCode
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ } else if (inst[0].encode() == inst_beq.encode()) {
+ // Handle open long unconditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS64::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ MOZ_ASSERT(inst[4].encode() == NopInst);
+ MOZ_ASSERT(inst[5].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset()));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset());
+ inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ } else {
+ // Handle open long conditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS64::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ MOZ_ASSERT(inst[4].encode() == NopInst);
+ MOZ_ASSERT(inst[5].encode() == NopInst);
+ MOZ_ASSERT(inst[6].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset() + sizeof(uint32_t)));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset());
+ inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ }
+ }
+ label->bind(dest.getOffset());
+}
+
+uint32_t
+Assembler::PatchWrite_NearCallSize()
+{
+    // Loading an address needs 4 instructions, plus a jump with a delay slot.
+ return (4 + 2) * sizeof(uint32_t);
+}
+
+void
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction* inst = (Instruction*) start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+    // Always use a long jump, for two reasons:
+    // - The jump has to be the same size because of PatchWrite_NearCallSize.
+    // - The return address has to be at the end of the replaced block.
+    // A short jump wouldn't be any more efficient.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[5] = InstNOP();
+
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
+}
+
+uint64_t
+Assembler::ExtractLoad64Value(Instruction* inst0)
+{
+ InstImm* i0 = (InstImm*) inst0;
+ InstImm* i1 = (InstImm*) i0->next();
+ InstReg* i2 = (InstReg*) i1->next();
+ InstImm* i3 = (InstImm*) i2->next();
+ InstImm* i5 = (InstImm*) i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32))
+ {
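+        // Short (four-instruction) form: three 16-bit immediates form a
+        // 48-bit value which is then sign-extended to 64 bits.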
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
+ (uint64_t(i1->extractImm16Value()) << 16) |
+ uint64_t(i3->extractImm16Value());
+        return uint64_t((int64_t(value) << 16) >> 16);
+ }
+
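+    // Long (six-instruction) form: four 16-bit immediates make up the full
+    // 64-bit value.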
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) |
+ (uint64_t(i1->extractImm16Value()) << 32) |
+ (uint64_t(i3->extractImm16Value()) << 16) |
+ uint64_t(i5->extractImm16Value());
+ return value;
+}
+
+void
+Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value)
+{
+ InstImm* i0 = (InstImm*) inst0;
+ InstImm* i1 = (InstImm*) i0->next();
+ InstReg* i2 = (InstReg*) i1->next();
+ InstImm* i3 = (InstImm*) i2->next();
+ InstImm* i5 = (InstImm*) i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32))
+ {
+ i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Upper(Imm32(value)));
+ i3->setImm16(Imm16::Lower(Imm32(value)));
+ return;
+ }
+
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ i0->setImm16(Imm16::Upper(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i3->setImm16(Imm16::Upper(Imm32(value)));
+ i5->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+void
+Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value)
+{
+ Instruction* inst1 = inst0->next();
+ Instruction* inst2 = inst1->next();
+ Instruction* inst3 = inst2->next();
+
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
+ *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
+ *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue)
+{
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue)
+{
+ Instruction* inst = (Instruction*) label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+
+ AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
+}
+
+void
+Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
+{
+ InstImm* inst = (InstImm*)code;
+ Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
+}
+
+uint64_t
+Assembler::ExtractInstructionImmediate(uint8_t* code)
+{
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+ InstImm* i3 = (InstImm*) i1->next()->next();
+ Instruction* i4 = (Instruction*) i3->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
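+    // The instruction after the pointer load is either a jalr (call enabled)
+    // or a nop (call disabled); toggle between the two.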
+ if (enabled) {
+ MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i4 = jalr;
+ } else {
+ InstNOP nop;
+ *i4 = nop;
+ }
+
+ AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
+}
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
new file mode 100644
index 000000000..8a71c57bb
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -0,0 +1,236 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Assembler_mips64_h
+#define jit_mips64_Assembler_mips64_h
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips64/Architecture-mips64.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = a4;
+static constexpr Register CallTempReg5 = a5;
+
+static constexpr Register CallTempNonArgRegs[] = { t0, t1, t2, t3 };
+static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ unsigned usedArgSlots_;
+ bool firstArgFloat;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ uint32_t stackBytesConsumedSoFar() const {
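+        // The first eight argument slots are passed in registers; only the
+        // remaining slots consume stack space.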
+ if (usedArgSlots_ <= 8)
+ return 0;
+
+ return (usedArgSlots_ - 8) * sizeof(int64_t);
+ }
+};
+
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = s5;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register JSReturnReg = v1;
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::f0, FloatRegisters::Single };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegisters::Double };
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f23, FloatRegisters::Single };
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f23, FloatRegisters::Double };
+static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f21, FloatRegisters::Single };
+static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, FloatRegisters::Double };
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
+static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
+
+static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegisters::Double };
+static constexpr FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Double };
+static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegisters::Double };
+static constexpr FloatRegister f3 = { FloatRegisters::f3, FloatRegisters::Double };
+static constexpr FloatRegister f4 = { FloatRegisters::f4, FloatRegisters::Double };
+static constexpr FloatRegister f5 = { FloatRegisters::f5, FloatRegisters::Double };
+static constexpr FloatRegister f6 = { FloatRegisters::f6, FloatRegisters::Double };
+static constexpr FloatRegister f7 = { FloatRegisters::f7, FloatRegisters::Double };
+static constexpr FloatRegister f8 = { FloatRegisters::f8, FloatRegisters::Double };
+static constexpr FloatRegister f9 = { FloatRegisters::f9, FloatRegisters::Double };
+static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegisters::Double };
+static constexpr FloatRegister f11 = { FloatRegisters::f11, FloatRegisters::Double };
+static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegisters::Double };
+static constexpr FloatRegister f13 = { FloatRegisters::f13, FloatRegisters::Double };
+static constexpr FloatRegister f14 = { FloatRegisters::f14, FloatRegisters::Double };
+static constexpr FloatRegister f15 = { FloatRegisters::f15, FloatRegisters::Double };
+static constexpr FloatRegister f16 = { FloatRegisters::f16, FloatRegisters::Double };
+static constexpr FloatRegister f17 = { FloatRegisters::f17, FloatRegisters::Double };
+static constexpr FloatRegister f18 = { FloatRegisters::f18, FloatRegisters::Double };
+static constexpr FloatRegister f19 = { FloatRegisters::f19, FloatRegisters::Double };
+static constexpr FloatRegister f20 = { FloatRegisters::f20, FloatRegisters::Double };
+static constexpr FloatRegister f21 = { FloatRegisters::f21, FloatRegisters::Double };
+static constexpr FloatRegister f22 = { FloatRegisters::f22, FloatRegisters::Double };
+static constexpr FloatRegister f23 = { FloatRegisters::f23, FloatRegisters::Double };
+static constexpr FloatRegister f24 = { FloatRegisters::f24, FloatRegisters::Double };
+static constexpr FloatRegister f25 = { FloatRegisters::f25, FloatRegisters::Double };
+static constexpr FloatRegister f26 = { FloatRegisters::f26, FloatRegisters::Double };
+static constexpr FloatRegister f27 = { FloatRegisters::f27, FloatRegisters::Double };
+static constexpr FloatRegister f28 = { FloatRegisters::f28, FloatRegisters::Double };
+static constexpr FloatRegister f29 = { FloatRegisters::f29, FloatRegisters::Double };
+static constexpr FloatRegister f30 = { FloatRegisters::f30, FloatRegisters::Double };
+static constexpr FloatRegister f31 = { FloatRegisters::f31, FloatRegisters::Double };
+
+// MIPS64 CPUs can only load multibyte data that is "naturally"
+// eight-byte-aligned; the sp register should be sixteen-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+class Assembler : public AssemblerMIPSShared
+{
+ public:
+ Assembler()
+ : AssemblerMIPSShared()
+ { }
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer* trc);
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ using AssemblerMIPSShared::bind;
+
+ void bind(RepatchLabel* label);
+ void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value);
+
+
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
+ static uint64_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
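+// The n64 ABI passes the first eight arguments in a0..a7 (integer) or
+// f12..f19 (floating point); see GetIntArgReg and GetFloatArgReg below.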
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = NumIntArgRegs;
+
+static inline bool
+GetIntArgReg(uint32_t usedArgSlots, Register* out)
+{
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+static inline bool
+GetFloatArgReg(uint32_t usedArgSlots, FloatRegister* out)
+{
+ if (usedArgSlots < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(f12.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once we have no
+// more actual argument registers to use, we fall back on the CallTempReg*
+// registers that don't overlap the argument registers, and only fail once
+// those run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
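+// Stack arguments begin after the eight register-passed slots; each argument
+// occupies one 64-bit slot.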
+static inline uint32_t
+GetArgStackDisp(uint32_t usedArgSlots)
+{
+ MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
+ return (usedArgSlots - NumIntArgRegs) * sizeof(int64_t);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Assembler_mips64_h */
diff --git a/js/src/jit/mips64/Bailouts-mips64.cpp b/js/src/jit/mips64/Bailouts-mips64.cpp
new file mode 100644
index 000000000..3c6c4c6c4
--- /dev/null
+++ b/js/src/jit/mips64/Bailouts-mips64.cpp
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Bailouts-mips64.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machineState())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+ snapshotOffset_ = bailout->snapshotOffset();
+}
diff --git a/js/src/jit/mips64/Bailouts-mips64.h b/js/src/jit/mips64/Bailouts-mips64.h
new file mode 100644
index 000000000..1f80b303f
--- /dev/null
+++ b/js/src/jit/mips64/Bailouts-mips64.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Bailouts_mips64_h
+#define jit_mips64_Bailouts_mips64_h
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ uintptr_t frameSize_;
+ uintptr_t snapshotOffset_;
+
+ public:
+ MachineState machineState() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ uint32_t snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ uint8_t* parentStackPointer() {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+ static size_t offsetOfFrameSize() {
+ return offsetof(BailoutStack, frameSize_);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Bailouts_mips64_h */
diff --git a/js/src/jit/mips64/BaselineCompiler-mips64.cpp b/js/src/jit/mips64/BaselineCompiler-mips64.cpp
new file mode 100644
index 000000000..72535bf1e
--- /dev/null
+++ b/js/src/jit/mips64/BaselineCompiler-mips64.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/BaselineCompiler-mips64.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPS64::BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCompilerMIPSShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/mips64/BaselineCompiler-mips64.h b/js/src/jit/mips64/BaselineCompiler-mips64.h
new file mode 100644
index 000000000..b06fdbf7a
--- /dev/null
+++ b/js/src/jit/mips64/BaselineCompiler-mips64.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_BaselineCompiler_mips64_h
+#define jit_mips64_BaselineCompiler_mips64_h
+
+#include "jit/mips-shared/BaselineCompiler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPS64 : public BaselineCompilerMIPSShared
+{
+ protected:
+ BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerMIPS64 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_BaselineCompiler_mips64_h */
diff --git a/js/src/jit/mips64/BaselineIC-mips64.cpp b/js/src/jit/mips64/BaselineIC-mips64.cpp
new file mode 100644
index 000000000..5c0e6d0b7
--- /dev/null
+++ b/js/src/jit/mips64/BaselineIC-mips64.cpp
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ Label conditionTrue;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.ma_cmp_set(R0.valueReg(), ExtractTemp0, ExtractTemp1, cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
new file mode 100644
index 000000000..45f0e69d7
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -0,0 +1,774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/CodeGenerator-mips64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS64>
+{
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorMIPS64* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir)
+ : mir_(mir)
+ {}
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ CodeLabel* jumpLabel() {
+ return &jumpLabel_;
+ }
+};
+
+void
+CodeGeneratorMIPS64::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ masm.push(ImmWord(ool->snapshot()->snapshotOffset()));
+
+ masm.jump(&deoptLabel_);
+}
+
+void
+CodeGeneratorMIPS64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel()->target());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished. Each table entry uses 8
+ // instructions (4 for load address, 2 for branch, and 2 padding).
+ CodeLabel cl;
+ masm.ma_li(ScratchRegister, cl.patchAt());
+ masm.branch(ScratchRegister);
+ masm.as_nop();
+ masm.as_nop();
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void
+CodeGeneratorMIPS64::emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register address)
+{
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+    // Subtract the low bound so the index is zero-based.
+ if (mir->low() != 0)
+ masm.subPtr(Imm32(mir->low()), index);
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+    // Compute the address of the jump table entry for this case.
+ masm.ma_li(address, ool->jumpLabel()->patchAt());
+    // index *= 32 (each table entry is 8 instructions = 32 bytes).
+    // See CodeGeneratorMIPS64::visitOutOfLineTableSwitch.
+ masm.lshiftPtr(Imm32(5), index);
+ masm.addPtr(index, address);
+
+ masm.branch(address);
+}
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(0);
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_CRASH("MIPS64 does not use frame size classes");
+}
+
+ValueOperand
+CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand
+CodeGeneratorMIPS64::ToOutValue(LInstruction* ins)
+{
+ return ValueOperand(ToRegister(ins->getDef(0)));
+}
+
+ValueOperand
+CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void
+CodeGeneratorMIPS64::visitBox(LBox* box)
+{
+ const LAllocation* in = box->getOperand(0);
+ const LDefinition* result = box->getDef(0);
+
+ if (IsFloatingPointType(box->type())) {
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
+ reg = ScratchDoubleReg;
+ }
+ masm.moveFromDouble(reg, ToRegister(result));
+ } else {
+ masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
+ }
+}
+
+void
+CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
+{
+ MUnbox* mir = unbox->mir();
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ masm.splitTag(value, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(MIRTypeToTag(mir->type())),
+ unbox->snapshot());
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ Register result = ToRegister(unbox->output());
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+Register
+CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value)
+{
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ return SecondScratchReg;
+}
+
+void
+CodeGeneratorMIPS64::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+
+ // Load boxed boolean in ScratchRegister.
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), ScratchRegister);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister);
+
+ // Perform the comparison.
+ masm.cmpPtrSet(cond, lhs.valueReg(), ScratchRegister, output);
+}
+
+void
+CodeGeneratorMIPS64::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ // Load boxed boolean in ScratchRegister.
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), ScratchRegister);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister);
+
+ // Perform the comparison.
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ emitBranch(lhs.valueReg(), ScratchRegister, cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS64::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ masm.cmpPtrSet(cond, lhs.valueReg(), rhs.valueReg(), output);
+}
+
+void
+CodeGeneratorMIPS64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ emitBranch(lhs.valueReg(), rhs.valueReg(), cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS64::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ Register rhsReg;
+
+ if (IsConstant(rhs)) {
+ rhsReg = ScratchRegister;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else {
+ rhsReg = ToRegister64(rhs).reg;
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg, output);
+}
+
+void
+CodeGeneratorMIPS64::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register rhsReg;
+
+ if (IsConstant(rhs)) {
+ rhsReg = ScratchRegister;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else {
+ rhsReg = ToRegister64(rhs).reg;
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+ emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS64::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notmin);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notmin);
+ if (lir->mir()->isMod()) {
+ masm.ma_xor(output, output);
+ } else {
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ }
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.as_ddiv(lhs, rhs);
+
+ if (lir->mir()->isMod())
+ masm.as_mfhi(output);
+ else
+ masm.as_mflo(output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS64::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+
+ masm.as_ddivu(lhs, rhs);
+
+ if (lir->mir()->isMod())
+ masm.as_mfhi(output);
+ else
+ masm.as_mflo(output);
+
+ masm.bind(&done);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+
+ masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS64::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+ masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS64::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ masm.load64(Address(GlobalReg, addr), ToOutRegister64(ins));
+}
+
+void
+CodeGeneratorMIPS64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ masm.store64(ToRegister64(ins->value()), Address(GlobalReg, addr));
+}
+
+void
+CodeGeneratorMIPS64::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
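+ // out already holds the true expression; movz overwrites it with the false
+ // expression when cond is zero.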
+ if (falseExpr.value().isRegister()) {
+ masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGeneratorMIPS64::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorMIPS64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void
+CodeGeneratorMIPS64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
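+ // dext zero-extends the low 32 bits; for the signed case a 32-bit sll by
+ // zero suffices, since 32-bit operations sign-extend their result on MIPS64.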
+ if (lir->mir()->isUnsigned())
+ masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
+ else
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+}
+
+void
+CodeGeneratorMIPS64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory())
+ masm.load32(ToAddress(input), output);
+ else
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void
+CodeGeneratorMIPS64::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void
+CodeGeneratorMIPS64::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void
+CodeGeneratorMIPS64::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.cmp64Set(Assembler::Equal, input.reg, Imm32(0), output);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (mir->isUnsigned()) {
+ Label isLarge, done;
+
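+ // Inputs below 2^63 are truncated directly; larger inputs have 2^63
+ // subtracted before truncation and bit 63 set afterwards.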
+ if (fromType == MIRType::Double) {
+ masm.loadConstantDouble(double(INT64_MAX), ScratchDoubleReg);
+ masm.ma_bc1d(ScratchDoubleReg, input, &isLarge,
+ Assembler::DoubleLessThanOrEqual, ShortJump);
+
+ masm.as_truncld(ScratchDoubleReg, input);
+ } else {
+ masm.loadConstantFloat32(float(INT64_MAX), ScratchFloat32Reg);
+ masm.ma_bc1s(ScratchFloat32Reg, input, &isLarge,
+ Assembler::DoubleLessThanOrEqual, ShortJump);
+
+ masm.as_truncls(ScratchDoubleReg, input);
+ }
+
+ // Check that the result is in the uint64_t range.
+ masm.moveFromDouble(ScratchDoubleReg, output);
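+ // FCSR bit 16 is the Invalid Operation cause bit; also reject results whose
+ // sign bit is set.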
+ masm.as_cfc1(ScratchRegister, Assembler::FCSR);
+ masm.as_ext(ScratchRegister, ScratchRegister, 16, 1);
+ masm.ma_dsrl(SecondScratchReg, output, Imm32(63));
+ masm.ma_or(SecondScratchReg, ScratchRegister);
+ masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.ma_b(&done, ShortJump);
+
+ // The input is greater than double(INT64_MAX).
+ masm.bind(&isLarge);
+ if (fromType == MIRType::Double) {
+ masm.as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
+ masm.as_truncld(ScratchDoubleReg, ScratchDoubleReg);
+ } else {
+ masm.as_subs(ScratchDoubleReg, input, ScratchDoubleReg);
+ masm.as_truncls(ScratchDoubleReg, ScratchDoubleReg);
+ }
+
+ // Check that the result is in the uint64_t range.
+ masm.moveFromDouble(ScratchDoubleReg, output);
+ masm.as_cfc1(ScratchRegister, Assembler::FCSR);
+ masm.as_ext(ScratchRegister, ScratchRegister, 16, 1);
+ masm.ma_dsrl(SecondScratchReg, output, Imm32(63));
+ masm.ma_or(SecondScratchReg, ScratchRegister);
+ masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual);
+
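+ // Compensate for the 2^63 subtracted before the truncation by setting the
+ // sign bit of the result.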
+ masm.ma_li(ScratchRegister, Imm32(1));
+ masm.ma_dins(output, ScratchRegister, Imm32(63), Imm32(1));
+
+ masm.bind(&done);
+ return;
+ }
+
+ // When the input value is Infinity, NaN, or rounds to an integer outside the
+ // range [INT64_MIN, INT64_MAX + 1), the Invalid Operation flag is set in the FCSR.
+ if (fromType == MIRType::Double)
+ masm.as_truncld(ScratchDoubleReg, input);
+ else
+ masm.as_truncls(ScratchDoubleReg, input);
+
+ // Check that the result is in the int64_t range.
+ masm.as_cfc1(output, Assembler::FCSR);
+ masm.as_ext(output, output, 16, 1);
+ masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.bind(ool->rejoin());
+ masm.moveFromDouble(ScratchDoubleReg, output);
+}
+
+void
+CodeGeneratorMIPS64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register input = ToRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToDouble(input, output);
+ else
+ masm.convertInt64ToDouble(input, output);
+ } else {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToFloat32(input, output);
+ else
+ masm.convertInt64ToFloat32(input, output);
+ }
+}
+
+void
+CodeGeneratorMIPS64::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void
+CodeGeneratorMIPS64::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.reg_ == FloatRegisters::f0);
+ MOZ_ASSERT(ReturnDoubleReg.reg_ == FloatRegisters::f0);
+ FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Single };
+ regs->add(ReturnFloat32Reg);
+ regs->add(f1);
+ regs->add(ReturnDoubleReg);
+}
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.h b/js/src/jit/mips64/CodeGenerator-mips64.h
new file mode 100644
index 000000000..3c859ef4c
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.h
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_CodeGenerator_mips64_h
+#define jit_mips64_CodeGenerator_mips64_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared
+{
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ public:
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitWasmLoadI64(LWasmLoadI64* lir);
+ void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitWasmSelectI64(LWasmSelectI64* ins);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitClzI64(LClzI64* lir);
+ void visitCtzI64(LCtzI64* lir);
+ void visitNotI64(LNotI64* lir);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand& value);
+
+ public:
+ CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm)
+ { }
+
+ public:
+ void visitBox(LBox* box);
+ void visitUnbox(LUnbox* unbox);
+
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+};
+
+typedef CodeGeneratorMIPS64 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_CodeGenerator_mips64_h */
diff --git a/js/src/jit/mips64/LIR-mips64.h b/js/src/jit/mips64/LIR-mips64.h
new file mode 100644
index 000000000..b47ff0d59
--- /dev/null
+++ b/js/src/jit/mips64/LIR-mips64.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_LIR_mips64_h
+#define jit_mips64_LIR_mips64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LUnbox
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(input),
+ type_(type)
+ { }
+
+ MIRType type() const {
+ return type_;
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_LIR_mips64_h */
diff --git a/js/src/jit/mips64/LOpcodes-mips64.h b/js/src/jit/mips64/LOpcodes-mips64.h
new file mode 100644
index 000000000..166bfb1b1
--- /dev/null
+++ b/js/src/jit/mips64/LOpcodes-mips64.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_LOpcodes_mips64_h__
+#define jit_mips64_LOpcodes_mips64_h__
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(ModMaskI) \
+ _(DivOrModI64) \
+ _(UDivOrMod) \
+ _(UDivOrModI64) \
+ _(WasmUnalignedLoad) \
+ _(WasmUnalignedStore) \
+ _(WasmUnalignedLoadI64) \
+ _(WasmUnalignedStoreI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint)
+
+#endif // jit_mips64_LOpcodes_mips64_h__
diff --git a/js/src/jit/mips64/Lowering-mips64.cpp b/js/src/jit/mips64/Lowering-mips64.cpp
new file mode 100644
index 000000000..bcc61163f
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.cpp
@@ -0,0 +1,184 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Lowering-mips64.h"
+
+#include "jit/mips64/Assembler-mips64.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorMIPS64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+LBoxAllocation
+LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+void
+LIRGeneratorMIPS64::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()),
+ temp());
+ defineInt64(lir, div);
+}
+
+void
+LIRGeneratorMIPS64::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp());
+ defineInt64(lir, mod);
+}
+
+void
+LIRGeneratorMIPS64::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()),
+ useRegister(div->rhs()),
+ temp());
+ defineInt64(lir, div);
+}
+
+void
+LIRGeneratorMIPS64::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
+ useRegister(mod->rhs()),
+ temp());
+ defineInt64(lir, mod);
+}
+
+void
+LIRGeneratorMIPS64::visitBox(MBox* box)
+{
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit it near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void
+LIRGeneratorMIPS64::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* box = unbox->getOperand(0);
+
+ if (box->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new(alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new(alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ define(lir, unbox);
+}
+
+void
+LIRGeneratorMIPS64::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void
+LIRGeneratorMIPS64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorMIPS64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void
+LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new(alloc())
+ LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void
+LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new(alloc())
+ LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void
+LIRGeneratorMIPS64::visitRandom(MRandom* ins)
+{
+ LRandom* lir = new(alloc()) LRandom(temp(), temp(), temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
diff --git a/js/src/jit/mips64/Lowering-mips64.h b/js/src/jit/mips64/Lowering-mips64.h
new file mode 100644
index 000000000..7427f1ecb
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Lowering_mips64_h
+#define jit_mips64_Lowering_mips64_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS64 : public LIRGeneratorMIPSShared
+{
+ protected:
+ LIRGeneratorMIPS64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() {
+ return temp();
+ }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitRandom(MRandom* ins);
+};
+
+typedef LIRGeneratorMIPS64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Lowering_mips64_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64-inl.h b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
new file mode 100644
index 000000000..f5737748b
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -0,0 +1,774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_inl_h
+#define jit_mips64_MacroAssembler_mips64_inl_h
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movePtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ ma_and(dest, src);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_and(dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ ma_and(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::and64(const Operand& src, Register64 dest)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ and64(scratch, dest);
+ } else {
+ and64(Register64(src.toReg()), dest);
+ }
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_or(dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_xor(dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ ma_or(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::or64(const Operand& src, Register64 dest)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ or64(scratch, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ ma_xor(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::xor64(const Operand& src, Register64 dest)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ xor64(scratch, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ ma_daddu(dest, src);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ ma_daddu(dest, imm);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ movePtr(imm, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addPtr(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::add64(const Operand& src, Register64 dest)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ add64(scratch, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ ma_daddu(dest.reg, imm);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ ma_daddu(dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ as_dsubu(dest, dest, src);
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ ma_dsubu(dest, dest, imm);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ as_dsubu(dest.reg, dest.reg, src.reg);
+}
+
+void
+MacroAssembler::sub64(const Operand& src, Register64 dest)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ sub64(scratch, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ as_dsubu(dest.reg, dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ as_dmultu(dest.reg, ScratchRegister);
+ as_mflo(dest.reg);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ as_dmultu(dest.reg, src.reg);
+ as_mflo(dest.reg);
+}
+
+void
+MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register temp)
+{
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ mul64(scratch, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ as_daddu(dest, src, src);
+ as_daddu(dest, dest, src);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr)));
+ as_ld(SecondScratchReg, ScratchRegister, 0);
+ as_daddiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sd(SecondScratchReg, ScratchRegister, 0);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
+ as_dsubu(reg.reg, zero, reg.reg);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest, dest, imm);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest.reg, dest.reg, imm);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 dest)
+{
+ ma_dsll(dest.reg, dest.reg, shift);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest.reg, dest.reg, imm);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 dest)
+{
+ ma_dsrl(dest.reg, dest.reg, shift);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest.reg, dest.reg, imm);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest)
+{
+ ma_dsra(dest.reg, dest.reg, shift);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value)
+ ma_drol(dest.reg, src.reg, count);
+ else
+ ma_move(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_drol(dest.reg, src.reg, count);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value)
+ ma_dror(dest.reg, src.reg, count);
+ else
+ ma_move(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_dror(dest.reg, src.reg, count);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// Also see below for specializations of cmpPtrSet.
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ as_dclz(dest, src.reg);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ ma_dctz(dest, src.reg);
+}
+
+void
+MacroAssembler::popcnt64(Register64 input, Register64 output, Register tmp)
+{
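+ // SWAR popcount: form 2-bit, 4-bit and then 8-bit partial counts, and sum
+ // the per-byte counts with shifted adds (in place of a multiply by
+ // 0x0101010101010101); the total ends up in the top byte.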
+ ma_move(output.reg, input.reg);
+ ma_dsra(tmp, input.reg, Imm32(1));
+ ma_li(ScratchRegister, ImmWord(0x5555555555555555UL));
+ ma_and(tmp, ScratchRegister);
+ ma_dsubu(output.reg, tmp);
+ ma_dsra(tmp, output.reg, Imm32(2));
+ ma_li(ScratchRegister, ImmWord(0x3333333333333333UL));
+ ma_and(output.reg, ScratchRegister);
+ ma_and(tmp, ScratchRegister);
+ ma_daddu(output.reg, tmp);
+ ma_dsrl(tmp, output.reg, Imm32(4));
+ ma_daddu(output.reg, tmp);
+ ma_li(ScratchRegister, ImmWord(0xF0F0F0F0F0F0F0FUL));
+ ma_and(output.reg, ScratchRegister);
+ ma_dsll(tmp, output.reg, Imm32(8));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(16));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(32));
+ ma_daddu(output.reg, tmp);
+ ma_dsra(output.reg, output.reg, Imm32(56));
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ if (rhs != ScratchRegister)
+ movePtr(rhs, ScratchRegister);
+ // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
+ rshiftPtr(Imm32(1), ScratchRegister);
+ branchPtr(cond, lhs, ScratchRegister, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ ma_dext(scratch, value.valueReg(), Imm32(0), Imm32(32));
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
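+ // All tag values up to and including JSVAL_TAG_MAX_DOUBLE encode doubles,
+ // so the equality test becomes a range check on the tag.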
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ uint64_t magic = MagicValue(why).asRawBits();
+ ScratchRegisterScope scratch(*this);
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
+{
+ ma_sd(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_sd(src, addr);
+}
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
+{
+ ma_ss(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_ss(src, addr);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
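+ // ma_BoundsCheck emits a patchable load of the heap limit into
+ // ScratchRegister; record its offset so wasmPatchBoundsCheck can update it.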
+ BufferOffset bo = ma_BoundsCheck(ScratchRegister);
+ append(wasm::BoundsCheck(bo.getOffset()));
+
+ ma_b(index, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ // Patch the bounds-check limit load with the new value.
+ Assembler::UpdateLoad64Value((Instruction*) patchAt, limit);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// The specializations for cmpPtrSet and cmp32Set are outside the braces because
+// check_macroassembler_style can't yet deal with specializations.
+
+template<>
+inline void
+MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest)
+{
+ loadPtr(lhs, ScratchRegister);
+ movePtr(rhs, SecondScratchReg);
+ cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest);
+}
+
+template<>
+inline void
+MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest)
+{
+ loadPtr(rhs, ScratchRegister);
+ cmpPtrSet(cond, lhs, ScratchRegister, dest);
+}
+
+template<>
+inline void
+MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest)
+{
+ load32(rhs, ScratchRegister);
+ cmp32Set(cond, lhs, ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr)
+{
+ asMasm().add32(Imm32(1), addr);
+}
+
+void
+MacroAssemblerMIPS64Compat::computeEffectiveAddress(const BaseIndex& address, Register dest)
+{
+ computeScaledAddress(address, dest);
+ if (address.offset)
+ asMasm().addPtr(Imm32(address.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::retn(Imm32 n)
+{
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_inl_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
new file mode 100644
index 000000000..329fa83f8
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -0,0 +1,2485 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
+
+void
+MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src, Register dest)
+{
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+ // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
+ // calls this with ScratchDoubleReg as dest.
+ MOZ_ASSERT(dest != SecondScratchDoubleReg);
+
+ // Subtract INT32_MIN to get a positive number
+ ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+ // Convert value
+ as_mtc1(ScratchRegister, dest);
+ as_cvtdw(dest, dest);
+
+ // Add unsigned value of INT32_MIN
+ ma_lid(SecondScratchDoubleReg, 2147483648.0);
+ as_addd(dest, dest, SecondScratchDoubleReg);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt64ToDouble(Register src, FloatRegister dest)
+{
+ as_dmtc1(src, dest);
+ as_cvtdl(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt64ToFloat32(Register src, FloatRegister dest)
+{
+ as_dmtc1(src, dest);
+ as_cvtsl(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src, FloatRegister dest)
+{
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
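+ // src has its top bit set: halve it, keeping the low bit so that rounding
+ // is preserved, convert, then double the result.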
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtdl(dest, dest);
+ asMasm().addDouble(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtdl(dest, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt64ToFloat32(Register src, FloatRegister dest)
+{
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtsl(dest, dest);
+ asMasm().addFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtsl(dest, dest);
+
+ bind(&done);
+}
+
+bool
+MacroAssemblerMIPS64Compat::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ convertUInt64ToDouble(src.reg, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ // We cannot do the same as convertUInt32ToDouble because float32 doesn't
+ // have enough precision.
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ convertInt32ToFloat32(src, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_cvtsd(dest, src);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
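+ // -0.0 is the only double whose bit pattern, rotated left by one, equals 1.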
+ moveFromDouble(src, dest);
+ ma_drol(dest, dest, Imm32(1));
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+ // Convert double to int, then convert back and check if we have the
+ // same number.
+ as_cvtwd(ScratchDoubleReg, src);
+ as_mfc1(dest, ScratchDoubleReg);
+ as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
+ ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ // Converting the floating point value to an integer and then converting it
+ // back to a float32 would not work, as float to int32 conversions are
+ // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+ // and then back to float(INT32_MAX + 1)). If this ever happens, we just
+ // bail out.
+ as_cvtws(ScratchFloat32Reg, src);
+ as_mfc1(dest, ScratchFloat32Reg);
+ as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
+ ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+ // Bail out in the clamped cases.
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
+{
+ as_cvtds(dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::movq(Register rs, Register rd)
+{
+ ma_move(rd, rs);
+}
+
+void
+MacroAssemblerMIPS64::ma_li(Register dest, CodeOffset* label)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->bind(bo.getOffset());
+}
+
+void
+MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm)
+{
+ int64_t value = imm.value;
+
+ if (value >= INT16_MIN && value <= INT16_MAX) {
+ as_addiu(dest, zero, value);
+ } else if (imm.value <= UINT16_MAX) {
+ as_ori(dest, zero, Imm16::Lower(Imm32(value)).encode());
+ } else if (value >= INT32_MIN && value <= INT32_MAX) {
+ as_lui(dest, Imm16::Upper(Imm32(value)).encode());
+ if (value & 0xffff)
+ as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
+ } else if (imm.value <= UINT32_MAX) {
+ as_lui(dest, Imm16::Upper(Imm32(value)).encode());
+ if (value & 0xffff)
+ as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
+ as_dinsu(dest, zero, 32, 32);
+ } else {
+ uint64_t high = imm.value >> 32;
+
+ if (imm.value >> 48) {
+ as_lui(dest, Imm16::Upper(Imm32(high)).encode());
+ if (high & 0xffff)
+ as_ori(dest, dest, Imm16::Lower(Imm32(high)).encode());
+ as_dsll(dest, dest, 16);
+ } else {
+ as_lui(dest, Imm16::Lower(Imm32(high)).encode());
+ }
+ if ((imm.value >> 16) & 0xffff)
+ as_ori(dest, dest, Imm16::Upper(Imm32(value)).encode());
+ as_dsll(dest, dest, 16);
+ if (value & 0xffff)
+ as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
+ }
+}
+
+// This method generates a lui, dsll and ori instruction block that can be modified
+// by UpdateLoad64Value, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
+void
+MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm)
+{
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm, LiFlags flags)
+{
+ if (Li64 == flags) {
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ } else {
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_drotr32(dest, dest, 48);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs)
+{
+ as_dsubu(rd, zero, rs);
+}
+
+// Shifts
+void
+MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift)
+{
+ if (31 < shift.value)
+ as_dsll32(rd, rt, shift.value);
+ else
+ as_dsll(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift)
+{
+ if (31 < shift.value)
+ as_dsrl32(rd, rt, shift.value);
+ else
+ as_dsrl(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift)
+{
+ if (31 < shift.value)
+ as_dsra32(rd, rt, shift.value);
+ else
+ as_dsra(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift)
+{
+ if (31 < shift.value)
+ as_drotr32(rd, rt, shift.value);
+ else
+ as_drotr(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift)
+{
+ uint32_t s = 64 - shift.value;
+
+ if (31 < s)
+ as_drotr32(rd, rt, s);
+ else
+ as_drotr(rd, rt, s);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift)
+{
+ as_dsllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift)
+{
+ as_dsrlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift)
+{
+ as_dsrav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift)
+{
+ as_drotrv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift)
+{
+ ma_negu(ScratchRegister, shift);
+ as_drotrv(rd, rt, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size)
+{
+ if (pos.value >= 0 && pos.value < 32) {
+ if (pos.value + size.value > 32)
+ as_dinsm(rt, rs, pos.value, size.value);
+ else
+ as_dins(rt, rs, pos.value, size.value);
+ } else {
+ as_dinsu(rt, rs, pos.value, size.value);
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size)
+{
+ if (pos.value >= 0 && pos.value < 32) {
+ if (size.value > 32)
+ as_dextm(rt, rs, pos.value, size.value);
+ else
+ as_dext(rt, rs, pos.value, size.value);
+ } else {
+ as_dextu(rt, rs, pos.value, size.value);
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs)
+{
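+ // ctz(x) = 63 - clz(x & -x) when x is non-zero; when x is zero, movn leaves
+ // rd holding clz(0) == 64.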
+ ma_dnegu(ScratchRegister, rs);
+ as_and(rd, ScratchRegister, rs);
+ as_dclz(rd, rd);
+ ma_dnegu(SecondScratchReg, rd);
+ ma_daddu(SecondScratchReg, Imm32(0x3f));
+ as_movn(rd, SecondScratchReg, ScratchRegister);
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_daddiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_daddu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs)
+{
+ as_daddu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm)
+{
+ ma_daddu(rd, rd, imm);
+}
+
+template <typename L>
+void
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow)
+{
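+ // Perform the addition in both 64-bit and (sign-extending) 32-bit form; the
+ // results differ exactly when the 32-bit addition overflows.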
+ as_daddu(SecondScratchReg, rs, rt);
+ as_addu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Register rt, Label* overflow);
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt,
+ wasm::TrapDesc overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
+{
+ // Check for signed range because of as_daddiu
+ if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
+ as_daddiu(SecondScratchReg, rs, imm.value);
+ as_addiu(rd, rs, imm.value);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
+ wasm::TrapDesc overflow);
+
+// Subtract.
+void
+MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_daddiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_dsubu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs)
+{
+ as_dsubu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm)
+{
+ ma_dsubu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS64::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ as_dsubu(SecondScratchReg, rs, rt);
+ as_subu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ as_dmult(rs, ScratchRegister);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPS64::ma_load(Register dest, Address address,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset))
+ {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension)
+ as_lbu(dest, base, encodedOffset);
+ else
+ as_lb(dest, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension)
+ as_lhu(dest, base, encodedOffset);
+ else
+ as_lh(dest, base, encodedOffset);
+ break;
+ case SizeWord:
+ if (ZeroExtend == extension)
+ as_lwu(dest, base, encodedOffset);
+ else
+ as_lw(dest, base, encodedOffset);
+ break;
+ case SizeDouble:
+ as_ld(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_store(Register data, Address address, LoadStoreSize size,
+ LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
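+    // Loongson's indexed stores (gs*x) take a base register plus an index
+    // register, avoiding the extra daddu for out-of-range offsets.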
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ case SizeDouble:
+ as_sd(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void
+MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address, Register dest)
+{
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_dsll(ScratchRegister, address.index, Imm32(shift));
+ as_daddu(dest, address.base, ScratchRegister);
+ } else {
+ as_daddu(dest, address.base, address.index);
+ }
+}
+
+// Shortcut for when we know we're transferring a full pointer-sized (64-bit) word.
+void
+MacroAssemblerMIPS64::ma_pop(Register r)
+{
+ as_ld(r, StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void
+MacroAssemblerMIPS64::ma_push(Register r)
+{
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
+ as_sd(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
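+        // Comparing against zero: unsigned 'AboveOrEqual' is always true and
+        // 'Below' is always false, so no real comparison is needed.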
+ if (c == Always || c == AboveOrEqual)
+ ma_b(label, jumpKind);
+ else if (c == Below)
+ ; // This condition is always false. No branch required.
+ else
+ branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+{
+ if (label->bound()) {
+        // Generate the long jump for calls because the return address has to
+        // be the address after the reserved block.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch continuous in the buffer. The '6' instructions
+    // emitted below include the delay slot.
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+ MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset))
+ jumpKind = ShortJump;
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+        // Handle a long conditional branch: the inverted branch skips over
+        // the long-jump sequence (and its nop) emitted below.
+ writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+        // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ // Indicate that this is short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+    // Make the whole branch continuous in the buffer. The '7' instructions
+    // emitted below include the conditional nop.
+ m_buffer.ensureSpace(7 * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ as_nop();
+ if (conditional)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c)
+{
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c)
+{
+ ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value)));
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value)
+{
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ ma_li(ScratchRegister, imm);
+ moveToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest)
+{
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void
+MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest)
+{
+ as_dmtc1(src.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ls(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ld(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsldx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_ld(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sd(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gssdx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_sd(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ss(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS64::ma_pop(FloatRegister fs)
+{
+ ma_ld(fs, Address(StackPointer, 0));
+ as_daddiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void
+MacroAssemblerMIPS64::ma_push(FloatRegister fs)
+{
+ as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double));
+ ma_sd(fs, Address(StackPointer, 0));
+}
+
+bool
+MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+ uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+
+ asMasm().Push(Imm32(descriptor)); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+void
+MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::move32(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
+{
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void
+MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
+{
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void
+MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(const BaseIndex& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
+{
+ loadPtr(address, dest);
+ ma_dsll(dest, dest, Imm32(1));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest)
+{
+ ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ld(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
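+    // The ldl/ldr pair assembles an unaligned 64-bit load; each instruction
+    // transfers the bytes on its side of the alignment boundary.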
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
+ as_ldl(temp, SecondScratchReg, src.offset + 7);
+ as_ldr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_ldl(temp, ScratchRegister, 7);
+ as_ldr(temp, ScratchRegister, 0);
+ }
+
+ moveToDouble(temp, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
+{
+ loadFloat32(src, dest);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ls(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
+ as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+
+ moveToFloat32(temp, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Register src, const Address& address)
+{
+ ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Register src, const Address& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, AbsoluteAddress address)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, const Address& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address)
+{
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeDouble);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address)
+{
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address)
+{
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, const Address& address)
+{
+ ma_store(src, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
+{
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+
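+    // The swl/swr pair performs the unaligned 32-bit store.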
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
+ as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+}
+
+void
+MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromDouble(src, temp);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+ as_sdl(temp, SecondScratchReg, dest.offset + 7);
+ as_sdr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_sdl(temp, ScratchRegister, 7);
+ as_sdr(temp, ScratchRegister, 0);
+ }
+}
+
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+ MOZ_ASSERT(input != ScratchDoubleReg);
+ Label positive, done;
+
+ // <= 0 or NaN --> 0
+ zeroDouble(ScratchDoubleReg);
+ branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
+ {
+ move32(Imm32(0), output);
+ jump(&done);
+ }
+
+ bind(&positive);
+
+ // Add 0.5 and truncate.
+ loadConstantDouble(0.5, ScratchDoubleReg);
+ addDouble(ScratchDoubleReg, input);
+
+ Label outOfRange;
+
+ branchTruncateDoubleMaybeModUint32(input, output, &outOfRange);
+ asMasm().branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+ {
+ // Check if we had a tie.
+ convertInt32ToDouble(output, ScratchDoubleReg);
+ branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);
+
+ // It was a tie. Mask out the ones bit to get an even value.
+ // See also js_TypedArray_uint8_clamp_double.
+ and32(Imm32(~1), output);
+ jump(&done);
+ }
+
+ // > 255 --> 255
+ bind(&outOfRange);
+ {
+ move32(Imm32(255), output);
+ }
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+// unboxing code
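+// On MIPS64 a Value is a single 64-bit word with the tag stored in the bits
+// above JSVAL_TAG_SHIFT, so non-double payloads are recovered by extracting
+// the low JSVAL_TAG_SHIFT bits (see the ma_dext calls below).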
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest)
+{
+ ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest)
+{
+ loadPtr(Address(src.base, src.offset), dest);
+ ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, src.offset), dest);
+ ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand, Register dest)
+{
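+    // sll with a zero shift amount sign-extends the low 32 bits into the
+    // full 64-bit register, discarding the tag bits.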
+ ma_sll(dest, operand.valueReg(), Imm32(0));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest)
+{
+ ma_sll(dest, src, Imm32(0));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest)
+{
+ load32(Address(src.base, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src, Register dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ load32(Address(SecondScratchReg, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand, Register dest)
+{
+ ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest)
+{
+ ma_dext(dest, src, Imm32(0), Imm32(32));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src, Register dest)
+{
+ ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src, Register dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ as_dmtc1(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxDouble(const Address& src, FloatRegister dest)
+{
+ ma_ld(dest, Address(src.base, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand, Register dest)
+{
+ unboxNonDouble(operand, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest)
+{
+ ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(const Address& src, Register dest)
+{
+ unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest)
+{
+ ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src, Register dest)
+{
+ unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src, Register dest)
+{
+ unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest)
+{
+ ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(const Address& src, Register dest)
+{
+ unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr());
+ }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxPrivate(const ValueOperand& src, Register dest)
+{
+ ma_dsrl(dest, src.valueReg(), Imm32(1));
+}
+
+void
+MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src, const ValueOperand& dest)
+{
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void
+MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest)
+{
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+}
+
+void
+MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::int32ValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantFloat32(float f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ loadPtr(Address(src.base, src.offset), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ loadPtr(Address(src.base, src.offset), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ ma_ld(dest, src);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest)
+{
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(addr, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+    // Recompute the address, since SecondScratchReg was clobbered above while
+    // loading the type tag.
+ computeScaledAddress(addr, SecondScratchReg);
+ loadDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantDouble(double dp, FloatRegister dest)
+{
+ ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ ImmWord imm(d.bits());
+
+ ma_li(ScratchRegister, imm);
+ moveToDouble(ScratchRegister, dest);
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractObject(const Address& address, Register scratch)
+{
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const Address& address, Register scratch)
+{
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address, Register scratch)
+{
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, Register dest)
+{
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, const ValueOperand& dest)
+{
+ moveValue(val, dest.valueReg());
+}
+
+/* There are 3 paths through the backedge jump. They are listed here in the order
+ * in which instructions are executed.
+ * - The short jump is simple:
+ * b offset # Jumps directly to target.
+ * lui at, addr1_hl # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to loop header:
+ * b label1
+ * lui at, addr1_hl # In delay slot. We use the value in 'at' later.
+ * label1:
+ * ori at, addr1_lh
+ * drotr32 at, at, 48
+ * ori at, addr1_ll
+ * jr at
+ * lui at, addr2_hl # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to interrupt loop:
+ * b label2
+ * ...
+ * jr at
+ * label2:
+ * lui at, addr2_hl # In delay slot. Don't care about 'at' here.
+ * ori at, addr2_lh
+ * drotr32 at, at, 48
+ * ori at, addr2_ll
+ * jr at
+ * nop # In delay slot.
+ *
+ * The backedge is done this way to avoid patching lui+ori pair while it is
+ * being executed. Look also at jit::PatchBackedge().
+ */
+CodeOffsetJump
+MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+
+ // Backedges are short jumps when bound, but can become long when patched.
+ m_buffer.ensureSpace(16 * sizeof(uint32_t));
+ if (label->bound()) {
+ int32_t offset = label->offset() - bo.getOffset();
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ as_b(BOffImm16(offset));
+ } else {
+ // Jump to "label1" by default to jump to the loop header.
+ as_b(BOffImm16(2 * sizeof(uint32_t)));
+ }
+    // No need for a nop here. We can safely put the next instruction in the delay slot.
+ ma_liPatchable(ScratchRegister, ImmWord(dest));
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t));
+ as_jr(ScratchRegister);
+    // No need for a nop here. We can safely put the next instruction in the delay slot.
+ ma_liPatchable(ScratchRegister, ImmWord(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 12 * sizeof(uint32_t));
+ return CodeOffsetJump(bo.getOffset());
+}
+
+CodeOffsetJump
+MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+ addLongJump(bo);
+ ma_liPatchable(ScratchRegister, ImmWord(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffsetJump(bo.getOffset());
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst)
+{
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const Address& dest)
+{
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
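+    // Assemble the boxed Value in SecondScratchReg: the shifted tag in the
+    // upper bits, the payload inserted into the low JSVAL_TAG_SHIFT bits.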
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
+ ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest)
+{
+ if (val.isMarkable()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
+ } else {
+ ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
+ }
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val)
+{
+ loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void
+MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+ MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+ if (payload != dest.valueReg())
+ ma_move(dest.valueReg(), payload);
+ ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(ValueOperand val)
+{
+    // Allocate stack space for the Value.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store Value
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(const Address& addr)
+{
+    // Load the value before allocating the stack space; addr.base may be sp.
+ loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+ ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::popValue(ValueOperand val)
+{
+ as_ld(val.valueReg(), StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPS64Compat::breakpoint()
+{
+ as_break(0);
+}
+
+void
+MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure)
+{
+ Label isDouble, done;
+ Register tag = splitTagForTest(source);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+
+ unboxInt32(source, ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::checkStackAlignment()
+{
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+ if (framePushed() % ABIStackAlignment != 0) {
+ aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
+ reserveStack(aic.alignmentPadding);
+ } else {
+ aic.alignmentPadding = 0;
+ }
+ MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
+ checkStackAlignment();
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+ if (aic.alignmentPadding != 0)
+ freeStack(aic.alignmentPadding);
+}
+
+void
+MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+    ma_move(a0, StackPointer); // Use a0 since it is the first function argument
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+
+    // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jump(a0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the
+ // caller.
+ bind(&return_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ ma_move(StackPointer, BaselineFrameReg);
+ pop(BaselineFrameReg);
+
+    // If profiling is enabled, then update lastProfilingFrame to refer to the
+    // caller frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
+ ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
+ jump(a1);
+}
+
+template<typename T>
+void
+MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+CodeOffset
+MacroAssemblerMIPS64Compat::toggledJump(Label* label)
+{
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset
+MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
+{
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
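+    // Emit the call when enabled; otherwise emit nops of the same size so the
+    // site can later be toggled by patching.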
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+}
+
+void
+MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerMIPS64Compat::profilerExitFrame()
+{
+ branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value)
+ asMasm().subPtr(imm32, StackPointer);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+ int32_t diff = set.gprs().size() * sizeof(intptr_t) +
+ set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ int32_t diff = set.gprs().size() * sizeof(intptr_t) +
+ set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter))
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
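+    // Save the original sp on the aligned stack so callWithABIPost() can
+    // restore it.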
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+    // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+    // Save $ra because the call is going to clobber it. Restore it in
+    // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+    // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+    // Load the callee into t9; no instruction between this move and the call
+    // should clobber it. Note that we can't call through fun directly because
+    // it may be one of the IntArg registers clobbered before the call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, address, temp, label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+ Register temp, Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+
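+    // OR-ing in ChunkMask gives the address of the last byte of the object's
+    // chunk; the chunk's location field, read relative to that byte, tells us
+    // whether the chunk is in the nursery.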
+ extractObject(value, SecondScratchReg);
+ orPtr(Imm32(gc::ChunkMask), SecondScratchReg);
+ branch32(cond, Address(SecondScratchReg, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ moveValue(rhs, scratch);
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // For known integers and booleans, we can just store the unboxed value if
+ // the slot has the same type.
+ if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
+ if (value.constant()) {
+ Value val = value.value();
+ if (valueType == MIRType::Int32)
+ store32(Imm32(val.toInt32()), dest);
+ else
+ store32(Imm32(val.toBoolean() ? 1 : 0), dest);
+ } else {
+ store32(value.reg().typedReg().gpr(), dest);
+ }
+ return;
+ }
+
+ if (value.constant())
+ storeValue(value.value(), dest);
+ else
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
new file mode 100644
index 000000000..4cff87236
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -0,0 +1,1041 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_h
+#define jit_mips64_MacroAssembler_mips64_h
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+enum LiFlags
+{
+ Li64 = 0,
+ Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord
+{
+ explicit ImmShiftedTag(JSValueShiftedTag shtag)
+ : ImmWord((uintptr_t)shtag)
+ { }
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+ { }
+};
+
+struct ImmTag : public Imm32
+{
+ ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
+
+class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
+{
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+ void ma_li(Register dest, CodeOffset* label);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+ // Negate
+ void ma_dnegu(Register rd, Register rs);
+
+ // Shift operations
+ void ma_dsll(Register rd, Register rt, Imm32 shift);
+ void ma_dsrl(Register rd, Register rt, Imm32 shift);
+ void ma_dsra(Register rd, Register rt, Imm32 shift);
+ void ma_dror(Register rd, Register rt, Imm32 shift);
+ void ma_drol(Register rd, Register rt, Imm32 shift);
+
+ void ma_dsll(Register rd, Register rt, Register shift);
+ void ma_dsrl(Register rd, Register rt, Register shift);
+ void ma_dsra(Register rd, Register rt, Register shift);
+ void ma_dror(Register rd, Register rt, Register shift);
+ void ma_drol(Register rd, Register rt, Register shift);
+
+ void ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size);
+ void ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size);
+
+ void ma_dctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_daddu(Register rd, Register rs, Imm32 imm);
+ void ma_daddu(Register rd, Register rs);
+ void ma_daddu(Register rd, Imm32 imm);
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
+
+ // subtract
+ void ma_dsubu(Register rd, Register rs, Imm32 imm);
+ void ma_dsubu(Register rd, Register rs);
+ void ma_dsubu(Register rd, Imm32 imm);
+ void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+
+    // multiplies. For now, there are only a few that we care about.
+ void ma_dmult(Register rs, Imm32 imm);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+    // Branches used from within mips-specific code.
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister fd, Address address);
+ void ma_ld(FloatRegister fd, Address address);
+ void ma_sd(FloatRegister fd, Address address);
+ void ma_ss(FloatRegister fd, Address address);
+
+ void ma_pop(FloatRegister fs);
+ void ma_push(FloatRegister fs);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+
+    // These functions abstract access to the high part of the double-precision
+    // float register. They are intended to work with both 32-bit and 64-bit
+    // floating point coprocessors.
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mthc1(src, dest);
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfhc1(dest, src);
+ }
+
+ void moveToDouble(Register src, FloatRegister dest) {
+ as_dmtc1(src, dest);
+ }
+ void moveFromDouble(FloatRegister src, Register dest) {
+ as_dmfc1(dest, src);
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
+{
+ public:
+ using MacroAssemblerMIPS64::call;
+
+ MacroAssemblerMIPS64Compat()
+ { }
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rs, Register rd);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_daddu(dest, address.base, Imm32(address.offset));
+ }
+
+ inline void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) {
+ ma_b(dest);
+ }
+
+ void mov(Register src, Register dest) {
+ as_ori(dest, src, 0);
+ }
+ void mov(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(Register src, Address dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+ void mov(Address src, Register dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+
+ void writeDataRelocation(const Value& val) {
+ if (val.isMarkable()) {
+ gc::Cell* cell = val.toMarkablePointer();
+ if (cell && gc::IsInsideNursery(cell))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() {
+ as_nop();
+ }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) {
+ ma_push(reg);
+ }
+ void push(FloatRegister reg) {
+ ma_push(reg);
+ }
+ void pop(Register reg) {
+ ma_pop(reg);
+ }
+ void pop(FloatRegister reg) {
+ ma_pop(reg);
+ }
+
+ // Emit a branch that can be toggled to a non-operation. On MIPS64 we use
+ // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall
+ return 6 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset offset = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void jump(Label* label) {
+ ma_b(label);
+ }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) {
+ branch(code);
+ }
+
+ void jump(wasm::TrapDesc target) {
+ ma_b(target);
+ }
+
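+    // A boxed Value keeps its type tag in the bits at and above JSVAL_TAG_SHIFT,
+    // so a logical right shift by JSVAL_TAG_SHIFT leaves only the tag in dest.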
+ void splitTag(Register src, Register dest) {
+ ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ splitTag(value, SecondScratchReg);
+ return SecondScratchReg;
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest);
+ void unboxNonDouble(const Address& src, Register dest);
+ void unboxNonDouble(const BaseIndex& src, Register dest);
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch);
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ Register extractTag(const Address& address, Register scratch);
+ Register extractTag(const BaseIndex& address, Register scratch);
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchRegister);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest);
+
+ // higher level tag testing code
+ Address ToPayload(Address value) {
+ return value;
+ }
+
+ void moveValue(const Value& val, Register dest);
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address, dest.fpu());
+ else if (type == MIRType::Int32)
+ unboxInt32(address, dest.gpr());
+ else if (type == MIRType::Boolean)
+ unboxBoolean(address, dest.gpr());
+ else
+ unboxNonDouble(address, dest.gpr());
+ }
+
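+    // Store only the payload of a boxed value: an 8-byte width unboxes and
+    // stores a full pointer-sized payload, while the 4- and 1-byte widths
+    // store the low word or byte of the boxed value directly.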
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 8:
+ unboxNonDouble(value, ScratchRegister);
+ storePtr(ScratchRegister, address);
+ return;
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ if (src.valueReg() != dest.valueReg())
+ ma_move(dest.valueReg(), src.valueReg());
+ }
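+    // boxValue assembles a Value in dest: the type tag goes in the bits at and
+    // above JSVAL_TAG_SHIFT and the payload is inserted below it, so boxing an
+    // int32 leaves the 32-bit payload in the low bits with JSVAL_TAG_INT32
+    // above it.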
+ void boxValue(JSValueType type, Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+
+ JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+ ma_li(dest, Imm32(tag));
+ ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
+ ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isMarkable()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), ScratchRegister);
+ push(ScratchRegister);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ boxValue(type, reg, ScratchRegister);
+ push(ScratchRegister);
+ }
+ void pushValue(const Address& addr);
+
+ void handleFailureWithHandlerTail(void* handler);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
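+    // The 8- and 16-bit atomics below are implemented in the shared MIPS code
+    // as ll/sc loops on the containing aligned word; valueTemp, offsetTemp and
+    // maskTemp are the scratch registers used to shift and mask the narrow
+    // value in and out of that word.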
+ template<typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange32(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAdd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicSub8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAnd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicOr8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicXor8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+    // NOTE: This will use the second scratch register on MIPS64. Only ARM
+    // needs an implementation without the second scratch register.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T> void storePtr(ImmWord imm, T address);
+ template <typename T> void storePtr(ImmPtr imm, T address);
+ template <typename T> void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_movd(dest, src);
+ }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDouble(zero, reg);
+ }
+
+ void convertInt64ToDouble(Register src, FloatRegister dest);
+ void convertInt64ToFloat32(Register src, FloatRegister dest);
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+ void convertUInt64ToFloat32(Register src, FloatRegister dest);
+
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs, Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
+
+ void cmp64Set(Assembler::Condition cond, Register lhs, Imm32 rhs, Register dest)
+ {
+ ma_cmp_set(dest, lhs, rhs, cond);
+ }
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ public:
+ CodeOffset labelForPatch() {
+ return CodeOffset(nextOffset().getOffset());
+ }
+
+ void lea(Operand addr, Register dest) {
+ ma_daddu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ BufferOffset ma_BoundsCheck(Register bounded) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(bounded, ImmWord(0));
+ return bo;
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+
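+    // GlobalReg is kept biased by WasmGlobalRegBias (see loadWasmPinnedRegsFromTls
+    // below), presumably so that typical global data offsets stay within the
+    // signed 16-bit immediate range of MIPS loads; the bias is subtracted back
+    // out here.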
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
+ }
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
+ ma_daddu(GlobalReg, Imm32(WasmGlobalRegBias));
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_h */
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.cpp b/js/src/jit/mips64/MoveEmitter-mips64.cpp
new file mode 100644
index 000000000..d208b83a7
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.cpp
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MoveEmitter-mips64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MoveEmitterMIPS64::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+    // There is a move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
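+    // For a two-entry cycle this saves the about-to-be-clobbered value of B
+    // into the cycle slot; completeCycle() later moves that saved value into A.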
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ masm.storeFloat32(temp, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(getAdjustedAddress(to), temp);
+ masm.store32(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS64::completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+    // There is a move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(cycleSlot(0), temp);
+ masm.store32(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ // Ensure that we can use ScratchDoubleReg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.moveFromDouble(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory())
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ else
+ masm.moveToDouble(from.reg(), to.floatReg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.h b/js/src/jit/mips64/MoveEmitter-mips64.h
new file mode 100644
index 000000000..77e412fb4
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MoveEmitter_mips64_h
+#define jit_mips64_MoveEmitter_mips64_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS64 : public MoveEmitterMIPSShared
+{
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS64(MacroAssembler& masm)
+ : MoveEmitterMIPSShared(masm)
+ { }
+};
+
+typedef MoveEmitterMIPS64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MoveEmitter_mips64_h */
diff --git a/js/src/jit/mips64/SharedIC-mips64.cpp b/js/src/jit/mips64/SharedIC-mips64.cpp
new file mode 100644
index 000000000..ee325277f
--- /dev/null
+++ b/js/src/jit/mips64/SharedIC-mips64.cpp
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsiter.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jsboolinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    // Compute the result of R0 <op> R1. There is no need to allocate a separate
+    // scratch; R2's valueReg can be used directly.
+ Register scratchReg = R2.valueReg();
+
+ Label goodMul, divTest1, divTest2;
+      switch (op_) {
+ case JSOP_ADD:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ masm.ma_addTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);
+ masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
+ break;
+ case JSOP_SUB:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ masm.ma_subTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);
+ masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
+ break;
+ case JSOP_MUL: {
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ masm.ma_mul_branch_overflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);
+
+ masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);
+
+ // Result is -0 if operands have different signs.
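+        // e.g. (-3) * 0 is -0 in JS, which cannot be represented as an int32,
+        // so branch to the failure case. The xor below is negative exactly
+        // when the operands' sign bits differ.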
+ masm.as_xor(t8, ExtractTemp0, ExtractTemp1);
+ masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+
+ masm.bind(&goodMul);
+ masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
+ break;
+ }
+ case JSOP_DIV:
+ case JSOP_MOD: {
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+        // Check for INT_MIN / -1; it results in a double.
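+        // (-2147483648 / -1 would be 2147483648, which does not fit in an int32.)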
+ masm.ma_b(ExtractTemp0, Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
+ masm.ma_b(ExtractTemp1, Imm32(-1), &failure, Assembler::Equal, ShortJump);
+ masm.bind(&divTest1);
+
+ // Check for division by zero
+ masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::Equal, ShortJump);
+
+ // Check for 0 / X with X < 0 (results in -0).
+ masm.ma_b(ExtractTemp0, Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
+ masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&divTest2);
+
+ masm.as_div(ExtractTemp0, ExtractTemp1);
+
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0.
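+                // e.g. 5 / 2 is 2.5 in JS, so a nonzero remainder means the
+                // int32 stub cannot produce the result.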
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
+ masm.as_mflo(scratchReg);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ } else {
+ Label done;
+ // If X % Y == 0 and X < 0, the result is -0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ masm.ma_b(ExtractTemp0, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ }
+ case JSOP_BITOR:
+        masm.as_or(R0.valueReg(), R0.valueReg(), R1.valueReg());
+ break;
+ case JSOP_BITXOR:
+ masm.as_xor(scratchReg, R0.valueReg(), R1.valueReg());
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ break;
+ case JSOP_BITAND:
+        masm.as_and(R0.valueReg(), R0.valueReg(), R1.valueReg());
+ break;
+ case JSOP_LSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+        // MIPS only uses the 5 lowest bits in R1 as the shift amount.
+ masm.ma_sll(scratchReg, ExtractTemp0, ExtractTemp1);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ break;
+ case JSOP_RSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ masm.ma_sra(scratchReg, ExtractTemp0, ExtractTemp1);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ break;
+ case JSOP_URSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ExtractTemp1);
+ masm.ma_srl(scratchReg, ExtractTemp0, ExtractTemp1);
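+        // A logical shift can produce a value of 2^31 or more, which reads as
+        // negative when reinterpreted as an int32 and cannot be boxed as one.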
+ if (allowDouble_) {
+ Label toUint;
+ masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);
+
+ // Move result and box for return.
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(scratchReg, FloatReg1);
+ masm.boxDouble(FloatReg1, R0);
+ } else {
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ // Move result for return.
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.not32(R0.valueReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
+ break;
+ case JSOP_NEG:
+ masm.unboxInt32(R0, ExtractTemp0);
+        // Guard against 0 and INT32_MIN; both cases result in a double.
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, Imm32(INT32_MAX), &failure);
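+        // (value & INT32_MAX) is zero only for 0 and INT32_MIN: negating 0
+        // yields -0 and negating INT32_MIN overflows, so both need the double
+        // path.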
+
+ masm.neg32(ExtractTemp0);
+ masm.tagValue(JSVAL_TYPE_INT32, ExtractTemp0, R0);
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ return false;
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips64/SharedICRegisters-mips64.h b/js/src/jit/mips64/SharedICRegisters-mips64.h
new file mode 100644
index 000000000..401aca1f0
--- /dev/null
+++ b/js/src/jit/mips64/SharedICRegisters-mips64.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_SharedICRegisters_mips64_h
+#define jit_mips64_SharedICRegisters_mips64_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = s5;
+static constexpr Register BaselineStackReg = sp;
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers that are not preserved across
+// calls. The R1 value should be preserved across calls.
+static constexpr ValueOperand R0(v1);
+static constexpr ValueOperand R1(s4);
+static constexpr ValueOperand R2(a6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = a5;
+
+static constexpr Register ExtractTemp0 = s6;
+static constexpr Register ExtractTemp1 = s7;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_SharedICRegisters_mips64_h */
diff --git a/js/src/jit/mips64/Simulator-mips64.cpp b/js/src/jit/mips64/Simulator-mips64.cpp
new file mode 100644
index 000000000..fcdf41fac
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -0,0 +1,3874 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips64/Simulator-mips64.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+
+#include "jit/mips64/Assembler-mips64.h"
+#include "threading/LockGuard.h"
+#include "vm/Runtime.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+#define I64(v) static_cast<int64_t>(v)
+#define U64(v) static_cast<uint64_t>(v)
+#define I128(v) static_cast<__int128_t>(v)
+#define U128(v) static_cast<__uint128_t>(v)
+
+namespace js {
+namespace jit {
+
+static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
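+// The FCSR floating point condition codes are not contiguous: condition code 0
+// lives in bit 23 while condition codes 1..7 occupy bits 25..31, hence the
+// "24 + cc" below.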
+static uint32_t
+GetFCSRConditionBit(uint32_t cc)
+{
+ if (cc == 0)
+ return 23;
+ return 24 + cc;
+}
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+class SimInstruction
+{
+ public:
+ enum {
+ kInstrSize = 4,
+        // On MIPS the PC cannot actually be accessed directly. We behave as if
+        // the PC were always the address of the current instruction being
+        // executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const {
+ return (instructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
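+    // The field covers bits hi..lo inclusive, so the mask below has
+    // hi - lo + 1 set bits; e.g. bits(15, 12) extracts a 4-bit field with
+    // mask 0xf.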
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kRegisterType,
+ kImmediateType,
+ kJumpType,
+ kUnsupported = -1
+ };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline Opcode opcodeValue() const {
+ return static_cast<Opcode>(bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const {
+ return bits(FDShift + FDBits - 1, FDShift);
+ }
+
+ inline int fsValue() const {
+ return bits(FSShift + FSBits - 1, FSShift);
+ }
+
+ inline int ftValue() const {
+ return bits(FTShift + FTBits - 1, FTShift);
+ }
+
+ inline int frValue() const {
+ return bits(FRShift + FRBits - 1, FRShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode opcodeFieldRaw() const {
+ return static_cast<Opcode>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const {
+ return instructionBits() & RSMask;
+ }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ Opcode op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Report whether the instruction must not be used in a branch delay slot.
+ bool isForbiddenInBranchDelay() const;
+ // Report whether the instruction 'links', e.g. jal, bal.
+ bool isLinkingInstruction() const;
+ // Report whether the instruction is a break or a trap.
+ bool isTrap() const;
+
+ private:
+
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool
+SimInstruction::isForbiddenInBranchDelay() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ };
+}
+
+bool
+SimInstruction::isLinkingInstruction() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool
+SimInstruction::isTrap() const
+{
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return true;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type
+SimInstruction::instructionType() const
+{
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_sync:
+ case ff_break:
+ case ff_sll:
+ case ff_dsll:
+ case ff_dsll32:
+ case ff_srl:
+ case ff_dsrl:
+ case ff_dsrl32:
+ case ff_sra:
+ case ff_dsra:
+ case ff_dsra32:
+ case ff_sllv:
+ case ff_dsllv:
+ case ff_srlv:
+ case ff_dsrlv:
+ case ff_srav:
+ case ff_dsrav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_dmult:
+ case ff_multu:
+ case ff_dmultu:
+ case ff_div:
+ case ff_ddiv:
+ case ff_divu:
+ case ff_ddivu:
+ case ff_add:
+ case ff_dadd:
+ case ff_addu:
+ case ff_daddu:
+ case ff_sub:
+ case ff_dsub:
+ case ff_subu:
+ case ff_dsubu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_clz:
+ case ff_dclz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ case ff_bshfl:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+ // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lbu:
+ case op_lh:
+ case op_lhu:
+ case op_lw:
+ case op_lwu:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ case op_sb:
+ case op_sh:
+ case op_sw:
+ case op_swl:
+ case op_swr:
+ case op_sc:
+ case op_sd:
+ case op_sdl:
+ case op_sdr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ return kImmediateType;
+ // 26 bits immediate type instructions. e.g.: j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ };
+ return kUnsupported;
+}
+
+// C/C++ argument slots size.
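+// The N64 ABI, unlike O32, reserves no stack slots for register arguments,
+// which is why kCArgSlotCount is zero here.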
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
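+ // With 4 KB pages and 4-byte lines this works out to 1024 validity bytes
+ // per page (kValidityMapSize below).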
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex>
+{
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache(Simulator* sim)
+ : Base(sim->cacheLock_)
+ , sim_(sim)
+ {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isNothing());
+#ifdef DEBUG
+ sim_->cacheLockHolder_ = mozilla::Some(ThisThread::GetId());
+#endif
+ }
+
+ ~AutoLockSimulatorCache() {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isSome());
+#ifdef DEBUG
+ sim_->cacheLockHolder_.reset();
+#endif
+ }
+
+ private:
+ Simulator* const sim_;
+};
+
+bool Simulator::ICacheCheckingEnabled = false;
+
+int64_t Simulator::StopSimAt = -1;
+
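+// Create() honours two environment variables: MIPS_SIM_ICACHE_CHECKS enables
+// the simulated i-cache consistency checks, and MIPS_SIM_STOP_AT=<icount>
+// stops the simulation once that many instructions have been executed.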
+Simulator *
+Simulator::Create(JSContext* cx)
+{
+ Simulator* sim = js_new<Simulator>();
+ if (!sim)
+ return nullptr;
+
+ if (!sim->init()) {
+ js_delete(sim);
+ return nullptr;
+ }
+
+ if (getenv("MIPS_SIM_ICACHE_CHECKS"))
+ Simulator::ICacheCheckingEnabled = true;
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim;
+}
+
+void
+Simulator::Destroy(Simulator* sim)
+{
+ js_delete(sim);
+}
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger
+{
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ int64_t getRegisterValue(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void
+UNSUPPORTED()
+{
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
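+// A stop is a break instruction whose code field (bits 25..6) holds the stop
+// number, with the address of the stop message embedded in the instruction
+// stream immediately after it.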
+void
+MipsDebugger::stop(SimInstruction* instr)
+{
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_)
+ sim_->watchedStops_[code].desc_ = msg;
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode)
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ else
+ printf("Simulator hit %s\n", msg);
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int64_t
+MipsDebugger::getRegisterValue(int regnum)
+{
+ if (regnum == kPCRegister)
+ return sim_->get_pc();
+ return sim_->getRegister(regnum);
+}
+
+int64_t
+MipsDebugger::getFPURegisterValueLong(int regnum)
+{
+ return sim_->getFpuRegister(regnum);
+}
+
+float
+MipsDebugger::getFPURegisterValueFloat(int regnum)
+{
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double
+MipsDebugger::getFPURegisterValueDouble(int regnum)
+{
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool
+MipsDebugger::getValue(const char* desc, int64_t* value)
+{
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0)
+ return sscanf(desc, "%" PRIx64, reinterpret_cast<uint64_t*>(value)) == 1;
+ return sscanf(desc, "%" PRIi64, value) == 1;
+}
+
+bool
+MipsDebugger::setBreakpoint(SimInstruction* breakpc)
+{
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr)
+ return false;
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool
+MipsDebugger::deleteBreakpoint(SimInstruction* breakpc)
+{
+ if (sim_->break_pc_ != nullptr)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void
+MipsDebugger::undoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+}
+
+void
+MipsDebugger::redoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+}
+
+void
+MipsDebugger::printAllRegs()
+{
+ int64_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i), value, value);
+
+ if (i % 2)
+ printf("\n");
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%016" PRIx64 " %20" PRIi64 " ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%016" PRIx64 " %20" PRIi64 "\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%016" PRIx64 "\n", value);
+}
+
+void
+MipsDebugger::printAllRegsIncludingFPU()
+{
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i),
+ getFPURegisterValueLong(i),
+ getFPURegisterValueFloat(i),
+ getFPURegisterValueDouble(i));
+ }
+}
+
+static char*
+ReadLine(const char* prompt)
+{
+ char* result = nullptr;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ if (result)
+ js_free(result);
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result = (char*)js_malloc(len + 1);
+ if (!result)
+ return nullptr;
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = (char*)js_malloc(new_len);
+ if (!new_result)
+ return nullptr;
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result, offset * sizeof(char));
+ js_free(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ memcpy(result + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result;
+}
+
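+// Disassemble the instruction word at pc by shelling out to llvm-mc, which
+// therefore has to be available on the host for this to produce output.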
+static void
+DisassembleInstruction(uint64_t pc)
+{
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mips64el -mcpu=mips64r2 | "
+ "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd))
+ printf("Cannot disassemble instruction.\n");
+}
+
+void
+MipsDebugger::debug()
+{
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!(instr->isTrap()) ||
+ instr->instructionBits() == kCallRedirInstr) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Encoding fReg = FloatRegisters::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%016" PRIi64 " %20" PRIi64 " \n", arg1, value, value);
+ } else if (fReg != FloatRegisters::Invalid) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fReg),
+ getFPURegisterValueLong(fReg),
+ getFPURegisterValueFloat(fReg),
+ getFPURegisterValueDouble(fReg));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint64_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value)))
+ printf("setting breakpoint failed\n");
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on MIPS !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address =
+ reinterpret_cast<SimInstruction*>(stop_pc +
+ SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool
+AllOnOnePage(uintptr_t start, int size)
+{
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void
+Simulator::setLastDebuggerInput(char* input)
+{
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage*
+GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
+{
+ Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p)
+ return p->value();
+
+ CachePage* new_page = js_new<CachePage>();
+ if (!i_cache.add(p, page, new_page))
+ return nullptr;
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void
+FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size)
+{
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void
+FlushICacheLocked(Simulator::ICacheMap& i_cache, void* start_addr, size_t size)
+{
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
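+ // Round the start down and the size up so that whole cache lines are flushed.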
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0)
+ FlushOnePageLocked(i_cache, start, size);
+}
+
+static void
+CheckICacheLocked(Simulator::ICacheMap& i_cache, SimInstruction* instr)
+{
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset),
+ SimInstruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber
+Simulator::ICacheHasher::hash(const Lookup& l)
+{
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool
+Simulator::ICacheHasher::match(const Key& k, const Lookup& l)
+{
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+void
+Simulator::FlushICache(void* start_addr, size_t size)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ Simulator* sim = Simulator::Current();
+ AutoLockSimulatorCache als(sim);
+ js::jit::FlushICacheLocked(sim->icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator()
+ : cacheLock_(mutexid::SimulatorCacheLock)
+{
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ resume_pc_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++)
+ registers_[i] = 0;
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++)
+ FPUregisters_[i] = 0;
+ FCSR_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++)
+ exceptions[i] = 0;
+
+ lastDebuggerInput_ = nullptr;
+
+ redirection_ = nullptr;
+}
+
+bool
+Simulator::init()
+{
+ if (!icache_.init())
+ return false;
+
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = static_cast<char*>(js_malloc(stackSize));
+ if (!stack_)
+ return false;
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
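+// The Redirection object embeds the swi instruction itself, so that
+// FromSwiInstruction() can map the instruction address back to its owning
+// Redirection via offsetof().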
+class Redirection
+{
+ friend class Simulator;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = sim->redirection();
+ if (Simulator::ICacheCheckingEnabled)
+ FlushICacheLocked(sim->icache(), addressOfSwiInstruction(), SimInstruction::kInstrSize);
+ sim->setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ Simulator* sim = Simulator::Current();
+
+ AutoLockSimulatorCache als(sim);
+
+ Redirection* current = sim->redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
+ if (!redir) {
+ MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection",
+ __FILE__, __LINE__);
+ MOZ_CRASH();
+ }
+ new(redir) Redirection(nativeFunction, type, sim);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator()
+{
+ js_free(stack_);
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */ void*
+Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type)
+{
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator*
+Simulator::Current()
+{
+ return TlsPerThreadData.get()->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void
+Simulator::setRegister(int reg, int64_t value)
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc)
+ pc_modified_ = true;
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void
+Simulator::setFpuRegister(int fpureg, int64_t value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void
+Simulator::setFpuRegisterLo(int fpureg, int32_t value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterHi(int fpureg, int32_t value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value;
+}
+
+void
+Simulator::setFpuRegisterFloat(int fpureg, float value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterDouble(int fpureg, double value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t
+Simulator::getRegister(int reg) const
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t
+Simulator::getFpuRegister(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t
+Simulator::getFpuRegisterLo(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t
+Simulator::getFpuRegisterHi(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float
+Simulator::getFpuRegisterFloat(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+double
+Simulator::getFpuRegisterDouble(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+void
+Simulator::setCallResultDouble(double result)
+{
+ setFpuRegisterDouble(f0, result);
+}
+
+void
+Simulator::setCallResultFloat(float result)
+{
+ setFpuRegisterFloat(f0, result);
+}
+
+void
+Simulator::setCallResult(int64_t res)
+{
+ setRegister(v0, res);
+}
+
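+// A 128-bit result is returned in the v0/v1 register pair, low 64 bits in v0.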
+void
+Simulator::setCallResult(__int128_t res)
+{
+ setRegister(v0, I64(res));
+ setRegister(v1, I64(res >> 64));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void
+Simulator::setFCSRBit(uint32_t cc, bool value)
+{
+ if (value)
+ FCSR_ |= (1 << cc);
+ else
+ FCSR_ &= ~(1 << cc);
+}
+
+bool
+Simulator::testFCSRBit(uint32_t cc)
+{
+ return FCSR_ & (1 << cc);
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool
+Simulator::setFCSRRoundError(double original, double rounded)
+{
+ bool ret = false;
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded)
+ setFCSRBit(kFCSRInexactFlagBit, true);
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Raw access to the PC register.
+void
+Simulator::set_pc(int64_t value)
+{
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool
+Simulator::has_bad_pc() const
+{
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t
+Simulator::get_pc() const
+{
+ return registers_[pc];
+}
+
+// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
+// interrupt is caused. On others it does a funky rotation thing. For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour. Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator. Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+uint8_t
+Simulator::readBU(uint64_t addr, SimInstruction* instr)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t
+Simulator::readB(uint64_t addr, SimInstruction* instr)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void
+Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void
+Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uint16_t
+Simulator::readHU(uint64_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t
+Simulator::readH(uint64_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t
+Simulator::readWU(uint64_t addr, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & 3) == 0) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int32_t
+Simulator::readW(uint64_t addr, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & 3) == 0) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+int64_t
+Simulator::readDW(uint64_t addr, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double
+Simulator::readD(uint64_t addr, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeD(uint64_t addr, double value, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uintptr_t
+Simulator::stackLimit() const
+{
+ return stackLimit_;
+}
+
+uintptr_t*
+Simulator::addressOfStackLimit()
+{
+ return &stackLimit_;
+}
+
+bool
+Simulator::overRecursed(uintptr_t newsp) const
+{
+ if (newsp == 0)
+ newsp = getRegister(sp);
+ return newsp <= stackLimit();
+}
+
+bool
+Simulator::overRecursedWithExtra(uint32_t extra) const
+{
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void
+Simulator::format(SimInstruction* instr, const char* format)
+{
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
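+// Each Prototype_* typedef below corresponds to one of the Args_* cases that
+// softwareInterrupt() dispatches on for redirected native calls.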
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1, int64_t arg2,
+ int64_t arg3);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+
+typedef double (*Prototype_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
+
+// Software interrupt instructions are used by the simulator to call into C++.
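+// For a redirected call the integer arguments are read from a0..a7 and the
+// floating-point arguments from f12..f15, per the N64 convention this code
+// assumes (see the USES_N64_ABI check below).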
+void
+Simulator::softwareInterrupt(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_N64_ABI)
+ MOZ_CRASH("Only N64 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int64_t arg0 = getRegister(a0);
+ int64_t arg1 = getRegister(a1);
+ int64_t arg2 = getRegister(a2);
+ int64_t arg3 = getRegister(a3);
+ int64_t arg4 = getRegister(a4);
+ int64_t arg5 = getRegister(a5);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = getRegister(ra);
+
+ intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target = reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target = reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target = reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target = reinterpret_cast<Prototype_General7>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target = reinterpret_cast<Prototype_General8>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t arg7 = getRegister(a7);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target = reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t res = target(dval0);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t res = target(dval, arg1, arg2);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(13);
+ Prototype_Int_IntDoubleIntInt target = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t res = target(arg0, dval, arg2, arg3);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Double_Double target = reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target = reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_DoubleInt target = reinterpret_cast<Prototype_DoubleInt>(external);
+ double dresult = target(dval0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_DoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_IntDouble target = reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Int_IntDouble target = reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval1);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ double dval3 = getFpuRegisterDouble(15);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool
+Simulator::isWatchpoint(uint32_t code)
+{
+ return (code <= kMaxWatchpointCode);
+}
+
+void
+Simulator::printWatchpoint(uint32_t code)
+{
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64 ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void
+Simulator::handleStop(uint32_t code, SimInstruction* instr)
+{
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool
+Simulator::isStopInstruction(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = U32(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+bool
+Simulator::isEnabledStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void
+Simulator::enableStop(uint32_t code)
+{
+ if (!isEnabledStop(code))
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+void
+Simulator::disableStop(uint32_t code)
+{
+ if (isEnabledStop(code))
+ watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+void
+Simulator::increaseStopCounter(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void
+Simulator::printStopInfo(uint32_t code)
+{
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+void
+Simulator::signalExceptions()
+{
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0)
+ MOZ_CRASH("Error: Exception raised.");
+ }
+}
+
+// Helper function for decodeTypeRegister.
+void
+Simulator::configureTypeRegister(SimInstruction* instr,
+ int64_t& alu_out,
+ __int128& i128hilo,
+ unsigned __int128& u128hilo,
+ int64_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt)
+{
+    // Every local variable declared here should be const; any value that has
+    // to reach decodeTypeRegister is passed back through the reference
+    // parameters above.
+
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+ __int128 temp;
+
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Handled in DecodeTypeImmed, should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegisterLo(fs_reg);
+ break;
+ case rs_dmfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ alu_out = getFpuRegisterHi(fs_reg);
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_dmtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = I32(rt) << sa;
+ break;
+ case ff_dsll:
+ alu_out = rt << sa;
+ break;
+ case ff_dsll32:
+ alu_out = rt << (sa + 32);
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = I32(U32(rt) >> sa);
+ } else {
+                // Logical right-rotate of a word by a fixed number of bits.
+                // This is a special case of the SRL instruction, added in
+                // MIPS32 Release 2. RS field is equal to 00001.
+ alu_out = I32((U32(rt) >> sa) | (U32(rt) << (32 - sa)));
+ }
+ break;
+ case ff_dsrl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> sa;
+ } else {
+                // Logical right-rotate of a double word by a fixed number of
+                // bits. This is a special case of the DSRL instruction, added
+                // in MIPS64 Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> sa) | (U64(rt) << (64 - sa));
+ }
+ break;
+ case ff_dsrl32:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> (sa + 32);
+ } else {
+                // Logical right-rotate of a double word by a fixed number of
+                // bits. This is a special case of the DSRL32 instruction, added
+                // in MIPS64 Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> (sa + 32)) | (U64(rt) << (64 - (sa + 32)));
+ }
+ break;
+ case ff_sra:
+ alu_out = I32(rt) >> sa;
+ break;
+ case ff_dsra:
+ alu_out = rt >> sa;
+ break;
+ case ff_dsra32:
+ alu_out = rt >> (sa + 32);
+ break;
+ case ff_sllv:
+ alu_out = I32(rt) << rs;
+ break;
+ case ff_dsllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = I32(U32(rt) >> rs);
+ } else {
+                // Logical right-rotate of a word by a variable number of bits.
+                // This is a special case of the SRLV instruction, added in MIPS32
+                // Release 2. SA field is equal to 00001.
+ alu_out = I32((U32(rt) >> rs) | (U32(rt) << (32 - rs)));
+ }
+ break;
+ case ff_dsrlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a double word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = U64(rt) >> rs;
+ } else {
+                // Logical right-rotate of a double word by a variable number of bits.
+                // This is a special case of the DSRLV instruction, added in MIPS64
+                // Release 2. SA field is equal to 00001.
+ alu_out = (U64(rt) >> rs) | (U64(rt) << (64 - rs));
+ }
+ break;
+ case ff_srav:
+ alu_out = I32(rt) >> rs;
+ break;
+ case ff_dsrav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i128hilo = I32(rs) * I32(rt);
+ break;
+ case ff_dmult:
+ i128hilo = I128(rs) * I128(rt);
+ break;
+ case ff_multu:
+ u128hilo = U32(rs) * U32(rt);
+ break;
+ case ff_dmultu:
+ u128hilo = U128(rs) * U128(rt);
+ break;
+ case ff_add:
+ alu_out = I32(rs) + I32(rt);
+ if ((alu_out << 32) != (alu_out << 31))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I32(alu_out);
+ break;
+ case ff_dadd:
+ temp = I128(rs) + I128(rt);
+ if ((temp << 64) != (temp << 63))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I64(temp);
+ break;
+ case ff_addu:
+ alu_out = I32(U32(rs) + U32(rt));
+ break;
+ case ff_daddu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ alu_out = I32(rs) - I32(rt);
+ if ((alu_out << 32) != (alu_out << 31))
+ exceptions[kIntegerUnderflow] = 1;
+ alu_out = I32(alu_out);
+ break;
+ case ff_dsub:
+ temp = I128(rs) - I128(rt);
+ if ((temp << 64) != (temp << 63))
+ exceptions[kIntegerUnderflow] = 1;
+ alu_out = I64(temp);
+ break;
+ case ff_subu:
+ alu_out = I32(U32(rs) - U32(rt));
+ break;
+ case ff_dsubu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = U64(rs) < U64(rt) ? 1 : 0;
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = U64(rs) >= U64(rt);
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = U64(rs) < U64(rt);
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ if (I32(rs) == INT_MIN && I32(rt) == -1) {
+ i128hilo = U32(INT_MIN);
+ } else {
+ uint32_t div = I32(rs) / I32(rt);
+ uint32_t mod = I32(rs) % I32(rt);
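+                // Pack the remainder into the high 32 bits and the quotient
+                // into the low 32 bits; they are moved to HI/LO in the
+                // execution step.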
+ i128hilo = (I64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddiv:
+            if (rs == INT64_MIN && rt == -1) {
+ i128hilo = U64(INT64_MIN);
+ } else {
+ uint64_t div = rs / rt;
+ uint64_t mod = rs % rt;
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ case ff_divu: {
+ uint32_t div = U32(rs) / U32(rt);
+ uint32_t mod = U32(rs) % U32(rt);
+ i128hilo = (U64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddivu:
+ if (0 == rt) {
+ i128hilo = (I128(Unpredictable) << 64) | I64(Unpredictable);
+ } else {
+ uint64_t div = U64(rs) / U64(rt);
+ uint64_t mod = U64(rs) % U64(rt);
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = I32(I32(rs) * I32(rt)); // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = U32(rs) ? __builtin_clz(U32(rs)) : 32;
+ break;
+ case ff_dclz:
+ alu_out = U64(rs) ? __builtin_clzl(U64(rs)) : 64;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
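+            // INS replaces bits msb..lsb of rt with the low 'size' bits of rs;
+            // e.g. msb=7, lsb=4 copies the low four bits of rs into rt[7:4].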
+ if (lsb > msb)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U32(rt) & ~(mask << lsb)) | ((U32(rs) & mask) << lsb);
+ break;
+ }
+ case ff_dins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if (lsb > msb)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ break;
+ }
+ case ff_dinsm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ break;
+ }
+ case ff_dinsu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if (sa > msb)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ break;
+ }
+ case ff_ext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ if ((lsb + msb) > 31)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U32(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_dext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_dextm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 32 + 1) > 64)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_dextu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 1) > 64)
+ alu_out = Unpredictable;
+ else
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_bshfl: { // Mips32r2 instruction.
+ if (16 == sa) // seb
+ alu_out = I64(I8(rt));
+ else if (24 == sa) // seh
+ alu_out = I64(I16(rt));
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ default:
+ MOZ_CRASH();
+ };
+}
+
+// Handle execution based on instruction types.
+void
+Simulator::decodeTypeRegister(SimInstruction* instr)
+{
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
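+    // 128-bit containers for the HI/LO results of multiply and divide,
+    // filled in by configureTypeRegister.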
+ __int128 i128hilo = 0;
+ unsigned __int128 u128hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int64_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc
+ int64_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr,
+ alu_out,
+ i128hilo,
+ u128hilo,
+ next_pc,
+ return_addr_reg,
+ do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+            setRegister(rt_reg, alu_out);
+            break;
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_dmfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ setFpuRegisterLo(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_dmtc1:
+ setFpuRegister(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_mthc1:
+ setFpuRegisterHi(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+          case ff_round_w_fmt: { // Round float to word (round half to even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+          case ff_ceil_w_fmt: { // Round float to word towards positive infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: { // Mips64r2: Truncate float to 64-bit long-word.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ float rounded =
+ fs_value > 0 ? std::floor(fs_value + 0.5) : std::ceil(fs_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips64r2 instruction.
+ i64 = I64(std::floor(fs_value));
+ setFpuRegister(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips64r2 instruction.
+ i64 = I64(std::ceil(fs_value));
+ setFpuRegister(fd_reg, i64);
+ break;
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ dt_value = getFpuRegisterDouble(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round double to word (round half to even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded))
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards 0).
+ double rounded = trunc(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded))
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded))
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded))
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt: { // Mips64r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ double rounded = trunc(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ double rounded =
+ ds_value > 0 ? std::floor(ds_value + 0.5) : std::ceil(ds_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips64r2 instruction.
+ i64 = I64(std::floor(ds_value));
+ setFpuRegister(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips64r2 instruction.
+ i64 = I64(std::ceil(ds_value));
+ setFpuRegister(fd_reg, i64);
+ break;
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt: // Mips64r2 instruction.
+ i64 = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ case ff_cvt_s_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ setRegister(return_addr_reg, current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_dmult:
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_multu:
+ setRegister(LO, I32(u128hilo & 0xffffffff));
+ setRegister(HI, I32(u128hilo >> 32));
+ break;
+ case ff_dmultu:
+ setRegister(LO, I64(u128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(u128hilo >> 64));
+ break;
+ case ff_div:
+ case ff_divu:
+            // Divide by zero and overflow were not checked in the configuration
+            // step - div and divu do not raise exceptions. On division by 0
+            // the result is UNPREDICTABLE. On overflow (INT_MIN/-1), return
+            // INT_MIN, which is what the hardware does.
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_ddiv:
+ case ff_ddivu:
+            // Divide by zero and overflow were not checked in the configuration
+            // step - ddiv and ddivu do not raise exceptions. On division by 0
+            // the result is UNPREDICTABLE. On overflow (INT64_MIN/-1), return
+            // INT64_MIN, which is what the hardware does.
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt)
+ setRegister(rd_reg, rs);
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc))
+ setRegister(rd_reg, rs);
+ } else {
+ if (!testFCSRBit(fcsr_cc))
+ setRegister(rd_reg, rs);
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt)
+ setRegister(rd_reg, rs);
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_bshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in common
+ // cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void
+Simulator::decodeTypeImmediate(SimInstruction* instr)
+{
+ // Instruction fields.
+ Opcode op = instr->opcodeFieldRaw();
+ int64_t rs = getRegister(instr->rsValue());
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int64_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int64_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint64_t addr = 0x0;
+ // Value to be written in memory.
+ uint64_t mem_value = 0x0;
+ __int128 temp;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch)
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ else
+ next_pc = current_pc + kBranchReturnOffset;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction())
+ setRegister(31, current_pc + kBranchReturnOffset);
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ default:
+ break;
+ };
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we don't
+ // need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ alu_out = I32(rs) + se_imm16;
+ if ((alu_out << 32) != (alu_out << 31))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I32(alu_out);
+ break;
+ case op_daddi:
+ temp = alu_out = rs + se_imm16;
+ if ((temp << 64) != (temp << 63))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I64(temp);
+ break;
+ case op_addiu:
+ alu_out = I32(I32(rs) + se_imm16);
+ break;
+ case op_daddiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (U64(rs) < U64(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (se_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr, instr);
+ break;
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr, instr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwu:
+ addr = rs + se_imm16;
+ alu_out = readWU(addr, instr);
+ break;
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lwl: {
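+        // LWL supplies the most-significant bytes of an unaligned word; the
+        // matching LWR below supplies the least-significant bytes, so the pair
+        // assembles a full unaligned 32-bit load.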
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lwr: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = U32(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ld:
+ addr = rs + se_imm16;
+ alu_out = readDW(addr, instr);
+ break;
+ case op_ldl: {
+        // al_offset is the offset of the effective address within an aligned double word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = (1ul << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ldr: {
+        // al_offset is the offset of the effective address within an aligned double word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = al_offset ? (~0ul << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out = U64(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U32(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_sc:
+ addr = rs + se_imm16;
+ break;
+ case op_sd:
+ addr = rs + se_imm16;
+ break;
+ case op_sdl: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = byte_shift ? (~0ul << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+        mem_value = readDW(addr, instr) & mask;
+ mem_value |= U64(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sdr: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint64_t mask = (1ul << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+        mem_value = readDW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
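+        // Branch offsets are in instruction words and are taken relative to
+        // the address of the delay slot, hence the extra kInstrSize.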
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ case op_lb:
+ case op_lhu:
+ case op_lh:
+ case op_lwu:
+ case op_lw:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, I8(rt), instr);
+ break;
+ case op_sh:
+ writeH(addr, U16(rt), instr);
+ break;
+ case op_sw:
+ writeW(addr, I32(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_swr:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_sc:
+ writeW(addr, I32(rt), instr);
+ setRegister(rt_reg, 1);
+ break;
+ case op_sd:
+ writeDW(addr, rt, instr);
+ break;
+ case op_sdl:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_sdr:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegisterLo(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ writeW(addr, getFpuRegisterLo(ft_reg), instr);
+ break;
+ case op_sdc1:
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ };
+
+
+ if (execute_branch_delay_instruction) {
+ // Execute branch delay slot
+ // We don't check for end_sim_pc. First it should not be met as the current
+ // pc is valid. Secondly a jump should always execute its branch delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra)
+ set_pc(next_pc);
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void
+Simulator::decodeTypeJump(SimInstruction* instr)
+{
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int64_t pc_high_bits = current_pc & 0xfffffffff0000000ul;
+ // Next pc.
+ int64_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
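+    // The jump target keeps the upper bits of the current pc and replaces the
+    // low 28 bits with the 26-bit immediate shifted left by two.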
+
+ // Execute branch delay slot.
+ // We don't check for end_sim_pc. First it should not be met as the current pc
+ // is valid. Secondly a jump should always execute its branch delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction())
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void
+Simulator::instructionDecode(SimInstruction* instr)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ AutoLockSimulatorCache als(this);
+ CheckICacheLocked(icache(), instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_)
+ setRegister(pc, reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+}
+
+void
+Simulator::branchDelayInstructionDecode(SimInstruction* instr)
+{
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, (void*)instr);
+
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+        MOZ_CRASH("Error: Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+void
+Simulator::enable_single_stepping(SingleStepCallback cb, void* arg)
+{
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void
+Simulator::disable_single_stepping()
+{
+ if (!single_stepping_)
+ return;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template<bool enableStopSimAt>
+void
+Simulator::execute()
+{
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ WasmActivation* activation = TlsPerThreadData.get()->runtimeFromMainThread()->wasmActivationStack();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+ SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+
+ int64_t rpc = resume_pc_;
+ if (MOZ_UNLIKELY(rpc != 0)) {
+ // wasm signal handler ran and we have to adjust the pc.
+ activation->setResumePC((void*)get_pc());
+ set_pc(rpc);
+ resume_pc_ = 0;
+ }
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+}
+
+void
+Simulator::callInternal(uint8_t* entry)
+{
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+    // Put down a marker for the end of simulation. The simulator will stop
+    // simulating when the PC reaches this value. By saving the "end simulation"
+    // value in ra, the simulation stops when execution returns to this call point.
+ setRegister(ra, end_sim_pc);
+
+    // Remember the values of the callee-saved registers so they can be
+    // restored (and checked) after the simulated call returns.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t fp_val = getRegister(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1)
+ execute<true>();
+ else
+ execute<false>();
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t
+Simulator::call(uint8_t* entry, int argument_count, ...)
+{
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount)
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ else
+ entry_stack = entry_stack - kCArgsSlotsSize;
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg))
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ else
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(v0);
+ return result;
+}
+
+uintptr_t
+Simulator::pushAddress(uintptr_t address)
+{
+    int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t
+Simulator::popAddress()
+{
+    int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator*
+JSRuntime::simulator() const
+{
+ return simulator_;
+}
+
+js::jit::Simulator*
+js::PerThreadData::simulator() const
+{
+ return runtime_->simulator();
+}
+
+uintptr_t*
+JSRuntime::addressOfSimulatorStackLimit()
+{
+ return simulator_->addressOfStackLimit();
+}
diff --git a/js/src/jit/mips64/Simulator-mips64.h b/js/src/jit/mips64/Simulator-mips64.h
new file mode 100644
index 000000000..de1930c91
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -0,0 +1,440 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips64_Simulator_mips64_h
+#define jit_mips64_Simulator_mips64_h
+
+#ifdef JS_SIMULATOR_MIPS64
+
+#include "jit/IonTypes.h"
+#include "threading/Thread.h"
+#include "vm/MutexIDs.h"
+
+namespace js {
+namespace jit {
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
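+//
+// Illustrative use (names are hypothetical):
+//   static void TraceStep(void* arg, Simulator* sim, void* pc) { /* inspect pc */ }
+//   sim->enable_single_stepping(TraceStep, nullptr);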
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On MIPS64 Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
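+// For example, with the values above, break 5 prints the registers (watchpoint),
+// break 64 behaves as a stop, and break 200 drops into the debugger.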
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+class Simulator {
+ friend class Redirection;
+ friend class MipsDebugger;
+ friend class AutoLockSimulatorCache;
+ public:
+
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, f16, f17, f18, f19, f20, f21,
+ f22, f23, f24, f25, f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create(JSContext* cx);
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+    // Accessors for register state. Reading the pc value adheres to the MIPS
+    // architecture specification and is off by 8 from the currently executing
+    // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterLo(int fpureg, int32_t value);
+ void setFpuRegisterHi(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ int64_t getFpuRegister(int fpureg) const;
+ int32_t getFpuRegisterLo(int fpureg) const;
+ int32_t getFpuRegisterHi(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+ void set_resume_pc(void* value) {
+ resume_pc_ = int64_t(value);
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template<bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
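+    // Illustrative call (code and arguments are hypothetical):
+    //   int64_t rv = Simulator::Current()->call(code, 2, int64_t(a0), int64_t(a1));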
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+ // ICache checking.
+ static void FlushICache(void* start, size_t size);
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+        // Known bad pc value to ensure that the simulator does not execute
+        // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+    // Unsupported instructions use format() to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint8_t readBU(uint64_t addr, SimInstruction* instr);
+ inline int8_t readB(uint64_t addr, SimInstruction* instr);
+ inline void writeB(uint64_t addr, uint8_t value, SimInstruction* instr);
+ inline void writeB(uint64_t addr, int8_t value, SimInstruction* instr);
+
+ inline uint16_t readHU(uint64_t addr, SimInstruction* instr);
+ inline int16_t readH(uint64_t addr, SimInstruction* instr);
+ inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint32_t readWU(uint64_t addr, SimInstruction* instr);
+ inline int32_t readW(uint64_t addr, SimInstruction* instr);
+ inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr);
+ inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+ inline int64_t readDW(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWL(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWR(uint64_t addr, SimInstruction* instr);
+ inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
+
+ inline double readD(uint64_t addr, SimInstruction* instr);
+ inline void writeD(uint64_t addr, double value, SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr,
+ int64_t& alu_out,
+ __int128& i128hilo,
+ unsigned __int128& u128hilo,
+ int64_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ // Executing is handled based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static bool ICacheCheckingEnabled;
+
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction, ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ int64_t resume_pc_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled (the simulator will stop when it reaches the
+ // instruction) if bit 31 of watchedStops_[code].count_ is unset.
+ // The value watchedStops_[code].count_ & ~(1U << 31) indicates how many
+ // times the breakpoint has been hit or stepped through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
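+
+ // A worked example of this encoding (an illustrative sketch; the helper
+ // implementations live in the accompanying .cpp file): a watched stop with
+ // code 42 that has been hit five times and then disabled would satisfy
+ //   watchedStops_[42].count_ == (kStopDisabledBit | 5)
+ // so isEnabledStop(42) amounts to testing (count_ & kStopDisabledBit) == 0,
+ // while increaseStopCounter(42) only ever updates the low 31 bits.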
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ private:
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+#ifdef DEBUG
+ mozilla::Maybe<Thread::Id> cacheLockHolder_;
+#endif
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return icache_;
+ }
+
+ Redirection* redirection() const {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return redirection_;
+ }
+
+ void setRedirection(js::jit::Redirection* redirection) {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ redirection_ = redirection;
+ }
+};
+
+#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
+ JS_BEGIN_MACRO \
+ if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \
+ js::ReportOverRecursed(cx); \
+ onerror; \
+ } \
+ JS_END_MACRO
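+
+// A minimal usage sketch for the macro above (the function name and the byte
+// count are hypothetical, for illustration only):
+//
+//   static bool
+//   ExampleGuardedSimulatorEntry(JSContext* cx)
+//   {
+//       JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, /* extra = */ 1024, return false);
+//       // The simulator stack has at least ~1024 bytes of headroom here.
+//       return true;
+//   }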
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS64 */
+
+#endif /* jit_mips64_Simulator_mips64_h */
diff --git a/js/src/jit/mips64/Trampoline-mips64.cpp b/js/src/jit/mips64/Trampoline-mips64.cpp
new file mode 100644
index 000000000..70a375019
--- /dev/null
+++ b/js/src/jit/mips64/Trampoline-mips64.cpp
@@ -0,0 +1,1363 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/mips64/Bailouts-mips64.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean.");
+
+struct EnterJITRegs
+{
+ double f31;
+ double f30;
+ double f29;
+ double f28;
+ double f27;
+ double f26;
+ double f25;
+ double f24;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+ uint64_t s0;
+ // Save reg_vp (a7) on the stack; it is used after the call into JIT code.
+ uint64_t a7;
+};
+
+static void
+GenerateReturn(MacroAssembler& masm, int returnCode)
+{
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gslq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gslq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gslq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gslq(s7, ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_gslq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gslq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gslq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gslq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ } else {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_ld(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_ld(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_ld(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_ld(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_ld(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_ld(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_ld(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_ld(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_ld(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_ld(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_ld(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_ld(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_ld(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_ld(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_ld(f31, StackPointer, offsetof(EnterJITRegs, f31));
+ }
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void
+GeneratePrologue(MacroAssembler& masm)
+{
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ masm.as_gssq(a7, s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gssq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gssq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gssq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gssq(s7, ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ masm.as_gssq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gssq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gssq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gssq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ return;
+ }
+
+ masm.as_sd(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_sd(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_sd(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_sd(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_sd(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_sd(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_sd(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_sd(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sd(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sd(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sd(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_sd(f31, StackPointer, offsetof(EnterJITRegs, f31));
+}
+
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard MIPS64
+// calling convention.
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
+ MacroAssembler masm(cx);
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer into s4
+ masm.movePtr(StackPointer, s4);
+
+ // Save stack pointer as baseline frame.
+ if (type == EnterJitBaseline)
+ masm.movePtr(StackPointer, BaselineFrameReg);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // If we are constructing, the argument count also needs to include |new.target|.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // Make the stack aligned: if argc is odd, reserve one extra Value slot so
+ // the pushed arguments keep the stack JitStackAlignment-aligned.
+ masm.ma_and(s0, reg_argc, Imm32(1));
+ masm.ma_dsubu(s1, StackPointer, Imm32(sizeof(Value)));
+ masm.as_movn(StackPointer, s1, s0);
+
+ masm.as_dsll(s0, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s0); // s0 = &argv[argc]
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s0);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments
+ masm.storePtr(reg_token, Address(StackPointer, 0)); // callee token
+
+ masm.subPtr(StackPointer, s4);
+ masm.makeFrameDescriptor(s4, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(s4); // descriptor
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(OsrFrameReg);
+ regs.take(BaselineFrameReg);
+ regs.take(reg_code);
+ regs.take(ReturnReg);
+ regs.take(JSReturnOperand);
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, returnLabel.patchAt());
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = BaselineFrameReg;
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+ masm.movePtr(StackPointer, framePtr);
+
+ // Reserve space for locals and stack values.
+ masm.ma_dsll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+
+ // Push frame descriptor and fake return address.
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
+
+ // No GC things to mark, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(BaselineFrameReg); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.ma_daddu(realFramePtr, framePtr, Imm32(sizeof(void*)));
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.ma_li(scratch, oomReturnLabel.patchAt());
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.ma_move(R1.scratchReg(), reg_chain);
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call the function, pushing the return address onto the stack.
+ masm.callJitNoProfiler(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.bind(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.bind(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments off the stack.
+ // s0 <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(s0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0);
+ masm.addPtr(s0, StackPointer);
+
+ // Store the returned value into the vp
+ masm.as_ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+
+ Linker linker(masm);
+ AutoFlushICache afc("GenerateEnterJIT");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
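+
+// A minimal sketch of the frame descriptor encoding the code above relies on
+// (frame size in the high bits, frame type in the low FRAMETYPE_BITS bits);
+// this is why popping the descriptor and shifting by FRAMESIZE_SHIFT recovers
+// the size of the pushed arguments:
+//
+//   descriptor = (size << FRAMESIZE_SHIFT) | type;
+//   size       = descriptor >> FRAMESIZE_SHIFT;
+//   type       = descriptor & ((1 << FRAMETYPE_BITS) - 1);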
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ // The stack has to be aligned here. If it is not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve space for the return value and the BailoutInfo pointer.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to return value.
+ masm.ma_daddu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a2);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.passABIArg(a2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.loadPtr(Address(StackPointer, 0), a2);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+ // Remove the return address, the IonScript, the register state
+ // (InvalidationBailoutStack) and the space that was allocated for the
+ // return value.
+ masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer);
+ // remove the space that this frame was using before the bailout
+ // (computed by InvalidationBailout)
+ masm.addPtr(a1, StackPointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
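+
+// In the invalidator above, the two reserved words are outparams for
+// InvalidationBailout: the word at StackPointer receives the BailoutInfo
+// pointer (reloaded into a2) and the word above it receives the size of the
+// invalidated Ion frame (reloaded into a1), so the final adjustment is
+// effectively
+//
+//   StackPointer += sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t) + frameSize;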
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ // Do not erase the frame pointer in this function.
+
+ MacroAssembler masm(cx);
+ masm.pushReturnAddress();
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+ // '--- s3 ---'
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current
+ // frame. Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == s3);
+
+ // Add |this| to the count of known arguments.
+ masm.addPtr(Imm32(1), ArgumentsRectifierReg);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg);
+
+ // Stash another copy in t3, since we are going to do destructive operations
+ // on numArgsReg
+ masm.mov(numArgsReg, t3);
+
+ static_assert(CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+ // Including |this| and |new.target|, there are (|nformals| + 1 + isConstructing)
+ // arguments to push to the stack. Then we push a JitFrameLayout. We
+ // compute the padding expressed in the number of extra |undefined| values
+ // to push on the stack.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */), numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
+
+ // Load the number of |undefined|s to push into t1.
+ masm.as_dsubu(t1, numArgsReg, s3);
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp <- t2
+ // '------ s3 -------'
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // '-------- t1 ---------' '------- s3 -------'
+
+ // Copy number of actual arguments into numActArgsReg
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
+ numActArgsReg);
+
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+ masm.movePtr(StackPointer, t2); // Save %sp.
+
+ // Push |undefined| values (including the padding).
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Get the topmost argument.
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // The |- sizeof(Value)| is there so that t1 points at the last argument,
+ // and not at the value that comes after it.
+ masm.ma_dsll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_daddu(t1, t2, t0); // t1 <- t2(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout) - sizeof(Value)), t1);
+
+ // Copy & Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // If constructing, copy |new.target|.
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // +1 for |this|. We want vp[argc], so don't subtract 1
+ BaseIndex newTargetSrc(t2, numActArgsReg, TimesEight, sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- t2
+ //
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [[argc] [callee] [descr] [raddr]]
+
+ // Construct sizeDescriptor.
+ masm.subPtr(StackPointer, t2);
+ masm.makeFrameDescriptor(t2, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+ // Push actual arguments.
+ masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+ // Push callee token.
+ masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+ // Push frame descriptor.
+ masm.storePtr(t2, Address(StackPointer, 0));
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1);
+ masm.loadBaselineOrIonRaw(t1, t1, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(t1);
+
+ // Remove the rectifier frame.
+ // t2 <- descriptor with FrameType.
+ masm.loadPtr(Address(StackPointer, 0), t2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t2); // t2 <- descriptor.
+
+ // Discard descriptor, calleeToken and number of actual arguments.
+ masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+
+ // Discard pushed arguments.
+ masm.addPtr(t2, StackPointer);
+
+ masm.ret();
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ return code;
+}
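+
+// A worked sketch of the padding computation in the rectifier above, assuming
+// JitStackValueAlignment == 2 (sizeof(Value) == 8, JitStackAlignment == 16).
+// The generated code effectively computes
+//
+//   slots      = (nformals + 1 /* |this| */ + isConstructing
+//                 + (JitStackValueAlignment - 1)) & ~(JitStackValueAlignment - 1);
+//   undefineds = slots - (nargs + 1 /* |this| */);
+//
+// so, for example, nformals == 3, nargs == 1 and not constructing gives
+// slots == 4 and two |undefined| values pushed before the copied arguments.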
+
+/* When a bailout is done via out-of-line code (lazy bailout), the frame size
+ * is stored in $ra (see CodeGeneratorMIPS64::generateOutOfLineCode()) and the
+ * thunk code should save it on the stack. In addition, the members
+ * snapshotOffset_ and padding_ are pushed to the stack by
+ * CodeGeneratorMIPS64::visitOutOfLineBailout(), and the field frameClassId_ is
+ * forced to be NO_FRAME_SIZE_CLASS_ID (see JitRuntime::generateBailoutHandler).
+ */
+static void
+PushBailoutFrame(MacroAssembler& masm, Register spArg)
+{
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorMIPS64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass a pointer to the BailoutStack as the first argument to Bailout().
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, a0);
+
+ // Reserve space for the BailoutInfo pointer and pass its address in a1.
+ static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t) * 2;
+ masm.subPtr(Imm32(sizeOfBailoutInfo), StackPointer);
+ masm.movePtr(StackPointer, a1);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Stack is:
+ // [frame]
+ // snapshotOffset
+ // frameSize
+ // [bailoutFrame]
+ // [bailoutInfo]
+ //
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ // Load frameSize from stack
+ masm.loadPtr(Address(StackPointer,
+ sizeOfBailoutInfo + BailoutStack::offsetOfFrameSize()), a1);
+ // Remove complete BailoutStack class and data after it
+ masm.addPtr(Imm32(sizeof(BailoutStack) + sizeOfBailoutInfo), StackPointer);
+ // Remove the frame size from the stack.
+ masm.addPtr(a1, StackPointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
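+
+// A sketch of the stack arithmetic in GenerateBailoutThunk, following the
+// layout in the comment above: right after the ABI call, StackPointer points
+// at the reserved BailoutInfo slot, so
+//
+//   frameSize    = *(StackPointer + sizeOfBailoutInfo + BailoutStack::offsetOfFrameSize());
+//   StackPointer += sizeOfBailoutInfo + sizeof(BailoutStack) + frameSize;
+//
+// which removes the bailout frame and the bailed-out Ion frame's stack in one
+// step before branching to the shared bailout tail.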
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MOZ_CRASH("MIPS64 does not use bailout tables");
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ MacroAssembler masm(cx);
+
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall)
+ masm.pushReturnAddress();
+
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_daddu(argsBase, StackPointer, Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ outReg = regs.takeAny();
+ // The outparam is 4 bytes; reserve 8 bytes so the stack stays 8-byte aligned.
+ masm.reserveStack(2 * sizeof(int32_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg))
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ else
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ case VMFunction::DoubleByRef:
+ MOZ_CRASH("NYI: MIPS64 callVM should not be used with 128bits values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (InvalidReg != outReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.as_ld(ReturnDoubleReg, StackPointer, 0);
+ } else {
+ masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
+ }
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
+ // to use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(IonMarkFunction(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ // Load BaselineFrame pointer in scratch1.
+ masm.movePtr(s5, scratch1);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t)));
+ masm.storePtr(scratch1, Address(StackPointer, 0));
+
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+
+ // ra was restored by EmitBaselineLeaveStubFrame.
+ masm.branch(ra);
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.movePtr(s5, StackPointer);
+ masm.pop(s5);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(a1, a2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+ Register scratch4 = t3;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1| and figure out what to do depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.ma_and(scratch2, scratch1, Imm32((1 << FRAMETYPE_BITS) - 1));
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+ // We are returning directly to an IonJS frame. Store the return address
+ // into lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_daddu(scratch2, StackPointer, scratch1);
+ masm.ma_daddu(scratch2, scratch2, Imm32(JitFrameLayout::Size()));
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ masm.as_daddu(scratch3, StackPointer, scratch1);
+ Address stubFrameReturnAddr(scratch3,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_daddu(scratch2, StackPointer, scratch1);
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.ma_dsrl(scratch1, scratch3, Imm32(FRAMESIZE_SHIFT));
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.as_daddu(scratch3, scratch2, scratch1);
+ masm.addPtr(Imm32(RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.as_daddu(scratch3, scratch2, scratch1);
+ Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.as_daddu(scratch2, StackPointer, scratch1);
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.as_daddu(scratch1, scratch2, scratch3);
+ masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("ProfilerExitFrameTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}
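+
+// A C++ sketch of the walk the stub above performs for the common
+// JitFrame_IonJS case (the generated code does the same work in registers;
+// the other cases only differ in how far past the descriptor they look, and
+// the Entry case stores nullptr into both fields):
+//
+//   uint8_t* fp = currentStackPointer;
+//   uintptr_t descriptor = *(uintptr_t*)(fp + JitFrameLayout::offsetOfDescriptor());
+//   uintptr_t size = descriptor >> FRAMESIZE_SHIFT;
+//   lastProfilingCallSite = *(void**)(fp + JitFrameLayout::offsetOfReturnAddress());
+//   lastProfilingFrame    = fp + size + JitFrameLayout::Size();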
diff --git a/js/src/jit/none/Architecture-none.h b/js/src/jit/none/Architecture-none.h
new file mode 100644
index 000000000..0fa54793c
--- /dev/null
+++ b/js/src/jit/none/Architecture-none.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_Architecture_none_h
+#define jit_none_Architecture_none_h
+
+// JitSpewer.h is included through MacroAssembler implementations for other
+// platforms, so include it here to avoid inadvertent build bustage.
+#include "jit/JitSpewer.h"
+
+namespace js {
+namespace jit {
+
+static const bool SupportsSimd = false;
+static const uint32_t SimdMemoryAlignment = 4; // Make it 4 to avoid a bunch of div-by-zero warnings
+static const uint32_t WasmStackAlignment = 8;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+class Registers
+{
+ public:
+ enum RegisterID {
+ r0 = 0,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ typedef uint8_t SetType;
+
+ static uint32_t SetSize(SetType) { MOZ_CRASH(); }
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Encoding StackPointer = invalid_reg;
+ static const Encoding Invalid = invalid_reg;
+ static const uint32_t Total = 1;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType ArgRegMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+ static const SetType TempMask = 0;
+ static const SetType JSCallMask = 0;
+ static const SetType CallMask = 0;
+};
+
+typedef uint8_t PackedRegisterMask;
+
+class FloatRegisters
+{
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ invalid_reg
+ };
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+ union RegisterContent {
+ double d;
+ };
+
+ typedef uint32_t SetType;
+
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Code Invalid = invalid_reg;
+ static const uint32_t Total = 0;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType AllDoubleMask = 0;
+ static const SetType AllSingleMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister
+{
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ Code _;
+
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static FloatRegister FromCode(uint32_t) { MOZ_CRASH(); }
+ bool isSingle() const { MOZ_CRASH(); }
+ bool isDouble() const { MOZ_CRASH(); }
+ bool isSimd128() const { MOZ_CRASH(); }
+ FloatRegister asSingle() const { MOZ_CRASH(); }
+ FloatRegister asDouble() const { MOZ_CRASH(); }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+ Code code() const { MOZ_CRASH(); }
+ Encoding encoding() const { MOZ_CRASH(); }
+ const char* name() const { MOZ_CRASH(); }
+ bool volatile_() const { MOZ_CRASH(); }
+ bool operator != (FloatRegister) const { MOZ_CRASH(); }
+ bool operator == (FloatRegister) const { MOZ_CRASH(); }
+ bool aliases(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t numAliased() const { MOZ_CRASH(); }
+ void aliased(uint32_t, FloatRegister*) { MOZ_CRASH(); }
+ bool equiv(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t size() const { MOZ_CRASH(); }
+ uint32_t numAlignedAliased() const { MOZ_CRASH(); }
+ void alignedAliased(uint32_t, FloatRegister*) { MOZ_CRASH(); }
+ SetType alignedOrDominatedAliasedSet() const { MOZ_CRASH(); }
+ template <typename T> static T ReduceSetForPush(T) { MOZ_CRASH(); }
+ uint32_t getRegisterDumpOffsetInBytes() { MOZ_CRASH(); }
+ static uint32_t SetSize(SetType x) { MOZ_CRASH(); }
+ static Code FromName(const char* name) { MOZ_CRASH(); }
+
+ // This is used in static initializers, so produce a bogus value instead of crashing.
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>&) { return 0; }
+};
+
+inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
+inline bool hasMultiAlias() { MOZ_CRASH(); }
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+#ifdef JS_NUNBOX32
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_Architecture_none_h */
diff --git a/js/src/jit/none/AtomicOperations-none.h b/js/src/jit/none/AtomicOperations-none.h
new file mode 100644
index 000000000..f08ccf9e0
--- /dev/null
+++ b/js/src/jit/none/AtomicOperations-none.h
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_none_AtomicOperations_none_h
+#define jit_none_AtomicOperations_none_h
+
+#include "mozilla/Assertions.h"
+
+// A "none" build is never run (ref IRC discussion with h4writer) and
+// all functions here can therefore MOZ_CRASH, even if they are
+// referenced from other than jitted code.
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+ MOZ_CRASH();
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ MOZ_CRASH();
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ MOZ_CRASH();
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ MOZ_CRASH();
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ MOZ_CRASH();
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+ (void)spinlock; // Remove a lot of "unused" warnings.
+ MOZ_CRASH();
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_CRASH();
+}
+
+#endif // jit_none_AtomicOperations_none_h
diff --git a/js/src/jit/none/AtomicOperations-ppc.h b/js/src/jit/none/AtomicOperations-ppc.h
new file mode 100644
index 000000000..0f688445b
--- /dev/null
+++ b/js/src/jit/none/AtomicOperations-ppc.h
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_ppc_AtomicOperations_ppc_h
+#define jit_ppc_AtomicOperations_ppc_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
+// compatible option for older compilers: enable this to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+# if defined(__ppc64__) || defined(__PPC64__) || \
+ defined(__ppc64le__) || defined(__PPC64LE__)
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+# endif
+ return true;
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ ;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ // On failure, __atomic_compare_exchange writes the value it observed into
+ // 'zero' (the expected operand), so reset it before retrying.
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1);
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
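+
+// Usage sketch (the caller, 'lock', and 'addr' are hypothetical): a RegionLock
+// serializes access to a memory region when no suitably wide hardware atomic
+// is available, roughly as
+//
+//   js::jit::RegionLock lock;
+//   lock.acquire<8>(addr);   // spin until the region is owned
+//   // ... non-atomic access to the 8-byte region at addr ...
+//   lock.release<8>(addr);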
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory, use GCC or Clang, or add code here"
+
+#endif
+
+#endif // jit_ppc_AtomicOperations_ppc_h
diff --git a/js/src/jit/none/AtomicOperations-sparc.h b/js/src/jit/none/AtomicOperations-sparc.h
new file mode 100644
index 000000000..706ada862
--- /dev/null
+++ b/js/src/jit/none/AtomicOperations-sparc.h
@@ -0,0 +1,251 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_sparc_AtomicOperations_sparc_h
+#define jit_sparc_AtomicOperations_sparc_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward-compatible
+// option for older compilers: enable it to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
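+
+// Illustrative note: __atomic_always_lock_free(size, 0) evaluates to a
+// compile-time constant, so the probes in isLockfree8() below could also be
+// phrased as static_asserts, e.g. (sketch only):
+//
+//   static_assert(__atomic_always_lock_free(sizeof(int32_t), 0),
+//                 "32-bit atomics must be lock-free on this target");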
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+# if defined(__LP64__)
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+# endif
+ return true;
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+# if !defined(__LP64__)
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# endif
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+# if !defined(__LP64__)
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# endif
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+# if !defined(__LP64__)
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# endif
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+# if !defined(__LP64__)
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# endif
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+# if !defined(__LP64__)
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# endif
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ ;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1);
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory, use GCC or Clang, or add code here"
+
+#endif
+
+#endif // jit_sparc_AtomicOperations_sparc_h
diff --git a/js/src/jit/none/BaselineCompiler-none.h b/js/src/jit/none/BaselineCompiler-none.h
new file mode 100644
index 000000000..ca0791eb3
--- /dev/null
+++ b/js/src/jit/none/BaselineCompiler-none.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_BaselineCompiler_none_h
+#define jit_none_BaselineCompiler_none_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerNone : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerNone(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+ {
+ MOZ_CRASH();
+ }
+};
+
+typedef BaselineCompilerNone BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_BaselineCompiler_none_h */
diff --git a/js/src/jit/none/CodeGenerator-none.h b/js/src/jit/none/CodeGenerator-none.h
new file mode 100644
index 000000000..ad62ea452
--- /dev/null
+++ b/js/src/jit/none/CodeGenerator-none.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_CodeGenerator_none_h
+#define jit_none_CodeGenerator_none_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorNone : public CodeGeneratorShared
+{
+ public:
+ CodeGeneratorNone(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+ {
+ MOZ_CRASH();
+ }
+
+ MoveOperand toMoveOperand(LAllocation) const { MOZ_CRASH(); }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition, T1, T2, LSnapshot*) { MOZ_CRASH(); }
+ template<typename T>
+ void bailoutTest32(Assembler::Condition, Register, T, LSnapshot*) { MOZ_CRASH(); }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition, T1, T2, LSnapshot*) { MOZ_CRASH(); }
+ void bailoutTestPtr(Assembler::Condition, Register, Register, LSnapshot*) { MOZ_CRASH(); }
+ void bailoutIfFalseBool(Register, LSnapshot*) { MOZ_CRASH(); }
+ void bailoutFrom(Label*, LSnapshot*) { MOZ_CRASH(); }
+ void bailout(LSnapshot*) { MOZ_CRASH(); }
+ void bailoutIf(Assembler::Condition, LSnapshot*) { MOZ_CRASH(); }
+ bool generateOutOfLineCode() { MOZ_CRASH(); }
+ void testNullEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testUndefinedEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testObjectEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testZeroEmitBranch(Assembler::Condition, Register, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void emitTableSwitchDispatch(MTableSwitch*, Register, Register) { MOZ_CRASH(); }
+ ValueOperand ToValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ ValueOperand ToOutValue(LInstruction*) { MOZ_CRASH(); }
+ ValueOperand ToTempValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ void generateInvalidateEpilogue() { MOZ_CRASH(); }
+ void setReturnDoubleRegs(LiveRegisterSet* regs) { MOZ_CRASH(); }
+};
+
+typedef CodeGeneratorNone CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_CodeGenerator_none_h */
diff --git a/js/src/jit/none/LIR-none.h b/js/src/jit/none/LIR-none.h
new file mode 100644
index 000000000..44fa9d871
--- /dev/null
+++ b/js/src/jit/none/LIR-none.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_LIR_none_h
+#define jit_none_LIR_none_h
+
+namespace js {
+namespace jit {
+
+class LUnboxFloatingPoint : public LInstruction
+{
+ public:
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { MOZ_CRASH(); }
+
+ const LDefinition* output() const { MOZ_CRASH(); }
+ MIRType type() const { MOZ_CRASH(); }
+};
+
+class LTableSwitch : public LInstruction
+{
+ public:
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LAllocation* index() { MOZ_CRASH(); }
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+};
+
+class LTableSwitchV : public LInstruction
+{
+ public:
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempFloat() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+
+ static const size_t InputValue = 0;
+};
+
+class LWasmUint32ToFloat32 : public LInstruction
+{
+ public:
+ LWasmUint32ToFloat32(const LAllocation&) { MOZ_CRASH(); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+
+ MUnbox* mir() const { MOZ_CRASH(); }
+ const LAllocation* payload() { MOZ_CRASH(); }
+ const LAllocation* type() { MOZ_CRASH(); }
+ const char* extraName() const { MOZ_CRASH(); }
+};
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LDivI(const LAllocation&, const LAllocation&,
+ const LDefinition&) {
+ MOZ_CRASH();
+ }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LDivPowTwoI(const LAllocation&, int32_t) { MOZ_CRASH(); }
+ const LAllocation* numerator() { MOZ_CRASH(); }
+ int32_t shift() { MOZ_CRASH(); }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LModI(const LAllocation&, const LAllocation&,
+ const LDefinition&)
+ {
+ MOZ_CRASH();
+ }
+
+ const LDefinition* callTemp() { MOZ_CRASH(); }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LWasmUint32ToDouble(const LAllocation&) { MOZ_CRASH(); }
+};
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+
+ public:
+ int32_t shift() { MOZ_CRASH(); }
+ LModPowTwoI(const LAllocation& lhs, int32_t shift) { MOZ_CRASH(); }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+
+class LGuardShape : public LInstruction {};
+class LGuardObjectGroup : public LInstruction {};
+class LMulI : public LInstruction {};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_LIR_none_h */
diff --git a/js/src/jit/none/LOpcodes-none.h b/js/src/jit/none/LOpcodes-none.h
new file mode 100644
index 000000000..e897f40b3
--- /dev/null
+++ b/js/src/jit/none/LOpcodes-none.h
@@ -0,0 +1,14 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_LOpcodes_none_h__
+#define jit_none_LOpcodes_none_h__
+
+#include "jit/shared/LOpcodes-shared.h"
+
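+// On a real backend this X-macro enumerates the platform-specific LIR opcodes,
+// roughly of the form (opcode names purely illustrative):
+//
+//   #define LIR_CPU_OPCODE_LIST(_) \
+//       _(DivI)                    \
+//       _(ModI)                    \
+//       _(TableSwitch)
+//
+// The none backend contributes no platform opcodes, so the list expands to
+// nothing.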
+#define LIR_CPU_OPCODE_LIST(_)
+
+#endif // jit_none_LOpcodes_none_h__
diff --git a/js/src/jit/none/Lowering-none.h b/js/src/jit/none/Lowering-none.h
new file mode 100644
index 000000000..35c1b8231
--- /dev/null
+++ b/js/src/jit/none/Lowering-none.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_Lowering_none_h
+#define jit_none_Lowering_none_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorNone : public LIRGeneratorShared
+{
+ public:
+ LIRGeneratorNone(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ {
+ MOZ_CRASH();
+ }
+
+ LBoxAllocation useBoxFixed(MDefinition*, Register, Register, bool useAtStart = false) { MOZ_CRASH(); }
+
+ LAllocation useByteOpRegister(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterAtStart(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition*) { MOZ_CRASH(); }
+ LDefinition tempByteOpRegister() { MOZ_CRASH(); }
+ LDefinition tempToUnbox() { MOZ_CRASH(); }
+ bool needTempForPostBarrier() { MOZ_CRASH(); }
+ void lowerUntypedPhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void defineUntypedPhi(MPhi*, size_t) { MOZ_CRASH(); }
+ void defineInt64Phi(MPhi*, size_t) { MOZ_CRASH(); }
+ void lowerForShift(LInstructionHelper<1, 2, 0>*, MDefinition*, MDefinition*, MDefinition*) {
+ MOZ_CRASH();
+ }
+ void lowerUrshD(MUrsh*) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForALU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForFPU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForALUInt64(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+ void lowerForMulInt64(LMulI64*, MMul*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForShiftInt64(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ MOZ_CRASH();
+ }
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ MOZ_CRASH();
+ }
+ void lowerForBitAndAndBranch(LBitAndAndBranch*, MInstruction*,
+ MDefinition*, MDefinition*) {
+ MOZ_CRASH();
+ }
+
+ void lowerConstantDouble(double, MInstruction*) { MOZ_CRASH(); }
+ void lowerConstantFloat32(float, MInstruction*) { MOZ_CRASH(); }
+ void lowerTruncateDToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerTruncateFToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerDivI(MDiv*) { MOZ_CRASH(); }
+ void lowerModI(MMod*) { MOZ_CRASH(); }
+ void lowerDivI64(MDiv*) { MOZ_CRASH(); }
+ void lowerModI64(MMod*) { MOZ_CRASH(); }
+ void lowerMulI(MMul*, MDefinition*, MDefinition*) { MOZ_CRASH(); }
+ void lowerUDiv(MDiv*) { MOZ_CRASH(); }
+ void lowerUMod(MMod*) { MOZ_CRASH(); }
+ void visitBox(MBox* box) { MOZ_CRASH(); }
+ void visitUnbox(MUnbox* unbox) { MOZ_CRASH(); }
+ void visitReturn(MReturn* ret) { MOZ_CRASH(); }
+ void visitPowHalf(MPowHalf*) { MOZ_CRASH(); }
+ void visitAsmJSNeg(MAsmJSNeg*) { MOZ_CRASH(); }
+ void visitGuardShape(MGuardShape* ins) { MOZ_CRASH(); }
+ void visitGuardObjectGroup(MGuardObjectGroup* ins) { MOZ_CRASH(); }
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) { MOZ_CRASH(); }
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) { MOZ_CRASH(); }
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) { MOZ_CRASH(); }
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) { MOZ_CRASH(); }
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins) { MOZ_CRASH(); }
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins) { MOZ_CRASH(); }
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins) { MOZ_CRASH(); }
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins) { MOZ_CRASH(); }
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins) { MOZ_CRASH(); }
+ void visitWasmSelect(MWasmSelect*) { MOZ_CRASH(); }
+ void visitWasmBoundsCheck(MWasmBoundsCheck* ins) { MOZ_CRASH(); }
+ void visitWasmLoad(MWasmLoad* ins) { MOZ_CRASH(); }
+ void visitWasmStore(MWasmStore* ins) { MOZ_CRASH(); }
+
+ LTableSwitch* newLTableSwitch(LAllocation, LDefinition, MTableSwitch*) { MOZ_CRASH(); }
+ LTableSwitchV* newLTableSwitchV(MTableSwitch*) { MOZ_CRASH(); }
+ void visitSimdSelect(MSimdSelect* ins) { MOZ_CRASH(); }
+ void visitSimdSplat(MSimdSplat* ins) { MOZ_CRASH(); }
+ void visitSimdSwizzle(MSimdSwizzle* ins) { MOZ_CRASH(); }
+ void visitSimdShuffle(MSimdShuffle* ins) { MOZ_CRASH(); }
+ void visitSimdValueX4(MSimdValueX4* lir) { MOZ_CRASH(); }
+ void visitSubstr(MSubstr*) { MOZ_CRASH(); }
+ void visitSimdBinaryArith(js::jit::MSimdBinaryArith*) { MOZ_CRASH(); }
+ void visitSimdBinarySaturating(MSimdBinarySaturating* ins) { MOZ_CRASH(); }
+ void visitRandom(js::jit::MRandom*) { MOZ_CRASH(); }
+ void visitCopySign(js::jit::MCopySign*) { MOZ_CRASH(); }
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64*) { MOZ_CRASH(); }
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint*) { MOZ_CRASH(); }
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) { MOZ_CRASH(); }
+};
+
+typedef LIRGeneratorNone LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_Lowering_none_h */
diff --git a/js/src/jit/none/MacroAssembler-none.h b/js/src/jit/none/MacroAssembler-none.h
new file mode 100644
index 000000000..f27de5153
--- /dev/null
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -0,0 +1,464 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_MacroAssembler_none_h
+#define jit_none_MacroAssembler_none_h
+
+#include "jit/JitCompartment.h"
+#include "jit/MoveResolver.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register StackPointer = { Registers::invalid_reg };
+static constexpr Register FramePointer = { Registers::invalid_reg };
+static constexpr Register ReturnReg = { Registers::invalid_reg };
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister ReturnSimd128Reg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister ScratchSimd128Reg = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_reg };
+
+static constexpr Register OsrFrameReg = { Registers::invalid_reg };
+static constexpr Register ArgumentsRectifierReg = { Registers::invalid_reg };
+static constexpr Register PreBarrierReg = { Registers::invalid_reg };
+static constexpr Register CallTempReg0 = { Registers::invalid_reg };
+static constexpr Register CallTempReg1 = { Registers::invalid_reg };
+static constexpr Register CallTempReg2 = { Registers::invalid_reg };
+static constexpr Register CallTempReg3 = { Registers::invalid_reg };
+static constexpr Register CallTempReg4 = { Registers::invalid_reg };
+static constexpr Register CallTempReg5 = { Registers::invalid_reg };
+static constexpr Register InvalidReg = { Registers::invalid_reg };
+
+static constexpr Register IntArgReg0 = { Registers::invalid_reg };
+static constexpr Register IntArgReg1 = { Registers::invalid_reg };
+static constexpr Register IntArgReg2 = { Registers::invalid_reg };
+static constexpr Register IntArgReg3 = { Registers::invalid_reg };
+static constexpr Register GlobalReg = { Registers::invalid_reg };
+static constexpr Register HeapReg = { Registers::invalid_reg };
+
+static constexpr Register WasmIonExitRegCallee = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegE0 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegE1 = { Registers::invalid_reg };
+
+static constexpr Register WasmIonExitRegReturnData = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegReturnType = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD0 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD1 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD2 = { Registers::invalid_reg };
+
+static constexpr Register RegExpTesterRegExpReg = { Registers::invalid_reg };
+static constexpr Register RegExpTesterStringReg = { Registers::invalid_reg };
+static constexpr Register RegExpTesterLastIndexReg = { Registers::invalid_reg };
+static constexpr Register RegExpTesterStickyReg = { Registers::invalid_reg };
+
+static constexpr Register RegExpMatcherRegExpReg = { Registers::invalid_reg };
+static constexpr Register RegExpMatcherStringReg = { Registers::invalid_reg };
+static constexpr Register RegExpMatcherLastIndexReg = { Registers::invalid_reg };
+static constexpr Register RegExpMatcherStickyReg = { Registers::invalid_reg };
+
+static constexpr Register JSReturnReg_Type = { Registers::invalid_reg };
+static constexpr Register JSReturnReg_Data = { Registers::invalid_reg };
+static constexpr Register JSReturnReg = { Registers::invalid_reg };
+
+#if defined(JS_NUNBOX32)
+static constexpr ValueOperand JSReturnOperand(InvalidReg, InvalidReg);
+static constexpr Register64 ReturnReg64(InvalidReg, InvalidReg);
+#elif defined(JS_PUNBOX64)
+static constexpr ValueOperand JSReturnOperand(InvalidReg);
+static constexpr Register64 ReturnReg64(InvalidReg);
+#else
+#error "Bad architecture"
+#endif
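+
+// Note: NUNBOX32 represents a Value as a type/payload register pair, while
+// PUNBOX64 packs the whole Value into a single 64-bit register, which is why
+// the operands above are built from two registers or one respectively.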
+
+static constexpr Register ABINonArgReg0 = { Registers::invalid_reg };
+static constexpr Register ABINonArgReg1 = { Registers::invalid_reg };
+static constexpr Register ABINonArgReturnReg0 = { Registers::invalid_reg };
+static constexpr Register ABINonArgReturnReg1 = { Registers::invalid_reg };
+
+static constexpr Register WasmTableCallScratchReg = { Registers::invalid_reg };
+static constexpr Register WasmTableCallSigReg = { Registers::invalid_reg };
+static constexpr Register WasmTableCallIndexReg = { Registers::invalid_reg };
+static constexpr Register WasmTlsReg = { Registers::invalid_reg };
+
+static constexpr uint32_t ABIStackAlignment = 4;
+static constexpr uint32_t CodeAlignment = 4;
+static constexpr uint32_t JitStackAlignment = 8;
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+
+static const Scale ScalePointer = TimesOne;
+
+class Assembler : public AssemblerShared
+{
+ public:
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static Condition InvertCondition(Condition) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ static void PatchDataWithValueCheck(CodeLocationLabel, T, S) { MOZ_CRASH(); }
+ static void PatchWrite_Imm32(CodeLocationLabel, Imm32) { MOZ_CRASH(); }
+
+ static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); }
+ static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
+ static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); }
+
+ static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
+
+ static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+ {
+ MOZ_CRASH();
+ }
+};
+
+class Operand
+{
+ public:
+ Operand(const Address&) { MOZ_CRASH(); }
+ Operand(const Register) { MOZ_CRASH(); }
+ Operand(const FloatRegister) { MOZ_CRASH(); }
+ Operand(Register, Imm32) { MOZ_CRASH(); }
+ Operand(Register, int32_t) { MOZ_CRASH(); }
+};
+
+class MacroAssemblerNone : public Assembler
+{
+ public:
+ MacroAssemblerNone() { MOZ_CRASH(); }
+
+ MoveResolver moveResolver_;
+
+ size_t size() const { MOZ_CRASH(); }
+ size_t bytesNeeded() const { MOZ_CRASH(); }
+ size_t jumpRelocationTableBytes() const { MOZ_CRASH(); }
+ size_t dataRelocationTableBytes() const { MOZ_CRASH(); }
+ size_t preBarrierTableBytes() const { MOZ_CRASH(); }
+
+ size_t numCodeLabels() const { MOZ_CRASH(); }
+ CodeLabel codeLabel(size_t) { MOZ_CRASH(); }
+
+ bool asmMergeWith(const MacroAssemblerNone&) { MOZ_CRASH(); }
+
+ void trace(JSTracer*) { MOZ_CRASH(); }
+ static void TraceJumpRelocations(JSTracer*, JitCode*, CompactBufferReader&) { MOZ_CRASH(); }
+ static void TraceDataRelocations(JSTracer*, JitCode*, CompactBufferReader&) { MOZ_CRASH(); }
+
+ static bool SupportsFloatingPoint() { return false; }
+ static bool SupportsSimd() { return false; }
+ static bool SupportsUnalignedAccesses() { return false; }
+
+ void executableCopy(void*) { MOZ_CRASH(); }
+ void copyJumpRelocationTable(uint8_t*) { MOZ_CRASH(); }
+ void copyDataRelocationTable(uint8_t*) { MOZ_CRASH(); }
+ void copyPreBarrierTable(uint8_t*) { MOZ_CRASH(); }
+ void processCodeLabels(uint8_t*) { MOZ_CRASH(); }
+
+ void flushBuffer() { MOZ_CRASH(); }
+
+ template <typename T> void bind(T) { MOZ_CRASH(); }
+ void bindLater(Label*, wasm::TrapDesc) { MOZ_CRASH(); }
+ template <typename T> void j(Condition, T) { MOZ_CRASH(); }
+ template <typename T> void jump(T) { MOZ_CRASH(); }
+ void haltingAlign(size_t) { MOZ_CRASH(); }
+ void nopAlign(size_t) { MOZ_CRASH(); }
+ void checkStackAlignment() { MOZ_CRASH(); }
+ uint32_t currentOffset() { MOZ_CRASH(); }
+ uint32_t labelToPatchOffset(CodeOffset) { MOZ_CRASH(); }
+ CodeOffset labelForPatch() { MOZ_CRASH(); }
+
+ void nop() { MOZ_CRASH(); }
+ void breakpoint() { MOZ_CRASH(); }
+ void abiret() { MOZ_CRASH(); }
+ void ret() { MOZ_CRASH(); }
+
+ CodeOffset toggledJump(Label*) { MOZ_CRASH(); }
+ CodeOffset toggledCall(JitCode*, bool) { MOZ_CRASH(); }
+ static size_t ToggledCallSize(uint8_t*) { MOZ_CRASH(); }
+
+ void writePrebarrierOffset(CodeOffset) { MOZ_CRASH(); }
+
+ void finish() { MOZ_CRASH(); }
+
+ template <typename T, typename S> void moveValue(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S, typename U> void moveValue(T, S, U) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeValue(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S, typename U> void storeValue(T, S, U) { MOZ_CRASH(); }
+ template <typename T, typename S> void loadValue(T, S) { MOZ_CRASH(); }
+ template <typename T> void pushValue(T) { MOZ_CRASH(); }
+ template <typename T, typename S> void pushValue(T, S) { MOZ_CRASH(); }
+ void popValue(ValueOperand) { MOZ_CRASH(); }
+ void tagValue(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+ void retn(Imm32 n) { MOZ_CRASH(); }
+ template <typename T> void push(T) { MOZ_CRASH(); }
+ template <typename T> void Push(T) { MOZ_CRASH(); }
+ template <typename T> void pop(T) { MOZ_CRASH(); }
+ template <typename T> void Pop(T) { MOZ_CRASH(); }
+ template <typename T> CodeOffset pushWithPatch(T) { MOZ_CRASH(); }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel*, Label* doc = nullptr) { MOZ_CRASH(); }
+ CodeOffsetJump jumpWithPatch(RepatchLabel*, Condition, Label* doc = nullptr) { MOZ_CRASH(); }
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* doc = nullptr) { MOZ_CRASH(); }
+
+ void testNullSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testObjectSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testUndefinedSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+
+ template <typename T, typename S> void cmpPtrSet(Condition, T, S, Register) { MOZ_CRASH(); }
+ template <typename T, typename S> void cmp32Set(Condition, T, S, Register) { MOZ_CRASH(); }
+
+ template <typename T, typename S> void mov(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void movq(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void movePtr(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void move32(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void moveFloat32(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void moveDouble(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void move64(T, S) { MOZ_CRASH(); }
+ template <typename T> CodeOffset movWithPatch(T, Register) { MOZ_CRASH(); }
+
+ template <typename T> void loadInt32x1(T, FloatRegister dest) { MOZ_CRASH(); }
+ template <typename T> void loadInt32x2(T, FloatRegister dest) { MOZ_CRASH(); }
+ template <typename T> void loadInt32x3(T, FloatRegister dest) { MOZ_CRASH(); }
+ template <typename T> void loadFloat32x3(T, FloatRegister dest) { MOZ_CRASH(); }
+
+ template <typename T> void loadPtr(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load32(T, Register) { MOZ_CRASH(); }
+ template <typename T> void loadFloat32(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadDouble(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadAlignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadUnalignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadAlignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadUnalignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
+ template <typename T> void loadPrivate(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load8SignExtend(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load8ZeroExtend(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load16SignExtend(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load16ZeroExtend(T, Register) { MOZ_CRASH(); }
+ template <typename T> void load64(T, Register64 ) { MOZ_CRASH(); }
+
+ template <typename T, typename S> void storePtr(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void store32(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void store32_NoSecondScratch(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeFloat32(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeDouble(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeAlignedSimd128Int(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeUnalignedSimd128Int(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeAlignedSimd128Float(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeUnalignedSimd128Float(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void store8(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void store16(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeInt32x1(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeInt32x2(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeInt32x3(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void storeFloat32x3(T, S) { MOZ_CRASH(); }
+ template <typename T, typename S> void store64(T, S) { MOZ_CRASH(); }
+
+ template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
+
+ template <typename T> void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+ template <typename T> void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+ template <typename T> void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+ template <typename T> void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+ template <typename T> void compareExchange32(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+ template<typename T> void atomicExchange8SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
+ template<typename T> void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
+ template<typename T> void atomicExchange16SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
+ template<typename T> void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
+ template<typename T> void atomicExchange32(const T& mem, Register value, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAdd8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAdd16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAdd32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAdd8(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAdd16(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAdd32(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchSub8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchSub16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchSub32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicSub8(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicSub16(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicSub32(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAnd8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAnd16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchAnd32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAnd8(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAnd16(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicAnd32(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchOr8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchOr16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchOr32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicOr8(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicOr16(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicOr32(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchXor8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchXor16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicFetchXor32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicXor8(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicXor16(const T& value, const S& mem) { MOZ_CRASH(); }
+ template <typename T, typename S> void atomicXor32(const T& value, const S& mem) { MOZ_CRASH(); }
+
+ Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }
+
+ void boxDouble(FloatRegister, ValueOperand) { MOZ_CRASH(); }
+ void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+ template <typename T> void unboxInt32(T, Register) { MOZ_CRASH(); }
+ template <typename T> void unboxBoolean(T, Register) { MOZ_CRASH(); }
+ template <typename T> void unboxString(T, Register) { MOZ_CRASH(); }
+ template <typename T> void unboxSymbol(T, Register) { MOZ_CRASH(); }
+ template <typename T> void unboxObject(T, Register) { MOZ_CRASH(); }
+ template <typename T> void unboxDouble(T, FloatRegister) { MOZ_CRASH(); }
+ void unboxValue(const ValueOperand&, AnyRegister) { MOZ_CRASH(); }
+ void unboxNonDouble(const ValueOperand&, Register) { MOZ_CRASH(); }
+ void notBoolean(ValueOperand) { MOZ_CRASH(); }
+ Register extractObject(Address, Register) { MOZ_CRASH(); }
+ Register extractObject(ValueOperand, Register) { MOZ_CRASH(); }
+ Register extractInt32(ValueOperand, Register) { MOZ_CRASH(); }
+ Register extractBoolean(ValueOperand, Register) { MOZ_CRASH(); }
+ template <typename T> Register extractTag(T, Register) { MOZ_CRASH(); }
+
+ void convertFloat32ToInt32(FloatRegister, Register, Label*, bool v = true) { MOZ_CRASH(); }
+ void convertDoubleToInt32(FloatRegister, Register, Label*, bool v = true) { MOZ_CRASH(); }
+ void convertBoolToInt32(Register, Register) { MOZ_CRASH(); }
+
+ void convertDoubleToFloat32(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+ void convertInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+
+ template <typename T> void convertInt32ToDouble(T, FloatRegister) { MOZ_CRASH(); }
+ void convertFloat32ToDouble(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+ static bool convertUInt64ToDoubleNeedsTemp() { MOZ_CRASH(); }
+ void convertUInt64ToDouble(Register64, FloatRegister, Register) { MOZ_CRASH(); }
+
+ void boolValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void boolValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+
+ void loadConstantDouble(double, FloatRegister) { MOZ_CRASH(); }
+ void loadConstantFloat32(float, FloatRegister) { MOZ_CRASH(); }
+ void loadConstantDouble(wasm::RawF64, FloatRegister) { MOZ_CRASH(); }
+ void loadConstantFloat32(wasm::RawF32, FloatRegister) { MOZ_CRASH(); }
+ Condition testInt32Truthy(bool, ValueOperand) { MOZ_CRASH(); }
+ Condition testStringTruthy(bool, ValueOperand) { MOZ_CRASH(); }
+
+ template <typename T> void loadUnboxedValue(T, MIRType, AnyRegister) { MOZ_CRASH(); }
+ template <typename T> void storeUnboxedValue(const ConstantOrRegister&, MIRType, T, MIRType) { MOZ_CRASH(); }
+ template <typename T> void storeUnboxedPayload(ValueOperand value, T, size_t) { MOZ_CRASH(); }
+
+ void convertUInt32ToDouble(Register, FloatRegister) { MOZ_CRASH(); }
+ void convertUInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+ void incrementInt32Value(Address) { MOZ_CRASH(); }
+ void ensureDouble(ValueOperand, FloatRegister, Label*) { MOZ_CRASH(); }
+ void handleFailureWithHandlerTail(void*) { MOZ_CRASH(); }
+
+ void buildFakeExitFrame(Register, uint32_t*) { MOZ_CRASH(); }
+ bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
+ void loadWasmGlobalPtr(uint32_t, Register) { MOZ_CRASH(); }
+ void loadWasmActivationFromTls(Register) { MOZ_CRASH(); }
+ void loadWasmActivationFromSymbolicAddress(Register) { MOZ_CRASH(); }
+ void loadWasmPinnedRegsFromTls() { MOZ_CRASH(); }
+
+ void setPrinter(Sprinter*) { MOZ_CRASH(); }
+ Operand ToPayload(Operand base) { MOZ_CRASH(); }
+
+ static const Register getStackPointer() { MOZ_CRASH(); }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register, Register) { MOZ_CRASH(); }
+ void profilerExitFrame() { MOZ_CRASH(); }
+
+#ifdef JS_NUNBOX32
+ Address ToPayload(Address) { MOZ_CRASH(); }
+ Address ToType(Address) { MOZ_CRASH(); }
+#endif
+
+ struct AutoPrepareForPatching {
+ explicit AutoPrepareForPatching(MacroAssemblerNone&) {
+ MOZ_CRASH();
+ }
+ };
+};
+
+typedef MacroAssemblerNone MacroAssemblerSpecific;
+
+class ABIArgGenerator
+{
+ public:
+ ABIArgGenerator() { MOZ_CRASH(); }
+ ABIArg next(MIRType) { MOZ_CRASH(); }
+ ABIArg& current() { MOZ_CRASH(); }
+ uint32_t stackBytesConsumedSoFar() const { MOZ_CRASH(); }
+};
+
+static inline void
+PatchJump(CodeLocationJump&, CodeLocationLabel, ReprotectCode reprotect = DontReprotect)
+{
+ MOZ_CRASH();
+}
+
+static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) { MOZ_CRASH(); }
+
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_MacroAssembler_none_h */
diff --git a/js/src/jit/none/MoveEmitter-none.h b/js/src/jit/none/MoveEmitter-none.h
new file mode 100644
index 000000000..dcecb9f39
--- /dev/null
+++ b/js/src/jit/none/MoveEmitter-none.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_MoveEmitter_none_h
+#define jit_none_MoveEmitter_none_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterNone
+{
+ public:
+ MoveEmitterNone(MacroAssemblerNone&) { MOZ_CRASH(); }
+ void emit(const MoveResolver&) { MOZ_CRASH(); }
+ void finish() { MOZ_CRASH(); }
+ void setScratchRegister(Register) { MOZ_CRASH(); }
+};
+
+typedef MoveEmitterNone MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_MoveEmitter_none_h */
diff --git a/js/src/jit/none/SharedICHelpers-none.h b/js/src/jit/none/SharedICHelpers-none.h
new file mode 100644
index 000000000..4e6f84b72
--- /dev/null
+++ b/js/src/jit/none/SharedICHelpers-none.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_SharedICHelpers_none_h
+#define jit_none_SharedICHelpers_none_h
+
+namespace js {
+namespace jit {
+
+static const size_t ICStackValueOffset = 0;
+static const uint32_t STUB_FRAME_SIZE = 0;
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitRepushTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitCallIC(CodeOffset*, MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitEnterTypeMonitorIC(MacroAssembler&, size_t v = 0) { MOZ_CRASH(); }
+inline void EmitReturnFromIC(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitChangeICReturnAddress(MacroAssembler&, Register) { MOZ_CRASH(); }
+inline void EmitBaselineTailCallVM(JitCode*, MacroAssembler&, uint32_t) { MOZ_CRASH(); }
+inline void EmitIonTailCallVM(JitCode*, MacroAssembler&, uint32_t) { MOZ_CRASH(); }
+inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler&, Register, uint32_t) { MOZ_CRASH(); }
+inline void EmitBaselineCallVM(JitCode*, MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitIonCallVM(JitCode*, size_t, MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitBaselineEnterStubFrame(MacroAssembler&, Register) { MOZ_CRASH(); }
+inline void EmitIonEnterStubFrame(MacroAssembler&, Register) { MOZ_CRASH(); }
+inline void EmitBaselineLeaveStubFrame(MacroAssembler&, bool v = false) { MOZ_CRASH(); }
+inline void EmitIonLeaveStubFrame(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitStowICValues(MacroAssembler&, int) { MOZ_CRASH(); }
+inline void EmitUnstowICValues(MacroAssembler&, int, bool v = false) { MOZ_CRASH(); }
+inline void EmitCallTypeUpdateIC(MacroAssembler&, JitCode*, uint32_t) { MOZ_CRASH(); }
+inline void EmitStubGuardFailure(MacroAssembler&) { MOZ_CRASH(); }
+
+template <typename T> inline void EmitPreBarrier(MacroAssembler&, T, MIRType) { MOZ_CRASH(); }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_SharedICHelpers_none_h */
diff --git a/js/src/jit/none/SharedICRegisters-none.h b/js/src/jit/none/SharedICRegisters-none.h
new file mode 100644
index 000000000..81e0fa8a9
--- /dev/null
+++ b/js/src/jit/none/SharedICRegisters-none.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_SharedICRegisters_none_h
+#define jit_none_SharedICRegisters_none_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = { Registers::invalid_reg };
+static constexpr Register BaselineStackReg = { Registers::invalid_reg };
+
+static constexpr ValueOperand R0 = JSReturnOperand;
+static constexpr ValueOperand R1 = JSReturnOperand;
+static constexpr ValueOperand R2 = JSReturnOperand;
+
+static constexpr Register ICTailCallReg = { Registers::invalid_reg };
+static constexpr Register ICStubReg = { Registers::invalid_reg };
+
+static constexpr Register ExtractTemp0 = { Registers::invalid_reg };
+static constexpr Register ExtractTemp1 = { Registers::invalid_reg };
+
+static constexpr FloatRegister FloatReg0 = { FloatRegisters::invalid_reg };
+static constexpr FloatRegister FloatReg1 = { FloatRegisters::invalid_reg };
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_SharedICRegisters_none_h */
+
diff --git a/js/src/jit/none/Trampoline-none.cpp b/js/src/jit/none/Trampoline-none.cpp
new file mode 100644
index 000000000..54d818a85
--- /dev/null
+++ b/js/src/jit/none/Trampoline-none.cpp
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineIC.h"
+#include "jit/IonCaches.h"
+
+using namespace js;
+using namespace js::jit;
+
+// This file includes stubs for generating the JIT trampolines when there is no
+// JIT backend, and also includes implementations for assorted routines which
+// can't be implemented in headers.
+
+JitCode* JitRuntime::generateEnterJIT(JSContext*, EnterJitType) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateInvalidator(JSContext*) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateArgumentsRectifier(JSContext*, void**) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateBailoutTable(JSContext*, uint32_t) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateBailoutHandler(JSContext*) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateVMWrapper(JSContext*, const VMFunction&) { MOZ_CRASH(); }
+JitCode* JitRuntime::generatePreBarrier(JSContext*, MIRType) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateDebugTrapHandler(JSContext*) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateExceptionTailStub(JSContext*, void*) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateBailoutTailStub(JSContext*) { MOZ_CRASH(); }
+
+FrameSizeClass FrameSizeClass::FromDepth(uint32_t) { MOZ_CRASH(); }
+FrameSizeClass FrameSizeClass::ClassLimit() { MOZ_CRASH(); }
+uint32_t FrameSizeClass::frameSize() const { MOZ_CRASH(); }
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& iter, BailoutStack* bailout)
+{
+ MOZ_CRASH();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& iter, InvalidationBailoutStack* bailout)
+{
+ MOZ_CRASH();
+}
+
+bool ICCompare_Int32::Compiler::generateStubCode(MacroAssembler&) { MOZ_CRASH(); }
+bool ICCompare_Double::Compiler::generateStubCode(MacroAssembler&) { MOZ_CRASH(); }
+bool ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler&) { MOZ_CRASH(); }
+bool ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler&) { MOZ_CRASH(); }
+JitCode* JitRuntime::generateProfilerExitFrameTailStub(JSContext*) { MOZ_CRASH(); }
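The stubs above let a build without a codegen backend link against the full JitRuntime interface while guaranteeing a crash if any of them is ever reached. A minimal standalone sketch of the same pattern, using plain abort() in place of MOZ_CRASH and made-up names (Backend, generateTrampoline) rather than SpiderMonkey API:

// Sketch of the "stub backend" pattern: every entry point the rest of the
// engine links against is defined, but unconditionally aborts, so a build
// with no codegen backend still links and runs interpreter-only.
#include <cstdio>
#include <cstdlib>

[[noreturn]] static void crash(const char* why) {
    std::fprintf(stderr, "unreachable backend stub: %s\n", why);
    std::abort();
}

struct Backend {
    // Linked against by generic code; never called when the JIT is disabled.
    void* generateTrampoline() { crash("generateTrampoline"); }
    bool  generateStubCode()   { crash("generateStubCode"); }
};

int main() {
    Backend b;          // the stubs exist only to satisfy the linker
    (void)b;
    std::puts("linked without a JIT backend");
    return 0;
}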
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
new file mode 100644
index 000000000..aac9687b8
--- /dev/null
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -0,0 +1,991 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Assembler_shared_h
+#define jit_shared_Assembler_shared_h
+
+#include "mozilla/PodOperations.h"
+
+#include <limits.h>
+
+#include "jit/AtomicOp.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "vm/HelperThreads.h"
+#include "wasm/WasmTypes.h"
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// Push return addresses callee-side.
+# define JS_USE_LINK_REGISTER
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+// JS_SMALL_BRANCH means the range on a branch instruction
+// is smaller than the whole address space
+# define JS_SMALL_BRANCH
+#endif
+
+namespace js {
+namespace jit {
+
+namespace Disassembler {
+class HeapAccess;
+} // namespace Disassembler
+
+static const uint32_t Simd128DataSize = 4 * sizeof(int32_t);
+static_assert(Simd128DataSize == 4 * sizeof(int32_t), "SIMD data should be able to contain int32x4");
+static_assert(Simd128DataSize == 4 * sizeof(float), "SIMD data should be able to contain float32x4");
+static_assert(Simd128DataSize == 2 * sizeof(double), "SIMD data should be able to contain float64x2");
+
+enum Scale {
+ TimesOne = 0,
+ TimesTwo = 1,
+ TimesFour = 2,
+ TimesEight = 3
+};
+
+static_assert(sizeof(JS::Value) == 8,
+ "required for TimesEight and 3 below to be correct");
+static const Scale ValueScale = TimesEight;
+static const size_t ValueShift = 3;
+
+static inline unsigned
+ScaleToShift(Scale scale)
+{
+ return unsigned(scale);
+}
+
+static inline bool
+IsShiftInScaleRange(int i)
+{
+ return i >= TimesOne && i <= TimesEight;
+}
+
+static inline Scale
+ShiftToScale(int i)
+{
+ MOZ_ASSERT(IsShiftInScaleRange(i));
+ return Scale(i);
+}
+
+static inline Scale
+ScaleFromElemWidth(int shift)
+{
+ switch (shift) {
+ case 1:
+ return TimesOne;
+ case 2:
+ return TimesTwo;
+ case 4:
+ return TimesFour;
+ case 8:
+ return TimesEight;
+ }
+
+ MOZ_CRASH("Invalid scale");
+}
+
+// Used for 32-bit immediates which do not require relocation.
+struct Imm32
+{
+ int32_t value;
+
+ explicit Imm32(int32_t value) : value(value)
+ { }
+
+ static inline Imm32 ShiftOf(enum Scale s) {
+ switch (s) {
+ case TimesOne:
+ return Imm32(0);
+ case TimesTwo:
+ return Imm32(1);
+ case TimesFour:
+ return Imm32(2);
+ case TimesEight:
+ return Imm32(3);
+ };
+ MOZ_CRASH("Invalid scale");
+ }
+
+ static inline Imm32 FactorOf(enum Scale s) {
+ return Imm32(1 << ShiftOf(s).value);
+ }
+};
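Since the Scale enumerators are the shift amounts themselves, ShiftOf and FactorOf reduce to a cast and a shift. A standalone check of that relationship, using a local copy of the enum rather than the jit headers:

// The enum value is the left-shift amount, so the byte factor is 1 << scale.
#include <cassert>

enum Scale { TimesOne = 0, TimesTwo = 1, TimesFour = 2, TimesEight = 3 };

static int ShiftOf(Scale s)  { return int(s); }
static int FactorOf(Scale s) { return 1 << ShiftOf(s); }

int main() {
    assert(FactorOf(TimesOne)   == 1);
    assert(FactorOf(TimesTwo)   == 2);
    assert(FactorOf(TimesFour)  == 4);
    assert(FactorOf(TimesEight) == 8);   // sizeof(JS::Value) on supported targets
    return 0;
}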
+
+// Pointer-sized integer to be embedded as an immediate in an instruction.
+struct ImmWord
+{
+ uintptr_t value;
+
+ explicit ImmWord(uintptr_t value) : value(value)
+ { }
+};
+
+// Used for 64-bit immediates which do not require relocation.
+struct Imm64
+{
+ uint64_t value;
+
+ explicit Imm64(int64_t value) : value(value)
+ { }
+
+ Imm32 low() const {
+ return Imm32(int32_t(value));
+ }
+
+ Imm32 hi() const {
+ return Imm32(int32_t(value >> 32));
+ }
+
+ inline Imm32 firstHalf() const;
+ inline Imm32 secondHalf() const;
+};
+
+#ifdef DEBUG
+static inline bool
+IsCompilingWasm()
+{
+ // wasm compilation pushes a JitContext with a null JSCompartment.
+ return GetJitContext()->compartment == nullptr;
+}
+#endif
+
+// Pointer to be embedded as an immediate in an instruction.
+struct ImmPtr
+{
+ void* value;
+
+ explicit ImmPtr(const void* value) : value(const_cast<void*>(value))
+ {
+ // To make code serialization-safe, wasm compilation should only
+ // compile pointer immediates using a SymbolicAddress.
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R>
+ explicit ImmPtr(R (*pf)())
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1>
+ explicit ImmPtr(R (*pf)(A1))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2>
+ explicit ImmPtr(R (*pf)(A1, A2))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3>
+ explicit ImmPtr(R (*pf)(A1, A2, A3))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3, class A4>
+ explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+};
+
+// The same as ImmPtr except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedImmPtr {
+ void* value;
+
+ explicit PatchedImmPtr()
+ : value(nullptr)
+ { }
+ explicit PatchedImmPtr(const void* value)
+ : value(const_cast<void*>(value))
+ { }
+};
+
+class AssemblerShared;
+class ImmGCPtr;
+
+// Used for immediates which require relocation.
+class ImmGCPtr
+{
+ public:
+ const gc::Cell* value;
+
+ explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr)
+ {
+ // Nursery pointers can't be used if the main thread might be currently
+ // performing a minor GC.
+ MOZ_ASSERT_IF(ptr && !ptr->isTenured(),
+ !CurrentThreadIsIonCompilingSafeForMinorGC());
+
+ // wasm shouldn't be creating GC things
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ private:
+ ImmGCPtr() : value(0) {}
+};
+
+// Pointer to be embedded as an immediate that is loaded from / stored to by
+// an instruction.
+struct AbsoluteAddress
+{
+ void* addr;
+
+ explicit AbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ AbsoluteAddress offset(ptrdiff_t delta) {
+ return AbsoluteAddress(((uint8_t*) addr) + delta);
+ }
+};
+
+// The same as AbsoluteAddress except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedAbsoluteAddress
+{
+ void* addr;
+
+ explicit PatchedAbsoluteAddress()
+ : addr(nullptr)
+ { }
+ explicit PatchedAbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr))
+ { }
+ explicit PatchedAbsoluteAddress(uintptr_t addr)
+ : addr(reinterpret_cast<void*>(addr))
+ { }
+};
+
+// Specifies an address computed in the form of a register base and a constant,
+// 32-bit offset.
+struct Address
+{
+ Register base;
+ int32_t offset;
+
+ Address(Register base, int32_t offset) : base(base), offset(offset)
+ { }
+
+ Address() { mozilla::PodZero(this); }
+};
+
+// Specifies an address computed in the form of a register base, a register
+// index with a scale, and a constant, 32-bit offset.
+struct BaseIndex
+{
+ Register base;
+ Register index;
+ Scale scale;
+ int32_t offset;
+
+ BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
+ : base(base), index(index), scale(scale), offset(offset)
+ { }
+
+ BaseIndex() { mozilla::PodZero(this); }
+};
+
+// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
+// sizeof(Value). Use this *only* if you're indexing into a series of Values
+// that aren't object elements or object slots (for example, values on the
+// stack, values in an arguments object, &c.). If you're indexing into an
+// object's elements or slots, don't use this directly! Use
+// BaseObject{Element,Slot}Index instead.
+struct BaseValueIndex : BaseIndex
+{
+ BaseValueIndex(Register base, Register index, int32_t offset = 0)
+ : BaseIndex(base, index, ValueScale, offset)
+ { }
+};
+
+// Specifies the address of an indexed Value within object elements from a
+// base. The index must not already be scaled by sizeof(Value)!
+struct BaseObjectElementIndex : BaseValueIndex
+{
+ BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset)
+ {
+ NativeObject::elementsSizeMustNotOverflow();
+ }
+};
+
+// Like BaseObjectElementIndex, except for object slots.
+struct BaseObjectSlotIndex : BaseValueIndex
+{
+ BaseObjectSlotIndex(Register base, Register index)
+ : BaseValueIndex(base, index)
+ {
+ NativeObject::slotsSizeMustNotOverflow();
+ }
+};
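BaseIndex and its Value-indexing variants all describe the classic base + (index << scale) + offset addressing form; only the fixed scale and the overflow checks differ. A small sketch that does the same arithmetic on plain integers standing in for registers:

// Effective-address arithmetic encoded by BaseIndex, with registers replaced
// by integer values: addr = base + (index << scale) + offset.
#include <cassert>
#include <cstdint>

enum Scale { TimesOne = 0, TimesTwo = 1, TimesFour = 2, TimesEight = 3 };

static uintptr_t EffectiveAddress(uintptr_t base, uintptr_t index, Scale scale,
                                  int32_t offset) {
    return base + (index << unsigned(scale)) + offset;
}

int main() {
    // The 5th 8-byte Value in a buffer starting at 0x1000, skipping a
    // hypothetical 16-byte header via the constant offset.
    uintptr_t addr = EffectiveAddress(0x1000, 5, TimesEight, 16);
    assert(addr == 0x1000 + 5 * 8 + 16);
    return 0;
}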
+
+class Relocation {
+ public:
+ enum Kind {
+ // The target is immovable, so patching is only needed if the source
+ // buffer is relocated and the reference is relative.
+ HARDCODED,
+
+ // The target is the start of a JitCode buffer, which must be traced
+ // during garbage collection. Relocations and patching may be needed.
+ JITCODE
+ };
+};
+
+class RepatchLabel
+{
+ static const int32_t INVALID_OFFSET = 0xC0000000;
+ int32_t offset_ : 31;
+ uint32_t bound_ : 1;
+ public:
+
+ RepatchLabel() : offset_(INVALID_OFFSET), bound_(0) {}
+
+ void use(uint32_t newOffset) {
+ MOZ_ASSERT(offset_ == INVALID_OFFSET);
+ MOZ_ASSERT(newOffset != (uint32_t)INVALID_OFFSET);
+ offset_ = newOffset;
+ }
+ bool bound() const {
+ return bound_;
+ }
+ void bind(int32_t dest) {
+ MOZ_ASSERT(!bound_);
+ MOZ_ASSERT(dest != INVALID_OFFSET);
+ offset_ = dest;
+ bound_ = true;
+ }
+ int32_t target() {
+ MOZ_ASSERT(bound());
+ int32_t ret = offset_;
+ offset_ = INVALID_OFFSET;
+ return ret;
+ }
+ int32_t offset() {
+ MOZ_ASSERT(!bound());
+ return offset_;
+ }
+ bool used() const {
+ return !bound() && offset_ != (INVALID_OFFSET);
+ }
+};
+
+// An absolute label is like a Label, except it represents an absolute
+// reference rather than a relative one. Thus, it cannot be patched until after
+// linking.
+struct AbsoluteLabel : public LabelBase
+{
+ public:
+ AbsoluteLabel()
+ { }
+ AbsoluteLabel(const AbsoluteLabel& label) : LabelBase(label)
+ { }
+ int32_t prev() const {
+ MOZ_ASSERT(!bound());
+ if (!used())
+ return INVALID_OFFSET;
+ return offset();
+ }
+ void setPrev(int32_t offset) {
+ use(offset);
+ }
+ void bind() {
+ bound_ = true;
+
+ // These labels cannot be used after being bound.
+ offset_ = -1;
+ }
+};
+
+class CodeOffset
+{
+ size_t offset_;
+
+ static const size_t NOT_BOUND = size_t(-1);
+
+ public:
+ explicit CodeOffset(size_t offset) : offset_(offset) {}
+ CodeOffset() : offset_(NOT_BOUND) {}
+
+ size_t offset() const {
+ MOZ_ASSERT(bound());
+ return offset_;
+ }
+
+ void bind(size_t offset) {
+ MOZ_ASSERT(!bound());
+ offset_ = offset;
+ MOZ_ASSERT(bound());
+ }
+ bool bound() const {
+ return offset_ != NOT_BOUND;
+ }
+
+ void offsetBy(size_t delta) {
+ MOZ_ASSERT(bound());
+ MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
+ offset_ += delta;
+ }
+};
+
+// A code label contains an absolute reference to a point in the code. Thus, it
+// cannot be patched until after linking.
+// When the source label is resolved into a memory address, this address is
+// patched into the destination address.
+class CodeLabel
+{
+ // The destination position, where the absolute reference should get
+ // patched into.
+ CodeOffset patchAt_;
+
+ // The source label (relative) in the code to where the destination should
+ // get patched to.
+ CodeOffset target_;
+
+ public:
+ CodeLabel()
+ { }
+ explicit CodeLabel(const CodeOffset& patchAt)
+ : patchAt_(patchAt)
+ { }
+ CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
+ : patchAt_(patchAt),
+ target_(target)
+ { }
+ CodeOffset* patchAt() {
+ return &patchAt_;
+ }
+ CodeOffset* target() {
+ return &target_;
+ }
+ void offsetBy(size_t delta) {
+ patchAt_.offsetBy(delta);
+ target_.offsetBy(delta);
+ }
+};
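A CodeLabel records two buffer offsets: where an absolute reference must be written (patchAt) and what it should refer to (target); once the final code buffer exists, the target's absolute address is written into the patch site. A toy version of that bind-then-patch flow, with a pointer-sized patch slot chosen purely for illustration:

// Toy bind-then-patch flow over a byte buffer standing in for generated code.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct ToyCodeLabel {
    size_t patchAt = SIZE_MAX;   // offset of the pointer-sized slot to patch
    size_t target  = SIZE_MAX;   // offset the slot should point at
};

static void Patch(std::vector<uint8_t>& code, const ToyCodeLabel& label) {
    // Resolve the relative target offset to an absolute address and write it
    // into the patch site. Pointer-sized slots are an assumption of this toy.
    uintptr_t absolute = reinterpret_cast<uintptr_t>(code.data()) + label.target;
    std::memcpy(code.data() + label.patchAt, &absolute, sizeof(absolute));
}

int main() {
    std::vector<uint8_t> code(32, 0);
    ToyCodeLabel label;
    label.patchAt = 8;    // a placeholder was "emitted" at offset 8
    label.target  = 24;   // the code it should refer to starts at offset 24
    Patch(code, label);

    uintptr_t written;
    std::memcpy(&written, code.data() + 8, sizeof(written));
    assert(written == reinterpret_cast<uintptr_t>(code.data()) + 24);
    return 0;
}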
+
+// Location of a jump or label in a generated JitCode block, relative to the
+// start of the block.
+
+class CodeOffsetJump
+{
+ size_t offset_;
+
+#ifdef JS_SMALL_BRANCH
+ size_t jumpTableIndex_;
+#endif
+
+ public:
+
+#ifdef JS_SMALL_BRANCH
+ CodeOffsetJump(size_t offset, size_t jumpTableIndex)
+ : offset_(offset), jumpTableIndex_(jumpTableIndex)
+ {}
+ size_t jumpTableIndex() const {
+ return jumpTableIndex_;
+ }
+#else
+ explicit CodeOffsetJump(size_t offset) : offset_(offset) {}
+#endif
+
+ CodeOffsetJump() {
+ mozilla::PodZero(this);
+ }
+
+ size_t offset() const {
+ return offset_;
+ }
+ void fixup(MacroAssembler* masm);
+};
+
+// Absolute location of a jump or a label in some generated JitCode block.
+// Can also encode a CodeOffset{Jump,Label}, such that the offset is initially
+// set and the absolute location later filled in after the final JitCode is
+// allocated.
+
+class CodeLocationJump
+{
+ uint8_t* raw_;
+#ifdef DEBUG
+ enum State { Uninitialized, Absolute, Relative };
+ State state_;
+ void setUninitialized() {
+ state_ = Uninitialized;
+ }
+ void setAbsolute() {
+ state_ = Absolute;
+ }
+ void setRelative() {
+ state_ = Relative;
+ }
+#else
+ void setUninitialized() const {
+ }
+ void setAbsolute() const {
+ }
+ void setRelative() const {
+ }
+#endif
+
+#ifdef JS_SMALL_BRANCH
+ uint8_t* jumpTableEntry_;
+#endif
+
+ public:
+ CodeLocationJump() {
+ raw_ = nullptr;
+ setUninitialized();
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntry_ = (uint8_t*) uintptr_t(0xdeadab1e);
+#endif
+ }
+ CodeLocationJump(JitCode* code, CodeOffsetJump base) {
+ *this = base;
+ repoint(code);
+ }
+
+ void operator = (CodeOffsetJump base) {
+ raw_ = (uint8_t*) base.offset();
+ setRelative();
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntry_ = (uint8_t*) base.jumpTableIndex();
+#endif
+ }
+
+ void repoint(JitCode* code, MacroAssembler* masm = nullptr);
+
+ uint8_t* raw() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return raw_;
+ }
+ uint8_t* offset() const {
+ MOZ_ASSERT(state_ == Relative);
+ return raw_;
+ }
+
+#ifdef JS_SMALL_BRANCH
+ uint8_t* jumpTableEntry() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return jumpTableEntry_;
+ }
+#endif
+};
+
+class CodeLocationLabel
+{
+ uint8_t* raw_;
+#ifdef DEBUG
+ enum State { Uninitialized, Absolute, Relative };
+ State state_;
+ void setUninitialized() {
+ state_ = Uninitialized;
+ }
+ void setAbsolute() {
+ state_ = Absolute;
+ }
+ void setRelative() {
+ state_ = Relative;
+ }
+#else
+ void setUninitialized() const {
+ }
+ void setAbsolute() const {
+ }
+ void setRelative() const {
+ }
+#endif
+
+ public:
+ CodeLocationLabel() {
+ raw_ = nullptr;
+ setUninitialized();
+ }
+ CodeLocationLabel(JitCode* code, CodeOffset base) {
+ *this = base;
+ repoint(code);
+ }
+ explicit CodeLocationLabel(JitCode* code) {
+ raw_ = code->raw();
+ setAbsolute();
+ }
+ explicit CodeLocationLabel(uint8_t* raw) {
+ raw_ = raw;
+ setAbsolute();
+ }
+
+ void operator = (CodeOffset base) {
+ raw_ = (uint8_t*)base.offset();
+ setRelative();
+ }
+ ptrdiff_t operator - (const CodeLocationLabel& other) {
+ return raw_ - other.raw_;
+ }
+
+ void repoint(JitCode* code, MacroAssembler* masm = nullptr);
+
+#ifdef DEBUG
+ bool isSet() const {
+ return state_ != Uninitialized;
+ }
+#endif
+
+ uint8_t* raw() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return raw_;
+ }
+ uint8_t* offset() const {
+ MOZ_ASSERT(state_ == Relative);
+ return raw_;
+ }
+};
+
+} // namespace jit
+
+namespace wasm {
+
+// As an invariant across architectures, within wasm code:
+// $sp % WasmStackAlignment = (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment
+// Thus, wasm::Frame represents the bytes pushed after the call (which occurred
+// with a WasmStackAlignment-aligned StackPointer) that are not included in
+// masm.framePushed.
+
+struct Frame
+{
+ // The caller's saved frame pointer. In non-profiling mode, internal
+ // wasm-to-wasm calls don't update fp and thus don't save the caller's
+ // frame pointer; the space is reserved, however, so that profiling mode can
+ // reuse the same function body without recompiling.
+ uint8_t* callerFP;
+
+ // The return address pushed by the call (in the case of ARM/MIPS the return
+ // address is pushed by the first instruction of the prologue).
+ void* returnAddress;
+};
+
+static_assert(sizeof(Frame) == 2 * sizeof(void*), "?!");
+static const uint32_t FrameBytesAfterReturnAddress = sizeof(void*);
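One way to read the invariant above: everything pushed since the last WasmStackAlignment-aligned stack pointer is exactly sizeof(Frame) plus masm.framePushed, so adding those bytes back to the live stack pointer restores alignment. A numeric check under assumed values (16-byte alignment, arbitrary starting sp):

// Check that sp + sizeof(Frame) + framePushed is alignment-aligned again.
// Alignment and the starting sp are illustrative values, not target constants.
#include <cassert>
#include <cstdint>

int main() {
    const uintptr_t Alignment   = 16;          // assumed WasmStackAlignment
    const uintptr_t FrameBytes  = 2 * sizeof(void*);
    const uintptr_t alignedSp   = 0x7fff0000;  // aligned sp at the call site
    const uintptr_t framePushed = 40;          // bytes pushed beyond Frame

    uintptr_t sp = alignedSp - FrameBytes - framePushed;
    assert((sp + FrameBytes + framePushed) % Alignment == 0);
    return 0;
}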
+
+// Represents an instruction to be patched and the intended pointee. These
+// links are accumulated in the MacroAssembler, but patching is done outside
+// the MacroAssembler (in Module::staticallyLink).
+
+struct SymbolicAccess
+{
+ SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
+ : patchAt(patchAt), target(target) {}
+
+ jit::CodeOffset patchAt;
+ SymbolicAddress target;
+};
+
+typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
+
+// Describes a single wasm or asm.js memory access for the purpose of generating
+// code and metadata.
+
+class MemoryAccessDesc
+{
+ uint32_t offset_;
+ uint32_t align_;
+ Scalar::Type type_;
+ unsigned numSimdElems_;
+ jit::MemoryBarrierBits barrierBefore_;
+ jit::MemoryBarrierBits barrierAfter_;
+ mozilla::Maybe<wasm::TrapOffset> trapOffset_;
+
+ public:
+ explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
+ mozilla::Maybe<TrapOffset> trapOffset,
+ unsigned numSimdElems = 0,
+ jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
+ jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
+ : offset_(offset),
+ align_(align),
+ type_(type),
+ numSimdElems_(numSimdElems),
+ barrierBefore_(barrierBefore),
+ barrierAfter_(barrierAfter),
+ trapOffset_(trapOffset)
+ {
+ MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));
+ MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ MOZ_ASSERT_IF(isSimd(), hasTrap());
+ MOZ_ASSERT_IF(isAtomic(), hasTrap());
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t align() const { return align_; }
+ Scalar::Type type() const { return type_; }
+ unsigned byteSize() const {
+ return Scalar::isSimdType(type())
+ ? Scalar::scalarByteSize(type()) * numSimdElems()
+ : Scalar::byteSize(type());
+ }
+ unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
+ jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+ jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
+ bool hasTrap() const { return !!trapOffset_; }
+ TrapOffset trapOffset() const { return *trapOffset_; }
+ bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
+ bool isSimd() const { return Scalar::isSimdType(type_); }
+ bool isUnaligned() const { return align() && align() < byteSize(); }
+ bool isPlainAsmJS() const { return !hasTrap(); }
+
+ void clearOffset() { offset_ = 0; }
+};
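For scalar accesses, byteSize() and isUnaligned() are simple arithmetic on the element width. A standalone sketch over a local ScalarType enum, with the SIMD element-count case omitted:

// Access-size arithmetic mirrored for the scalar case: an access is
// "unaligned" when a declared alignment is smaller than the access width.
#include <cassert>

enum class ScalarType { Int8, Int16, Int32, Int64, Float32, Float64 };

static unsigned ByteSize(ScalarType t) {
    switch (t) {
      case ScalarType::Int8:    return 1;
      case ScalarType::Int16:   return 2;
      case ScalarType::Int32:
      case ScalarType::Float32: return 4;
      case ScalarType::Int64:
      case ScalarType::Float64: return 8;
    }
    return 0;  // unreachable
}

static bool IsUnaligned(ScalarType t, unsigned align) {
    // align == 0 means "natural alignment assumed" in this sketch.
    return align && align < ByteSize(t);
}

int main() {
    assert(ByteSize(ScalarType::Float64) == 8);
    assert(IsUnaligned(ScalarType::Int32, 1));    // 1-byte-aligned 4-byte access
    assert(!IsUnaligned(ScalarType::Int32, 4));   // naturally aligned
    return 0;
}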
+
+// Summarizes a global access for a mutable (in asm.js) or immutable value (in
+// asm.js or the wasm MVP) that needs to get patched later.
+
+struct GlobalAccess
+{
+ GlobalAccess(jit::CodeOffset patchAt, unsigned globalDataOffset)
+ : patchAt(patchAt), globalDataOffset(globalDataOffset)
+ {}
+
+ jit::CodeOffset patchAt;
+ unsigned globalDataOffset;
+};
+
+typedef Vector<GlobalAccess, 0, SystemAllocPolicy> GlobalAccessVector;
+
+// The TrapDesc struct describes a wasm trap that is about to be emitted. This
+// includes the logical wasm bytecode offset to report, the kind of instruction
+// causing the trap, and the stack depth right before control is transferred to
+// the trap out-of-line path.
+
+struct TrapDesc : TrapOffset
+{
+ enum Kind { Jump, MemoryAccess };
+ Kind kind;
+ Trap trap;
+ uint32_t framePushed;
+
+ TrapDesc(TrapOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
+ : TrapOffset(offset), kind(kind), trap(trap), framePushed(framePushed)
+ {}
+};
+
+// A TrapSite captures all relevant information at the point of emitting the
+// in-line trapping instruction for the purpose of generating the out-of-line
+// trap code (at the end of the function).
+
+struct TrapSite : TrapDesc
+{
+ uint32_t codeOffset;
+
+ TrapSite(TrapDesc trap, uint32_t codeOffset)
+ : TrapDesc(trap), codeOffset(codeOffset)
+ {}
+};
+
+typedef Vector<TrapSite, 0, SystemAllocPolicy> TrapSiteVector;
+
+// A TrapFarJump records the offset of a jump that needs to be patched to a trap
+// exit at the end of the module when trap exits are emitted.
+
+struct TrapFarJump
+{
+ Trap trap;
+ jit::CodeOffset jump;
+
+ TrapFarJump(Trap trap, jit::CodeOffset jump)
+ : trap(trap), jump(jump)
+ {}
+
+ void offsetBy(size_t delta) {
+ jump.offsetBy(delta);
+ }
+};
+
+typedef Vector<TrapFarJump, 0, SystemAllocPolicy> TrapFarJumpVector;
+
+} // namespace wasm
+
+namespace jit {
+
+// The base class of all Assemblers for all archs.
+class AssemblerShared
+{
+ wasm::CallSiteAndTargetVector callSites_;
+ wasm::TrapSiteVector trapSites_;
+ wasm::TrapFarJumpVector trapFarJumps_;
+ wasm::MemoryAccessVector memoryAccesses_;
+ wasm::MemoryPatchVector memoryPatches_;
+ wasm::BoundsCheckVector boundsChecks_;
+ wasm::GlobalAccessVector globalAccesses_;
+ wasm::SymbolicAccessVector symbolicAccesses_;
+
+ protected:
+ Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
+
+ bool enoughMemory_;
+ bool embedsNurseryPointers_;
+
+ public:
+ AssemblerShared()
+ : enoughMemory_(true),
+ embedsNurseryPointers_(false)
+ {}
+
+ void propagateOOM(bool success) {
+ enoughMemory_ &= success;
+ }
+
+ void setOOM() {
+ enoughMemory_ = false;
+ }
+
+ bool oom() const {
+ return !enoughMemory_;
+ }
+
+ bool embedsNurseryPointers() const {
+ return embedsNurseryPointers_;
+ }
+
+ template <typename... Args>
+ void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
+ Args&&... args)
+ {
+        // framePushed does not include sizeof(wasm::Frame), so add it in explicitly when
+ // setting the CallSite::stackDepth.
+ wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(wasm::Frame));
+ enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward<Args>(args)...);
+ }
+ wasm::CallSiteAndTargetVector& callSites() { return callSites_; }
+
+ void append(wasm::TrapSite trapSite) {
+ enoughMemory_ &= trapSites_.append(trapSite);
+ }
+ const wasm::TrapSiteVector& trapSites() const { return trapSites_; }
+ void clearTrapSites() { trapSites_.clear(); }
+
+ void append(wasm::TrapFarJump jmp) {
+ enoughMemory_ &= trapFarJumps_.append(jmp);
+ }
+ const wasm::TrapFarJumpVector& trapFarJumps() const { return trapFarJumps_; }
+
+ void append(wasm::MemoryAccess access) { enoughMemory_ &= memoryAccesses_.append(access); }
+ wasm::MemoryAccessVector&& extractMemoryAccesses() { return Move(memoryAccesses_); }
+
+ void append(const wasm::MemoryAccessDesc& access, size_t codeOffset, size_t framePushed) {
+ if (access.hasTrap()) {
+ // If a memory access is trapping (wasm, SIMD.js, Atomics), create a
+ // TrapSite now which will generate a trap out-of-line path at the end
+ // of the function which will *then* append a MemoryAccess.
+ wasm::TrapDesc trap(access.trapOffset(), wasm::Trap::OutOfBounds, framePushed,
+ wasm::TrapSite::MemoryAccess);
+ append(wasm::TrapSite(trap, codeOffset));
+ } else {
+ // Otherwise, this is a plain asm.js access. On WASM_HUGE_MEMORY
+ // platforms, asm.js uses signal handlers to remove bounds checks
+ // and thus requires a MemoryAccess.
+ MOZ_ASSERT(access.isPlainAsmJS());
+#ifdef WASM_HUGE_MEMORY
+ append(wasm::MemoryAccess(codeOffset));
+#endif
+ }
+ }
+
+ void append(wasm::MemoryPatch patch) { enoughMemory_ &= memoryPatches_.append(patch); }
+ wasm::MemoryPatchVector&& extractMemoryPatches() { return Move(memoryPatches_); }
+
+ void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
+ wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
+
+ void append(wasm::GlobalAccess access) { enoughMemory_ &= globalAccesses_.append(access); }
+ const wasm::GlobalAccessVector& globalAccesses() const { return globalAccesses_; }
+
+ void append(wasm::SymbolicAccess access) { enoughMemory_ &= symbolicAccesses_.append(access); }
+ size_t numSymbolicAccesses() const { return symbolicAccesses_.length(); }
+ wasm::SymbolicAccess symbolicAccess(size_t i) const { return symbolicAccesses_[i]; }
+
+ static bool canUseInSingleByteInstruction(Register reg) { return true; }
+
+ void addCodeLabel(CodeLabel label) {
+ propagateOOM(codeLabels_.append(label));
+ }
+ size_t numCodeLabels() const {
+ return codeLabels_.length();
+ }
+ CodeLabel codeLabel(size_t i) {
+ return codeLabels_[i];
+ }
+
+    // Merge the other assembler into this one by appending its data and
+    // shifting all of its offsets by delta; the other assembler is left invalid.
+ bool asmMergeWith(size_t delta, const AssemblerShared& other) {
+ size_t i = callSites_.length();
+ enoughMemory_ &= callSites_.appendAll(other.callSites_);
+ for (; i < callSites_.length(); i++)
+ callSites_[i].offsetReturnAddressBy(delta);
+
+ MOZ_ASSERT(other.trapSites_.empty(), "should have been cleared by wasmEmitTrapOutOfLineCode");
+
+ i = trapFarJumps_.length();
+ enoughMemory_ &= trapFarJumps_.appendAll(other.trapFarJumps_);
+ for (; i < trapFarJumps_.length(); i++)
+ trapFarJumps_[i].offsetBy(delta);
+
+ i = memoryAccesses_.length();
+ enoughMemory_ &= memoryAccesses_.appendAll(other.memoryAccesses_);
+ for (; i < memoryAccesses_.length(); i++)
+ memoryAccesses_[i].offsetBy(delta);
+
+ i = memoryPatches_.length();
+ enoughMemory_ &= memoryPatches_.appendAll(other.memoryPatches_);
+ for (; i < memoryPatches_.length(); i++)
+ memoryPatches_[i].offsetBy(delta);
+
+ i = boundsChecks_.length();
+ enoughMemory_ &= boundsChecks_.appendAll(other.boundsChecks_);
+ for (; i < boundsChecks_.length(); i++)
+ boundsChecks_[i].offsetBy(delta);
+
+ i = globalAccesses_.length();
+ enoughMemory_ &= globalAccesses_.appendAll(other.globalAccesses_);
+ for (; i < globalAccesses_.length(); i++)
+ globalAccesses_[i].patchAt.offsetBy(delta);
+
+ i = symbolicAccesses_.length();
+ enoughMemory_ &= symbolicAccesses_.appendAll(other.symbolicAccesses_);
+ for (; i < symbolicAccesses_.length(); i++)
+ symbolicAccesses_[i].patchAt.offsetBy(delta);
+
+ i = codeLabels_.length();
+ enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
+ for (; i < codeLabels_.length(); i++)
+ codeLabels_[i].offsetBy(delta);
+
+ return !oom();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Assembler_shared_h */
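AssemblerShared folds every fallible append into the sticky enoughMemory_ flag so that code can emit freely and check oom() once at the end. A minimal sketch of that accumulate-then-check pattern; the fallible append here is simulated, since std::vector does not actually report allocation failure this way:

// Accumulate-OOM pattern: failures only clear a sticky flag, and the caller
// checks oom() once instead of after every append.
#include <cassert>
#include <vector>

class Recorder {
    std::vector<int> entries_;
    bool enoughMemory_ = true;

    // Stand-in for a fallible container append; failure is simulated.
    bool fakeFallibleAppend(int v, bool simulateOOM) {
        if (simulateOOM)
            return false;
        entries_.push_back(v);
        return true;
    }

  public:
    void append(int v, bool simulateOOM = false) {
        enoughMemory_ &= fakeFallibleAppend(v, simulateOOM);
    }
    bool oom() const { return !enoughMemory_; }
    size_t length() const { return entries_.size(); }
};

int main() {
    Recorder r;
    r.append(1);
    r.append(2);
    assert(!r.oom() && r.length() == 2);

    r.append(3, /*simulateOOM=*/true);  // failure is latched, not reported here
    r.append(4);                        // later successes don't clear the flag
    assert(r.oom());
    return 0;
}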
diff --git a/js/src/jit/shared/BaselineCompiler-shared.cpp b/js/src/jit/shared/BaselineCompiler-shared.cpp
new file mode 100644
index 000000000..5342eeb3f
--- /dev/null
+++ b/js/src/jit/shared/BaselineCompiler-shared.cpp
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+#include "jit/BaselineIC.h"
+#include "jit/VMFunctions.h"
+
+#include "jsscriptinlines.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerShared::BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : cx(cx),
+ script(script),
+ pc(script->code()),
+ ionCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, false)),
+ ionOSRCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, true)),
+ compileDebugInstrumentation_(script->isDebuggee()),
+ alloc_(alloc),
+ analysis_(alloc, script),
+ frame(script, masm),
+ stubSpace_(),
+ icEntries_(),
+ pcMappingEntries_(),
+ icLoadLabels_(),
+ pushedBeforeCall_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ spsPushToggleOffset_(),
+ profilerEnterFrameToggleOffset_(),
+ profilerExitFrameToggleOffset_(),
+ traceLoggerToggleOffsets_(cx),
+ traceLoggerScriptTextIdOffset_()
+{ }
+
+void
+BaselineCompilerShared::prepareVMCall()
+{
+ pushedBeforeCall_ = masm.framePushed();
+#ifdef DEBUG
+ inCall_ = true;
+#endif
+
+ // Ensure everything is synced.
+ frame.syncStack(0);
+
+ // Save the frame pointer.
+ masm.Push(BaselineFrameReg);
+}
+
+bool
+BaselineCompilerShared::callVM(const VMFunction& fun, CallVMPhase phase)
+{
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+ if (!code)
+ return false;
+
+#ifdef DEBUG
+ // Assert prepareVMCall() has been called.
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+
+ // Assert the frame does not have an override pc when we're executing JIT code.
+ {
+ Label ok;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_OVERRIDE_PC), &ok);
+ masm.assumeUnreachable("BaselineFrame shouldn't override pc when executing JIT code");
+ masm.bind(&ok);
+ }
+#endif
+
+    // Compute argument size. Note that this includes the size of the frame pointer
+ // pushed by prepareVMCall.
+ uint32_t argSize = fun.explicitStackSlots() * sizeof(void*) + sizeof(void*);
+
+ // Assert all arguments were pushed.
+ MOZ_ASSERT(masm.framePushed() - pushedBeforeCall_ == argSize);
+
+ Address frameSizeAddress(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize());
+ uint32_t frameVals = frame.nlocals() + frame.stackDepth();
+ uint32_t frameBaseSize = BaselineFrame::FramePointerOffset + BaselineFrame::Size();
+ uint32_t frameFullSize = frameBaseSize + (frameVals * sizeof(Value));
+ if (phase == POST_INITIALIZE) {
+ masm.store32(Imm32(frameFullSize), frameSizeAddress);
+ uint32_t descriptor = MakeFrameDescriptor(frameFullSize + argSize, JitFrame_BaselineJS,
+ ExitFrameLayout::Size());
+ masm.push(Imm32(descriptor));
+
+ } else if (phase == PRE_INITIALIZE) {
+ masm.store32(Imm32(frameBaseSize), frameSizeAddress);
+ uint32_t descriptor = MakeFrameDescriptor(frameBaseSize + argSize, JitFrame_BaselineJS,
+ ExitFrameLayout::Size());
+ masm.push(Imm32(descriptor));
+
+ } else {
+ MOZ_ASSERT(phase == CHECK_OVER_RECURSED);
+ Label afterWrite;
+ Label writePostInitialize;
+
+ // If OVER_RECURSED is set, then frame locals haven't been pushed yet.
+ masm.branchTest32(Assembler::Zero,
+ frame.addressOfFlags(),
+ Imm32(BaselineFrame::OVER_RECURSED),
+ &writePostInitialize);
+
+ masm.move32(Imm32(frameBaseSize), ICTailCallReg);
+ masm.jump(&afterWrite);
+
+ masm.bind(&writePostInitialize);
+ masm.move32(Imm32(frameFullSize), ICTailCallReg);
+
+ masm.bind(&afterWrite);
+ masm.store32(ICTailCallReg, frameSizeAddress);
+ masm.add32(Imm32(argSize), ICTailCallReg);
+ masm.makeFrameDescriptor(ICTailCallReg, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(ICTailCallReg);
+ }
+ MOZ_ASSERT(fun.expectTailCall == NonTailCall);
+ // Perform the call.
+ masm.call(code);
+ uint32_t callOffset = masm.currentOffset();
+ masm.pop(BaselineFrameReg);
+
+#ifdef DEBUG
+ // Assert the frame does not have an override pc when we're executing JIT code.
+ {
+ Label ok;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_OVERRIDE_PC), &ok);
+ masm.assumeUnreachable("BaselineFrame shouldn't override pc after VM call");
+ masm.bind(&ok);
+ }
+#endif
+
+ // Add a fake ICEntry (without stubs), so that the return offset to
+ // pc mapping works.
+ return appendICEntry(ICEntry::Kind_CallVM, callOffset);
+}
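The descriptor pushed by callVM packs the total frame size (the baseline frame plus the VM call's argument area, which is the VMFunction's explicit slots plus the saved frame pointer) together with a frame-type tag. A sketch of that arithmetic with an assumed bit layout; the real MakeFrameDescriptor encoding is defined elsewhere in the jit headers:

// Descriptor arithmetic with an illustrative packing: size shifted past a
// 4-bit frame-type field. The constants below are examples, not real values.
#include <cassert>
#include <cstdint>

static uint32_t MakeDescriptor(uint32_t frameSize, uint32_t frameType) {
    return (frameSize << 4) | (frameType & 0xf);
}

int main() {
    const uint32_t ptrSize            = sizeof(void*);
    const uint32_t explicitStackSlots = 3;                         // from the VMFunction
    const uint32_t argSize            = explicitStackSlots * ptrSize + ptrSize;  // + saved fp

    const uint32_t frameBaseSize = 64;                             // header + fixed slots
    const uint32_t frameVals     = 5;                              // locals + stack depth
    const uint32_t frameFullSize = frameBaseSize + frameVals * 8;  // Values are 8 bytes

    uint32_t descriptor = MakeDescriptor(frameFullSize + argSize, /*frameType=*/1);
    assert((descriptor >> 4) == frameFullSize + argSize);
    assert((descriptor & 0xf) == 1);
    return 0;
}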
diff --git a/js/src/jit/shared/BaselineCompiler-shared.h b/js/src/jit/shared/BaselineCompiler-shared.h
new file mode 100644
index 000000000..7d1402a9d
--- /dev/null
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_BaselineCompiler_shared_h
+#define jit_shared_BaselineCompiler_shared_h
+
+#include "jit/BaselineFrameInfo.h"
+#include "jit/BaselineIC.h"
+#include "jit/BytecodeAnalysis.h"
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerShared
+{
+ protected:
+ JSContext* cx;
+ JSScript* script;
+ jsbytecode* pc;
+ MacroAssembler masm;
+ bool ionCompileable_;
+ bool ionOSRCompileable_;
+ bool compileDebugInstrumentation_;
+
+ TempAllocator& alloc_;
+ BytecodeAnalysis analysis_;
+ FrameInfo frame;
+
+ FallbackICStubSpace stubSpace_;
+ js::Vector<BaselineICEntry, 16, SystemAllocPolicy> icEntries_;
+
+ // Stores the native code offset for a bytecode pc.
+ struct PCMappingEntry
+ {
+ uint32_t pcOffset;
+ uint32_t nativeOffset;
+ PCMappingSlotInfo slotInfo;
+
+ // If set, insert a PCMappingIndexEntry before encoding the
+ // current entry.
+ bool addIndexEntry;
+ };
+
+ js::Vector<PCMappingEntry, 16, SystemAllocPolicy> pcMappingEntries_;
+
+ // Labels for the 'movWithPatch' for loading IC entry pointers in
+ // the generated IC-calling code in the main jitcode. These need
+ // to be patched with the actual icEntry offsets after the BaselineScript
+ // has been allocated.
+ struct ICLoadLabel {
+ size_t icEntry;
+ CodeOffset label;
+ };
+ js::Vector<ICLoadLabel, 16, SystemAllocPolicy> icLoadLabels_;
+
+ uint32_t pushedBeforeCall_;
+#ifdef DEBUG
+ bool inCall_;
+#endif
+
+ CodeOffset spsPushToggleOffset_;
+ CodeOffset profilerEnterFrameToggleOffset_;
+ CodeOffset profilerExitFrameToggleOffset_;
+
+ Vector<CodeOffset> traceLoggerToggleOffsets_;
+ CodeOffset traceLoggerScriptTextIdOffset_;
+
+ BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
+
+ BaselineICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) {
+ if (!stub)
+ return nullptr;
+
+ // Create the entry and add it to the vector.
+ if (!icEntries_.append(BaselineICEntry(script->pcToOffset(pc), kind))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ BaselineICEntry& vecEntry = icEntries_.back();
+
+ // Set the first stub for the IC entry to the fallback stub
+ vecEntry.setFirstStub(stub);
+
+ // Return pointer to the IC entry
+ return &vecEntry;
+ }
+
+ // Append an ICEntry without a stub.
+ bool appendICEntry(ICEntry::Kind kind, uint32_t returnOffset) {
+ BaselineICEntry entry(script->pcToOffset(pc), kind);
+ entry.setReturnOffset(CodeOffset(returnOffset));
+ if (!icEntries_.append(entry)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ bool addICLoadLabel(CodeOffset label) {
+ MOZ_ASSERT(!icEntries_.empty());
+ ICLoadLabel loadLabel;
+ loadLabel.label = label;
+ loadLabel.icEntry = icEntries_.length() - 1;
+ if (!icLoadLabels_.append(loadLabel)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ JSFunction* function() const {
+ // Not delazifying here is ok as the function is guaranteed to have
+ // been delazified before compilation started.
+ return script->functionNonDelazifying();
+ }
+
+ ModuleObject* module() const {
+ return script->module();
+ }
+
+ PCMappingSlotInfo getStackTopSlotInfo() {
+ MOZ_ASSERT(frame.numUnsyncedSlots() <= 2);
+ switch (frame.numUnsyncedSlots()) {
+ case 0:
+ return PCMappingSlotInfo::MakeSlotInfo();
+ case 1:
+ return PCMappingSlotInfo::MakeSlotInfo(PCMappingSlotInfo::ToSlotLocation(frame.peek(-1)));
+ case 2:
+ default:
+ return PCMappingSlotInfo::MakeSlotInfo(PCMappingSlotInfo::ToSlotLocation(frame.peek(-1)),
+ PCMappingSlotInfo::ToSlotLocation(frame.peek(-2)));
+ }
+ }
+
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+ }
+ void prepareVMCall();
+
+ enum CallVMPhase {
+ POST_INITIALIZE,
+ PRE_INITIALIZE,
+ CHECK_OVER_RECURSED
+ };
+ bool callVM(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE);
+
+ bool callVMNonOp(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE) {
+ if (!callVM(fun, phase))
+ return false;
+ icEntries_.back().setFakeKind(ICEntry::Kind_NonOpCallVM);
+ return true;
+ }
+
+ public:
+ BytecodeAnalysis& analysis() {
+ return analysis_;
+ }
+
+ void setCompileDebugInstrumentation() {
+ compileDebugInstrumentation_ = true;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_BaselineCompiler_shared_h */
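addICLoadLabel ties each patchable IC-entry load to the most recently appended IC entry by index, so the loads can be patched once the BaselineScript and its entry array have been allocated. A simplified sketch of that bookkeeping with stand-in types:

// Each patch label records the index of the last appended entry plus the
// code offset to patch later. Types here are simplified stand-ins.
#include <cassert>
#include <cstddef>
#include <vector>

struct Entry     { unsigned pcOffset; };
struct LoadLabel { size_t icEntry; size_t codeOffset; };

struct Compiler {
    std::vector<Entry>     icEntries;
    std::vector<LoadLabel> icLoadLabels;

    void appendICEntry(unsigned pcOffset) { icEntries.push_back({pcOffset}); }

    void addICLoadLabel(size_t codeOffset) {
        assert(!icEntries.empty());               // label refers to the last entry
        icLoadLabels.push_back({icEntries.size() - 1, codeOffset});
    }
};

int main() {
    Compiler c;
    c.appendICEntry(0);
    c.addICLoadLabel(0x10);      // patch site for entry 0
    c.appendICEntry(4);
    c.addICLoadLabel(0x40);      // patch site for entry 1
    assert(c.icLoadLabels[1].icEntry == 1);
    return 0;
}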
diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
new file mode 100644
index 000000000..662e2fa5d
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -0,0 +1,437 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_inl_h
+#define jit_shared_CodeGenerator_shared_inl_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+#include "jit/Disassembler.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+static inline bool
+IsConstant(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue())
+ return true;
+ if (a.high().isConstantIndex())
+ return true;
+#else
+ if (a.value().isConstantValue())
+ return true;
+ if (a.value().isConstantIndex())
+ return true;
+#endif
+ return false;
+}
+
+static inline int32_t
+ToInt32(const LAllocation* a)
+{
+ if (a->isConstantValue())
+ return a->toConstant()->toInt32();
+ if (a->isConstantIndex())
+ return a->toConstantIndex()->index();
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t
+ToInt64(const LAllocation* a)
+{
+ if (a->isConstantValue())
+ return a->toConstant()->toInt64();
+ if (a->isConstantIndex())
+ return a->toConstantIndex()->index();
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t
+ToInt64(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue())
+ return a.high().toConstant()->toInt64();
+ if (a.high().isConstantIndex())
+ return a.high().toConstantIndex()->index();
+#else
+ if (a.value().isConstantValue())
+ return a.value().toConstant()->toInt64();
+ if (a.value().isConstantIndex())
+ return a.value().toConstantIndex()->index();
+#endif
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline double
+ToDouble(const LAllocation* a)
+{
+ return a->toConstant()->numberToDouble();
+}
+
+static inline Register
+ToRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isGeneralReg());
+ return a.toGeneralReg()->reg();
+}
+
+static inline Register
+ToRegister(const LAllocation* a)
+{
+ return ToRegister(*a);
+}
+
+static inline Register
+ToRegister(const LDefinition* def)
+{
+ return ToRegister(*def->output());
+}
+
+static inline Register64
+ToOutRegister64(LInstruction* ins)
+{
+#if JS_BITS_PER_WORD == 32
+ Register loReg = ToRegister(ins->getDef(INT64LOW_INDEX));
+ Register hiReg = ToRegister(ins->getDef(INT64HIGH_INDEX));
+ return Register64(hiReg, loReg);
+#else
+ return Register64(ToRegister(ins->getDef(0)));
+#endif
+}
+
+static inline Register64
+ToRegister64(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.high()), ToRegister(a.low()));
+#else
+ return Register64(ToRegister(a.value()));
+#endif
+}
+
+static inline Register
+ToTempRegisterOrInvalid(const LDefinition* def)
+{
+ if (def->isBogusTemp())
+ return InvalidReg;
+ return ToRegister(def);
+}
+
+static inline Register
+ToTempUnboxRegister(const LDefinition* def)
+{
+ return ToTempRegisterOrInvalid(def);
+}
+
+static inline Register
+ToRegisterOrInvalid(const LDefinition* a)
+{
+ return a ? ToRegister(a) : InvalidReg;
+}
+
+static inline FloatRegister
+ToFloatRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isFloatReg());
+ return a.toFloatReg()->reg();
+}
+
+static inline FloatRegister
+ToFloatRegister(const LAllocation* a)
+{
+ return ToFloatRegister(*a);
+}
+
+static inline FloatRegister
+ToFloatRegister(const LDefinition* def)
+{
+ return ToFloatRegister(*def->output());
+}
+
+static inline FloatRegister
+ToTempFloatRegisterOrInvalid(const LDefinition* def)
+{
+ if (def->isBogusTemp())
+ return InvalidFloatReg;
+ return ToFloatRegister(def);
+}
+
+static inline AnyRegister
+ToAnyRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isGeneralReg() || a.isFloatReg());
+ if (a.isGeneralReg())
+ return AnyRegister(ToRegister(a));
+ return AnyRegister(ToFloatRegister(a));
+}
+
+static inline AnyRegister
+ToAnyRegister(const LAllocation* a)
+{
+ return ToAnyRegister(*a);
+}
+
+static inline AnyRegister
+ToAnyRegister(const LDefinition* def)
+{
+ return ToAnyRegister(def->output());
+}
+
+static inline RegisterOrInt32Constant
+ToRegisterOrInt32Constant(const LAllocation* a)
+{
+ if (a->isConstant())
+ return RegisterOrInt32Constant(ToInt32(a));
+ return RegisterOrInt32Constant(ToRegister(a));
+}
+
+static inline ValueOperand
+GetValueOutput(LInstruction* ins)
+{
+#if defined(JS_NUNBOX32)
+ return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
+ ToRegister(ins->getDef(PAYLOAD_INDEX)));
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(ToRegister(ins->getDef(0)));
+#else
+#error "Unknown"
+#endif
+}
+
+static inline ValueOperand
+GetTempValue(Register type, Register payload)
+{
+#if defined(JS_NUNBOX32)
+ return ValueOperand(type, payload);
+#elif defined(JS_PUNBOX64)
+ (void)type;
+ return ValueOperand(payload);
+#else
+#error "Unknown"
+#endif
+}
+
+int32_t
+CodeGeneratorShared::ArgToStackOffset(int32_t slot) const
+{
+ return masm.framePushed() +
+ (gen->compilingWasm() ? sizeof(wasm::Frame) : sizeof(JitFrameLayout)) +
+ slot;
+}
+
+int32_t
+CodeGeneratorShared::CalleeStackOffset() const
+{
+ return masm.framePushed() + JitFrameLayout::offsetOfCalleeToken();
+}
+
+int32_t
+CodeGeneratorShared::SlotToStackOffset(int32_t slot) const
+{
+ MOZ_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
+ int32_t offset = masm.framePushed() - frameInitialAdjustment_ - slot;
+ MOZ_ASSERT(offset >= 0);
+ return offset;
+}
+
+int32_t
+CodeGeneratorShared::StackOffsetToSlot(int32_t offset) const
+{
+ // See: SlotToStackOffset. This is used to convert pushed arguments
+ // to a slot index that safepoints can use.
+ //
+ // offset = framePushed - frameInitialAdjustment - slot
+ // offset + slot = framePushed - frameInitialAdjustment
+    //   slot = framePushed - frameInitialAdjustment - offset
+ return masm.framePushed() - frameInitialAdjustment_ - offset;
+}
+
+// For argument construction for calls. Argslots are Value-sized.
+int32_t
+CodeGeneratorShared::StackOffsetOfPassedArg(int32_t slot) const
+{
+ // A slot of 0 is permitted only to calculate %esp offset for calls.
+ MOZ_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
+ int32_t offset = masm.framePushed() -
+ graph.paddedLocalSlotsSize() -
+ (slot * sizeof(Value));
+
+    // Passed arguments go below a function's local stack storage.
+    // When arguments are being pushed, there is nothing important on the stack.
+    // Therefore, it is safe to push the arguments down arbitrarily. Pushing
+ // by sizeof(Value) is desirable since everything on the stack is a Value.
+ // Note that paddedLocalSlotCount() aligns to at least a Value boundary
+ // specifically to support this.
+ MOZ_ASSERT(offset >= 0);
+ MOZ_ASSERT(offset % sizeof(Value) == 0);
+ return offset;
+}
+
+int32_t
+CodeGeneratorShared::ToStackOffset(LAllocation a) const
+{
+ if (a.isArgument())
+ return ArgToStackOffset(a.toArgument()->index());
+ return SlotToStackOffset(a.toStackSlot()->slot());
+}
+
+int32_t
+CodeGeneratorShared::ToStackOffset(const LAllocation* a) const
+{
+ return ToStackOffset(*a);
+}
+
+Address
+CodeGeneratorShared::ToAddress(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isMemory());
+ return Address(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Address
+CodeGeneratorShared::ToAddress(const LAllocation* a)
+{
+ return ToAddress(*a);
+}
+
+void
+CodeGeneratorShared::saveLive(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PushRegsInMask(safepoint->liveRegs());
+}
+
+void
+CodeGeneratorShared::restoreLive(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMask(safepoint->liveRegs());
+}
+
+void
+CodeGeneratorShared::restoreLiveIgnore(LInstruction* ins, LiveRegisterSet ignore)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMaskIgnore(safepoint->liveRegs(), ignore);
+}
+
+void
+CodeGeneratorShared::saveLiveVolatile(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
+ masm.PushRegsInMask(regs);
+}
+
+void
+CodeGeneratorShared::restoreLiveVolatile(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
+ masm.PopRegsInMask(regs);
+}
+
+void
+CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
+ Scalar::Type type, Operand mem, LAllocation alloc)
+{
+#ifdef DEBUG
+ using namespace Disassembler;
+
+ Disassembler::HeapAccess::Kind kind = isLoad ? HeapAccess::Load : HeapAccess::Store;
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ if (kind == HeapAccess::Load)
+ kind = HeapAccess::LoadSext32;
+ break;
+ default:
+ break;
+ }
+
+ OtherOperand op;
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (!alloc.isConstant()) {
+ op = OtherOperand(ToRegister(alloc).encoding());
+ } else {
+ // x86 doesn't allow encoding an imm64 to memory move; the value
+                // is wrapped anyway.
+ int32_t i = ToInt32(&alloc);
+
+ // Sign-extend the immediate value out to 32 bits. We do this even
+ // for unsigned element types so that we match what the disassembly
+ // code does, as it doesn't know about signedness of stores.
+ unsigned shift = 32 - TypedArrayElemSize(type) * 8;
+ i = i << shift >> shift;
+ op = OtherOperand(i);
+ }
+ break;
+ case Scalar::Int64:
+ // Can't encode an imm64-to-memory move.
+ op = OtherOperand(ToRegister(alloc).encoding());
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ op = OtherOperand(ToFloatRegister(alloc).encoding());
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("Unexpected array type");
+ }
+
+ HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op);
+ masm.verifyHeapAccessDisassembly(begin, end, access);
+#endif
+}
+
+void
+CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc)
+{
+ verifyHeapAccessDisassembly(begin, end, true, type, mem, alloc);
+}
+
+void
+CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc)
+{
+ verifyHeapAccessDisassembly(begin, end, false, type, mem, alloc);
+}
+
+inline bool
+CodeGeneratorShared::isGlobalObject(JSObject* object)
+{
+    // Calling object->is<GlobalObject>() is racy because it relies on
+    // checking the object's group, which can change while we are compiling
+    // off the main thread.
+ return object == gen->compartment->maybeGlobal();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_inl_h */
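SlotToStackOffset and StackOffsetToSlot are inverses of each other by construction. A quick round-trip check with illustrative framePushed and frameInitialAdjustment values:

// Round-trip check of the slot/offset conversion:
//   offset = framePushed - frameInitialAdjustment - slot
//   slot   = framePushed - frameInitialAdjustment - offset
#include <cassert>
#include <cstdint>

static int32_t SlotToStackOffset(int32_t framePushed, int32_t adjust, int32_t slot) {
    return framePushed - adjust - slot;
}
static int32_t StackOffsetToSlot(int32_t framePushed, int32_t adjust, int32_t offset) {
    return framePushed - adjust - offset;
}

int main() {
    const int32_t framePushed = 128;   // illustrative values
    const int32_t adjust      = 8;
    for (int32_t slot = 1; slot <= 16; slot++) {
        int32_t offset = SlotToStackOffset(framePushed, adjust, slot);
        assert(offset >= 0);
        assert(StackOffsetToSlot(framePushed, adjust, offset) == slot);
    }
    return 0;
}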
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
new file mode 100644
index 000000000..ba5d9d2f5
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -0,0 +1,1865 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCaches.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/OptimizationTracking.h"
+#include "js/Conversions.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+MacroAssembler&
+CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg)
+{
+ if (masmArg)
+ return *masmArg;
+ maybeMasm_.emplace();
+ return *maybeMasm_;
+}
+
+CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masmArg)
+ : maybeMasm_(),
+ masm(ensureMasm(masmArg)),
+ gen(gen),
+ graph(*graph),
+ current(nullptr),
+ snapshots_(),
+ recovers_(),
+ deoptTable_(nullptr),
+#ifdef DEBUG
+ pushedArgs_(0),
+#endif
+ lastOsiPointOffset_(0),
+ safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)),
+ returnLabel_(),
+ stubSpace_(),
+ nativeToBytecodeMap_(nullptr),
+ nativeToBytecodeMapSize_(0),
+ nativeToBytecodeTableOffset_(0),
+ nativeToBytecodeNumRegions_(0),
+ nativeToBytecodeScriptList_(nullptr),
+ nativeToBytecodeScriptListLength_(0),
+ trackedOptimizationsMap_(nullptr),
+ trackedOptimizationsMapSize_(0),
+ trackedOptimizationsRegionTableOffset_(0),
+ trackedOptimizationsTypesTableOffset_(0),
+ trackedOptimizationsAttemptsTableOffset_(0),
+ osrEntryOffset_(0),
+ skipArgCheckEntryOffset_(0),
+#ifdef CHECK_OSIPOINT_REGISTERS
+ checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
+#endif
+ frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
+ frameInitialAdjustment_(0)
+{
+ if (gen->isProfilerInstrumentationEnabled())
+ masm.enableProfilingInstrumentation();
+
+ if (gen->compilingWasm()) {
+ // Since wasm uses the system ABI which does not necessarily use a
+ // regular array where all slots are sizeof(Value), it maintains the max
+ // argument stack depth separately.
+ MOZ_ASSERT(graph->argumentSlotCount() == 0);
+ frameDepth_ += gen->wasmMaxStackArgBytes();
+
+ if (gen->usesSimd()) {
+ // If the function uses any SIMD then we may need to insert padding
+ // so that local slots are aligned for SIMD.
+ frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame),
+ WasmStackAlignment);
+ frameDepth_ += frameInitialAdjustment_;
+ // Keep the stack aligned. Some SIMD sequences build values on the
+ // stack and need the stack aligned.
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ } else if (gen->performsCall()) {
+            // An MWasmCall does not align the stack pointer at call sites but
+ // instead relies on the a priori stack adjustment. This must be the
+ // last adjustment of frameDepth_.
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ }
+
+ // FrameSizeClass is only used for bailing, which cannot happen in
+ // wasm code.
+ frameClass_ = FrameSizeClass::None();
+ } else {
+ frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
+ }
+}
+
+bool
+CodeGeneratorShared::generatePrologue()
+{
+ MOZ_ASSERT(masm.framePushed() == 0);
+ MOZ_ASSERT(!gen->compilingWasm());
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled())
+ masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameSize());
+ masm.checkStackAlignment();
+
+ emitTracelogIonStart();
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateEpilogue()
+{
+ MOZ_ASSERT(!gen->compilingWasm());
+ masm.bind(&returnLabel_);
+
+ emitTracelogIonStop();
+
+ masm.freeStack(frameSize());
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ // If profiling, reset the per-thread global lastJitFrame to point to
+ // the previous frame.
+ if (isProfilerInstrumentationEnabled())
+ masm.profilerExitFrame();
+
+ masm.ret();
+
+ // On systems that use a constant pool, this is a good time to emit.
+ masm.flushBuffer();
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateOutOfLineCode()
+{
+ for (size_t i = 0; i < outOfLineCode_.length(); i++) {
+ // Add native => bytecode mapping entries for OOL sites.
+ // Not enabled on wasm yet since it doesn't contain bytecode mappings.
+ if (!gen->compilingWasm()) {
+ if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
+ return false;
+ }
+
+ if (!gen->alloc().ensureBallast())
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting out of line code");
+
+ masm.setFramePushed(outOfLineCode_[i]->framePushed());
+ lastPC_ = outOfLineCode_[i]->pc();
+ outOfLineCode_[i]->bind(&masm);
+
+ outOfLineCode_[i]->generate(this);
+ }
+
+ return !masm.oom();
+}
+
+void
+CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir)
+{
+ MOZ_ASSERT(mir);
+ addOutOfLineCode(code, mir->trackedSite());
+}
+
+void
+CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site)
+{
+ code->setFramePushed(masm.framePushed());
+ code->setBytecodeSite(site);
+ MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
+ masm.propagateOOM(outOfLineCode_.append(code));
+}
+
+bool
+CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site)
+{
+ // Skip the table entirely if profiling is not enabled.
+ if (!isProfilerInstrumentationEnabled())
+ return true;
+
+    // Fail early if the last added instruction caused the macro assembler to
+    // run out of memory, as the continuity assumptions below do not hold.
+ if (masm.oom())
+ return false;
+
+ MOZ_ASSERT(site);
+ MOZ_ASSERT(site->tree());
+ MOZ_ASSERT(site->pc());
+
+ InlineScriptTree* tree = site->tree();
+ jsbytecode* pc = site->pc();
+ uint32_t nativeOffset = masm.currentOffset();
+
+ MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
+
+ if (!nativeToBytecodeList_.empty()) {
+ size_t lastIdx = nativeToBytecodeList_.length() - 1;
+ NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
+
+ MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
+
+ // If the new entry is for the same inlineScriptTree and same
+ // bytecodeOffset, but the nativeOffset has changed, do nothing.
+ // The same site just generated some more code.
+ if (lastEntry.tree == tree && lastEntry.pc == pc) {
+ JitSpew(JitSpew_Profiling, " => In-place update [%" PRIuSIZE "-%" PRIu32 "]",
+ lastEntry.nativeOffset.offset(), nativeOffset);
+ return true;
+ }
+
+ // If the new entry is for the same native offset, then update the
+ // previous entry with the new bytecode site, since the previous
+ // bytecode site did not generate any native code.
+ if (lastEntry.nativeOffset.offset() == nativeOffset) {
+ lastEntry.tree = tree;
+ lastEntry.pc = pc;
+ JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
+
+ // This overwrite might have made the entry merge-able with a
+ // previous one. If so, merge it.
+ if (lastIdx > 0) {
+ NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
+ if (nextToLastEntry.tree == lastEntry.tree && nextToLastEntry.pc == lastEntry.pc) {
+ JitSpew(JitSpew_Profiling, " => Merging with previous region");
+ nativeToBytecodeList_.erase(&lastEntry);
+ }
+ }
+
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+ }
+ }
+
+ // Otherwise, some native code was generated for the previous bytecode site.
+ // Add a new entry for code that is about to be generated.
+ NativeToBytecode entry;
+ entry.nativeOffset = CodeOffset(nativeOffset);
+ entry.tree = tree;
+ entry.pc = pc;
+ if (!nativeToBytecodeList_.append(entry))
+ return false;
+
+ JitSpew(JitSpew_Profiling, " => Push new entry.");
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+}
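+
+// Illustrative walkthrough (not part of the original patch) of how the
+// native-to-bytecode list above evolves. Suppose bytecode sites A, B and C
+// are passed in this order:
+//   addNativeToBytecodeEntry(A) at native offset 0  -> append {0, A}
+//   addNativeToBytecodeEntry(B) still at offset 0   -> A emitted no native
+//                                                      code, so {0, A} is
+//                                                      overwritten to {0, B}
+//   addNativeToBytecodeEntry(B) at offset 16        -> same site, in-place
+//                                                      update, no new entry
+//   addNativeToBytecodeEntry(C) at offset 16        -> B emitted code, so
+//                                                      append {16, C}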
+
+void
+CodeGeneratorShared::dumpNativeToBytecodeEntries()
+{
+#ifdef JS_JITSPEW
+ InlineScriptTree* topTree = gen->info().inlineScriptTree();
+ JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%" PRIuSIZE "\n",
+ topTree->script()->filename(), topTree->script()->lineno());
+ for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
+ dumpNativeToBytecodeEntry(i);
+#endif
+}
+
+void
+CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
+{
+#ifdef JS_JITSPEW
+ NativeToBytecode& ref = nativeToBytecodeList_[idx];
+ InlineScriptTree* tree = ref.tree;
+ JSScript* script = tree->script();
+ uint32_t nativeOffset = ref.nativeOffset.offset();
+ unsigned nativeDelta = 0;
+ unsigned pcDelta = 0;
+ if (idx + 1 < nativeToBytecodeList_.length()) {
+ NativeToBytecode* nextRef = &ref + 1;
+ nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
+ if (nextRef->tree == ref.tree)
+ pcDelta = nextRef->pc - ref.pc;
+ }
+ JitSpewStart(JitSpew_Profiling, " %08" PRIxSIZE " [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%" PRIuSIZE,
+ ref.nativeOffset.offset(),
+ nativeDelta,
+ (long) (ref.pc - script->code()),
+ pcDelta,
+ CodeName[JSOp(*ref.pc)],
+ script->filename(), script->lineno());
+
+ for (tree = tree->caller(); tree; tree = tree->caller()) {
+ JitSpewCont(JitSpew_Profiling, " <= %s:%" PRIuSIZE, tree->script()->filename(),
+ tree->script()->lineno());
+ }
+ JitSpewCont(JitSpew_Profiling, ")");
+ JitSpewFin(JitSpew_Profiling);
+#endif
+}
+
+bool
+CodeGeneratorShared::addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
+{
+ if (!isOptimizationTrackingEnabled())
+ return true;
+
+ MOZ_ASSERT(optimizations);
+
+ uint32_t nativeOffset = masm.currentOffset();
+
+ if (!trackedOptimizations_.empty()) {
+ NativeToTrackedOptimizations& lastEntry = trackedOptimizations_.back();
+ MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= lastEntry.endOffset.offset());
+
+ // If we're still generating code for the same set of optimizations,
+ // we are done.
+ if (lastEntry.optimizations == optimizations)
+ return true;
+ }
+
+ // If we're generating code for a new set of optimizations, add a new
+ // entry.
+ NativeToTrackedOptimizations entry;
+ entry.startOffset = CodeOffset(nativeOffset);
+ entry.endOffset = CodeOffset(nativeOffset);
+ entry.optimizations = optimizations;
+ return trackedOptimizations_.append(entry);
+}
+
+void
+CodeGeneratorShared::extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
+{
+ if (!isOptimizationTrackingEnabled())
+ return;
+
+ uint32_t nativeOffset = masm.currentOffset();
+ NativeToTrackedOptimizations& entry = trackedOptimizations_.back();
+ MOZ_ASSERT(entry.optimizations == optimizations);
+ MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= entry.endOffset.offset());
+
+ entry.endOffset = CodeOffset(nativeOffset);
+
+ // If we generated no code, remove the last entry.
+ if (nativeOffset == entry.startOffset.offset())
+ trackedOptimizations_.popBack();
+}
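+
+// Illustrative usage sketch (inferred from the two functions above, not taken
+// from the original patch): code that tracks optimizations brackets the
+// native code it emits roughly like this:
+//
+//   addTrackedOptimizationsEntry(opts);    // records [start, start]
+//   ... emit native code for the LIR op ...
+//   extendTrackedOptimizationsEntry(opts); // stretches the range to the
+//                                          // current offset, or pops the
+//                                          // entry if nothing was emitted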
+
+// see OffsetOfFrameSlot
+static inline int32_t
+ToStackIndex(LAllocation* a)
+{
+ if (a->isStackSlot()) {
+ MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
+ return a->toStackSlot()->slot();
+ }
+ return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
+}
+
+void
+CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
+ uint32_t* allocIndex)
+{
+ if (mir->isBox())
+ mir = mir->toBox()->getOperand(0);
+
+ MIRType type =
+ mir->isRecoveredOnBailout() ? MIRType::None :
+ mir->isUnused() ? MIRType::MagicOptimizedOut :
+ mir->type();
+
+ RValueAllocation alloc;
+
+ switch (type) {
+ case MIRType::None:
+ {
+ MOZ_ASSERT(mir->isRecoveredOnBailout());
+ uint32_t index = 0;
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ MNode** it = recoverInfo->begin();
+ MNode** end = recoverInfo->end();
+ while (it != end && mir != *it) {
+ ++it;
+ ++index;
+ }
+
+ // This MDefinition is recovered, thus it should be listed in the
+ // LRecoverInfo.
+ MOZ_ASSERT(it != end && mir == *it);
+
+ // Lambda should have a default value readable for iterating over the
+ // inner frames.
+ if (mir->isLambda()) {
+ MConstant* constant = mir->toLambda()->functionOperand();
+ uint32_t cstIndex;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &cstIndex));
+ alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
+ break;
+ }
+
+ alloc = RValueAllocation::RecoverInstruction(index);
+ break;
+ }
+ case MIRType::Undefined:
+ alloc = RValueAllocation::Undefined();
+ break;
+ case MIRType::Null:
+ alloc = RValueAllocation::Null();
+ break;
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ case MIRType::Boolean:
+ case MIRType::Double:
+ {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ JSValueType valueType =
+ (type == MIRType::ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
+
+ MOZ_ASSERT(payload->isMemory() || payload->isRegister());
+ if (payload->isMemory())
+ alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
+ else if (payload->isGeneralReg())
+ alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
+ else if (payload->isFloatReg())
+ alloc = RValueAllocation::Double(ToFloatRegister(payload));
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
+ if (payload->isFloatReg())
+ alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
+ else
+ alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
+ break;
+ }
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing:
+ {
+ uint32_t index;
+ JSWhyMagic why = JS_GENERIC_MAGIC;
+ switch (type) {
+ case MIRType::MagicOptimizedArguments:
+ why = JS_OPTIMIZED_ARGUMENTS;
+ break;
+ case MIRType::MagicOptimizedOut:
+ why = JS_OPTIMIZED_OUT;
+ break;
+ case MIRType::MagicUninitializedLexical:
+ why = JS_UNINITIALIZED_LEXICAL;
+ break;
+ case MIRType::MagicIsConstructing:
+ why = JS_IS_CONSTRUCTING;
+ break;
+ default:
+ MOZ_CRASH("Invalid Magic MIRType");
+ }
+
+ Value v = MagicValue(why);
+ masm.propagateOOM(graph.addConstantToPool(v, &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+ default:
+ {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+#ifdef JS_NUNBOX32
+ LAllocation* type = snapshot->typeOfSlot(*allocIndex);
+ if (type->isRegister()) {
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
+ } else {
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
+ }
+#elif JS_PUNBOX64
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToStackIndex(payload));
+#endif
+ break;
+ }
+ }
+
+ // This sets an extra bit as part of the RValueAllocation, so that we know
+ // the recover instruction has to be executed without wrapping it in a
+ // no-op recover instruction.
+ if (mir->isIncompleteObject())
+ alloc.setNeedSideEffect();
+
+ masm.propagateOOM(snapshots_.add(alloc));
+
+ *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
+}
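+
+// Summary of the cases above (descriptive note, not in the original patch):
+// each RValueAllocation records where one recovered slot's value lives at
+// this snapshot: a constant-pool index, a typed register or stack payload,
+// an untyped (boxed) location, or a reference to a recover instruction that
+// rebuilds the value during bailout.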
+
+void
+CodeGeneratorShared::encode(LRecoverInfo* recover)
+{
+ if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
+ return;
+
+ uint32_t numInstructions = recover->numInstructions();
+ JitSpew(JitSpew_IonSnapshots, "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
+ (void*)recover, recover->mir()->frameCount(), numInstructions);
+
+ MResumePoint::Mode mode = recover->mir()->mode();
+ MOZ_ASSERT(mode != MResumePoint::Outer);
+ bool resumeAfter = (mode == MResumePoint::ResumeAfter);
+
+ RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
+
+ for (MNode* insn : *recover)
+ recovers_.writeInstruction(insn);
+
+ recovers_.endRecover();
+ recover->setRecoverOffset(offset);
+ masm.propagateOOM(!recovers_.oom());
+}
+
+void
+CodeGeneratorShared::encode(LSnapshot* snapshot)
+{
+ if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
+ return;
+
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ encode(recoverInfo);
+
+ RecoverOffset recoverOffset = recoverInfo->recoverOffset();
+ MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
+
+ JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
+ (void*)snapshot, (void*) recoverInfo);
+
+ SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
+
+#ifdef TRACK_SNAPSHOTS
+ uint32_t pcOpcode = 0;
+ uint32_t lirOpcode = 0;
+ uint32_t lirId = 0;
+ uint32_t mirOpcode = 0;
+ uint32_t mirId = 0;
+
+ if (LNode* ins = instruction()) {
+ lirOpcode = ins->op();
+ lirId = ins->id();
+ if (ins->mirRaw()) {
+ mirOpcode = ins->mirRaw()->op();
+ mirId = ins->mirRaw()->id();
+ if (ins->mirRaw()->trackedPc())
+ pcOpcode = *ins->mirRaw()->trackedPc();
+ }
+ }
+ snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
+#endif
+
+ uint32_t allocIndex = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
+ encodeAllocation(snapshot, *it, &allocIndex);
+ MOZ_ASSERT_IF(!snapshots_.oom(), allocWritten + 1 == snapshots_.allocWritten());
+ }
+
+ MOZ_ASSERT(allocIndex == snapshot->numSlots());
+ snapshots_.endSnapshot();
+ snapshot->setSnapshotOffset(offset);
+ masm.propagateOOM(!snapshots_.oom());
+}
+
+bool
+CodeGeneratorShared::assignBailoutId(LSnapshot* snapshot)
+{
+ MOZ_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);
+
+ // If there is no deopt table, we cannot use bailout tables at all.
+ if (!deoptTable_)
+ return false;
+
+ MOZ_ASSERT(frameClass_ != FrameSizeClass::None());
+
+ if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
+ return true;
+
+ // Is the bailout table full?
+ if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
+ return false;
+
+ unsigned bailoutId = bailouts_.length();
+ snapshot->setBailoutId(bailoutId);
+ JitSpew(JitSpew_IonSnapshots, "Assigned snapshot bailout id %u", bailoutId);
+ masm.propagateOOM(bailouts_.append(snapshot->snapshotOffset()));
+ return true;
+}
+
+bool
+CodeGeneratorShared::encodeSafepoints()
+{
+ for (SafepointIndex& index : safepointIndices_) {
+ LSafepoint* safepoint = index.safepoint();
+
+ if (!safepoint->encoded())
+ safepoints_.encode(safepoint);
+
+ index.resolve();
+ }
+
+ return !safepoints_.oom();
+}
+
+bool
+CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx)
+{
+ js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
+ InlineScriptTree* tree = gen->info().inlineScriptTree();
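+ // Walk the inline script tree in pre-order (current script first, then its
+ // children, then the next sibling found by walking back up the callers),
+ // appending each script the first time it is seen.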
+ for (;;) {
+ // Add script from current tree.
+ bool found = false;
+ for (uint32_t i = 0; i < scriptList.length(); i++) {
+ if (scriptList[i] == tree->script()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ if (!scriptList.append(tree->script()))
+ return false;
+ }
+
+ // Process the rest of the tree.
+
+ // If this tree has children, process the first child next.
+ if (tree->hasChildren()) {
+ tree = tree->firstChild();
+ continue;
+ }
+
+ // Otherwise, find the first tree up the chain (including this one)
+ // that contains a next sibling.
+ while (!tree->hasNextCallee() && tree->hasCaller())
+ tree = tree->caller();
+
+ // If we found a sibling, use it.
+ if (tree->hasNextCallee()) {
+ tree = tree->nextCallee();
+ continue;
+ }
+
+ // Otherwise, we must have reached the top without finding any siblings.
+ MOZ_ASSERT(tree->isOutermostCaller());
+ break;
+ }
+
+ // Allocate array for list.
+ JSScript** data = cx->runtime()->pod_malloc<JSScript*>(scriptList.length());
+ if (!data)
+ return false;
+
+ for (uint32_t i = 0; i < scriptList.length(); i++)
+ data[i] = scriptList[i];
+
+ // Success.
+ nativeToBytecodeScriptListLength_ = scriptList.length();
+ nativeToBytecodeScriptList_ = data;
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code)
+{
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ == 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ == 0);
+
+ if (!createNativeToBytecodeScriptList(cx))
+ return false;
+
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+
+ CompactBufferWriter writer;
+ uint32_t tableOffset = 0;
+ uint32_t numRegions = 0;
+
+ if (!JitcodeIonTable::WriteIonTable(
+ writer, nativeToBytecodeScriptList_, nativeToBytecodeScriptListLength_,
+ &nativeToBytecodeList_[0],
+ &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
+ &tableOffset, &numRegions))
+ {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ MOZ_ASSERT(tableOffset > 0);
+ MOZ_ASSERT(numRegions > 0);
+
+ // Writer is done, copy it to sized buffer.
+ uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
+ if (!data) {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ memcpy(data, writer.buffer(), writer.length());
+ nativeToBytecodeMap_ = data;
+ nativeToBytecodeMapSize_ = writer.length();
+ nativeToBytecodeTableOffset_ = tableOffset;
+ nativeToBytecodeNumRegions_ = numRegions;
+
+ verifyCompactNativeToBytecodeMap(code);
+
+ JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]",
+ data, data + nativeToBytecodeMapSize_);
+
+ return true;
+}
+
+void
+CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode* code)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ > 0);
+
+ // The pointer to the table must be 4-byte aligned
+ const uint8_t* tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
+ MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
+
+ // Verify that numRegions was encoded correctly.
+ const JitcodeIonTable* ionTable = reinterpret_cast<const JitcodeIonTable*>(tablePtr);
+ MOZ_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
+
+ // The offset of the first region should point to the start of the payload
+ // region. Since region offsets are stored as back-offsets from the start of
+ // the table, the first entry's back-offset must equal the forward table
+ // offset from the start of the allocated data.
+ MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
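+
+ // Illustrative layout, inferred from the checks in this function (not part
+ // of the original patch):
+ //
+ //   nativeToBytecodeMap_
+ //   |<-- nativeToBytecodeTableOffset_ -->|
+ //   [ region payload .................. ][ table header | region offsets ]
+ //
+ // Each regionOffset(i) is a back-offset measured from the start of the
+ // table, so regionOffset(0) points back at the very start of the payload
+ // and later regions have strictly smaller back-offsets.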
+
+ // Verify each region.
+ for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
+ // Back-offset must point into the payload region preceding the table, not before it.
+ MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
+
+ // Back-offset must point to a later area in the payload region than previous
+ // back-offset. This means that back-offsets decrease monotonically.
+ MOZ_ASSERT_IF(i > 0, ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
+
+ JitcodeRegionEntry entry = ionTable->regionEntry(i);
+
+ // Ensure native code offset for region falls within jitcode.
+ MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
+
+ // Read out script/pc stack and verify.
+ JitcodeRegionEntry::ScriptPcIterator scriptPcIter = entry.scriptPcIterator();
+ while (scriptPcIter.hasMore()) {
+ uint32_t scriptIdx = 0, pcOffset = 0;
+ scriptPcIter.readNext(&scriptIdx, &pcOffset);
+
+ // Ensure scriptIdx refers to a valid script in the list.
+ MOZ_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
+ JSScript* script = nativeToBytecodeScriptList_[scriptIdx];
+
+ // Ensure pcOffset falls within the script.
+ MOZ_ASSERT(pcOffset < script->length());
+ }
+
+ // Obtain the original nativeOffset and pcOffset and script.
+ uint32_t curNativeOffset = entry.nativeOffset();
+ JSScript* script = nullptr;
+ uint32_t curPcOffset = 0;
+ {
+ uint32_t scriptIdx = 0;
+ scriptPcIter.reset();
+ scriptPcIter.readNext(&scriptIdx, &curPcOffset);
+ script = nativeToBytecodeScriptList_[scriptIdx];
+ }
+
+ // Read out nativeDeltas and pcDeltas and verify.
+ JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
+ while (deltaIter.hasMore()) {
+ uint32_t nativeDelta = 0;
+ int32_t pcDelta = 0;
+ deltaIter.readNext(&nativeDelta, &pcDelta);
+
+ curNativeOffset += nativeDelta;
+ curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
+
+ // Ensure that nativeOffset still falls within jitcode after delta.
+ MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
+
+ // Ensure that pcOffset still falls within bytecode after delta.
+ MOZ_ASSERT(curPcOffset < script->length());
+ }
+ }
+#endif // DEBUG
+}
+
+bool
+CodeGeneratorShared::generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
+ IonTrackedTypeVector* allTypes)
+{
+ MOZ_ASSERT(trackedOptimizationsMap_ == nullptr);
+ MOZ_ASSERT(trackedOptimizationsMapSize_ == 0);
+ MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ == 0);
+ MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ == 0);
+ MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ == 0);
+
+ if (trackedOptimizations_.empty())
+ return true;
+
+ UniqueTrackedOptimizations unique(cx);
+ if (!unique.init())
+ return false;
+
+ // Iterate through all entries to deduplicate their optimization attempts.
+ for (size_t i = 0; i < trackedOptimizations_.length(); i++) {
+ NativeToTrackedOptimizations& entry = trackedOptimizations_[i];
+ if (!unique.add(entry.optimizations))
+ return false;
+ }
+
+ // Sort the unique optimization attempts by frequency to stabilize the
+ // attempts' indices in the compact table we will write later.
+ if (!unique.sortByFrequency(cx))
+ return false;
+
+ // Write out the ranges and the table.
+ CompactBufferWriter writer;
+ uint32_t numRegions;
+ uint32_t regionTableOffset;
+ uint32_t typesTableOffset;
+ uint32_t attemptsTableOffset;
+ if (!WriteIonTrackedOptimizationsTable(cx, writer,
+ trackedOptimizations_.begin(),
+ trackedOptimizations_.end(),
+ unique, &numRegions,
+ &regionTableOffset, &typesTableOffset,
+ &attemptsTableOffset, allTypes))
+ {
+ return false;
+ }
+
+ MOZ_ASSERT(regionTableOffset > 0);
+ MOZ_ASSERT(typesTableOffset > 0);
+ MOZ_ASSERT(attemptsTableOffset > 0);
+ MOZ_ASSERT(typesTableOffset > regionTableOffset);
+ MOZ_ASSERT(attemptsTableOffset > typesTableOffset);
+
+ // Copy over the table out of the writer's buffer.
+ uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
+ if (!data)
+ return false;
+
+ memcpy(data, writer.buffer(), writer.length());
+ trackedOptimizationsMap_ = data;
+ trackedOptimizationsMapSize_ = writer.length();
+ trackedOptimizationsRegionTableOffset_ = regionTableOffset;
+ trackedOptimizationsTypesTableOffset_ = typesTableOffset;
+ trackedOptimizationsAttemptsTableOffset_ = attemptsTableOffset;
+
+ verifyCompactTrackedOptimizationsMap(code, numRegions, unique, allTypes);
+
+ JitSpew(JitSpew_OptimizationTracking,
+ "== Compact Native To Optimizations Map [%p-%p] size %u",
+ data, data + trackedOptimizationsMapSize_, trackedOptimizationsMapSize_);
+ JitSpew(JitSpew_OptimizationTracking,
+ " with type list of length %" PRIuSIZE ", size %" PRIuSIZE,
+ allTypes->length(), allTypes->length() * sizeof(IonTrackedTypeWithAddendum));
+
+ return true;
+}
+
+#ifdef DEBUG
+class ReadTempAttemptsVectorOp : public JS::ForEachTrackedOptimizationAttemptOp
+{
+ TempOptimizationAttemptsVector* attempts_;
+ bool oom_;
+
+ public:
+ explicit ReadTempAttemptsVectorOp(TempOptimizationAttemptsVector* attempts)
+ : attempts_(attempts), oom_(false)
+ { }
+
+ bool oom() {
+ return oom_;
+ }
+
+ void operator()(JS::TrackedStrategy strategy, JS::TrackedOutcome outcome) override {
+ if (!attempts_->append(OptimizationAttempt(strategy, outcome)))
+ oom_ = true;
+ }
+};
+
+struct ReadTempTypeInfoVectorOp : public IonTrackedOptimizationsTypeInfo::ForEachOp
+{
+ TempAllocator& alloc_;
+ TempOptimizationTypeInfoVector* types_;
+ TempTypeList accTypes_;
+ bool oom_;
+
+ public:
+ ReadTempTypeInfoVectorOp(TempAllocator& alloc, TempOptimizationTypeInfoVector* types)
+ : alloc_(alloc),
+ types_(types),
+ accTypes_(alloc),
+ oom_(false)
+ { }
+
+ bool oom() {
+ return oom_;
+ }
+
+ void readType(const IonTrackedTypeWithAddendum& tracked) override {
+ if (!accTypes_.append(tracked.type))
+ oom_ = true;
+ }
+
+ void operator()(JS::TrackedTypeSite site, MIRType mirType) override {
+ OptimizationTypeInfo ty(alloc_, site, mirType);
+ for (uint32_t i = 0; i < accTypes_.length(); i++) {
+ if (!ty.trackType(accTypes_[i]))
+ oom_ = true;
+ }
+ if (!types_->append(mozilla::Move(ty)))
+ oom_ = true;
+ accTypes_.clear();
+ }
+};
+#endif // DEBUG
+
+void
+CodeGeneratorShared::verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
+ const UniqueTrackedOptimizations& unique,
+ const IonTrackedTypeVector* allTypes)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(trackedOptimizationsMap_ != nullptr);
+ MOZ_ASSERT(trackedOptimizationsMapSize_ > 0);
+ MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ > 0);
+ MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ > 0);
+ MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ > 0);
+
+ // Table pointers must all be 4-byte aligned.
+ const uint8_t* regionTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsRegionTableOffset_;
+ const uint8_t* typesTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsTypesTableOffset_;
+ const uint8_t* attemptsTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsAttemptsTableOffset_;
+ MOZ_ASSERT(uintptr_t(regionTableAddr) % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(uintptr_t(typesTableAddr) % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(uintptr_t(attemptsTableAddr) % sizeof(uint32_t) == 0);
+
+ // Assert that the number of entries matches up for the tables.
+ const IonTrackedOptimizationsRegionTable* regionTable =
+ (const IonTrackedOptimizationsRegionTable*) regionTableAddr;
+ MOZ_ASSERT(regionTable->numEntries() == numRegions);
+ const IonTrackedOptimizationsTypesTable* typesTable =
+ (const IonTrackedOptimizationsTypesTable*) typesTableAddr;
+ MOZ_ASSERT(typesTable->numEntries() == unique.count());
+ const IonTrackedOptimizationsAttemptsTable* attemptsTable =
+ (const IonTrackedOptimizationsAttemptsTable*) attemptsTableAddr;
+ MOZ_ASSERT(attemptsTable->numEntries() == unique.count());
+
+ // Verify each region.
+ uint32_t trackedIdx = 0;
+ for (uint32_t regionIdx = 0; regionIdx < regionTable->numEntries(); regionIdx++) {
+ // Check reverse offsets are within bounds.
+ MOZ_ASSERT(regionTable->entryOffset(regionIdx) <= trackedOptimizationsRegionTableOffset_);
+ MOZ_ASSERT_IF(regionIdx > 0, regionTable->entryOffset(regionIdx) <
+ regionTable->entryOffset(regionIdx - 1));
+
+ IonTrackedOptimizationsRegion region = regionTable->entry(regionIdx);
+
+ // Check the region range is covered by jitcode.
+ MOZ_ASSERT(region.startOffset() <= code->instructionsSize());
+ MOZ_ASSERT(region.endOffset() <= code->instructionsSize());
+
+ IonTrackedOptimizationsRegion::RangeIterator iter = region.ranges();
+ while (iter.more()) {
+ // Assert that the offsets are correctly decoded from the delta.
+ uint32_t startOffset, endOffset;
+ uint8_t index;
+ iter.readNext(&startOffset, &endOffset, &index);
+ NativeToTrackedOptimizations& entry = trackedOptimizations_[trackedIdx++];
+ MOZ_ASSERT(startOffset == entry.startOffset.offset());
+ MOZ_ASSERT(endOffset == entry.endOffset.offset());
+ MOZ_ASSERT(index == unique.indexOf(entry.optimizations));
+
+ // Assert that the type info and attempts vectors are correctly
+ // decoded. This is disabled for now if the types table might
+ // contain nursery pointers, in which case the types might not
+ // match, see bug 1175761.
+ if (!code->runtimeFromMainThread()->gc.storeBuffer.cancelIonCompilations()) {
+ IonTrackedOptimizationsTypeInfo typeInfo = typesTable->entry(index);
+ TempOptimizationTypeInfoVector tvec(alloc());
+ ReadTempTypeInfoVectorOp top(alloc(), &tvec);
+ typeInfo.forEach(top, allTypes);
+ MOZ_ASSERT_IF(!top.oom(), entry.optimizations->matchTypes(tvec));
+ }
+
+ IonTrackedOptimizationsAttempts attempts = attemptsTable->entry(index);
+ TempOptimizationAttemptsVector avec(alloc());
+ ReadTempAttemptsVectorOp aop(&avec);
+ attempts.forEach(aop);
+ MOZ_ASSERT_IF(!aop.oom(), entry.optimizations->matchAttempts(avec));
+ }
+ }
+#endif
+}
+
+void
+CodeGeneratorShared::markSafepoint(LInstruction* ins)
+{
+ markSafepointAt(masm.currentOffset(), ins);
+}
+
+void
+CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins)
+{
+ MOZ_ASSERT_IF(!safepointIndices_.empty() && !masm.oom(),
+ offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
+ masm.propagateOOM(safepointIndices_.append(SafepointIndex(offset, ins->safepoint())));
+}
+
+void
+CodeGeneratorShared::ensureOsiSpace()
+{
+ // For a refresher, an invalidation point is of the form:
+ // 1: call <target>
+ // 2: ...
+ // 3: <osipoint>
+ //
+ // The four bytes *before* instruction 2 are overwritten with an offset.
+ // Callers must ensure that the instruction itself has enough bytes to
+ // support this.
+ //
+ // The bytes *at* instruction 3 are overwritten with an invalidation jump.
+ // These bytes may be in a completely different IR sequence, but represent
+ // the join point of the call out of the function.
+ //
+ // At points where we want to ensure that invalidation won't corrupt an
+ // important instruction, we make sure to pad with nops.
+ if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) {
+ int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
+ paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
+ for (int32_t i = 0; i < paddingSize; ++i)
+ masm.nop();
+ }
+ MOZ_ASSERT_IF(!masm.oom(),
+ masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize());
+ lastOsiPointOffset_ = masm.currentOffset();
+}
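+
+// Worked example (illustrative; assumes a 5-byte patchable near call, as on
+// x86): if only 2 bytes of code were emitted since the previous OSI point,
+// the loop above pads with 3 nops, so the near call patched over the previous
+// OSI point cannot overlap the code emitted at this one.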
+
+uint32_t
+CodeGeneratorShared::markOsiPoint(LOsiPoint* ins)
+{
+ encode(ins->snapshot());
+ ensureOsiSpace();
+
+ uint32_t offset = masm.currentOffset();
+ SnapshotOffset so = ins->snapshot()->snapshotOffset();
+ masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
+
+ return offset;
+}
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+template <class Op>
+static void
+HandleRegisterDump(Op op, MacroAssembler& masm, LiveRegisterSet liveRegs, Register activation,
+ Register scratch)
+{
+ const size_t baseOffset = JitActivation::offsetOfRegs();
+
+ // Handle live GPRs.
+ for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
+ Register reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+
+ if (reg == activation) {
+ // To use the original value of the activation register (that's
+ // now on top of the stack), we need the scratch register.
+ masm.push(scratch);
+ masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
+ op(scratch, dump);
+ masm.pop(scratch);
+ } else {
+ op(reg, dump);
+ }
+ }
+
+ // Handle live FPRs.
+ for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+ op(reg, dump);
+ }
+}
+
+class StoreOp
+{
+ MacroAssembler& masm;
+
+ public:
+ explicit StoreOp(MacroAssembler& masm)
+ : masm(masm)
+ {}
+
+ void operator()(Register reg, Address dump) {
+ masm.storePtr(reg, dump);
+ }
+ void operator()(FloatRegister reg, Address dump) {
+ if (reg.isDouble())
+ masm.storeDouble(reg, dump);
+ else if (reg.isSingle())
+ masm.storeFloat32(reg, dump);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ else if (reg.isSimd128())
+ masm.storeUnalignedSimd128Float(reg, dump);
+#endif
+ else
+ MOZ_CRASH("Unexpected register type.");
+ }
+};
+
+static void
+StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs)
+{
+ // Store a copy of all live registers before performing the call.
+ // When we reach the OsiPoint, we can use this to check nothing
+ // modified them in the meantime.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.add32(Imm32(1), checkRegs);
+
+ StoreOp op(masm);
+ HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.pop(scratch);
+}
+
+class VerifyOp
+{
+ MacroAssembler& masm;
+ Label* failure_;
+
+ public:
+ VerifyOp(MacroAssembler& masm, Label* failure)
+ : masm(masm), failure_(failure)
+ {}
+
+ void operator()(Register reg, Address dump) {
+ masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
+ }
+ void operator()(FloatRegister reg, Address dump) {
+ FloatRegister scratch;
+ if (reg.isDouble()) {
+ scratch = ScratchDoubleReg;
+ masm.loadDouble(dump, scratch);
+ masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ } else if (reg.isSingle()) {
+ scratch = ScratchFloat32Reg;
+ masm.loadFloat32(dump, scratch);
+ masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ }
+
+ // :TODO: (Bug 1133745) Add support to verify SIMD registers.
+ }
+};
+
+void
+CodeGeneratorShared::verifyOsiPointRegs(LSafepoint* safepoint)
+{
+ // Ensure the live registers stored by callVM did not change between
+ // the call and this OsiPoint. Try-catch relies on this invariant.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ // If we should not check registers (because the instruction did not call
+ // into the VM, or a GC happened), we're done.
+ Label failure, done;
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
+
+ // Having more than one VM function call made in one visit function at
+ // runtime is a security-critical error: if we conservatively assume that
+ // one of the function calls can re-enter Ion, then the invalidation process
+ // could patch a call at an arbitrary location, by patching the code
+ // before the return address.
+ masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
+
+ // Set checkRegs to 0, so that we don't try to verify registers after we
+ // return from this script to the caller.
+ masm.store32(Imm32(0), checkRegs);
+
+ // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
+ // temps after calling into the VM. This is fine because no other
+ // instructions (including this OsiPoint) will depend on them. The
+ // backtracking allocator can also use the same register for an input and an
+ // output. These are marked as clobbered and shouldn't get checked.
+ LiveRegisterSet liveRegs;
+ liveRegs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
+ RegisterSet::Not(safepoint->clobberedRegs().set()));
+
+ VerifyOp op(masm, &failure);
+ HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.jump(&done);
+
+ // Do not profile the callWithABI that occurs below. This is to avoid a
+ // rare corner case that occurs when profiling interacts with itself:
+ //
+ // When slow profiling assertions are turned on, FunctionBoundary ops
+ // (which update the profiler pseudo-stack) may emit a callVM, which
+ // forces them to have an osi point associated with them. The
+ // FunctionBoundary for inline function entry is added to the caller's
+ // graph with a PC from the caller's code, but during codegen it modifies
+ // SPS instrumentation to add the callee as the current top-most script.
+ // When codegen gets to the OSIPoint, and the callWithABI below is
+ // emitted, the codegen thinks that the current frame is the callee, but
+ // the PC it's using from the OSIPoint refers to the caller. This causes
+ // the profiler instrumentation of the callWithABI below to ASSERT, since
+ // the script and pc are mismatched. To avoid this, we simply omit
+ // instrumentation for these callWithABIs.
+
+ // Any live register captured by a safepoint (other than temp registers)
+ // must remain unchanged between the call and the OsiPoint instruction.
+ masm.bind(&failure);
+ masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
+
+ masm.bind(&done);
+ masm.pop(scratch);
+}
+
+bool
+CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint* safepoint)
+{
+ if (!checkOsiPointRegisters)
+ return false;
+
+ if (safepoint->liveRegs().emptyGeneral() && safepoint->liveRegs().emptyFloat())
+ return false; // No registers to check.
+
+ return true;
+}
+
+void
+CodeGeneratorShared::resetOsiPointRegs(LSafepoint* safepoint)
+{
+ if (!shouldVerifyOsiPointRegs(safepoint))
+ return;
+
+ // Set checkRegs to 0. If we perform a VM call, the instruction
+ // will set it to 1.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.store32(Imm32(0), checkRegs);
+ masm.pop(scratch);
+}
+#endif
+
+// Before making any call into C++, you should ensure that volatile
+// registers have been evicted by the register allocator.
+void
+CodeGeneratorShared::callVM(const VMFunction& fun, LInstruction* ins, const Register* dynStack)
+{
+ // If we're calling a function with an out parameter type of double, make
+ // sure we have an FPU.
+ MOZ_ASSERT_IF(fun.outParam == Type_Double, GetJitContext()->runtime->jitSupportsFloatingPoint());
+
+#ifdef DEBUG
+ if (ins->mirRaw()) {
+ MOZ_ASSERT(ins->mirRaw()->isInstruction());
+ MInstruction* mir = ins->mirRaw()->toInstruction();
+ MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
+ }
+#endif
+
+ // Stack is:
+ // ... frame ...
+ // [args]
+#ifdef DEBUG
+ MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
+ pushedArgs_ = 0;
+#endif
+
+ // Get the wrapper of the VM function.
+ JitCode* wrapper = gen->jitRuntime()->getVMWrapper(fun);
+ if (!wrapper) {
+ masm.setOOM();
+ return;
+ }
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (shouldVerifyOsiPointRegs(ins->safepoint()))
+ StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
+#endif
+
+ // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
+ // register, then its value is added to |framePushed()| to fill the frame
+ // descriptor.
+ if (dynStack) {
+ masm.addPtr(Imm32(masm.framePushed()), *dynStack);
+ masm.makeFrameDescriptor(*dynStack, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.Push(*dynStack); // descriptor
+ } else {
+ masm.pushStaticFrameDescriptor(JitFrame_IonJS, ExitFrameLayout::Size());
+ }
+
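+ // Stack is:
+ // ... frame ...
+ // [args]
+ // descriptor
+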
+ // Call the wrapper function. The wrapper is in charge of unwinding the
+ // stack when returning from the call. Failures are handled with exceptions
+ // based on the return value of the C++ functions. To act on the returned
+ // value, use another LIR instruction.
+ uint32_t callOffset = masm.callJit(wrapper);
+ markSafepointAt(callOffset, ins);
+
+ // Remove the rest of the frame left on the stack. We exclude the return
+ // address, which is implicitly popped when returning.
+ int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
+ // Stack is:
+ // ... frame ...
+}
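+
+// Illustrative caller sketch (assumed, not part of the original patch; FooInfo
+// and lir->object() are hypothetical): a visit function pushes the VM
+// function's explicit arguments with pushArg() and then invokes callVM:
+//
+//   pushArg(ToRegister(lir->object()));
+//   callVM(FooInfo, lir);
+//
+// The DEBUG-only pushedArgs_ counter checks that the number of pushed
+// arguments matches fun.explicitArgs.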
+
+class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ FloatRegister src_;
+ Register dest_;
+ bool widenFloatToDouble_;
+
+ public:
+ OutOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble = false)
+ : src_(src), dest_(dest), widenFloatToDouble_(widenFloatToDouble)
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineTruncateSlow(this);
+ }
+ FloatRegister src() const {
+ return src_;
+ }
+ Register dest() const {
+ return dest_;
+ }
+ bool widenFloatToDouble() const {
+ return widenFloatToDouble_;
+ }
+
+};
+
+OutOfLineCode*
+CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest);
+ addOutOfLineCode(ool, mir);
+ return ool;
+}
+
+void
+CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineCode* ool = oolTruncateDouble(src, dest, mir);
+
+ masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, true);
+ addOutOfLineCode(ool, mir);
+
+ masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
+{
+ FloatRegister src = ool->src();
+ Register dest = ool->dest();
+
+ saveVolatile(dest);
+ masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingWasm());
+ restoreVolatile(dest);
+
+ masm.jump(ool->rejoin());
+}
+
+bool
+CodeGeneratorShared::omitOverRecursedCheck() const
+{
+ // If the current function makes no calls (which means it isn't recursive)
+ // and it uses only a small amount of stack space, it doesn't need a
+ // stack overflow check. Note that the actual number here is somewhat
+ // arbitrary, and codegen actually uses small bounded amounts of
+ // additional stack space in some cases too.
+ return frameSize() < 64 && !gen->performsCall();
+}
+
+void
+CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
+{
+ MWasmCall* mir = ins->mir();
+
+ if (mir->spIncrement())
+ masm.freeStack(mir->spIncrement());
+
+ MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment == 0);
+ static_assert(WasmStackAlignment >= ABIStackAlignment &&
+ WasmStackAlignment % ABIStackAlignment == 0,
+ "The wasm stack alignment should subsume the ABI-required alignment");
+
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+
+ // Save the caller's TLS register in a reserved stack slot (below the
+ // call's stack arguments) for retrieval after the call.
+ if (mir->saveTls())
+ masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
+
+ const wasm::CallSiteDesc& desc = mir->desc();
+ const wasm::CalleeDesc& callee = mir->callee();
+ switch (callee.which()) {
+ case wasm::CalleeDesc::Func:
+ masm.call(desc, callee.funcIndex());
+ break;
+ case wasm::CalleeDesc::Import:
+ masm.wasmCallImport(desc, callee);
+ break;
+ case wasm::CalleeDesc::WasmTable:
+ case wasm::CalleeDesc::AsmJSTable:
+ masm.wasmCallIndirect(desc, callee);
+ break;
+ case wasm::CalleeDesc::Builtin:
+ masm.call(callee.builtin());
+ break;
+ case wasm::CalleeDesc::BuiltinInstanceMethod:
+ masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
+ break;
+ }
+
+ // After return, restore the caller's TLS and pinned registers.
+ if (mir->saveTls()) {
+ masm.loadPtr(Address(masm.getStackPointer(), mir->tlsStackOffset()), WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+ }
+
+ if (mir->spIncrement())
+ masm.reserveStack(mir->spIncrement());
+}
+
+void
+CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
+{
+ if (index->isConstant()) {
+ Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+ } else {
+ BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+ }
+}
+
+void
+CodeGeneratorShared::emitPreBarrier(Address address)
+{
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+}
+
+Label*
+CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
+{
+ // If this is a loop backedge to a loop header with an implicit interrupt
+ // check, use a patchable jump. Skip this search if compiling without a
+ // script for wasm, as there will be no interrupt check instruction.
+ // Due to critical edge unsplitting there may no longer be unique loop
+ // backedges, so just look for any edge going to an earlier block in RPO.
+ if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
+ for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
+ if (iter->isMoveGroup()) {
+ // Continue searching for an interrupt check.
+ } else {
+ // The interrupt check should be the first instruction in the
+ // loop header other than move groups.
+ MOZ_ASSERT(iter->isInterruptCheck());
+ if (iter->toInterruptCheck()->implicit())
+ return iter->toInterruptCheck()->oolEntry();
+ return nullptr;
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+void
+CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ // No jump necessary if we can fall through to the next block.
+ if (isNextBlock(mir->lir()))
+ return;
+
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge = masm.backedgeJump(&rejoin, mir->lir()->label());
+ masm.bind(&rejoin);
+
+ masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
+ } else {
+ masm.jump(mir->lir()->label());
+ }
+}
+
+Label*
+CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block)
+{
+ // Skip past trivial blocks.
+ block = skipTrivialBlocks(block);
+
+ if (!labelForBackedgeWithImplicitCheck(block))
+ return block->lir()->label();
+
+ // We need to use a patchable jump for this backedge, but want to treat
+ // this as a normal label target to simplify codegen. Efficiency isn't so
+ // important here as these tests are extremely unlikely to be used in loop
+ // backedges, so emit inline code for the patchable jump. Heap allocating
+ // the label allows it to be used by out of line blocks.
+ Label* res = alloc().lifoAlloc()->newInfallible<Label>();
+ Label after;
+ masm.jump(&after);
+ masm.bind(res);
+ jumpToBlock(block);
+ masm.bind(&after);
+ return res;
+}
+
+// This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+void
+CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond, mir->lir()->label());
+ masm.bind(&rejoin);
+
+ masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
+ } else {
+ masm.j(cond, mir->lir()->label());
+ }
+}
+#endif
+
+MOZ_MUST_USE bool
+CodeGeneratorShared::addCacheLocations(const CacheLocationList& locs, size_t* numLocs,
+ size_t* curIndex)
+{
+ size_t firstIndex = runtimeData_.length();
+ size_t numLocations = 0;
+ for (CacheLocationList::iterator iter = locs.begin(); iter != locs.end(); iter++) {
+ // allocateData() ensures that sizeof(CacheLocation) is word-aligned.
+ // If this changes, we will need to pad to ensure alignment.
+ if (!allocateData(sizeof(CacheLocation), curIndex))
+ return false;
+ new (&runtimeData_[*curIndex]) CacheLocation(iter->pc, iter->script);
+ numLocations++;
+ }
+ MOZ_ASSERT(numLocations != 0);
+ *numLocs = numLocations;
+ *curIndex = firstIndex;
+ return true;
+}
+
+ReciprocalMulConstants
+CodeGeneratorShared::computeDivisionConstants(uint32_t d, int maxLog) {
+ MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
+ // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
+ MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
+
+ // Speeding up division by non power-of-2 constants is possible by
+ // calculating, during compilation, a value M such that high-order
+ // bits of M*n correspond to the result of the division of n by d.
+ // No value of M can serve this purpose for arbitrarily big values
+ // of n but, for optimizing integer division, we're just concerned
+ // with values of n whose absolute value is bounded (by fitting in
+ // an integer type, say). With this in mind, we'll find a constant
+ // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
+ // then be 31 for signed division or 32 for unsigned division.
+ //
+ // The original presentation of this technique appears in Hacker's
+ // Delight, a book by Henry S. Warren, Jr. A proof of correctness
+ // for our version follows; we'll denote maxLog by L in the proof,
+ // for conciseness.
+ //
+ // Formally, for |d| < 2^L, we'll compute two magic values M and s
+ // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
+ // (M * n) >> (32 + s) = floor(n/d) if 0 <= n < 2^L
+ // (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
+ //
+ // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
+ // M - 2^p/d <= 2^(p-L)/d. (1)
+ // (Observe that p = CeilLog32(d) + L satisfies this, as the right
+ // side of (1) is at least one in this case). Then,
+ //
+ // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
+ // Proof: Indeed, M is monotone in p and, for p equal to the above
+ // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
+ // 2^p / d < 2^p/(d - 1) * (d - 1)/d
+ // <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
+ // The claim follows by applying the ceiling function.
+ //
+ // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
+ // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
+ // Mn/2^p - 1 < x <= Mn/2^p. (2)
+ // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
+ // n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
+ // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
+ // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
+ //
+ // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
+ // Proof: The proof is similar. Equation (2) holds as above. Using
+ // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
+ // n/d + n/(2^L d) - 1 < x < n/d.
+ // Using n >= -2^L and summing 1,
+ // n/d - 1/d < x + 1 < n/d + 1.
+ // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
+ // In other words, x + 1 = ceil(n/d).
+ //
+ // Condition (1) isn't necessary for the existence of M and s with
+ // the properties above. Hacker's Delight provides a slightly less
+ // restrictive condition when d >= 196611, at the cost of a 3-page
+ // proof of correctness, for the case L = 31.
+ //
+ // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
+ // 2^(p-L) >= d - (2^p)%d.
+ // In order to avoid overflow in the (2^p) % d calculation, we can
+ // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
+ // without overflow as UINT64_MAX >> (64-p).
+
+ // We now compute the least p >= 32 with the property above...
+ int32_t p = 32;
+ while ((uint64_t(1) << (p-maxLog)) + (UINT64_MAX >> (64-p)) % d + 1 < d)
+ p++;
+
+ // ...and the corresponding M. For either the signed (L=31) or the
+ // unsigned (L=32) case, this value can be too large (cf. item a).
+ // Codegen can still multiply by M by multiplying by (M - 2^L) and
+ // adjusting the value afterwards, if this is the case.
+ ReciprocalMulConstants rmc;
+ rmc.multiplier = (UINT64_MAX >> (64-p))/d + 1;
+ rmc.shiftAmount = p - 32;
+
+ return rmc;
+}
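+
+// Worked example (illustrative, not part of the original patch): for signed
+// division by d = 7 (maxLog = 31), the loop above stops at p = 34, giving
+//   multiplier  = ceil(2^34 / 7) = 2454267027 (0x92492493)
+//   shiftAmount = 2
+// so floor(n / 7) == (0x92492493 * n) >> (32 + 2) for 0 <= n < 2^31. This
+// multiplier does not fit in a signed 32-bit immediate, so codegen applies
+// the (M - 2^L) adjustment described in the comment above.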
+
+#ifdef JS_TRACE_LOGGING
+
+void
+CodeGeneratorShared::emitTracelogScript(bool isStart)
+{
+ if (!TraceLogTextIdEnabled(TraceLogger_Scripts))
+ return;
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+ Register script = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ CodeOffset patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLogger));
+
+ masm.branchTest32(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(script);
+
+ CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
+ masm.propagateOOM(patchableTLScripts_.append(patchScript));
+
+ if (isStart)
+ masm.tracelogStartId(logger, script);
+ else
+ masm.tracelogStopId(logger, script);
+
+ masm.Pop(script);
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void
+CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
+{
+ if (!TraceLogTextIdEnabled(textId))
+ return;
+
+ Label done;
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ CodeOffset patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLocation));
+
+ masm.branchTest32(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ if (isStart)
+ masm.tracelogStartId(logger, textId);
+ else
+ masm.tracelogStopId(logger, textId);
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void
+CodeGeneratorShared::emitTracelogTree(bool isStart, const char* text,
+ TraceLoggerTextId enabledTextId)
+{
+ if (!TraceLogTextIdEnabled(enabledTextId))
+ return;
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+ Register eventReg = regs.takeAnyGeneral();
+
+ masm.Push(loggerReg);
+
+ CodeOffset patchLocation = masm.movWithPatch(ImmPtr(nullptr), loggerReg);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLocation));
+
+ masm.branchTest32(Assembler::Zero, loggerReg, loggerReg, &done);
+
+ Address enabledAddress(loggerReg, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(eventReg);
+
+ PatchableTLEvent patchEvent(masm.movWithPatch(ImmWord(0), eventReg), text);
+ masm.propagateOOM(patchableTLEvents_.append(Move(patchEvent)));
+
+ if (isStart)
+ masm.tracelogStartId(loggerReg, eventReg);
+ else
+ masm.tracelogStopId(loggerReg, eventReg);
+
+ masm.Pop(eventReg);
+
+ masm.bind(&done);
+
+ masm.Pop(loggerReg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h
new file mode 100644
index 000000000..c96808c2d
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -0,0 +1,850 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_h
+#define jit_shared_CodeGenerator_shared_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Move.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jit/JitFrames.h"
+#include "jit/LIR.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/OptimizationTracking.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+#include "jit/VMFunctions.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineCode;
+class CodeGenerator;
+class MacroAssembler;
+class IonCache;
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM;
+
+class OutOfLineTruncateSlow;
+class OutOfLineWasmTruncateCheck;
+
+struct PatchableBackedgeInfo
+{
+ CodeOffsetJump backedge;
+ Label* loopHeader;
+ Label* interruptCheck;
+
+ PatchableBackedgeInfo(CodeOffsetJump backedge, Label* loopHeader, Label* interruptCheck)
+ : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
+ {}
+};
+
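+// Magic constants for strength-reducing division by a non-power-of-2 constant
+// d: for a suitable (multiplier, shiftAmount) pair, floor(n / d) can be read
+// off the high bits of multiplier * n. See
+// CodeGeneratorShared::computeDivisionConstants for the derivation.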
+struct ReciprocalMulConstants {
+ int64_t multiplier;
+ int32_t shiftAmount;
+};
+
+// This should be nested in CodeGeneratorShared, but it is used in the
+// optimization tracking implementation and nested classes cannot be
+// forward-declared.
+struct NativeToTrackedOptimizations
+{
+ // [startOffset, endOffset]
+ CodeOffset startOffset;
+ CodeOffset endOffset;
+ const TrackedOptimizations* optimizations;
+};
+
+class CodeGeneratorShared : public LElementVisitor
+{
+ js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;
+
+ MacroAssembler& ensureMasm(MacroAssembler* masm);
+ mozilla::Maybe<MacroAssembler> maybeMasm_;
+
+ public:
+ MacroAssembler& masm;
+
+ protected:
+ MIRGenerator* gen;
+ LIRGraph& graph;
+ LBlock* current;
+ SnapshotWriter snapshots_;
+ RecoverWriter recovers_;
+ JitCode* deoptTable_;
+#ifdef DEBUG
+ uint32_t pushedArgs_;
+#endif
+ uint32_t lastOsiPointOffset_;
+ SafepointWriter safepoints_;
+ Label invalidate_;
+ CodeOffset invalidateEpilogueData_;
+
+ // Label for the common return path.
+ NonAssertingLabel returnLabel_;
+
+ FallbackICStubSpace stubSpace_;
+
+ js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
+ js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
+
+ // Mapping from bailout table ID to an offset in the snapshot buffer.
+ js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;
+
+ // Allocated data space needed at runtime.
+ js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
+
+ // Vector of information about generated polymorphic inline caches.
+ js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;
+
+ // Patchable backedges generated for loops.
+ Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;
+
+#ifdef JS_TRACE_LOGGING
+ struct PatchableTLEvent {
+ CodeOffset offset;
+ const char* event;
+ PatchableTLEvent(CodeOffset offset, const char* event)
+ : offset(offset), event(event)
+ {}
+ };
+ js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTraceLoggers_;
+ js::Vector<PatchableTLEvent, 0, SystemAllocPolicy> patchableTLEvents_;
+ js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTLScripts_;
+#endif
+
+ public:
+ struct NativeToBytecode {
+ CodeOffset nativeOffset;
+ InlineScriptTree* tree;
+ jsbytecode* pc;
+ };
+
+ protected:
+ js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
+ uint8_t* nativeToBytecodeMap_;
+ uint32_t nativeToBytecodeMapSize_;
+ uint32_t nativeToBytecodeTableOffset_;
+ uint32_t nativeToBytecodeNumRegions_;
+
+ JSScript** nativeToBytecodeScriptList_;
+ uint32_t nativeToBytecodeScriptListLength_;
+
+ bool isProfilerInstrumentationEnabled() {
+ return gen->isProfilerInstrumentationEnabled();
+ }
+
+ js::Vector<NativeToTrackedOptimizations, 0, SystemAllocPolicy> trackedOptimizations_;
+ uint8_t* trackedOptimizationsMap_;
+ uint32_t trackedOptimizationsMapSize_;
+ uint32_t trackedOptimizationsRegionTableOffset_;
+ uint32_t trackedOptimizationsTypesTableOffset_;
+ uint32_t trackedOptimizationsAttemptsTableOffset_;
+
+ bool isOptimizationTrackingEnabled() {
+ return gen->isOptimizationTrackingEnabled();
+ }
+
+ protected:
+ // The offset of the first instruction of the OSR entry block from the
+ // beginning of the code buffer.
+ size_t osrEntryOffset_;
+
+ TempAllocator& alloc() const {
+ return graph.mir().alloc();
+ }
+
+ inline void setOsrEntryOffset(size_t offset) {
+ MOZ_ASSERT(osrEntryOffset_ == 0);
+ osrEntryOffset_ = offset;
+ }
+ inline size_t getOsrEntryOffset() const {
+ return osrEntryOffset_;
+ }
+
+ // The offset of the first instruction of the body.
+ // This skips the arguments type checks.
+ size_t skipArgCheckEntryOffset_;
+
+ inline void setSkipArgCheckEntryOffset(size_t offset) {
+ MOZ_ASSERT(skipArgCheckEntryOffset_ == 0);
+ skipArgCheckEntryOffset_ = offset;
+ }
+ inline size_t getSkipArgCheckEntryOffset() const {
+ return skipArgCheckEntryOffset_;
+ }
+
+ typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // See JitOptions.checkOsiPointRegisters. We set this here to avoid
+ // races when enableOsiPointRegisterChecks is called while we're generating
+ // code off-thread.
+ bool checkOsiPointRegisters;
+#endif
+
+ // The initial size of the frame in bytes. These are bytes beyond the
+ // constant header present for every Ion frame, used for pre-determined
+ // spills.
+ int32_t frameDepth_;
+
+ // In some cases, we force stack alignment to platform boundaries, see
+ // also CodeGeneratorShared constructor. This value records the adjustment
+ // we've done.
+ int32_t frameInitialAdjustment_;
+
+ // Frame class this frame's size falls into (see IonFrame.h).
+ FrameSizeClass frameClass_;
+
+ // For arguments to the current function.
+ inline int32_t ArgToStackOffset(int32_t slot) const;
+
+ // For the callee of the current function.
+ inline int32_t CalleeStackOffset() const;
+
+ inline int32_t SlotToStackOffset(int32_t slot) const;
+ inline int32_t StackOffsetToSlot(int32_t offset) const;
+
+ // For argument construction for calls. Argslots are Value-sized.
+ inline int32_t StackOffsetOfPassedArg(int32_t slot) const;
+
+ inline int32_t ToStackOffset(LAllocation a) const;
+ inline int32_t ToStackOffset(const LAllocation* a) const;
+
+ inline Address ToAddress(const LAllocation& a);
+ inline Address ToAddress(const LAllocation* a);
+
+ uint32_t frameSize() const {
+ return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
+ }
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void resetOsiPointRegs(LSafepoint* safepoint);
+ bool shouldVerifyOsiPointRegs(LSafepoint* safepoint);
+ void verifyOsiPointRegs(LSafepoint* safepoint);
+#endif
+
+ bool addNativeToBytecodeEntry(const BytecodeSite* site);
+ void dumpNativeToBytecodeEntries();
+ void dumpNativeToBytecodeEntry(uint32_t idx);
+
+ bool addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);
+ void extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);
+
+ public:
+ MIRGenerator& mirGen() const {
+ return *gen;
+ }
+
+    // When appending to runtimeData_, the vector might realloc, leaving pointers
+    // into the original vector stale and unusable. DataPtr acts like a pointer,
+    // but remains safe in the face of potentially realloc'ing vector appends.
+ friend class DataPtr;
+ template <typename T>
+ class DataPtr
+ {
+ CodeGeneratorShared* cg_;
+ size_t index_;
+
+ T* lookup() {
+ return reinterpret_cast<T*>(&cg_->runtimeData_[index_]);
+ }
+ public:
+ DataPtr(CodeGeneratorShared* cg, size_t index)
+ : cg_(cg), index_(index) { }
+
+ T * operator ->() {
+ return lookup();
+ }
+ T * operator*() {
+ return lookup();
+ }
+ };
+
+ protected:
+ MOZ_MUST_USE
+ bool allocateData(size_t size, size_t* offset) {
+ MOZ_ASSERT(size % sizeof(void*) == 0);
+ *offset = runtimeData_.length();
+ masm.propagateOOM(runtimeData_.appendN(0, size));
+ return !masm.oom();
+ }
+
+    // Allocate space for the derived cache class while statically ensuring that
+    // T is an IonCache. We only need the cache list at GC time; everyone else
+    // can just take runtimeData offsets.
+ template <typename T>
+ inline size_t allocateCache(const T& cache) {
+ static_assert(mozilla::IsBaseOf<IonCache, T>::value, "T must inherit from IonCache");
+ size_t index;
+ masm.propagateOOM(allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
+ masm.propagateOOM(cacheList_.append(index));
+ if (masm.oom())
+ return SIZE_MAX;
+ // Use the copy constructor on the allocated space.
+ MOZ_ASSERT(index == cacheList_.back());
+ new (&runtimeData_[index]) T(cache);
+ return index;
+ }
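+
+    // A hedged usage sketch of allocateCache() together with DataPtr; the cache
+    // type and the method called on it are placeholders, not part of this patch:
+    //
+    //   size_t index = allocateCache(SomeIonCache(/* ... */));
+    //   if (masm.oom())
+    //       return;
+    //   // DataPtr re-derives the pointer on every access, so it stays usable
+    //   // even if a later allocateData() reallocs runtimeData_.
+    //   DataPtr<SomeIonCache> cache(this, index);
+    //   cache->update(/* ... */);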
+
+ protected:
+    // Encode an LRecoverInfo or an LSnapshot into the compressed snapshot buffer.
+ void encode(LRecoverInfo* recover);
+ void encode(LSnapshot* snapshot);
+ void encodeAllocation(LSnapshot* snapshot, MDefinition* def, uint32_t* startIndex);
+
+ // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
+ // If the bailout table is full, this returns false, which is not a fatal
+ // error (the code generator may use a slower bailout mechanism).
+ bool assignBailoutId(LSnapshot* snapshot);
+
+ // Encode all encountered safepoints in CG-order, and resolve |indices| for
+ // safepoint offsets.
+ bool encodeSafepoints();
+
+ // Fixup offsets of native-to-bytecode map.
+ bool createNativeToBytecodeScriptList(JSContext* cx);
+ bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code);
+ void verifyCompactNativeToBytecodeMap(JitCode* code);
+
+ bool generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
+ IonTrackedTypeVector* allTypes);
+ void verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
+ const UniqueTrackedOptimizations& unique,
+ const IonTrackedTypeVector* allTypes);
+
+ // Mark the safepoint on |ins| as corresponding to the current assembler location.
+ // The location should be just after a call.
+ void markSafepoint(LInstruction* ins);
+ void markSafepointAt(uint32_t offset, LInstruction* ins);
+
+ // Mark the OSI point |ins| as corresponding to the current
+ // assembler location inside the |osiIndices_|. Return the assembler
+ // location for the OSI point return location.
+ uint32_t markOsiPoint(LOsiPoint* ins);
+
+ // Ensure that there is enough room between the last OSI point and the
+ // current instruction, such that:
+ // (1) Invalidation will not overwrite the current instruction, and
+ // (2) Overwriting the current instruction will not overwrite
+ // an invalidation marker.
+ void ensureOsiSpace();
+
+ OutOfLineCode* oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir);
+
+ void emitWasmCallBase(LWasmCallBase* ins);
+
+ void emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment);
+ void emitPreBarrier(Address address);
+
+ // We don't emit code for trivial blocks, so if we want to branch to the
+ // given block, and it's trivial, return the ultimate block we should
+ // actually branch directly to.
+ MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
+ while (block->lir()->isTrivial()) {
+ MOZ_ASSERT(block->lir()->rbegin()->numSuccessors() == 1);
+ block = block->lir()->rbegin()->getSuccessor(0);
+ }
+ return block;
+ }
+
+ // Test whether the given block can be reached via fallthrough from the
+ // current block.
+ inline bool isNextBlock(LBlock* block) {
+ uint32_t target = skipTrivialBlocks(block->mir())->id();
+ uint32_t i = current->mir()->id() + 1;
+ if (target < i)
+ return false;
+ // Trivial blocks can be crossed via fallthrough.
+ for (; i != target; ++i) {
+ if (!graph.getBlock(i)->isTrivial())
+ return false;
+ }
+ return true;
+ }
+
+ public:
+ // Save and restore all volatile registers to/from the stack, excluding the
+ // specified register(s), before a function call made using callWithABI and
+ // after storing the function call's return value to an output register.
+ // (The only registers that don't need to be saved/restored are 1) the
+ // temporary register used to store the return value of the function call,
+ // if there is one [otherwise that stored value would be overwritten]; and
+ // 2) temporary registers whose values aren't needed in the rest of the LIR
+ // instruction [this is purely an optimization]. All other volatiles must
+ // be saved and restored in case future LIR instructions need those values.)
+ void saveVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(LiveRegisterSet temps) {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void restoreVolatile(LiveRegisterSet temps) {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void saveVolatile() {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
+ void restoreVolatile() {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
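+
+    // A hedged sketch of the intended pattern around an ABI call; the
+    // MacroAssembler helpers named here (setupUnalignedABICall, passABIArg,
+    // callWithABI) are assumed from the surrounding code base, and SomeHelper
+    // is a placeholder:
+    //
+    //   saveVolatile(output);
+    //   masm.setupUnalignedABICall(temp);
+    //   masm.passABIArg(input);
+    //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, SomeHelper));
+    //   masm.storeCallPointerResult(output);
+    //   restoreVolatile(output);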
+
+ // These functions have to be called before and after any callVM and before
+ // any modifications of the stack. Modification of the stack made after
+ // these calls should update the framePushed variable, needed by the exit
+ // frame produced by callVM.
+ inline void saveLive(LInstruction* ins);
+ inline void restoreLive(LInstruction* ins);
+ inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);
+
+ // Save/restore all registers that are both live and volatile.
+ inline void saveLiveVolatile(LInstruction* ins);
+ inline void restoreLiveVolatile(LInstruction* ins);
+
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ void storePointerResultTo(Register reg) {
+ masm.storeCallPointerResult(reg);
+ }
+
+ void storeFloatResultTo(FloatRegister reg) {
+ masm.storeCallFloatResult(reg);
+ }
+
+ template <typename T>
+ void storeResultValueTo(const T& t) {
+ masm.storeCallResultValue(t);
+ }
+
+ void callVM(const VMFunction& f, LInstruction* ins, const Register* dynStack = nullptr);
+
+ template <class ArgSeq, class StoreOutputTo>
+ inline OutOfLineCode* oolCallVM(const VMFunction& fun, LInstruction* ins, const ArgSeq& args,
+ const StoreOutputTo& out);
+
+ void addCache(LInstruction* lir, size_t cacheIndex);
+ bool addCacheLocations(const CacheLocationList& locs, size_t* numLocs, size_t* offset);
+ ReciprocalMulConstants computeDivisionConstants(uint32_t d, int maxLog);
+
+ protected:
+ bool generatePrologue();
+ bool generateEpilogue();
+
+ void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
+ void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
+ bool generateOutOfLineCode();
+
+ Label* getJumpLabelForBranch(MBasicBlock* block);
+
+ // Generate a jump to the start of the specified block, adding information
+ // if this is a loop backedge. Use this in place of jumping directly to
+ // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
+ // directly is needed.
+ void jumpToBlock(MBasicBlock* mir);
+
+    // Get a label for the start of the block, which can be used for jumping
+    // in place of jumpToBlock.
+ Label* labelForBackedgeWithImplicitCheck(MBasicBlock* mir);
+
+// This function is not used for MIPS. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+ void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
+#endif
+
+ template <class T>
+ wasm::TrapDesc trap(T* mir, wasm::Trap trap) {
+ return wasm::TrapDesc(mir->trapOffset(), trap, masm.framePushed());
+ }
+
+ private:
+ void generateInvalidateEpilogue();
+
+ public:
+ CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ template <class ArgSeq, class StoreOutputTo>
+ void visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool);
+
+ void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);
+
+ virtual void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool) {
+ MOZ_CRASH("NYI");
+ }
+
+ bool omitOverRecursedCheck() const;
+
+#ifdef JS_TRACE_LOGGING
+ protected:
+ void emitTracelogScript(bool isStart);
+ void emitTracelogTree(bool isStart, uint32_t textId);
+ void emitTracelogTree(bool isStart, const char* text, TraceLoggerTextId enabledTextId);
+
+ public:
+ void emitTracelogScriptStart() {
+ emitTracelogScript(/* isStart =*/ true);
+ }
+ void emitTracelogScriptStop() {
+ emitTracelogScript(/* isStart =*/ false);
+ }
+ void emitTracelogStartEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/ true, textId);
+ }
+ void emitTracelogStopEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/ false, textId);
+ }
+    // Log arbitrary text. The TraceLoggerTextId is used to toggle the
+    // logging on and off.
+    // Note: the text is not copied and needs to be kept alive until linking.
+ void emitTracelogStartEvent(const char* text, TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/ true, text, enabledTextId);
+ }
+ void emitTracelogStopEvent(const char* text, TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/ false, text, enabledTextId);
+ }
+#endif
+ void emitTracelogIonStart() {
+#ifdef JS_TRACE_LOGGING
+ emitTracelogScriptStart();
+ emitTracelogStartEvent(TraceLogger_IonMonkey);
+#endif
+ }
+ void emitTracelogIonStop() {
+#ifdef JS_TRACE_LOGGING
+ emitTracelogStopEvent(TraceLogger_IonMonkey);
+ emitTracelogScriptStop();
+#endif
+ }
+
+ protected:
+ inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
+ Scalar::Type type, Operand mem, LAllocation alloc);
+
+ public:
+ inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc);
+ inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc);
+
+ bool isGlobalObject(JSObject* object);
+};
+
+// An out-of-line path is generated at the end of the function.
+class OutOfLineCode : public TempObject
+{
+ Label entry_;
+ Label rejoin_;
+ uint32_t framePushed_;
+ const BytecodeSite* site_;
+
+ public:
+ OutOfLineCode()
+ : framePushed_(0),
+ site_()
+ { }
+
+ virtual void generate(CodeGeneratorShared* codegen) = 0;
+
+ Label* entry() {
+ return &entry_;
+ }
+ virtual void bind(MacroAssembler* masm) {
+ masm->bind(entry());
+ }
+ Label* rejoin() {
+ return &rejoin_;
+ }
+ void setFramePushed(uint32_t framePushed) {
+ framePushed_ = framePushed;
+ }
+ uint32_t framePushed() const {
+ return framePushed_;
+ }
+ void setBytecodeSite(const BytecodeSite* site) {
+ site_ = site;
+ }
+ const BytecodeSite* bytecodeSite() const {
+ return site_;
+ }
+ jsbytecode* pc() const {
+ return site_->pc();
+ }
+ JSScript* script() const {
+ return site_->script();
+ }
+};
+
+// For OOL paths that want a specific-typed code generator.
+template <typename T>
+class OutOfLineCodeBase : public OutOfLineCode
+{
+ public:
+ virtual void generate(CodeGeneratorShared* codegen) {
+ accept(static_cast<T*>(codegen));
+ }
+
+ public:
+ virtual void accept(T* codegen) = 0;
+};
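+
+// A hedged sketch of how a code generator typically defines an out-of-line
+// path on top of OutOfLineCodeBase; OutOfLineFoo and visitOutOfLineFoo are
+// placeholder names, not part of this patch:
+//
+//   class OutOfLineFoo : public OutOfLineCodeBase<CodeGenerator>
+//   {
+//       LInstruction* lir_;
+//
+//     public:
+//       explicit OutOfLineFoo(LInstruction* lir) : lir_(lir) { }
+//       void accept(CodeGenerator* codegen) {
+//           codegen->visitOutOfLineFoo(this);
+//       }
+//       LInstruction* lir() const { return lir_; }
+//   };
+//
+// The code generator branches to ool->entry() on the slow path, and the
+// generated out-of-line code jumps back to ool->rejoin() when done.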
+
+// ArgSeq stores arguments for OutOfLineCallVM.
+//
+// OutOfLineCallVM instances are created with the "oolCallVM" function. The
+// third argument of this function is an instance of a class which provides a
+// "generate" method in charge of pushing the arguments, with "pushArg", for a
+// VMFunction.
+//
+// Such a list of arguments can be created by using the "ArgList" function,
+// which creates one instance of "ArgSeq" whose argument types are inferred
+// from the arguments themselves.
+//
+// The list of arguments must be written in the same order as if you were
+// calling the function in C++.
+//
+// Example:
+// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
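+//
+// A slightly fuller, hedged sketch of how ArgList, a store wrapper, and
+// oolCallVM fit together (the VMFunction and the LIR accessors below are
+// placeholders):
+//
+//   OutOfLineCode* ool = oolCallVM(SomeVMFunctionInfo, lir,
+//                                  ArgList(ToRegister(lir->input())),
+//                                  StoreRegisterTo(output));
+//   masm.branchTestInt32(Assembler::NotEqual, tag, ool->entry());
+//   ...
+//   masm.bind(ool->rejoin());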
+
+template <typename... ArgTypes>
+class ArgSeq;
+
+template <>
+class ArgSeq<>
+{
+ public:
+ ArgSeq() { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ }
+};
+
+template <typename HeadType, typename... TailTypes>
+class ArgSeq<HeadType, TailTypes...> : public ArgSeq<TailTypes...>
+{
+ private:
+ using RawHeadType = typename mozilla::RemoveReference<HeadType>::Type;
+ RawHeadType head_;
+
+ public:
+ template <typename ProvidedHead, typename... ProvidedTail>
+ explicit ArgSeq(ProvidedHead&& head, ProvidedTail&&... tail)
+ : ArgSeq<TailTypes...>(mozilla::Forward<ProvidedTail>(tail)...),
+ head_(mozilla::Forward<ProvidedHead>(head))
+ { }
+
+ // Arguments are pushed in reverse order, from last argument to first
+ // argument.
+ inline void generate(CodeGeneratorShared* codegen) const {
+ this->ArgSeq<TailTypes...>::generate(codegen);
+ codegen->pushArg(head_);
+ }
+};
+
+template <typename... ArgTypes>
+inline ArgSeq<ArgTypes...>
+ArgList(ArgTypes&&... args)
+{
+ return ArgSeq<ArgTypes...>(mozilla::Forward<ArgTypes>(args)...);
+}
+
+// Store wrappers, to generate the right move of data after the VM call.
+
+struct StoreNothing
+{
+ inline void generate(CodeGeneratorShared* codegen) const {
+ }
+ inline LiveRegisterSet clobbered() const {
+ return LiveRegisterSet(); // No register gets clobbered
+ }
+};
+
+class StoreRegisterTo
+{
+ private:
+ Register out_;
+
+ public:
+ explicit StoreRegisterTo(Register out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ // It's okay to use storePointerResultTo here - the VMFunction wrapper
+ // ensures the upper bytes are zero for bool/int32 return values.
+ codegen->storePointerResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+class StoreFloatRegisterTo
+{
+ private:
+ FloatRegister out_;
+
+ public:
+ explicit StoreFloatRegisterTo(FloatRegister out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ codegen->storeFloatResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+class StoreValueTo_
+{
+ private:
+ Output out_;
+
+ public:
+ explicit StoreValueTo_(const Output& out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ codegen->storeResultValueTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+StoreValueTo_<Output> StoreValueTo(const Output& out)
+{
+ return StoreValueTo_<Output>(out);
+}
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ private:
+ LInstruction* lir_;
+ const VMFunction& fun_;
+ ArgSeq args_;
+ StoreOutputTo out_;
+
+ public:
+ OutOfLineCallVM(LInstruction* lir, const VMFunction& fun, const ArgSeq& args,
+ const StoreOutputTo& out)
+ : lir_(lir),
+ fun_(fun),
+ args_(args),
+ out_(out)
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineCallVM(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+ const VMFunction& function() const { return fun_; }
+ const ArgSeq& args() const { return args_; }
+ const StoreOutputTo& out() const { return out_; }
+};
+
+template <class ArgSeq, class StoreOutputTo>
+inline OutOfLineCode*
+CodeGeneratorShared::oolCallVM(const VMFunction& fun, LInstruction* lir, const ArgSeq& args,
+ const StoreOutputTo& out)
+{
+ MOZ_ASSERT(lir->mirRaw());
+ MOZ_ASSERT(lir->mirRaw()->isInstruction());
+
+ OutOfLineCode* ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
+ addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
+ return ool;
+}
+
+template <class ArgSeq, class StoreOutputTo>
+void
+CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool)
+{
+ LInstruction* lir = ool->lir();
+
+ saveLive(lir);
+ ool->args().generate(this);
+ callVM(ool->function(), lir);
+ ool->out().generate(this);
+ restoreLiveIgnore(lir, ool->out().clobbered());
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineWasmTruncateCheck : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ MIRType fromType_;
+ MIRType toType_;
+ FloatRegister input_;
+ bool isUnsigned_;
+ wasm::TrapOffset trapOffset_;
+
+ public:
+ OutOfLineWasmTruncateCheck(MWasmTruncateToInt32* mir, FloatRegister input)
+ : fromType_(mir->input()->type()), toType_(MIRType::Int32), input_(input),
+ isUnsigned_(mir->isUnsigned()), trapOffset_(mir->trapOffset())
+ { }
+
+ OutOfLineWasmTruncateCheck(MWasmTruncateToInt64* mir, FloatRegister input)
+ : fromType_(mir->input()->type()), toType_(MIRType::Int64), input_(input),
+ isUnsigned_(mir->isUnsigned()), trapOffset_(mir->trapOffset())
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineWasmTruncateCheck(this);
+ }
+
+ FloatRegister input() const { return input_; }
+ MIRType toType() const { return toType_; }
+ MIRType fromType() const { return fromType_; }
+ bool isUnsigned() const { return isUnsigned_; }
+ wasm::TrapOffset trapOffset() const { return trapOffset_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_h */
diff --git a/js/src/jit/shared/IonAssemblerBuffer.h b/js/src/jit/shared/IonAssemblerBuffer.h
new file mode 100644
index 000000000..cc20e26d2
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -0,0 +1,417 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBuffer_h
+#define jit_shared_IonAssemblerBuffer_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+// The offset into a buffer, in bytes.
+class BufferOffset
+{
+ int offset;
+
+ public:
+ friend BufferOffset nextOffset();
+
+ BufferOffset()
+ : offset(INT_MIN)
+ { }
+
+ explicit BufferOffset(int offset_)
+ : offset(offset_)
+ { }
+
+ explicit BufferOffset(Label* l)
+ : offset(l->offset())
+ { }
+
+ explicit BufferOffset(RepatchLabel* l)
+ : offset(l->offset())
+ { }
+
+ int getOffset() const { return offset; }
+ bool assigned() const { return offset != INT_MIN; }
+
+ // A BOffImm is a Branch Offset Immediate. It is an architecture-specific
+ // structure that holds the immediate for a pc relative branch. diffB takes
+ // the label for the destination of the branch, and encodes the immediate
+    // for the branch. This will need to be fixed up later, since a pool may be
+ // inserted between the branch and its destination.
+ template <class BOffImm>
+ BOffImm diffB(BufferOffset other) const {
+ if (!BOffImm::IsInRange(offset - other.offset))
+ return BOffImm();
+ return BOffImm(offset - other.offset);
+ }
+
+ template <class BOffImm>
+ BOffImm diffB(Label* other) const {
+ MOZ_ASSERT(other->bound());
+ if (!BOffImm::IsInRange(offset - other->offset()))
+ return BOffImm();
+ return BOffImm(offset - other->offset());
+ }
+};
+
+inline bool
+operator<(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() < b.getOffset();
+}
+
+inline bool
+operator>(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() > b.getOffset();
+}
+
+inline bool
+operator<=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() <= b.getOffset();
+}
+
+inline bool
+operator>=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() >= b.getOffset();
+}
+
+inline bool
+operator==(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() == b.getOffset();
+}
+
+inline bool
+operator!=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() != b.getOffset();
+}
+
+template<int SliceSize>
+class BufferSlice
+{
+ protected:
+ BufferSlice<SliceSize>* prev_;
+ BufferSlice<SliceSize>* next_;
+
+ size_t bytelength_;
+
+ public:
+ mozilla::Array<uint8_t, SliceSize> instructions;
+
+ public:
+ explicit BufferSlice()
+ : prev_(nullptr), next_(nullptr), bytelength_(0)
+ { }
+
+ size_t length() const { return bytelength_; }
+ static inline size_t Capacity() { return SliceSize; }
+
+ BufferSlice* getNext() const { return next_; }
+ BufferSlice* getPrev() const { return prev_; }
+
+ void setNext(BufferSlice<SliceSize>* next) {
+ MOZ_ASSERT(next_ == nullptr);
+ MOZ_ASSERT(next->prev_ == nullptr);
+ next_ = next;
+ next->prev_ = this;
+ }
+
+ void putBytes(size_t numBytes, const void* source) {
+ MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
+ if (source)
+ memcpy(&instructions[length()], source, numBytes);
+ bytelength_ += numBytes;
+ }
+};
+
+template<int SliceSize, class Inst>
+class AssemblerBuffer
+{
+ protected:
+ typedef BufferSlice<SliceSize> Slice;
+ typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
+
+ // Doubly-linked list of BufferSlices, with the most recent in tail position.
+ Slice* head;
+ Slice* tail;
+
+ bool m_oom;
+ bool m_bail;
+
+    // How many bytes have been committed to the buffer thus far.
+    // Does not include the tail.
+ uint32_t bufferSize;
+
+ // Finger for speeding up accesses.
+ Slice* finger;
+ int finger_offset;
+
+ LifoAlloc lifoAlloc_;
+
+ public:
+ explicit AssemblerBuffer()
+ : head(nullptr),
+ tail(nullptr),
+ m_oom(false),
+ m_bail(false),
+ bufferSize(0),
+ finger(nullptr),
+ finger_offset(0),
+ lifoAlloc_(8192)
+ { }
+
+ public:
+ bool isAligned(size_t alignment) const {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ return !(size() & (alignment - 1));
+ }
+
+ protected:
+ virtual Slice* newSlice(LifoAlloc& a) {
+ Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
+ if (!tmp) {
+ fail_oom();
+ return nullptr;
+ }
+ return new (tmp) Slice;
+ }
+
+ public:
+ bool ensureSpace(size_t size) {
+ // Space can exist in the most recent Slice.
+ if (tail && tail->length() + size <= tail->Capacity()) {
+ // Simulate allocation failure even when we don't need a new slice.
+ if (js::oom::ShouldFailWithOOM())
+ return fail_oom();
+
+ return true;
+ }
+
+ // Otherwise, a new Slice must be added.
+ Slice* slice = newSlice(lifoAlloc_);
+ if (slice == nullptr)
+ return fail_oom();
+
+ // If this is the first Slice in the buffer, add to head position.
+ if (!head) {
+ head = slice;
+ finger = slice;
+ finger_offset = 0;
+ }
+
+ // Finish the last Slice and add the new Slice to the linked list.
+ if (tail) {
+ bufferSize += tail->length();
+ tail->setNext(slice);
+ }
+ tail = slice;
+
+ return true;
+ }
+
+ BufferOffset putByte(uint8_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putShort(uint16_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putInt(uint32_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ // Add numBytes bytes to this buffer.
+ // The data must fit in a single slice.
+ BufferOffset putBytes(size_t numBytes, const void* inst) {
+ if (!ensureSpace(numBytes))
+ return BufferOffset();
+
+ BufferOffset ret = nextOffset();
+ tail->putBytes(numBytes, inst);
+ return ret;
+ }
+
+ // Add a potentially large amount of data to this buffer.
+    // The data may be distributed across multiple slices.
+ // Return the buffer offset of the first added byte.
+ BufferOffset putBytesLarge(size_t numBytes, const void* data)
+ {
+ BufferOffset ret = nextOffset();
+ while (numBytes > 0) {
+ if (!ensureSpace(1))
+ return BufferOffset();
+ size_t avail = tail->Capacity() - tail->length();
+ size_t xfer = numBytes < avail ? numBytes : avail;
+ MOZ_ASSERT(xfer > 0, "ensureSpace should have allocated a slice");
+ tail->putBytes(xfer, data);
+ data = (const uint8_t*)data + xfer;
+ numBytes -= xfer;
+ }
+ return ret;
+ }
+
+ unsigned int size() const {
+ if (tail)
+ return bufferSize + tail->length();
+ return bufferSize;
+ }
+
+ bool oom() const { return m_oom || m_bail; }
+ bool bail() const { return m_bail; }
+
+ bool fail_oom() {
+ m_oom = true;
+ return false;
+ }
+ bool fail_bail() {
+ m_bail = true;
+ return false;
+ }
+
+ private:
+ void update_finger(Slice* finger_, int fingerOffset_) {
+ finger = finger_;
+ finger_offset = fingerOffset_;
+ }
+
+ static const unsigned SliceDistanceRequiringFingerUpdate = 3;
+
+ Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset;
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset >= cursor);
+
+ for (Slice *slice = start; slice != nullptr; slice = slice->getNext()) {
+ const int slicelen = slice->length();
+
+ // Is the offset within the bounds of this slice?
+ if (offset < cursor + slicelen) {
+ if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+ update_finger(slice, cursor);
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ cursor += slicelen;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset; // First (lowest) offset in the start Slice.
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset < int(cursor + start->length()));
+
+ for (Slice* slice = start; slice != nullptr; ) {
+ // Is the offset within the bounds of this slice?
+ if (offset >= cursor) {
+ if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+ update_finger(slice, cursor);
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ // Move the cursor to the start of the previous slice.
+ Slice* prev = slice->getPrev();
+ cursor -= prev->length();
+
+ slice = prev;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ public:
+ Inst* getInstOrNull(BufferOffset off) {
+ if (!off.assigned())
+ return nullptr;
+ return getInst(off);
+ }
+
+ // Get a pointer to the instruction at offset |off| which must be within the
+ // bounds of the buffer. Use |getInstOrNull()| if |off| may be unassigned.
+ Inst* getInst(BufferOffset off) {
+ const int offset = off.getOffset();
+ MOZ_RELEASE_ASSERT(off.assigned() && offset >= 0 && (unsigned)offset < size());
+
+ // Is the instruction in the last slice?
+ if (offset >= int(bufferSize))
+ return (Inst*)&tail->instructions[offset - bufferSize];
+
+ // How close is this offset to the previous one we looked up?
+ // If it is sufficiently far from the start and end of the buffer,
+ // use the finger to start midway through the list.
+ int finger_dist = abs(offset - finger_offset);
+ if (finger_dist < Min(offset, int(bufferSize - offset))) {
+ if (finger_offset < offset)
+ return getInstForwards(off, finger, finger_offset, true);
+ return getInstBackwards(off, finger, finger_offset, true);
+ }
+
+ // Is the instruction closer to the start or to the end?
+ if (offset < int(bufferSize - offset))
+ return getInstForwards(off, head, 0);
+
+ // The last slice was already checked above, so start at the
+ // second-to-last.
+ Slice* prev = tail->getPrev();
+ return getInstBackwards(off, prev, bufferSize - prev->length());
+ }
+
+ BufferOffset nextOffset() const {
+ if (tail)
+ return BufferOffset(bufferSize + tail->length());
+ return BufferOffset(bufferSize);
+ }
+
+ class AssemblerBufferInstIterator
+ {
+ BufferOffset bo;
+ AssemblerBuffer_* m_buffer;
+
+ public:
+ explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buffer)
+ : bo(off), m_buffer(buffer)
+ { }
+
+ Inst* next() {
+ Inst* i = m_buffer->getInst(bo);
+ bo = BufferOffset(bo.getOffset() + i->size());
+ return cur();
+ }
+
+ Inst* cur() {
+ return m_buffer->getInst(bo);
+ }
+ };
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBuffer_h
diff --git a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
new file mode 100644
index 000000000..74fa60b12
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -0,0 +1,1145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
+#define jit_shared_IonAssemblerBufferWithConstantPools_h
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include <algorithm>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+// This code extends the AssemblerBuffer to support the pooling of values loaded
+// using program-counter relative addressing modes. This is necessary with the
+// ARM instruction set because it has a fixed instruction size that cannot
+// encode all values as immediate arguments in instructions. Pooling the values
+// allows the values to be placed in large chunks which minimizes the number of
+// forced branches around them in the code. This is used for loading floating
+// point constants, for loading 32 bit constants on the ARMv6, for absolute
+// branch targets, and in the future will be needed for large branches on the ARMv6.
+//
+// For simplicity of the implementation, the constant pools are always placed
+// after the loads referencing them. When a new constant pool load is added to
+// the assembler buffer, a corresponding pool entry is added to the current
+// pending pool. The finishPool() method copies the current pending pool entries
+// into the assembler buffer at the current offset and patches the pending
+// constant pool load instructions.
+//
+// Before inserting instructions or pool entries, it is necessary to determine
+// if doing so would place a pending pool entry out of reach of an instruction,
+// and if so then the pool must firstly be dumped. With the allocation algorithm
+// used below, the recalculation of all the distances between instructions and
+// their pool entries can be avoided by noting that there will be a limiting
+// instruction and pool entry pair that does not change when inserting more
+// instructions. Adding more instructions makes the same increase to the
+// distance, between instructions and their pool entries, for all such
+// pairs. This pair is recorded as the limiter, and it is updated when new pool
+// entries are added, see updateLimiter()
+//
+// The pools consist of: a guard instruction that branches around the pool, a
+// header word that helps identify a pool in the instruction stream, and then
+// the pool entries allocated in units of words. The guard instruction could be
+// omitted if control does not reach the pool, and this is referred to as a
+// natural guard below, but for simplicity the guard branch is always
+// emitted. The pool header is an identifiable word that in combination with the
+// guard uniquely identifies a pool in the instruction stream. The header also
+// encodes the pool size and a flag indicating if the guard is natural. It is
+// possible to iterate through the code instructions skipping or examining the
+// pools. E.g. it might be necessary to skip pools when searching for, or patching,
+// an instruction sequence.
+//
+// It is often required to keep a reference to a pool entry, to patch it after
+// the buffer is finished. Each pool entry is assigned a unique index, counting
+// up from zero (see the poolEntryCount slot below). These can be mapped back to
+// the offset of the pool entry in the finished buffer, see poolEntryOffset().
+//
+// The code supports no-pool regions, and for these the size of the region, in
+// instructions, must be supplied. This size is used to determine if inserting
+// the instructions would place a pool entry out of range, and if so then a pool
+// is firstly flushed. The DEBUG code checks that the emitted code is within the
+// supplied size to detect programming errors. See enterNoPool() and
+// leaveNoPool().
+
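+// Schematically, a dumped pool looks roughly like this (a sketch; only the
+// guard/header/entry structure is taken from the description above, and sizes
+// are in fixed 32-bit instruction units):
+//
+//   ...preceding instructions...
+//   guard:     branch to afterPool      ; guardSize_ instructions
+//   header:    pool size + natural flag ; headerSize_ instructions
+//   entries:   pool data                ; allocated in PoolAllocUnit words
+//   afterPool: ...following instructions...
+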
+// The only planned instruction sets that require inline constant pools are the
+// ARM, ARM64, and MIPS, and these all have fixed 32-bit sized instructions so
+// for simplicity the code below is specialized for fixed 32-bit sized
+// instructions and makes no attempt to support variable length
+// instructions. The base assembler buffer, which supports variable width
+// instructions, is used by the x86 and x64 backends.
+
+// The AssemblerBufferWithConstantPools template class uses static callbacks to
+// the provided Asm template argument class:
+//
+// void Asm::InsertIndexIntoTag(uint8_t* load_, uint32_t index)
+//
+// When allocEntry() is called to add a constant pool load with an associated
+// constant pool entry, this callback is called to encode the index of the
+// allocated constant pool entry into the load instruction.
+//
+// After the constant pool has been placed, PatchConstantPoolLoad() is called
+// to update the load instruction with the right load offset.
+//
+// void Asm::WritePoolGuard(BufferOffset branch,
+// Instruction* dest,
+// BufferOffset afterPool)
+//
+// Write out the constant pool guard branch before emitting the pool.
+//
+// branch
+// Offset of the guard branch in the buffer.
+//
+// dest
+// Pointer into the buffer where the guard branch should be emitted. (Same
+// as getInst(branch)). Space for guardSize_ instructions has been reserved.
+//
+// afterPool
+// Offset of the first instruction after the constant pool. This includes
+// both pool entries and branch veneers added after the pool data.
+//
+// void Asm::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
+//
+// Write out the pool header which follows the guard branch.
+//
+// void Asm::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+//
+// Re-encode a load of a constant pool entry after the location of the
+// constant pool is known.
+//
+// The load instruction at loadAddr was previously passed to
+// InsertIndexIntoTag(). The constPoolAddr is the final address of the
+// constant pool in the assembler buffer.
+//
+// void Asm::PatchShortRangeBranchToVeneer(AssemblerBufferWithConstantPools*,
+// unsigned rangeIdx,
+// BufferOffset deadline,
+// BufferOffset veneer)
+//
+// Patch a short-range branch to jump through a veneer before it goes out of
+// range.
+//
+// rangeIdx, deadline
+// These arguments were previously passed to registerBranchDeadline(). It is
+// assumed that PatchShortRangeBranchToVeneer() knows how to compute the
+// offset of the short-range branch from this information.
+//
+// veneer
+// Space for a branch veneer, guaranteed to be <= deadline. At this
+// position, guardSize_ * InstSize bytes are allocated. They should be
+// initialized to the proper unconditional branch instruction.
+//
+// Unbound branches to the same unbound label are organized as a linked list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Branch3 -> nil
+//
+// This callback should insert a new veneer branch into the list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Veneer -> Branch3 -> nil
+//
+// When Assembler::bind() rewrites the branches with the real label offset, it
+// probably has to bind Branch2 to target the veneer branch instead of jumping
+// straight to the label.
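+//
+// Pulled together, a backend's Asm class is expected to declare static hooks
+// along these lines (a sketch derived from the documentation above, not a
+// declaration that appears in this patch):
+//
+//   class Assembler
+//   {
+//     public:
+//       static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+//       static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+//       static void WritePoolGuard(BufferOffset branch, Instruction* dest,
+//                                  BufferOffset afterPool);
+//       static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+//       static void PatchShortRangeBranchToVeneer(AssemblerBufferWithConstantPools*,
+//                                                 unsigned rangeIdx,
+//                                                 BufferOffset deadline,
+//                                                 BufferOffset veneer);
+//   };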
+
+namespace js {
+namespace jit {
+
+// BranchDeadlineSet - Keep track of pending branch deadlines.
+//
+// Some architectures like arm and arm64 have branch instructions with limited
+// range. When assembling a forward branch, it is not always known if the final
+// target label will be in range of the branch instruction.
+//
+// The BranchDeadlineSet data structure is used to keep track of the set of
+// pending forward branches. It supports the following fast operations:
+//
+// 1. Get the earliest deadline in the set.
+// 2. Add a new branch deadline.
+// 3. Remove a branch deadline.
+//
+// Architectures may have different branch encodings with different ranges. Each
+// supported range is assigned a small integer starting at 0. This data
+// structure does not care about the actual range of branch instructions, just
+// the latest buffer offset that can be reached - the deadline offset.
+//
+// Branches are stored as (rangeIdx, deadline) tuples. The target-specific code
+// can compute the location of the branch itself from this information; this
+// data structure does not need to know.
+//
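+// A minimal usage sketch (the offsets and range indexes are illustrative):
+//
+//   BranchDeadlineSet<2> deadlines(lifoAlloc);
+//   deadlines.addDeadline(0, BufferOffset(128));    // short-range branch
+//   deadlines.addDeadline(1, BufferOffset(4096));   // longer-range branch
+//   if (!deadlines.empty() && deadlines.earliestDeadline() < someOffset) {
+//       // Emit a veneer for the branch identified by
+//       // deadlines.earliestDeadlineRange(), then drop its deadline.
+//       deadlines.removeDeadline(deadlines.earliestDeadlineRange(),
+//                                deadlines.earliestDeadline());
+//   }
+//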
+template <unsigned NumRanges>
+class BranchDeadlineSet
+{
+ // Maintain a list of pending deadlines for each range separately.
+ //
+ // The offsets in each vector are always kept in ascending order.
+ //
+    // Because we have a separate vector for each range, as forward
+ // branches are added to the assembler buffer, their deadlines will
+ // always be appended to the vector corresponding to their range.
+ //
+ // When binding labels, we expect a more-or-less LIFO order of branch
+ // resolutions. This would always hold if we had strictly structured control
+ // flow.
+ //
+ // We allow branch deadlines to be added and removed in any order, but
+ // performance is best in the expected case of near LIFO order.
+ //
+ typedef Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> RangeVector;
+
+ // We really just want "RangeVector deadline_[NumRanges];", but each vector
+ // needs to be initialized with a LifoAlloc, and C++ doesn't bend that way.
+ //
+ // Use raw aligned storage instead and explicitly construct NumRanges
+ // vectors in our constructor.
+ mozilla::AlignedStorage2<RangeVector[NumRanges]> deadlineStorage_;
+
+ // Always access the range vectors through this method.
+ RangeVector& vectorForRange(unsigned rangeIdx)
+ {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ const RangeVector& vectorForRange(unsigned rangeIdx) const
+ {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ // Maintain a precomputed earliest deadline at all times.
+ // This is unassigned only when all deadline vectors are empty.
+ BufferOffset earliest_;
+
+ // The range vector owning earliest_. Uninitialized when empty.
+ unsigned earliestRange_;
+
+ // Recompute the earliest deadline after it's been invalidated.
+ void recomputeEarliest()
+ {
+ earliest_ = BufferOffset();
+ for (unsigned r = 0; r < NumRanges; r++) {
+ auto& vec = vectorForRange(r);
+ if (!vec.empty() && (!earliest_.assigned() || vec[0] < earliest_)) {
+ earliest_ = vec[0];
+ earliestRange_ = r;
+ }
+ }
+ }
+
+ // Update the earliest deadline if needed after inserting (rangeIdx,
+ // deadline). Always return true for convenience:
+ // return insert() && updateEarliest().
+ bool updateEarliest(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!earliest_.assigned() || deadline < earliest_) {
+ earliest_ = deadline;
+ earliestRange_ = rangeIdx;
+ }
+ return true;
+ }
+
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc)
+ {
+ // Manually construct vectors in the uninitialized aligned storage.
+ // This is because C++ arrays can otherwise only be constructed with
+ // the default constructor.
+ for (unsigned r = 0; r < NumRanges; r++)
+ new (&vectorForRange(r)) RangeVector(alloc);
+ }
+
+ ~BranchDeadlineSet()
+ {
+ // Aligned storage doesn't destruct its contents automatically.
+ for (unsigned r = 0; r < NumRanges; r++)
+ vectorForRange(r).~RangeVector();
+ }
+
+ // Is this set completely empty?
+ bool empty() const { return !earliest_.assigned(); }
+
+ // Get the total number of deadlines in the set.
+ size_t size() const
+ {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++)
+ count += vectorForRange(r).length();
+ return count;
+ }
+
+ // Get the number of deadlines for the range with the most elements.
+ size_t maxRangeSize() const
+ {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++)
+ count = std::max(count, vectorForRange(r).length());
+ return count;
+ }
+
+ // Get the first deadline that is still in the set.
+ BufferOffset earliestDeadline() const
+ {
+ MOZ_ASSERT(!empty());
+ return earliest_;
+ }
+
+    // Get the range index corresponding to earliestDeadline().
+ unsigned earliestDeadlineRange() const
+ {
+ MOZ_ASSERT(!empty());
+ return earliestRange_;
+ }
+
+ // Add a (rangeIdx, deadline) tuple to the set.
+ //
+ // It is assumed that this tuple is not already in the set.
+    // This function performs best if the added deadline is later than any
+ // existing deadline for the same range index.
+ //
+ // Return true if the tuple was added, false if the tuple could not be added
+ // because of an OOM error.
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ MOZ_ASSERT(deadline.assigned(), "Can only store assigned buffer offsets");
+ // This is the vector where deadline should be saved.
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Fast case: Simple append to the relevant array. This never affects
+ // the earliest deadline.
+ if (!vec.empty() && vec.back() < deadline)
+ return vec.append(deadline);
+
+ // Fast case: First entry to the vector. We need to update earliest_.
+ if (vec.empty())
+ return vec.append(deadline) && updateEarliest(rangeIdx, deadline);
+
+ return addDeadlineSlow(rangeIdx, deadline);
+ }
+
+ private:
+ // General case of addDeadline. This is split into two functions such that
+ // the common case in addDeadline can be inlined while this part probably
+ // won't inline.
+ bool addDeadlineSlow(unsigned rangeIdx, BufferOffset deadline)
+ {
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Inserting into the middle of the vector. Use a log time binary search
+ // and a linear time insert().
+ // Is it worthwhile special-casing the empty vector?
+ auto at = std::lower_bound(vec.begin(), vec.end(), deadline);
+ MOZ_ASSERT(at == vec.end() || *at != deadline, "Cannot insert duplicate deadlines");
+ return vec.insert(at, deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ public:
+ // Remove a deadline from the set.
+ // If (rangeIdx, deadline) is not in the set, nothing happens.
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ auto& vec = vectorForRange(rangeIdx);
+
+ if (vec.empty())
+ return;
+
+ if (deadline == vec.back()) {
+ // Expected fast case: Structured control flow causes forward
+ // branches to be bound in reverse order.
+ vec.popBack();
+ } else {
+ // Slow case: Binary search + linear erase.
+ auto where = std::lower_bound(vec.begin(), vec.end(), deadline);
+ if (where == vec.end() || *where != deadline)
+ return;
+ vec.erase(where);
+ }
+ if (deadline == earliest_)
+ recomputeEarliest();
+ }
+};
+
+// Specialization for architectures that don't need to track short-range
+// branches.
+template <>
+class BranchDeadlineSet<0u>
+{
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) {}
+ bool empty() const { return true; }
+ size_t size() const { return 0; }
+ size_t maxRangeSize() const { return 0; }
+ BufferOffset earliestDeadline() const { MOZ_CRASH(); }
+ unsigned earliestDeadlineRange() const { MOZ_CRASH(); }
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+};
+
+// The allocation unit size for pools.
+typedef int32_t PoolAllocUnit;
+
+// Hysteresis given to short-range branches.
+//
+// If any short-range branches will go out of range in the next N bytes,
+// generate a veneer for them in the current pool. The hysteresis prevents the
+// creation of many tiny constant pools for branch veneers.
+const size_t ShortRangeBranchHysteresis = 128;
+
+struct Pool
+{
+ private:
+    // The maximum program-counter relative offset that the instruction
+    // set can encode. Different classes of instructions might support different
+    // ranges, but for simplicity the minimum is used here, and for the ARM this
+ // is constrained to 1024 by the float load instructions.
+ const size_t maxOffset_;
+ // An offset to apply to program-counter relative offsets. The ARM has a
+ // bias of 8.
+ const unsigned bias_;
+
+ // The content of the pool entries.
+ Vector<PoolAllocUnit, 8, LifoAllocPolicy<Fallible>> poolData_;
+
+ // Flag that tracks OOM conditions. This is set after any append failed.
+ bool oom_;
+
+ // The limiting instruction and pool-entry pair. The instruction program
+ // counter relative offset of this limiting instruction will go out of range
+ // first as the pool position moves forward. It is more efficient to track
+ // just this limiting pair than to recheck all offsets when testing if the
+ // pool needs to be dumped.
+ //
+ // 1. The actual offset of the limiting instruction referencing the limiting
+ // pool entry.
+ BufferOffset limitingUser;
+ // 2. The pool entry index of the limiting pool entry.
+ unsigned limitingUsee;
+
+ public:
+ // A record of the code offset of instructions that reference pool
+ // entries. These instructions need to be patched when the actual position
+ // of the instructions and pools are known, and for the code below this
+ // occurs when each pool is finished, see finishPool().
+ Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> loadOffsets;
+
+    // Create a Pool. Don't allocate anything from lifoAlloc, just capture its reference.
+ explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
+ : maxOffset_(maxOffset),
+ bias_(bias),
+ poolData_(lifoAlloc),
+ oom_(false),
+ limitingUser(),
+ limitingUsee(INT_MIN),
+ loadOffsets(lifoAlloc)
+ {
+ }
+
+ // If poolData() returns nullptr then oom_ will also be true.
+ const PoolAllocUnit* poolData() const {
+ return poolData_.begin();
+ }
+
+ unsigned numEntries() const {
+ return poolData_.length();
+ }
+
+ size_t getPoolSize() const {
+ return numEntries() * sizeof(PoolAllocUnit);
+ }
+
+ bool oom() const {
+ return oom_;
+ }
+
+ // Update the instruction/pool-entry pair that limits the position of the
+ // pool. The nextInst is the actual offset of the new instruction being
+ // allocated.
+ //
+ // This is comparing the offsets, see checkFull() below for the equation,
+ // but common expressions on both sides have been canceled from the ranges
+ // being compared. Notably, the poolOffset cancels out, so the limiting pair
+ // does not depend on where the pool is placed.
+ void updateLimiter(BufferOffset nextInst) {
+ ptrdiff_t oldRange = limitingUsee * sizeof(PoolAllocUnit) - limitingUser.getOffset();
+ ptrdiff_t newRange = getPoolSize() - nextInst.getOffset();
+ if (!limitingUser.assigned() || newRange > oldRange) {
+ // We have a new largest range!
+ limitingUser = nextInst;
+ limitingUsee = numEntries();
+ }
+ }
+
+ // Check if inserting a pool at the actual offset poolOffset would place
+    // pool entries out of reach. This is called before inserting instructions,
+    // to verify that doing so would not push any pool entry out of range; if it
+    // would, the pool needs to be dumped first. The poolOffset is the
+ // first word of the pool, after the guard and header and alignment fill.
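+    //
+    // As a worked example (a sketch, with ARM-like numbers): with
+    // maxOffset_ = 1024 and bias_ = 8, a load at buffer offset 100 referencing
+    // pool entry 4 gives offset = poolOffset + 4 * 4 - (100 + 8), so the pool
+    // only becomes full once poolOffset reaches 1116.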
+ bool checkFull(size_t poolOffset) const {
+ // Not full if there are no uses.
+ if (!limitingUser.assigned())
+ return false;
+ size_t offset = poolOffset + limitingUsee * sizeof(PoolAllocUnit)
+ - (limitingUser.getOffset() + bias_);
+ return offset >= maxOffset_;
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+
+ unsigned insertEntry(unsigned num, uint8_t* data, BufferOffset off, LifoAlloc& lifoAlloc) {
+ if (oom_)
+ return OOM_FAIL;
+ unsigned ret = numEntries();
+ if (!poolData_.append((PoolAllocUnit*)data, num) || !loadOffsets.append(off)) {
+ oom_ = true;
+ return OOM_FAIL;
+ }
+ return ret;
+ }
+
+ void reset() {
+ poolData_.clear();
+ loadOffsets.clear();
+
+ limitingUser = BufferOffset();
+ limitingUsee = -1;
+ }
+};
+
+
+// Template arguments:
+//
+// SliceSize
+// Number of bytes in each allocated BufferSlice. See
+// AssemblerBuffer::SliceSize.
+//
+// InstSize
+// Size in bytes of the fixed-size instructions. This should be equal to
+// sizeof(Inst). This is only needed here because the buffer is defined before
+// the Instruction.
+//
+// Inst
+// The actual type used to represent instructions. This is only really used as
+// the return type of the getInst() method.
+//
+// Asm
+// Class defining the needed static callback functions. See documentation of
+// the Asm::* callbacks above.
+//
+// NumShortBranchRanges
+// The number of short branch ranges to support. This can be 0 if no support
+// for tracking short range branches is needed. The
+// AssemblerBufferWithConstantPools class does not need to know what the range
+//   of branches is - it deals in branch 'deadlines', each of which is the last buffer
+// position that a short-range forward branch can reach. It is assumed that
+// the Asm class is able to find the actual branch instruction given a
+// (range-index, deadline) pair.
+//
+//
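+// As a hedged illustration, an ARM-style backend would instantiate this buffer
+// roughly as follows; the concrete sizes and the Instruction/Assembler types
+// are assumptions, not definitions from this header:
+//
+//   typedef AssemblerBufferWithConstantPools<1024, 4, Instruction, Assembler>
+//       ARMBuffer;
+//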
+template <size_t SliceSize, size_t InstSize, class Inst, class Asm,
+ unsigned NumShortBranchRanges = 0>
+struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst>
+{
+ private:
+ // The PoolEntry index counter. Each PoolEntry is given a unique index,
+ // counting up from zero, and these can be mapped back to the actual pool
+ // entry offset after finishing the buffer, see poolEntryOffset().
+ size_t poolEntryCount;
+
+ public:
+ class PoolEntry
+ {
+ size_t index_;
+
+ public:
+ explicit PoolEntry(size_t index)
+ : index_(index)
+ { }
+
+ PoolEntry()
+ : index_(-1)
+ { }
+
+ size_t index() const {
+ return index_;
+ }
+ };
+
+ private:
+ typedef AssemblerBuffer<SliceSize, Inst> Parent;
+ using typename Parent::Slice;
+
+ // The size of a pool guard, in instructions. A branch around the pool.
+ const unsigned guardSize_;
+ // The size of the header that is put at the beginning of a full pool, in
+ // instruction sized units.
+ const unsigned headerSize_;
+
+ // The maximum pc relative offset encoded in instructions that reference
+ // pool entries. This is generally set to the maximum offset that can be
+ // encoded by the instructions, but for testing can be lowered to affect the
+ // pool placement and frequency of pool placement.
+ const size_t poolMaxOffset_;
+
+ // The bias on pc relative addressing mode offsets, in units of bytes. The
+ // ARM has a bias of 8 bytes.
+ const unsigned pcBias_;
+
+ // The current working pool. Copied out as needed before resetting.
+ Pool pool_;
+
+    // The buffer should be aligned to this boundary.
+ const size_t instBufferAlign_;
+
+ struct PoolInfo {
+ // The index of the first entry in this pool.
+ // Pool entries are numbered uniquely across all pools, starting from 0.
+ unsigned firstEntryIndex;
+
+ // The location of this pool's first entry in the main assembler buffer.
+ // Note that the pool guard and header come before this offset which
+ // points directly at the data.
+ BufferOffset offset;
+
+ explicit PoolInfo(unsigned index, BufferOffset data)
+ : firstEntryIndex(index)
+ , offset(data)
+ {
+ }
+ };
+
+ // Info for each pool that has already been dumped. This does not include
+ // any entries in pool_.
+ Vector<PoolInfo, 8, LifoAllocPolicy<Fallible>> poolInfo_;
+
+ // Set of short-range forward branches that have not yet been bound.
+ // We may need to insert veneers if the final label turns out to be out of
+ // range.
+ //
+ // This set stores (rangeIdx, deadline) pairs instead of the actual branch
+ // locations.
+ BranchDeadlineSet<NumShortBranchRanges> branchDeadlines_;
+
+    // When true, dumping pools is inhibited.
+ bool canNotPlacePool_;
+
+#ifdef DEBUG
+ // State for validating the 'maxInst' argument to enterNoPool().
+ // The buffer offset when entering the no-pool region.
+ size_t canNotPlacePoolStartOffset_;
+ // The maximum number of word sized instructions declared for the no-pool
+ // region.
+ size_t canNotPlacePoolMaxInst_;
+#endif
+
+ // Instruction to use for alignment fill.
+ const uint32_t alignFillInst_;
+
+ // Insert a number of NOP instructions between each requested instruction at
+ // all locations at which a pool can potentially spill. This is useful for
+ // checking that instruction locations are correctly referenced and/or
+ // followed.
+ const uint32_t nopFillInst_;
+ const unsigned nopFill_;
+    // When true, the insertion of fill NOPs is suppressed. Set while the fill
+    // itself (or alignment padding) is being emitted.
+ bool inhibitNops_;
+
+ public:
+ // A unique id within each JitContext, to identify pools in the debug
+ // spew. Set by the MacroAssembler, see getNextAssemblerId().
+ int id;
+
+ private:
+    // The buffer slices are in a doubly linked list.
+ Slice* getHead() const {
+ return this->head;
+ }
+ Slice* getTail() const {
+ return this->tail;
+ }
+
+ public:
+ // Create an assembler buffer.
+ // Note that this constructor is not allowed to actually allocate memory from this->lifoAlloc_
+ // because the MacroAssembler constructor has not yet created an AutoJitContextAlloc.
+ AssemblerBufferWithConstantPools(unsigned guardSize, unsigned headerSize,
+ size_t instBufferAlign, size_t poolMaxOffset,
+ unsigned pcBias, uint32_t alignFillInst, uint32_t nopFillInst,
+ unsigned nopFill = 0)
+ : poolEntryCount(0),
+ guardSize_(guardSize),
+ headerSize_(headerSize),
+ poolMaxOffset_(poolMaxOffset),
+ pcBias_(pcBias),
+ pool_(poolMaxOffset, pcBias, this->lifoAlloc_),
+ instBufferAlign_(instBufferAlign),
+ poolInfo_(this->lifoAlloc_),
+ branchDeadlines_(this->lifoAlloc_),
+ canNotPlacePool_(false),
+#ifdef DEBUG
+ canNotPlacePoolStartOffset_(0),
+ canNotPlacePoolMaxInst_(0),
+#endif
+ alignFillInst_(alignFillInst),
+ nopFillInst_(nopFillInst),
+ nopFill_(nopFill),
+ inhibitNops_(false),
+ id(-1)
+ { }
+
+ // We need to wait until an AutoJitContextAlloc is created by the
+ // MacroAssembler before allocating any space.
+ void initWithAllocator() {
+ // We hand out references to lifoAlloc_ in the constructor.
+ // Check that no allocations were made then.
+ MOZ_ASSERT(this->lifoAlloc_.isEmpty(), "Illegal LIFO allocations before AutoJitContextAlloc");
+ }
+
+ private:
+ size_t sizeExcludingCurrentPool() const {
+ // Return the actual size of the buffer, excluding the current pending
+ // pool.
+ return this->nextOffset().getOffset();
+ }
+
+ public:
+ size_t size() const {
+ // Return the current actual size of the buffer. This is only accurate
+ // if there are no pending pool entries to dump, check.
+ MOZ_ASSERT_IF(!this->oom(), pool_.numEntries() == 0);
+ return sizeExcludingCurrentPool();
+ }
+
+ private:
+ void insertNopFill() {
+ // Insert fill for testing.
+ if (nopFill_ > 0 && !inhibitNops_ && !canNotPlacePool_) {
+ inhibitNops_ = true;
+
+ // Fill using a branch-nop rather than a NOP so this can be
+ // distinguished and skipped.
+ for (size_t i = 0; i < nopFill_; i++)
+ putInt(nopFillInst_);
+
+ inhibitNops_ = false;
+ }
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+ static const unsigned DUMMY_INDEX = unsigned(-2);
+
+    // Check if it is possible to add numInsts instructions and numPoolEntries
+ // constant pool entries without needing to flush the current pool.
+ bool hasSpaceForInsts(unsigned numInsts, unsigned numPoolEntries) const
+ {
+ size_t nextOffset = sizeExcludingCurrentPool();
+ // Earliest starting offset for the current pool after adding numInsts.
+ // This is the beginning of the pool entries proper, after inserting a
+ // guard branch + pool header.
+ size_t poolOffset = nextOffset + (numInsts + guardSize_ + headerSize_) * InstSize;
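+        // For example (illustrative numbers): with nextOffset == 100,
+        // numInsts == 2, guardSize_ == 1, headerSize_ == 1 and InstSize == 4,
+        // the pool entries could start no earlier than offset 116.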
+
+ // Any constant pool loads that would go out of range?
+ if (pool_.checkFull(poolOffset))
+ return false;
+
+ // Any short-range branch that would go out of range?
+ if (!branchDeadlines_.empty()) {
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ size_t poolEnd =
+ poolOffset + pool_.getPoolSize() + numPoolEntries * sizeof(PoolAllocUnit);
+
+            // When NumShortBranchRanges > 1, it is possible for branch deadlines to expire faster
+ // than we can insert veneers. Suppose branches are 4 bytes each, we could have the
+ // following deadline set:
+ //
+ // Range 0: 40, 44, 48
+ // Range 1: 44, 48
+ //
+ // It is not good enough to start inserting veneers at the 40 deadline; we would not be
+ // able to create veneers for the second 44 deadline. Instead, we need to start at 32:
+ //
+ // 32: veneer(40)
+ // 36: veneer(44)
+ // 40: veneer(44)
+ // 44: veneer(48)
+ // 48: veneer(48)
+ //
+ // This is a pretty conservative solution to the problem: If we begin at the earliest
+ // deadline, we can always emit all veneers for the range that currently has the most
+ // pending deadlines. That may not leave room for veneers for the remaining ranges, so
+ // reserve space for those secondary range veneers assuming the worst case deadlines.
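+            //
+            // With the example deadlines above and guardSize_ == 1, size() is
+            // 5 and maxRangeSize() is 3, so space for two secondary-range
+            // veneers (2 * guardSize_) is reserved.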
+
+ // Total pending secondary range veneer size.
+ size_t secondaryVeneers =
+ guardSize_ * (branchDeadlines_.size() - branchDeadlines_.maxRangeSize());
+
+ if (deadline < poolEnd + secondaryVeneers)
+ return false;
+ }
+
+ return true;
+ }
+
+ unsigned insertEntryForwards(unsigned numInst, unsigned numPoolEntries, uint8_t* inst, uint8_t* data) {
+ // If inserting pool entries then find a new limiter before we do the
+ // range check.
+ if (numPoolEntries)
+ pool_.updateLimiter(BufferOffset(sizeExcludingCurrentPool()));
+
+ if (!hasSpaceForInsts(numInst, numPoolEntries)) {
+ if (numPoolEntries)
+ JitSpew(JitSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
+ else
+ JitSpew(JitSpew_Pools, "[%d] Inserting instruction(%" PRIuSIZE ") caused a spill", id,
+ sizeExcludingCurrentPool());
+
+ finishPool();
+ if (this->oom())
+ return OOM_FAIL;
+ return insertEntryForwards(numInst, numPoolEntries, inst, data);
+ }
+ if (numPoolEntries) {
+ unsigned result = pool_.insertEntry(numPoolEntries, data, this->nextOffset(), this->lifoAlloc_);
+ if (result == Pool::OOM_FAIL) {
+ this->fail_oom();
+ return OOM_FAIL;
+ }
+ return result;
+ }
+
+ // The pool entry index is returned above when allocating an entry, but
+ // when not allocating an entry a dummy value is returned - it is not
+ // expected to be used by the caller.
+ return DUMMY_INDEX;
+ }
+
+ public:
+ // Get the next buffer offset where an instruction would be inserted.
+ // This may flush the current constant pool before returning nextOffset().
+ BufferOffset nextInstrOffset()
+ {
+ if (!hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
+ JitSpew(JitSpew_Pools, "[%d] nextInstrOffset @ %d caused a constant pool spill", id,
+ this->nextOffset().getOffset());
+ finishPool();
+ }
+ return this->nextOffset();
+ }
+
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data, PoolEntry* pe = nullptr,
+ bool markAsBranch = false)
+ {
+ // The allocation of pool entries is not supported in a no-pool region,
+ // check.
+ MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
+
+ if (this->oom() && !this->bail())
+ return BufferOffset();
+
+ insertNopFill();
+
+#ifdef JS_JITSPEW
+ if (numPoolEntries && JitSpewEnabled(JitSpew_Pools)) {
+ JitSpew(JitSpew_Pools, "[%d] Inserting %d entries into pool", id, numPoolEntries);
+ JitSpewStart(JitSpew_Pools, "[%d] data is: 0x", id);
+ size_t length = numPoolEntries * sizeof(PoolAllocUnit);
+ for (unsigned idx = 0; idx < length; idx++) {
+ JitSpewCont(JitSpew_Pools, "%02x", data[length - idx - 1]);
+ if (((idx & 3) == 3) && (idx + 1 != length))
+ JitSpewCont(JitSpew_Pools, "_");
+ }
+ JitSpewFin(JitSpew_Pools);
+ }
+#endif
+
+ // Insert the pool value.
+ unsigned index = insertEntryForwards(numInst, numPoolEntries, inst, data);
+ if (this->oom())
+ return BufferOffset();
+
+ // Now to get an instruction to write.
+ PoolEntry retPE;
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "[%d] Entry has index %u, offset %" PRIuSIZE, id, index,
+ sizeExcludingCurrentPool());
+ Asm::InsertIndexIntoTag(inst, index);
+ // Figure out the offset within the pool entries.
+ retPE = PoolEntry(poolEntryCount);
+ poolEntryCount += numPoolEntries;
+ }
+ // Now inst is a valid thing to insert into the instruction stream.
+ if (pe != nullptr)
+ *pe = retPE;
+ return this->putBytes(numInst * InstSize, inst);
+ }
+
+ BufferOffset putInt(uint32_t value, bool markAsBranch = false) {
+ return allocEntry(1, 0, (uint8_t*)&value, nullptr, nullptr, markAsBranch);
+ }
+
+ // Register a short-range branch deadline.
+ //
+ // After inserting a short-range forward branch, call this method to
+ // register the branch 'deadline' which is the last buffer offset that the
+ // branch instruction can reach.
+ //
+ // When the branch is bound to a destination label, call
+    // unregisterBranchDeadline() to stop tracking this branch.
+ //
+    // If the assembled code is about to exceed the registered branch deadline,
+    // and unregisterBranchDeadline() has not yet been called, a veneer branch
+    // is emitted before the branch deadline and the short-range branch is
+    // patched to target it (see finishPool()).
+ //
+ // rangeIdx
+ // A number < NumShortBranchRanges identifying the range of the branch.
+ //
+ // deadline
+    //   The highest buffer offset that the short-range branch can reach
+ // directly.
+ //
+ void registerBranchDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!this->oom() && !branchDeadlines_.addDeadline(rangeIdx, deadline))
+ this->fail_oom();
+ }
+
+ // Un-register a short-range branch deadline.
+ //
+ // When a short-range branch has been successfully bound to its destination
+    // label, call this function to stop tracking the branch.
+ //
+ // The (rangeIdx, deadline) pair must be previously registered.
+ //
+ void unregisterBranchDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!this->oom())
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+ }
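+
+    // Illustrative use from a hypothetical assembler (the emitShortBranch()
+    // helper and MaxShortRange are not part of this patch): after emitting a
+    // short-range branch whose reach is limited to MaxShortRange bytes,
+    //
+    //   BufferOffset b = emitShortBranch(...);
+    //   buffer.registerBranchDeadline(0, BufferOffset(b.getOffset() + MaxShortRange));
+    //
+    // and once the branch has been bound to its label,
+    //
+    //   buffer.unregisterBranchDeadline(0, BufferOffset(b.getOffset() + MaxShortRange));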
+
+ private:
+ // Are any short-range branches about to expire?
+ bool hasExpirableShortRangeBranches() const
+ {
+ if (branchDeadlines_.empty())
+ return false;
+
+ // Include branches that would expire in the next N bytes.
+ // The hysteresis avoids the needless creation of many tiny constant
+ // pools.
+ return this->nextOffset().getOffset() + ShortRangeBranchHysteresis >
+ size_t(branchDeadlines_.earliestDeadline().getOffset());
+ }
+
+ void finishPool() {
+ JitSpew(JitSpew_Pools, "[%d] Attempting to finish pool %" PRIuSIZE " with %u entries.",
+ id, poolInfo_.length(), pool_.numEntries());
+
+ if (pool_.numEntries() == 0 && !hasExpirableShortRangeBranches()) {
+ // If there is no data in the pool being dumped, don't dump anything.
+ JitSpew(JitSpew_Pools, "[%d] Aborting because the pool is empty", id);
+ return;
+ }
+
+ // Should not be placing a pool in a no-pool region, check.
+ MOZ_ASSERT(!canNotPlacePool_);
+
+ // Dump the pool with a guard branch around the pool.
+ BufferOffset guard = this->putBytes(guardSize_ * InstSize, nullptr);
+ BufferOffset header = this->putBytes(headerSize_ * InstSize, nullptr);
+ BufferOffset data =
+ this->putBytesLarge(pool_.getPoolSize(), (const uint8_t*)pool_.poolData());
+ if (this->oom())
+ return;
+
+ // Now generate branch veneers for any short-range branches that are
+ // about to expire.
+ while (hasExpirableShortRangeBranches()) {
+ unsigned rangeIdx = branchDeadlines_.earliestDeadlineRange();
+ BufferOffset deadline = branchDeadlines_.earliestDeadline();
+
+ // Stop tracking this branch. The Asm callback below may register
+ // new branches to track.
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+
+ // Make room for the veneer. Same as a pool guard branch.
+ BufferOffset veneer = this->putBytes(guardSize_ * InstSize, nullptr);
+ if (this->oom())
+ return;
+
+ // Fix the branch so it targets the veneer.
+ // The Asm class knows how to find the original branch given the
+ // (rangeIdx, deadline) pair.
+ Asm::PatchShortRangeBranchToVeneer(this, rangeIdx, deadline, veneer);
+ }
+
+ // We only reserved space for the guard branch and pool header.
+ // Fill them in.
+ BufferOffset afterPool = this->nextOffset();
+ Asm::WritePoolGuard(guard, this->getInst(guard), afterPool);
+ Asm::WritePoolHeader((uint8_t*)this->getInst(header), &pool_, false);
+
+ // With the pool's final position determined it is now possible to patch
+ // the instructions that reference entries in this pool, and this is
+ // done incrementally as each pool is finished.
+ size_t poolOffset = data.getOffset();
+
+ unsigned idx = 0;
+ for (BufferOffset* iter = pool_.loadOffsets.begin();
+ iter != pool_.loadOffsets.end();
+ ++iter, ++idx)
+ {
+ // All entries should be before the pool.
+ MOZ_ASSERT(iter->getOffset() < guard.getOffset());
+
+ // Everything here is known so we can safely do the necessary
+ // substitutions.
+ Inst* inst = this->getInst(*iter);
+ size_t codeOffset = poolOffset - iter->getOffset();
+
+            // PatchConstantPoolLoad wants to be handed the address of the
+            // pool entry that is being loaded, but the pool data does not yet
+            // reside at its final address in memory, so hand it an address
+            // computed relative to the instruction within the buffer instead.
+ JitSpew(JitSpew_Pools, "[%d] Fixing entry %d offset to %" PRIuSIZE, id, idx, codeOffset);
+ Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset);
+ }
+
+ // Record the pool info.
+ unsigned firstEntry = poolEntryCount - pool_.numEntries();
+ if (!poolInfo_.append(PoolInfo(firstEntry, data))) {
+ this->fail_oom();
+ return;
+ }
+
+ // Reset everything to the state that it was in when we started.
+ pool_.reset();
+ }
+
+ public:
+ void flushPool() {
+ if (this->oom())
+ return;
+ JitSpew(JitSpew_Pools, "[%d] Requesting a pool flush", id);
+ finishPool();
+ }
+
+ void enterNoPool(size_t maxInst) {
+ // Don't allow re-entry.
+ MOZ_ASSERT(!canNotPlacePool_);
+ insertNopFill();
+
+ // Check if the pool will spill by adding maxInst instructions, and if
+ // so then finish the pool before entering the no-pool region. It is
+ // assumed that no pool entries are allocated in a no-pool region and
+ // this is asserted when allocating entries.
+ if (!hasSpaceForInsts(maxInst, 0)) {
+ JitSpew(JitSpew_Pools, "[%d] No-Pool instruction(%" PRIuSIZE ") caused a spill.", id,
+ sizeExcludingCurrentPool());
+ finishPool();
+ }
+
+#ifdef DEBUG
+ // Record the buffer position to allow validating maxInst when leaving
+ // the region.
+ canNotPlacePoolStartOffset_ = this->nextOffset().getOffset();
+ canNotPlacePoolMaxInst_ = maxInst;
+#endif
+
+ canNotPlacePool_ = true;
+ }
+
+ void leaveNoPool() {
+ MOZ_ASSERT(canNotPlacePool_);
+ canNotPlacePool_ = false;
+
+ // Validate the maxInst argument supplied to enterNoPool().
+ MOZ_ASSERT(this->nextOffset().getOffset() - canNotPlacePoolStartOffset_ <= canNotPlacePoolMaxInst_ * InstSize);
+ }
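+
+    // Illustrative use (the emit* calls are hypothetical): to emit a
+    // two-instruction sequence that a constant pool must not split,
+    //
+    //   buffer.enterNoPool(2);
+    //   emitLoadLiteral(...);   // instruction 1
+    //   emitBranch(...);        // instruction 2
+    //   buffer.leaveNoPool();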
+
+ void align(unsigned alignment) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ MOZ_ASSERT(alignment >= InstSize);
+
+        // A pool may need to be dumped at this point, so insert NOP fill here.
+ insertNopFill();
+
+ // Check if the code position can be aligned without dumping a pool.
+ unsigned requiredFill = sizeExcludingCurrentPool() & (alignment - 1);
+ if (requiredFill == 0)
+ return;
+ requiredFill = alignment - requiredFill;
+
+ // Add an InstSize because it is probably not useful for a pool to be
+ // dumped at the aligned code position.
+ if (!hasSpaceForInsts(requiredFill / InstSize + 1, 0)) {
+ // Alignment would cause a pool dump, so dump the pool now.
+ JitSpew(JitSpew_Pools, "[%d] Alignment of %d at %" PRIuSIZE " caused a spill.",
+ id, alignment, sizeExcludingCurrentPool());
+ finishPool();
+ }
+
+ inhibitNops_ = true;
+ while ((sizeExcludingCurrentPool() & (alignment - 1)) && !this->oom())
+ putInt(alignFillInst_);
+ inhibitNops_ = false;
+ }
+
+ public:
+ void executableCopy(uint8_t* dest) {
+ if (this->oom())
+ return;
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
+ memcpy(dest, &cur->instructions[0], cur->length());
+ dest += cur->length();
+ }
+ }
+
+ bool appendBuffer(const AssemblerBufferWithConstantPools& other) {
+ if (this->oom())
+ return false;
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = other.getHead(); cur != nullptr; cur = cur->getNext()) {
+ this->putBytes(cur->length(), &cur->instructions[0]);
+ if (this->oom())
+ return false;
+ }
+ return true;
+ }
+
+ public:
+ size_t poolEntryOffset(PoolEntry pe) const {
+ MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),
+ "Invalid pool entry, or not flushed yet.");
+ // Find the pool containing pe.index().
+ // The array is sorted, so we can use a binary search.
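+        // For example (hypothetical indices): with pools whose firstEntryIndex
+        // values are 0, 10 and 25, and pe.index() == 12, upper_bound() finds
+        // the pool starting at 25 and the decrement below selects the pool
+        // starting at 10, giving relativeIndex == 2.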
+ auto b = poolInfo_.begin(), e = poolInfo_.end();
+ // A note on asymmetric types in the upper_bound comparator:
+ // http://permalink.gmane.org/gmane.comp.compilers.clang.devel/10101
+ auto i = std::upper_bound(b, e, pe.index(), [](size_t value, const PoolInfo& entry) {
+ return value < entry.firstEntryIndex;
+ });
+ // Since upper_bound finds the first pool greater than pe,
+ // we want the previous one which is the last one less than or equal.
+ MOZ_ASSERT(i != b, "PoolInfo not sorted or empty?");
+ --i;
+ // The i iterator now points to the pool containing pe.index.
+ MOZ_ASSERT(i->firstEntryIndex <= pe.index() &&
+ (i + 1 == e || (i + 1)->firstEntryIndex > pe.index()));
+ // Compute the byte offset into the pool.
+ unsigned relativeIndex = pe.index() - i->firstEntryIndex;
+ return i->offset.getOffset() + relativeIndex * sizeof(PoolAllocUnit);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBufferWithConstantPools_h
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
new file mode 100644
index 000000000..a352f5d8a
--- /dev/null
+++ b/js/src/jit/shared/LIR-shared.h
@@ -0,0 +1,8904 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LIR_shared_h
+#define jit_shared_LIR_shared_h
+
+#include "jsutil.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/shared/Assembler-shared.h"
+
+// This file declares LIR instructions that are common to every platform.
+
+namespace js {
+namespace jit {
+
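+// Boxes a typed payload into a Value output.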
+class LBox : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(Box);
+
+ LBox(const LAllocation& payload, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, payload);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LBinaryMath : public LInstructionHelper<1, 2 + ExtraUses, Temps>
+{
+ public:
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+};
+
+// An LOsiPoint captures a snapshot after a call and ensures enough space to
+// patch in a call to the invalidation mechanism.
+//
+// Note: LSafepoints are 1:1 with LOsiPoints, so each LOsiPoint holds a
+// reference to its corresponding LSafepoint and informs it of the LOsiPoint's
+// masm offset when it gets CG'd.
+class LOsiPoint : public LInstructionHelper<0, 0, 0>
+{
+ LSafepoint* safepoint_;
+
+ public:
+ LOsiPoint(LSafepoint* safepoint, LSnapshot* snapshot)
+ : safepoint_(safepoint)
+ {
+ MOZ_ASSERT(safepoint && snapshot);
+ assignSnapshot(snapshot);
+ }
+
+ LSafepoint* associatedSafepoint() {
+ return safepoint_;
+ }
+
+ LIR_HEADER(OsiPoint)
+};
+
+class LMove
+{
+ LAllocation from_;
+ LAllocation to_;
+ LDefinition::Type type_;
+
+ public:
+ LMove(LAllocation from, LAllocation to, LDefinition::Type type)
+ : from_(from),
+ to_(to),
+ type_(type)
+ { }
+
+ LAllocation from() const {
+ return from_;
+ }
+ LAllocation to() const {
+ return to_;
+ }
+ LDefinition::Type type() const {
+ return type_;
+ }
+};
+
+class LMoveGroup : public LInstructionHelper<0, 0, 0>
+{
+ js::Vector<LMove, 2, JitAllocPolicy> moves_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional general register available for use when executing moves.
+ LAllocation scratchRegister_;
+#endif
+
+ explicit LMoveGroup(TempAllocator& alloc)
+ : moves_(alloc)
+ { }
+
+ public:
+ LIR_HEADER(MoveGroup)
+
+ static LMoveGroup* New(TempAllocator& alloc) {
+ return new(alloc) LMoveGroup(alloc);
+ }
+
+ void printOperands(GenericPrinter& out);
+
+ // Add a move which takes place simultaneously with all others in the group.
+ bool add(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ // Add a move which takes place after existing moves in the group.
+ bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
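+    // For example, the two moves of a register swap are expressed with add()
+    // so they are resolved in parallel, whereas a move that must observe the
+    // result of an earlier move in the group is appended with addAfter().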
+
+ size_t numMoves() const {
+ return moves_.length();
+ }
+ const LMove& getMove(size_t i) const {
+ return moves_[i];
+ }
+
+#ifdef JS_CODEGEN_X86
+ void setScratchRegister(Register reg) {
+ scratchRegister_ = LGeneralReg(reg);
+ }
+ LAllocation maybeScratchRegister() {
+ return scratchRegister_;
+ }
+#endif
+
+ bool uses(Register reg) {
+ for (size_t i = 0; i < numMoves(); i++) {
+ LMove move = getMove(i);
+ if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
+ return true;
+ }
+ return false;
+ }
+};
+
+
+// Constructs a SIMD object (value type) based on the MIRType of its input.
+class LSimdBox : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdBox)
+
+ explicit LSimdBox(const LAllocation& simd, const LDefinition& temp)
+ {
+ setOperand(0, simd);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdBox* mir() const {
+ return mir_->toSimdBox();
+ }
+};
+
+class LSimdUnbox : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdUnbox)
+
+ LSimdUnbox(const LAllocation& obj, const LDefinition& temp)
+ {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdUnbox* mir() const {
+ return mir_->toSimdUnbox();
+ }
+};
+
+// Constructs a SIMD value with 16 equal components (int8x16).
+class LSimdSplatX16 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX16)
+ explicit LSimdSplatX16(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Constructs a SIMD value with 8 equal components (int16x8).
+class LSimdSplatX8 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX8)
+ explicit LSimdSplatX8(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Constructs a SIMD value with 4 equal components (e.g. int32x4, float32x4).
+class LSimdSplatX4 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX4)
+ explicit LSimdSplatX4(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Reinterpret the bits of a SIMD value with a different type.
+class LSimdReinterpretCast : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdReinterpretCast)
+ explicit LSimdReinterpretCast(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdReinterpretCast* mir() const {
+ return mir_->toSimdReinterpretCast();
+ }
+};
+
+class LSimdExtractElementBase : public LInstructionHelper<1, 1, 0>
+{
+ protected:
+ explicit LSimdExtractElementBase(const LAllocation& base) {
+ setOperand(0, base);
+ }
+
+ public:
+ const LAllocation* getBase() {
+ return getOperand(0);
+ }
+ MSimdExtractElement* mir() const {
+ return mir_->toSimdExtractElement();
+ }
+};
+
+// Extracts an element from a given SIMD bool32x4 lane.
+class LSimdExtractElementB : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementB);
+ explicit LSimdExtractElementB(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from a given SIMD int32x4 lane.
+class LSimdExtractElementI : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementI);
+ explicit LSimdExtractElementI(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from a given SIMD float32x4 lane.
+class LSimdExtractElementF : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementF);
+ explicit LSimdExtractElementF(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from a Uint32x4 SIMD vector and converts it to a double.
+class LSimdExtractElementU2D : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdExtractElementU2D);
+ explicit LSimdExtractElementU2D(const LAllocation& base, const LDefinition& temp) {
+ setOperand(0, base);
+ setTemp(0, temp);
+ }
+ MSimdExtractElement* mir() const {
+ return mir_->toSimdExtractElement();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+
+class LSimdInsertElementBase : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LSimdInsertElementBase(const LAllocation& vec, const LAllocation& val)
+ {
+ setOperand(0, vec);
+ setOperand(1, val);
+ }
+
+ public:
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ unsigned lane() const {
+ return mir_->toSimdInsertElement()->lane();
+ }
+ unsigned length() const {
+ return SimdTypeToLength(mir_->toSimdInsertElement()->type());
+ }
+};
+
+// Replace an element from a given SIMD integer or boolean lane with a given value.
+// The value inserted into a boolean lane should be 0 or -1.
+class LSimdInsertElementI : public LSimdInsertElementBase
+{
+ public:
+ LIR_HEADER(SimdInsertElementI);
+ LSimdInsertElementI(const LAllocation& vec, const LAllocation& val)
+ : LSimdInsertElementBase(vec, val)
+ {}
+};
+
+// Replace an element from a given SIMD float32x4 lane with a given value.
+class LSimdInsertElementF : public LSimdInsertElementBase
+{
+ public:
+ LIR_HEADER(SimdInsertElementF);
+ LSimdInsertElementF(const LAllocation& vec, const LAllocation& val)
+ : LSimdInsertElementBase(vec, val)
+ {}
+};
+
+// Base class for both int32x4 and float32x4 swizzle instructions.
+class LSimdSwizzleBase : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ explicit LSimdSwizzleBase(const LAllocation& base)
+ {
+ setOperand(0, base);
+ }
+
+ const LAllocation* getBase() {
+ return getOperand(0);
+ }
+
+ unsigned numLanes() const { return mir_->toSimdSwizzle()->numLanes(); }
+ uint32_t lane(unsigned i) const { return mir_->toSimdSwizzle()->lane(i); }
+
+ bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+ return mir_->toSimdSwizzle()->lanesMatch(x, y, z, w);
+ }
+};
+
+// Shuffles an int32x4 into another int32x4 vector.
+class LSimdSwizzleI : public LSimdSwizzleBase
+{
+ public:
+ LIR_HEADER(SimdSwizzleI);
+ explicit LSimdSwizzleI(const LAllocation& base) : LSimdSwizzleBase(base)
+ {}
+};
+// Shuffles a float32x4 into another float32x4 vector.
+class LSimdSwizzleF : public LSimdSwizzleBase
+{
+ public:
+ LIR_HEADER(SimdSwizzleF);
+ explicit LSimdSwizzleF(const LAllocation& base) : LSimdSwizzleBase(base)
+ {}
+};
+
+class LSimdGeneralShuffleBase : public LVariadicInstruction<1, 1>
+{
+ public:
+ explicit LSimdGeneralShuffleBase(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+ const LAllocation* vector(unsigned i) {
+ MOZ_ASSERT(i < mir()->numVectors());
+ return getOperand(i);
+ }
+ const LAllocation* lane(unsigned i) {
+ MOZ_ASSERT(i < mir()->numLanes());
+ return getOperand(mir()->numVectors() + i);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdGeneralShuffle* mir() const {
+ return mir_->toSimdGeneralShuffle();
+ }
+};
+
+class LSimdGeneralShuffleI : public LSimdGeneralShuffleBase
+{
+ public:
+ LIR_HEADER(SimdGeneralShuffleI);
+ explicit LSimdGeneralShuffleI(const LDefinition& temp)
+ : LSimdGeneralShuffleBase(temp)
+ {}
+};
+
+class LSimdGeneralShuffleF : public LSimdGeneralShuffleBase
+{
+ public:
+ LIR_HEADER(SimdGeneralShuffleF);
+ explicit LSimdGeneralShuffleF(const LDefinition& temp)
+ : LSimdGeneralShuffleBase(temp)
+ {}
+};
+
+// Base class for both int32x4 and float32x4 shuffle instructions.
+class LSimdShuffleX4 : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShuffleX4);
+ LSimdShuffleX4()
+ {}
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ uint32_t lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+
+ bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+ return mir_->toSimdShuffle()->lanesMatch(x, y, z, w);
+ }
+};
+
+// Remaining shuffles (8x16, 16x8).
+class LSimdShuffle : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShuffle);
+ LSimdShuffle()
+ {}
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ unsigned numLanes() const { return mir_->toSimdShuffle()->numLanes(); }
+ unsigned lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+};
+
+// Binary SIMD comparison operation between two SIMD operands
+class LSimdBinaryComp : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LSimdBinaryComp() {}
+
+  public:
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ MSimdBinaryComp::Operation operation() const {
+ return mir_->toSimdBinaryComp()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryComp::OperationName(operation());
+ }
+};
+
+// Binary SIMD comparison operation between two Int8x16 operands.
+class LSimdBinaryCompIx16 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx16);
+ LSimdBinaryCompIx16() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Int16x8 operands.
+class LSimdBinaryCompIx8 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx8);
+ LSimdBinaryCompIx8() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Int32x4 operands.
+class LSimdBinaryCompIx4 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx4);
+ LSimdBinaryCompIx4() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Float32x4 operands
+class LSimdBinaryCompFx4 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompFx4);
+ LSimdBinaryCompFx4() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD arithmetic operation between two SIMD operands
+class LSimdBinaryArith : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LSimdBinaryArith() {}
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdBinaryArith::Operation operation() const {
+ return this->mir_->toSimdBinaryArith()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryArith::OperationName(operation());
+ }
+};
+
+// Binary SIMD arithmetic operation between two Int8x16 operands
+class LSimdBinaryArithIx16 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx16);
+ LSimdBinaryArithIx16() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Int16x8 operands
+class LSimdBinaryArithIx8 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx8);
+ LSimdBinaryArithIx8() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Int32x4 operands
+class LSimdBinaryArithIx4 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx4);
+ LSimdBinaryArithIx4() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Float32x4 operands
+class LSimdBinaryArithFx4 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithFx4);
+ LSimdBinaryArithFx4() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD saturating arithmetic operation between two SIMD operands
+class LSimdBinarySaturating : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(SimdBinarySaturating);
+ LSimdBinarySaturating() {}
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+
+ MSimdBinarySaturating::Operation operation() const {
+ return this->mir_->toSimdBinarySaturating()->operation();
+ }
+ SimdSign signedness() const {
+ return this->mir_->toSimdBinarySaturating()->signedness();
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+ const char* extraName() const {
+ return MSimdBinarySaturating::OperationName(operation());
+ }
+};
+
+// Unary SIMD arithmetic operation on a SIMD operand
+class LSimdUnaryArith : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ explicit LSimdUnaryArith(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ MSimdUnaryArith::Operation operation() const {
+ return mir_->toSimdUnaryArith()->operation();
+ }
+};
+
+// Unary SIMD arithmetic operation on a Int8x16 operand
+class LSimdUnaryArithIx16 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx16);
+ explicit LSimdUnaryArithIx16(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Int16x8 operand
+class LSimdUnaryArithIx8 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx8);
+ explicit LSimdUnaryArithIx8(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Int32x4 operand
+class LSimdUnaryArithIx4 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx4);
+ explicit LSimdUnaryArithIx4(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Float32x4 operand
+class LSimdUnaryArithFx4 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithFx4);
+ explicit LSimdUnaryArithFx4(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Binary SIMD bitwise operation between two 128-bit operands.
+class LSimdBinaryBitwise : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(SimdBinaryBitwise);
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ MSimdBinaryBitwise::Operation operation() const {
+ return mir_->toSimdBinaryBitwise()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryBitwise::OperationName(operation());
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+};
+
+// Shift a SIMD vector by a scalar amount.
+// The temp register is only required if the shift amount is a dynamic
+// value. If it is a constant, use a BogusTemp instead.
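+//
+// For illustration, a lowering might choose the temp roughly like this (a
+// sketch, not the actual lowering code):
+//
+//   LDefinition t = val->isConstant() ? LDefinition::BogusTemp() : temp();
+//   auto* lir = new(alloc()) LSimdShift(useRegister(vec), useRegisterOrConstant(val), t);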
+class LSimdShift : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShift)
+ LSimdShift(const LAllocation& vec, const LAllocation& val, const LDefinition& temp) {
+ setOperand(0, vec);
+ setOperand(1, val);
+ setTemp(0, temp);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdShift::Operation operation() const {
+ return mir_->toSimdShift()->operation();
+ }
+ const char* extraName() const {
+ return MSimdShift::OperationName(operation());
+ }
+ MSimdShift* mir() const {
+ return mir_->toSimdShift();
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+};
+
+// SIMD selection of lanes from two int32x4 or float32x4 arguments based on an
+// int32x4 argument.
+class LSimdSelect : public LInstructionHelper<1, 3, 1>
+{
+ public:
+ LIR_HEADER(SimdSelect);
+ const LAllocation* mask() {
+ return getOperand(0);
+ }
+ const LAllocation* lhs() {
+ return getOperand(1);
+ }
+ const LAllocation* rhs() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdSelect* mir() const {
+ return mir_->toSimdSelect();
+ }
+};
+
+class LSimdAnyTrue : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdAnyTrue)
+ explicit LSimdAnyTrue(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ MSimdAnyTrue* mir() const {
+ return mir_->toSimdAnyTrue();
+ }
+};
+
+class LSimdAllTrue : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdAllTrue)
+ explicit LSimdAllTrue(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ MSimdAllTrue* mir() const {
+ return mir_->toSimdAllTrue();
+ }
+};
+
+
+// Constant 32-bit integer.
+class LInteger : public LInstructionHelper<1, 0, 0>
+{
+ int32_t i32_;
+
+ public:
+ LIR_HEADER(Integer)
+
+ explicit LInteger(int32_t i32)
+ : i32_(i32)
+ { }
+
+ int32_t getValue() const {
+ return i32_;
+ }
+};
+
+// Constant 64-bit integer.
+class LInteger64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ int64_t i64_;
+
+ public:
+ LIR_HEADER(Integer64)
+
+ explicit LInteger64(int64_t i64)
+ : i64_(i64)
+ { }
+
+ int64_t getValue() const {
+ return i64_;
+ }
+};
+
+// Constant pointer.
+class LPointer : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ enum Kind {
+ GC_THING,
+ NON_GC_THING
+ };
+
+ private:
+ void* ptr_;
+ Kind kind_;
+
+ public:
+ LIR_HEADER(Pointer)
+
+ explicit LPointer(gc::Cell* ptr)
+ : ptr_(ptr), kind_(GC_THING)
+ { }
+
+ LPointer(void* ptr, Kind kind)
+ : ptr_(ptr), kind_(kind)
+ { }
+
+ void* ptr() const {
+ return ptr_;
+ }
+ Kind kind() const {
+ return kind_;
+ }
+ const char* extraName() const {
+ return kind_ == GC_THING ? "GC_THING" : "NON_GC_THING";
+ }
+
+ gc::Cell* gcptr() const {
+ MOZ_ASSERT(kind() == GC_THING);
+ return (gc::Cell*) ptr_;
+ }
+};
+
+// Constant double.
+class LDouble : public LInstructionHelper<1, 0, 0>
+{
+ wasm::RawF64 d_;
+ public:
+ LIR_HEADER(Double);
+
+ explicit LDouble(wasm::RawF64 d) : d_(d)
+ { }
+
+ wasm::RawF64 getDouble() const {
+ return d_;
+ }
+};
+
+// Constant float32.
+class LFloat32 : public LInstructionHelper<1, 0, 0>
+{
+ wasm::RawF32 f_;
+ public:
+ LIR_HEADER(Float32);
+
+ explicit LFloat32(wasm::RawF32 f)
+ : f_(f)
+ { }
+
+ wasm::RawF32 getFloat() const {
+ return f_;
+ }
+};
+
+// Constant 128-bit SIMD integer vector (8x16, 16x8, 32x4).
+// Also used for Bool32x4, Bool16x8, etc.
+class LSimd128Int : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Simd128Int);
+
+ explicit LSimd128Int() {}
+ const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+};
+
+// Constant 128-bit SIMD floating point vector (32x4, 64x2).
+class LSimd128Float : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Simd128Float);
+
+ explicit LSimd128Float() {}
+ const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+};
+
+// A constant Value.
+class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ Value v_;
+
+ public:
+ LIR_HEADER(Value)
+
+ explicit LValue(const Value& v)
+ : v_(v)
+ { }
+
+ Value value() const {
+ return v_;
+ }
+};
+
+// Clone an object literal so that we do not modify the object contained in
+// the source.
+class LCloneLiteral : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CloneLiteral)
+
+ explicit LCloneLiteral(const LAllocation& obj)
+ {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* getObjectLiteral() {
+ return getOperand(0);
+ }
+
+ MCloneLiteral* mir() const {
+ return mir_->toCloneLiteral();
+ }
+};
+
+// Formal argument for a function, returning a box. Formal arguments are
+// initially read from the stack.
+class LParameter : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(Parameter)
+};
+
+// Stack offset for a word-sized immutable input value to a frame.
+class LCallee : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Callee)
+};
+
+class LIsConstructing : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(IsConstructing)
+};
+
+// Base class for control instructions (goto, branch, etc.)
+template <size_t Succs, size_t Operands, size_t Temps>
+class LControlInstructionHelper : public LInstructionHelper<0, Operands, Temps> {
+
+ mozilla::Array<MBasicBlock*, Succs> successors_;
+
+ public:
+ virtual size_t numSuccessors() const final override { return Succs; }
+
+ virtual MBasicBlock* getSuccessor(size_t i) const final override {
+ return successors_[i];
+ }
+
+ virtual void setSuccessor(size_t i, MBasicBlock* successor) final override {
+ successors_[i] = successor;
+ }
+};
+
+// Jumps to the start of a basic block.
+class LGoto : public LControlInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Goto)
+
+ explicit LGoto(MBasicBlock* block)
+ {
+ setSuccessor(0, block);
+ }
+
+ MBasicBlock* target() const {
+ return getSuccessor(0);
+ }
+};
+
+class LNewArray : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewArray)
+
+ explicit LNewArray(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArray* mir() const {
+ return mir_->toNewArray();
+ }
+};
+
+class LNewArrayCopyOnWrite : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewArrayCopyOnWrite)
+
+ explicit LNewArrayCopyOnWrite(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArrayCopyOnWrite* mir() const {
+ return mir_->toNewArrayCopyOnWrite();
+ }
+};
+
+class LNewArrayDynamicLength : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewArrayDynamicLength)
+
+ explicit LNewArrayDynamicLength(const LAllocation& length, const LDefinition& temp) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArrayDynamicLength* mir() const {
+ return mir_->toNewArrayDynamicLength();
+ }
+};
+
+class LNewTypedArray : public LInstructionHelper<1, 0, 2>
+{
+ public:
+ LIR_HEADER(NewTypedArray)
+
+ explicit LNewTypedArray(const LDefinition& temp1, const LDefinition& temp2) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ MNewTypedArray* mir() const {
+ return mir_->toNewTypedArray();
+ }
+};
+
+class LNewTypedArrayDynamicLength : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewTypedArrayDynamicLength)
+
+ explicit LNewTypedArrayDynamicLength(const LAllocation& length, const LDefinition& temp) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewTypedArrayDynamicLength* mir() const {
+ return mir_->toNewTypedArrayDynamicLength();
+ }
+};
+
+class LNewObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewObject)
+
+ explicit LNewObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewObject* mir() const {
+ return mir_->toNewObject();
+ }
+};
+
+class LNewTypedObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewTypedObject)
+
+ explicit LNewTypedObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewTypedObject* mir() const {
+ return mir_->toNewTypedObject();
+ }
+};
+
+// Allocates a new NamedLambdaObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) An inline allocation of the call object is attempted.
+// (2) Otherwise, a callVM creates a new object.
+//
+class LNewNamedLambdaObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewNamedLambdaObject);
+
+ explicit LNewNamedLambdaObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewNamedLambdaObject* mir() const {
+ return mir_->toNewNamedLambdaObject();
+ }
+};
+
+// Allocates a new CallObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) If the call object is extensible, this is a callVM to create the
+// call object.
+// (2) Otherwise, an inline allocation of the call object is attempted.
+//
+class LNewCallObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewCallObject)
+
+ explicit LNewCallObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewCallObject* mir() const {
+ return mir_->toNewCallObject();
+ }
+};
+
+// Performs a callVM to allocate a new CallObject with singleton type.
+class LNewSingletonCallObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewSingletonCallObject)
+
+ explicit LNewSingletonCallObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewSingletonCallObject* mir() const {
+ return mir_->toNewSingletonCallObject();
+ }
+};
+
+class LNewDerivedTypedObject : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(NewDerivedTypedObject);
+
+ LNewDerivedTypedObject(const LAllocation& type,
+ const LAllocation& owner,
+ const LAllocation& offset) {
+ setOperand(0, type);
+ setOperand(1, owner);
+ setOperand(2, offset);
+ }
+
+ const LAllocation* type() {
+ return getOperand(0);
+ }
+
+ const LAllocation* owner() {
+ return getOperand(1);
+ }
+
+ const LAllocation* offset() {
+ return getOperand(2);
+ }
+};
+
+class LNewStringObject : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewStringObject)
+
+ LNewStringObject(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MNewStringObject* mir() const {
+ return mir_->toNewStringObject();
+ }
+};
+
+class LInitElem : public LCallInstructionHelper<0, 1 + 2*BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitElem)
+
+ LInitElem(const LAllocation& object, const LBoxAllocation& id, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(IdIndex, id);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t IdIndex = 1;
+ static const size_t ValueIndex = 1 + BOX_PIECES;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ MInitElem* mir() const {
+ return mir_->toInitElem();
+ }
+};
+
+class LInitElemGetterSetter : public LCallInstructionHelper<0, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitElemGetterSetter)
+
+ LInitElemGetterSetter(const LAllocation& object, const LBoxAllocation& id,
+ const LAllocation& value) {
+ setOperand(0, object);
+ setOperand(1, value);
+ setBoxOperand(IdIndex, id);
+ }
+
+ static const size_t IdIndex = 2;
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ MInitElemGetterSetter* mir() const {
+ return mir_->toInitElemGetterSetter();
+ }
+};
+
+// Takes in an Object and a Value.
+class LMutateProto : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(MutateProto)
+
+ LMutateProto(const LAllocation& object, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 1;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getValue() {
+ return getOperand(1);
+ }
+};
+
+// Takes in an Object and a Value.
+class LInitProp : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitProp)
+
+ LInitProp(const LAllocation& object, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 1;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getValue() {
+ return getOperand(1);
+ }
+
+ MInitProp* mir() const {
+ return mir_->toInitProp();
+ }
+};
+
+class LInitPropGetterSetter : public LCallInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(InitPropGetterSetter)
+
+ LInitPropGetterSetter(const LAllocation& object, const LAllocation& value) {
+ setOperand(0, object);
+ setOperand(1, value);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ MInitPropGetterSetter* mir() const {
+ return mir_->toInitPropGetterSetter();
+ }
+};
+
+class LCheckOverRecursed : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(CheckOverRecursed)
+
+ LCheckOverRecursed()
+ { }
+
+ MCheckOverRecursed* mir() const {
+ return mir_->toCheckOverRecursed();
+ }
+};
+
+class LWasmTrap : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmTrap);
+
+ LWasmTrap()
+ { }
+
+ const MWasmTrap* mir() const {
+ return mir_->toWasmTrap();
+ }
+};
+
+template<size_t Defs, size_t Ops>
+class LWasmReinterpretBase : public LInstructionHelper<Defs, Ops, 0>
+{
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ public:
+ const LAllocation* input() {
+ return Base::getOperand(0);
+ }
+ MWasmReinterpret* mir() const {
+ return Base::mir_->toWasmReinterpret();
+ }
+};
+
+class LWasmReinterpret : public LWasmReinterpretBase<1, 1>
+{
+ public:
+ LIR_HEADER(WasmReinterpret);
+ explicit LWasmReinterpret(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LWasmReinterpretFromI64 : public LWasmReinterpretBase<1, INT64_PIECES>
+{
+ public:
+ LIR_HEADER(WasmReinterpretFromI64);
+ explicit LWasmReinterpretFromI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+class LWasmReinterpretToI64 : public LWasmReinterpretBase<INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(WasmReinterpretToI64);
+ explicit LWasmReinterpretToI64(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+namespace details {
+ template<size_t Defs, size_t Ops, size_t Temps>
+ class RotateBase : public LInstructionHelper<Defs, Ops, Temps>
+ {
+ typedef LInstructionHelper<Defs, Ops, Temps> Base;
+ public:
+ MRotate* mir() {
+ return Base::mir_->toRotate();
+ }
+ };
+} // details
+
+class LRotate : public details::RotateBase<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Rotate);
+
+ const LAllocation* input() { return getOperand(0); }
+ LAllocation* count() { return getOperand(1); }
+};
+
+class LRotateI64 : public details::RotateBase<INT64_PIECES, INT64_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(RotateI64);
+
+ LRotateI64()
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ static const size_t Input = 0;
+ static const size_t Count = INT64_PIECES;
+
+ const LInt64Allocation input() { return getInt64Operand(Input); }
+ const LDefinition* temp() { return getTemp(0); }
+ LAllocation* count() { return getOperand(Count); }
+};
+
+class LInterruptCheck : public LInstructionHelper<0, 0, 0>
+{
+ Label* oolEntry_;
+
+ // Whether this is an implicit interrupt check. Implicit interrupt checks
+ // use a patchable backedge and signal handlers instead of an explicit
+ // rt->interrupt check.
+ bool implicit_;
+
+ public:
+ LIR_HEADER(InterruptCheck)
+
+ LInterruptCheck()
+ : oolEntry_(nullptr),
+ implicit_(false)
+ {}
+
+ Label* oolEntry() {
+ MOZ_ASSERT(implicit_);
+ return oolEntry_;
+ }
+
+ void setOolEntry(Label* oolEntry) {
+ MOZ_ASSERT(implicit_);
+ oolEntry_ = oolEntry;
+ }
+ MInterruptCheck* mir() const {
+ return mir_->toInterruptCheck();
+ }
+
+ void setImplicit() {
+ implicit_ = true;
+ }
+ bool implicit() const {
+ return implicit_;
+ }
+};
+
+class LDefVar : public LCallInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(DefVar)
+
+ explicit LDefVar(const LAllocation& envChain)
+ {
+ setOperand(0, envChain);
+ }
+
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ MDefVar* mir() const {
+ return mir_->toDefVar();
+ }
+};
+
+class LDefLexical : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(DefLexical)
+
+ MDefLexical* mir() const {
+ return mir_->toDefLexical();
+ }
+};
+
+class LDefFun : public LCallInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(DefFun)
+
+ LDefFun(const LAllocation& fun, const LAllocation& envChain)
+ {
+ setOperand(0, fun);
+ setOperand(1, envChain);
+ }
+
+ const LAllocation* fun() {
+ return getOperand(0);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(1);
+ }
+ MDefFun* mir() const {
+ return mir_->toDefFun();
+ }
+};
+
+class LTypeOfV : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(TypeOfV)
+
+ LTypeOfV(const LBoxAllocation& input, const LDefinition& tempToUnbox) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+
+ MTypeOf* mir() const {
+ return mir_->toTypeOf();
+ }
+};
+
+class LToAsync : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ToAsync)
+ explicit LToAsync(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* unwrapped() {
+ return getOperand(0);
+ }
+};
+
+class LToIdV : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ToIdV)
+
+ LToIdV(const LBoxAllocation& input, const LDefinition& temp)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ MToId* mir() const {
+ return mir_->toToId();
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+};
+
+// Allocate an object for |new| on the caller-side,
+// when there is no templateObject or prototype known
+class LCreateThis : public LCallInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(CreateThis)
+
+ LCreateThis(const LAllocation& callee, const LAllocation& newTarget)
+ {
+ setOperand(0, callee);
+ setOperand(1, newTarget);
+ }
+
+ const LAllocation* getCallee() {
+ return getOperand(0);
+ }
+ const LAllocation* getNewTarget() {
+ return getOperand(1);
+ }
+
+ MCreateThis* mir() const {
+ return mir_->toCreateThis();
+ }
+};
+
+// Allocate an object for |new| on the caller-side,
+// when the prototype is known.
+class LCreateThisWithProto : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(CreateThisWithProto)
+
+ LCreateThisWithProto(const LAllocation& callee, const LAllocation& newTarget,
+ const LAllocation& prototype)
+ {
+ setOperand(0, callee);
+ setOperand(1, newTarget);
+ setOperand(2, prototype);
+ }
+
+ const LAllocation* getCallee() {
+ return getOperand(0);
+ }
+ const LAllocation* getNewTarget() {
+ return getOperand(1);
+ }
+ const LAllocation* getPrototype() {
+ return getOperand(2);
+ }
+
+ MCreateThis* mir() const {
+ return mir_->toCreateThis();
+ }
+};
+
+// Allocate an object for |new| on the caller-side.
+// Always performs object initialization with a fast path.
+class LCreateThisWithTemplate : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(CreateThisWithTemplate)
+
+ explicit LCreateThisWithTemplate(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ MCreateThisWithTemplate* mir() const {
+ return mir_->toCreateThisWithTemplate();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Allocate a new arguments object for the frame.
+class LCreateArgumentsObject : public LCallInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(CreateArgumentsObject)
+
+ LCreateArgumentsObject(const LAllocation& callObj, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ {
+ setOperand(0, callObj);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ const LAllocation* getCallObject() {
+ return getOperand(0);
+ }
+
+ MCreateArgumentsObject* mir() const {
+ return mir_->toCreateArgumentsObject();
+ }
+};
+
+// Get argument from arguments object.
+class LGetArgumentsObjectArg : public LInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(GetArgumentsObjectArg)
+
+ LGetArgumentsObjectArg(const LAllocation& argsObj, const LDefinition& temp)
+ {
+ setOperand(0, argsObj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() {
+ return getOperand(0);
+ }
+
+ MGetArgumentsObjectArg* mir() const {
+ return mir_->toGetArgumentsObjectArg();
+ }
+};
+
+// Set argument on arguments object.
+class LSetArgumentsObjectArg : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(SetArgumentsObjectArg)
+
+ LSetArgumentsObjectArg(const LAllocation& argsObj, const LBoxAllocation& value,
+ const LDefinition& temp)
+ {
+ setOperand(0, argsObj);
+ setBoxOperand(ValueIndex, value);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() {
+ return getOperand(0);
+ }
+
+ MSetArgumentsObjectArg* mir() const {
+ return mir_->toSetArgumentsObjectArg();
+ }
+
+ static const size_t ValueIndex = 1;
+};
+
+// If the Value is an Object, return unbox(Value).
+// Otherwise, return the other Object.
+class LReturnFromCtor : public LInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(ReturnFromCtor)
+
+ LReturnFromCtor(const LBoxAllocation& value, const LAllocation& object)
+ {
+ setBoxOperand(ValueIndex, value);
+ setOperand(ObjectIndex, object);
+ }
+
+ const LAllocation* getObject() {
+ return getOperand(ObjectIndex);
+ }
+
+ static const size_t ValueIndex = 0;
+ static const size_t ObjectIndex = BOX_PIECES;
+};
+
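+// Editor's sketch (not part of the original patch): the comment on
+// LReturnFromCtor above describes the |new| result-selection rule the codegen
+// has to implement. Below is a standalone model of that rule; the "Example"
+// names are hypothetical stand-ins, not SpiderMonkey API.
+struct ExampleCtorReturnValue
+{
+ bool isObject;
+ void* object; // meaningful only when isObject is true
+};
+
+// If the constructor returned an object, that object is the result of |new|;
+// otherwise the |this| object allocated for the call is used.
+static inline void* ExampleReturnFromCtor(const ExampleCtorReturnValue& rval, void* thisObject)
+{
+ return rval.isObject ? rval.object : thisObject;
+}
+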
+class LComputeThis : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ComputeThis)
+
+ static const size_t ValueIndex = 0;
+
+ explicit LComputeThis(const LBoxAllocation& value) {
+ setBoxOperand(ValueIndex, value);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+
+ MComputeThis* mir() const {
+ return mir_->toComputeThis();
+ }
+};
+
+// Writes a typed argument for a function call to the frame's argument vector.
+class LStackArgT : public LInstructionHelper<0, 1, 0>
+{
+ uint32_t argslot_; // Index into frame-scope argument vector.
+ MIRType type_;
+
+ public:
+ LIR_HEADER(StackArgT)
+
+ LStackArgT(uint32_t argslot, MIRType type, const LAllocation& arg)
+ : argslot_(argslot),
+ type_(type)
+ {
+ setOperand(0, arg);
+ }
+ uint32_t argslot() const {
+ return argslot_;
+ }
+ MIRType type() const {
+ return type_;
+ }
+ const LAllocation* getArgument() {
+ return getOperand(0);
+ }
+};
+
+// Writes an untyped argument for a function call to the frame's argument vector.
+class LStackArgV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ uint32_t argslot_; // Index into frame-scope argument vector.
+
+ public:
+ LIR_HEADER(StackArgV)
+
+ LStackArgV(uint32_t argslot, const LBoxAllocation& value)
+ : argslot_(argslot)
+ {
+ setBoxOperand(0, value);
+ }
+
+ uint32_t argslot() const {
+ return argslot_;
+ }
+};
+
+// Common code for LIR descended from MCall.
+template <size_t Defs, size_t Operands, size_t Temps>
+class LJSCallInstructionHelper : public LCallInstructionHelper<Defs, Operands, Temps>
+{
+ public:
+ uint32_t argslot() const {
+ if (JitStackValueAlignment > 1)
+ return AlignBytes(mir()->numStackArgs(), JitStackValueAlignment);
+ return mir()->numStackArgs();
+ }
+ MCall* mir() const {
+ return this->mir_->toCall();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const {
+ return mir()->numActualArgs();
+ }
+
+ bool isConstructing() const {
+ return mir()->isConstructing();
+ }
+};
+
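+// Editor's sketch (not part of the original patch): argslot() above reserves
+// stack Value slots for the call and rounds the count up to
+// JitStackValueAlignment when that alignment is greater than one. A standalone
+// model of the rounding; names are hypothetical and the alignment value shown
+// below is only an example.
+#include <stdint.h>
+
+static inline uint32_t ExampleAlignedArgSlots(uint32_t numStackArgs, uint32_t valueAlignment)
+{
+ // Round numStackArgs up to the next multiple of valueAlignment (assumed > 0).
+ return (numStackArgs + valueAlignment - 1) / valueAlignment * valueAlignment;
+}
+
+// With valueAlignment == 2: 3 stack args -> 4 slots, 4 -> 4, 5 -> 6.
+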
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LCallGeneric : public LJSCallInstructionHelper<BOX_PIECES, 1, 2>
+{
+ public:
+ LIR_HEADER(CallGeneric)
+
+ LCallGeneric(const LAllocation& func, const LDefinition& nargsreg,
+ const LDefinition& tmpobjreg)
+ {
+ setOperand(0, func);
+ setTemp(0, nargsreg);
+ setTemp(1, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LDefinition* getNargsReg() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempObject() {
+ return getTemp(1);
+ }
+};
+
+// Generates a hardcoded callsite for a known, non-native target.
+class LCallKnown : public LJSCallInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(CallKnown)
+
+ LCallKnown(const LAllocation& func, const LDefinition& tmpobjreg)
+ {
+ setOperand(0, func);
+ setTemp(0, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+};
+
+// Generates a hardcoded callsite for a known, native target.
+class LCallNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4>
+{
+ public:
+ LIR_HEADER(CallNative)
+
+ LCallNative(const LDefinition& argContext, const LDefinition& argUintN,
+ const LDefinition& argVp, const LDefinition& tmpreg)
+ {
+ // Registers used for callWithABI().
+ setTemp(0, argContext);
+ setTemp(1, argUintN);
+ setTemp(2, argVp);
+
+ // Temporary registers.
+ setTemp(3, tmpreg);
+ }
+
+ const LDefinition* getArgContextReg() {
+ return getTemp(0);
+ }
+ const LDefinition* getArgUintNReg() {
+ return getTemp(1);
+ }
+ const LDefinition* getArgVpReg() {
+ return getTemp(2);
+ }
+ const LDefinition* getTempReg() {
+ return getTemp(3);
+ }
+};
+
+// Generates a hardcoded callsite for a known, DOM-native target.
+class LCallDOMNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4>
+{
+ public:
+ LIR_HEADER(CallDOMNative)
+
+ LCallDOMNative(const LDefinition& argJSContext, const LDefinition& argObj,
+ const LDefinition& argPrivate, const LDefinition& argArgs)
+ {
+ setTemp(0, argJSContext);
+ setTemp(1, argObj);
+ setTemp(2, argPrivate);
+ setTemp(3, argArgs);
+ }
+
+ const LDefinition* getArgJSContext() {
+ return getTemp(0);
+ }
+ const LDefinition* getArgObj() {
+ return getTemp(1);
+ }
+ const LDefinition* getArgPrivate() {
+ return getTemp(2);
+ }
+ const LDefinition* getArgArgs() {
+ return getTemp(3);
+ }
+};
+
+class LBail : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Bail)
+};
+
+class LUnreachable : public LControlInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Unreachable)
+};
+
+class LEncodeSnapshot : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(EncodeSnapshot)
+};
+
+template <size_t defs, size_t ops>
+class LDOMPropertyInstructionHelper : public LCallInstructionHelper<defs, 1 + ops, 3>
+{
+ protected:
+ LDOMPropertyInstructionHelper(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ {
+ this->setOperand(0, ObjectReg);
+ this->setTemp(0, JSContextReg);
+ this->setTemp(1, PrivReg);
+ this->setTemp(2, ValueReg);
+ }
+
+ public:
+ const LDefinition* getJSContextReg() {
+ return this->getTemp(0);
+ }
+ const LAllocation* getObjectReg() {
+ return this->getOperand(0);
+ }
+ const LDefinition* getPrivReg() {
+ return this->getTemp(1);
+ }
+ const LDefinition* getValueReg() {
+ return this->getTemp(2);
+ }
+};
+
+
+class LGetDOMProperty : public LDOMPropertyInstructionHelper<BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetDOMProperty)
+
+ LGetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<BOX_PIECES, 0>(JSContextReg, ObjectReg,
+ PrivReg, ValueReg)
+ { }
+
+ MGetDOMProperty* mir() const {
+ return mir_->toGetDOMProperty();
+ }
+};
+
+class LGetDOMMemberV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetDOMMemberV);
+ explicit LGetDOMMemberV(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ MGetDOMMember* mir() const {
+ return mir_->toGetDOMMember();
+ }
+};
+
+class LGetDOMMemberT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(GetDOMMemberT);
+ explicit LGetDOMMemberT(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ MGetDOMMember* mir() const {
+ return mir_->toGetDOMMember();
+ }
+};
+
+class LSetDOMProperty : public LDOMPropertyInstructionHelper<0, BOX_PIECES>
+{
+ public:
+ LIR_HEADER(SetDOMProperty)
+
+ LSetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LBoxAllocation& value, const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<0, BOX_PIECES>(JSContextReg, ObjectReg,
+ PrivReg, ValueReg)
+ {
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ MSetDOMProperty* mir() const {
+ return mir_->toSetDOMProperty();
+ }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LApplyArgsGeneric : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2>
+{
+ public:
+ LIR_HEADER(ApplyArgsGeneric)
+
+ LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArgs* mir() const {
+ return mir_->toApplyArgs();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LAllocation* getArgc() {
+ return getOperand(1);
+ }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempStackCounter() {
+ return getTemp(1);
+ }
+};
+
+class LApplyArrayGeneric : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2>
+{
+ public:
+ LIR_HEADER(ApplyArrayGeneric)
+
+ LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArray* mir() const {
+ return mir_->toApplyArray();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LAllocation* getElements() {
+ return getOperand(1);
+ }
+ // argc is mapped to the same register as elements: argc becomes
+ // live as elements is dying; all registers are calltemps.
+ const LAllocation* getArgc() {
+ return getOperand(1);
+ }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempStackCounter() {
+ return getTemp(1);
+ }
+};
+
+class LArraySplice : public LCallInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(ArraySplice)
+
+ LArraySplice(const LAllocation& object, const LAllocation& start,
+ const LAllocation& deleteCount)
+ {
+ setOperand(0, object);
+ setOperand(1, start);
+ setOperand(2, deleteCount);
+ }
+
+ MArraySplice* mir() const {
+ return mir_->toArraySplice();
+ }
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getStart() {
+ return getOperand(1);
+ }
+ const LAllocation* getDeleteCount() {
+ return getOperand(2);
+ }
+};
+
+class LGetDynamicName : public LCallInstructionHelper<BOX_PIECES, 2, 3>
+{
+ public:
+ LIR_HEADER(GetDynamicName)
+
+ LGetDynamicName(const LAllocation& envChain, const LAllocation& name,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3)
+ {
+ setOperand(0, envChain);
+ setOperand(1, name);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ MGetDynamicName* mir() const {
+ return mir_->toGetDynamicName();
+ }
+
+ const LAllocation* getEnvironmentChain() {
+ return getOperand(0);
+ }
+ const LAllocation* getName() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+};
+
+class LCallDirectEval : public LCallInstructionHelper<BOX_PIECES, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDirectEval)
+
+ LCallDirectEval(const LAllocation& envChain, const LAllocation& string,
+ const LBoxAllocation& newTarget)
+ {
+ setOperand(0, envChain);
+ setOperand(1, string);
+ setBoxOperand(NewTarget, newTarget);
+ }
+
+ static const size_t NewTarget = 2;
+
+ MCallDirectEval* mir() const {
+ return mir_->toCallDirectEval();
+ }
+
+ const LAllocation* getEnvironmentChain() {
+ return getOperand(0);
+ }
+ const LAllocation* getString() {
+ return getOperand(1);
+ }
+};
+
+// Takes in either an integer or boolean input and tests it for truthiness.
+class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestIAndBranch)
+
+ LTestIAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes in an int64 input and tests it for truthiness.
+class LTestI64AndBranch : public LControlInstructionHelper<2, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(TestI64AndBranch)
+
+ LTestI64AndBranch(const LInt64Allocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setInt64Operand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes in a double input and tests it for truthiness.
+class LTestDAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestDAndBranch)
+
+ LTestDAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes in a float32 input and tests it for truthiness.
+class LTestFAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestFAndBranch)
+
+ LTestFAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes an object and tests it for truthiness. An object is falsy iff it
+// emulates |undefined|; see js::EmulatesUndefined.
+class LTestOAndBranch : public LControlInstructionHelper<2, 1, 1>
+{
+ public:
+ LIR_HEADER(TestOAndBranch)
+
+ LTestOAndBranch(const LAllocation& input, MBasicBlock* ifTruthy, MBasicBlock* ifFalsy,
+ const LDefinition& temp)
+ {
+ setOperand(0, input);
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MBasicBlock* ifTruthy() {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalsy() {
+ return getSuccessor(1);
+ }
+
+ MTest* mir() {
+ return mir_->toTest();
+ }
+};
+
+// Takes in a boxed value and tests it for truthiness.
+class LTestVAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TestVAndBranch)
+
+ LTestVAndBranch(MBasicBlock* ifTruthy, MBasicBlock* ifFalsy, const LBoxAllocation& input,
+ const LDefinition& temp0, const LDefinition& temp1, const LDefinition& temp2)
+ {
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const char* extraName() const {
+ return mir()->operandMightEmulateUndefined() ? "MightEmulateUndefined" : nullptr;
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ MBasicBlock* ifTruthy() {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalsy() {
+ return getSuccessor(1);
+ }
+
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+};
+
+// Dispatches control flow to a successor based on incoming JSFunction*.
+// Used to implement polymorphic inlining.
+class LFunctionDispatch : public LInstructionHelper<0, 1, 0>
+{
+ // Dispatch is performed based on a function -> block map
+ // stored in the MIR.
+
+ public:
+ LIR_HEADER(FunctionDispatch);
+
+ explicit LFunctionDispatch(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MFunctionDispatch* mir() const {
+ return mir_->toFunctionDispatch();
+ }
+};
+
+class LObjectGroupDispatch : public LInstructionHelper<0, 1, 1>
+{
+ // Dispatch is performed based on an ObjectGroup -> block
+ // map inferred by the MIR.
+
+ public:
+ LIR_HEADER(ObjectGroupDispatch);
+
+ const char* extraName() const {
+ return mir()->hasFallback() ? "HasFallback" : "NoFallback";
+ }
+
+ LObjectGroupDispatch(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MObjectGroupDispatch* mir() const {
+ return mir_->toObjectGroupDispatch();
+ }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompare : public LInstructionHelper<1, 2, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(Compare)
+ LCompare(JSOp jsop, const LAllocation& left, const LAllocation& right)
+ : jsop_(jsop)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LCompareI64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64(JSOp jsop, const LInt64Allocation& left, const LInt64Allocation& right)
+ : jsop_(jsop)
+ {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
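+// Editor's sketch (not part of the original patch): Lhs and Rhs above are
+// operand *indices*. An int64 operand occupies INT64_PIECES consecutive
+// allocation slots (two 32-bit pieces on 32-bit targets, one slot on 64-bit
+// targets), so the right-hand side starts at index INT64_PIECES. A standalone
+// model with hypothetical names:
+#include <stddef.h>
+
+static inline size_t ExampleInt64OperandIndex(size_t operandNumber, size_t piecesPerInt64)
+{
+ // The n-th int64 operand starts n * piecesPerInt64 slots into the operand list.
+ return operandNumber * piecesPerInt64;
+}
+
+// On a hypothetical 32-bit target (piecesPerInt64 == 2) the operand vector of
+// LCompareI64 is: [0],[1] the two pieces of lhs; [2],[3] the two pieces of rhs.
+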
+class LCompareI64AndBranch : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0>
+{
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64AndBranch)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64AndBranch(MCompare* cmpMir, JSOp jsop,
+ const LInt64Allocation& left, const LInt64Allocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir), jsop_(jsop)
+ {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompareAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareAndBranch)
+ LCompareAndBranch(MCompare* cmpMir, JSOp jsop,
+ const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir), jsop_(jsop)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LCompareD : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareD)
+ LCompareD(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareF : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareF)
+ LCompareF(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareDAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareDAndBranch)
+ LCompareDAndBranch(MCompare* cmpMir, const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareFAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareFAndBranch)
+ LCompareFAndBranch(MCompare* cmpMir, const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareS : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareS)
+ LCompareS(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+// Strict equality between a value and a string.
+class LCompareStrictS : public LInstructionHelper<1, BOX_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(CompareStrictS)
+ LCompareStrictS(const LBoxAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ setTemp(0, temp);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* right() {
+ return getOperand(BOX_PIECES);
+ }
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+// Used for strict-equality comparisons where one side is a boolean
+// and the other is a value. Note that CompareI is used to compare
+// two booleans.
+class LCompareB : public LInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(CompareB)
+
+ LCompareB(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* rhs() {
+ return getOperand(BOX_PIECES);
+ }
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareBAndBranch : public LControlInstructionHelper<2, BOX_PIECES + 1, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareBAndBranch)
+
+ LCompareBAndBranch(MCompare* cmpMir, const LBoxAllocation& lhs, const LAllocation& rhs,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* rhs() {
+ return getOperand(BOX_PIECES);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareBitwise : public LInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CompareBitwise)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareBitwise(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCompare* mir() const {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareBitwiseAndBranch : public LControlInstructionHelper<2, 2 * BOX_PIECES, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareBitwiseAndBranch)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareBitwiseAndBranch(MCompare* cmpMir, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : cmpMir_(cmpMir)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareVM : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CompareVM)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareVM(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCompare* mir() const {
+ return mir_->toCompare();
+ }
+};
+
+class LBitAndAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ public:
+ LIR_HEADER(BitAndAndBranch)
+ LBitAndAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+};
+
+// Takes a value and tests whether it is null, undefined, or is an object that
+// emulates |undefined|, as determined by the JSCLASS_EMULATES_UNDEFINED class
+// flag on unwrapped objects. See also js::EmulatesUndefined.
+class LIsNullOrLikeUndefinedV : public LInstructionHelper<1, BOX_PIECES, 2>
+{
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedV)
+
+ LIsNullOrLikeUndefinedV(const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ {
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(1);
+ }
+};
+
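+// Editor's sketch (not part of the original patch): a standalone model of the
+// predicate described in the comment above, with a hypothetical value
+// description standing in for a boxed js::Value.
+struct ExampleValueShape
+{
+ bool isNull;
+ bool isUndefined;
+ bool isObject;
+ bool objectEmulatesUndefined; // JSCLASS_EMULATES_UNDEFINED on the unwrapped object
+};
+
+static inline bool ExampleIsNullOrLikeUndefined(const ExampleValueShape& v)
+{
+ if (v.isNull || v.isUndefined)
+ return true;
+ return v.isObject && v.objectEmulatesUndefined;
+}
+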
+// Takes an object or object-or-null pointer and tests whether it is null or is
+// an object that emulates |undefined|, as above.
+class LIsNullOrLikeUndefinedT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedT)
+
+ explicit LIsNullOrLikeUndefinedT(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LIsNullOrLikeUndefinedAndBranchV : public LControlInstructionHelper<2, BOX_PIECES, 2>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchV)
+
+ LIsNullOrLikeUndefinedAndBranchV(MCompare* cmpMir, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ : cmpMir_(cmpMir)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* tempToUnbox() {
+ return getTemp(1);
+ }
+};
+
+class LIsNullOrLikeUndefinedAndBranchT : public LControlInstructionHelper<2, 1, 1>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchT)
+
+ LIsNullOrLikeUndefinedAndBranchT(MCompare* cmpMir, const LAllocation& input,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LDefinition& temp)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, input);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setTemp(0, temp);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Not operation on an integer.
+class LNotI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotI)
+
+ explicit LNotI(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Not operation on an int64.
+class LNotI64 : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(NotI64)
+
+ explicit LNotI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+// Not operation on a double.
+class LNotD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotD)
+
+ explicit LNotD(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Not operation on a float32.
+class LNotF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotF)
+
+ explicit LNotF(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Boolean complement operation on an object.
+class LNotO : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotO)
+
+ explicit LNotO(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Boolean complement operation on a value.
+class LNotV : public LInstructionHelper<1, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(NotV)
+
+ static const size_t Input = 0;
+ LNotV(const LBoxAllocation& input, const LDefinition& temp0, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Bitwise not operation, taking a 32-bit integer as input and returning
+// a 32-bit integer result as an output.
+class LBitNotI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BitNotI)
+};
+
+// Call a VM function to perform a BITNOT operation.
+class LBitNotV : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(BitNotV)
+
+ static const size_t Input = 0;
+
+ explicit LBitNotV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+};
+
+// Binary bitwise operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LBitOpI : public LInstructionHelper<1, 2, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI)
+
+ explicit LBitOpI(JSOp op)
+ : op_(op)
+ { }
+
+ const char* extraName() const {
+ if (bitop() == JSOP_URSH && mir_->toUrsh()->bailoutsDisabled())
+ return "ursh:BailoutsDisabled";
+ return CodeName[op_];
+ }
+
+ JSOp bitop() const {
+ return op_;
+ }
+};
+
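+// Editor's sketch (not part of the original patch): the "ursh:BailoutsDisabled"
+// extraName above reflects that JSOP_URSH (>>>) is an unsigned shift, so its
+// uint32 result can exceed INT32_MAX and then no longer fits the int32-typed
+// output of LBitOpI. A standalone illustration with hypothetical names:
+#include <stdint.h>
+
+static inline bool ExampleUrshFitsInInt32(int32_t lhs, int32_t shift)
+{
+ uint32_t result = uint32_t(lhs) >> (uint32_t(shift) & 31); // JS masks the shift count by 31
+ return result <= uint32_t(INT32_MAX); // false e.g. for (-1 >>> 0) == 0xffffffff
+}
+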
+class LBitOpI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ explicit LBitOpI64(JSOp op)
+ : op_(op)
+ { }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+
+ JSOp bitop() const {
+ return op_;
+ }
+};
+
+// Call a VM function to perform a bitwise operation.
+class LBitOpV : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(BitOpV)
+
+ LBitOpV(JSOp jsop, const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : jsop_(jsop)
+ {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+// Shift operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LShiftI : public LBinaryMath<0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI)
+
+ explicit LShiftI(JSOp op)
+ : op_(op)
+ { }
+
+ JSOp bitop() {
+ return op_;
+ }
+
+ MInstruction* mir() {
+ return mir_->toInstruction();
+ }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+};
+
+class LShiftI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI64)
+
+ explicit LShiftI64(JSOp op)
+ : op_(op)
+ { }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ JSOp bitop() {
+ return op_;
+ }
+
+ MInstruction* mir() {
+ return mir_->toInstruction();
+ }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+};
+
+// Sign extension
+class LSignExtend : public LInstructionHelper<1, 1, 0>
+{
+ MSignExtend::Mode mode_;
+
+ public:
+ LIR_HEADER(SignExtend);
+ explicit LSignExtend(const LAllocation& num, MSignExtend::Mode mode)
+ : mode_(mode)
+ {
+ setOperand(0, num);
+ }
+
+ MSignExtend::Mode mode() { return mode_; }
+};
+
+class LUrshD : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UrshD)
+
+ LUrshD(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Returns from the function being compiled (not used in inlined frames). The
+// input must be a box.
+class LReturn : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Return)
+};
+
+class LThrow : public LCallInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Throw)
+
+ static const size_t Value = 0;
+
+ explicit LThrow(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+};
+
+class LMinMaxBase : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LMinMaxBase(const LAllocation& first, const LAllocation& second)
+ {
+ setOperand(0, first);
+ setOperand(1, second);
+ }
+
+ public:
+ const LAllocation* first() {
+ return this->getOperand(0);
+ }
+ const LAllocation* second() {
+ return this->getOperand(1);
+ }
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ MMinMax* mir() const {
+ return mir_->toMinMax();
+ }
+ const char* extraName() const {
+ return mir()->isMax() ? "Max" : "Min";
+ }
+};
+
+class LMinMaxI : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxI)
+ LMinMaxI(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+class LMinMaxD : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxD)
+ LMinMaxD(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+class LMinMaxF : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxF)
+ LMinMaxF(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+// Negative of an integer
+class LNegI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegI);
+ explicit LNegI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a double.
+class LNegD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegD)
+ explicit LNegD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a float32.
+class LNegF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegF)
+ explicit LNegF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of an integer.
+class LAbsI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsI)
+ explicit LAbsI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of a double.
+class LAbsD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsD)
+ explicit LAbsD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of a float32.
+class LAbsF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsF)
+ explicit LAbsF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Copysign for doubles.
+class LCopySignD : public LInstructionHelper<1, 2, 2>
+{
+ public:
+ LIR_HEADER(CopySignD)
+ explicit LCopySignD() {}
+};
+
+// Copysign for float32.
+class LCopySignF : public LInstructionHelper<1, 2, 2>
+{
+ public:
+ LIR_HEADER(CopySignF)
+ explicit LCopySignF() {}
+};
+
+// Count leading zeroes on an int32.
+class LClzI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ClzI)
+ explicit LClzI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+
+ MClz* mir() const {
+ return mir_->toClz();
+ }
+};
+
+// Count leading zeroes on an int64.
+class LClzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ClzI64)
+ explicit LClzI64(const LInt64Allocation& num) {
+ setInt64Operand(0, num);
+ }
+
+ MClz* mir() const {
+ return mir_->toClz();
+ }
+};
+
+// Count trailing zeroes on an int32.
+class LCtzI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CtzI)
+ explicit LCtzI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+
+ MCtz* mir() const {
+ return mir_->toCtz();
+ }
+};
+
+// Count trailing zeroes on an int64.
+class LCtzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CtzI64)
+ explicit LCtzI64(const LInt64Allocation& num) {
+ setInt64Operand(0, num);
+ }
+
+ MCtz* mir() const {
+ return mir_->toCtz();
+ }
+};
+
+// Count population on an int32.
+class LPopcntI : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(PopcntI)
+ explicit LPopcntI(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const {
+ return mir_->toPopcnt();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Count population on an int64.
+class LPopcntI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PopcntI64)
+ explicit LPopcntI64(const LInt64Allocation& num, const LDefinition& temp) {
+ setInt64Operand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const {
+ return mir_->toPopcnt();
+ }
+};
+
+// Square root of a double.
+class LSqrtD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SqrtD)
+ explicit LSqrtD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Square root of a float32.
+class LSqrtF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SqrtF)
+ explicit LSqrtF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+class LAtan2D : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(Atan2D)
+ LAtan2D(const LAllocation& y, const LAllocation& x, const LDefinition& temp) {
+ setOperand(0, y);
+ setOperand(1, x);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* y() {
+ return getOperand(0);
+ }
+
+ const LAllocation* x() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+};
+
+class LHypot : public LCallInstructionHelper<1, 4, 1>
+{
+ uint32_t numOperands_;
+ public:
+ LIR_HEADER(Hypot)
+ LHypot(const LAllocation& x, const LAllocation& y, const LDefinition& temp)
+ : numOperands_(2)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z, const LDefinition& temp)
+ : numOperands_(3)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z, const LAllocation& w, const LDefinition& temp)
+ : numOperands_(4)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+ setTemp(0, temp);
+ }
+
+ uint32_t numArgs() const { return numOperands_; }
+
+ const LAllocation* x() {
+ return getOperand(0);
+ }
+
+ const LAllocation* y() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+};
+
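+// Editor's sketch (not part of the original patch): LHypot carries a variable
+// operand count (2, 3, or 4) for Math.hypot. The model below shows the value
+// the call ultimately computes, ignoring the overflow/underflow scaling a real
+// libm-quality implementation would add; names are hypothetical.
+#include <math.h>
+#include <stdint.h>
+
+static inline double ExampleHypot(const double* args, uint32_t numArgs)
+{
+ double sumOfSquares = 0.0;
+ for (uint32_t i = 0; i < numArgs; i++)
+ sumOfSquares += args[i] * args[i];
+ return sqrt(sumOfSquares);
+}
+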
+// Double raised to an integer power.
+class LPowI : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(PowI)
+ LPowI(const LAllocation& value, const LAllocation& power, const LDefinition& temp) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+ const LAllocation* power() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Double raised to a double power.
+class LPowD : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(PowD)
+ LPowD(const LAllocation& value, const LAllocation& power, const LDefinition& temp) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+ const LAllocation* power() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LMathFunctionD : public LCallInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(MathFunctionD)
+ LMathFunctionD(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MMathFunction* mir() const {
+ return mir_->toMathFunction();
+ }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+class LMathFunctionF : public LCallInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(MathFunctionF)
+ LMathFunctionF(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MMathFunction* mir() const {
+ return mir_->toMathFunction();
+ }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+// Adds two integers, returning an integer value.
+class LAddI : public LBinaryMath<0>
+{
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(AddI)
+
+ LAddI()
+ : recoversInput_(false)
+ { }
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ virtual bool recoversInput() const {
+ return recoversInput_;
+ }
+ void setRecoversInput() {
+ recoversInput_ = true;
+ }
+
+ MAdd* mir() const {
+ return mir_->toAdd();
+ }
+};
+
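+// Editor's sketch (not part of the original patch): the "OverflowCheck"
+// extraName above corresponds to the guarded form of the add, which bails out
+// when the int32 addition overflows; when recoversInput() is true the left
+// input can be recomputed from the result (result - rhs) instead of being kept
+// alive across the instruction. A standalone model of the guard, with
+// hypothetical names and "return false" standing in for a bailout:
+#include <stdint.h>
+
+static inline bool ExampleCheckedAddInt32(int32_t lhs, int32_t rhs, int32_t* out)
+{
+ int64_t wide = int64_t(lhs) + int64_t(rhs);
+ if (wide < INT32_MIN || wide > INT32_MAX)
+ return false; // overflow: the real instruction would bail out here
+ *out = int32_t(wide);
+ return true;
+}
+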
+class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(AddI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Subtracts two integers, returning an integer value.
+class LSubI : public LBinaryMath<0>
+{
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(SubI)
+
+ LSubI()
+ : recoversInput_(false)
+ { }
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ virtual bool recoversInput() const {
+ return recoversInput_;
+ }
+ void setRecoversInput() {
+ recoversInput_ = true;
+ }
+ MSub* mir() const {
+ return mir_->toSub();
+ }
+};
+
+class LSubI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(SubI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+class LMulI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(MulI64)
+
+ explicit LMulI64()
+ {
+ setTemp(0, LDefinition());
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Performs an add, sub, mul, or div on two double values.
+class LMathD : public LBinaryMath<0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathD)
+
+ explicit LMathD(JSOp jsop)
+ : jsop_(jsop)
+ { }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+// Performs an add, sub, mul, or div on two float32 values.
+class LMathF : public LBinaryMath<0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathF)
+
+ explicit LMathF(JSOp jsop)
+ : jsop_(jsop)
+ { }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LModD : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModD)
+
+ LModD(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ bool isCall() const {
+ return true;
+ }
+};
+
+// Call a VM function to perform a binary operation.
+class LBinaryV : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(BinaryV)
+
+ LBinaryV(JSOp jsop, const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : jsop_(jsop)
+ {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+// Adds two strings, returning a string.
+class LConcat : public LInstructionHelper<1, 2, 5>
+{
+ public:
+ LIR_HEADER(Concat)
+
+ LConcat(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3, const LDefinition& temp4,
+ const LDefinition& temp5)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ setTemp(3, temp4);
+ setTemp(4, temp5);
+ }
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+ const LDefinition* temp1() {
+ return this->getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return this->getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return this->getTemp(2);
+ }
+ const LDefinition* temp4() {
+ return this->getTemp(3);
+ }
+ const LDefinition* temp5() {
+ return this->getTemp(4);
+ }
+};
+
+// Get uint16 character code from a string.
+class LCharCodeAt : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CharCodeAt)
+
+ LCharCodeAt(const LAllocation& str, const LAllocation& index) {
+ setOperand(0, str);
+ setOperand(1, index);
+ }
+
+ const LAllocation* str() {
+ return this->getOperand(0);
+ }
+ const LAllocation* index() {
+ return this->getOperand(1);
+ }
+};
+
+// Convert uint16 character code to a string.
+class LFromCharCode : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FromCharCode)
+
+ explicit LFromCharCode(const LAllocation& code) {
+ setOperand(0, code);
+ }
+
+ const LAllocation* code() {
+ return this->getOperand(0);
+ }
+};
+
+// Convert uint32 code point to a string.
+class LFromCodePoint : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FromCodePoint)
+
+ explicit LFromCodePoint(const LAllocation& codePoint) {
+ setOperand(0, codePoint);
+ }
+
+ const LAllocation* codePoint() {
+ return this->getOperand(0);
+ }
+};
+
+// Calculates sincos(x) and returns two values (sin/cos).
+class LSinCos : public LCallInstructionHelper<2, 1, 2>
+{
+ public:
+ LIR_HEADER(SinCos)
+
+ LSinCos(const LAllocation& input, const LDefinition& temp, const LDefinition& temp2)
+ {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* outputSin() {
+ return getDef(0);
+ }
+ const LDefinition* outputCos() {
+ return getDef(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const MSinCos* mir() const {
+ return mir_->toSinCos();
+ }
+};
+
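+// Editor's sketch (not part of the original patch): LSinCos above is a
+// two-output instruction (outputSin/outputCos), matching a sincos-style
+// computation that produces both results from one call. A standalone model
+// with hypothetical names:
+#include <math.h>
+
+struct ExampleSinCosResult
+{
+ double sinValue;
+ double cosValue;
+};
+
+static inline ExampleSinCosResult ExampleSinCos(double x)
+{
+ ExampleSinCosResult r;
+ r.sinValue = sin(x);
+ r.cosValue = cos(x);
+ return r;
+}
+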
+class LStringSplit : public LCallInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(StringSplit)
+
+ LStringSplit(const LAllocation& string, const LAllocation& separator) {
+ setOperand(0, string);
+ setOperand(1, separator);
+ }
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* separator() {
+ return getOperand(1);
+ }
+ const MStringSplit* mir() const {
+ return mir_->toStringSplit();
+ }
+};
+
+class LSubstr : public LInstructionHelper<1, 3, 3>
+{
+ public:
+ LIR_HEADER(Substr)
+
+ LSubstr(const LAllocation& string, const LAllocation& begin, const LAllocation& length,
+ const LDefinition& temp, const LDefinition& temp2, const LDefinition& temp3)
+ {
+ setOperand(0, string);
+ setOperand(1, begin);
+ setOperand(2, length);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* begin() {
+ return getOperand(1);
+ }
+ const LAllocation* length() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ const MSubstr* mir() const {
+ return mir_->toSubstr();
+ }
+};
+
+// Convert a 32-bit integer to a double.
+class LInt32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32ToDouble)
+
+ explicit LInt32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit float to a double.
+class LFloat32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Float32ToDouble)
+
+ explicit LFloat32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a double to a 32-bit float.
+class LDoubleToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(DoubleToFloat32)
+
+ explicit LDoubleToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit integer to a float32.
+class LInt32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32ToFloat32)
+
+ explicit LInt32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a value to a double.
+class LValueToDouble : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToDouble)
+ static const size_t Input = 0;
+
+ explicit LValueToDouble(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MToDouble* mir() {
+ return mir_->toToDouble();
+ }
+};
+
+// Convert a value to a float32.
+class LValueToFloat32 : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToFloat32)
+ static const size_t Input = 0;
+
+ explicit LValueToFloat32(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MToFloat32* mir() {
+ return mir_->toToFloat32();
+ }
+};
+
+// Convert a value to an int32.
+// Input: components of a Value
+// Output: 32-bit integer
+// Bailout: undefined, string, object, or non-int32 double
+// Temps: one float register, one GP register
+//
+// This instruction requires a temporary float register.
+class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2>
+{
+ public:
+ enum Mode {
+ NORMAL,
+ TRUNCATE
+ };
+
+ private:
+ Mode mode_;
+
+ public:
+ LIR_HEADER(ValueToInt32)
+
+ LValueToInt32(const LBoxAllocation& input, const LDefinition& temp0, const LDefinition& temp1,
+ Mode mode)
+ : mode_(mode)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mode() == NORMAL ? "Normal" : "Truncate";
+ }
+
+ static const size_t Input = 0;
+
+ Mode mode() const {
+ return mode_;
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(1);
+ }
+ MToInt32* mirNormal() const {
+ MOZ_ASSERT(mode_ == NORMAL);
+ return mir_->toToInt32();
+ }
+ MTruncateToInt32* mirTruncate() const {
+ MOZ_ASSERT(mode_ == TRUNCATE);
+ return mir_->toTruncateToInt32();
+ }
+ MInstruction* mir() const {
+ return mir_->toInstruction();
+ }
+};
+
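+// Editor's sketch (not part of the original patch): the two modes above differ
+// in how a double input is handled. NORMAL only succeeds when the double is
+// exactly representable as an int32 (otherwise one of the bailout cases listed
+// in the comment applies; the real instruction also treats -0.0 specially,
+// which the model below does not), while TRUNCATE applies ECMAScript-style
+// modular truncation and needs no bailout for out-of-range doubles. Names are
+// hypothetical.
+#include <math.h>
+#include <stdint.h>
+
+static inline bool ExampleNormalDoubleToInt32(double d, int32_t* out)
+{
+ if (!(d >= INT32_MIN && d <= INT32_MAX)) // rejects NaN and out-of-range values
+ return false;
+ int32_t i = int32_t(d);
+ if (double(i) != d) // rejects fractional values
+ return false;
+ *out = i;
+ return true;
+}
+
+static inline int32_t ExampleTruncateDoubleToInt32(double d)
+{
+ // ECMAScript ToInt32: NaN and infinities become 0; everything else is
+ // truncated toward zero and wrapped modulo 2^32 into the int32 range.
+ if (!isfinite(d))
+ return 0;
+ double wrapped = fmod(trunc(d), 4294967296.0); // in (-2^32, 2^32)
+ if (wrapped < 0)
+ wrapped += 4294967296.0; // now in [0, 2^32)
+ if (wrapped >= 2147483648.0)
+ wrapped -= 4294967296.0; // now in [-2^31, 2^31)
+ return int32_t(wrapped);
+}
+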
+// Convert a double to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the double cannot be converted to an integer.
+class LDoubleToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(DoubleToInt32)
+
+ explicit LDoubleToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MToInt32* mir() const {
+ return mir_->toToInt32();
+ }
+};
+
+// Convert a float32 to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the float32 cannot be converted to an integer.
+class LFloat32ToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Float32ToInt32)
+
+ explicit LFloat32ToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MToInt32* mir() const {
+ return mir_->toToInt32();
+ }
+};
+
+// Convert a double to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateDToInt32 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(TruncateDToInt32)
+
+ LTruncateDToInt32(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ MTruncateToInt32* mir() const {
+ return mir_->toTruncateToInt32();
+ }
+};
+
+// Convert a float32 to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateFToInt32 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(TruncateFToInt32)
+
+ LTruncateFToInt32(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ MTruncateToInt32* mir() const {
+ return mir_->toTruncateToInt32();
+ }
+};
+
+class LWasmTruncateToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt32)
+
+ explicit LWasmTruncateToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt32* mir() const {
+ return mir_->toWasmTruncateToInt32();
+ }
+};
+
+class LWrapInt64ToInt32 : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WrapInt64ToInt32)
+
+ static const size_t Input = 0;
+
+ explicit LWrapInt64ToInt32(const LInt64Allocation& input) {
+ setInt64Operand(Input, input);
+ }
+
+ const MWrapInt64ToInt32* mir() {
+ return mir_->toWrapInt64ToInt32();
+ }
+};
+
+class LExtendInt32ToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(ExtendInt32ToInt64)
+
+ explicit LExtendInt32ToInt64(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MExtendInt32ToInt64* mir() {
+ return mir_->toExtendInt32ToInt64();
+ }
+};
+
+// Convert a boolean value to a string.
+class LBooleanToString : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BooleanToString)
+
+ explicit LBooleanToString(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert an integer hosted on one definition to a string with a function call.
+class LIntToString : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IntToString)
+
+ explicit LIntToString(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert a double hosted on one definition to a string with a function call.
+class LDoubleToString : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(DoubleToString)
+
+ LDoubleToString(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert a primitive to a string with a function call.
+class LValueToString : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ValueToString)
+
+ LValueToString(const LBoxAllocation& input, const LDefinition& tempToUnbox)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+};
+
+// Convert a value to an object or null pointer.
+class LValueToObjectOrNull : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToObjectOrNull)
+
+ explicit LValueToObjectOrNull(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ const MToObjectOrNull* mir() {
+ return mir_->toToObjectOrNull();
+ }
+};
+
+class LInt32x4ToFloat32x4 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32x4ToFloat32x4);
+ explicit LInt32x4ToFloat32x4(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LFloat32x4ToInt32x4 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Float32x4ToInt32x4);
+ explicit LFloat32x4ToInt32x4(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MSimdConvert* mir() const {
+ return mir_->toSimdConvert();
+ }
+};
+
+// Float32x4 to Uint32x4 needs one GPR temp and one FloatReg temp.
+class LFloat32x4ToUint32x4 : public LInstructionHelper<1, 1, 2>
+{
+ public:
+ LIR_HEADER(Float32x4ToUint32x4);
+ explicit LFloat32x4ToUint32x4(const LAllocation& input, const LDefinition& tempR,
+ const LDefinition& tempF)
+ {
+ setOperand(0, input);
+ setTemp(0, tempR);
+ setTemp(1, tempF);
+ }
+ const LDefinition* tempR() {
+ return getTemp(0);
+ }
+ const LDefinition* tempF() {
+ return getTemp(1);
+ }
+ const MSimdConvert* mir() const {
+ return mir_->toSimdConvert();
+ }
+};
+
+// Double raised to a half power.
+class LPowHalfD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(PowHalfD);
+ explicit LPowHalfD(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return getDef(0);
+ }
+ MPowHalf* mir() const {
+ return mir_->toPowHalf();
+ }
+};
+
+// No-op instruction that only holds the entry snapshot. This simplifies
+// register allocation: the allocator doesn't need to sniff the snapshot out of
+// the LIRGraph.
+class LStart : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Start)
+};
+
+class LNaNToZero : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NaNToZero)
+
+ explicit LNaNToZero(const LAllocation& input, const LDefinition& tempDouble) {
+ setOperand(0, input);
+ setTemp(0, tempDouble);
+ }
+
+ const MNaNToZero* mir() {
+ return mir_->toNaNToZero();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return getDef(0);
+ }
+ const LDefinition* tempDouble() {
+ return getTemp(0);
+ }
+};
+
+// Passed the BaselineFrame address in the OsrFrameReg by SideCannon().
+// Forwards this object to the LOsrValues for Value materialization.
+class LOsrEntry : public LInstructionHelper<1, 0, 1>
+{
+ protected:
+ Label label_;
+ uint32_t frameDepth_;
+
+ public:
+ LIR_HEADER(OsrEntry)
+
+ explicit LOsrEntry(const LDefinition& temp)
+ : frameDepth_(0)
+ {
+ setTemp(0, temp);
+ }
+
+ void setFrameDepth(uint32_t depth) {
+ frameDepth_ = depth;
+ }
+ uint32_t getFrameDepth() {
+ return frameDepth_;
+ }
+ Label* label() {
+ return &label_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Materialize a Value stored in an interpreter frame for OSR.
+class LOsrValue : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrValue)
+
+ explicit LOsrValue(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrValue* mir() {
+ return mir_->toOsrValue();
+ }
+};
+
+// Materialize a JSObject env chain stored in an interpreter frame for OSR.
+class LOsrEnvironmentChain : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrEnvironmentChain)
+
+ explicit LOsrEnvironmentChain(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrEnvironmentChain* mir() {
+ return mir_->toOsrEnvironmentChain();
+ }
+};
+
+// Materialize the return value stored in an interpreter frame for OSR.
+class LOsrReturnValue : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrReturnValue)
+
+ explicit LOsrReturnValue(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrReturnValue* mir() {
+ return mir_->toOsrReturnValue();
+ }
+};
+
+// Materialize the ArgumentsObject stored in an interpreter frame for OSR.
+class LOsrArgumentsObject : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrArgumentsObject)
+
+ explicit LOsrArgumentsObject(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrArgumentsObject* mir() {
+ return mir_->toOsrArgumentsObject();
+ }
+};
+
+class LRegExp : public LCallInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(RegExp)
+
+ const MRegExp* mir() const {
+ return mir_->toRegExp();
+ }
+};
+
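+// Run |regexp| against |string| starting at |lastIndex|, returning the match
+// result (a match object or null) as a boxed Value. Implemented as a call.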
+class LRegExpMatcher : public LCallInstructionHelper<BOX_PIECES, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpMatcher)
+
+ LRegExpMatcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpMatcher* mir() const {
+ return mir_->toRegExpMatcher();
+ }
+};
+
+class LRegExpSearcher : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpSearcher)
+
+ LRegExpSearcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpSearcher* mir() const {
+ return mir_->toRegExpSearcher();
+ }
+};
+
+class LRegExpTester : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpTester)
+
+ LRegExpTester(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpTester* mir() const {
+ return mir_->toRegExpTester();
+ }
+};
+
+class LRegExpPrototypeOptimizable : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(RegExpPrototypeOptimizable);
+ explicit LRegExpPrototypeOptimizable(const LAllocation& object, const LDefinition& temp) {
+ setOperand(0, object);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRegExpPrototypeOptimizable* mir() const {
+ return mir_->toRegExpPrototypeOptimizable();
+ }
+};
+
+class LRegExpInstanceOptimizable : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(RegExpInstanceOptimizable);
+ explicit LRegExpInstanceOptimizable(const LAllocation& object, const LAllocation& proto,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, proto);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* proto() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRegExpInstanceOptimizable* mir() const {
+ return mir_->toRegExpInstanceOptimizable();
+ }
+};
+
+class LGetFirstDollarIndex : public LInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(GetFirstDollarIndex);
+ explicit LGetFirstDollarIndex(const LAllocation& str, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2) {
+ setOperand(0, str);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LAllocation* str() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+};
+
+class LStringReplace: public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(StringReplace);
+
+ LStringReplace(const LAllocation& string, const LAllocation& pattern,
+ const LAllocation& replacement)
+ {
+ setOperand(0, string);
+ setOperand(1, pattern);
+ setOperand(2, replacement);
+ }
+
+ const MStringReplace* mir() const {
+ return mir_->toStringReplace();
+ }
+
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* pattern() {
+ return getOperand(1);
+ }
+ const LAllocation* replacement() {
+ return getOperand(2);
+ }
+};
+
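+// Call a shared (Baseline/Ion) IC stub to perform a binary operation on two
+// boxed operands, producing a boxed result.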
+class LBinarySharedStub : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(BinarySharedStub)
+
+ LBinarySharedStub(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ const MBinarySharedStub* mir() const {
+ return mir_->toBinarySharedStub();
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+class LUnarySharedStub : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(UnarySharedStub)
+
+ explicit LUnarySharedStub(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ const MUnarySharedStub* mir() const {
+ return mir_->toUnarySharedStub();
+ }
+
+ static const size_t Input = 0;
+};
+
+class LNullarySharedStub : public LCallInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(NullarySharedStub)
+
+ const MNullarySharedStub* mir() const {
+ return mir_->toNullarySharedStub();
+ }
+};
+
+class LLambdaForSingleton : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LambdaForSingleton)
+
+ explicit LLambdaForSingleton(const LAllocation& envChain)
+ {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MLambda* mir() const {
+ return mir_->toLambda();
+ }
+};
+
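+// Create a function object (closure) over the given environment chain.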
+class LLambda : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Lambda)
+
+ LLambda(const LAllocation& envChain, const LDefinition& temp) {
+ setOperand(0, envChain);
+ setTemp(0, temp);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MLambda* mir() const {
+ return mir_->toLambda();
+ }
+};
+
+class LLambdaArrow : public LInstructionHelper<1, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(LambdaArrow)
+
+ static const size_t NewTargetValue = 1;
+
+ LLambdaArrow(const LAllocation& envChain, const LBoxAllocation& newTarget) {
+ setOperand(0, envChain);
+ setBoxOperand(NewTargetValue, newTarget);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MLambdaArrow* mir() const {
+ return mir_->toLambdaArrow();
+ }
+};
+
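+// Keep the given object alive across this point, for the benefit of the GC,
+// while only a derived pointer (such as its elements) is otherwise used.
+// Generates no code.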
+class LKeepAliveObject : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(KeepAliveObject)
+
+ explicit LKeepAliveObject(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load the "slots" member out of a JSObject.
+// Input: JSObject pointer
+// Output: slots pointer
+class LSlots : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Slots)
+
+ explicit LSlots(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load the "elements" member out of a JSObject.
+// Input: JSObject pointer
+// Output: elements pointer
+class LElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Elements)
+
+ explicit LElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const MElements* mir() const {
+ return mir_->toElements();
+ }
+};
+
+// If necessary, convert any int32 elements in a vector into doubles.
+class LConvertElementsToDoubles : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(ConvertElementsToDoubles)
+
+ explicit LConvertElementsToDoubles(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// If |elements| has the CONVERT_DOUBLE_ELEMENTS flag, convert int32 value to
+// double. Else return the original value.
+class LMaybeToDoubleElement : public LInstructionHelper<BOX_PIECES, 2, 1>
+{
+ public:
+ LIR_HEADER(MaybeToDoubleElement)
+
+ LMaybeToDoubleElement(const LAllocation& elements, const LAllocation& value,
+ const LDefinition& tempFloat) {
+ setOperand(0, elements);
+ setOperand(1, value);
+ setTemp(0, tempFloat);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+};
+
+// If necessary, copy the elements in an object so they may be written to.
+class LMaybeCopyElementsForWrite : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(MaybeCopyElementsForWrite)
+
+ explicit LMaybeCopyElementsForWrite(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const MMaybeCopyElementsForWrite* mir() const {
+ return mir_->toMaybeCopyElementsForWrite();
+ }
+};
+
+// Load the initialized length from an elements header.
+class LInitializedLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(InitializedLength)
+
+ explicit LInitializedLength(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// Store to the initialized length in an elements header. Note the input is an
+// *index*, one less than the desired initialized length.
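+// (For example, passing index 7 sets the initialized length to 8.)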
+class LSetInitializedLength : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(SetInitializedLength)
+
+ LSetInitializedLength(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LUnboxedArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxedArrayLength)
+
+ explicit LUnboxedArrayLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LUnboxedArrayInitializedLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxedArrayInitializedLength)
+
+ explicit LUnboxedArrayInitializedLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LIncrementUnboxedArrayInitializedLength : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(IncrementUnboxedArrayInitializedLength)
+
+ explicit LIncrementUnboxedArrayInitializedLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LSetUnboxedArrayInitializedLength : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(SetUnboxedArrayInitializedLength)
+
+ explicit LSetUnboxedArrayInitializedLength(const LAllocation& object,
+ const LAllocation& length,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Load the length from an elements header.
+class LArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ArrayLength)
+
+ explicit LArrayLength(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// Store to the length in an elements header. Note the input is an *index*,
+// one less than the desired length.
+class LSetArrayLength : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(SetArrayLength)
+
+ LSetArrayLength(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
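+// Advance a Map or Set iterator, storing its next entry into |result| and
+// returning whether iteration has finished.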
+class LGetNextEntryForIterator : public LInstructionHelper<1, 2, 3>
+{
+ public:
+ LIR_HEADER(GetNextEntryForIterator)
+
+ explicit LGetNextEntryForIterator(const LAllocation& iter, const LAllocation& result,
+ const LDefinition& temp0, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, iter);
+ setOperand(1, result);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const MGetNextEntryForIterator* mir() const {
+ return mir_->toGetNextEntryForIterator();
+ }
+ const LAllocation* iter() {
+ return getOperand(0);
+ }
+ const LAllocation* result() {
+ return getOperand(1);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+};
+
+// Read the length of a typed array.
+class LTypedArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedArrayLength)
+
+ explicit LTypedArrayLength(const LAllocation& obj) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load a typed array's elements vector.
+class LTypedArrayElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedArrayElements)
+
+ explicit LTypedArrayElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Assign
+//
+// target[targetOffset..targetOffset + source.length] = source[0..source.length]
+//
+// where the source element range doesn't overlap the target element range in
+// memory.
+class LSetDisjointTypedElements : public LCallInstructionHelper<0, 3, 1>
+{
+ public:
+ LIR_HEADER(SetDisjointTypedElements)
+
+ explicit LSetDisjointTypedElements(const LAllocation& target, const LAllocation& targetOffset,
+ const LAllocation& source, const LDefinition& temp)
+ {
+ setOperand(0, target);
+ setOperand(1, targetOffset);
+ setOperand(2, source);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* target() {
+ return getOperand(0);
+ }
+
+ const LAllocation* targetOffset() {
+ return getOperand(1);
+ }
+
+ const LAllocation* source() {
+ return getOperand(2);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Load a typed object's descriptor.
+class LTypedObjectDescr : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedObjectDescr)
+
+ explicit LTypedObjectDescr(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load a typed object's elements vector.
+class LTypedObjectElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedObjectElements)
+
+ explicit LTypedObjectElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MTypedObjectElements* mir() const {
+ return mir_->toTypedObjectElements();
+ }
+};
+
+// Change the offset at which a derived typed object points into its owner's data.
+class LSetTypedObjectOffset : public LInstructionHelper<0, 2, 2>
+{
+ public:
+ LIR_HEADER(SetTypedObjectOffset)
+
+ LSetTypedObjectOffset(const LAllocation& object,
+ const LAllocation& offset,
+ const LDefinition& temp0,
+ const LDefinition& temp1)
+ {
+ setOperand(0, object);
+ setOperand(1, offset);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* offset() {
+ return getOperand(1);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+// Bailout if index >= length.
+class LBoundsCheck : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(BoundsCheck)
+
+ LBoundsCheck(const LAllocation& index, const LAllocation& length) {
+ setOperand(0, index);
+ setOperand(1, length);
+ }
+ const MBoundsCheck* mir() const {
+ return mir_->toBoundsCheck();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+};
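+
+// A typical lowering that creates one of these bounds checks looks roughly as
+// follows. This is only a sketch using assumed LIRGenerator helpers, not the
+// exact SpiderMonkey lowering code:
+//
+//   void LIRGenerator::visitBoundsCheck(MBoundsCheck* ins) {
+//     // Operands may be registers or constants; the snapshot lets the check
+//     // bail out when it fails.
+//     LBoundsCheck* check = new(alloc()) LBoundsCheck(useRegisterOrConstant(ins->index()),
+//                                                     useRegisterOrConstant(ins->length()));
+//     assignSnapshot(check, Bailout_BoundsCheck);
+//     add(check, ins);
+//   }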
+
+// Bailout if index + minimum < 0 or index + maximum >= length.
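+// For example, with minimum = -2 and maximum = 3, index i is accepted only
+// when i - 2 >= 0 and i + 3 < length.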
+class LBoundsCheckRange : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(BoundsCheckRange)
+
+ LBoundsCheckRange(const LAllocation& index, const LAllocation& length,
+ const LDefinition& temp)
+ {
+ setOperand(0, index);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+ const MBoundsCheck* mir() const {
+ return mir_->toBoundsCheck();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+};
+
+// Bailout if index < minimum.
+class LBoundsCheckLower : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(BoundsCheckLower)
+
+ explicit LBoundsCheckLower(const LAllocation& index)
+ {
+ setOperand(0, index);
+ }
+ MBoundsCheckLower* mir() const {
+ return mir_->toBoundsCheckLower();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+};
+
+// Load a value from a dense array's elements vector. Bail out if it's the hole value.
+class LLoadElementV : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadElementV)
+
+ LLoadElementV(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElement* mir() const {
+ return mir_->toLoadElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LInArray : public LInstructionHelper<1, 4, 0>
+{
+ public:
+ LIR_HEADER(InArray)
+
+ LInArray(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& initLength, const LAllocation& object)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ setOperand(3, object);
+ }
+ const MInArray* mir() const {
+ return mir_->toInArray();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* initLength() {
+ return getOperand(2);
+ }
+ const LAllocation* object() {
+ return getOperand(3);
+ }
+};
+
+// Load a value from an array's elements vector, loading |undefined| if we hit a hole.
+// Bail out if we get a negative index.
+class LLoadElementHole : public LInstructionHelper<BOX_PIECES, 3, 0>
+{
+ public:
+ LIR_HEADER(LoadElementHole)
+
+ LLoadElementHole(const LAllocation& elements, const LAllocation& index, const LAllocation& initLength) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElementHole* mir() const {
+ return mir_->toLoadElementHole();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* initLength() {
+ return getOperand(2);
+ }
+};
+
+// Load a typed value from a dense array's elements vector. The array must be
+// known to be packed, so that we don't have to check for the hole value.
+// This instruction does not load the type tag and can directly load into a
+// FP register.
+class LLoadElementT : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadElementT)
+
+ LLoadElementT(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck"
+ : (mir()->loadDoubles() ? "Doubles" : nullptr);
+ }
+
+ const MLoadElement* mir() const {
+ return mir_->toLoadElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedPointerV : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedPointerV)
+
+ LLoadUnboxedPointerV(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const MLoadUnboxedObjectOrNull* mir() const {
+ return mir_->toLoadUnboxedObjectOrNull();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedPointerT : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedPointerT)
+
+ LLoadUnboxedPointerT(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ MDefinition* mir() {
+ MOZ_ASSERT(mir_->isLoadUnboxedObjectOrNull() || mir_->isLoadUnboxedString());
+ return mir_;
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LUnboxObjectOrNull : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxObjectOrNull);
+
+ explicit LUnboxObjectOrNull(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+// Store a boxed value to a dense array's elements vector.
+class LStoreElementV : public LInstructionHelper<0, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreElementV)
+
+ LStoreElementV(const LAllocation& elements, const LAllocation& index,
+ const LBoxAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setBoxOperand(Value, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ static const size_t Value = 2;
+
+ const MStoreElement* mir() const {
+ return mir_->toStoreElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+// Store a typed value to a dense array's elements vector. Compared to
+// LStoreElementV, this instruction can store doubles and constants directly,
+// and does not store the type tag if the array is monomorphic and known to
+// be packed.
+class LStoreElementT : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreElementT)
+
+ LStoreElementT(const LAllocation& elements, const LAllocation& index, const LAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MStoreElement* mir() const {
+ return mir_->toStoreElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
+
+// Like LStoreElementV, but supports indexes >= initialized length.
+class LStoreElementHoleV : public LInstructionHelper<0, 3 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(StoreElementHoleV)
+
+ LStoreElementHoleV(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 3;
+
+ const MStoreElementHole* mir() const {
+ return mir_->toStoreElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+};
+
+// Like LStoreElementT, but supports indexes >= initialized length.
+class LStoreElementHoleT : public LInstructionHelper<0, 4, 1>
+{
+ public:
+ LIR_HEADER(StoreElementHoleT)
+
+ LStoreElementHoleT(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, temp);
+ }
+
+ const MStoreElementHole* mir() const {
+ return mir_->toStoreElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+// Like LStoreElementV, but may ignore the assignment (e.g. for frozen objects).
+class LFallibleStoreElementV : public LInstructionHelper<0, 3 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(FallibleStoreElementV)
+
+ LFallibleStoreElementV(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 3;
+
+ const MFallibleStoreElement* mir() const {
+ return mir_->toFallibleStoreElement();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+};
+
+// Like LStoreElementT, but may ignore the assignment (e.g. for frozen objects).
+class LFallibleStoreElementT : public LInstructionHelper<0, 4, 1>
+{
+ public:
+ LIR_HEADER(FallibleStoreElementT)
+
+ LFallibleStoreElementT(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, temp);
+ }
+
+ const MFallibleStoreElement* mir() const {
+ return mir_->toFallibleStoreElement();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+class LStoreUnboxedPointer : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreUnboxedPointer)
+
+ LStoreUnboxedPointer(LAllocation elements, LAllocation index, LAllocation value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ MDefinition* mir() {
+ MOZ_ASSERT(mir_->isStoreUnboxedObjectOrNull() || mir_->isStoreUnboxedString());
+ return mir_;
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
+
+// If necessary, convert an unboxed object in a particular group to its native
+// representation.
+class LConvertUnboxedObjectToNative : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(ConvertUnboxedObjectToNative)
+
+ explicit LConvertUnboxedObjectToNative(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ MConvertUnboxedObjectToNative* mir() {
+ return mir_->toConvertUnboxedObjectToNative();
+ }
+};
+
+class LArrayPopShiftV : public LInstructionHelper<BOX_PIECES, 1, 2>
+{
+ public:
+ LIR_HEADER(ArrayPopShiftV)
+
+ LArrayPopShiftV(const LAllocation& object, const LDefinition& temp0, const LDefinition& temp1) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const {
+ return mir_->toArrayPopShift();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+class LArrayPopShiftT : public LInstructionHelper<1, 1, 2>
+{
+ public:
+ LIR_HEADER(ArrayPopShiftT)
+
+ LArrayPopShiftT(const LAllocation& object, const LDefinition& temp0, const LDefinition& temp1) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const {
+ return mir_->toArrayPopShift();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+class LArrayPushV : public LInstructionHelper<1, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ArrayPushV)
+
+ LArrayPushV(const LAllocation& object, const LBoxAllocation& value, const LDefinition& temp) {
+ setOperand(0, object);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 1;
+
+ const MArrayPush* mir() const {
+ return mir_->toArrayPush();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LArrayPushT : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(ArrayPushT)
+
+ LArrayPushT(const LAllocation& object, const LAllocation& value, const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MArrayPush* mir() const {
+ return mir_->toArrayPush();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LArraySlice : public LCallInstructionHelper<1, 3, 2>
+{
+ public:
+ LIR_HEADER(ArraySlice)
+
+ LArraySlice(const LAllocation& obj, const LAllocation& begin, const LAllocation& end,
+ const LDefinition& temp1, const LDefinition& temp2) {
+ setOperand(0, obj);
+ setOperand(1, begin);
+ setOperand(2, end);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const MArraySlice* mir() const {
+ return mir_->toArraySlice();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* begin() {
+ return getOperand(1);
+ }
+ const LAllocation* end() {
+ return getOperand(2);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+};
+
+class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(ArrayJoin)
+
+ LArrayJoin(const LAllocation& array, const LAllocation& sep) {
+ setOperand(0, array);
+ setOperand(1, sep);
+ }
+
+ const MArrayJoin* mir() const {
+ return mir_->toArrayJoin();
+ }
+ const LAllocation* array() {
+ return getOperand(0);
+ }
+ const LAllocation* separator() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedScalar : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(LoadUnboxedScalar)
+
+ LLoadUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+ const MLoadUnboxedScalar* mir() const {
+ return mir_->toLoadUnboxedScalar();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LLoadTypedArrayElementHole : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadTypedArrayElementHole)
+
+ LLoadTypedArrayElementHole(const LAllocation& object, const LAllocation& index) {
+ setOperand(0, object);
+ setOperand(1, index);
+ }
+ const MLoadTypedArrayElementHole* mir() const {
+ return mir_->toLoadTypedArrayElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadTypedArrayElementStatic : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadTypedArrayElementStatic);
+ explicit LLoadTypedArrayElementStatic(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MLoadTypedArrayElementStatic* mir() const {
+ return mir_->toLoadTypedArrayElementStatic();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+class LStoreUnboxedScalar : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreUnboxedScalar)
+
+ LStoreUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
+
+class LStoreTypedArrayElementHole : public LInstructionHelper<0, 4, 0>
+{
+ public:
+ LIR_HEADER(StoreTypedArrayElementHole)
+
+ LStoreTypedArrayElementHole(const LAllocation& elements, const LAllocation& length,
+ const LAllocation& index, const LAllocation& value)
+ {
+ setOperand(0, elements);
+ setOperand(1, length);
+ setOperand(2, index);
+ setOperand(3, value);
+ }
+
+ const MStoreTypedArrayElementHole* mir() const {
+ return mir_->toStoreTypedArrayElementHole();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+class LStoreTypedArrayElementStatic : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreTypedArrayElementStatic);
+ LStoreTypedArrayElementStatic(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ }
+ MStoreTypedArrayElementStatic* mir() const {
+ return mir_->toStoreTypedArrayElementStatic();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
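+// Implements Atomics.isLockFree(size): returns whether atomic operations on
+// values of the given byte size are lock-free on this platform.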
+class LAtomicIsLockFree : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AtomicIsLockFree)
+
+ explicit LAtomicIsLockFree(const LAllocation& value) {
+ setOperand(0, value);
+ }
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+};
+
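+// Atomic compare-and-exchange on a typed array element: if the element at
+// |index| equals |oldval|, |newval| is stored; the element's previous value is
+// returned either way.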
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4>
+{
+ public:
+ LIR_HEADER(CompareExchangeTypedArrayElement)
+
+ LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ }
+ LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* oldval() {
+ return getOperand(2);
+ }
+ const LAllocation* newval() {
+ return getOperand(3);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on LL/SC platforms to extract/insert the bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MCompareExchangeTypedArrayElement* mir() const {
+ return mir_->toCompareExchangeTypedArrayElement();
+ }
+};
+
+class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4>
+{
+ public:
+ LIR_HEADER(AtomicExchangeTypedArrayElement)
+
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ }
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on LL/SC platforms to extract/insert the bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MAtomicExchangeTypedArrayElement* mir() const {
+ return mir_->toAtomicExchangeTypedArrayElement();
+ }
+};
+
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5>
+{
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinop)
+
+ static const int32_t valueOp = 2;
+
+ LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 2);
+ return getOperand(2);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ // Temp that may be used on LL/SC platforms to extract/insert the bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(4);
+ }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAtomicTypedArrayElementBinopForEffect : public LInstructionHelper<0, 3, 4>
+{
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
+
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ }
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& flagTemp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(0);
+ }
+ // Temp that may be used on LL/SC platforms to extract/insert the bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
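+// Compute base + index * scale + displacement in a single instruction, as
+// folded together by effective-address analysis.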
+class LEffectiveAddress : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(EffectiveAddress);
+
+ LEffectiveAddress(const LAllocation& base, const LAllocation& index) {
+ setOperand(0, base);
+ setOperand(1, index);
+ }
+ const MEffectiveAddress* mir() const {
+ return mir_->toEffectiveAddress();
+ }
+ const LAllocation* base() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
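+// Clamp an int32 to the range [0, 255], as required when storing into a
+// Uint8ClampedArray.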
+class LClampIToUint8 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ClampIToUint8)
+
+ explicit LClampIToUint8(const LAllocation& in) {
+ setOperand(0, in);
+ }
+};
+
+class LClampDToUint8 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(ClampDToUint8)
+
+ LClampDToUint8(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+};
+
+class LClampVToUint8 : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ClampVToUint8)
+
+ LClampVToUint8(const LBoxAllocation& input, const LDefinition& tempFloat) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempFloat);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+ const MClampToUint8* mir() const {
+ return mir_->toClampToUint8();
+ }
+};
+
+// Load a boxed value from an object's fixed slot.
+class LLoadFixedSlotV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotV)
+
+ explicit LLoadFixedSlotV(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const {
+ return mir_->toLoadFixedSlot();
+ }
+};
+
+// Load a typed value from an object's fixed slot.
+class LLoadFixedSlotT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotT)
+
+ explicit LLoadFixedSlotT(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const {
+ return mir_->toLoadFixedSlot();
+ }
+};
+
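+// Load a value from an object's fixed slot and unbox it to a known type in a
+// single step.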
+class LLoadFixedSlotAndUnbox : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotAndUnbox)
+
+ explicit LLoadFixedSlotAndUnbox(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const MLoadFixedSlotAndUnbox* mir() const {
+ return mir_->toLoadFixedSlotAndUnbox();
+ }
+};
+
+// Store a boxed value to an object's fixed slot.
+class LStoreFixedSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreFixedSlotV)
+
+ LStoreFixedSlotV(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreFixedSlot* mir() const {
+ return mir_->toStoreFixedSlot();
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+};
+
+// Store a typed value to an object's fixed slot.
+class LStoreFixedSlotT : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreFixedSlotT)
+
+ LStoreFixedSlotT(const LAllocation& obj, const LAllocation& value)
+ {
+ setOperand(0, obj);
+ setOperand(1, value);
+ }
+ const MStoreFixedSlot* mir() const {
+ return mir_->toStoreFixedSlot();
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+// Note: name ICs always return a Value, so there are no V/T variants.
+class LGetNameCache : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetNameCache)
+
+ explicit LGetNameCache(const LAllocation& envObj) {
+ setOperand(0, envObj);
+ }
+ const LAllocation* envObj() {
+ return getOperand(0);
+ }
+ const MGetNameCache* mir() const {
+ return mir_->toGetNameCache();
+ }
+};
+
+class LCallGetIntrinsicValue : public LCallInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(CallGetIntrinsicValue)
+
+ const MCallGetIntrinsicValue* mir() const {
+ return mir_->toCallGetIntrinsicValue();
+ }
+};
+
+// Patchable jump to stubs generated for a GetProperty cache, which loads a
+// boxed value.
+class LGetPropertyCacheV : public LInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyCacheV)
+
+ static const size_t Id = 1;
+
+ LGetPropertyCacheV(const LAllocation& object, const LBoxAllocation& id) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ }
+ const MGetPropertyCache* mir() const {
+ return mir_->toGetPropertyCache();
+ }
+};
+
+// Patchable jump to stubs generated for a GetProperty cache, which loads a
+// value of a known type, possibly into an FP register.
+class LGetPropertyCacheT : public LInstructionHelper<1, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyCacheT)
+
+ static const size_t Id = 1;
+
+ LGetPropertyCacheT(const LAllocation& object, const LBoxAllocation& id) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ }
+ const MGetPropertyCache* mir() const {
+ return mir_->toGetPropertyCache();
+ }
+};
+
+// Emit code to load a boxed value from an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LGetPropertyPolymorphicV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyPolymorphicV)
+
+ explicit LGetPropertyPolymorphicV(const LAllocation& obj) {
+ setOperand(0, obj);
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const MGetPropertyPolymorphic* mir() const {
+ return mir_->toGetPropertyPolymorphic();
+ }
+ virtual const char* extraName() const {
+ return PropertyNameToExtraName(mir()->name());
+ }
+};
+
+// Emit code to load a typed value from an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LGetPropertyPolymorphicT : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(GetPropertyPolymorphicT)
+
+ LGetPropertyPolymorphicT(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MGetPropertyPolymorphic* mir() const {
+ return mir_->toGetPropertyPolymorphic();
+ }
+ virtual const char* extraName() const {
+ return PropertyNameToExtraName(mir()->name());
+ }
+};
+
+// Emit code to store a boxed value to an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LSetPropertyPolymorphicV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(SetPropertyPolymorphicV)
+
+ LSetPropertyPolymorphicV(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 1;
+
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MSetPropertyPolymorphic* mir() const {
+ return mir_->toSetPropertyPolymorphic();
+ }
+};
+
+// Emit code to store a typed value to an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LSetPropertyPolymorphicT : public LInstructionHelper<0, 2, 1>
+{
+ MIRType valueType_;
+
+ public:
+ LIR_HEADER(SetPropertyPolymorphicT)
+
+ LSetPropertyPolymorphicT(const LAllocation& obj, const LAllocation& value, MIRType valueType,
+ const LDefinition& temp)
+ : valueType_(valueType)
+ {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MIRType valueType() const {
+ return valueType_;
+ }
+ const MSetPropertyPolymorphic* mir() const {
+ return mir_->toSetPropertyPolymorphic();
+ }
+ const char* extraName() const {
+ return StringFromMIRType(valueType_);
+ }
+};
+
+class LBindNameCache : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BindNameCache)
+
+ explicit LBindNameCache(const LAllocation& envChain) {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MBindNameCache* mir() const {
+ return mir_->toBindNameCache();
+ }
+};
+
+class LCallBindVar : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CallBindVar)
+
+ explicit LCallBindVar(const LAllocation& envChain) {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MCallBindVar* mir() const {
+ return mir_->toCallBindVar();
+ }
+};
+
+// Load a value from an object's dslots or a slots vector.
+class LLoadSlotV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadSlotV)
+
+ explicit LLoadSlotV(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const MLoadSlot* mir() const {
+ return mir_->toLoadSlot();
+ }
+};
+
+// Load a typed value from an object's dslots or a slots vector. Unlike
+// LLoadSlotV, this can bypass extracting a type tag, directly retrieving a
+// pointer, integer, or double.
+class LLoadSlotT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadSlotT)
+
+ explicit LLoadSlotT(const LAllocation& slots) {
+ setOperand(0, slots);
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ const MLoadSlot* mir() const {
+ return mir_->toLoadSlot();
+ }
+};
+
+// Store a value to an object's dslots or a slots vector.
+class LStoreSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreSlotV)
+
+ LStoreSlotV(const LAllocation& slots, const LBoxAllocation& value) {
+ setOperand(0, slots);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreSlot* mir() const {
+ return mir_->toStoreSlot();
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+};
+
+// Store a typed value to an object's dslots or a slots vector. This has a
+// few advantages over LStoreSlotV:
+// 1) We can bypass storing the type tag if the slot has the same type as
+// the value.
+// 2) Better register allocation: we can store constants and FP regs directly
+// without requiring a second register for the value.
+class LStoreSlotT : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreSlotT)
+
+ LStoreSlotT(const LAllocation& slots, const LAllocation& value) {
+ setOperand(0, slots);
+ setOperand(1, value);
+ }
+ const MStoreSlot* mir() const {
+ return mir_->toStoreSlot();
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+// Read the length field of a JSString*.
+class LStringLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(StringLength)
+
+ explicit LStringLength(const LAllocation& string) {
+ setOperand(0, string);
+ }
+
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+};
+
+// Take the floor of a double precision number. Implements Math.floor().
+class LFloor : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Floor)
+
+ explicit LFloor(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the floor of a single precision number. Implements Math.floor().
+class LFloorF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FloorF)
+
+ explicit LFloorF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the ceiling of a double precision number. Implements Math.ceil().
+class LCeil : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Ceil)
+
+ explicit LCeil(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the ceiling of a single precision number. Implements Math.ceil().
+class LCeilF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CeilF)
+
+ explicit LCeilF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Round a double precision number. Implements Math.round().
+class LRound : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Round)
+
+ LRound(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRound* mir() const {
+ return mir_->toRound();
+ }
+};
+
+// Round a single precision number. Implements Math.round().
+class LRoundF : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(RoundF)
+
+ LRoundF(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRound* mir() const {
+ return mir_->toRound();
+ }
+};
+
+// Load a function's call environment.
+class LFunctionEnvironment : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FunctionEnvironment)
+
+ explicit LFunctionEnvironment(const LAllocation& function) {
+ setOperand(0, function);
+ }
+ const LAllocation* function() {
+ return getOperand(0);
+ }
+};
+
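+// Call a VM function to get a named property from a generic value.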
+class LCallGetProperty : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallGetProperty)
+
+ static const size_t Value = 0;
+
+ explicit LCallGetProperty(const LBoxAllocation& val) {
+ setBoxOperand(Value, val);
+ }
+
+ MCallGetProperty* mir() const {
+ return mir_->toCallGetProperty();
+ }
+};
+
+// Call js::GetElement.
+class LCallGetElement : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallGetElement)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCallGetElement(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCallGetElement* mir() const {
+ return mir_->toCallGetElement();
+ }
+};
+
+// Call js::SetElement.
+class LCallSetElement : public LCallInstructionHelper<0, 1 + 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallSetElement)
+
+ static const size_t Index = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ LCallSetElement(const LAllocation& obj, const LBoxAllocation& index,
+ const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Index, index);
+ setBoxOperand(Value, value);
+ }
+
+ const MCallSetElement* mir() const {
+ return mir_->toCallSetElement();
+ }
+};
+
+// Call js::InitElementArray.
+class LCallInitElementArray : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:

+ LIR_HEADER(CallInitElementArray)
+
+ static const size_t Value = 1;
+
+ LCallInitElementArray(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ const MCallInitElementArray* mir() const {
+ return mir_->toCallInitElementArray();
+ }
+};
+
+// Call a VM function to perform a property or name assignment of a generic value.
+class LCallSetProperty : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallSetProperty)
+
+ LCallSetProperty(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MCallSetProperty* mir() const {
+ return mir_->toCallSetProperty();
+ }
+};
+
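+// Call a VM function to delete a named property from a generic value.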
+class LCallDeleteProperty : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDeleteProperty)
+
+ static const size_t Value = 0;
+
+ explicit LCallDeleteProperty(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+
+ MDeleteProperty* mir() const {
+ return mir_->toDeleteProperty();
+ }
+};
+
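+// Call a VM function to delete an element from a generic value.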
+class LCallDeleteElement : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDeleteElement)
+
+ static const size_t Value = 0;
+ static const size_t Index = BOX_PIECES;
+
+ LCallDeleteElement(const LBoxAllocation& value, const LBoxAllocation& index) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Index, index);
+ }
+
+ MDeleteElement* mir() const {
+ return mir_->toDeleteElement();
+ }
+};
+
+// Patchable jump to stubs generated for a SetProperty cache.
+class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 4>
+{
+ public:
+ LIR_HEADER(SetPropertyCache)
+
+ LSetPropertyCache(const LAllocation& object, const LBoxAllocation& id,
+ const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnboxIndex, const LDefinition& tempDouble,
+ const LDefinition& tempFloat32) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnboxIndex);
+ setTemp(2, tempDouble);
+ setTemp(3, tempFloat32);
+ }
+
+ static const size_t Id = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ const MSetPropertyCache* mir() const {
+ return mir_->toSetPropertyCache();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* tempToUnboxIndex() {
+ return getTemp(1);
+ }
+ const LDefinition* tempDouble() {
+ return getTemp(2);
+ }
+ const LDefinition* tempFloat32() {
+ if (hasUnaliasedDouble())
+ return getTemp(3);
+ return getTemp(2);
+ }
+};
+
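+// Call a VM function to create an iterator for a boxed value.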
+class LCallIteratorStartV : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallIteratorStartV)
+
+ static const size_t Value = 0;
+
+ explicit LCallIteratorStartV(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
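+// Call a VM function to create an iterator for a known object.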
+class LCallIteratorStartO : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CallIteratorStartO)
+
+ explicit LCallIteratorStartO(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
+class LIteratorStartO : public LInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(IteratorStartO)
+
+ LIteratorStartO(const LAllocation& object, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, object);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
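+// Advance an iterator, producing the next value or the end-of-iteration magic value.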
+class LIteratorMore : public LInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(IteratorMore)
+
+ LIteratorMore(const LAllocation& iterator, const LDefinition& temp) {
+ setOperand(0, iterator);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MIteratorMore* mir() const {
+ return mir_->toIteratorMore();
+ }
+};
+
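+// Branch on whether a boxed value is the end-of-iteration magic value.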
+class LIsNoIterAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsNoIterAndBranch)
+
+ LIsNoIterAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse, const LBoxAllocation& input) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
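+// Close an iterator when leaving a for-in loop.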
+class LIteratorEnd : public LInstructionHelper<0, 1, 3>
+{
+ public:
+ LIR_HEADER(IteratorEnd)
+
+ LIteratorEnd(const LAllocation& iterator, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, iterator);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ MIteratorEnd* mir() const {
+ return mir_->toIteratorEnd();
+ }
+};
+
+// Read the number of actual arguments.
+class LArgumentsLength : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(ArgumentsLength)
+};
+
+// Load a value from the actual arguments.
+class LGetFrameArgument : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetFrameArgument)
+
+ explicit LGetFrameArgument(const LAllocation& index) {
+ setOperand(0, index);
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+};
+
+// Store a typed value to the actual arguments.
+class LSetFrameArgumentT : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(SetFrameArgumentT)
+
+ explicit LSetFrameArgumentT(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+// Store a constant value to the actual arguments.
+class LSetFrameArgumentC : public LInstructionHelper<0, 0, 0>
+{
+ Value val_;
+
+ public:
+ LIR_HEADER(SetFrameArgumentC)
+
+ explicit LSetFrameArgumentC(const Value& val) {
+ val_ = val;
+ }
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+ const Value& val() const {
+ return val_;
+ }
+};
+
+// Store a boxed value to the actual arguments.
+class LSetFrameArgumentV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(SetFrameArgumentV)
+
+ explicit LSetFrameArgumentV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+};
+
+class LRunOncePrologue : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(RunOncePrologue)
+
+ MRunOncePrologue* mir() const {
+ return mir_->toRunOncePrologue();
+ }
+};
+
+// Create the rest parameter.
+class LRest : public LCallInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(Rest)
+
+ LRest(const LAllocation& numActuals, const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ {
+ setOperand(0, numActuals);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* numActuals() {
+ return getOperand(0);
+ }
+ MRest* mir() const {
+ return mir_->toRest();
+ }
+};
+
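+// Guard that an object matches one of a set of known receiver shapes/groups.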
+class LGuardReceiverPolymorphic : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardReceiverPolymorphic)
+
+ LGuardReceiverPolymorphic(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MGuardReceiverPolymorphic* mir() const {
+ return mir_->toGuardReceiverPolymorphic();
+ }
+};
+
+class LGuardUnboxedExpando : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(GuardUnboxedExpando)
+
+ explicit LGuardUnboxedExpando(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MGuardUnboxedExpando* mir() const {
+ return mir_->toGuardUnboxedExpando();
+ }
+};
+
+class LLoadUnboxedExpando : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedExpando)
+
+ explicit LLoadUnboxedExpando(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MLoadUnboxedExpando* mir() const {
+ return mir_->toLoadUnboxedExpando();
+ }
+};
+
+// Guard that a value is in a TypeSet.
+class LTypeBarrierV : public LInstructionHelper<0, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(TypeBarrierV)
+
+ LTypeBarrierV(const LBoxAllocation& input, const LDefinition& temp) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MTypeBarrier* mir() const {
+ return mir_->toTypeBarrier();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard that an object is in a TypeSet.
+class LTypeBarrierO : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(TypeBarrierO)
+
+ LTypeBarrierO(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+ const MTypeBarrier* mir() const {
+ return mir_->toTypeBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard that a value is in a TypeSet.
+class LMonitorTypes : public LInstructionHelper<0, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(MonitorTypes)
+
+ LMonitorTypes(const LBoxAllocation& input, const LDefinition& temp) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MMonitorTypes* mir() const {
+ return mir_->toMonitorTypes();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing an object to another object.
+class LPostWriteBarrierO : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(PostWriteBarrierO)
+
+ LPostWriteBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteBarrier* mir() const {
+ return mir_->toPostWriteBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing a value to another object.
+class LPostWriteBarrierV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PostWriteBarrierV)
+
+ LPostWriteBarrierV(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 1;
+
+ const MPostWriteBarrier* mir() const {
+ return mir_->toPostWriteBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing an object to another object's
+// elements.
+class LPostWriteElementBarrierO : public LInstructionHelper<0, 3, 1>
+{
+ public:
+ LIR_HEADER(PostWriteElementBarrierO)
+
+ LPostWriteElementBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LAllocation& index, const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setOperand(2, index);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing a value to another object's
+// elements.
+class LPostWriteElementBarrierV : public LInstructionHelper<0, 2 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PostWriteElementBarrierV)
+
+ LPostWriteElementBarrierV(const LAllocation& obj, const LAllocation& index,
+ const LBoxAllocation& value, const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, index);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 2;
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard on an object's identity.
+class LGuardObjectIdentity : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(GuardObjectIdentity)
+
+ explicit LGuardObjectIdentity(const LAllocation& in, const LAllocation& expected) {
+ setOperand(0, in);
+ setOperand(1, expected);
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LAllocation* expected() {
+ return getOperand(1);
+ }
+ const MGuardObjectIdentity* mir() const {
+ return mir_->toGuardObjectIdentity();
+ }
+};
+
+// Guard on an object's class.
+class LGuardClass : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardClass)
+
+ LGuardClass(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardClass* mir() const {
+ return mir_->toGuardClass();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+// Guard on whether a TypedArray's memory is shared.
+class LGuardSharedTypedArray : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardSharedTypedArray)
+
+ LGuardSharedTypedArray(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardSharedTypedArray* mir() const {
+ return mir_->toGuardSharedTypedArray();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
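+// Call a VM function implementing the 'in' operator.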
+class LIn : public LCallInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(In)
+ LIn(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+ const LAllocation* rhs() {
+ return getOperand(RHS);
+ }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LInstanceOfO : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(InstanceOfO)
+ explicit LInstanceOfO(const LAllocation& lhs) {
+ setOperand(0, lhs);
+ }
+
+ MInstanceOf* mir() const {
+ return mir_->toInstanceOf();
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+};
+
+class LInstanceOfV : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InstanceOfV)
+ explicit LInstanceOfV(const LBoxAllocation& lhs) {
+ setBoxOperand(LHS, lhs);
+ }
+
+ MInstanceOf* mir() const {
+ return mir_->toInstanceOf();
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+
+ static const size_t LHS = 0;
+};
+
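+// Call a VM function implementing the 'instanceof' operator.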
+class LCallInstanceOf : public LCallInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(CallInstanceOf)
+ LCallInstanceOf(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+ const LAllocation* rhs() {
+ return getOperand(RHS);
+ }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LIsCallable : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsCallable);
+ explicit LIsCallable(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIsCallable* mir() const {
+ return mir_->toIsCallable();
+ }
+};
+
+class LIsConstructor : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsConstructor);
+ explicit LIsConstructor(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIsConstructor* mir() const {
+ return mir_->toIsConstructor();
+ }
+};
+
+class LIsObject : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsObject);
+ static const size_t Input = 0;
+
+ explicit LIsObject(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MIsObject* mir() const {
+ return mir_->toIsObject();
+ }
+};
+
+class LIsObjectAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsObjectAndBranch)
+
+ LIsObjectAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse, const LBoxAllocation& input) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+class LHasClass : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(HasClass);
+ explicit LHasClass(const LAllocation& lhs) {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ MHasClass* mir() const {
+ return mir_->toHasClass();
+ }
+};
+
+template<size_t Defs, size_t Ops>
+class LWasmSelectBase : public LInstructionHelper<Defs, Ops, 0>
+{
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+ public:
+
+ MWasmSelect* mir() const {
+ return Base::mir_->toWasmSelect();
+ }
+};
+
+class LWasmSelect : public LWasmSelectBase<1, 3>
+{
+ public:
+ LIR_HEADER(WasmSelect);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = 1;
+ static const size_t CondIndex = 2;
+
+ LWasmSelect(const LAllocation& trueExpr, const LAllocation& falseExpr,
+ const LAllocation& cond)
+ {
+ setOperand(TrueExprIndex, trueExpr);
+ setOperand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LAllocation* trueExpr() {
+ return getOperand(TrueExprIndex);
+ }
+ const LAllocation* falseExpr() {
+ return getOperand(FalseExprIndex);
+ }
+ const LAllocation* condExpr() {
+ return getOperand(CondIndex);
+ }
+};
+
+class LWasmSelectI64 : public LWasmSelectBase<INT64_PIECES, 2 * INT64_PIECES + 1>
+{
+ public:
+ LIR_HEADER(WasmSelectI64);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = INT64_PIECES;
+ static const size_t CondIndex = INT64_PIECES * 2;
+
+ LWasmSelectI64(const LInt64Allocation& trueExpr, const LInt64Allocation& falseExpr,
+ const LAllocation& cond)
+ {
+ setInt64Operand(TrueExprIndex, trueExpr);
+ setInt64Operand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LInt64Allocation trueExpr() {
+ return getInt64Operand(TrueExprIndex);
+ }
+ const LInt64Allocation falseExpr() {
+ return getInt64Operand(FalseExprIndex);
+ }
+ const LAllocation* condExpr() {
+ return getOperand(CondIndex);
+ }
+};
+
+class LWasmAddOffset : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmAddOffset);
+ explicit LWasmAddOffset(const LAllocation& base) {
+ setOperand(0, base);
+ }
+ MWasmAddOffset* mir() const {
+ return mir_->toWasmAddOffset();
+ }
+ const LAllocation* base() {
+ return getOperand(0);
+ }
+};
+
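+// Bounds check for a wasm heap access.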
+class LWasmBoundsCheck : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmBoundsCheck);
+ explicit LWasmBoundsCheck(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MWasmBoundsCheck* mir() const {
+ return mir_->toWasmBoundsCheck();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+namespace details {
+
+// This is a base class for LWasmLoad/LWasmLoadI64.
+template<size_t Defs, size_t Temp>
+class LWasmLoadBase : public LInstructionHelper<Defs, 1, Temp>
+{
+ public:
+ typedef LInstructionHelper<Defs, 1, Temp> Base;
+ explicit LWasmLoadBase(const LAllocation& ptr) {
+ Base::setOperand(0, ptr);
+ }
+ MWasmLoad* mir() const {
+ return Base::mir_->toWasmLoad();
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(0);
+ }
+};
+
+} // namespace details
+
+class LWasmLoad : public details::LWasmLoadBase<1, 1>
+{
+ public:
+ explicit LWasmLoad(const LAllocation& ptr)
+ : LWasmLoadBase(ptr)
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+
+ LIR_HEADER(WasmLoad);
+};
+
+class LWasmLoadI64 : public details::LWasmLoadBase<INT64_PIECES, 1>
+{
+ public:
+ explicit LWasmLoadI64(const LAllocation& ptr)
+ : LWasmLoadBase(ptr)
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+
+ LIR_HEADER(WasmLoadI64);
+};
+
+class LWasmStore : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(WasmStore);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmStore(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(PtrIndex, ptr);
+ setOperand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const {
+ return mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return getTemp(0);
+ }
+ const LAllocation* value() {
+ return getOperand(ValueIndex);
+ }
+};
+
+class LWasmStoreI64 : public LInstructionHelper<0, INT64_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(WasmStoreI64);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmStoreI64(const LAllocation& ptr, const LInt64Allocation& value) {
+ setOperand(PtrIndex, ptr);
+ setInt64Operand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const {
+ return mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return getTemp(0);
+ }
+ const LInt64Allocation value() {
+ return getInt64Operand(ValueIndex);
+ }
+};
+
+class LAsmJSLoadHeap : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AsmJSLoadHeap);
+ explicit LAsmJSLoadHeap(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MAsmJSLoadHeap* mir() const {
+ return mir_->toAsmJSLoadHeap();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+class LAsmJSStoreHeap : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(AsmJSStoreHeap);
+ LAsmJSStoreHeap(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ }
+ MAsmJSStoreHeap* mir() const {
+ return mir_->toAsmJSStoreHeap();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+class LAsmJSCompareExchangeHeap : public LInstructionHelper<1, 3, 4>
+{
+ public:
+ LIR_HEADER(AsmJSCompareExchangeHeap);
+
+ LAsmJSCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue)
+ {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ LAsmJSCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* oldValue() {
+ return getOperand(1);
+ }
+ const LAllocation* newValue() {
+ return getOperand(2);
+ }
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+ // Temps that may be used on LL/SC platforms to extract/insert the bits of a word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ MAsmJSCompareExchangeHeap* mir() const {
+ return mir_->toAsmJSCompareExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicExchangeHeap : public LInstructionHelper<1, 2, 4>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicExchangeHeap);
+
+ LAsmJSAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ LAsmJSAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+ // Temps that may be used on LL/SC platforms to extract/insert the bits of a word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ MAsmJSAtomicExchangeHeap* mir() const {
+ return mir_->toAsmJSAtomicExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicBinopHeap : public LInstructionHelper<1, 2, 6>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicBinopHeap);
+
+ static const int32_t valueOp = 1;
+
+ LAsmJSAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ }
+ LAsmJSAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp, const LDefinition& flagTemp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ setTemp(3, valueTemp);
+ setTemp(4, offsetTemp);
+ setTemp(5, maskTemp);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 1);
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() {
+ return getTemp(1);
+ }
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(1, addrTemp);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(2);
+ }
+ // Temps that may be used on LL/SC platforms to extract/insert the bits of a word.
+ const LDefinition* valueTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(4);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(5);
+ }
+
+ MAsmJSAtomicBinopHeap* mir() const {
+ return mir_->toAsmJSAtomicBinopHeap();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAsmJSAtomicBinopHeapForEffect : public LInstructionHelper<0, 2, 5>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicBinopHeapForEffect);
+ LAsmJSAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ }
+ LAsmJSAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(1);
+ }
+ // Temps that may be used on LL/SC platforms to extract/insert the bits of a word.
+ const LDefinition* valueTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(4);
+ }
+
+ MAsmJSAtomicBinopHeap* mir() const {
+ return mir_->toAsmJSAtomicBinopHeap();
+ }
+};
+
+class LWasmLoadGlobalVar : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmLoadGlobalVar);
+ MWasmLoadGlobalVar* mir() const {
+ return mir_->toWasmLoadGlobalVar();
+ }
+};
+
+class LWasmLoadGlobalVarI64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmLoadGlobalVarI64);
+ MWasmLoadGlobalVar* mir() const {
+ return mir_->toWasmLoadGlobalVar();
+ }
+};
+
+class LWasmStoreGlobalVar : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmStoreGlobalVar);
+ explicit LWasmStoreGlobalVar(const LAllocation& value) {
+ setOperand(0, value);
+ }
+ MWasmStoreGlobalVar* mir() const {
+ return mir_->toWasmStoreGlobalVar();
+ }
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+};
+
+class LWasmStoreGlobalVarI64 : public LInstructionHelper<0, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WasmStoreGlobalVarI64);
+ explicit LWasmStoreGlobalVarI64(const LInt64Allocation& value) {
+ setInt64Operand(0, value);
+ }
+ MWasmStoreGlobalVar* mir() const {
+ return mir_->toWasmStoreGlobalVar();
+ }
+ static const uint32_t InputIndex = 0;
+
+ const LInt64Allocation value() {
+ return getInt64Operand(InputIndex);
+ }
+};
+
+class LWasmParameter : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmParameter);
+};
+
+class LWasmParameterI64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmParameterI64);
+};
+
+class LWasmReturn : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(WasmReturn);
+};
+
+class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(WasmReturnI64)
+
+ explicit LWasmReturnI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+class LWasmReturnVoid : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmReturnVoid);
+};
+
+class LWasmStackArg : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmStackArg);
+ explicit LWasmStackArg(const LAllocation& arg) {
+ setOperand(0, arg);
+ }
+ MWasmStackArg* mir() const {
+ return mirRaw()->toWasmStackArg();
+ }
+ const LAllocation* arg() {
+ return getOperand(0);
+ }
+};
+
+class LWasmStackArgI64 : public LInstructionHelper<0, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WasmStackArgI64);
+ explicit LWasmStackArgI64(const LInt64Allocation& arg) {
+ setInt64Operand(0, arg);
+ }
+ MWasmStackArg* mir() const {
+ return mirRaw()->toWasmStackArg();
+ }
+ const LInt64Allocation arg() {
+ return getInt64Operand(0);
+ }
+};
+
+class LWasmCallBase : public LInstruction
+{
+ LAllocation* operands_;
+ uint32_t numOperands_;
+
+ public:
+
+ LWasmCallBase(LAllocation* operands, uint32_t numOperands)
+ : operands_(operands),
+ numOperands_(numOperands)
+ {}
+
+ MWasmCall* mir() const {
+ return mir_->toWasmCall();
+ }
+
+ bool isCall() const override {
+ return true;
+ }
+ bool isCallPreserved(AnyRegister reg) const override {
+ // All MWasmCalls preserve the TLS register:
+ // - internal/indirect calls do so through the internal wasm ABI
+ // - import calls do so by explicitly saving/restoring at the callsite
+ // - builtin calls do so because the TLS reg is non-volatile
+ return !reg.isFloat() && reg.gpr() == WasmTlsReg;
+ }
+
+ // LInstruction interface
+ size_t numOperands() const override {
+ return numOperands_;
+ }
+ LAllocation* getOperand(size_t index) override {
+ MOZ_ASSERT(index < numOperands_);
+ return &operands_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) override {
+ MOZ_ASSERT(index < numOperands_);
+ operands_[index] = a;
+ }
+ size_t numTemps() const override {
+ return 0;
+ }
+ LDefinition* getTemp(size_t index) override {
+ MOZ_CRASH("no temps");
+ }
+ void setTemp(size_t index, const LDefinition& a) override {
+ MOZ_CRASH("no temps");
+ }
+ size_t numSuccessors() const override {
+ return 0;
+ }
+ MBasicBlock* getSuccessor(size_t i) const override {
+ MOZ_CRASH("no successors");
+ }
+ void setSuccessor(size_t i, MBasicBlock*) override {
+ MOZ_CRASH("no successors");
+ }
+};
+
+class LWasmCall : public LWasmCallBase
+{
+ LDefinition def_;
+
+ public:
+ LIR_HEADER(WasmCall);
+
+ LWasmCall(LAllocation* operands, uint32_t numOperands)
+ : LWasmCallBase(operands, numOperands),
+ def_(LDefinition::BogusTemp())
+ {}
+
+ // LInstruction interface
+ size_t numDefs() const {
+ return def_.isBogusTemp() ? 0 : 1;
+ }
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(numDefs() == 1);
+ MOZ_ASSERT(index == 0);
+ return &def_;
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index == 0);
+ def_ = def;
+ }
+};
+
+class LWasmCallI64 : public LWasmCallBase
+{
+ LDefinition defs_[INT64_PIECES];
+
+ public:
+ LIR_HEADER(WasmCallI64);
+
+ LWasmCallI64(LAllocation* operands, uint32_t numOperands)
+ : LWasmCallBase(operands, numOperands)
+ {
+ for (size_t i = 0; i < numDefs(); i++)
+ defs_[i] = LDefinition::BogusTemp();
+ }
+
+ // LInstruction interface
+ size_t numDefs() const {
+ return INT64_PIECES;
+ }
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(index < numDefs());
+ return &defs_[index];
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index < numDefs());
+ defs_[index] = def;
+ }
+};
+
+class LAssertRangeI : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(AssertRangeI)
+
+ explicit LAssertRangeI(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeD : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(AssertRangeD)
+
+ LAssertRangeD(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeF : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(AssertRangeF)
+ LAssertRangeF(const LAllocation& input, const LDefinition& temp, const LDefinition& temp2) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(AssertRangeV)
+
+ LAssertRangeV(const LBoxAllocation& input, const LDefinition& temp,
+ const LDefinition& floatTemp1, const LDefinition& floatTemp2)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ setTemp(1, floatTemp1);
+ setTemp(2, floatTemp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* floatTemp1() {
+ return getTemp(1);
+ }
+ const LDefinition* floatTemp2() {
+ return getTemp(2);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertResultT : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(AssertResultT)
+
+ explicit LAssertResultT(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+class LAssertResultV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(AssertResultV)
+
+ static const size_t Input = 0;
+
+ explicit LAssertResultV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+};
+
+class LRecompileCheck : public LInstructionHelper<0, 0, 1>
+{
+ public:
+ LIR_HEADER(RecompileCheck)
+
+ explicit LRecompileCheck(const LDefinition& scratch) {
+ setTemp(0, scratch);
+ }
+
+ const LDefinition* scratch() {
+ return getTemp(0);
+ }
+ MRecompileCheck* mir() {
+ return mir_->toRecompileCheck();
+ }
+};
+
+class LLexicalCheck : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(LexicalCheck)
+
+ explicit LLexicalCheck(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MLexicalCheck* mir() {
+ return mir_->toLexicalCheck();
+ }
+
+ static const size_t Input = 0;
+};
+
+class LThrowRuntimeLexicalError : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(ThrowRuntimeLexicalError)
+
+ MThrowRuntimeLexicalError* mir() {
+ return mir_->toThrowRuntimeLexicalError();
+ }
+};
+
+class LGlobalNameConflictsCheck : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(GlobalNameConflictsCheck)
+
+ MGlobalNameConflictsCheck* mir() {
+ return mir_->toGlobalNameConflictsCheck();
+ }
+};
+
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0>
+{
+ private:
+ const MemoryBarrierBits type_;
+
+ public:
+ LIR_HEADER(MemoryBarrier)
+
+ // The parameter 'type' is a bitwise 'or' of the barrier types needed;
+ // see AtomicOp.h.
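+ // For example, a full fence would pass MembarLoadLoad | MembarLoadStore |
+ // MembarStoreLoad | MembarStoreStore.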
+ explicit LMemoryBarrier(MemoryBarrierBits type) : type_(type)
+ {
+ MOZ_ASSERT((type_ & ~MembarAllbits) == MembarNobits);
+ }
+
+ MemoryBarrierBits type() const {
+ return type_;
+ }
+};
+
+class LDebugger : public LCallInstructionHelper<0, 0, 2>
+{
+ public:
+ LIR_HEADER(Debugger)
+
+ LDebugger(const LDefinition& temp1, const LDefinition& temp2) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+};
+
+class LNewTarget : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(NewTarget)
+};
+
+class LArrowNewTarget : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ explicit LArrowNewTarget(const LAllocation& callee) {
+ setOperand(0, callee);
+ }
+
+ LIR_HEADER(ArrowNewTarget)
+
+ const LAllocation* callee() {
+ return getOperand(0);
+ }
+};
+
+// Math.random().
+#ifdef JS_PUNBOX64
+# define LRANDOM_NUM_TEMPS 3
+#else
+# define LRANDOM_NUM_TEMPS 5
+#endif
+
+class LRandom : public LInstructionHelper<1, 0, LRANDOM_NUM_TEMPS>
+{
+ public:
+ LIR_HEADER(Random)
+ LRandom(const LDefinition& temp0, const LDefinition& temp1,
+ const LDefinition& temp2
+#ifndef JS_PUNBOX64
+ , const LDefinition& temp3, const LDefinition& temp4
+#endif
+ )
+ {
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+#ifndef JS_PUNBOX64
+ setTemp(3, temp3);
+ setTemp(4, temp4);
+#endif
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+#ifndef JS_PUNBOX64
+ const LDefinition* temp3() {
+ return getTemp(3);
+ }
+ const LDefinition* temp4() {
+ return getTemp(4);
+ }
+#endif
+
+ MRandom* mir() const {
+ return mir_->toRandom();
+ }
+};
+
+class LCheckReturn : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckReturn)
+
+ LCheckReturn(const LBoxAllocation& retVal, const LBoxAllocation& thisVal) {
+ setBoxOperand(ReturnValue, retVal);
+ setBoxOperand(ThisValue, thisVal);
+ }
+
+ static const size_t ReturnValue = 0;
+ static const size_t ThisValue = BOX_PIECES;
+};
+
+class LCheckIsObj : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckIsObj)
+
+ static const size_t CheckValue = 0;
+
+ explicit LCheckIsObj(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+
+ MCheckIsObj* mir() const {
+ return mir_->toCheckIsObj();
+ }
+};
+
+class LCheckObjCoercible : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckObjCoercible)
+
+ static const size_t CheckValue = 0;
+
+ explicit LCheckObjCoercible(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+class LDebugCheckSelfHosted : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(DebugCheckSelfHosted)
+
+ static const size_t CheckValue = 0;
+
+ explicit LDebugCheckSelfHosted(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_LIR_shared_h */
diff --git a/js/src/jit/shared/LOpcodes-shared.h b/js/src/jit/shared/LOpcodes-shared.h
new file mode 100644
index 000000000..bb04553a6
--- /dev/null
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -0,0 +1,441 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LOpcodes_shared_h
+#define jit_shared_LOpcodes_shared_h
+
+#define LIR_COMMON_OPCODE_LIST(_) \
+ _(Unbox) \
+ _(Box) \
+ _(UnboxFloatingPoint) \
+ _(OsiPoint) \
+ _(MoveGroup) \
+ _(Integer) \
+ _(Integer64) \
+ _(Pointer) \
+ _(Double) \
+ _(Float32) \
+ _(SimdBox) \
+ _(SimdUnbox) \
+ _(SimdSplatX16) \
+ _(SimdSplatX8) \
+ _(SimdSplatX4) \
+ _(Simd128Int) \
+ _(Simd128Float) \
+ _(SimdAllTrue) \
+ _(SimdAnyTrue) \
+ _(SimdReinterpretCast) \
+ _(SimdExtractElementI) \
+ _(SimdExtractElementU2D) \
+ _(SimdExtractElementB) \
+ _(SimdExtractElementF) \
+ _(SimdInsertElementI) \
+ _(SimdInsertElementF) \
+ _(SimdGeneralShuffleI) \
+ _(SimdGeneralShuffleF) \
+ _(SimdSwizzleI) \
+ _(SimdSwizzleF) \
+ _(SimdShuffle) \
+ _(SimdShuffleX4) \
+ _(SimdUnaryArithIx16) \
+ _(SimdUnaryArithIx8) \
+ _(SimdUnaryArithIx4) \
+ _(SimdUnaryArithFx4) \
+ _(SimdBinaryCompIx16) \
+ _(SimdBinaryCompIx8) \
+ _(SimdBinaryCompIx4) \
+ _(SimdBinaryCompFx4) \
+ _(SimdBinaryArithIx16) \
+ _(SimdBinaryArithIx8) \
+ _(SimdBinaryArithIx4) \
+ _(SimdBinaryArithFx4) \
+ _(SimdBinarySaturating) \
+ _(SimdBinaryBitwise) \
+ _(SimdShift) \
+ _(SimdSelect) \
+ _(Value) \
+ _(CloneLiteral) \
+ _(Parameter) \
+ _(Callee) \
+ _(IsConstructing) \
+ _(TableSwitch) \
+ _(TableSwitchV) \
+ _(Goto) \
+ _(NewArray) \
+ _(NewArrayCopyOnWrite) \
+ _(NewArrayDynamicLength) \
+ _(NewTypedArray) \
+ _(NewTypedArrayDynamicLength) \
+ _(ArraySplice) \
+ _(NewObject) \
+ _(NewTypedObject) \
+ _(NewNamedLambdaObject) \
+ _(NewCallObject) \
+ _(NewSingletonCallObject) \
+ _(NewStringObject) \
+ _(NewDerivedTypedObject) \
+ _(InitElem) \
+ _(InitElemGetterSetter) \
+ _(MutateProto) \
+ _(InitProp) \
+ _(InitPropGetterSetter) \
+ _(CheckOverRecursed) \
+ _(DefVar) \
+ _(DefLexical) \
+ _(DefFun) \
+ _(CallKnown) \
+ _(CallGeneric) \
+ _(CallNative) \
+ _(ApplyArgsGeneric) \
+ _(ApplyArrayGeneric) \
+ _(Bail) \
+ _(Unreachable) \
+ _(EncodeSnapshot) \
+ _(GetDynamicName) \
+ _(CallDirectEval) \
+ _(StackArgT) \
+ _(StackArgV) \
+ _(CreateThis) \
+ _(CreateThisWithProto) \
+ _(CreateThisWithTemplate) \
+ _(CreateArgumentsObject) \
+ _(GetArgumentsObjectArg) \
+ _(SetArgumentsObjectArg) \
+ _(ReturnFromCtor) \
+ _(ComputeThis) \
+ _(BitNotI) \
+ _(BitNotV) \
+ _(BitOpI) \
+ _(BitOpI64) \
+ _(BitOpV) \
+ _(ShiftI) \
+ _(ShiftI64) \
+ _(SignExtend) \
+ _(UrshD) \
+ _(Return) \
+ _(Throw) \
+ _(Phi) \
+ _(TestIAndBranch) \
+ _(TestI64AndBranch) \
+ _(TestDAndBranch) \
+ _(TestFAndBranch) \
+ _(TestVAndBranch) \
+ _(TestOAndBranch) \
+ _(FunctionDispatch) \
+ _(ObjectGroupDispatch) \
+ _(Compare) \
+ _(CompareAndBranch) \
+ _(CompareI64) \
+ _(CompareI64AndBranch) \
+ _(CompareD) \
+ _(CompareDAndBranch) \
+ _(CompareF) \
+ _(CompareFAndBranch) \
+ _(CompareS) \
+ _(CompareStrictS) \
+ _(CompareB) \
+ _(CompareBAndBranch) \
+ _(CompareBitwise) \
+ _(CompareBitwiseAndBranch) \
+ _(CompareVM) \
+ _(BitAndAndBranch) \
+ _(IsNullOrLikeUndefinedV) \
+ _(IsNullOrLikeUndefinedT) \
+ _(IsNullOrLikeUndefinedAndBranchV)\
+ _(IsNullOrLikeUndefinedAndBranchT)\
+ _(MinMaxI) \
+ _(MinMaxD) \
+ _(MinMaxF) \
+ _(NegI) \
+ _(NegD) \
+ _(NegF) \
+ _(AbsI) \
+ _(AbsD) \
+ _(AbsF) \
+ _(ClzI) \
+ _(ClzI64) \
+ _(CtzI) \
+ _(CtzI64) \
+ _(PopcntI) \
+ _(PopcntI64) \
+ _(SqrtD) \
+ _(SqrtF) \
+ _(CopySignD) \
+ _(CopySignF) \
+ _(Atan2D) \
+ _(Hypot) \
+ _(PowI) \
+ _(PowD) \
+ _(PowHalfD) \
+ _(Random) \
+ _(MathFunctionD) \
+ _(MathFunctionF) \
+ _(NotI) \
+ _(NotI64) \
+ _(NotD) \
+ _(NotF) \
+ _(NotO) \
+ _(NotV) \
+ _(AddI) \
+ _(AddI64) \
+ _(SubI) \
+ _(SubI64) \
+ _(MulI) \
+ _(MulI64) \
+ _(MathD) \
+ _(MathF) \
+ _(DivI) \
+ _(DivPowTwoI) \
+ _(ModI) \
+ _(ModPowTwoI) \
+ _(ModD) \
+ _(BinaryV) \
+ _(Concat) \
+ _(CharCodeAt) \
+ _(FromCharCode) \
+ _(FromCodePoint) \
+ _(SinCos) \
+ _(StringSplit) \
+ _(Int32ToDouble) \
+ _(Float32ToDouble) \
+ _(DoubleToFloat32) \
+ _(Int32ToFloat32) \
+ _(ValueToDouble) \
+ _(ValueToInt32) \
+ _(ValueToFloat32) \
+ _(DoubleToInt32) \
+ _(Float32ToInt32) \
+ _(TruncateDToInt32) \
+ _(TruncateFToInt32) \
+ _(WrapInt64ToInt32) \
+ _(ExtendInt32ToInt64) \
+ _(BooleanToString) \
+ _(IntToString) \
+ _(DoubleToString) \
+ _(ValueToString) \
+ _(ValueToObjectOrNull) \
+ _(Int32x4ToFloat32x4) \
+ _(Float32x4ToInt32x4) \
+ _(Float32x4ToUint32x4) \
+ _(Start) \
+ _(NaNToZero) \
+ _(OsrEntry) \
+ _(OsrValue) \
+ _(OsrEnvironmentChain) \
+ _(OsrReturnValue) \
+ _(OsrArgumentsObject) \
+ _(RegExp) \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(RegExpTester) \
+ _(RegExpPrototypeOptimizable) \
+ _(RegExpInstanceOptimizable) \
+ _(GetFirstDollarIndex) \
+ _(StringReplace) \
+ _(Substr) \
+ _(BinarySharedStub) \
+ _(UnarySharedStub) \
+ _(NullarySharedStub) \
+ _(Lambda) \
+ _(LambdaArrow) \
+ _(LambdaForSingleton) \
+ _(KeepAliveObject) \
+ _(Slots) \
+ _(Elements) \
+ _(ConvertElementsToDoubles) \
+ _(MaybeToDoubleElement) \
+ _(MaybeCopyElementsForWrite) \
+ _(LoadSlotV) \
+ _(LoadSlotT) \
+ _(StoreSlotV) \
+ _(StoreSlotT) \
+ _(GuardShape) \
+ _(GuardReceiverPolymorphic) \
+ _(GuardObjectGroup) \
+ _(GuardObjectIdentity) \
+ _(GuardClass) \
+ _(GuardUnboxedExpando) \
+ _(LoadUnboxedExpando) \
+ _(TypeBarrierV) \
+ _(TypeBarrierO) \
+ _(MonitorTypes) \
+ _(PostWriteBarrierO) \
+ _(PostWriteBarrierV) \
+ _(PostWriteElementBarrierO) \
+ _(PostWriteElementBarrierV) \
+ _(InitializedLength) \
+ _(SetInitializedLength) \
+ _(UnboxedArrayLength) \
+ _(UnboxedArrayInitializedLength) \
+ _(IncrementUnboxedArrayInitializedLength) \
+ _(SetUnboxedArrayInitializedLength) \
+ _(BoundsCheck) \
+ _(BoundsCheckRange) \
+ _(BoundsCheckLower) \
+ _(LoadElementV) \
+ _(LoadElementT) \
+ _(LoadElementHole) \
+ _(LoadUnboxedScalar) \
+ _(LoadUnboxedPointerV) \
+ _(LoadUnboxedPointerT) \
+ _(UnboxObjectOrNull) \
+ _(StoreElementV) \
+ _(StoreElementT) \
+ _(StoreUnboxedScalar) \
+ _(StoreUnboxedPointer) \
+ _(ConvertUnboxedObjectToNative) \
+ _(ArrayPopShiftV) \
+ _(ArrayPopShiftT) \
+ _(ArrayPushV) \
+ _(ArrayPushT) \
+ _(ArraySlice) \
+ _(ArrayJoin) \
+ _(StoreElementHoleV) \
+ _(StoreElementHoleT) \
+ _(FallibleStoreElementV) \
+ _(FallibleStoreElementT) \
+ _(LoadTypedArrayElementHole) \
+ _(LoadTypedArrayElementStatic) \
+ _(StoreTypedArrayElementHole) \
+ _(StoreTypedArrayElementStatic) \
+ _(AtomicIsLockFree) \
+ _(GuardSharedTypedArray) \
+ _(CompareExchangeTypedArrayElement) \
+ _(AtomicExchangeTypedArrayElement) \
+ _(AtomicTypedArrayElementBinop) \
+ _(AtomicTypedArrayElementBinopForEffect) \
+ _(EffectiveAddress) \
+ _(ClampIToUint8) \
+ _(ClampDToUint8) \
+ _(ClampVToUint8) \
+ _(LoadFixedSlotV) \
+ _(LoadFixedSlotT) \
+ _(LoadFixedSlotAndUnbox) \
+ _(StoreFixedSlotV) \
+ _(StoreFixedSlotT) \
+ _(FunctionEnvironment) \
+ _(GetPropertyCacheV) \
+ _(GetPropertyCacheT) \
+ _(GetPropertyPolymorphicV) \
+ _(GetPropertyPolymorphicT) \
+ _(BindNameCache) \
+ _(CallBindVar) \
+ _(CallGetProperty) \
+ _(GetNameCache) \
+ _(CallGetIntrinsicValue) \
+ _(CallGetElement) \
+ _(CallSetElement) \
+ _(CallInitElementArray) \
+ _(CallSetProperty) \
+ _(CallDeleteProperty) \
+ _(CallDeleteElement) \
+ _(SetPropertyCache) \
+ _(SetPropertyPolymorphicV) \
+ _(SetPropertyPolymorphicT) \
+ _(CallIteratorStartV) \
+ _(CallIteratorStartO) \
+ _(IteratorStartO) \
+ _(IteratorMore) \
+ _(IsNoIterAndBranch) \
+ _(IteratorEnd) \
+ _(ArrayLength) \
+ _(SetArrayLength) \
+ _(GetNextEntryForIterator) \
+ _(TypedArrayLength) \
+ _(TypedArrayElements) \
+ _(SetDisjointTypedElements) \
+ _(TypedObjectDescr) \
+ _(TypedObjectElements) \
+ _(SetTypedObjectOffset) \
+ _(StringLength) \
+ _(ArgumentsLength) \
+ _(GetFrameArgument) \
+ _(SetFrameArgumentT) \
+ _(SetFrameArgumentC) \
+ _(SetFrameArgumentV) \
+ _(RunOncePrologue) \
+ _(Rest) \
+ _(TypeOfV) \
+ _(ToAsync) \
+ _(ToIdV) \
+ _(Floor) \
+ _(FloorF) \
+ _(Ceil) \
+ _(CeilF) \
+ _(Round) \
+ _(RoundF) \
+ _(In) \
+ _(InArray) \
+ _(InstanceOfO) \
+ _(InstanceOfV) \
+ _(CallInstanceOf) \
+ _(InterruptCheck) \
+ _(Rotate) \
+ _(RotateI64) \
+ _(GetDOMProperty) \
+ _(GetDOMMemberV) \
+ _(GetDOMMemberT) \
+ _(SetDOMProperty) \
+ _(CallDOMNative) \
+ _(IsCallable) \
+ _(IsConstructor) \
+ _(IsObject) \
+ _(IsObjectAndBranch) \
+ _(HasClass) \
+ _(RecompileCheck) \
+ _(MemoryBarrier) \
+ _(AssertRangeI) \
+ _(AssertRangeD) \
+ _(AssertRangeF) \
+ _(AssertRangeV) \
+ _(AssertResultV) \
+ _(AssertResultT) \
+ _(LexicalCheck) \
+ _(ThrowRuntimeLexicalError) \
+ _(GlobalNameConflictsCheck) \
+ _(Debugger) \
+ _(NewTarget) \
+ _(ArrowNewTarget) \
+ _(CheckReturn) \
+ _(CheckIsObj) \
+ _(CheckObjCoercible) \
+ _(DebugCheckSelfHosted) \
+ _(AsmJSLoadHeap) \
+ _(AsmJSStoreHeap) \
+ _(AsmJSCompareExchangeHeap) \
+ _(AsmJSAtomicExchangeHeap) \
+ _(AsmJSAtomicBinopHeap) \
+ _(AsmJSAtomicBinopHeapForEffect)\
+ _(WasmTruncateToInt32) \
+ _(WasmTrap) \
+ _(WasmReinterpret) \
+ _(WasmReinterpretToI64) \
+ _(WasmReinterpretFromI64) \
+ _(WasmSelect) \
+ _(WasmSelectI64) \
+ _(WasmBoundsCheck) \
+ _(WasmAddOffset) \
+ _(WasmLoad) \
+ _(WasmLoadI64) \
+ _(WasmStore) \
+ _(WasmStoreI64) \
+ _(WasmLoadGlobalVar) \
+ _(WasmLoadGlobalVarI64) \
+ _(WasmStoreGlobalVar) \
+ _(WasmStoreGlobalVarI64) \
+ _(WasmParameter) \
+ _(WasmParameterI64) \
+ _(WasmReturn) \
+ _(WasmReturnI64) \
+ _(WasmReturnVoid) \
+ _(WasmStackArg) \
+ _(WasmStackArgI64) \
+ _(WasmCall) \
+ _(WasmCallI64) \
+ _(WasmUint32ToDouble) \
+ _(WasmUint32ToFloat32)
+
+#endif /* jit_shared_LOpcodes_shared_h */
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
new file mode 100644
index 000000000..61f1d3302
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -0,0 +1,858 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_inl_h
+#define jit_shared_Lowering_shared_inl_h
+
+#include "jit/shared/Lowering-shared.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+void
+LIRGeneratorShared::emitAtUses(MInstruction* mir)
+{
+ MOZ_ASSERT(mir->canEmitAtUses());
+ mir->setEmittedAtUses();
+ mir->setVirtualRegister(0);
+}
+
+LUse
+LIRGeneratorShared::use(MDefinition* mir, LUse policy)
+{
+ // It is illegal to call use() on an instruction with two defs.
+#if BOX_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Value);
+#endif
+#if INT64_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Int64);
+#endif
+ ensureDefined(mir);
+ policy.setVirtualRegister(mir->virtualRegister());
+ return policy;
+}
+
+template <size_t X> void
+LIRGeneratorShared::define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ define(lir, mir, LDefinition(type, policy));
+}
+
+template <size_t X> void
+LIRGeneratorShared::define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ // Assign the definition and a virtual register. Then, propagate this
+ // virtual register to the MIR, so we can map MIR to LIR during lowering.
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t X, size_t Y> void
+LIRGeneratorShared::defineFixed(LInstructionHelper<1, X, Y>* lir, MDefinition* mir, const LAllocation& output)
+{
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::FIXED);
+ def.setOutput(output);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output)
+{
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 64
+ LDefinition def(LDefinition::GENERAL, LDefinition::FIXED);
+ def.setOutput(output.value());
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+#else
+ LDefinition def0(LDefinition::GENERAL, LDefinition::FIXED);
+ def0.setOutput(output.low());
+ lir->setDef(0, def0);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+ getVirtualRegister();
+ LDefinition def1(LDefinition::GENERAL, LDefinition::FIXED);
+ def1.setOutput(output.high());
+ lir->setDef(1, def1);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir, uint32_t operand)
+{
+ // Note: any operand other than the reused one must not be marked as
+ // "atStart"; the register allocator cannot handle that and may overwrite
+ // the inputs.
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+
+ define(lir, mir, def);
+}
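+
+// Illustrative sketch: roughly how an x86-style backend lowers a two-operand
+// ALU op with this helper (names and signatures may differ per backend). The
+// reused input is marked atStart, then its operand index is passed here:
+//
+//   void
+//   LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+//                                      MDefinition* mir, MDefinition* lhs,
+//                                      MDefinition* rhs)
+//   {
+//       ins->setOperand(0, useRegisterAtStart(lhs));
+//       ins->setOperand(1, lhs != rhs ? useOrConstant(rhs)
+//                                     : useOrConstantAtStart(rhs));
+//       defineReuseInput(ins, mir, 0);
+//   }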
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand)
+{
+ // Note: any operand other than the reused one must not be marked as
+ // "atStart"; the register allocator cannot handle that and may overwrite
+ // the inputs.
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ LDefinition def1(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ lir->setDef(0, def1);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+#if JS_BITS_PER_WORD == 32
+ getVirtualRegister();
+ LDefinition def2(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ lir->setDef(1, def2);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ lir->setDef(0, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, policy));
+ lir->setDef(1, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, policy));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 32
+ lir->setDef(0, LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL, policy));
+ lir->setDef(1, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL, policy));
+ getVirtualRegister();
+#else
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorShared::defineSharedStubReturn(LInstruction* lir, MDefinition* mir)
+{
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isBinarySharedStub() || lir->isUnarySharedStub() || lir->isNullarySharedStub());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir)
+{
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ switch (mir->type()) {
+ case MIRType::Value:
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+ break;
+ case MIRType::Int64:
+#if defined(JS_NUNBOX32)
+ lir->setDef(INT64LOW_INDEX, LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.low)));
+ lir->setDef(INT64HIGH_INDEX, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.high)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, LGeneralReg(ReturnReg)));
+#endif
+ break;
+ case MIRType::Float32:
+ lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32, LFloatReg(ReturnFloat32Reg)));
+ break;
+ case MIRType::Double:
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128INT, LFloatReg(ReturnSimd128Reg)));
+ break;
+ case MIRType::Float32x4:
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128FLOAT, LFloatReg(ReturnSimd128Reg)));
+ break;
+ default:
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ MOZ_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);
+ lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
+ break;
+ }
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineSinCos(LInstructionHelper<2, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
+#if defined(JS_CODEGEN_ARM)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE,
+ LFloatReg(FloatRegister(FloatRegisters::d1, FloatRegister::Double))));
+#elif defined(JS_CODEGEN_ARM64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE,
+ LFloatReg(FloatRegister(FloatRegisters::d1, FloatRegisters::Double))));
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE, LFloatReg(f2)));
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE, LFloatReg(xmm1)));
+#else
+#error "Unsupported architecture for SinCos"
+#endif
+
+ getVirtualRegister();
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+// In LIR, we treat booleans and integers as the same low-level type (INTEGER).
+// When snapshotting, we recover the actual JS type from MIR. This function
+// checks that when making redefinitions, we don't accidentally coerce two
+// incompatible types.
+static inline bool
+IsCompatibleLIRCoercion(MIRType to, MIRType from)
+{
+ if (to == from)
+ return true;
+ if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
+ (from == MIRType::Int32 || from == MIRType::Boolean)) {
+ return true;
+ }
+ // SIMD types can be coerced with from*Bits operators.
+ if (IsSimdType(to) && IsSimdType(from))
+ return true;
+ return false;
+}
+
+// sin(x) and cos(x) can be redefined to reuse the result of a combined
+// sincos(x) computation.
+void
+LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func)
+{
+ MOZ_ASSERT(def->isMathFunction());
+ MOZ_ASSERT(def->type() == MIRType::Double && as->type() == MIRType::SinCosDouble);
+ MOZ_ASSERT(MMathFunction::Sin == func || MMathFunction::Cos == func);
+
+ ensureDefined(as);
+ MMathFunction* math = def->toMathFunction();
+
+ MOZ_ASSERT(math->function() == MMathFunction::Cos ||
+ math->function() == MMathFunction::Sin);
+
+ // sincos produces two values:
+ // - VREG holds the sin result;
+ // - VREG + VREG_INCREMENT holds the cos result.
+ if (math->function() == MMathFunction::Sin)
+ def->setVirtualRegister(as->virtualRegister());
+ else
+ def->setVirtualRegister(as->virtualRegister() + VREG_INCREMENT);
+}
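+
+// Illustrative sketch: a lowering that folds Math.sin(x)/Math.cos(x) into a
+// shared sincos(x) computation could forward each consumer to the combined
+// result roughly like this, assuming a |sincos| definition of type
+// SinCosDouble has already been created:
+//
+//   redefine(sinCall, sincos, MMathFunction::Sin);  // reads VREG
+//   redefine(cosCall, sincos, MMathFunction::Cos);  // reads VREG + VREG_INCREMENT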
+
+void
+LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as)
+{
+ MOZ_ASSERT(IsCompatibleLIRCoercion(def->type(), as->type()));
+
+ // Try to emit MIR marked as emitted-at-uses at, well, uses. For
+ // snapshotting reasons we only do this when the MIR types match, or when
+ // we are coercing between bool and int32 constants.
+ if (as->isEmittedAtUses() &&
+ (def->type() == as->type() ||
+ (as->isConstant() &&
+ (def->type() == MIRType::Int32 || def->type() == MIRType::Boolean) &&
+ (as->type() == MIRType::Int32 || as->type() == MIRType::Boolean))))
+ {
+ MInstruction* replacement;
+ if (def->type() != as->type()) {
+ if (as->type() == MIRType::Int32)
+ replacement = MConstant::New(alloc(), BooleanValue(as->toConstant()->toInt32()));
+ else
+ replacement = MConstant::New(alloc(), Int32Value(as->toConstant()->toBoolean()));
+ def->block()->insertBefore(def->toInstruction(), replacement);
+ emitAtUses(replacement->toInstruction());
+ } else {
+ replacement = as->toInstruction();
+ }
+ def->replaceAllUsesWith(replacement);
+ } else {
+ ensureDefined(as);
+ def->setVirtualRegister(as->virtualRegister());
+
+#ifdef DEBUG
+ if (JitOptions.runExtraChecks &&
+ def->resultTypeSet() && as->resultTypeSet() &&
+ !def->resultTypeSet()->equals(as->resultTypeSet()))
+ {
+ switch (def->type()) {
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ case MIRType::String:
+ case MIRType::Symbol: {
+ LAssertResultT* check = new(alloc()) LAssertResultT(useRegister(def));
+ add(check, def->toInstruction());
+ break;
+ }
+ case MIRType::Value: {
+ LAssertResultV* check = new(alloc()) LAssertResultV(useBox(def));
+ add(check, def->toInstruction());
+ break;
+ }
+ default:
+ break;
+ }
+ }
+#endif
+ }
+}
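+
+// Illustrative sketch: a common caller is a no-op conversion, where the
+// lowering simply forwards the operand's virtual register instead of emitting
+// any code, e.g. for an int32-to-int32 conversion (|convert| being the
+// conversion instruction):
+//
+//   case MIRType::Int32:
+//       redefine(convert, convert->input());
+//       break;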
+
+void
+LIRGeneratorShared::ensureDefined(MDefinition* mir)
+{
+ if (mir->isEmittedAtUses()) {
+ mir->toInstruction()->accept(this);
+ MOZ_ASSERT(mir->isLowered());
+ }
+}
+
+LUse
+LIRGeneratorShared::useRegister(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::REGISTER));
+}
+
+LUse
+LIRGeneratorShared::useRegisterAtStart(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::REGISTER, true));
+}
+
+LUse
+LIRGeneratorShared::use(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::ANY));
+}
+
+LUse
+LIRGeneratorShared::useAtStart(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::ANY, true));
+}
+
+LAllocation
+LIRGeneratorShared::useOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return use(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useOrConstantAtStart(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrConstantAtStart(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition* mir)
+{
+ if (mir->isConstant() && mir->toConstant()->isInt32(0))
+ return LAllocation();
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ if (mir->isConstant() && mir->type() != MIRType::Double && mir->type() != MIRType::Float32)
+ return LAllocation(mir->toConstant());
+ return useRegister(mir);
+}
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+LAllocation
+LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
+{
+ return useRegisterOrConstant(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorable(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorableAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useAny(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+#else
+LAllocation
+LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
+{
+ return useOrConstant(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useAny(MDefinition* mir)
+{
+ return use(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorable(MDefinition* mir)
+{
+ return useRegisterOrConstant(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorableAtStart(MDefinition* mir)
+{
+ return useRegisterOrConstantAtStart(mir);
+}
+
+#endif
+
+LAllocation
+LIRGeneratorShared::useKeepalive(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::KEEPALIVE));
+}
+
+LAllocation
+LIRGeneratorShared::useKeepaliveOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useKeepalive(mir);
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, Register reg)
+{
+ return use(mir, LUse(reg));
+}
+
+LUse
+LIRGeneratorShared::useFixedAtStart(MDefinition* mir, Register reg)
+{
+ return use(mir, LUse(reg, true));
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, FloatRegister reg)
+{
+ return use(mir, LUse(reg));
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, AnyRegister reg)
+{
+ return reg.isFloat() ? use(mir, LUse(reg.fpu())) : use(mir, LUse(reg.gpr()));
+}
+
+LUse
+LIRGeneratorShared::useFixedAtStart(MDefinition* mir, AnyRegister reg)
+{
+ return reg.isFloat() ? use(mir, LUse(reg.fpu(), true)) : use(mir, LUse(reg.gpr(), true));
+}
+
+LDefinition
+LIRGeneratorShared::temp(LDefinition::Type type, LDefinition::Policy policy)
+{
+ return LDefinition(getVirtualRegister(), type, policy);
+}
+
+LInt64Definition
+LIRGeneratorShared::tempInt64(LDefinition::Policy policy)
+{
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL, policy);
+ LDefinition low = temp(LDefinition::GENERAL, policy);
+ return LInt64Definition(high, low);
+#else
+ return LInt64Definition(temp(LDefinition::GENERAL, policy));
+#endif
+}
+
+LDefinition
+LIRGeneratorShared::tempFixed(Register reg)
+{
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg));
+ return t;
+}
+
+LDefinition
+LIRGeneratorShared::tempFloat32()
+{
+ return temp(LDefinition::FLOAT32);
+}
+
+LDefinition
+LIRGeneratorShared::tempDouble()
+{
+ return temp(LDefinition::DOUBLE);
+}
+
+LDefinition
+LIRGeneratorShared::tempCopy(MDefinition* input, uint32_t reusedInput)
+{
+ MOZ_ASSERT(input->virtualRegister());
+ LDefinition t = temp(LDefinition::TypeFrom(input->type()), LDefinition::MUST_REUSE_INPUT);
+ t.setReusedInput(reusedInput);
+ return t;
+}
+
+template <typename T> void
+LIRGeneratorShared::annotate(T* ins)
+{
+ ins->setId(lirGraph_.getInstructionId());
+}
+
+template <typename T> void
+LIRGeneratorShared::add(T* ins, MInstruction* mir)
+{
+ MOZ_ASSERT(!ins->isPhi());
+ current->add(ins);
+ if (mir) {
+ MOZ_ASSERT(current == mir->block()->lir());
+ ins->setMir(mir);
+ }
+ annotate(ins);
+}
+
+#ifdef JS_NUNBOX32
+// Returns the virtual register of a js::Value-defining instruction. This is
+// abstracted because MBox is a special value-returning instruction that
+// redefines its input payload if its input is not constant. Therefore, it is
+// illegal to request a box's payload by adding VREG_DATA_OFFSET to its raw id.
+static inline uint32_t
+VirtualRegisterOfPayload(MDefinition* mir)
+{
+ if (mir->isBox()) {
+ MDefinition* inner = mir->toBox()->getOperand(0);
+ if (!inner->isConstant() && inner->type() != MIRType::Double && inner->type() != MIRType::Float32)
+ return inner->virtualRegister();
+ }
+ if (mir->isTypeBarrier())
+ return VirtualRegisterOfPayload(mir->getOperand(0));
+ return mir->virtualRegister() + VREG_DATA_OFFSET;
+}
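+
+// For reference (assuming the usual NUNBOX32 layout where VREG_TYPE_OFFSET is
+// 0 and VREG_DATA_OFFSET is 1): a boxed Value defined at virtual register N
+// is split as
+//   N + VREG_TYPE_OFFSET  -> type tag half
+//   N + VREG_DATA_OFFSET  -> payload half
+// except when the payload can be forwarded from an unboxed input, as above.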
+
+// Note: always call ensureDefined before calling useType/usePayload,
+// so that emitted-at-use operands are handled correctly.
+LUse
+LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(mir->virtualRegister() + VREG_TYPE_OFFSET, policy);
+}
+
+LUse
+LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy);
+}
+
+LUse
+LIRGeneratorShared::usePayloadAtStart(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy, true);
+}
+
+LUse
+LIRGeneratorShared::usePayloadInRegisterAtStart(MDefinition* mir)
+{
+ return usePayloadAtStart(mir, LUse::REGISTER);
+}
+
+void
+LIRGeneratorShared::fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir)
+{
+ ensureDefined(mir);
+ lir->getOperand(n)->toUse()->setVirtualRegister(mir->virtualRegister() + VREG_TYPE_OFFSET);
+ lir->getOperand(n + 1)->toUse()->setVirtualRegister(VirtualRegisterOfPayload(mir));
+}
+#endif
+
+LUse
+LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir, MIRType type)
+{
+ MOZ_ASSERT(type != MIRType::Value && type != MIRType::None);
+ MOZ_ASSERT(mir->type() == MIRType::Object || mir->type() == MIRType::Slots);
+
+#ifdef JS_PUNBOX64
+ // On x64, masm.loadUnboxedValue emits slightly less efficient code when
+ // the input and output use the same register and we're not loading an
+ // int32/bool/double, so we just call useRegister in this case.
+ if (type != MIRType::Int32 && type != MIRType::Boolean && type != MIRType::Double)
+ return useRegister(mir);
+#endif
+
+ return useRegisterAtStart(mir);
+}
+
+LBoxAllocation
+LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart),
+ LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
+#else
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart));
+#endif
+}
+
+LBoxAllocation
+LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant)
+{
+ if (mir->type() == MIRType::Value)
+ return useBox(mir);
+
+ if (useConstant && mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LBoxAllocation(LAllocation(mir->toConstant()));
+#endif
+ }
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(useRegister(mir), LAllocation());
+#else
+ return LBoxAllocation(useRegister(mir));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64(MDefinition* mir, LUse::Policy policy, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(vreg + INT64HIGH_INDEX, policy, useAtStart),
+ LUse(vreg + INT64LOW_INDEX, policy, useAtStart));
+#else
+ return LInt64Allocation(LUse(vreg, policy, useAtStart));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(regs.high, vreg + INT64HIGH_INDEX, useAtStart),
+ LUse(regs.low, vreg + INT64LOW_INDEX, useAtStart));
+#else
+ return LInt64Allocation(LUse(regs.reg, vreg, useAtStart));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64(MDefinition* mir, bool useAtStart)
+{
+ // On 32-bit platforms, always load the value in registers.
+#if JS_BITS_PER_WORD == 32
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+#else
+ return useInt64(mir, LUse::ANY, useAtStart);
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64AtStart(MDefinition* mir)
+{
+ return useInt64(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64Register(MDefinition* mir, bool useAtStart)
+{
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64OrConstant(MDefinition* mir, bool useAtStart)
+{
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64(mir, useAtStart);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart)
+{
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64Register(mir, useAtStart);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_inl_h */
diff --git a/js/src/jit/shared/Lowering-shared.cpp b/js/src/jit/shared/Lowering-shared.cpp
new file mode 100644
index 000000000..4d313491d
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -0,0 +1,306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+
+#include "vm/Symbol.h"
+
+using namespace js;
+using namespace jit;
+
+bool
+LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs, MInstruction* ins)
+{
+ // lhs and rhs are used by the commutative operator.
+ MOZ_ASSERT(lhs->hasDefUses());
+ MOZ_ASSERT(rhs->hasDefUses());
+
+ // Ensure that if there is a constant, then it is in rhs.
+ if (rhs->isConstant())
+ return false;
+ if (lhs->isConstant())
+ return true;
+
+ // Since clobbering binary operations clobber the left operand, prefer a
+ // non-constant lhs operand with no further uses. To be fully precise, we
+ // should check whether this is the *last* use, but checking hasOneDefUse()
+ // is a decent approximation which doesn't require any extra analysis.
+ bool rhsSingleUse = rhs->hasOneDefUse();
+ bool lhsSingleUse = lhs->hasOneDefUse();
+ if (rhsSingleUse) {
+ if (!lhsSingleUse)
+ return true;
+ } else {
+ if (lhsSingleUse)
+ return false;
+ }
+
+ // If this is a reduction-style computation, such as
+ //
+ // sum = 0;
+ // for (...)
+ // sum += ...;
+ //
+ // put the phi on the left to promote coalescing. This is fairly specific.
+ if (rhsSingleUse &&
+ rhs->isPhi() &&
+ rhs->block()->isLoopHeader() &&
+ ins == rhs->toPhi()->getLoopBackedgeOperand())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+void
+LIRGeneratorShared::ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp, MInstruction* ins)
+{
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (ShouldReorderCommutative(lhs, rhs, ins)) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ }
+}
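+
+// Illustrative sketch: a typical caller swaps the operands in place before
+// lowering a commutative arithmetic node, e.g. (simplified):
+//
+//   MDefinition* lhs = ins->lhs();
+//   MDefinition* rhs = ins->rhs();
+//   ReorderCommutative(&lhs, &rhs, ins);
+//   lowerForALU(new(alloc()) LAddI, ins, lhs, rhs);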
+
+void
+LIRGeneratorShared::visitConstant(MConstant* ins)
+{
+ if (!IsFloatingPointType(ins->type()) && ins->canEmitAtUses()) {
+ emitAtUses(ins);
+ return;
+ }
+
+ switch (ins->type()) {
+ case MIRType::Double:
+ define(new(alloc()) LDouble(ins->toRawF64()), ins);
+ break;
+ case MIRType::Float32:
+ define(new(alloc()) LFloat32(ins->toRawF32()), ins);
+ break;
+ case MIRType::Boolean:
+ define(new(alloc()) LInteger(ins->toBoolean()), ins);
+ break;
+ case MIRType::Int32:
+ define(new(alloc()) LInteger(ins->toInt32()), ins);
+ break;
+ case MIRType::Int64:
+ defineInt64(new(alloc()) LInteger64(ins->toInt64()), ins);
+ break;
+ case MIRType::String:
+ define(new(alloc()) LPointer(ins->toString()), ins);
+ break;
+ case MIRType::Symbol:
+ define(new(alloc()) LPointer(ins->toSymbol()), ins);
+ break;
+ case MIRType::Object:
+ define(new(alloc()) LPointer(&ins->toObject()), ins);
+ break;
+ default:
+ // Constants of special types (undefined, null) should never flow into
+ // here directly. Operations blindly consuming them require a Box.
+ MOZ_CRASH("unexpected constant type");
+ }
+}
+
+void
+LIRGeneratorShared::defineTypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* lir = current->getPhi(lirIndex);
+
+ uint32_t vreg = getVirtualRegister();
+
+ phi->setVirtualRegister(vreg);
+ lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
+ annotate(lir);
+}
+
+void
+LIRGeneratorShared::lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* lir = block->getPhi(lirIndex);
+ lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
+}
+
+LRecoverInfo*
+LIRGeneratorShared::getRecoverInfo(MResumePoint* rp)
+{
+ if (cachedRecoverInfo_ && cachedRecoverInfo_->mir() == rp)
+ return cachedRecoverInfo_;
+
+ LRecoverInfo* recoverInfo = LRecoverInfo::New(gen, rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ cachedRecoverInfo_ = recoverInfo;
+ return recoverInfo;
+}
+
+#ifdef DEBUG
+bool
+LRecoverInfo::OperandIter::canOptimizeOutIfUnused()
+{
+ MDefinition* ins = **this;
+
+ // We check ins->type() in addition to ins->isUnused() because
+ // EliminateDeadResumePointOperands may replace nodes with the constant
+ // MagicValue(JS_OPTIMIZED_OUT).
+ if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
+ (*it_)->isResumePoint())
+ {
+ return !(*it_)->toResumePoint()->isObservableOperand(op_);
+ }
+
+ return true;
+}
+#endif
+
+#ifdef JS_NUNBOX32
+LSnapshot*
+LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind)
+{
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot)
+ return nullptr;
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* ins = *it;
+
+ if (ins->isRecoveredOnBailout())
+ continue;
+
+ LAllocation* type = snapshot->typeOfSlot(index);
+ LAllocation* payload = snapshot->payloadOfSlot(index);
+ ++index;
+
+ if (ins->isBox())
+ ins = ins->toBox()->getOperand(0);
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
+
+ // The register allocation will fill these fields in with actual
+ // register/stack assignments. During code generation, we can restore
+ // interpreter state with the given information. Note that for
+ // constants, including known types, we record a dummy placeholder,
+ // since we can recover the same information, much cleaner, from MIR.
+ if (ins->isConstant() || ins->isUnused()) {
+ *type = LAllocation();
+ *payload = LAllocation();
+ } else if (ins->type() != MIRType::Value) {
+ *type = LAllocation();
+ *payload = use(ins, LUse(LUse::KEEPALIVE));
+ } else {
+ *type = useType(ins, LUse::KEEPALIVE);
+ *payload = usePayload(ins, LUse::KEEPALIVE);
+ }
+ }
+
+ return snapshot;
+}
+
+#elif JS_PUNBOX64
+
+LSnapshot*
+LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind)
+{
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot)
+ return nullptr;
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* def = *it;
+
+ if (def->isRecoveredOnBailout())
+ continue;
+
+ if (def->isBox())
+ def = def->toBox()->getOperand(0);
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
+
+ LAllocation* a = snapshot->getEntry(index++);
+
+ if (def->isUnused()) {
+ *a = LAllocation();
+ continue;
+ }
+
+ *a = useKeepaliveOrConstant(def);
+ }
+
+ return snapshot;
+}
+#endif
+
+void
+LIRGeneratorShared::assignSnapshot(LInstruction* ins, BailoutKind kind)
+{
+ // assignSnapshot must be called before define/add, since
+ // it may add new instructions for emitted-at-use operands.
+ MOZ_ASSERT(ins->id() == 0);
+
+ LSnapshot* snapshot = buildSnapshot(ins, lastResumePoint_, kind);
+ if (snapshot)
+ ins->assignSnapshot(snapshot);
+ else
+ gen->abort("buildSnapshot failed");
+}
+
+void
+LIRGeneratorShared::assignSafepoint(LInstruction* ins, MInstruction* mir, BailoutKind kind)
+{
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ MResumePoint* mrp = mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
+ LSnapshot* postSnapshot = buildSnapshot(ins, mrp, kind);
+ if (!postSnapshot) {
+ gen->abort("buildSnapshot failed");
+ return;
+ }
+
+ osiPoint_ = new(alloc()) LOsiPoint(ins->safepoint(), postSnapshot);
+
+ if (!lirGraph_.noteNeedsSafepoint(ins))
+ gen->abort("noteNeedsSafepoint failed");
+}
+
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
new file mode 100644
index 000000000..e73df780e
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -0,0 +1,296 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_h
+#define jit_shared_Lowering_shared_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+class MDefinition;
+class MInstruction;
+class LOsiPoint;
+
+class LIRGeneratorShared : public MDefinitionVisitor
+{
+ protected:
+ MIRGenerator* gen;
+ MIRGraph& graph;
+ LIRGraph& lirGraph_;
+ LBlock* current;
+ MResumePoint* lastResumePoint_;
+ LRecoverInfo* cachedRecoverInfo_;
+ LOsiPoint* osiPoint_;
+
+ public:
+ LIRGeneratorShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : gen(gen),
+ graph(graph),
+ lirGraph_(lirGraph),
+ lastResumePoint_(nullptr),
+ cachedRecoverInfo_(nullptr),
+ osiPoint_(nullptr)
+ { }
+
+ MIRGenerator* mir() {
+ return gen;
+ }
+
+ protected:
+
+ static void ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp, MInstruction* ins);
+ static bool ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs, MInstruction* ins);
+
+ // A backend can decide that an instruction should be emitted at its uses,
+ // rather than at its definition. To communicate this, set the
+ // instruction's virtual register to 0. When using the instruction,
+ // its virtual register is temporarily reassigned. To know to clear it
+ // after constructing the use information, the worklist bit is temporarily
+ // unset.
+ //
+ // The backend can use the worklist bit to determine whether or not a
+ // definition should be created.
+ inline void emitAtUses(MInstruction* mir);
+
+ // The lowest-level calls to use(), i.e. those that do not wrap another
+ // call to use(), must call ensureDefined() before grabbing virtual
+ // register IDs.
+ inline void ensureDefined(MDefinition* mir);
+
+ // These all create a use of a virtual register, with an optional
+ // allocation policy.
+ //
+ // Some of these use functions have atStart variants.
+ // - non-atStart variants will tell the register allocator that the input
+ // allocation must be different from any Temp or Definition also needed for
+ // this LInstruction.
+ // - atStart variants relax that restriction and allow the input to be in
+ // the same register as any Temp or output Definition used by the
+ // LInstruction. Note that this does not *guarantee* it will actually
+ // happen; it merely gives the register allocator a hint that it may.
+ //
+ // TL;DR: Use non-atStart variants only if you need the input value after
+ // writing to any temp or definitions, during code generation of this
+ // LInstruction. Otherwise, use atStart variants, which will lower register
+ // pressure.
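+ //
+ // For instance (illustrative): an instruction that only reads its input
+ // before writing any temp or output can use the atStart form and let the
+ // allocator share a register,
+ //
+ //     lir->setOperand(0, useRegisterAtStart(input));
+ //
+ // whereas an instruction that still needs the input after clobbering a
+ // temp or the output must use the non-atStart form:
+ //
+ //     lir->setOperand(0, useRegister(input));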
+ inline LUse use(MDefinition* mir, LUse policy);
+ inline LUse use(MDefinition* mir);
+ inline LUse useAtStart(MDefinition* mir);
+ inline LUse useRegister(MDefinition* mir);
+ inline LUse useRegisterAtStart(MDefinition* mir);
+ inline LUse useFixed(MDefinition* mir, Register reg);
+ inline LUse useFixed(MDefinition* mir, FloatRegister reg);
+ inline LUse useFixed(MDefinition* mir, AnyRegister reg);
+ inline LUse useFixedAtStart(MDefinition* mir, Register reg);
+ inline LUse useFixedAtStart(MDefinition* mir, AnyRegister reg);
+ inline LAllocation useOrConstant(MDefinition* mir);
+ inline LAllocation useOrConstantAtStart(MDefinition* mir);
+ // "Any" is architecture dependent, and will include registers and stack
+ // slots on X86, and only registers on ARM.
+ inline LAllocation useAny(MDefinition* mir);
+ inline LAllocation useAnyOrConstant(MDefinition* mir);
+ // "Storable" is architecture dependend, and will include registers and
+ // constants on X86 and only registers on ARM. This is a generic "things
+ // we can expect to write into memory in 1 instruction".
+ inline LAllocation useStorable(MDefinition* mir);
+ inline LAllocation useStorableAtStart(MDefinition* mir);
+ inline LAllocation useKeepalive(MDefinition* mir);
+ inline LAllocation useKeepaliveOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstantAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZeroAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ inline LUse useRegisterForTypedLoad(MDefinition* mir, MIRType type);
+
+#ifdef JS_NUNBOX32
+ inline LUse useType(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayload(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadAtStart(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadInRegisterAtStart(MDefinition* mir);
+
+ // Fills in the virtual registers of a box input on an instruction, setting
+ // operand |n| to the type and |n+1| to the payload. It does not change the
+ // use policies, which are expected to already be set.
+ inline void fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir);
+#endif
+
+ // These create temporary register requests.
+ inline LDefinition temp(LDefinition::Type type = LDefinition::GENERAL,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LInt64Definition tempInt64(LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LDefinition tempFloat32();
+ inline LDefinition tempDouble();
+ inline LDefinition tempCopy(MDefinition* input, uint32_t reusedInput);
+
+ // Note that the fixed register has a GENERAL type.
+ inline LDefinition tempFixed(Register reg);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineFixed(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ const LAllocation& output);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineSinCos(LInstructionHelper<2, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ inline void defineSharedStubReturn(LInstruction* lir, MDefinition* mir);
+ inline void defineReturn(LInstruction* lir, MDefinition* mir);
+
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand);
+
+ // Returns a box allocation for a Value-typed instruction.
+ inline LBoxAllocation useBox(MDefinition* mir, LUse::Policy policy = LUse::REGISTER,
+ bool useAtStart = false);
+
+ // Returns a box allocation. The use is either typed, a Value, or
+ // a constant (if useConstant is true).
+ inline LBoxAllocation useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant);
+
+ // Returns an int64 allocation for an Int64-typed instruction.
+ inline LInt64Allocation useInt64(MDefinition* mir, LUse::Policy policy, bool useAtStart);
+ inline LInt64Allocation useInt64(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64AtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64OrConstant(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64Register(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart = false);
+
+ LInt64Allocation useInt64RegisterAtStart(MDefinition* mir) {
+ return useInt64Register(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64RegisterOrConstantAtStart(MDefinition* mir) {
+ return useInt64RegisterOrConstant(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64OrConstantAtStart(MDefinition* mir) {
+ return useInt64OrConstant(mir, /* useAtStart = */ true);
+ }
+
+ // Rather than defining a new virtual register, sets |ins| to have the same
+ // virtual register as |as|.
+ inline void redefine(MDefinition* ins, MDefinition* as);
+
+ // Redefine a sin/cos call to sincos.
+ inline void redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func);
+
+ TempAllocator& alloc() const {
+ return graph.alloc();
+ }
+
+ uint32_t getVirtualRegister() {
+ uint32_t vreg = lirGraph_.getVirtualRegister();
+
+ // If we run out of virtual registers, mark code generation as having
+ // failed and return a dummy vreg. Include a + 1 here for NUNBOX32
+ // platforms that expect Value vregs to be adjacent.
+ if (vreg + 1 >= MAX_VIRTUAL_REGISTERS) {
+ gen->abort("max virtual registers");
+ return 1;
+ }
+ return vreg;
+ }
+
+ template <typename T> void annotate(T* ins);
+ template <typename T> void add(T* ins, MInstruction* mir = nullptr);
+
+ void lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineTypedPhi(MPhi* phi, size_t lirIndex);
+
+ LOsiPoint* popOsiPoint() {
+ LOsiPoint* tmp = osiPoint_;
+ osiPoint_ = nullptr;
+ return tmp;
+ }
+
+ LRecoverInfo* getRecoverInfo(MResumePoint* rp);
+ LSnapshot* buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind);
+ bool assignPostSnapshot(MInstruction* mir, LInstruction* ins);
+
+ // Marks this instruction as fallible, meaning that before it performs
+ // effects (if any), it may check pre-conditions and bailout if they do not
+ // hold. This function informs the register allocator that it will need to
+ // capture appropriate state.
+ void assignSnapshot(LInstruction* ins, BailoutKind kind);
+
+ // Marks this instruction as needing to call into either the VM or GC. This
+ // function may build a snapshot that captures the result of its own
+ // instruction, and as such, should generally be called after define*().
+ void assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind = Bailout_DuringVMCall);
+
+ public:
+ void lowerConstantDouble(double d, MInstruction* mir) {
+ define(new(alloc()) LDouble(wasm::RawF64(d)), mir);
+ }
+ void lowerConstantFloat32(float f, MInstruction* mir) {
+ define(new(alloc()) LFloat32(wasm::RawF32(f)), mir);
+ }
+
+ void visitConstant(MConstant* ins) override;
+
+ // Whether to generate typed reads for element accesses with hole checks.
+ static bool allowTypedElementHoleCheck() {
+ return false;
+ }
+
+ // Whether to generate typed array accesses on statically known objects.
+ static bool allowStaticTypedArrayAccesses() {
+ return false;
+ }
+
+ // Provide NYI default implementations of the SIMD visitor functions.
+ // Many targets don't implement SIMD at all, and we don't want to duplicate
+ // these stubs in the specific sub-classes.
+ // Some SIMD visitors are implemented in LIRGenerator in Lowering.cpp. These
+ // shared implementations are not included here.
+ void visitSimdInsertElement(MSimdInsertElement*) override { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElement(MSimdExtractElement*) override { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArith(MSimdBinaryArith*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSelect(MSimdSelect*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSplat(MSimdSplat*) override { MOZ_CRASH("NYI"); }
+ void visitSimdValueX4(MSimdValueX4*) override { MOZ_CRASH("NYI"); }
+ void visitSimdBinarySaturating(MSimdBinarySaturating*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSwizzle(MSimdSwizzle*) override { MOZ_CRASH("NYI"); }
+ void visitSimdShuffle(MSimdShuffle*) override { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffle(MSimdGeneralShuffle*) override { MOZ_CRASH("NYI"); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_h */
diff --git a/js/src/jit/x64/Assembler-x64.cpp b/js/src/jit/x64/Assembler-x64.cpp
new file mode 100644
index 000000000..37f29d009
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -0,0 +1,303 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Assembler-x64.h"
+
+#include "gc/Marking.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ :
+#if defined(XP_WIN)
+ regIndex_(0),
+ stackOffset_(ShadowStackSpace),
+#else
+ intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+#endif
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+#if defined(XP_WIN)
+ JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
+ if (regIndex_ == NumIntArgRegs) {
+ if (IsSimdType(type)) {
+ // On Win64, >64 bit args need to be passed by reference, but wasm
+ // doesn't allow passing SIMD values to FFIs. The only way to reach
+ // here is asm to asm calls, so we can break the ABI here.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ } else {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ }
+ return current_;
+ }
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ current_ = ABIArg(IntArgRegs[regIndex_++]);
+ break;
+ case MIRType::Float32:
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
+ break;
+ case MIRType::Double:
+ current_ = ABIArg(FloatArgRegs[regIndex_++]);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ // On Win64, >64 bit args need to be passed by reference, but wasm
+ // doesn't allow passing SIMD values to FFIs. The only way to reach
+ // here is asm to asm calls, so we can break the ABI here.
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#else
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(IntArgRegs[intRegIndex_++]);
+ break;
+ case MIRType::Double:
+ case MIRType::Float32:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ if (type == MIRType::Float32)
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
+ else
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ }
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#endif
+}
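+
+// Worked example (illustrative): for a call with arguments (int32, double,
+// int32), the generator above yields:
+//   - System V x64: rdi, xmm0, rsi (separate int and float register counters);
+//   - Win64:        rcx, xmm1, r8  (one positional counter shared by both).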
+
+void
+Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
+{
+ if (!jumpRelocations_.length()) {
+ // The jump relocation table starts with a fixed-width integer pointing
+ // to the start of the extended jump table. But, we don't know the
+ // actual extended jump table offset yet, so write a 0 which we'll
+ // patch later.
+ jumpRelocations_.writeFixedUint32_t(0);
+ }
+ if (reloc == Relocation::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ jumpRelocations_.writeUnsigned(jumps_.length());
+ }
+}
+
+void
+Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
+{
+ MOZ_ASSERT(target.value != nullptr);
+
+ // Emit reloc before modifying the jump table, since it computes a 0-based
+ // index. This jump is not patchable at runtime.
+ if (reloc == Relocation::JITCODE)
+ writeRelocation(src, reloc);
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
+}
+
+size_t
+Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
+{
+ // This jump is patchable at runtime so we always need to make sure the
+ // jump table is emitted.
+ writeRelocation(src, reloc);
+
+ size_t index = jumps_.length();
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
+ return index;
+}
+
+/* static */
+uint8_t*
+Assembler::PatchableJumpAddress(JitCode* code, size_t index)
+{
+ // The assembler stashed, at the start of the relocation table, the offset
+ // within the code of the fragments used for far jumps.
+ uint32_t jumpOffset = * (uint32_t*) code->jumpRelocTable();
+ jumpOffset += index * SizeOfJumpTableEntry;
+
+ MOZ_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
+ return code->raw() + jumpOffset;
+}
+
+/* static */
+void
+Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect)
+{
+ uint8_t** index = (uint8_t**) (entry + SizeOfExtendedJump - sizeof(void*));
+ MaybeAutoWritableJitCode awjc(index, sizeof(void*), reprotect);
+ *index = target;
+}
+
+void
+Assembler::finish()
+{
+ if (!jumps_.length() || oom())
+ return;
+
+ // Emit the jump table.
+ masm.haltingAlign(SizeOfJumpTableEntry);
+ extendedJumpTable_ = masm.size();
+
+ // Now that we know the offset to the jump table, squirrel it into the
+ // jump relocation buffer if any JitCode references exist and must be
+ // tracked for GC.
+ MOZ_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
+ if (jumpRelocations_.length())
+ *(uint32_t*)jumpRelocations_.buffer() = extendedJumpTable_;
+
+ // Emit the extended jump table entries, zeroing their 64-bit target
+ // pointers; the targets are patched later (in executableCopy or
+ // PatchJumpEntry).
+ for (size_t i = 0; i < jumps_.length(); i++) {
+#ifdef DEBUG
+ size_t oldSize = masm.size();
+#endif
+ masm.jmp_rip(2);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 6);
+ // Following an indirect branch with ud2 hints to the hardware that
+ // there's no fall-through. This also aligns the 64-bit immediate.
+ masm.ud2();
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
+ masm.immediate64(0);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
+ }
+}
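+
+// Each extended jump table entry emitted above is laid out as:
+//   jmp *2(%rip)    ; 6 bytes, jumps through the pointer below
+//   ud2             ; 2 bytes, no-fall-through hint / alignment
+//   <target>        ; 8-byte pointer, patched later
+// for a total of SizeOfExtendedJump (== SizeOfJumpTableEntry) bytes.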
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ AssemblerX86Shared::executableCopy(buffer);
+
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ uint8_t* src = buffer + rp.offset;
+ if (!rp.target) {
+ // The patch target is nullptr for jumps that have been linked to
+ // a label within the same code block, but may be repatched later
+ // to jump to a different code block.
+ continue;
+ }
+ if (X86Encoding::CanRelinkJump(src, rp.target)) {
+ X86Encoding::SetRel32(src, rp.target);
+ } else {
+ // An extended jump table must exist, and its offset must be in
+ // range.
+ MOZ_ASSERT(extendedJumpTable_);
+ MOZ_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);
+
+ // Patch the jump to go to the extended jump entry.
+ uint8_t* entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
+ X86Encoding::SetRel32(src, entry);
+
+ // Now patch the target pointer, which occupies the 64-bit immediate at
+ // the end of the extended jump entry (SetPointer is given the address
+ // just past it).
+ X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
+ }
+ }
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ uint32_t tableStart_;
+ uint32_t offset_;
+ uint32_t extOffset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ {
+ tableStart_ = reader_.readFixedUint32_t();
+ }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ extOffset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+ uint32_t extendedOffset() const {
+ return extOffset_;
+ }
+};
+
+JitCode*
+Assembler::CodeFromJump(JitCode* code, uint8_t* jump)
+{
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
+ // This jump is within the code buffer, so it has been redirected to
+ // the extended jump table.
+ MOZ_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
+
+ target = (uint8_t*)X86Encoding::GetPointer(target + SizeOfExtendedJump);
+ }
+
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
+ }
+}
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
new file mode 100644
index 000000000..30e384158
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -0,0 +1,1040 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Assembler_x64_h
+#define jit_x64_Assembler_x64_h
+
+#include "mozilla/ArrayUtils.h"
+
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register rax = { X86Encoding::rax };
+static constexpr Register rbx = { X86Encoding::rbx };
+static constexpr Register rcx = { X86Encoding::rcx };
+static constexpr Register rdx = { X86Encoding::rdx };
+static constexpr Register rsi = { X86Encoding::rsi };
+static constexpr Register rdi = { X86Encoding::rdi };
+static constexpr Register rbp = { X86Encoding::rbp };
+static constexpr Register r8 = { X86Encoding::r8 };
+static constexpr Register r9 = { X86Encoding::r9 };
+static constexpr Register r10 = { X86Encoding::r10 };
+static constexpr Register r11 = { X86Encoding::r11 };
+static constexpr Register r12 = { X86Encoding::r12 };
+static constexpr Register r13 = { X86Encoding::r13 };
+static constexpr Register r14 = { X86Encoding::r14 };
+static constexpr Register r15 = { X86Encoding::r15 };
+static constexpr Register rsp = { X86Encoding::rsp };
+
+static constexpr FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister xmm8 = FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
+static constexpr FloatRegister xmm9 = FloatRegister(X86Encoding::xmm9, FloatRegisters::Double);
+static constexpr FloatRegister xmm10 = FloatRegister(X86Encoding::xmm10, FloatRegisters::Double);
+static constexpr FloatRegister xmm11 = FloatRegister(X86Encoding::xmm11, FloatRegisters::Double);
+static constexpr FloatRegister xmm12 = FloatRegister(X86Encoding::xmm12, FloatRegisters::Double);
+static constexpr FloatRegister xmm13 = FloatRegister(X86Encoding::xmm13, FloatRegisters::Double);
+static constexpr FloatRegister xmm14 = FloatRegister(X86Encoding::xmm14, FloatRegisters::Double);
+static constexpr FloatRegister xmm15 = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+
+// X86-common synonyms.
+static constexpr Register eax = rax;
+static constexpr Register ebx = rbx;
+static constexpr Register ecx = rcx;
+static constexpr Register edx = rdx;
+static constexpr Register esi = rsi;
+static constexpr Register edi = rdi;
+static constexpr Register ebp = rbp;
+static constexpr Register esp = rsp;
+
+static constexpr Register InvalidReg = { X86Encoding::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register StackPointer = rsp;
+static constexpr Register FramePointer = rbp;
+static constexpr Register JSReturnReg = rcx;
+// Avoid, except for assertions.
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+
+static constexpr Register ScratchReg = r11;
+
+// Helper class for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of the scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope
+{
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchReg)
+ { }
+};
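+
+// Typical usage (illustrative sketch):
+//
+//   ScratchRegisterScope scratch(masm);
+//   masm.mov(ImmWord(0), scratch);
+//
+// Constructing a second scope for the same register while one is live
+// triggers the ownership assertion described above.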
+
+static constexpr Register ReturnReg = rax;
+static constexpr Register HeapReg = r15;
+static constexpr Register64 ReturnReg64(rax);
+static constexpr FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg = xmm15;
+
+// Avoid rbp (the FramePointer), since it is unavailable in some modes.
+static constexpr Register ArgumentsRectifierReg = r8;
+static constexpr Register CallTempReg0 = rax;
+static constexpr Register CallTempReg1 = rdi;
+static constexpr Register CallTempReg2 = rbx;
+static constexpr Register CallTempReg3 = rcx;
+static constexpr Register CallTempReg4 = rsi;
+static constexpr Register CallTempReg5 = rdx;
+
+// Different argument registers for WIN64
+#if defined(_WIN64)
+static constexpr Register IntArgReg0 = rcx;
+static constexpr Register IntArgReg1 = rdx;
+static constexpr Register IntArgReg2 = r8;
+static constexpr Register IntArgReg3 = r9;
+static constexpr uint32_t NumIntArgRegs = 4;
+// Use "const" instead of constexpr here to work around a bug
+// in VS2015 Update 1. See bug 1229604.
+static const Register IntArgRegs[NumIntArgRegs] = { rcx, rdx, r8, r9 };
+
+static const Register CallTempNonArgRegs[] = { rax, rdi, rbx, rsi };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static const uint32_t NumFloatArgRegs = 4;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3 };
+#else
+static constexpr Register IntArgReg0 = rdi;
+static constexpr Register IntArgReg1 = rsi;
+static constexpr Register IntArgReg2 = rdx;
+static constexpr Register IntArgReg3 = rcx;
+static constexpr Register IntArgReg4 = r8;
+static constexpr Register IntArgReg5 = r9;
+static constexpr uint32_t NumIntArgRegs = 6;
+static const Register IntArgRegs[NumIntArgRegs] = { rdi, rsi, rdx, rcx, r8, r9 };
+
+// Use "const" instead of constexpr here to work around a bug
+// in VS2015 Update 1. See bug 1229604.
+static const Register CallTempNonArgRegs[] = { rax, rbx };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static constexpr FloatRegister FloatArgReg4 = xmm4;
+static constexpr FloatRegister FloatArgReg5 = xmm5;
+static constexpr FloatRegister FloatArgReg6 = xmm6;
+static constexpr FloatRegister FloatArgReg7 = xmm7;
+static constexpr uint32_t NumFloatArgRegs = 8;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 };
+#endif
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = r10;
+static constexpr Register WasmIonExitRegE0 = rax;
+static constexpr Register WasmIonExitRegE1 = rdi;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+static constexpr Register WasmIonExitRegReturnData = ecx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = rax;
+static constexpr Register WasmIonExitRegD1 = rdi;
+static constexpr Register WasmIonExitRegD2 = rbx;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg1;
+static constexpr Register RegExpTesterStringReg = CallTempReg2;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
+
+class ABIArgGenerator
+{
+#if defined(XP_WIN)
+ unsigned regIndex_;
+#else
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+#endif
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+};
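+
+// For illustration only (hypothetical signature, not emitted by the generator
+// itself): for (int, double, int), the Win64 convention modelled by the #if
+// branch above uses one shared slot counter and hands out rcx, xmm1, r8, while
+// the SysV convention modelled by the #else branch counts integer and float
+// arguments separately and hands out rdi, xmm0, rsi.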
+
+// Avoid r11, which is the MacroAssembler's ScratchReg.
+static constexpr Register ABINonArgReg0 = rax;
+static constexpr Register ABINonArgReg1 = rbx;
+static constexpr Register ABINonArgReg2 = r10;
+
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = r10;
+static constexpr Register ABINonArgReturnReg1 = r12;
+static constexpr Register ABINonVolatileReg = r13;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = r14;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register OsrFrameReg = IntArgReg3;
+
+static constexpr Register PreBarrierReg = rdx;
+
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture. Rather than being a method on the LIRGenerator, it lives
+// here so that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = true;
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be a multiple of every alignment used for the constant "
+ "sections of the code buffer, in particular the alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be a multiple of every alignment used for spilled "
+ "values, in particular the alignment for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+static const Scale ScalePointer = TimesEight;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+// Return operand from a JS -> JS call.
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+class Assembler : public AssemblerX86Shared
+{
+ // x64 jumps may need extra bits of relocation, because a jump may extend
+ // beyond the signed 32-bit range. To account for this we add an extended
+ // jump table at the bottom of the instruction stream, and if a jump
+ // overflows its range, it will redirect here.
+ //
+ // In our relocation table, we store two offsets instead of one: the offset
+ // to the original jump, and an offset to the extended jump if we will need
+ // to use it instead. The offsets are stored as:
+ // [unsigned] Unsigned offset to short jump, from the start of the code.
+ // [unsigned] Unsigned offset to the extended jump, from the start of
+ // the jump table, in units of SizeOfJumpTableEntry.
+ //
+ // The start of the relocation table contains the offset from the code
+ // buffer to the start of the extended jump table.
+ //
+ // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the
+ // hardware branch predictor that there is no fallthrough, followed by the
+ // eight bytes containing an immediate address. This comes out to 16 bytes.
+ // +1 byte for opcode
+ // +1 byte for mod r/m
+ // +4 bytes for rip-relative offset (2)
+ // +2 bytes for ud2 instruction
+ // +8 bytes for 64-bit address
+ //
+ static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
+ static const uint32_t SizeOfJumpTableEntry = 16;
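+
+ // An informal sketch of one table entry, derived from the size breakdown
+ // above (the rip-relative displacement simply skips the ud2 to reach the
+ // stored address):
+ // FF 25 xx xx xx xx jmp *disp32(%rip) ; jump through the address below
+ // 0F 0B ud2 ; no-fallthrough hint
+ // xx xx xx xx xx xx xx xx .quad target ; patchable 64-bit destination
+ // 1 + 1 + 4 + 2 + 8 == 16 bytes == SizeOfJumpTableEntry.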
+
+ uint32_t extendedJumpTable_;
+
+ static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);
+
+ private:
+ void writeRelocation(JmpSrc src, Relocation::Kind reloc);
+ void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc);
+
+ protected:
+ size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc);
+
+ public:
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::pop;
+ using AssemblerX86Shared::vmovq;
+
+ static uint8_t* PatchableJumpAddress(JitCode* code, size_t index);
+ static void PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect);
+
+ Assembler()
+ : extendedJumpTable_(0)
+ {
+ }
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ // The buffer is about to be linked; make sure any constant pools or excess
+ // bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ void push(const ImmGCPtr ptr) {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ void push(const ImmWord ptr) {
+ // We often end up with ImmWords that actually fit into int32.
+ // Be aware of the sign extension behavior.
+ if (ptr.value <= INT32_MAX) {
+ push(Imm32(ptr.value));
+ } else {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ }
+ void push(ImmPtr imm) {
+ push(ImmWord(uintptr_t(imm.value)));
+ }
+ void push(FloatRegister src) {
+ subq(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+ CodeOffset pushWithPatch(ImmWord word) {
+ CodeOffset label = movWithPatch(word, ScratchReg);
+ push(ScratchReg);
+ return label;
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addq(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ masm.movq_i64r(word.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ // Load an ImmWord value into a register. Note that this instruction will
+ // attempt to optimize its immediate field size. When a full 64-bit
+ // immediate is needed for a relocation, use movWithPatch.
+ void movq(ImmWord word, Register dest) {
+ // Load a 64-bit immediate into a register. If the value falls into
+ // certain ranges, we can use specialized instructions which have
+ // smaller encodings.
+ if (word.value <= UINT32_MAX) {
+ // movl has a 32-bit unsigned (effectively) immediate field.
+ masm.movl_i32r((uint32_t)word.value, dest.encoding());
+ } else if ((intptr_t)word.value >= INT32_MIN && (intptr_t)word.value <= INT32_MAX) {
+ // movq has a 32-bit signed immediate field.
+ masm.movq_i32r((int32_t)(intptr_t)word.value, dest.encoding());
+ } else {
+ // Otherwise use movabs.
+ masm.movq_i64r(word.value, dest.encoding());
+ }
+ }
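+ // A few illustrative cases for movq(ImmWord) above (hypothetical values):
+ // movq(ImmWord(0x12345678), rax) -> movl $0x12345678, %eax (zero-extends)
+ // movq(ImmWord(uintptr_t(-1)), rax) -> movq $-1, %rax (sign-extended imm32)
+ // movq(ImmWord(0x123456789), rax) -> movabsq $0x123456789, %rax (full 64 bits)
+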
+ void movq(ImmPtr imm, Register dest) {
+ movq(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void movq(ImmGCPtr ptr, Register dest) {
+ masm.movq_i64r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Imm32 imm32, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(imm32.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_i32m(imm32.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(Register src, FloatRegister dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void vmovq(FloatRegister src, Register dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void movq(Register src, Register dest) {
+ masm.movq_rr(src.encoding(), dest.encoding());
+ }
+
+ void cmovzq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.cmovzq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmovzq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmovzq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgq(Register src, Register dest) {
+ masm.xchgq_rr(src.encoding(), dest.encoding());
+ }
+
+ void movsbq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movsbq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzbq(const Operand& src, Register dest) {
+ // movzbl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzbl(src, dest);
+ }
+
+ void movswq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movswq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzwq(const Operand& src, Register dest) {
+ // movzwl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzwl(src, dest);
+ }
+
+ void movslq(Register src, Register dest) {
+ masm.movslq_rr(src.encoding(), dest.encoding());
+ }
+ void movslq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movslq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movslq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movslq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void andq(Register src, Register dest) {
+ masm.andq_rr(src.encoding(), dest.encoding());
+ }
+ void andq(Imm32 imm, Register dest) {
+ masm.andq_ir(imm.value, dest.encoding());
+ }
+ void andq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.andq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.andq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void addq(Imm32 imm, Register dest) {
+ masm.addq_ir(imm.value, dest.encoding());
+ }
+ void addq(Imm32 imm, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addq_ir(imm.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_im(imm.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_im(imm.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addq(Register src, Register dest) {
+ masm.addq_rr(src.encoding(), dest.encoding());
+ }
+ void addq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.addq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void subq(Imm32 imm, Register dest) {
+ masm.subq_ir(imm.value, dest.encoding());
+ }
+ void subq(Register src, Register dest) {
+ masm.subq_rr(src.encoding(), dest.encoding());
+ }
+ void subq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.subq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void shlq(Imm32 imm, Register dest) {
+ masm.shlq_ir(imm.value, dest.encoding());
+ }
+ void shrq(Imm32 imm, Register dest) {
+ masm.shrq_ir(imm.value, dest.encoding());
+ }
+ void sarq(Imm32 imm, Register dest) {
+ masm.sarq_ir(imm.value, dest.encoding());
+ }
+ void shlq_cl(Register dest) {
+ masm.shlq_CLr(dest.encoding());
+ }
+ void shrq_cl(Register dest) {
+ masm.shrq_CLr(dest.encoding());
+ }
+ void sarq_cl(Register dest) {
+ masm.sarq_CLr(dest.encoding());
+ }
+ void rolq(Imm32 imm, Register dest) {
+ masm.rolq_ir(imm.value, dest.encoding());
+ }
+ void rolq_cl(Register dest) {
+ masm.rolq_CLr(dest.encoding());
+ }
+ void rorq(Imm32 imm, Register dest) {
+ masm.rorq_ir(imm.value, dest.encoding());
+ }
+ void rorq_cl(Register dest) {
+ masm.rorq_CLr(dest.encoding());
+ }
+ void orq(Imm32 imm, Register dest) {
+ masm.orq_ir(imm.value, dest.encoding());
+ }
+ void orq(Register src, Register dest) {
+ masm.orq_rr(src.encoding(), dest.encoding());
+ }
+ void orq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.orq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.orq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorq(Register src, Register dest) {
+ masm.xorq_rr(src.encoding(), dest.encoding());
+ }
+ void xorq(Imm32 imm, Register dest) {
+ masm.xorq_ir(imm.value, dest.encoding());
+ }
+ void xorq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.xorq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.xorq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void bsrq(const Register& src, const Register& dest) {
+ masm.bsrq_rr(src.encoding(), dest.encoding());
+ }
+ void bsfq(const Register& src, const Register& dest) {
+ masm.bsfq_rr(src.encoding(), dest.encoding());
+ }
+ void popcntq(const Register& src, const Register& dest) {
+ masm.popcntq_rr(src.encoding(), dest.encoding());
+ }
+
+ void imulq(Register src, Register dest) {
+ masm.imulq_rr(src.encoding(), dest.encoding());
+ }
+ void imulq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.imulq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.imulq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ MOZ_CRASH("NYI");
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void cqo() {
+ masm.cqo();
+ }
+ void idivq(Register divisor) {
+ masm.idivq_r(divisor.encoding());
+ }
+ void udivq(Register divisor) {
+ masm.divq_r(divisor.encoding());
+ }
+
+ void vcvtsi2sdq(Register src, FloatRegister dest) {
+ masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
+ }
+
+ void negq(Register reg) {
+ masm.negq_r(reg.encoding());
+ }
+
+ void mov(ImmWord word, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though. Use xorl instead of xorq since they are functionally
+ // equivalent (32-bit instructions zero-extend their results to 64 bits)
+ // and xorl has a smaller encoding.
+ if (word.value == 0)
+ xorl(dest, dest);
+ else
+ movq(word, dest);
+ }
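+ // For the zero case above, mov(ImmWord(0), rax) comes out as xorl %eax, %eax
+ // (two bytes, or three with a REX prefix for r8-r15), and the 32-bit write
+ // still clears the whole 64-bit register.
+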
+ void mov(ImmPtr imm, Register dest) {
+ movq(imm, dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movq_i64r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) {
+ movq(src, dest);
+ }
+ void mov(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void mov(Imm32 imm32, const Operand& dest) {
+ movq(imm32, dest);
+ }
+ void mov(Register src, Register dest) {
+ movq(src, dest);
+ }
+ void mov(CodeOffset* label, Register dest) {
+ masm.movq_i64r(/* placeholder */ 0, dest.encoding());
+ label->bind(masm.size());
+ }
+ void xchg(Register src, Register dest) {
+ xchgq(src, dest);
+ }
+ void lea(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.leaq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexepcted operand kind");
+ }
+ }
+
+ CodeOffset loadRipRelativeInt32(Register dest) {
+ return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt64(Register dest) {
+ return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeDouble(FloatRegister dest) {
+ return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
+ return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt32(Register dest) {
+ return CodeOffset(masm.movl_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt64(Register dest) {
+ return CodeOffset(masm.movq_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeDouble(FloatRegister dest) {
+ return CodeOffset(masm.vmovsd_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeFloat32(FloatRegister dest) {
+ return CodeOffset(masm.vmovss_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovdqa_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeFloat32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
+ }
+ CodeOffset leaRipRelative(Register dest) {
+ return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
+ }
+
+ void cmpq(Register rhs, Register lhs) {
+ masm.cmpq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpq(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(Imm32 rhs, Register lhs) {
+ masm.cmpq_ir(rhs.value, lhs.encoding());
+ }
+ void cmpq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(const Operand& rhs, Register lhs) {
+ switch (rhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.reg(), lhs.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void testq(Imm32 rhs, Register lhs) {
+ masm.testq_ir(rhs.value, lhs.encoding());
+ }
+ void testq(Register rhs, Register lhs) {
+ masm.testq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void testq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.testq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(ImmWord target) {
+ call(ImmPtr((void*)target.value));
+ }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, Relocation::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
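+
+ // Both forms emitted by toggledCall are five bytes long: call rel32 is E8
+ // followed by a 32-bit offset, and the disabled form is presumably a
+ // cmp-with-eax opcode (3D) followed by four immediate bytes holding the
+ // would-be offset, so toggling rewrites a single byte in place without
+ // moving any code.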
+
+ // Do not mask shared implementations.
+ using AssemblerX86Shared::call;
+
+ void vcvttsd2sq(FloatRegister src, Register dest) {
+ masm.vcvttsd2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttss2sq(FloatRegister src, Register dest) {
+ masm.vcvttss2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtsq2sd(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+};
+
+static inline void
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+{
+ if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), label.raw());
+ } else {
+ {
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
+ }
+ Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw(), reprotect);
+ }
+}
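+
+// In PatchJump above, when the label is close enough that the jump's signed
+// 32-bit rel32 field can reach it (which is what CanRelinkJump is expected to
+// check), the rel32 is rewritten in place; otherwise the rel32 is retargeted
+// at this jump's extended-jump-table entry and the 64-bit address stored in
+// that entry is patched instead.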
+
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+static inline bool
+GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
+{
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = intArg;
+#endif
+ if (arg >= NumIntArgRegs)
+ return false;
+ *out = IntArgRegs[arg];
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once the actual
+// argument registers are exhausted, we fall back on the CallTempReg* registers
+// that do not overlap the argument registers, and only fail once those run out
+// too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+#if defined(_WIN64)
+ uint32_t arg = usedIntArgs + usedFloatArgs;
+#else
+ uint32_t arg = usedIntArgs;
+#endif
+ arg -= NumIntArgRegs;
+ if (arg >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[arg];
+ return true;
+}
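+// For example, with the SysV register set above, a caller that has already
+// used all six integer argument registers gets rax and then rbx from
+// CallTempNonArgRegs before GetTempRegForIntArg finally reports failure.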
+
+static inline bool
+GetFloatArgReg(uint32_t intArg, uint32_t floatArg, FloatRegister* out)
+{
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = floatArg;
+#endif
+ if (arg >= NumFloatArgRegs)
+ return false;
+ *out = FloatArgRegs[arg];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Assembler_x64_h */
diff --git a/js/src/jit/x64/Bailouts-x64.cpp b/js/src/jit/x64/Bailouts-x64.cpp
new file mode 100644
index 000000000..a07aa31a9
--- /dev/null
+++ b/js/src/jit/x64/Bailouts-x64.cpp
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+using namespace js;
+using namespace js::jit;
+
+#if defined(_WIN32)
+# pragma pack(push, 1)
+#endif
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ uintptr_t frameSize_;
+ uintptr_t snapshotOffset_;
+
+ public:
+ MachineState machineState() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ uint32_t snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ uint8_t* parentStackPointer() {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#if defined(_WIN32)
+# pragma pack(pop)
+#endif
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machineState())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+ snapshotOffset_ = bailout->snapshotOffset();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/x64/BaseAssembler-x64.h b/js/src/jit/x64/BaseAssembler-x64.h
new file mode 100644
index 000000000..f26fedc07
--- /dev/null
+++ b/js/src/jit/x64/BaseAssembler-x64.h
@@ -0,0 +1,929 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_BaseAssembler_x64_h
+#define jit_x64_BaseAssembler_x64_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX64 : public BaseAssembler
+{
+ public:
+
+ // Arithmetic operations:
+
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("addq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst);
+ }
+
+ void addq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("addq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst);
+ }
+
+ void addq_mr(const void* addr, RegisterID dst)
+ {
+ spew("addq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst);
+ }
+
+ void addq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("addq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("addq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int32_t imm, const void* addr)
+ {
+ spew("addq $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("andq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, src, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("andq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("andq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst);
+ }
+
+ void andq_mr(const void* addr, RegisterID dst)
+ {
+ spew("andq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst);
+ }
+
+ void orq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("orq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst);
+ }
+
+ void orq_mr(const void* addr, RegisterID dst)
+ {
+ spew("orq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst);
+ }
+
+ void xorq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("xorq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst);
+ }
+
+ void xorq_mr(const void* addr, RegisterID dst)
+ {
+ spew("xorq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst);
+ }
+
+ void bsrq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsrq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst);
+ }
+
+ void bsfq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsfq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst);
+ }
+
+ void popcntq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("popcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst);
+ }
+
+ void andq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("andq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_AND_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void negq_r(RegisterID dst)
+ {
+ spew("negq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
+ }
+
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("orq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, src, dst);
+ }
+
+ void orq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("orq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void notq_r(RegisterID dst)
+ {
+ spew("notq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
+ }
+
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("subq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst);
+ }
+
+ void subq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("subq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("subq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst);
+ }
+
+ void subq_mr(const void* addr, RegisterID dst)
+ {
+ spew("subq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst);
+ }
+
+ void subq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("subq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xorq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("xorq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sarq_CLr(RegisterID dst)
+ {
+ spew("sarq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
+ }
+
+ void shlq_CLr(RegisterID dst)
+ {
+ spew("shlq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
+ }
+
+ void shrq_CLr(RegisterID dst)
+ {
+ spew("shrq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
+ }
+
+ void sarq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("sarq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shlq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("shlq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shrq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("shrq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void rolq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("rolq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rolq_CLr(RegisterID dst)
+ {
+ spew("rolq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
+ }
+
+ void rorq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("rorq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rorq_CLr(RegisterID dst)
+ {
+ spew("rorq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
+ }
+
+ void imulq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("imulq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
+ }
+
+ void imulq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
+ }
+
+ void cqo()
+ {
+ spew("cqo ");
+ m_formatter.oneByteOp64(OP_CDQ);
+ }
+
+ void idivq_r(RegisterID divisor)
+ {
+ spew("idivq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
+ }
+
+ void divq_r(RegisterID divisor)
+ {
+ spew("divq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
+ }
+
+ // Comparisons:
+
+ void cmpq_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("cmpq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpq %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs);
+ }
+
+ void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs)
+ {
+ spew("cmpq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs);
+ }
+
+ void cmpq_ir(int32_t rhs, RegisterID lhs)
+ {
+ if (rhs == 0) {
+ testq_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ if (lhs == rax)
+ m_formatter.oneByteOp64(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpq $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_im(int32_t rhs, const void* addr)
+ {
+ spew("cmpq $0x%" PRIx64 ", %p", int64_t(rhs), addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_rm(RegisterID rhs, const void* addr)
+ {
+ spew("cmpq %s, %p", GPReg64Name(rhs), addr);
+ m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void testq_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("testq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testq_ir(int32_t rhs, RegisterID lhs)
+ {
+ // If the mask zero-extends from 32 bits (i.e. its sign bit is clear), testl
+ // on the 32-bit subreg gives the same result and has a shorter encoding.
+ if (CAN_ZERO_EXTEND_32_64(rhs)) {
+ testl_ir(rhs, lhs);
+ return;
+ }
+ spew("testq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
+ if (lhs == rax)
+ m_formatter.oneByteOp64(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("testq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("testq $0x%4x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ // Various move ops:
+
+ void cmovzq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("cmovz %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, src, dst);
+ }
+ void cmovzq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("cmovz " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, dst);
+ }
+ void cmovzq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("cmovz " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, index, scale, dst);
+ }
+
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xchgq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movq %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movq_rm(RegisterID src, const void* addr)
+ {
+ if (src == rax && !IsAddressImmediate(addr)) {
+ movq_EAXm(addr);
+ return;
+ }
+
+ spew("movq %s, %p", GPReg64Name(src), addr);
+ m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src);
+ }
+
+ void movq_mEAX(const void* addr)
+ {
+ if (IsAddressImmediate(addr)) {
+ movq_mr(addr, rax);
+ return;
+ }
+
+ spew("movq %p, %%rax", addr);
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr)
+ {
+ if (IsAddressImmediate(addr)) {
+ movq_rm(rax, addr);
+ return;
+ }
+
+ spew("movq %%rax, %p", addr);
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movq " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movq_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == rax && !IsAddressImmediate(addr)) {
+ movq_mEAX(addr);
+ return;
+ }
+
+ spew("movq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst);
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("leaq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst)),
+ m_formatter.oneByteOp64(OP_LEA, offset, base, index, scale, dst);
+ }
+
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("movq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movq $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, const void* addr)
+ {
+ spew("movq $%d, %p", imm, addr);
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ // Note that this instruction sign-extends its 32-bit immediate field to 64
+ // bits and loads the 64-bit value into a 64-bit register.
+ //
+ // Note also that this is similar to the movl_i32r instruction, except that
+ // movl_i32r *zero*-extends its 32-bit immediate and has a smaller encoding,
+ // so it's preferred for values which could use either.
+ void movq_i32r(int32_t imm, RegisterID dst)
+ {
+ spew("movq $%d, %s", imm, GPRegName(dst));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
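+
+ // E.g. with imm == -1, movq_i32r above leaves 0xFFFFFFFFFFFFFFFF in the
+ // destination (sign extension), whereas movl_i32r would leave
+ // 0x00000000FFFFFFFF (zero extension); where both agree, movl_i32r is the
+ // shorter choice.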
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ spew("movabsq $0x%" PRIx64 ", %s", imm, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movsbq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movsbq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movswq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movswq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movslq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movslq %s, %s", GPReg32Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movslq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movslq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movl_ripr(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
+ JmpSrc label(m_formatter.size());
+ spew("movl " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPReg32Name(dst));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movl_rrip(RegisterID src)
+ {
+ m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movl %s, " MEM_o32r "", GPReg32Name(src), ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movq_ripr(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("movq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movq_rrip(RegisterID src)
+ {
+ m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movq %s, " MEM_o32r "", GPRegName(src), ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("leaq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ leaq_rip(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("leaq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
+ return label;
+ }
+
+ // Flow control:
+
+ void jmp_rip(int ripOffset)
+ {
+ // rip-relative addressing.
+ spew("jmp *%d(%%rip)", ripOffset);
+ m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ spew(".quad %lld", (long long)imm);
+ m_formatter.immediate64(imm);
+ }
+
+ // SSE operations:
+
+ void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+ void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src, invalid_xmm, dst);
+ }
+
+ void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vmovq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
+ twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vmovq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
+ twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovsd_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovss_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovsd_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovss_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovdqa_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovaps_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PS, OP2_MOVAPS_WsdVsd, invalid_xmm, src);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovaps_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, invalid_xmm, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovdqa_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, invalid_xmm, dst);
+ }
+
+ private:
+
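+ // Shared helper for the rip-relative SIMD loads/stores above: it picks the
+ // legacy SSE or VEX encoding, emits the instruction with a zero displacement,
+ // and returns a JmpSrc marking where that displacement gets resolved.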
+ MOZ_MUST_USE JmpSrc
+ twoByteRipOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteRipOp(opcode, 0, dst);
+ JmpSrc label(m_formatter.size());
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32r "", legacySSEOpName(name), XMMRegName(dst), ADDR_o32r(label.offset()));
+ else
+ spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
+ JmpSrc label(m_formatter.size());
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32r "", name, XMMRegName(dst), ADDR_o32r(label.offset()));
+ else
+ spew("%-11s" MEM_o32r ", %s", name, ADDR_o32r(label.offset()), XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
+ }
+ return label;
+ }
+
+ void twoByteOpInt64Simd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), GPRegName(rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
+ else
+ spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst);
+ }
+
+ void twoByteOpSimdInt64(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID rm, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), GPRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
+ m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm, (XMMRegisterID)dst);
+ }
+};
+
+typedef BaseAssemblerX64 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_BaseAssembler_x64_h */
diff --git a/js/src/jit/x64/BaselineCompiler-x64.cpp b/js/src/jit/x64/BaselineCompiler-x64.cpp
new file mode 100644
index 000000000..8735414be
--- /dev/null
+++ b/js/src/jit/x64/BaselineCompiler-x64.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/BaselineCompiler-x64.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerX64::BaselineCompilerX64(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerX86Shared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/x64/BaselineCompiler-x64.h b/js/src/jit/x64/BaselineCompiler-x64.h
new file mode 100644
index 000000000..ff75f9c09
--- /dev/null
+++ b/js/src/jit/x64/BaselineCompiler-x64.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_BaselineCompiler_x64_h
+#define jit_x64_BaselineCompiler_x64_h
+
+#include "jit/x86-shared/BaselineCompiler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerX64 : public BaselineCompilerX86Shared
+{
+ protected:
+ BaselineCompilerX64(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerX64 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_BaselineCompiler_x64_h */
diff --git a/js/src/jit/x64/BaselineIC-x64.cpp b/js/src/jit/x64/BaselineIC-x64.cpp
new file mode 100644
index 000000000..f04052b0d
--- /dev/null
+++ b/js/src/jit/x64/BaselineIC-x64.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Directly compare the int32 payload of R0 and R1.
+ ScratchRegisterScope scratch(masm);
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
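+ // setCC only writes the low byte of its operand, so clear the whole scratch
+ // register first.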
+ masm.mov(ImmWord(0), scratch);
+ masm.cmp32(R0.valueReg(), R1.valueReg());
+ masm.setCC(cond, scratch);
+
+ // Box the result and return
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, scratch, R0.valueReg());
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
new file mode 100644
index 000000000..a5be15072
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -0,0 +1,880 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/CodeGenerator-x64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/IonCaches.h"
+#include "jit/MIR.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+CodeGeneratorX64::CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm)
+{
+}
+
+ValueOperand
+CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand
+CodeGeneratorX64::ToOutValue(LInstruction* ins)
+{
+ return ValueOperand(ToRegister(ins->getDef(0)));
+}
+
+ValueOperand
+CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+Operand
+CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64)
+{
+ const LAllocation& a = a64.value();
+ MOZ_ASSERT(!a.isFloatReg());
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ return Operand(masm.getStackPointer(), ToStackOffset(a));
+}
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(0);
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_CRASH("x64 does not use frame size classes");
+}
+
+void
+CodeGeneratorX64::visitValue(LValue* value)
+{
+ LDefinition* reg = value->getDef(0);
+ masm.moveValue(value->value(), ToRegister(reg));
+}
+
+void
+CodeGeneratorX64::visitBox(LBox* box)
+{
+ const LAllocation* in = box->getOperand(0);
+ const LDefinition* result = box->getDef(0);
+
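+ // On x64 a canonical double's bit pattern is itself the boxed Value, so
+ // boxing a double is a raw 64-bit move into the value register (float32 is
+ // widened to double first).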
+ if (IsFloatingPointType(box->type())) {
+ ScratchDoubleScope scratch(masm);
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, scratch);
+ reg = scratch;
+ }
+ masm.vmovq(reg, ToRegister(result));
+ } else {
+ masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
+ }
+}
+
+void
+CodeGeneratorX64::visitUnbox(LUnbox* unbox)
+{
+ MUnbox* mir = unbox->mir();
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Assembler::Condition cond;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ cond = masm.testInt32(Assembler::NotEqual, value);
+ break;
+ case MIRType::Boolean:
+ cond = masm.testBoolean(Assembler::NotEqual, value);
+ break;
+ case MIRType::Object:
+ cond = masm.testObject(Assembler::NotEqual, value);
+ break;
+ case MIRType::String:
+ cond = masm.testString(Assembler::NotEqual, value);
+ break;
+ case MIRType::Symbol:
+ cond = masm.testSymbol(Assembler::NotEqual, value);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutIf(cond, unbox->snapshot());
+ }
+
+ Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
+ Register result = ToRegister(unbox->output());
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(input, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(input, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(input, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(input, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(input, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void
+CodeGeneratorX64::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ // Load boxed boolean in ScratchReg.
+ ScratchRegisterScope scratch(masm);
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
+
+ // Perform the comparison.
+ masm.cmpPtr(lhs.valueReg(), scratch);
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+}
+
+void
+CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ // Load boxed boolean in ScratchReg.
+ ScratchRegisterScope scratch(masm);
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
+
+ // Perform the comparison.
+ masm.cmpPtr(lhs.valueReg(), scratch);
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+}
+
+void
+CodeGeneratorX64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+
+ if (IsConstant(rhs))
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ else
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
+}
+
+void
+CodeGeneratorX64::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+
+ if (IsConstant(rhs))
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ else
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);
+
+ Label done;
+
+ // Put the lhs in rax.
+ if (lhs != rax)
+ masm.mov(lhs, rax);
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
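+ // (For modulus, INT64_MIN % -1 is defined to be 0 rather than a trap.)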
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notmin);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notmin);
+ if (lir->mir()->isMod())
+ masm.xorl(output, output);
+ else
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ // Sign extend the lhs into rdx to make rdx:rax.
+ masm.cqo();
+ masm.idivq(rhs);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX64::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ DebugOnly<Register> output = ToRegister(lir->output());
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output.value == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);
+
+ // Put the lhs in rax.
+ if (lhs != rax)
+ masm.mov(lhs, rax);
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Zero rdx so that rdx:rax holds the zero-extended lhs.
+ masm.xorl(rdx, rdx);
+ masm.udivq(rhs);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX64::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+
+ Operand falseExpr = ToOperandOrRegister64(lir->falseExpr());
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
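+ // The output register already holds trueExpr (see the assertion above);
+ // cmovzq replaces it with falseExpr only when the condition is zero.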
+ masm.test32(cond, cond);
+ masm.cmovzq(falseExpr, out.reg);
+}
+
+void
+CodeGeneratorX64::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorX64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorX64::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
+ Operand dstAddr)
+{
+ if (value->isConstant()) {
+ MOZ_ASSERT(!access.isSimd());
+
+ masm.memoryBarrier(access.barrierBefore());
+
+ const MConstant* mir = value->toConstant();
+ Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
+
+ size_t storeOffset = masm.size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.movb(cst, dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.movw(cst, dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.movl(cst, dstAddr);
+ break;
+ case Scalar::Int64:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ masm.append(access, storeOffset, masm.framePushed());
+
+ masm.memoryBarrier(access.barrierAfter());
+ } else {
+ masm.wasmStore(access, ToAnyRegister(value), dstAddr);
+ }
+}
+
+template <typename T>
+void
+CodeGeneratorX64::emitWasmLoad(T* ins)
+{
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
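+ // Offsets below OffsetGuardLimit are caught by the guard region if the
+ // access goes out of bounds, so no explicit bounds check is needed here.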
+
+ const LAllocation* ptr = ins->ptr();
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ if (mir->type() == MIRType::Int64)
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ else
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
+{
+ emitWasmLoad(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmLoadI64(LWasmLoadI64* ins)
+{
+ emitWasmLoad(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX64::emitWasmStore(T* ins)
+{
+ const MWasmStore* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* value = ins->getOperand(ins->ValueIndex);
+ const LAllocation* ptr = ins->ptr();
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ wasmStore(mir->access(), value, dstAddr);
+}
+
+void
+CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(HeapReg, mir->offset())
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+
+ uint32_t before = masm.size();
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
+ uint32_t after = masm.size();
+ verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
+}
+
+void
+CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* value = ins->value();
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ canonicalizeIfDeterministic(accessType, value);
+
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(HeapReg, mir->offset())
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+
+ uint32_t before = masm.size();
+ wasmStore(mir->access(), value, dstAddr);
+ uint32_t after = masm.size();
+ verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
+}
+
+void
+CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ srcAddr,
+ oldval,
+ newval,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(accessType <= Scalar::Uint32);
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ srcAddr,
+ value,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ AnyRegister output = ToAnyRegister(ins->output());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ if (accessType == Scalar::Uint32)
+ accessType = Scalar::Int32;
+
+ AtomicOp op = mir->operation();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ if (value->isConstant()) {
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
+ output);
+ } else {
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
+ output);
+ }
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
+ else
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
+}
+
+void
+CodeGeneratorX64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
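+ // Globals are accessed rip-relative; the displacement is filled in later
+ // from the GlobalAccess record appended at the end of this function.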
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
+ break;
+ case MIRType::Float32:
+ label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Double:
+ label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
+ break;
+ // Aligned access: code is aligned on PageSize, and there is padding
+ // before the global data section.
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
+ }
+
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ CodeOffset label = masm.loadRipRelativeInt64(ToRegister(ins->output()));
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->value()->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
+ break;
+ case MIRType::Float32:
+ label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
+ break;
+ case MIRType::Double:
+ label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
+ break;
+ // Aligned access: code is aligned on PageSize, and there is padding
+ // before the global data section.
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
+ }
+
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+ MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register value = ToRegister(ins->getOperand(LWasmStoreGlobalVarI64::InputIndex));
+ CodeOffset label = masm.storeRipRelativeInt64(value);
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateDouble uses vcvttsd2sq. Unlike the x86
+ // implementation, this should handle most doubles and we can just
+ // call a stub if it fails.
+ emitTruncateDouble(input, output, ins->mir());
+}
+
+void
+CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateFloat32 uses vcvttss2sq. Unlike the x86
+ // implementation, this should handle most floats and we can just
+ // call a stub if it fails.
+ emitTruncateFloat32(input, output, ins->mir());
+}
+
+void
+CodeGeneratorX64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.movl(ToOperand(input), output);
+ else
+ MOZ_CRASH("Not implemented.");
+}
+
+void
+CodeGeneratorX64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
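+ // On x64, movl implicitly zeroes the upper 32 bits of its destination,
+ // which is exactly the unsigned extension.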
+ if (lir->mir()->isUnsigned())
+ masm.movl(ToOperand(input), output);
+ else
+ masm.movslq(ToOperand(input), output);
+}
+
+void
+CodeGeneratorX64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType inputType = mir->input()->type();
+
+ MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ FloatRegister temp = mir->isUnsigned() ? ToFloatRegister(lir->temp()) : InvalidFloatReg;
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ if (inputType == MIRType::Double) {
+ if (mir->isUnsigned())
+ masm.wasmTruncateDoubleToUInt64(input, output, oolEntry, oolRejoin, temp);
+ else
+ masm.wasmTruncateDoubleToInt64(input, output, oolEntry, oolRejoin, temp);
+ } else {
+ if (mir->isUnsigned())
+ masm.wasmTruncateFloat32ToUInt64(input, output, oolEntry, oolRejoin, temp);
+ else
+ masm.wasmTruncateFloat32ToInt64(input, output, oolEntry, oolRejoin, temp);
+ }
+}
+
+void
+CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ else
+ masm.convertInt64ToDouble(input, output);
+ } else {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ else
+ masm.convertInt64ToFloat32(input, output);
+ }
+}
+
+void
+CodeGeneratorX64::visitNotI64(LNotI64* lir)
+{
+ masm.cmpq(Imm32(0), ToRegister(lir->input()));
+ masm.emitSet(Assembler::Equal, ToRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void
+CodeGeneratorX64::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void
+CodeGeneratorX64::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register input = ToRegister(lir->input());
+ masm.testq(input, input);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/x64/CodeGenerator-x64.h b/js/src/jit/x64/CodeGenerator-x64.h
new file mode 100644
index 000000000..c0290608d
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_CodeGenerator_x64_h
+#define jit_x64_CodeGenerator_x64_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorX64 : public CodeGeneratorX86Shared
+{
+ CodeGeneratorX64* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ Operand ToOperand64(const LInt64Allocation& a);
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void storeUnboxedValue(const LAllocation* value, MIRType valueType,
+ Operand dest, MIRType slotType);
+
+ void wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value, Operand dstAddr);
+ template <typename T> void emitWasmLoad(T* ins);
+ template <typename T> void emitWasmStore(T* ins);
+
+ public:
+ CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitValue(LValue* value);
+ void visitBox(LBox* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitNotI64(LNotI64* lir);
+ void visitClzI64(LClzI64* lir);
+ void visitCtzI64(LCtzI64* lir);
+ void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ void visitTruncateFToInt32(LTruncateFToInt32* ins);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitWasmSelectI64(LWasmSelectI64* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+ void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+};
+
+typedef CodeGeneratorX64 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_CodeGenerator_x64_h */
diff --git a/js/src/jit/x64/LIR-x64.h b/js/src/jit/x64/LIR-x64.h
new file mode 100644
index 000000000..f812ac692
--- /dev/null
+++ b/js/src/jit/x64/LIR-x64.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_LIR_x64_h
+#define jit_x64_LIR_x64_h
+
+namespace js {
+namespace jit {
+
+// Given an untyped input, guards on whether it's a specific type and returns
+// the unboxed payload.
+class LUnboxBase : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ explicit LUnboxBase(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+};
+
+class LUnbox : public LUnboxBase {
+ public:
+ LIR_HEADER(Unbox)
+
+ explicit LUnbox(const LAllocation& input)
+ : LUnboxBase(input)
+ { }
+
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LUnboxBase {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint)
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnboxBase(input),
+ type_(type)
+ { }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
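+// Performs a signed 64-bit 'idiv', yielding either the quotient or the
+// remainder depending on whether this instruction is defined to output
+// rax (quotient) or rdx (remainder).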
+class LDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+// This class performs a simple x86 'div', yielding either a quotient or
+// remainder depending on whether this instruction is defined to output
+// rax (quotient) or rdx (remainder).
+class LUDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_LIR_x64_h */
diff --git a/js/src/jit/x64/LOpcodes-x64.h b/js/src/jit/x64/LOpcodes-x64.h
new file mode 100644
index 000000000..b61caeb26
--- /dev/null
+++ b/js/src/jit/x64/LOpcodes-x64.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_LOpcodes_x64_h
+#define jit_x64_LOpcodes_x64_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(DivOrModConstantI) \
+ _(DivOrModI64) \
+ _(UDivOrModI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint) \
+ _(SimdValueInt32x4) \
+ _(SimdValueFloat32x4) \
+ _(UDivOrMod) \
+ _(UDivOrModConstant)
+
+#endif /* jit_x64_LOpcodes_x64_h */
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
new file mode 100644
index 000000000..70801dc05
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -0,0 +1,495 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Lowering-x64.h"
+
+#include "jit/MIR.h"
+#include "jit/x64/Assembler-x64.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
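+// On x64 every general-purpose register has a byte form, so the byte-op
+// helpers below impose no extra register constraints.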
+LAllocation
+LIRGeneratorX64::useByteOpRegister(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition
+LIRGeneratorX64::tempByteOpRegister()
+{
+ return temp();
+}
+
+LDefinition
+LIRGeneratorX64::tempToUnbox()
+{
+ return temp();
+}
+
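+// 64-bit ALU instructions on x64 are two-operand and clobber their left
+// operand, so the lhs is used at start and the result reuses that register.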
+void
+LIRGeneratorX64::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ // X64 doesn't need a temp for 64bit multiplication.
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX64::visitBox(MBox* box)
+{
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void
+LIRGeneratorX64::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* box = unbox->getOperand(0);
+
+ if (box->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnboxBase* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new(alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new(alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ define(lir, unbox);
+}
+
+void
+LIRGeneratorX64::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void
+LIRGeneratorX64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void
+LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void
+LIRGeneratorX64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins)
+{
+ if (ins->type() != MIRType::Int64) {
+ lowerWasmLoad(ins);
+ return;
+ }
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+ defineInt64(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* value = ins->value();
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ valueAlloc = useRegisterOrConstantAtStart(value);
+ break;
+ case Scalar::Int64:
+ // No way to encode an int64-to-memory move on x64.
+ if (value->isConstant() && value->type() != MIRType::Int64)
+ valueAlloc = useOrConstantAtStart(value);
+ else
+ valueAlloc = useRegisterAtStart(value);
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ valueAlloc = useRegisterAtStart(value);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAsmJSStoreHeap* lir = nullptr; // initialize to silence GCC warning
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+ useRegisterOrConstantAtStart(ins->value()));
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+ useRegisterAtStart(ins->value()));
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // The output may not be used but will be clobbered regardless, so
+ // pin the output to eax.
+ //
+ // The input values must both be in registers.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval = useRegister(ins->newValue());
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
+
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ // The output may not be used but will be clobbered regardless,
+ // so ignore the case where we're not using the value and just
+ // use the output register as a temp.
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR.
+
+ if (!ins->hasUses()) {
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
+ useRegisterOrConstant(ins->value()));
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD with word and byte ops as
+ // appropriate. Any output register can be used and if value is a
+ // register it's best if it's the same as output:
+ //
+ // movl value, output ; if value != output
+ // lock xaddl output, mem
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
+ // always in rax:
+ //
+ // movl *mem, rax
+ // L: mov rax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads rax also
+ // jnz L
+ // ; result in rax
+ //
+ // Note the placement of L: cmpxchg will update rax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+ bool reuseInput = false;
+ LAllocation value;
+
+ if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ } else {
+ reuseInput = true;
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
+ value,
+ bitOp ? temp() : LDefinition::BogusTemp());
+
+ if (reuseInput)
+ defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
+ else if (bitOp)
+ defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitSubstr(MSubstr* ins)
+{
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ temp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorX64::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
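+// idivq/udivq produce the quotient in rax and the remainder in rdx, so the
+// 64-bit div/mod lowerings pin their output to one of those registers and
+// reserve the other as a temp.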
+void
+LIRGeneratorX64::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()),
+ tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void
+LIRGeneratorX64::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void
+LIRGeneratorX64::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()),
+ useRegister(div->rhs()),
+ tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void
+LIRGeneratorX64::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
+ useRegister(mod->rhs()),
+ tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void
+LIRGeneratorX64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorX64::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
+}
diff --git a/js/src/jit/x64/Lowering-x64.h b/js/src/jit/x64/Lowering-x64.h
new file mode 100644
index 000000000..24321f97b
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Lowering_x64_h
+#define jit_x64_Lowering_x64_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX64 : public LIRGeneratorX86Shared
+{
+ public:
+ LIRGeneratorX64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart = false);
+
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on x64 all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+};
+
+typedef LIRGeneratorX64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Lowering_x64_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64-inl.h b/js/src/jit/x64/MacroAssembler-x64-inl.h
new file mode 100644
index 000000000..f7a70e68e
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -0,0 +1,897 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_inl_h
+#define jit_x64_MacroAssembler_x64_inl_h
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movq(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ andq(src, dest);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ andq(imm, dest);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
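+ // 64-bit ALU instructions encode at most a sign-extended 32-bit immediate,
+ // so larger constants must first be materialized in the scratch register.
+ // The same applies to or64 and xor64 below.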
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ andq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ andq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ orq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ orq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ xorq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ xorq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ orq(src, dest);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ orq(imm, dest);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ andq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ orq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ xorq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ xorq(src, dest);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ xorq(imm, dest);
+}
+
+void
+MacroAssembler::and64(const Operand& src, Register64 dest)
+{
+ andq(src, dest.reg);
+}
+
+void
+MacroAssembler::or64(const Operand& src, Register64 dest)
+{
+ orq(src, dest.reg);
+}
+
+void
+MacroAssembler::xor64(const Operand& src, Register64 dest)
+{
+ xorq(src, dest.reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ addq(src, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ addq(imm, dest);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ addq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ addq(scratch, dest);
+ }
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ addq(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest)
+{
+ addq(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ addq(Operand(src), dest);
+}
+
+void
+MacroAssembler::add64(const Operand& src, Register64 dest)
+{
+ addq(src, dest.reg);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ addq(imm, dest.reg);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ addPtr(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ subq(src, dest);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ subq(src, Operand(dest));
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ subq(imm, dest);
+}
+
+void
+MacroAssembler::subPtr(ImmWord imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ subq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ subq(scratch, dest);
+ }
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ subq(Operand(addr), dest);
+}
+
+void
+MacroAssembler::sub64(const Operand& src, Register64 dest)
+{
+ subq(src, dest.reg);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ subq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ subPtr(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ movq(ImmWord(uintptr_t(imm.value)), ScratchReg);
+ imulq(ScratchReg, dest.reg);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(Operand(src.reg), dest);
+}
+
+void
+MacroAssembler::mul64(const Operand& src, const Register64& dest)
+{
+ imulq(src, dest.reg);
+}
+
+void
+MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(src, dest);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movq(imm, ScratchReg);
+ vmulsd(Operand(ScratchReg, 0), dest, dest);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ if (X86Encoding::IsAddressImmediate(dest.addr)) {
+ addPtr(Imm32(1), dest);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(dest.addr), scratch);
+ addPtr(Imm32(1), Address(scratch, 0));
+ }
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
+ negq(reg.reg);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shlq(imm, dest);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ lshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 srcDest)
+{
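+ // Variable-count shifts on x86 take the count in cl, so the lowering must
+ // have pinned |shift| to rcx.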
+ MOZ_ASSERT(shift == rcx);
+ shlq_cl(srcDest.reg);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shrq(imm, dest);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ rshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == rcx);
+ shrq_cl(srcDest.reg);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ sarq(imm, dest);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ rshiftPtrArithmetic(imm, dest.reg);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == rcx);
+ sarq_cl(srcDest.reg);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rolq_cl(dest.reg);
+}
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rorq_cl(dest.reg);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rolq(count, dest.reg);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rorq(count, dest.reg);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ // On very recent chips (Haswell and newer) there is actually an
+ // LZCNT instruction that does all of this.
+
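+ // BSR yields the bit index of the highest set bit; for a zero input the
+ // result is undefined and ZF is set, so that case loads 0x7F instead.
+ // xor'ing with 0x3F then turns an index into 63 - index (the leading-zero
+ // count) and turns 0x7F into 64.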
+ Label nonzero;
+ bsrq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(0x7F), dest);
+ bind(&nonzero);
+ xorq(Imm32(0x3F), dest);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
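+ // BSF yields the bit index of the lowest set bit, which is exactly the
+ // trailing-zero count; a zero input sets ZF, in which case 64 is loaded
+ // explicitly.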
+ Label nonzero;
+ bsfq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(64), dest);
+ bind(&nonzero);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src64, Register64 dest64, Register tmp)
+{
+ Register src = src64.reg;
+ Register dest = dest64.reg;
+
+ if (AssemblerX86Shared::HasPOPCNT()) {
+ MOZ_ASSERT(tmp == InvalidReg);
+ popcntq(src, dest);
+ return;
+ }
+
+ if (src != dest)
+ movq(src, dest);
+
+ MOZ_ASSERT(tmp != dest);
+
+ ScratchRegisterScope scratch(*this);
+
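+ // Fallback: the classic parallel (SWAR) population count. Each step adds
+ // adjacent bit fields of width 1, 2 and then 4; the multiply by
+ // 0x0101010101010101 sums all byte counts into the top byte, which the
+ // final shift by 56 extracts.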
+ // Equivalent to mozilla::CountPopulation32, adapted for 64 bits.
+ // x -= (x >> 1) & m1;
+ movq(src, tmp);
+ movq(ImmWord(0x5555555555555555), scratch);
+ shrq(Imm32(1), tmp);
+ andq(scratch, tmp);
+ subq(tmp, dest);
+
+ // x = (x & m2) + ((x >> 2) & m2);
+ movq(dest, tmp);
+ movq(ImmWord(0x3333333333333333), scratch);
+ andq(scratch, dest);
+ shrq(Imm32(2), tmp);
+ andq(scratch, tmp);
+ addq(tmp, dest);
+
+ // x = (x + (x >> 4)) & m4;
+ movq(dest, tmp);
+ movq(ImmWord(0x0f0f0f0f0f0f0f0f), scratch);
+ shrq(Imm32(4), tmp);
+ addq(tmp, dest);
+ andq(scratch, dest);
+
+ // (x * h01) >> 56
+ movq(ImmWord(0x0101010101010101), scratch);
+ imulq(scratch, dest);
+ shrq(Imm32(56), dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ mov(lhs, scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ mov(lhs, scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ if (rhs != scratch)
+ movePtr(rhs, scratch);
+ // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
+ rshiftPtr(Imm32(1), scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttss2sq(src, dest);
+
+ // Same trick as for Doubles
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToPtr(src, dest, fail);
+ branch32(Assembler::Above, dest, Imm32(0xffffffff), fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttsd2sq(src, dest);
+
+ // vcvttsd2sq returns 0x8000000000000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this avoids the need to
+ // materialize that value in a register).
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToPtr(src, dest, fail);
+ branch32(Assembler::Above, dest, Imm32(0xffffffff), fail);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ test32(Operand(lhs), rhs);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ test32(Operand(scratch, 0), rhs);
+ }
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ test32(value.valueReg(), value.valueReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(valaddr, ImmWord(magic));
+ j(cond, label);
+}
+// ========================================================================
+// Truncate floating point.
+
+void
+MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+ // For unsigned conversion, values above INT64_MAX need to be handled separately.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
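+ // The value is >= 2^63: add -2^63 (as a float) to bring it into the signed
+ // range, truncate the shifted value, then OR bit 63 back into the result.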
+ storeFloat32(floatTemp, dest);
+ loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+ // For unsigned conversion, values above INT64_MAX need to be handled separately.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
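+ // The value is >= 2^63: add -2^63 (as a double) to bring it into the signed
+ // range, truncate the shifted value, then OR bit 63 back into the result.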
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+// ========================================================================
+// wasm support
+
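+// On x64, wasm memory accesses rely on a reserved guard region and signal
+// handling rather than explicit bounds checks, so these hooks are never
+// expected to be reached on this platform.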
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ MOZ_CRASH("x64 should never emit a bounds check");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ MOZ_CRASH("x64 should never emit a bounds check");
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssemblerX64::incrementInt32Value(const Address& addr)
+{
+ asMasm().addPtr(Imm32(1), addr);
+}
+
+void
+MacroAssemblerX64::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr());
+ }
+}
+
+template <typename T>
+void
+MacroAssemblerX64::loadInt32OrDouble(const T& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src, dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void
+MacroAssemblerX64::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+ Register tag = splitTagForTest(source);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+
+ ScratchRegisterScope scratch(asMasm());
+ unboxInt32(source, scratch);
+ convertInt32ToDouble(scratch, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_inl_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
new file mode 100644
index 000000000..9d8287824
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -0,0 +1,859 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MacroAssemblerX64::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ if (maybeInlineDouble(d, dest))
+ return;
+ Double* dbl = getDouble(d);
+ if (!dbl)
+ return;
+ // The constants will be stored in a pool appended to the text (see
+ // finish()), so they will always be a fixed distance from the
+ // instructions which reference them. This allows the instructions to use
+ // PC-relative addressing. Use "jump" label support code, because we need
+ // the same PC-relative address patching that jumps use.
+ JmpSrc j = masm.vmovsd_ripr(dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ if (maybeInlineFloat(f, dest))
+ return;
+ Float* flt = getFloat(f);
+ if (!flt)
+ return;
+ // See comment in loadConstantDouble
+ JmpSrc j = masm.vmovss_ripr(dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Int(v, dest))
+ return;
+ SimdData* val = getSimdData(v);
+ if (!val)
+ return;
+ JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
+{
+ loadConstantFloat32(wasm::RawF32(f), dest);
+}
+
+void
+MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
+{
+ loadConstantDouble(wasm::RawF64(d), dest);
+}
+
+void
+MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Float(v, dest))
+ return;
+ SimdData* val = getSimdData(v);
+ if (!val)
+ return;
+ JmpSrc j = masm.vmovaps_ripr(dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::convertInt64ToDouble(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ vcvtsq2sd(input.reg, output, output);
+}
+
+void
+MacroAssemblerX64::convertInt64ToFloat32(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ vcvtsq2ss(input.reg, output, output);
+}
+
+bool
+MacroAssemblerX64::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerX64::convertUInt64ToDouble(Register64 input, FloatRegister output, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ // If the input's sign bit is not set we use vcvtsq2sd directly.
+ // Else, we divide by 2, convert to double, and multiply the result by 2.
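+ // Halving first keeps the value non-negative so the signed conversion does
+ // not misinterpret the top bit; doubling afterwards restores the magnitude.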
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2sd(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(asMasm());
+ mov(input.reg, scratch);
+ shrq(Imm32(1), scratch);
+ vcvtsq2sd(scratch, output, output);
+ vaddsd(output, output, output);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerX64::convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ // If the input's sign bit is not set we use vcvtsq2ss directly.
+ // Else, we divide by 2, convert to float, and multiply the result by 2.
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2ss(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(asMasm());
+ mov(input.reg, scratch);
+ shrq(Imm32(1), scratch);
+ vcvtsq2ss(scratch, output, output);
+ vaddss(output, output, output);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerX64::wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ vcvttsd2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ vcvttss2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ // If the input is below 2^63, vcvttsd2sq does the right thing, so we use
+ // it directly. Otherwise we subtract 2^63, convert the shifted value to
+ // int64, and then set bit 63 in the result.
+
+ Label isLarge;
+
+ ScratchDoubleScope scratch(asMasm());
+ loadConstantDouble(double(0x8000000000000000), scratch);
+ asMasm().branchDouble(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttsd2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveDouble(input, tempReg);
+ vsubsd(scratch, tempReg, tempReg);
+ vcvttsd2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ asMasm().or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ // If the input is below 2^63, vcvttss2sq does the right thing, so we use
+ // it directly. Otherwise we subtract 2^63, convert the shifted value to
+ // int64, and then set bit 63 in the result.
+
+ Label isLarge;
+
+ ScratchFloat32Scope scratch(asMasm());
+ loadConstantFloat32(float(0x8000000000000000), scratch);
+ asMasm().branchFloat(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttss2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveFloat32(input, tempReg);
+ vsubss(scratch, tempReg, tempReg);
+ vcvttss2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ asMasm().or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
+{
+ for (CodeOffset use : uses) {
+ JmpDst dst(currentOffset());
+ JmpSrc src(use.offset());
+ // Using linkJump here is safe, as explained in the comment in
+ // loadConstantDouble.
+ masm.linkJump(src, dst);
+ }
+}
+
+void
+MacroAssemblerX64::finish()
+{
+ if (!doubles_.empty())
+ masm.haltingAlign(sizeof(double));
+ for (const Double& d : doubles_) {
+ bindOffsets(d.uses);
+ masm.int64Constant(d.value);
+ }
+
+ if (!floats_.empty())
+ masm.haltingAlign(sizeof(float));
+ for (const Float& f : floats_) {
+ bindOffsets(f.uses);
+ masm.int32Constant(f.value);
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty())
+ masm.haltingAlign(SimdMemoryAlignment);
+ for (const SimdData& v : simds_) {
+ bindOffsets(v.uses);
+ masm.simd128Constant(v.value.bytes());
+ }
+
+ MacroAssemblerX86Shared::finish();
+}
+
+void
+MacroAssemblerX64::boxValue(JSValueType type, Register src, Register dest)
+{
+ MOZ_ASSERT(src != dest);
+
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+#ifdef DEBUG
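+ // Int32 and boolean payloads must already have their upper 32 bits zeroed;
+ // otherwise the orq below would corrupt the tag. Assert that here.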
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsZeroed;
+ movePtr(ImmWord(UINT32_MAX), dest);
+ asMasm().branchPtr(Assembler::BelowOrEqual, src, dest, &upper32BitsZeroed);
+ breakpoint();
+ bind(&upper32BitsZeroed);
+ }
+#endif
+ mov(ImmShiftedTag(tag), dest);
+ orq(src, dest);
+}
+
+void
+MacroAssemblerX64::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ subq(Imm32(sizeof(ResumeFromException)), rsp);
+ movq(rsp, rax);
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(rcx);
+ asMasm().passABIArg(rax);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ loadPtr(Address(rsp, offsetof(ResumeFromException, kind)), rax);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ jmp(Operand(rax));
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(rcx);
+ loadValue(Address(esp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jmp(Operand(rax));
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+ bind(&return_);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ loadValue(Address(rbp, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
+ movq(rbp, rsp);
+ pop(rbp);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(esp, offsetof(ResumeFromException, bailoutInfo)), r9);
+ mov(ImmWord(BAILOUT_RETURN_OK), rax);
+ jmp(Operand(rsp, offsetof(ResumeFromException, target)));
+}
+
+void
+MacroAssemblerX64::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerX64::profilerExitFrame()
+{
+ jmp(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler&
+MacroAssemblerX64::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerX64::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value) {
+ // On Windows, we cannot skip very far down the stack without touching the
+ // memory pages in between. This is corner-case code for situations where the
+ // Ion frame data for a piece of code is very large. To handle this special
+ // case, for frames over 4096 bytes in size we allocate stack memory
+ // incrementally, touching it as we go.
+ uint32_t amountLeft = imm32.value;
+ while (amountLeft > 4096) {
+ subq(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subq(Imm32(amountLeft), StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
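+ // Save the current stack pointer, align the stack for the ABI, and push the
+ // saved pointer so callWithABIPost can restore it with pop(rsp).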
+ movq(rsp, scratch);
+ andq(Imm32(~(ABIStackAlignment - 1)), rsp);
+ push(scratch);
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ static_assert(sizeof(wasm::Frame) % ABIStackAlignment == 0,
+ "wasm::Frame should be part of the stack alignment.");
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ freeStack(stackAdjust);
+ if (dynamicAlignment_)
+ pop(rsp);
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+static bool
+IsIntArgReg(Register reg)
+{
+ for (uint32_t i = 0; i < NumIntArgRegs; i++) {
+ if (IntArgRegs[i] == reg)
+ return true;
+ }
+
+ return false;
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ if (IsIntArgReg(fun)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10),
+ MoveOp::GENERAL));
+ fun = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(fun));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ Address safeFun = fun;
+ if (IsIntArgReg(safeFun.base)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10),
+ MoveOp::GENERAL));
+ safeFun.base = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(safeFun.base));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(safeFun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != scratch);
+
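+ // Round the pointer up to the last byte of its chunk; the chunk's location
+ // field sits at a fixed offset from that byte and records whether the chunk
+ // is in the nursery.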
+ movePtr(ptr, scratch);
+ orPtr(Imm32(gc::ChunkMask), scratch);
+ branch32(cond, Address(scratch, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, address, temp, label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ Label done;
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+
+ extractObject(value, temp);
+ orPtr(Imm32(gc::ChunkMask), temp);
+ branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, scratch);
+ cmpPtr(lhs.valueReg(), scratch);
+ j(cond, label);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // For known integers and booleans, we can just store the unboxed value if
+ // the slot has the same type.
+ if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
+ if (value.constant()) {
+ Value val = value.value();
+ if (valueType == MIRType::Int32)
+ store32(Imm32(val.toInt32()), dest);
+ else
+ store32(Imm32(val.toBoolean() ? 1 : 0), dest);
+ } else {
+ store32(value.reg().typedReg().gpr(), dest);
+ }
+ return;
+ }
+
+ if (value.constant())
+ storeValue(value.value(), dest);
+ else
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+// ========================================================================
+// wasm support
+
+void
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ movzbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Int16:
+ movswl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ movzwl(srcAddr, out.gpr());
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ loadFloat32(srcAddr, out.fpu());
+ break;
+ case Scalar::Float64:
+ loadDouble(srcAddr, out.fpu());
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movss zeroes out the high lanes.
+ case 1: loadFloat32(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movsd.
+ case 2: loadDouble(srcAddr, out.fpu()); break;
+ case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movd zeroes out the high lanes.
+ case 1: vmovd(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovq(srcAddr, out.fpu()); break;
+ case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+ loadUnalignedSimd128Int(srcAddr, out.fpu());
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+ loadUnalignedSimd128Int(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("int64 loads must use load64");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, loadOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint8:
+ movzbq(srcAddr, out.reg);
+ break;
+ case Scalar::Int16:
+ movswq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint16:
+ movzwq(srcAddr, out.reg);
+ break;
+ case Scalar::Int32:
+ movslq(srcAddr, out.reg);
+ break;
+ // 32-bit moves zero-extend into the full 64-bit register by default.
+ case Scalar::Uint32:
+ movl(srcAddr, out.reg);
+ break;
+ case Scalar::Int64:
+ movq(srcAddr, out.reg);
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, loadOffset, framePushed());
+}
+
+void
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t storeOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ movb(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movw(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int64:
+ movq(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ storeUncanonicalizedFloat32(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ storeUncanonicalizedDouble(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // movss stores only the low lane.
+ case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
+ // movsd stores only the low double.
+ case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
+ case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
+ default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // movd stores only the low 32 bits.
+ case 1: vmovd(value.fpu(), dstAddr); break;
+ // movq stores only the low 64 bits.
+ case 2: vmovq(value.fpu(), dstAddr); break;
+ case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
+ default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+ storeUnalignedSimd128Int(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+ storeUnalignedSimd128Int(value.fpu(), dstAddr);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, storeOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ vcvttsd2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
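+ // vcvttsd2sq yields INT64_MIN on overflow or NaN; as an unsigned quantity
+ // that also exceeds 0xffffffff, so one unsigned comparison catches both
+ // out-of-range values and conversion failures.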
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ vcvttss2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x64/MacroAssembler-x64.h b/js/src/jit/x64/MacroAssembler-x64.h
new file mode 100644
index 000000000..cb81bd7c1
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -0,0 +1,966 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_h
+#define jit_x64_MacroAssembler_x64_h
+
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+struct ImmShiftedTag : public ImmWord
+{
+ explicit ImmShiftedTag(JSValueShiftedTag shtag)
+ : ImmWord((uintptr_t)shtag)
+ { }
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+ { }
+};
+
+struct ImmTag : public Imm32
+{
+ explicit ImmTag(JSValueTag tag)
+ : Imm32(tag)
+ { }
+};
+
+class MacroAssemblerX64 : public MacroAssemblerX86Shared
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ void bindOffsets(const MacroAssemblerX86Shared::UsesVector&);
+
+ public:
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store32;
+ using MacroAssemblerX86Shared::store16;
+
+ MacroAssemblerX64()
+ {
+ }
+
+ // The buffer is about to be linked; make sure any constant pools or excess
+ // bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X64 helpers.
+ /////////////////////////////////////////////////////////////////
+ void writeDataRelocation(const Value& val) {
+ if (val.isMarkable()) {
+ gc::Cell* cell = val.toMarkablePointer();
+ if (cell && gc::IsInsideNursery(cell))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(masm.currentOffset());
+ }
+ }
+
+ // Refers to the upper 32 bits of a 64-bit Value operand.
+ // On x86_64, the upper 32 bits do not necessarily only contain the type.
+ Operand ToUpper32(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()), base.disp() + 4);
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()), Register::FromCode(base.index()),
+ base.scale(), base.disp() + 4);
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ static inline Operand ToUpper32(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ static inline Operand ToUpper32(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset + 4);
+ }
+
+ uint32_t Upper32Of(JSValueShiftedTag tag) {
+ union { // Implemented in this way to appease MSVC++.
+ uint64_t tag;
+ struct {
+ uint32_t lo32;
+ uint32_t hi32;
+ } s;
+ } e;
+ e.tag = tag;
+ return e.s.hi32;
+ }
+
+ JSValueShiftedTag GetShiftedTag(JSValueType type) {
+ return (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ Address ToPayload(Address value) {
+ return value;
+ }
+
+ void storeValue(ValueOperand val, Operand dest) {
+ movq(val.valueReg(), dest);
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ // Value types with 32-bit payloads can be emitted as two 32-bit moves.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movl(reg, Operand(dest));
+ movl(Imm32(Upper32Of(GetShiftedTag(type))), ToUpper32(Operand(dest)));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ movq(scratch, Operand(dest));
+ }
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ if (val.isMarkable()) {
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ } else {
+ mov(ImmWord(val.asRawBits()), scratch);
+ }
+ movq(scratch, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ movq(src, val.valueReg());
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.valueReg() != scratch);
+ if (payload != dest.valueReg())
+ movq(payload, dest.valueReg());
+ mov(ImmShiftedTag(type), scratch);
+ orq(scratch, dest.valueReg());
+ }
+ void pushValue(ValueOperand val) {
+ push(val.valueReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.valueReg());
+ }
+ void pushValue(const Value& val) {
+ if (val.isMarkable()) {
+ ScratchRegisterScope scratch(asMasm());
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ push(scratch);
+ }
+ void pushValue(const Address& addr) {
+ push(Operand(addr));
+ }
+
+ void moveValue(const Value& val, Register dest) {
+ movWithPatch(ImmWord(val.asRawBits()), dest);
+ writeDataRelocation(val);
+ }
+ void moveValue(const Value& src, const ValueOperand& dest) {
+ moveValue(src, dest.valueReg());
+ }
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ if (src.valueReg() != dest.valueReg())
+ movq(src.valueReg(), dest.valueReg());
+ }
+ void boxValue(JSValueType type, Register src, Register dest);
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
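+ // The tag constants are ordered so whole groups of types can be tested with
+ // a single unsigned comparison: doubles use every tag up to
+ // JSVAL_TAG_MAX_DOUBLE, numbers end at JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET,
+ // and GC things start at JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET.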
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testString(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+
+
+ Condition testUndefined(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_UNDEFINED))));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_INT32))));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_BOOLEAN))));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_NULL))));
+ return cond;
+ }
+ Condition testString(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+
+
+ Condition testUndefined(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testNull(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testString(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testObject(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+
+ Condition isMagic(Condition cond, const ValueOperand& src, JSWhyMagic why) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(src.valueReg(), ImmWord(magic));
+ return cond;
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
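+        // cmpq only accepts a sign-extended 32-bit immediate; larger
+        // constants have to go through the scratch register.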
+ if (intptr_t(rhs.value) <= INT32_MAX && intptr_t(rhs.value) >= INT32_MIN) {
+ cmpPtr(lhs, Imm32(int32_t(rhs.value)));
+ } else {
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(Register lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(Register lhs, const Imm32 rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(!lhs.containsReg(scratch));
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ if ((intptr_t)rhs.value <= INT32_MAX && (intptr_t)rhs.value >= INT32_MIN) {
+ cmpPtr(lhs, Imm32((int32_t)rhs.value));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Operand& lhs, Register rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(Register lhs, const Operand& rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, const Imm32 rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Address& lhs, Register rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ cmpq(rhs, lhs);
+ }
+ void testPtr(Register lhs, Register rhs) {
+ testq(rhs, lhs);
+ }
+ void testPtr(Register lhs, Imm32 rhs) {
+ testq(rhs, lhs);
+ }
+ void testPtr(const Operand& lhs, Imm32 rhs) {
+ testq(rhs, lhs);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+ JmpSrc src = jmpSrc(label);
+ return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond,
+ Label* documentation = nullptr)
+ {
+ JmpSrc src = jSrc(cond, label);
+ return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
+ }
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
+ return jumpWithPatch(label);
+ }
+
+ void movePtr(Register src, Register dest) {
+ movq(src, dest);
+ }
+ void movePtr(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void movePtr(ImmWord imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ movq(imm, dest);
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ loadPtr(Address(scratch, 0x0), dest);
+ }
+ }
+ void loadPtr(const Address& address, Register dest) {
+ movq(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) {
+ movq(src, dest);
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movq(Operand(src), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ loadPtr(src, dest);
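+        // Private pointers are stored shifted right by one bit; shifting
+        // left restores the original (even) pointer.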
+ shlq(Imm32(1), dest);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ load32(Address(scratch, 0x0), dest);
+ }
+ }
+ void load64(const Address& address, Register64 dest) {
+ movq(Operand(address), dest.reg);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ movq(Imm32((int32_t)imm.value), Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ ScratchRegisterScope scratch(asMasm());
+ movq(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movq(src, Operand(address));
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movq(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void storePtr(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ storePtr(src, Address(scratch, 0x0));
+ }
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store32(src, Address(scratch, 0x0));
+ }
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movw(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store16(src, Address(scratch, 0x0));
+ }
+ }
+ void store64(Register64 src, Address address) {
+ storePtr(src.reg, address);
+ }
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void splitTag(Register src, Register dest) {
+ if (src != dest)
+ movq(src, dest);
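+        // The tag occupies the upper bits of the boxed Value; shifting right
+        // by JSVAL_TAG_SHIFT leaves only the tag in |dest|.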
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+ void splitTag(const Operand& operand, Register dest) {
+ movq(operand, dest);
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const Address& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
+ void splitTag(const BaseIndex& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
+
+ // Extracts the tag of a value and places it in ScratchReg.
+ Register splitTagForTest(const ValueOperand& value) {
+ splitTag(value, ScratchReg);
+ return ScratchReg;
+ }
+ void cmpTag(const ValueOperand& operand, ImmTag tag) {
+ Register reg = splitTagForTest(operand);
+ cmp32(reg, tag);
+ }
+
+ Condition testMagic(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testError(Condition cond, const ValueOperand& src) {
+ return testMagic(cond, src);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest) {
+ vmovq(src, dest.valueReg());
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+ }
+
+    // Note that the |dest| register here may be ScratchReg, so these helpers
+    // must not use the scratch register themselves.
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxInt32(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxInt32(Operand(src), dest);
+ }
+ void unboxDouble(const Address& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+
+ void unboxArgObjMagic(const ValueOperand& src, Register dest) {
+ unboxArgObjMagic(Operand(src.valueReg()), dest);
+ }
+ void unboxArgObjMagic(const Operand& src, Register dest) {
+ mov(ImmWord(0), dest);
+ }
+ void unboxArgObjMagic(const Address& src, Register dest) {
+ unboxArgObjMagic(Operand(src), dest);
+ }
+
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxBoolean(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxBoolean(Operand(src), dest);
+ }
+
+ void unboxMagic(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ vmovq(src.valueReg(), dest);
+ }
+ void unboxPrivate(const ValueOperand& src, const Register dest) {
+ movq(src.valueReg(), dest);
+ shlq(Imm32(1), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ xorq(Imm32(1), val.valueReg());
+ }
+
+ // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
+ // instead if the source type is known.
+ void unboxNonDouble(const ValueOperand& src, Register dest) {
+ if (src.valueReg() == dest) {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
+ andq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
+ andq(src.valueReg(), dest);
+ }
+ }
+ void unboxNonDouble(const Operand& src, Register dest) {
+ // Explicitly permits |dest| to be used in |src|.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest != scratch);
+ if (src.containsReg(dest)) {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
+ // If src is already a register, then src and dest are the same
+ // thing and we don't need to move anything into dest.
+ if (src.kind() != Operand::REG)
+ movq(src, dest);
+ andq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
+ andq(src, dest);
+ }
+ }
+
+ void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+
+ void unboxSymbol(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+
+ void unboxObject(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Address& src, Register dest) { unboxNonDouble(Operand(src), dest); }
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(Operand(src), dest); }
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(address, scratch);
+ return scratch;
+ }
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ Register extractTag(const Address& address, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ loadPtr(address, scratch);
+ splitTag(scratch, scratch);
+ return scratch;
+ }
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest);
+
+    // These functions use the low 32 bits of the full value register.
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+
+ void convertInt64ToDouble(Register64 input, FloatRegister output);
+ void convertInt64ToFloat32(Register64 input, FloatRegister output);
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 input, FloatRegister output, Register temp);
+ void convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp);
+
+ void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ CodeOffset label = loadRipRelativeInt64(dest);
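+        // Record the patch location so the RIP-relative displacement can be
+        // fixed up to point at the module's global data when the code is
+        // linked.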
+ append(wasm::GlobalAccess(label, globalDataOffset));
+ }
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ }
+
+ public:
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.valueReg(), operand.valueReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value) {
+ ScratchRegisterScope scratch(asMasm());
+ unboxString(value, scratch);
+ cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+ }
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(src, dest.fpu());
+ else if (type == MIRType::Int32 || type == MIRType::Boolean)
+ movl(Operand(src), dest.gpr());
+ else
+ unboxNonDouble(Operand(src), dest.gpr());
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 8: {
+ ScratchRegisterScope scratch(asMasm());
+ unboxNonDouble(value, scratch);
+ storePtr(scratch, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void loadInstructionPointerAfterCall(Register dest) {
+ loadPtr(Address(StackPointer, 0x0), dest);
+ }
+
+ void convertUInt32ToDouble(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
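+        // The 32-bit value is zero-extended in the 64-bit register, so a
+        // signed 64-bit to double conversion yields the exact unsigned
+        // 32-bit result.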
+ vcvtsq2sd(src, dest, dest);
+ }
+
+ void convertUInt32ToFloat32(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ vcvtsq2ss(src, dest, dest);
+ }
+
+ inline void incrementInt32Value(const Address& addr);
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ public:
+ void handleFailureWithHandlerTail(void* handler);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX64 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_h */
diff --git a/js/src/jit/x64/SharedIC-x64.cpp b/js/src/jit/x64/SharedIC-x64.cpp
new file mode 100644
index 000000000..9e5898c3d
--- /dev/null
+++ b/js/src/jit/x64/SharedIC-x64.cpp
@@ -0,0 +1,234 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // The scratch register is only used in the case of JSOP_URSH.
+ mozilla::Maybe<ScratchRegisterScope> scratch;
+
+ Label revertRegister, maybeNegZero;
+    switch (op_) {
+ case JSOP_ADD:
+ masm.unboxInt32(R0, ExtractTemp0);
+        // On overflow, jump to the failure path; R0 and R1 are preserved, so
+        // the next stub can simply be tried.
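+        // addl only reads the low 32 bits of R1's value register, which hold
+        // the int32 payload, so R1 does not need a separate unbox.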
+ masm.addl(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+
+ // Box the result
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_SUB:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.subl(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_MUL:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.imull(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &maybeNegZero);
+
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_DIV:
+ {
+ MOZ_ASSERT(R2.scratchReg() == rax);
+ MOZ_ASSERT(R0.valueReg() != rdx);
+ MOZ_ASSERT(R1.valueReg() != rdx);
+ masm.unboxInt32(R0, eax);
+ masm.unboxInt32(R1, ExtractTemp0);
+
+ // Prevent division by 0.
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);
+
+ // Prevent negative 0 and -2147483648 / -1.
+ masm.branch32(Assembler::Equal, eax, Imm32(INT32_MIN), &failure);
+
+ Label notZero;
+ masm.branch32(Assembler::NotEqual, eax, Imm32(0), &notZero);
+ masm.branchTest32(Assembler::Signed, ExtractTemp0, ExtractTemp0, &failure);
+ masm.bind(&notZero);
+
+        // Sign extend eax into edx to form (edx:eax), since idiv divides the
+        // 64-bit dividend in edx:eax.
+ masm.cdq();
+ masm.idiv(ExtractTemp0);
+
+ // A remainder implies a double result.
+ masm.branchTest32(Assembler::NonZero, edx, edx, &failure);
+
+ masm.boxValue(JSVAL_TYPE_INT32, eax, R0.valueReg());
+ break;
+ }
+ case JSOP_MOD:
+ {
+ MOZ_ASSERT(R2.scratchReg() == rax);
+ MOZ_ASSERT(R0.valueReg() != rdx);
+ MOZ_ASSERT(R1.valueReg() != rdx);
+ masm.unboxInt32(R0, eax);
+ masm.unboxInt32(R1, ExtractTemp0);
+
+ // x % 0 always results in NaN.
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);
+
+ // Prevent negative 0 and -2147483648 % -1.
+ masm.branchTest32(Assembler::Zero, eax, Imm32(0x7fffffff), &failure);
+
+        // Sign extend eax into edx to form (edx:eax), since idiv divides the
+        // 64-bit dividend in edx:eax.
+ masm.cdq();
+ masm.idiv(ExtractTemp0);
+
+ // Fail when we would need a negative remainder.
+ Label done;
+ masm.branchTest32(Assembler::NonZero, edx, edx, &done);
+ masm.orl(ExtractTemp0, eax);
+ masm.branchTest32(Assembler::Signed, eax, eax, &failure);
+
+ masm.bind(&done);
+ masm.boxValue(JSVAL_TYPE_INT32, edx, R0.valueReg());
+ break;
+ }
+ case JSOP_BITOR:
+        // We can overwrite R0, because the instruction is infallible.
+ // Because the tag bits are the same, we don't need to retag.
+ masm.orq(R1.valueReg(), R0.valueReg());
+ break;
+ case JSOP_BITXOR:
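+        // XOR-ing the full 64-bit values would cancel the identical tag
+        // bits, so operate on the low 32 bits and retag the result.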
+ masm.xorl(R1.valueReg(), R0.valueReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
+ break;
+ case JSOP_BITAND:
+ masm.andq(R1.valueReg(), R0.valueReg());
+ break;
+ case JSOP_LSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+        masm.unboxInt32(R1, ecx); // Unboxing R1 into ecx clobbers R0 (rcx).
+ masm.shll_cl(ExtractTemp0);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_RSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ecx);
+ masm.sarl_cl(ExtractTemp0);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_URSH:
+ if (!allowDouble_) {
+ scratch.emplace(masm);
+ masm.movq(R0.valueReg(), *scratch);
+ }
+
+ masm.unboxInt32(R0, ExtractTemp0);
+        masm.unboxInt32(R1, ecx); // This clobbers R0 (rcx).
+
+ masm.shrl_cl(ExtractTemp0);
+ masm.test32(ExtractTemp0, ExtractTemp0);
+ if (allowDouble_) {
+ Label toUint;
+ masm.j(Assembler::Signed, &toUint);
+
+ // Box and return.
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ ScratchDoubleScope scratchDouble(masm);
+ masm.convertUInt32ToDouble(ExtractTemp0, scratchDouble);
+ masm.boxDouble(scratchDouble, R0);
+ } else {
+ masm.j(Assembler::Signed, &revertRegister);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in BinaryArith_Int32");
+ }
+
+ // Return from stub.
+ EmitReturnFromIC(masm);
+
+ if (op_ == JSOP_MUL) {
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.movl(R0.valueReg(), scratch);
+ masm.orl(R1.valueReg(), scratch);
+ masm.j(Assembler::Signed, &failure);
+ }
+
+ // Result is +0.
+ masm.moveValue(Int32Value(0), R0);
+ EmitReturnFromIC(masm);
+ }
+
+ // Revert the content of R0 in the fallible >>> case.
+ if (op_ == JSOP_URSH && !allowDouble_) {
+ // Scope continuation from JSOP_URSH case above.
+ masm.bind(&revertRegister);
+ // Restore tag and payload.
+ masm.movq(*scratch, R0.valueReg());
+ // Fall through to failure.
+ }
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.notl(R0.valueReg());
+ break;
+ case JSOP_NEG:
+        // Guard against 0 and INT32_MIN: negating either would require a
+        // double result (-0 or 2147483648).
+ masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure);
+ masm.negl(R0.valueReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x64/SharedICHelpers-x64.h b/js/src/jit/x64/SharedICHelpers-x64.h
new file mode 100644
index 000000000..b59d05ddc
--- /dev/null
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -0,0 +1,352 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICHelpers_x64_h
+#define jit_x64_SharedICHelpers_x64_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from the stack top to the topmost Value inside an IC stub (the
+// return address sits in between).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ masm.Pop(ICTailCallReg);
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ masm.Push(ICTailCallReg);
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) monitorStubOffset), ICStubReg);
+
+ // Jump to the stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.ret();
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.storePtr(reg, Address(StackPointer, 0));
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ ScratchRegisterScope scratch(masm);
+
+    // We can assume that R0 and R1 have already been pushed at this point.
+ masm.movq(BaselineFrameReg, scratch);
+ masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subq(BaselineStackReg, scratch);
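+    // scratch now holds the frame size: the distance from the Baseline frame
+    // pointer (plus FramePointerOffset) down to the current stack pointer,
+    // including the VMFunction arguments already pushed.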
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.movq(scratch, rdx);
+ masm.subq(Imm32(argSize), rdx);
+ masm.store32(rdx, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ // For tail calls, find the already pushed JitFrame_IonJS signifying the
+ // end of the Ion frame. Retrieve the length of the frame and repush
+ // JitFrame_IonJS with the extra stacksize, rendering the original
+ // JitFrame_IonJS obsolete.
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.loadPtr(Address(esp, stackSize), scratch);
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addq(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and previous
+ // frame pointer pushed by EmitEnterStubFrame.
+ masm.movq(BaselineFrameReg, reg);
+ masm.addq(Imm32(sizeof(void*) * 2), reg);
+ masm.subq(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ ScratchRegisterScope scratch(masm);
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+    // Stubs often use the return address, which is actually accounted for by
+    // the caller of the stub. In the stub code we pretend it is part of the
+    // stub frame so that it can be popped; to avoid counting it twice, we
+    // subtract it here.
+ uint32_t framePushed = masm.framePushed() - sizeof(void*);
+
+ uint32_t descriptor = MakeFrameDescriptor(framePushed, JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.call(target);
+
+    // Remove the rest of the frame left on the stack, minus the return
+    // address, which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
+// Size of values pushed by EmitEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register)
+{
+ EmitRestoreTailCallReg(masm);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Compute frame size.
+ masm.movq(BaselineFrameReg, scratch);
+ masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subq(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE
+ // if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(ICStubReg);
+ masm.Push(BaselineFrameReg);
+ masm.mov(BaselineStackReg, BaselineFrameReg);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register)
+{
+ masm.loadPtr(Address(masm.getStackPointer(), 0), ICTailCallReg);
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ ScratchRegisterScope scratch(masm);
+ masm.Pop(scratch);
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addq(scratch, BaselineStackReg);
+ } else {
+ masm.mov(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.Pop(BaselineFrameReg);
+ masm.Pop(ICStubReg);
+
+ // Pop return address.
+ masm.Pop(ICTailCallReg);
+
+ // Overwrite frame descriptor with return address, so that the stack matches
+ // the state before entering the stub frame.
+ masm.storePtr(ICTailCallReg, Address(BaselineStackReg, 0));
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+    switch (values) {
+ case 1:
+ // Stow R0
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.push(ICTailCallReg);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+    switch (values) {
+ case 1:
+ // Unstow R0
+ masm.pop(ICTailCallReg);
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Unstow R0 and R1
+ masm.pop(ICTailCallReg);
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ masm.push(ICTailCallReg);
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from stack top, excluding the return address.
+
+ // Save the current ICStubReg to stack
+ masm.push(ICStubReg);
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+
+ // Restore the old stub reg.
+ masm.pop(ICStubReg);
+
+ // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
+ // value in R0 type-checked properly or not.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ masm.patchableCallPreBarrier(addr, type);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICHelpers_x64_h */
diff --git a/js/src/jit/x64/SharedICRegisters-x64.h b/js/src/jit/x64/SharedICRegisters-x64.h
new file mode 100644
index 000000000..75f62ccb1
--- /dev/null
+++ b/js/src/jit/x64/SharedICRegisters-x64.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICRegisters_x64_h
+#define jit_x64_SharedICRegisters_x64_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = rbp;
+static constexpr Register BaselineStackReg = rsp;
+
+static constexpr ValueOperand R0(rcx);
+static constexpr ValueOperand R1(rbx);
+static constexpr ValueOperand R2(rax);
+
+static constexpr Register ICTailCallReg = rsi;
+static constexpr Register ICStubReg = rdi;
+
+static constexpr Register ExtractTemp0 = r14;
+static constexpr Register ExtractTemp1 = r15;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICRegisters_x64_h */
diff --git a/js/src/jit/x64/Trampoline-x64.cpp b/js/src/jit/x64/Trampoline-x64.cpp
new file mode 100644
index 000000000..aebacdd1c
--- /dev/null
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -0,0 +1,1303 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "jit/x64/SharedICHelpers-x64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsPowerOfTwo;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard x64 fastcall
+// calling convention.
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ MacroAssembler masm(cx);
+ masm.assertStackAlignment(ABIStackAlignment, -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ MOZ_ASSERT(OsrFrameReg == IntArgReg3);
+
+#if defined(_WIN64)
+ const Address token = Address(rbp, 16 + ShadowStackSpace);
+ const Operand scopeChain = Operand(rbp, 24 + ShadowStackSpace);
+ const Operand numStackValuesAddr = Operand(rbp, 32 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 40 + ShadowStackSpace);
+#else
+ const Register token = IntArgReg4;
+ const Register scopeChain = IntArgReg5;
+ const Operand numStackValuesAddr = Operand(rbp, 16 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 24 + ShadowStackSpace);
+#endif
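+    // On Win64 only four integer argument registers are available, so the
+    // remaining EnterJitCode arguments are read from the caller's stack above
+    // the shadow space; on System V the fifth and sixth arguments still
+    // arrive in registers.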
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(rbp);
+ masm.mov(rsp, rbp);
+
+ // Save non-volatile registers. These must be saved by the trampoline, rather
+ // than by the JIT'd code, because they are scanned by the conservative scanner.
+ masm.push(rbx);
+ masm.push(r12);
+ masm.push(r13);
+ masm.push(r14);
+ masm.push(r15);
+#if defined(_WIN64)
+ masm.push(rdi);
+ masm.push(rsi);
+
+    // 16-byte alignment for vmovdqa.
+ masm.subq(Imm32(16 * 10 + 8), rsp);
+
+ masm.vmovdqa(xmm6, Operand(rsp, 16 * 0));
+ masm.vmovdqa(xmm7, Operand(rsp, 16 * 1));
+ masm.vmovdqa(xmm8, Operand(rsp, 16 * 2));
+ masm.vmovdqa(xmm9, Operand(rsp, 16 * 3));
+ masm.vmovdqa(xmm10, Operand(rsp, 16 * 4));
+ masm.vmovdqa(xmm11, Operand(rsp, 16 * 5));
+ masm.vmovdqa(xmm12, Operand(rsp, 16 * 6));
+ masm.vmovdqa(xmm13, Operand(rsp, 16 * 7));
+ masm.vmovdqa(xmm14, Operand(rsp, 16 * 8));
+ masm.vmovdqa(xmm15, Operand(rsp, 16 * 9));
+#endif
+
+ // Save arguments passed in registers needed after function call.
+ masm.push(result);
+
+ // Remember stack depth without padding and arguments.
+ masm.mov(rsp, r14);
+
+    // Remember the number of bytes occupied by the argument vector.
+ masm.mov(reg_argc, r13);
+
+    // If we are constructing, the argument vector also includes |newTarget|.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, token, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.addq(Imm32(1), r13);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.shll(Imm32(3), r13); // r13 = argc * sizeof(Value)
+ static_assert(sizeof(Value) == 1 << 3, "Constant is baked in assembly code");
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.mov(rsp, r12);
+ masm.subq(r13, r12);
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ masm.andl(Imm32(JitStackAlignment - 1), r12);
+ masm.subq(r12, rsp);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // r13 still stores the number of bytes in the argument vector.
+ masm.addq(reg_argv, r13); // r13 points above last argument or newTarget
+
+    // While r13 > reg_argv, push arguments.
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmpPtr(r13, reg_argv);
+ masm.j(AssemblerX86Shared::BelowOrEqual, &footer);
+
+ masm.subq(Imm32(8), r13);
+ masm.push(Operand(r13, 0));
+ masm.jmp(&header);
+
+ masm.bind(&footer);
+ }
+
+ // Push the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.movq(result, reg_argc);
+ masm.unboxInt32(Operand(reg_argc, 0), reg_argc);
+ masm.push(reg_argc);
+
+ // Push the callee token.
+ masm.push(token);
+
+ /*****************************************************************
+ Push the number of bytes we've pushed so far on the stack and call
+ *****************************************************************/
+ masm.subq(rsp, r14);
+
+ // Create a frame descriptor.
+ masm.makeFrameDescriptor(r14, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(r14);
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(OsrFrameReg);
+ regs.take(rbp);
+ regs.take(reg_code);
+
+ // Ensure that |scratch| does not end up being JSReturnOperand.
+ // Do takeUnchecked because on Win64/x64, reg_code (IntArgReg0) and JSReturnOperand are
+ // the same (rcx). See bug 849398.
+ regs.takeUnchecked(JSReturnOperand);
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.movq(numStackValuesAddr, numStackValues);
+
+ // Push return address
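+        // returnLabel.patchAt() is a placeholder; the actual address of the
+        // return point is patched in when the code labels are added below.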
+ masm.mov(returnLabel.patchAt(), scratch);
+ masm.push(scratch);
+
+ // Push previous frame pointer.
+ masm.push(rbp);
+
+ // Reserve frame.
+ Register framePtr = rbp;
+ masm.subPtr(Imm32(BaselineFrame::Size()), rsp);
+ masm.mov(rsp, framePtr);
+
+#ifdef XP_WIN
+ // Can't push large frames blindly on windows. Touch frame memory incrementally.
+ masm.mov(numStackValues, scratch);
+ masm.lshiftPtr(Imm32(3), scratch);
+ masm.subPtr(scratch, framePtr);
+ {
+ masm.movePtr(rsp, scratch);
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ masm.bind(&touchFrameLoop);
+ masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
+ masm.store32(Imm32(0), Address(scratch, 0));
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+ masm.jump(&touchFrameLoop);
+ masm.bind(&touchFrameLoopEnd);
+ }
+ masm.mov(rsp, framePtr);
+#endif
+
+ // Reserve space for locals and stack values.
+ Register valuesSize = regs.takeAny();
+ masm.mov(numStackValues, valuesSize);
+ masm.shll(Imm32(3), valuesSize);
+ masm.subPtr(valuesSize, rsp);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), valuesSize);
+ masm.makeFrameDescriptor(valuesSize, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(valuesSize);
+ masm.push(Imm32(0)); // Fake return address.
+ // No GC things to mark, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ regs.add(valuesSize);
+
+ masm.push(framePtr);
+ masm.push(reg_code);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtr); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ masm.pop(reg_code);
+ masm.pop(framePtr);
+
+ MOZ_ASSERT(reg_code != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), rsp);
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.lea(Operand(framePtr, sizeof(void*)), realFramePtr);
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(reg_code);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.mov(framePtr, rsp);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), rsp);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.mov(oomReturnLabel.patchAt(), scratch);
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ masm.movq(scopeChain, R1.scratchReg());
+ }
+
+    // The call will push the return address on the stack, so check that the
+    // stack will be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call function.
+ masm.callJitNoProfiler(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.use(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.use(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments and padding from stack.
+ masm.pop(r14); // Pop and decode descriptor.
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), r14);
+ masm.addq(r14, rsp); // Remove arguments.
+
+ /*****************************************************************
+ Place return value where it belongs, pop all saved registers
+ *****************************************************************/
+ masm.pop(r12); // vp
+ masm.storeValue(JSReturnOperand, Operand(r12, 0));
+
+ // Restore non-volatile registers.
+#if defined(_WIN64)
+ masm.vmovdqa(Operand(rsp, 16 * 0), xmm6);
+ masm.vmovdqa(Operand(rsp, 16 * 1), xmm7);
+ masm.vmovdqa(Operand(rsp, 16 * 2), xmm8);
+ masm.vmovdqa(Operand(rsp, 16 * 3), xmm9);
+ masm.vmovdqa(Operand(rsp, 16 * 4), xmm10);
+ masm.vmovdqa(Operand(rsp, 16 * 5), xmm11);
+ masm.vmovdqa(Operand(rsp, 16 * 6), xmm12);
+ masm.vmovdqa(Operand(rsp, 16 * 7), xmm13);
+ masm.vmovdqa(Operand(rsp, 16 * 8), xmm14);
+ masm.vmovdqa(Operand(rsp, 16 * 9), xmm15);
+
+ masm.addq(Imm32(16 * 10 + 8), rsp);
+
+ masm.pop(rsi);
+ masm.pop(rdi);
+#endif
+ masm.pop(r15);
+ masm.pop(r14);
+ masm.pop(r13);
+ masm.pop(r12);
+ masm.pop(rbx);
+
+ // Restore frame pointer and return.
+ masm.pop(rbp);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ AutoJitContextAlloc ajca(cx);
+ MacroAssembler masm(cx);
+
+ // See explanatory comment in x86's JitRuntime::generateInvalidator.
+
+ masm.addq(Imm32(sizeof(uintptr_t)), rsp);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ masm.movq(rsp, rax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's frameSize outparam.
+ masm.reserveStack(sizeof(size_t));
+ masm.movq(rsp, rbx);
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, r9);
+
+ masm.setupUnalignedABICall(rdx);
+ masm.passABIArg(rax);
+ masm.passABIArg(rbx);
+ masm.passABIArg(r9);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+ masm.pop(rbx); // Get the frameSize outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.lea(Operand(rsp, rbx, TimesOne, sizeof(InvalidationBailoutStack)), rsp);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ // Do not erase the frame pointer in this function.
+
+ MacroAssembler masm(cx);
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- rsp
+ // '--- #r8 ---'
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
+ // Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == r8);
+
+    // Add |this| to the count of known arguments.
+ masm.addl(Imm32(1), r8);
+
+ // Load |nformals| into %rcx.
+ masm.loadPtr(Address(rsp, RectifierFrameLayout::offsetOfCalleeToken()), rax);
+ masm.mov(rax, rcx);
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rcx);
+ masm.movzwl(Operand(rcx, JSFunction::offsetOfNargs()), rcx);
+
+    // Stash another copy in r11, since we are going to do destructive
+    // operations on rcx.
+ masm.mov(rcx, r11);
+
+ static_assert(CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(rax, rdx);
+ masm.andq(Imm32(uint32_t(CalleeToken_FunctionConstructing)), rdx);
+
+    // Including |this| and |new.target|, there are (|nformals| + 1 + isConstructing)
+ // arguments to push to the stack. Then we push a JitFrameLayout. We
+ // compute the padding expressed in the number of extra |undefined| values
+ // to push on the stack.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */), rcx);
+ masm.addl(rdx, rcx);
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), rcx);
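+    // rcx now holds the total number of Values the rectifier frame will
+    // contain (formals, |this|, |new.target| if constructing, plus padding),
+    // rounded up to the Jit stack value alignment.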
+
+ // Load the number of |undefined|s to push into %rcx.
+ masm.subq(r8, rcx);
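+
+ // A worked example (illustrative values only): with JitStackValueAlignment
+ // == 2, |nformals| == 3, |nargs| == 1 and a non-constructing call, the
+ // sequence above computes rcx = ((3 + 1 + 1 + 0) & ~1) - (1 + 1) = 2, so
+ // two |undefined| values are pushed before the arguments are copied.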
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- rsp <- r9
+ // '------ #r8 -------'
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // '------- #rcx --------' '------ #r8 -------'
+
+ // Copy the number of actual arguments
+ masm.loadPtr(Address(rsp, RectifierFrameLayout::offsetOfNumActualArgs()), rdx);
+
+ masm.moveValue(UndefinedValue(), r10);
+
+ masm.movq(rsp, r9); // Save %rsp.
+
+ // Push |undefined| values (including the padding).
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(r10);
+ masm.subl(Imm32(1), rcx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument.
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // The |- sizeof(Value)| term makes rcx point at the last argument itself,
+ // rather than at the word just past it.
+ BaseIndex b = BaseIndex(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
+ masm.lea(Operand(b), rcx);
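+
+ // For example (illustrative): with r8 == 2 (|this| plus one argument), rcx
+ // now points at the last argument word of the caller frame, and the copy
+ // loop below walks it down one Value at a time.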
+
+ // Copy & Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(rcx, 0x0));
+ masm.subq(Imm32(sizeof(Value)), rcx);
+ masm.subl(Imm32(1), r8);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ // If constructing, copy |new.target| into place.
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, rax, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(r10);
+
+ // +1 for |this|. We want vp[argc], so don't subtract 1
+ BaseIndex newTargetSrc(r9, rdx, TimesEight, sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(rsp, r11, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- r9
+ //
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] <- rsp [[argc] [callee] [descr] [raddr]]
+ //
+
+ // Construct descriptor.
+ masm.subq(rsp, r9);
+ masm.makeFrameDescriptor(r9, JitFrame_Rectifier, JitFrameLayout::Size());
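+
+ // The descriptor packs the rectifier frame size together with its type,
+ // roughly (frameSize << FRAMESIZE_SHIFT) | JitFrame_Rectifier; the epilogue
+ // below recovers the size again with a shrq by FRAMESIZE_SHIFT.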
+
+ // Construct JitFrameLayout.
+ masm.push(rdx); // numActualArgs
+ masm.push(rax); // callee token
+ masm.push(r9); // descriptor
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rax);
+ masm.loadPtr(Address(rax, JSFunction::offsetOfNativeOrScript()), rax);
+ masm.loadBaselineOrIonRaw(rax, rax, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(rax);
+
+ // Remove the rectifier frame.
+ masm.pop(r9); // r9 <- descriptor with FrameType.
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), r9);
+ masm.pop(r11); // Discard calleeToken.
+ masm.pop(r11); // Discard numActualArgs.
+ masm.addq(r9, rsp); // Discard pushed arguments.
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*)(code->raw() + returnOffset);
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, Register spArg)
+{
+ // Push registers such that we can access them from [base + code].
+ if (JitSupportsSimd()) {
+ masm.PushRegsInMask(AllRegs);
+ } else {
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter)
+ masm.Push(*iter);
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+ }
+
+ // Get the stack pointer into a register, pre-alignment.
+ masm.movq(rsp, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, r8);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, r9);
+
+ // Call the bailout function.
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(r8);
+ masm.passABIArg(r9);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+
+ // Stack is:
+ // [frame]
+ // snapshotOffset
+ // frameSize
+ // [bailoutFrame]
+ //
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ static const uint32_t BailoutDataSize = sizeof(RegisterDump);
+ masm.addq(Imm32(BailoutDataSize), rsp);
+ masm.pop(rcx);
+ masm.lea(Operand(rsp, rcx, TimesOne, sizeof(void*)), rsp);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MOZ_CRASH("x64 does not use bailout tables");
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ // Generate separate code for the wrapper.
+ MacroAssembler masm;
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // Wrapper register set is a superset of Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // The context is the first argument.
+ Register cxreg = IntArgReg0;
+ regs.take(cxreg);
+
+ // Stack is:
+ // ... frame ...
+ // +16 [args]
+ // +8 descriptor
+ // +0 returnAddress
+ //
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = r10;
+ regs.take(argsBase);
+ masm.lea(Operand(rsp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int32_t));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movq(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg))
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ else
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ case VMFunction::DoubleByRef:
+ MOZ_CRASH("NYI: x64 callVM should not be used with 128bits values.");
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, rax, rax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(rax, rax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(esp, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ MOZ_ASSERT(cx->runtime()->jitSupportsFloatingPoint);
+ masm.loadDouble(Address(esp, 0), ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
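+
+ // For example (illustrative): a VMFunction taking two word-sized arguments
+ // with no extra Values to pop returns with
+ // retn(sizeof(ExitFrameLayout) + 2 * sizeof(void*)).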
+
+ Linker linker(masm);
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm;
+
+ LiveRegisterSet regs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.PushRegsInMask(regs);
+
+ MOZ_ASSERT(PreBarrierReg == rdx);
+ masm.mov(ImmPtr(cx->runtime()), rcx);
+
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(rcx);
+ masm.passABIArg(rdx);
+ masm.callWithABI(IonMarkFunction(type));
+
+ masm.PopRegsInMask(regs);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.setFramePushed(sizeof(intptr_t));
+#endif
+
+ Register scratch1 = rax;
+ Register scratch2 = rcx;
+ Register scratch3 = rdx;
+
+ // Load the return address in scratch1.
+ masm.loadPtr(Address(rsp, 0), scratch1);
+
+ // Load BaselineFrame pointer in scratch2.
+ masm.mov(rbp, scratch2);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is marked
+ // during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch3);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.push(scratch1);
+ masm.push(scratch2);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.ret();
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.mov(rbp, rsp);
+ masm.pop(rbp);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(rdx, r9);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = r8;
+ Register scratch2 = r9;
+ Register scratch3 = r10;
+ Register scratch4 = r11;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, and figure out what to do depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.movePtr(scratch1, scratch2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch2);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // Returning directly to an IonJS frame. Store the return address to the
+ // frame in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ BaseIndex stubFrameReturnAddr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch1, TimesOne, RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch3);
+ masm.addPtr(Imm32(sizeof(void*)), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch3, TimesOne, IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.cpp b/js/src/jit/x86-shared/Architecture-x86-shared.cpp
new file mode 100644
index 000000000..5069d8ac9
--- /dev/null
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.cpp
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/Architecture-x86-shared.h"
+#if !defined(JS_CODEGEN_X86) && !defined(JS_CODEGEN_X64)
+# error "Wrong architecture. Only x86 and x64 should build this file!"
+#endif
+
+#include "jit/RegisterSets.h"
+
+const char*
+js::jit::FloatRegister::name() const {
+ static const char* const names[] = {
+
+#ifdef JS_CODEGEN_X64
+#define FLOAT_REGS_(TYPE) \
+ "%xmm0" TYPE, "%xmm1" TYPE, "%xmm2" TYPE, "%xmm3" TYPE, \
+ "%xmm4" TYPE, "%xmm5" TYPE, "%xmm6" TYPE, "%xmm7" TYPE, \
+ "%xmm8" TYPE, "%xmm9" TYPE, "%xmm10" TYPE, "%xmm11" TYPE, \
+ "%xmm12" TYPE, "%xmm13" TYPE, "%xmm14" TYPE, "%xmm15" TYPE
+#else
+#define FLOAT_REGS_(TYPE) \
+ "%xmm0" TYPE, "%xmm1" TYPE, "%xmm2" TYPE, "%xmm3" TYPE, \
+ "%xmm4" TYPE, "%xmm5" TYPE, "%xmm6" TYPE, "%xmm7" TYPE
+#endif
+
+ // These should be enumerated in the same order as in
+ // FloatRegisters::ContentType.
+ FLOAT_REGS_(".s"),
+ FLOAT_REGS_(".d"),
+ FLOAT_REGS_(".i4"),
+ FLOAT_REGS_(".s4")
+#undef FLOAT_REGS_
+
+ };
+ MOZ_ASSERT(size_t(code()) < mozilla::ArrayLength(names));
+ return names[size_t(code())];
+}
+
+js::jit::FloatRegisterSet
+js::jit::FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ SetType bits = s.bits();
+
+ // Ignore all SIMD registers if SIMD is not supported.
+ if (!JitSupportsSimd())
+ bits &= Codes::AllPhysMask * Codes::SpreadScalar;
+
+ // Exclude registers which are already pushed with a larger type. High bits
+ // are associated with larger register types. Thus we keep the set of
+ // registers which are not included in a larger type.
+ bits &= ~(bits >> (1 * Codes::TotalPhys));
+ bits &= ~(bits >> (2 * Codes::TotalPhys));
+ bits &= ~(bits >> (3 * Codes::TotalPhys));
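+
+ // Illustrative example: if %xmm0 is present in the set both as a Double
+ // and as a Single, the Double bit (in a higher band of the set) clears the
+ // corresponding Single bit, so only the wider push remains.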
+
+ return FloatRegisterSet(bits);
+}
+
+uint32_t
+js::jit::FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ SetType all = s.bits();
+ SetType set128b =
+ (all >> (uint32_t(Codes::Simd128) * Codes::TotalPhys)) & Codes::AllPhysMask;
+ SetType doubleSet =
+ (all >> (uint32_t(Codes::Double) * Codes::TotalPhys)) & Codes::AllPhysMask;
+ SetType singleSet =
+ (all >> (uint32_t(Codes::Single) * Codes::TotalPhys)) & Codes::AllPhysMask;
+
+ // PushRegsInMask pushes the largest register first, and thus avoids pushing
+ // aliased registers. So we have to filter out the physical registers which
+ // are already pushed as part of larger registers.
+ SetType set64b = doubleSet & ~set128b;
+ SetType set32b = singleSet & ~set64b & ~set128b;
+
+ static_assert(Codes::AllPhysMask <= 0xffff, "We can safely use CountPopulation32");
+ uint32_t count32b = mozilla::CountPopulation32(set32b);
+
+#if defined(JS_CODEGEN_X64)
+ // If we have an odd number of 32-bit values, then we increase the size to
+ // keep the stack aligned on 8 bytes. Note: Keep in sync with
+ // PushRegsInMask and PopRegsInMaskIgnore.
+ count32b += count32b & 1;
+#endif
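+
+ // Illustrative example: one live Simd128 register, one Double and two
+ // Singles (on x64, where no alignment padding is needed for an even count)
+ // take 16 + 8 + 2 * 4 = 32 bytes of dump space.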
+
+ return mozilla::CountPopulation32(set128b) * (4 * sizeof(int32_t))
+ + mozilla::CountPopulation32(set64b) * sizeof(double)
+ + count32b * sizeof(float);
+}
+uint32_t
+js::jit::FloatRegister::getRegisterDumpOffsetInBytes()
+{
+ return uint32_t(encoding()) * sizeof(FloatRegisters::RegisterContent);
+}
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.h b/js/src/jit/x86-shared/Architecture-x86-shared.h
new file mode 100644
index 000000000..a4e4fa5f4
--- /dev/null
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -0,0 +1,463 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Architecture_x86_h
+#define jit_x86_shared_Architecture_x86_h
+
+#if !defined(JS_CODEGEN_X86) && !defined(JS_CODEGEN_X64)
+# error "Unsupported architecture!"
+#endif
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <string.h>
+
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+#if defined(JS_CODEGEN_X86)
+// In bytes: slots needed for potential memory->memory move spills.
+// +8 for cycles
+// +4 for gpr spills
+// +8 for double spills
+static const uint32_t ION_FRAME_SLACK_SIZE = 20;
+
+#elif defined(JS_CODEGEN_X64)
+// In bytes: slots needed for potential memory->memory move spills.
+// +8 for cycles
+// +8 for gpr spills
+// +8 for double spills
+static const uint32_t ION_FRAME_SLACK_SIZE = 24;
+#endif
+
+#if defined(JS_CODEGEN_X86)
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
+// Size of each bailout table entry. On x86 this is a 5-byte relative call.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 5;
+#endif
+
+#if defined(JS_CODEGEN_X64) && defined(_WIN64)
+static const uint32_t ShadowStackSpace = 32;
+#else
+static const uint32_t ShadowStackSpace = 0;
+#endif
+
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+class Registers {
+ public:
+ typedef uint8_t Code;
+ typedef X86Encoding::RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+#if defined(JS_CODEGEN_X86)
+ typedef uint8_t SetType;
+
+ static const char* GetName(Code code) {
+ return X86Encoding::GPRegName(Encoding(code));
+ }
+
+ static const uint32_t Total = 8;
+ static const uint32_t TotalPhys = 8;
+ static const uint32_t Allocatable = 7;
+
+#elif defined(JS_CODEGEN_X64)
+ typedef uint16_t SetType;
+
+ static const char* GetName(Code code) {
+ static const char * const Names[] = { "rax", "rcx", "rdx", "rbx",
+ "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15" };
+ return Names[code];
+ }
+
+ static const uint32_t Total = 16;
+ static const uint32_t TotalPhys = 16;
+ static const uint32_t Allocatable = 14;
+#endif
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) <= 4, "SetType must be, at most, 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static Code FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Code(i)), name) == 0)
+ return Code(i);
+ }
+ return Invalid;
+ }
+
+ static const Encoding StackPointer = X86Encoding::rsp;
+ static const Encoding Invalid = X86Encoding::invalid_reg;
+
+ static const SetType AllMask = (1 << Total) - 1;
+
+#if defined(JS_CODEGEN_X86)
+ static const SetType ArgRegMask = 0;
+
+ static const SetType VolatileMask =
+ (1 << X86Encoding::rax) |
+ (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::rdx);
+
+ static const SetType WrapperMask =
+ VolatileMask |
+ (1 << X86Encoding::rbx);
+
+ static const SetType SingleByteRegs =
+ (1 << X86Encoding::rax) |
+ (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::rdx) |
+ (1 << X86Encoding::rbx);
+
+ static const SetType NonAllocatableMask =
+ (1 << X86Encoding::rsp);
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask =
+ (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::rdx);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask =
+ (1 << X86Encoding::rax);
+
+#elif defined(JS_CODEGEN_X64)
+ static const SetType ArgRegMask =
+# if !defined(_WIN64)
+ (1 << X86Encoding::rdi) |
+ (1 << X86Encoding::rsi) |
+# endif
+ (1 << X86Encoding::rdx) |
+ (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::r8) |
+ (1 << X86Encoding::r9);
+
+ static const SetType VolatileMask =
+ (1 << X86Encoding::rax) |
+ ArgRegMask |
+ (1 << X86Encoding::r10) |
+ (1 << X86Encoding::r11);
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType SingleByteRegs = AllMask & ~(1 << X86Encoding::rsp);
+
+ static const SetType NonAllocatableMask =
+ (1 << X86Encoding::rsp) |
+ (1 << X86Encoding::r11); // This is ScratchReg.
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask =
+ (1 << X86Encoding::rcx);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask =
+ (1 << X86Encoding::rax);
+
+#endif
+
+ static const SetType NonVolatileMask =
+ AllMask & ~VolatileMask & ~(1 << X86Encoding::rsp);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+};
+
+typedef Registers::SetType PackedRegisterMask;
+
+class FloatRegisters {
+ public:
+ typedef X86Encoding::XMMRegisterID Encoding;
+
+ enum ContentType {
+ Single, // 32-bit float.
+ Double, // 64-bit double.
+ Simd128, // 128-bit SIMD type (int32x4, bool16x8, etc).
+ NumTypes
+ };
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ float s;
+ double d;
+ int32_t i4[4];
+ float s4[4];
+ };
+
+ static const char* GetName(Encoding code) {
+ return X86Encoding::XMMRegName(code);
+ }
+
+ static Encoding FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Encoding(i)), name) == 0)
+ return Encoding(i);
+ }
+ return Invalid;
+ }
+
+ static const Encoding Invalid = X86Encoding::invalid_xmm;
+
+#if defined(JS_CODEGEN_X86)
+ static const uint32_t Total = 8 * NumTypes;
+ static const uint32_t TotalPhys = 8;
+ static const uint32_t Allocatable = 7;
+ typedef uint32_t SetType;
+
+#elif defined(JS_CODEGEN_X64)
+ static const uint32_t Total = 16 * NumTypes;
+ static const uint32_t TotalPhys = 16;
+ static const uint32_t Allocatable = 15;
+ typedef uint64_t SetType;
+
+#endif
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ // Magic values which are used to duplicate a mask of physical registers for
+ // a specific type of register. A multiplication is used to copy and shift
+ // the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1) << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1) << (uint32_t(Double) * TotalPhys);
+ static const SetType SpreadSimd128 = SetType(1) << (uint32_t(Simd128) * TotalPhys);
+ static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+ static const SetType SpreadVector = SpreadSimd128;
+ static const SetType Spread = SpreadScalar | SpreadVector;
+
+ static const SetType AllPhysMask = ((1 << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
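+
+ // For example, on x64 SpreadDouble == SetType(1) << 16, so
+ // AllPhysMask * SpreadDouble sets exactly the sixteen bits of the
+ // Double band (bits 16..31 of the set).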
+
+#if defined(JS_CODEGEN_X86)
+ static const SetType NonAllocatableMask =
+ Spread * (1 << X86Encoding::xmm7); // This is ScratchDoubleReg.
+
+#elif defined(JS_CODEGEN_X64)
+ static const SetType NonAllocatableMask =
+ Spread * (1 << X86Encoding::xmm15); // This is ScratchDoubleReg.
+#endif
+
+#if defined(JS_CODEGEN_X64) && defined(_WIN64)
+ static const SetType VolatileMask =
+ ( (1 << X86Encoding::xmm0) |
+ (1 << X86Encoding::xmm1) |
+ (1 << X86Encoding::xmm2) |
+ (1 << X86Encoding::xmm3) |
+ (1 << X86Encoding::xmm4) |
+ (1 << X86Encoding::xmm5)
+ ) * SpreadScalar
+ | AllPhysMask * SpreadVector;
+
+#else
+ static const SetType VolatileMask =
+ AllMask;
+#endif
+
+ static const SetType NonVolatileMask = AllMask & ~VolatileMask;
+ static const SetType WrapperMask = VolatileMask;
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+ static uint32_t SetSize(SetType x) {
+ // Count the number of non-aliased registers, for the moment.
+ //
+ // Copy the set bits of each typed register to the low part of the
+ // set, and count the number of registers. This avoids counting
+ // registers which are allocated twice with different types (such as
+ // in AllMask).
+ x |= x >> (2 * Codes::TotalPhys);
+ x |= x >> Codes::TotalPhys;
+ x &= Codes::AllPhysMask;
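+ // E.g. a set holding %xmm3 both as a Double and as a Simd128 folds
+ // down to a single bit for %xmm3, so the register is counted once.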
+ static_assert(Codes::AllPhysMask <= 0xffff, "We can safely use CountPopulation32");
+ return mozilla::CountPopulation32(x);
+ }
+
+#if defined(JS_CODEGEN_X86)
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+#elif defined(JS_CODEGEN_X64)
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+#endif
+
+ private:
+ // Note: These fields use one extra bit to make the invalid enumerated
+ // values fit, and thus prevent a warning.
+ Codes::Encoding reg_ : 5;
+ Codes::ContentType type_ : 3;
+ bool isInvalid_ : 1;
+
+ // Constants used for exporting/importing the float register code.
+#if defined(JS_CODEGEN_X86)
+ static const size_t RegSize = 3;
+#elif defined(JS_CODEGEN_X64)
+ static const size_t RegSize = 4;
+#endif
+ static const size_t RegMask = (1 << RegSize) - 1;
+
+ public:
+ constexpr FloatRegister()
+ : reg_(Codes::Encoding(0)), type_(Codes::Single), isInvalid_(true)
+ { }
+ constexpr FloatRegister(uint32_t r, Codes::ContentType k)
+ : reg_(Codes::Encoding(r)), type_(k), isInvalid_(false)
+ { }
+ constexpr FloatRegister(Codes::Encoding r, Codes::ContentType k)
+ : reg_(r), type_(k), isInvalid_(false)
+ { }
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Codes::Total);
+ return FloatRegister(i & RegMask, Codes::ContentType(i >> RegSize));
+ }
+
+ bool isSingle() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Single; }
+ bool isDouble() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Double; }
+ bool isSimd128() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Simd128; }
+ bool isInvalid() const { return isInvalid_; }
+
+ FloatRegister asSingle() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Single); }
+ FloatRegister asDouble() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Double); }
+ FloatRegister asSimd128() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Simd128); }
+
+ uint32_t size() const {
+ MOZ_ASSERT(!isInvalid());
+ if (isSingle())
+ return sizeof(float);
+ if (isDouble())
+ return sizeof(double);
+ MOZ_ASSERT(isSimd128());
+ return 4 * sizeof(int32_t);
+ }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ // TODO: ARM does the same thing; we should avoid this, but the
+ // RegisterSets currently depend on it.
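+ // For example, on x64 %xmm2 viewed as a Double encodes as
+ // 2 | (1 << 4) == 18, and FromCode(18) recovers the same register
+ // and type.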
+ return Code(reg_ | (type_ << RegSize));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ return reg_;
+ }
+ // Defined in Architecture-x86-shared.cpp.
+ const char* name() const;
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ bool operator !=(FloatRegister other) const {
+ return other.reg_ != reg_ || other.type_ != type_;
+ }
+ bool operator ==(FloatRegister other) const {
+ return other.reg_ == reg_ && other.type_ == type_;
+ }
+ bool aliases(FloatRegister other) const {
+ return other.reg_ == reg_;
+ }
+ // Check if two floating point registers have the same type.
+ bool equiv(FloatRegister other) const {
+ return other.type_ == type_;
+ }
+
+ uint32_t numAliased() const {
+ return Codes::NumTypes;
+ }
+ uint32_t numAlignedAliased() const {
+ return numAliased();
+ }
+
+ // N.B. FloatRegister is an explicit outparam here because msvc-2010
+ // miscompiled it on win64 when the value was simply returned.
+ void aliased(uint32_t aliasIdx, FloatRegister* ret) const {
+ MOZ_ASSERT(aliasIdx < Codes::NumTypes);
+ *ret = FloatRegister(reg_, Codes::ContentType((aliasIdx + type_) % Codes::NumTypes));
+ }
+ void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) const {
+ aliased(aliasIdx, ret);
+ }
+
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::Spread << reg_;
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+// ARM/D32 has double registers that can NOT be treated as float32,
+// and this requires some dances in lowering.
+inline bool
+hasUnaliasedDouble()
+{
+ return false;
+}
+
+// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32
+// to a double as a temporary, you need a temporary double register.
+inline bool
+hasMultiAlias()
+{
+ return false;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Architecture_x86_h */
diff --git a/js/src/jit/x86-shared/Assembler-x86-shared.cpp b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
new file mode 100644
index 000000000..8d761c138
--- /dev/null
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -0,0 +1,350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Marking.h"
+#include "jit/Disassembler.h"
+#include "jit/JitCompartment.h"
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64.h"
+#else
+# error "Wrong architecture. Only x86 and x64 should build this file!"
+#endif
+
+#ifdef _MSC_VER
+# include <intrin.h> // for __cpuid
+# if defined(_M_X64) && (_MSC_FULL_VER >= 160040219)
+# include <immintrin.h> // for _xgetbv
+# endif
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+void
+AssemblerX86Shared::copyJumpRelocationTable(uint8_t* dest)
+{
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+}
+
+void
+AssemblerX86Shared::copyDataRelocationTable(uint8_t* dest)
+{
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+}
+
+void
+AssemblerX86Shared::copyPreBarrierTable(uint8_t* dest)
+{
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ void* ptr = X86Encoding::GetPointer(buffer + offset);
+
+#ifdef JS_PUNBOX64
+ // All pointers on x64 will have the top bits cleared. If those bits
+ // are not cleared, this must be a Value.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ if (word != v.asRawBits()) {
+ // Only update the code if the Value changed, because the code
+ // is not writable if we're not moving objects.
+ X86Encoding::SetPointer(buffer + offset, v.bitsAsPunboxPointer());
+ }
+ continue;
+ }
+#endif
+
+ // No barrier needed since these are constants.
+ gc::Cell* cellPtr = reinterpret_cast<gc::Cell*>(ptr);
+ TraceManuallyBarrieredGenericPointerEdge(trc, &cellPtr, "jit-masm-ptr");
+ if (cellPtr != ptr)
+ X86Encoding::SetPointer(buffer + offset, cellPtr);
+ }
+}
+
+
+void
+AssemblerX86Shared::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+void
+AssemblerX86Shared::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
+ }
+ }
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, masm.data(), reader);
+ }
+}
+
+void
+AssemblerX86Shared::executableCopy(void* buffer)
+{
+ masm.executableCopy(buffer);
+
+ // Crash diagnostics for bug 1124397. Check that the code buffer has not
+ // been poisoned with 0xE5 bytes.
+ static const size_t MinPoisoned = 16;
+ const uint8_t* bytes = (const uint8_t*)buffer;
+ size_t len = size();
+
+ for (size_t i = 0; i < len; i += MinPoisoned) {
+ if (bytes[i] != 0xE5)
+ continue;
+
+ size_t startOffset = i;
+ while (startOffset > 0 && bytes[startOffset - 1] == 0xE5)
+ startOffset--;
+
+ size_t endOffset = i;
+ while (endOffset + 1 < len && bytes[endOffset + 1] == 0xE5)
+ endOffset++;
+
+ if (endOffset - startOffset < MinPoisoned)
+ continue;
+
+ volatile uintptr_t dump[5];
+ blackbox = dump;
+ blackbox[0] = uintptr_t(0xABCD4321);
+ blackbox[1] = uintptr_t(len);
+ blackbox[2] = uintptr_t(startOffset);
+ blackbox[3] = uintptr_t(endOffset);
+ blackbox[4] = uintptr_t(0xFFFF8888);
+ MOZ_CRASH("Corrupt code buffer");
+ }
+}
+
+void
+AssemblerX86Shared::processCodeLabels(uint8_t* rawCode)
+{
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
+ }
+}
+
+AssemblerX86Shared::Condition
+AssemblerX86Shared::InvertCondition(Condition cond)
+{
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerX86Shared::Condition
+AssemblerX86Shared::UnsignedCondition(Condition cond)
+{
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerX86Shared::Condition
+AssemblerX86Shared::ConditionWithoutEqual(Condition cond)
+{
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void
+AssemblerX86Shared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+{
+#ifdef DEBUG
+ if (masm.oom())
+ return;
+ Disassembler::VerifyHeapAccess(masm.data() + begin, masm.data() + end, heapAccess);
+#endif
+}
+
+CPUInfo::SSEVersion CPUInfo::maxSSEVersion = UnknownSSE;
+CPUInfo::SSEVersion CPUInfo::maxEnabledSSEVersion = UnknownSSE;
+bool CPUInfo::avxPresent = false;
+bool CPUInfo::avxEnabled = false;
+bool CPUInfo::popcntPresent = false;
+bool CPUInfo::needAmdBugWorkaround = false;
+
+static uintptr_t
+ReadXGETBV()
+{
+ // We use a variety of low-level mechanisms to get at the xgetbv
+ // instruction, including spelling out the xgetbv instruction as bytes,
+ // because older compilers and assemblers may not recognize the instruction
+ // by name.
+ size_t xcr0EAX = 0;
+#if defined(_XCR_XFEATURE_ENABLED_MASK)
+ xcr0EAX = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+#elif defined(__GNUC__)
+ // xgetbv returns its results in %eax and %edx, and for our purposes here,
+ // we're only interested in the %eax value.
+ asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0EAX) : "c"(0) : "%edx");
+#elif defined(_MSC_VER) && defined(_M_IX86)
+ __asm {
+ xor ecx, ecx
+ _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0
+ mov xcr0EAX, eax
+ }
+#endif
+ return xcr0EAX;
+}
+
+void
+CPUInfo::SetSSEVersion()
+{
+ int flagsEAX = 0;
+ int flagsECX = 0;
+ int flagsEDX = 0;
+
+#ifdef _MSC_VER
+ int cpuinfo[4];
+ __cpuid(cpuinfo, 1);
+ flagsEAX = cpuinfo[0];
+ flagsECX = cpuinfo[2];
+ flagsEDX = cpuinfo[3];
+#elif defined(__GNUC__)
+# ifdef JS_CODEGEN_X64
+ asm (
+ "movl $0x1, %%eax;"
+ "cpuid;"
+ : "=a" (flagsEAX), "=c" (flagsECX), "=d" (flagsEDX)
+ :
+ : "%ebx"
+ );
+# else
+ // On x86, preserve ebx. The compiler needs it for PIC mode.
+ // Some older processors don't fill the ecx register with cpuid, so clear
+ // it before calling cpuid, so that there's no risk of picking up random
+ // bits indicating SSE3/SSE4 are present.
+ asm (
+ "xor %%ecx, %%ecx;"
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ : "=a" (flagsEAX), "=c" (flagsECX), "=d" (flagsEDX)
+ :
+ :
+ );
+# endif
+#else
+# error "Unsupported compiler"
+#endif
+
+ static const int SSEBit = 1 << 25;
+ static const int SSE2Bit = 1 << 26;
+ static const int SSE3Bit = 1 << 0;
+ static const int SSSE3Bit = 1 << 9;
+ static const int SSE41Bit = 1 << 19;
+ static const int SSE42Bit = 1 << 20;
+
+ if (flagsECX & SSE42Bit) maxSSEVersion = SSE4_2;
+ else if (flagsECX & SSE41Bit) maxSSEVersion = SSE4_1;
+ else if (flagsECX & SSSE3Bit) maxSSEVersion = SSSE3;
+ else if (flagsECX & SSE3Bit) maxSSEVersion = SSE3;
+ else if (flagsEDX & SSE2Bit) maxSSEVersion = SSE2;
+ else if (flagsEDX & SSEBit) maxSSEVersion = SSE;
+ else maxSSEVersion = NoSSE;
+
+ if (maxEnabledSSEVersion != UnknownSSE)
+ maxSSEVersion = Min(maxSSEVersion, maxEnabledSSEVersion);
+
+ static const int AVXBit = 1 << 28;
+ static const int XSAVEBit = 1 << 27;
+ avxPresent = (flagsECX & AVXBit) && (flagsECX & XSAVEBit) && avxEnabled;
+
+ // If the hardware supports AVX, check whether the OS supports it too.
+ if (avxPresent) {
+ size_t xcr0EAX = ReadXGETBV();
+ static const int xcr0SSEBit = 1 << 1;
+ static const int xcr0AVXBit = 1 << 2;
+ avxPresent = (xcr0EAX & xcr0SSEBit) && (xcr0EAX & xcr0AVXBit);
+ }
+
+ static const int POPCNTBit = 1 << 23;
+
+ popcntPresent = (flagsECX & POPCNTBit);
+
+ // Check if we need to work around an AMD CPU bug (see bug 1281759).
+ // We check for family 20 models 0-2. Intel doesn't use family 20 at
+ // this point, so this should only match AMD CPUs.
+ unsigned family = ((flagsEAX >> 20) & 0xff) + ((flagsEAX >> 8) & 0xf);
+ unsigned model = (((flagsEAX >> 16) & 0xf) << 4) + ((flagsEAX >> 4) & 0xf);
+ needAmdBugWorkaround = (family == 20 && model <= 2);
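+
+ // For example (illustrative): a CPUID signature of 0x00500F20 decodes to
+ // family 0x5 + 0xF == 20 and model (0x0 << 4) + 0x2 == 2, which enables
+ // the workaround.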
+}
+
+volatile uintptr_t* blackbox = nullptr;
diff --git a/js/src/jit/x86-shared/Assembler-x86-shared.h b/js/src/jit/x86-shared/Assembler-x86-shared.h
new file mode 100644
index 000000000..510ce9a99
--- /dev/null
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -0,0 +1,3652 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Assembler_x86_shared_h
+#define jit_x86_shared_Assembler_x86_shared_h
+
+#include <cstddef>
+
+#include "jit/shared/Assembler-shared.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/BaseAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/BaseAssembler-x64.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope
+{
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg)
+ { }
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope
+{
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg)
+ { }
+};
+
+struct ScratchSimd128Scope : public AutoFloatRegisterScope
+{
+ explicit ScratchSimd128Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchSimd128Reg)
+ { }
+};
+
+class Operand
+{
+ public:
+ enum Kind {
+ REG,
+ MEM_REG_DISP,
+ FPREG,
+ MEM_SCALE,
+ MEM_ADDRESS32
+ };
+
+ private:
+ Kind kind_ : 4;
+ // Used as a Register::Encoding and a FloatRegister::Encoding.
+ uint32_t base_ : 5;
+ Scale scale_ : 3;
+ // We don't use all 8 bits, of course, but GCC complains if the size of
+ // this field is smaller than the size of Register::Encoding.
+ Register::Encoding index_ : 8;
+ int32_t disp_;
+
+ public:
+ explicit Operand(Register reg)
+ : kind_(REG),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(0)
+ { }
+ explicit Operand(FloatRegister reg)
+ : kind_(FPREG),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(0)
+ { }
+ explicit Operand(const Address& address)
+ : kind_(MEM_REG_DISP),
+ base_(address.base.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(address.offset)
+ { }
+ explicit Operand(const BaseIndex& address)
+ : kind_(MEM_SCALE),
+ base_(address.base.encoding()),
+ scale_(address.scale),
+ index_(address.index.encoding()),
+ disp_(address.offset)
+ { }
+ Operand(Register base, Register index, Scale scale, int32_t disp = 0)
+ : kind_(MEM_SCALE),
+ base_(base.encoding()),
+ scale_(scale),
+ index_(index.encoding()),
+ disp_(disp)
+ { }
+ Operand(Register reg, int32_t disp)
+ : kind_(MEM_REG_DISP),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(disp)
+ { }
+ explicit Operand(AbsoluteAddress address)
+ : kind_(MEM_ADDRESS32),
+ base_(Registers::Invalid),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(X86Encoding::AddressImmediate(address.addr))
+ { }
+ explicit Operand(PatchedAbsoluteAddress address)
+ : kind_(MEM_ADDRESS32),
+ base_(Registers::Invalid),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(X86Encoding::AddressImmediate(address.addr))
+ { }
+
+ Address toAddress() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP);
+ return Address(Register::FromCode(base()), disp());
+ }
+
+ BaseIndex toBaseIndex() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return BaseIndex(Register::FromCode(base()), Register::FromCode(index()), scale(), disp());
+ }
+
+ Kind kind() const {
+ return kind_;
+ }
+ Register::Encoding reg() const {
+ MOZ_ASSERT(kind() == REG);
+ return Register::Encoding(base_);
+ }
+ Register::Encoding base() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP || kind() == MEM_SCALE);
+ return Register::Encoding(base_);
+ }
+ Register::Encoding index() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return index_;
+ }
+ Scale scale() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return scale_;
+ }
+ FloatRegister::Encoding fpu() const {
+ MOZ_ASSERT(kind() == FPREG);
+ return FloatRegister::Encoding(base_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP || kind() == MEM_SCALE);
+ return disp_;
+ }
+ void* address() const {
+ MOZ_ASSERT(kind() == MEM_ADDRESS32);
+ return reinterpret_cast<void*>(disp_);
+ }
+
+ bool containsReg(Register r) const {
+ switch (kind()) {
+ case REG: return r.encoding() == reg();
+ case MEM_REG_DISP: return r.encoding() == base();
+ case MEM_SCALE: return r.encoding() == base() || r.encoding() == index();
+ default: return false;
+ }
+ }
+};
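+
+// A quick sketch of how Operand maps onto the x86 addressing modes (register
+// names are illustrative only):
+//
+//   Operand(eax)                       // REG:           eax
+//   Operand(Address(ebx, 8))           // MEM_REG_DISP:  [ebx + 8]
+//   Operand(ebx, ecx, TimesFour, 8)    // MEM_SCALE:     [ebx + ecx*4 + 8]
+//   Operand(AbsoluteAddress(ptr))      // MEM_ADDRESS32: [ptr]
+//
+// The instruction emitters below switch on kind() to select the matching
+// encoding form.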
+
+inline Imm32
+Imm64::firstHalf() const
+{
+ return low();
+}
+
+inline Imm32
+Imm64::secondHalf() const
+{
+ return hi();
+}
+
+class CPUInfo
+{
+ public:
+    // Because the SSE extensions were introduced in order, the presence of a later
+    // SSE implies the presence of all earlier ones. For example, SSE4_2 support
+    // implies SSE2 support.
+ enum SSEVersion {
+ UnknownSSE = 0,
+ NoSSE = 1,
+ SSE = 2,
+ SSE2 = 3,
+ SSE3 = 4,
+ SSSE3 = 5,
+ SSE4_1 = 6,
+ SSE4_2 = 7
+ };
+
+ static SSEVersion GetSSEVersion() {
+ if (maxSSEVersion == UnknownSSE)
+ SetSSEVersion();
+
+ MOZ_ASSERT(maxSSEVersion != UnknownSSE);
+ MOZ_ASSERT_IF(maxEnabledSSEVersion != UnknownSSE, maxSSEVersion <= maxEnabledSSEVersion);
+ return maxSSEVersion;
+ }
+
+ static bool IsAVXPresent() {
+ if (MOZ_UNLIKELY(maxSSEVersion == UnknownSSE))
+ SetSSEVersion();
+
+ MOZ_ASSERT_IF(!avxEnabled, !avxPresent);
+ return avxPresent;
+ }
+
+ private:
+ static SSEVersion maxSSEVersion;
+ static SSEVersion maxEnabledSSEVersion;
+ static bool avxPresent;
+ static bool avxEnabled;
+ static bool popcntPresent;
+ static bool needAmdBugWorkaround;
+
+ static void SetSSEVersion();
+
+ public:
+ static bool IsSSE2Present() {
+#ifdef JS_CODEGEN_X64
+ return true;
+#else
+ return GetSSEVersion() >= SSE2;
+#endif
+ }
+ static bool IsSSE3Present() { return GetSSEVersion() >= SSE3; }
+ static bool IsSSSE3Present() { return GetSSEVersion() >= SSSE3; }
+ static bool IsSSE41Present() { return GetSSEVersion() >= SSE4_1; }
+ static bool IsSSE42Present() { return GetSSEVersion() >= SSE4_2; }
+ static bool IsPOPCNTPresent() { return popcntPresent; }
+ static bool NeedAmdBugWorkaround() { return needAmdBugWorkaround; }
+
+ static void SetSSE3Disabled() { maxEnabledSSEVersion = SSE2; avxEnabled = false; }
+ static void SetSSE4Disabled() { maxEnabledSSEVersion = SSSE3; avxEnabled = false; }
+ static void SetAVXEnabled() { avxEnabled = true; }
+};
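+
+// CPUInfo is typically consulted before emitting instructions that require a
+// newer extension; a minimal, purely illustrative sketch (the fallback path is
+// hypothetical):
+//
+//   if (CPUInfo::IsSSE41Present())
+//       masm.vpextrd(lane, src, dest);      // single instruction on SSE4.1
+//   else
+//       /* extract via shuffles and movd */;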
+
+class AssemblerX86Shared : public AssemblerShared
+{
+ protected:
+ struct RelativePatch {
+ int32_t offset;
+ void* target;
+ Relocation::Kind kind;
+
+ RelativePatch(int32_t offset, void* target, Relocation::Kind kind)
+ : offset(offset),
+ target(target),
+ kind(kind)
+ { }
+ };
+
+ Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter preBarriers_;
+
+ void writeDataRelocation(ImmGCPtr ptr) {
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(masm.currentOffset());
+ }
+ }
+ void writePrebarrierOffset(CodeOffset label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ protected:
+ X86Encoding::BaseAssemblerSpecific masm;
+
+ typedef X86Encoding::JmpSrc JmpSrc;
+ typedef X86Encoding::JmpDst JmpDst;
+
+ public:
+ AssemblerX86Shared()
+ {
+ if (!HasAVX())
+ masm.disableVEX();
+ }
+
+ enum Condition {
+ Equal = X86Encoding::ConditionE,
+ NotEqual = X86Encoding::ConditionNE,
+ Above = X86Encoding::ConditionA,
+ AboveOrEqual = X86Encoding::ConditionAE,
+ Below = X86Encoding::ConditionB,
+ BelowOrEqual = X86Encoding::ConditionBE,
+ GreaterThan = X86Encoding::ConditionG,
+ GreaterThanOrEqual = X86Encoding::ConditionGE,
+ LessThan = X86Encoding::ConditionL,
+ LessThanOrEqual = X86Encoding::ConditionLE,
+ Overflow = X86Encoding::ConditionO,
+ CarrySet = X86Encoding::ConditionC,
+ CarryClear = X86Encoding::ConditionNC,
+ Signed = X86Encoding::ConditionS,
+ NotSigned = X86Encoding::ConditionNS,
+ Zero = X86Encoding::ConditionE,
+ NonZero = X86Encoding::ConditionNE,
+ Parity = X86Encoding::ConditionP,
+ NoParity = X86Encoding::ConditionNP
+ };
+
+ // If this bit is set, the vucomisd operands have to be inverted.
+ static const int DoubleConditionBitInvert = 0x10;
+
+ // Bit set when a DoubleCondition does not map to a single x86 condition.
+ // The macro assembler has to special-case these conditions.
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+ enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is
+        // ordered, i.e. neither operand is NaN.
+ DoubleOrdered = NoParity,
+ DoubleEqual = Equal | DoubleConditionBitSpecial,
+ DoubleNotEqual = NotEqual,
+ DoubleGreaterThan = Above,
+ DoubleGreaterThanOrEqual = AboveOrEqual,
+ DoubleLessThan = Above | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = AboveOrEqual | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = Parity,
+ DoubleEqualOrUnordered = Equal,
+ DoubleNotEqualOrUnordered = NotEqual | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = Below | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = BelowOrEqual | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = Below,
+ DoubleLessThanOrEqualOrUnordered = BelowOrEqual
+ };
+
+ enum NaNCond {
+ NaN_HandledByCond,
+ NaN_IsTrue,
+ NaN_IsFalse
+ };
+
+ // If the primary condition returned by ConditionFromDoubleCondition doesn't
+ // handle NaNs properly, return NaN_IsFalse if the comparison should be
+ // overridden to return false on NaN, NaN_IsTrue if it should be overridden
+ // to return true on NaN, or NaN_HandledByCond if no secondary check is
+ // needed.
+ static inline NaNCond NaNCondFromDoubleCondition(DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ case DoubleNotEqual:
+ case DoubleGreaterThan:
+ case DoubleGreaterThanOrEqual:
+ case DoubleLessThan:
+ case DoubleLessThanOrEqual:
+ case DoubleUnordered:
+ case DoubleEqualOrUnordered:
+ case DoubleGreaterThanOrUnordered:
+ case DoubleGreaterThanOrEqualOrUnordered:
+ case DoubleLessThanOrUnordered:
+ case DoubleLessThanOrEqualOrUnordered:
+ return NaN_HandledByCond;
+ case DoubleEqual:
+ return NaN_IsFalse;
+ case DoubleNotEqualOrUnordered:
+ return NaN_IsTrue;
+ }
+
+ MOZ_CRASH("Unknown double condition");
+ }
+
+ static void StaticAsserts() {
+ // DoubleConditionBits should not interfere with x86 condition codes.
+ JS_STATIC_ASSERT(!((Equal | NotEqual | Above | AboveOrEqual | Below |
+ BelowOrEqual | Parity | NoParity) & DoubleConditionBits));
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ // Return the primary condition to test. Some primary conditions may not
+ // handle NaNs properly and may therefore require a secondary condition.
+ // Use NaNCondFromDoubleCondition to determine what else is needed.
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ return static_cast<Condition>(cond & ~DoubleConditionBits);
+ }
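+
+    // A minimal sketch of how the pieces above are meant to be combined by a
+    // caller (branch emission shown only as pseudocode in the comments):
+    //
+    //   DoubleCondition dc = DoubleLessThan;                  // Above | Invert
+    //   if (dc & DoubleConditionBitInvert)
+    //       /* swap the vucomisd operands before comparing */;
+    //   Condition cond = ConditionFromDoubleCondition(dc);    // Above
+    //   switch (NaNCondFromDoubleCondition(dc)) {
+    //     case NaN_HandledByCond: /* j(cond, target) suffices */        break;
+    //     case NaN_IsTrue:        /* also take the branch on Parity */  break;
+    //     case NaN_IsFalse:       /* skip the branch on Parity */       break;
+    //   }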
+
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer* trc);
+
+ bool oom() const {
+ return AssemblerShared::oom() ||
+ masm.oom() ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+ }
+
+ void setPrinter(Sprinter* sp) {
+ masm.setPrinter(sp);
+ }
+
+ static const Register getStackPointer() {
+ return StackPointer;
+ }
+
+ void executableCopy(void* buffer);
+ bool asmMergeWith(const AssemblerX86Shared& other) {
+ MOZ_ASSERT(other.jumps_.length() == 0);
+ if (!AssemblerShared::asmMergeWith(masm.size(), other))
+ return false;
+ return masm.appendBuffer(other.masm);
+ }
+ void processCodeLabels(uint8_t* rawCode);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+ void copyPreBarrierTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const {
+ return masm.size();
+ }
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+ }
+ size_t dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+ }
+ size_t preBarrierTableBytes() const {
+ return preBarriers_.length();
+ }
+    // Size of the instruction stream plus the relocation tables, in bytes.
+ size_t bytesNeeded() const {
+ return size() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+ }
+
+ public:
+ void haltingAlign(int alignment) {
+ masm.haltingAlign(alignment);
+ }
+ void nopAlign(int alignment) {
+ masm.nopAlign(alignment);
+ }
+ void writeCodePointer(CodeOffset* label) {
+        // A CodeOffset only has one use, so bake in the "end of list" value.
+ masm.jumpTablePointer(LabelBase::INVALID_OFFSET);
+ label->bind(masm.size());
+ }
+ void cmovz(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.cmovz_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmovz_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmovz_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(Imm32 imm32, Register dest) {
+ masm.movl_i32r(imm32.value, dest.encoding());
+ }
+ void movl(Register src, Register dest) {
+ masm.movl_rr(src.encoding(), dest.encoding());
+ }
+ void movl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(Imm32 imm32, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(imm32.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(imm32.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(imm32.value, dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_i32m(imm32.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgl(Register src, Register dest) {
+ masm.xchgl_rr(src.encoding(), dest.encoding());
+ }
+
+ // Eventually vmovapd should be overloaded to support loads and
+ // stores too.
+ void vmovapd(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovapd_rr(src.encoding(), dest.encoding());
+ }
+
+ void vmovaps(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rr(src.encoding(), dest.encoding());
+ }
+ void vmovaps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovaps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovaps_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::FPREG:
+ masm.vmovaps_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovaps(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovaps_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovaps_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovups(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovups_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovups(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovups_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ // vmovsd is only provided in load/store form since the
+ // register-to-register form has different semantics (it doesn't clobber
+ // the whole output register) and isn't needed currently.
+ void vmovsd(const Address& src, FloatRegister dest) {
+ masm.vmovsd_mr(src.offset, src.base.encoding(), dest.encoding());
+ }
+ void vmovsd(const BaseIndex& src, FloatRegister dest) {
+ masm.vmovsd_mr(src.offset, src.base.encoding(), src.index.encoding(), src.scale, dest.encoding());
+ }
+ void vmovsd(FloatRegister src, const Address& dest) {
+ masm.vmovsd_rm(src.encoding(), dest.offset, dest.base.encoding());
+ }
+ void vmovsd(FloatRegister src, const BaseIndex& dest) {
+ masm.vmovsd_rm(src.encoding(), dest.offset, dest.base.encoding(), dest.index.encoding(), dest.scale);
+ }
+    // Unlike vmovsd, vmovss is also provided in a register-to-register form, but
+    // that form should only be used in contexts where we care about not clearing
+    // the higher lanes of the FloatRegister.
+ void vmovss(const Address& src, FloatRegister dest) {
+ masm.vmovss_mr(src.offset, src.base.encoding(), dest.encoding());
+ }
+ void vmovss(const BaseIndex& src, FloatRegister dest) {
+ masm.vmovss_mr(src.offset, src.base.encoding(), src.index.encoding(), src.scale, dest.encoding());
+ }
+ void vmovss(FloatRegister src, const Address& dest) {
+ masm.vmovss_rm(src.encoding(), dest.offset, dest.base.encoding());
+ }
+ void vmovss(FloatRegister src, const BaseIndex& dest) {
+ masm.vmovss_rm(src.encoding(), dest.offset, dest.base.encoding(), dest.index.encoding(), dest.scale);
+ }
+ void vmovss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ masm.vmovss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmovdqu(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqu_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqu(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqu_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovdqa_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqa_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqa_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqa_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqa_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtss2sd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtss2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsd2ss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsd2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void movzbl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movzbl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movsbl(Register src, Register dest) {
+ masm.movsbl_rr(src.encoding(), dest.encoding());
+ }
+ void movsbl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movsbl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(Imm32 src, Register dest) {
+ masm.movb_ir(src.value & 255, dest.encoding());
+ }
+ void movb(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(Imm32 src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_im(src.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_im(src.value, dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movzwl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movzwl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movzwl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movzwl(Register src, Register dest) {
+ masm.movzwl_rr(src.encoding(), dest.encoding());
+ }
+ void movw(const Operand& src, Register dest) {
+ masm.prefix_16_for_32();
+ movl(src, dest);
+ }
+ void movw(Imm32 src, Register dest) {
+ masm.prefix_16_for_32();
+ movl(src, dest);
+ }
+ void movw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movw(Imm32 src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_im(src.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movw_im(src.value, dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movswl(Register src, Register dest) {
+ masm.movswl_rr(src.encoding(), dest.encoding());
+ }
+ void movswl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movswl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void leal(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.leal_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.leal_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ protected:
+ void jSrc(Condition cond, Label* label) {
+ if (label->bound()) {
+ // The jump can be immediately encoded to the correct destination.
+ masm.jCC_i(static_cast<X86Encoding::Condition>(cond), JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc j = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ JmpSrc prev = JmpSrc(label->use(j.offset()));
+ masm.setNextJump(j, prev);
+ }
+ }
+ void jmpSrc(Label* label) {
+ if (label->bound()) {
+ // The jump can be immediately encoded to the correct destination.
+ masm.jmp_i(JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc j = masm.jmp();
+ JmpSrc prev = JmpSrc(label->use(j.offset()));
+ masm.setNextJump(j, prev);
+ }
+ }
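+
+    // Note on the jump list: an unbound Label records the offset of the most
+    // recent jump that targets it, and each such jump's (still unpatched)
+    // displacement field holds the offset of the previous jump in the chain.
+    // bind() below walks that chain via nextJump() and patches every entry to
+    // point at the real destination.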
+
+ // Comparison of EAX against the address given by a Label.
+ JmpSrc cmpSrc(Label* label) {
+ JmpSrc j = masm.cmp_eax();
+ if (label->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(j, JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc prev = JmpSrc(label->use(j.offset()));
+ masm.setNextJump(j, prev);
+ }
+ return j;
+ }
+
+ JmpSrc jSrc(Condition cond, RepatchLabel* label) {
+ JmpSrc j = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ if (label->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(j, JmpDst(label->offset()));
+ } else {
+ label->use(j.offset());
+ }
+ return j;
+ }
+ JmpSrc jmpSrc(RepatchLabel* label) {
+ JmpSrc j = masm.jmp();
+ if (label->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(j, JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ label->use(j.offset());
+ }
+ return j;
+ }
+
+ public:
+ void nop() { masm.nop(); }
+ void nop(size_t n) { masm.insert_nop(n); }
+ void j(Condition cond, Label* label) { jSrc(cond, label); }
+ void jmp(Label* label) { jmpSrc(label); }
+ void j(Condition cond, RepatchLabel* label) { jSrc(cond, label); }
+ void jmp(RepatchLabel* label) { jmpSrc(label); }
+
+ void j(Condition cond, wasm::TrapDesc target) {
+ Label l;
+ j(cond, &l);
+ bindLater(&l, target);
+ }
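+    // As a sketch of the idiom the note above refers to, a full barrier can be
+    // expressed as a locked no-op read-modify-write of the top-of-stack word
+    // (illustrative only):
+    //
+    //   lock_addl(Imm32(0), Operand(Address(StackPointer, 0)));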
+ void jmp(wasm::TrapDesc target) {
+ Label l;
+ jmp(&l);
+ bindLater(&l, target);
+ }
+
+ void jmp(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.jmp_m(op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.jmp_m(op.disp(), op.base(), op.index(), op.scale());
+ break;
+ case Operand::REG:
+ masm.jmp_r(op.reg());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpEAX(Label* label) { cmpSrc(label); }
+ void bind(Label* label) {
+ JmpDst dst(masm.label());
+ if (label->used()) {
+ bool more;
+ JmpSrc jmp(label->offset());
+ do {
+ JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ masm.linkJump(jmp, dst);
+ jmp = next;
+ } while (more);
+ }
+ label->bind(dst.offset());
+ }
+ void bindLater(Label* label, wasm::TrapDesc target) {
+ if (label->used()) {
+ JmpSrc jmp(label->offset());
+ do {
+ append(wasm::TrapSite(target, jmp.offset()));
+ } while (masm.nextJump(jmp, &jmp));
+ }
+ label->reset();
+ }
+ void bind(RepatchLabel* label) {
+ JmpDst dst(masm.label());
+ if (label->used()) {
+ JmpSrc jmp(label->offset());
+ masm.linkJump(jmp, dst);
+ }
+ label->bind(dst.offset());
+ }
+ void use(CodeOffset* label) {
+ label->bind(currentOffset());
+ }
+ uint32_t currentOffset() {
+ return masm.label().offset();
+ }
+
+    // Re-routes all pending jumps on the given label to the target label and
+    // then resets the label.
+ void retarget(Label* label, Label* target) {
+ if (!label->used())
+ return;
+ bool more;
+ JmpSrc jmp(label->offset());
+ do {
+ JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ if (target->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(jmp, JmpDst(target->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc prev(target->use(jmp.offset()));
+ masm.setNextJump(jmp, prev);
+ }
+ jmp = JmpSrc(next.offset());
+ } while (more);
+ label->reset();
+ }
+
+ static void Bind(uint8_t* raw, CodeOffset* label, const void* address) {
+ if (label->bound()) {
+ intptr_t offset = label->offset();
+ X86Encoding::SetPointer(raw + offset, address);
+ }
+ }
+
+ // See Bind and X86Encoding::setPointer.
+ size_t labelToPatchOffset(CodeOffset label) {
+ return label.offset() - sizeof(void*);
+ }
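+    // That is, the CodeOffset records the position just past the baked-in
+    // pointer, so the patchable word itself begins sizeof(void*) bytes earlier.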
+
+ void ret() {
+ masm.ret();
+ }
+ void retn(Imm32 n) {
+ // Remove the size of the return address which is included in the frame.
+ masm.ret_i(n.value - sizeof(void*));
+ }
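+    // For example, on a 32-bit build retn(Imm32(12)) emits "ret $8": the CPU
+    // pops the 4-byte return address and then discards 8 further bytes of
+    // arguments from the stack.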
+ CodeOffset call(Label* label) {
+ if (label->bound()) {
+ masm.linkJump(masm.call(), JmpDst(label->offset()));
+ } else {
+ JmpSrc j = masm.call();
+ JmpSrc prev = JmpSrc(label->use(j.offset()));
+ masm.setNextJump(j, prev);
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset call(Register reg) {
+ masm.call_r(reg.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void call(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.call_r(op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.call_m(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ CodeOffset callWithPatch() {
+ return CodeOffset(masm.call().offset());
+ }
+
+ struct AutoPrepareForPatching : X86Encoding::AutoUnprotectAssemblerBufferRegion {
+ explicit AutoPrepareForPatching(AssemblerX86Shared& masm)
+ : X86Encoding::AutoUnprotectAssemblerBufferRegion(masm.masm, 0, masm.size())
+ {}
+ };
+
+ void patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ // The caller uses AutoUnprotectBuffer.
+ unsigned char* code = masm.data();
+ X86Encoding::SetRel32(code + callerOffset, code + calleeOffset);
+ }
+ CodeOffset farJumpWithPatch() {
+ return CodeOffset(masm.jmp().offset());
+ }
+ void patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ // The caller uses AutoUnprotectBuffer.
+ unsigned char* code = masm.data();
+ X86Encoding::SetRel32(code + farJump.offset(), code + targetOffset);
+ }
+ static void repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset) {
+ X86Encoding::SetRel32(code + farJumpOffset, code + targetOffset);
+ }
+
+ CodeOffset twoByteNop() {
+ return CodeOffset(masm.twoByteNop().offset());
+ }
+ static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target) {
+ X86Encoding::BaseAssembler::patchTwoByteNopToJump(jump, target);
+ }
+ static void patchJumpToTwoByteNop(uint8_t* jump) {
+ X86Encoding::BaseAssembler::patchJumpToTwoByteNop(jump);
+ }
+
+ void breakpoint() {
+ masm.int3();
+ }
+
+ static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
+ static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
+ static bool HasSSSE3() { return CPUInfo::IsSSSE3Present(); }
+ static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
+ static bool HasPOPCNT() { return CPUInfo::IsPOPCNTPresent(); }
+ static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsSimd() { return CPUInfo::IsSSE2Present(); }
+ static bool HasAVX() { return CPUInfo::IsAVXPresent(); }
+
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(const Operand& rhs, Register lhs) {
+ switch (rhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_rr(rhs.reg(), lhs.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_mr(rhs.disp(), rhs.base(), lhs.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_mr(rhs.address(), lhs.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Imm32 rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpl_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(), lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset cmplWithPatch(Imm32 rhs, Register lhs) {
+ masm.cmpl_i32r(rhs.value, lhs.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void cmpw(Register rhs, Register lhs) {
+ masm.cmpw_rr(rhs.encoding(), lhs.encoding());
+ }
+ void setCC(Condition cond, Register r) {
+ masm.setCC_r(static_cast<X86Encoding::Condition>(cond), r.encoding());
+ }
+ void testb(Register rhs, Register lhs) {
+ MOZ_ASSERT(AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(rhs));
+ MOZ_ASSERT(AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(lhs));
+ masm.testb_rr(rhs.encoding(), lhs.encoding());
+ }
+ void testw(Register rhs, Register lhs) {
+ masm.testw_rr(lhs.encoding(), rhs.encoding());
+ }
+ void testl(Register rhs, Register lhs) {
+ masm.testl_rr(lhs.encoding(), rhs.encoding());
+ }
+ void testl(Imm32 rhs, Register lhs) {
+ masm.testl_ir(rhs.value, lhs.encoding());
+ }
+ void testl(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.testl_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.testl_i32m(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.testl_i32m(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void addl(Imm32 imm, Register dest) {
+ masm.addl_ir(imm.value, dest.encoding());
+ }
+ CodeOffset addlWithPatch(Imm32 imm, Register dest) {
+ masm.addl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void addl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.addl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addl_im(imm.value, op.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.addw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addw_im(imm.value, op.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subl(Imm32 imm, Register dest) {
+ masm.subl_ir(imm.value, dest.encoding());
+ }
+ void subl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.subl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.subw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addl(Register src, Register dest) {
+ masm.addl_rr(src.encoding(), dest.encoding());
+ }
+ void addl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subl(Register src, Register dest) {
+ masm.subl_rr(src.encoding(), dest.encoding());
+ }
+ void subl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.subl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(Register reg, Register dest) {
+ masm.orl_rr(reg.encoding(), dest.encoding());
+ }
+ void orl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.orl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.orw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(Imm32 imm, Register reg) {
+ masm.orl_ir(imm.value, reg.encoding());
+ }
+ void orl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.orl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.orw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(Register src, Register dest) {
+ masm.xorl_rr(src.encoding(), dest.encoding());
+ }
+ void xorl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.xorl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.xorw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(Imm32 imm, Register reg) {
+ masm.xorl_ir(imm.value, reg.encoding());
+ }
+ void xorl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.xorl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.xorw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(Register src, Register dest) {
+ masm.andl_rr(src.encoding(), dest.encoding());
+ }
+ void andl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.andl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.andw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(Imm32 imm, Register dest) {
+ masm.andl_ir(imm.value, dest.encoding());
+ }
+ void andl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.andl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.andw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.addl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.orl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.xorl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.andl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void bsrl(const Register& src, const Register& dest) {
+ masm.bsrl_rr(src.encoding(), dest.encoding());
+ }
+ void bsfl(const Register& src, const Register& dest) {
+ masm.bsfl_rr(src.encoding(), dest.encoding());
+ }
+ void popcntl(const Register& src, const Register& dest) {
+ masm.popcntl_rr(src.encoding(), dest.encoding());
+ }
+ void imull(Register multiplier) {
+ masm.imull_r(multiplier.encoding());
+ }
+ void umull(Register multiplier) {
+ masm.mull_r(multiplier.encoding());
+ }
+ void imull(Imm32 imm, Register dest) {
+ masm.imull_ir(imm.value, dest.encoding(), dest.encoding());
+ }
+ void imull(Register src, Register dest) {
+ masm.imull_rr(src.encoding(), dest.encoding());
+ }
+ void imull(Imm32 imm, Register src, Register dest) {
+ masm.imull_ir(imm.value, src.encoding(), dest.encoding());
+ }
+ void imull(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.imull_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.imull_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void negl(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.negl_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.negl_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void negl(Register reg) {
+ masm.negl_r(reg.encoding());
+ }
+ void notl(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.notl_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.notl_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void notl(Register reg) {
+ masm.notl_r(reg.encoding());
+ }
+ void shrl(const Imm32 imm, Register dest) {
+ masm.shrl_ir(imm.value, dest.encoding());
+ }
+ void shll(const Imm32 imm, Register dest) {
+ masm.shll_ir(imm.value, dest.encoding());
+ }
+ void sarl(const Imm32 imm, Register dest) {
+ masm.sarl_ir(imm.value, dest.encoding());
+ }
+ void shrl_cl(Register dest) {
+ masm.shrl_CLr(dest.encoding());
+ }
+ void shll_cl(Register dest) {
+ masm.shll_CLr(dest.encoding());
+ }
+ void sarl_cl(Register dest) {
+ masm.sarl_CLr(dest.encoding());
+ }
+ void shrdl_cl(Register src, Register dest) {
+ masm.shrdl_CLr(src.encoding(), dest.encoding());
+ }
+ void shldl_cl(Register src, Register dest) {
+ masm.shldl_CLr(src.encoding(), dest.encoding());
+ }
+
+ void roll(const Imm32 imm, Register dest) {
+ masm.roll_ir(imm.value, dest.encoding());
+ }
+ void roll_cl(Register dest) {
+ masm.roll_CLr(dest.encoding());
+ }
+ void rorl(const Imm32 imm, Register dest) {
+ masm.rorl_ir(imm.value, dest.encoding());
+ }
+ void rorl_cl(Register dest) {
+ masm.rorl_CLr(dest.encoding());
+ }
+
+ void incl(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.incl_m32(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_incl(const Operand& op) {
+ masm.prefix_lock();
+ incl(op);
+ }
+
+ void decl(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.decl_m32(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_decl(const Operand& op) {
+ masm.prefix_lock();
+ decl(op);
+ }
+
+ void addb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.addb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void addb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.addb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addb_rm(src.encoding(), op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void subb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.subb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void subb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.subb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subb_rm(src.encoding(), op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void andb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.andb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void andb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.andb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andb_rm(src.encoding(), op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void orb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.orb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void orb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.orb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orb_rm(src.encoding(), op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void xorb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xorb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void xorb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xorb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorb_rm(src.encoding(), op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ template<typename T>
+ void lock_addb(T src, const Operand& op) {
+ masm.prefix_lock();
+ addb(src, op);
+ }
+ template<typename T>
+ void lock_subb(T src, const Operand& op) {
+ masm.prefix_lock();
+ subb(src, op);
+ }
+ template<typename T>
+ void lock_andb(T src, const Operand& op) {
+ masm.prefix_lock();
+ andb(src, op);
+ }
+ template<typename T>
+ void lock_orb(T src, const Operand& op) {
+ masm.prefix_lock();
+ orb(src, op);
+ }
+ template<typename T>
+ void lock_xorb(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorb(src, op);
+ }
+
+ template<typename T>
+ void lock_addw(T src, const Operand& op) {
+ masm.prefix_lock();
+ addw(src, op);
+ }
+ template<typename T>
+ void lock_subw(T src, const Operand& op) {
+ masm.prefix_lock();
+ subw(src, op);
+ }
+ template<typename T>
+ void lock_andw(T src, const Operand& op) {
+ masm.prefix_lock();
+ andw(src, op);
+ }
+ template<typename T>
+ void lock_orw(T src, const Operand& op) {
+ masm.prefix_lock();
+ orw(src, op);
+ }
+ template<typename T>
+ void lock_xorw(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorw(src, op);
+ }
+
+ // Note, lock_addl(imm, op) is used for a memory barrier on non-SSE2 systems,
+ // among other things. Do not optimize, replace by XADDL, or similar.
+ template<typename T>
+ void lock_addl(T src, const Operand& op) {
+ masm.prefix_lock();
+ addl(src, op);
+ }
+ template<typename T>
+ void lock_subl(T src, const Operand& op) {
+ masm.prefix_lock();
+ subl(src, op);
+ }
+ template<typename T>
+ void lock_andl(T src, const Operand& op) {
+ masm.prefix_lock();
+ andl(src, op);
+ }
+ template<typename T>
+ void lock_orl(T src, const Operand& op) {
+ masm.prefix_lock();
+ orl(src, op);
+ }
+ template<typename T>
+ void lock_xorl(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorl(src, op);
+ }
+
+ void lock_cmpxchgb(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgb(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgb(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_cmpxchgw(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgw(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgw(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_cmpxchgl(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgl(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgl(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgb(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgb_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgb_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xchgw(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgw_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgw_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xchgl(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgl_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgl_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void lock_xaddb(Register srcdest, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.lock_xaddb_rm(srcdest.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.lock_xaddb_rm(srcdest.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_xaddw(Register srcdest, const Operand& mem) {
+ masm.prefix_16_for_32();
+ lock_xaddl(srcdest, mem);
+ }
+ void lock_xaddl(Register srcdest, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.lock_xaddl_rm(srcdest.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.lock_xaddl_rm(srcdest.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void push(const Imm32 imm) {
+ masm.push_i(imm.value);
+ }
+
+ void push(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.push_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.push_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void push(Register src) {
+ masm.push_r(src.encoding());
+ }
+ void push(const Address& src) {
+ masm.push_m(src.offset, src.base.encoding());
+ }
+
+ void pop(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.pop_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.pop_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void pop(Register src) {
+ masm.pop_r(src.encoding());
+ }
+ void pop(const Address& src) {
+ masm.pop_m(src.offset, src.base.encoding());
+ }
+
+ void pushFlags() {
+ masm.push_flags();
+ }
+ void popFlags() {
+ masm.pop_flags();
+ }
+
+#ifdef JS_CODEGEN_X86
+ void pushAllRegs() {
+ masm.pusha();
+ }
+ void popAllRegs() {
+ masm.popa();
+ }
+#endif
+
+ // Zero-extend byte to 32-bit integer.
+ void movzbl(Register src, Register dest) {
+ masm.movzbl_rr(src.encoding(), dest.encoding());
+ }
+
+ void cdq() {
+ masm.cdq();
+ }
+ void idiv(Register divisor) {
+ masm.idivl_r(divisor.encoding());
+ }
+ void udiv(Register divisor) {
+ masm.divl_r(divisor.encoding());
+ }
+
+ void vpinsrb(unsigned lane, Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpinsrb_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpinsrw(unsigned lane, Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vpinsrw_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
+ void vpinsrd(unsigned lane, Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpinsrd_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
+ void vpextrb(unsigned lane, FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpextrb_irr(lane, src.encoding(), dest.encoding());
+ }
+ void vpextrw(unsigned lane, FloatRegister src, Register dest) {
+ masm.vpextrw_irr(lane, src.encoding(), dest.encoding());
+ }
+ void vpextrd(unsigned lane, FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpextrd_irr(lane, src.encoding(), dest.encoding());
+ }
+ void vpsrldq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrldq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpsllq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrlq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpslld(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpslld_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpslld(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpslld_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrad(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrad_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrad(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrad_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrld(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrld_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrld(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrld_ir(count.value, src0.encoding(), dest.encoding());
+ }
+
+ void vpsllw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsllw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsraw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsraw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsraw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsraw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrlw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrlw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+
+ void vcvtsi2sd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vcvtsi2sd_rr(src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcvtsi2sd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vcvtsi2sd_mr(src1.disp(), src1.base(), src1.index(), src1.scale(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcvttsd2si(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttsd2si_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttss2si(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttss2si_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtsi2ss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vcvtsi2ss_rr(src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcvtsi2ss_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vcvtsi2ss_mr(src1.disp(), src1.base(), src1.index(), src1.scale(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcvtsi2ss(Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsi2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsi2sd(Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsi2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvttps2dq(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttps2dq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtdq2ps(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtdq2ps_rr(src.encoding(), dest.encoding());
+ }
+ void vmovmskpd(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovmskpd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovmskps(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovmskps_rr(src.encoding(), dest.encoding());
+ }
+ void vptest(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vptest_rr(rhs.encoding(), lhs.encoding());
+ }
+ void vucomisd(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vucomisd_rr(rhs.encoding(), lhs.encoding());
+ }
+ void vucomiss(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vucomiss_rr(rhs.encoding(), lhs.encoding());
+ }
+
+ void vpcmpeqb(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqb_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqb_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqb_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtb(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtb_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtb_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtb_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpcmpeqw(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqw_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqw_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqw_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtw(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtw_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtw_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtw_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpcmpeqd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqd_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqd_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqd_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtd_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtd_mr(rhs.disp(), rhs.base(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtd_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vcmpps(uint8_t order, Operand src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ // :TODO: (Bug 1132894) See LIRGeneratorX86Shared::lowerForFPU
+ // FIXME: This logic belongs in the MacroAssembler.
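+        // Without AVX, cmpps is destructive: the output register is also the
+        // first input. So when src0 and dest differ, copy src0 into dest first,
+        // saving src1 in the scratch SIMD register if that copy would clobber it.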
+ if (!HasAVX() && !src0.aliases(dest)) {
+ if (src1.kind() == Operand::FPREG &&
+ dest.aliases(FloatRegister::FromCode(src1.fpu())))
+ {
+ vmovdqa(src1, ScratchSimd128Reg);
+ src1 = Operand(ScratchSimd128Reg);
+ }
+ vmovdqa(src0, dest);
+ src0 = dest;
+ }
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vcmpps_rr(order, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcmpps_mr(order, src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vcmpps_mr(order, src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcmpeqps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_EQ, src1, src0, dest);
+ }
+ void vcmpltps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_LT, src1, src0, dest);
+ }
+ void vcmpleps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_LE, src1, src0, dest);
+ }
+ void vcmpunordps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_UNORD, src1, src0, dest);
+ }
+ void vcmpneqps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_NEQ, src1, src0, dest);
+ }
+ void vcmpordps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_ORD, src1, src0, dest);
+ }
+ void vrcpps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vrcpps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vrcpps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vrcpps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsqrtps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vsqrtps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsqrtps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsqrtps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vrsqrtps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vrsqrtps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vrsqrtps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vrsqrtps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovd(Register src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovd(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovd_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovd(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddsb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddsb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddusb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddusb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddusb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddusb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubsb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubsb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubusb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubusb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubusb_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubusb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddsw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddsw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddusw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddusw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddusw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddusw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubsw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubsw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubusw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubusw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubusw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubusw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmuludq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpmuludq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpmuludq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmuludq_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmuludq_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmullw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmullw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmullw_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmulld(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmulld_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmulld_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpmulld_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vaddps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsubps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmulps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vdivps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vdivps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmaxps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmaxps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmaxps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmaxps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vminps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vminps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vminps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vandps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vandps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vandps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vandps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vandnps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+        // Computes dest = ~src0 & src1: negates the bits of src0, then ANDs with src1.
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vandnps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vandnps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vandnps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vorps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vorps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vorps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vorps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vxorps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vxorps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vxorps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vxorps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpand(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpand_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpand(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpand_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpand_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpand_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpor(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpor_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpor(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpor_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpor_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpor_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpxor(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpxor_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpxor(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpxor_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpxor_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpxor_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpandn(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpandn_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpandn(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpandn_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpandn_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpandn_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpshufd(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshufd_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufd(uint32_t mask, const Operand& src1, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpshufd_irr(mask, src1.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpshufd_imr(mask, src1.disp(), src1.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpshufd_imr(mask, src1.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpshuflw(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshuflw_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufhw(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshufhw_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufb(FloatRegister mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSSE3());
+ masm.vpshufb_rr(mask.encoding(), src.encoding(), dest.encoding());
+ }
+ void vmovddup(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ masm.vmovddup_rr(src.encoding(), dest.encoding());
+ }
+ void vmovhlps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovhlps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmovlhps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovlhps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpcklps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vunpcklps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpcklps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vunpcklps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vunpcklps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vunpcklps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vunpckhps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vunpckhps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpckhps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vunpckhps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vunpckhps_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vunpckhps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vshufps(uint32_t mask, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vshufps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vshufps(uint32_t mask, const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vshufps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vshufps_imr(mask, src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vshufps_imr(mask, src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vaddsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vaddsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vaddss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vaddss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vaddsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddsd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vaddss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddss_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddss_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsubsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsubss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsubss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsubsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubss_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmulsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmulsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulss_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmulss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vdivsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vdivss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vdivss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivss_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vxorpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vxorpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vxorps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vxorps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vorpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vorpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vorps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vorps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vandpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vandpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vandps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vandps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsqrtsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsqrtsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsqrtss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsqrtss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vroundsd(X86Encoding::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vroundsd_irr(mode, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vroundss(X86Encoding::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vroundss_irr(mode, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ unsigned vinsertpsMask(unsigned sourceLane, unsigned destLane, unsigned zeroMask = 0)
+ {
+ // Note that the sourceLane bits are ignored in the case of a source
+        // memory operand, and the source is the given 32-bit memory location.
+ MOZ_ASSERT(zeroMask < 16);
+        unsigned ret = zeroMask;
+ ret |= destLane << 4;
+ ret |= sourceLane << 6;
+ MOZ_ASSERT(ret < 256);
+ return ret;
+ }
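+    // For example, vinsertpsMask(2, 1) encodes "take source lane 2, write it to
+    // destination lane 1, zero nothing": (2 << 6) | (1 << 4) == 0x90.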
+ void vinsertps(uint32_t mask, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vinsertps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vinsertps(uint32_t mask, const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vinsertps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vinsertps_imr(mask, src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ unsigned blendpsMask(bool x, bool y, bool z, bool w) {
+ return (x << 0) | (y << 1) | (z << 2) | (w << 3);
+ }
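+    // For example, blendpsMask(true, false, true, false) == 0b0101: set bits
+    // select the corresponding lanes from src1, clear bits select them from src0.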
+ void vblendps(unsigned mask, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vblendps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vblendps(unsigned mask, const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vblendps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vblendps_imr(mask, src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vblendvps(FloatRegister mask, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vblendvps_rr(mask.encoding(), src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vblendvps(FloatRegister mask, const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vblendvps_rr(mask.encoding(), src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vblendvps_mr(mask.encoding(), src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovsldup(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ masm.vmovsldup_rr(src.encoding(), dest.encoding());
+ }
+ void vmovsldup(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovsldup_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovsldup_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovshdup(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ masm.vmovshdup_rr(src.encoding(), dest.encoding());
+ }
+ void vmovshdup(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovshdup_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovshdup_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vminsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vminsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vminsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vminsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vminss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmaxsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmaxsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmaxsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmaxsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmaxsd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmaxss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmaxss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
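+    // x87 helpers. fisttp (truncating integer store) requires SSE3; the other
+    // instructions below are baseline x87.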
+ void fisttp(const Operand& dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fisttp_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fistp(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fistp_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fnstcw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fnstcw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fldcw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fldcw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fnstsw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fnstsw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fld(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fld_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fld32(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fld32_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fstp(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ // Defined for compatibility with ARM's assembler
+ uint32_t actualIndex(uint32_t x) {
+ return x;
+ }
+
+ void flushBuffer() {
+ }
+
+ // Patching.
+
+ static size_t PatchWrite_NearCallSize() {
+ return 5;
+ }
+ static uintptr_t GetPointer(uint8_t* instPtr) {
+ uintptr_t* ptr = ((uintptr_t*) instPtr) - 1;
+ return *ptr;
+ }
+    // Write a relative call at the start location |startLabel|.
+    // Note that this DOES NOT patch data that comes before |startLabel|.
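+    // The call is encoded as opcode 0xE8 followed by a 32-bit displacement
+    // relative to the end of the five-byte instruction, hence the subtraction
+    // of PatchWrite_NearCallSize() from the offset below.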
+ static void PatchWrite_NearCall(CodeLocationLabel startLabel, CodeLocationLabel target) {
+ uint8_t* start = startLabel.raw();
+ *start = 0xE8;
+ ptrdiff_t offset = target - startLabel - PatchWrite_NearCallSize();
+ MOZ_ASSERT(int32_t(offset) == offset);
+ *((int32_t*) (start + 1)) = offset;
+ }
+
+ static void PatchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) {
+ *((int32_t*) dataLabel.raw() - 1) = toWrite.value;
+ }
+
+ static void PatchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData,
+ PatchedImmPtr expectedData) {
+ // The pointer given is a pointer to *after* the data.
+ uintptr_t* ptr = ((uintptr_t*) data.raw()) - 1;
+ MOZ_ASSERT(*ptr == (uintptr_t)expectedData.value);
+ *ptr = (uintptr_t)newData.value;
+ }
+ static void PatchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) {
+ PatchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value));
+ }
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) {
+ MOZ_CRASH("Unused.");
+ }
+
+ static uint32_t NopSize() {
+ return 1;
+ }
+ static uint8_t* NextInstruction(uint8_t* cur, uint32_t* count) {
+ MOZ_CRASH("nextInstruction NYI on x86");
+ }
+
+ // Toggle a jmp or cmp emitted by toggledJump().
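+    // 0x3D (cmp eax, imm32), 0xE9 (jmp rel32) and 0xE8 (call rel32) all consist
+    // of one opcode byte followed by a 32-bit immediate, so toggling only needs
+    // to rewrite the opcode byte.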
+ static void ToggleToJmp(CodeLocationLabel inst) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0x3D);
+ *ptr = 0xE9;
+ }
+ static void ToggleToCmp(CodeLocationLabel inst) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0xE9);
+ *ptr = 0x3D;
+ }
+ static void ToggleCall(CodeLocationLabel inst, bool enabled) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0x3D || // CMP
+ *ptr == 0xE8); // CALL
+ *ptr = enabled ? 0xE8 : 0x3D;
+ }
+
+ MOZ_COLD void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Assembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp
new file mode 100644
index 000000000..6dec02a31
--- /dev/null
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/AssemblerBuffer-x86-shared.h"
+
+#include "mozilla/Sprintf.h"
+
+#include "jsopcode.h"
+
+void js::jit::GenericAssembler::spew(const char* fmt, va_list va)
+{
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer)
+ printer->printf("%s\n", buf);
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+}
diff --git a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
new file mode 100644
index 000000000..8cb557784
--- /dev/null
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jit_x86_shared_AssemblerBuffer_x86_shared_h
+#define jit_x86_shared_AssemblerBuffer_x86_shared_h
+
+#include <stdarg.h>
+#include <string.h>
+
+#include "ds/PageProtectingVector.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitSpewer.h"
+
+// Spew formatting helpers.
+#define PRETTYHEX(x) (((x)<0)?"-":""),(((x)<0)?-(x):(x))
+
+#define MEM_o "%s0x%x"
+#define MEM_os MEM_o "(,%s,%d)"
+#define MEM_ob MEM_o "(%s)"
+#define MEM_obs MEM_o "(%s,%s,%d)"
+
+#define MEM_o32 "%s0x%04x"
+#define MEM_o32s MEM_o32 "(,%s,%d)"
+#define MEM_o32b MEM_o32 "(%s)"
+#define MEM_o32bs MEM_o32 "(%s,%s,%d)"
+#define MEM_o32r ".Lfrom%d(%%rip)"
+
+#define ADDR_o(offset) PRETTYHEX(offset)
+#define ADDR_os(offset, index, scale) ADDR_o(offset), GPRegName((index)), (1<<(scale))
+#define ADDR_ob(offset, base) ADDR_o(offset), GPRegName((base))
+#define ADDR_obs(offset, base, index, scale) ADDR_ob(offset, base), GPRegName((index)), (1<<(scale))
+
+#define ADDR_o32(offset) ADDR_o(offset)
+#define ADDR_o32s(offset, index, scale) ADDR_os(offset, index, scale)
+#define ADDR_o32b(offset, base) ADDR_ob(offset, base)
+#define ADDR_o32bs(offset, base, index, scale) ADDR_obs(offset, base, index, scale)
+#define ADDR_o32r(offset) (offset)
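+
+// Each MEM_* format string is meant to be paired with the matching ADDR_*
+// argument pack: for example MEM_ob expands to "%s0x%x(%s)" and
+// ADDR_ob(offset, base) supplies the sign, the absolute displacement and the
+// base register name, producing output such as "-0x8(<base>)".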
+
+namespace js {
+
+ class Sprinter;
+
+namespace jit {
+
+ class AssemblerBuffer
+ {
+ template<size_t size, typename T>
+ MOZ_ALWAYS_INLINE void sizedAppendUnchecked(T value)
+ {
+ m_buffer.infallibleAppend(reinterpret_cast<unsigned char*>(&value), size);
+ }
+
+ template<size_t size, typename T>
+ MOZ_ALWAYS_INLINE void sizedAppend(T value)
+ {
+ if (MOZ_UNLIKELY(!m_buffer.append(reinterpret_cast<unsigned char*>(&value), size)))
+ oomDetected();
+ }
+
+ public:
+ AssemblerBuffer()
+ : m_oom(false)
+ {
+ // Provide memory protection once the buffer starts to get big.
+ m_buffer.setLowerBoundForProtection(32 * 1024);
+ }
+
+ void ensureSpace(size_t space)
+ {
+ if (MOZ_UNLIKELY(!m_buffer.reserve(m_buffer.length() + space)))
+ oomDetected();
+ }
+
+ bool isAligned(size_t alignment) const
+ {
+ return !(m_buffer.length() & (alignment - 1));
+ }
+
+ MOZ_ALWAYS_INLINE void putByteUnchecked(int value) { sizedAppendUnchecked<1>(value); }
+ MOZ_ALWAYS_INLINE void putShortUnchecked(int value) { sizedAppendUnchecked<2>(value); }
+ MOZ_ALWAYS_INLINE void putIntUnchecked(int value) { sizedAppendUnchecked<4>(value); }
+ MOZ_ALWAYS_INLINE void putInt64Unchecked(int64_t value) { sizedAppendUnchecked<8>(value); }
+
+ MOZ_ALWAYS_INLINE void putByte(int value) { sizedAppend<1>(value); }
+ MOZ_ALWAYS_INLINE void putShort(int value) { sizedAppend<2>(value); }
+ MOZ_ALWAYS_INLINE void putInt(int value) { sizedAppend<4>(value); }
+ MOZ_ALWAYS_INLINE void putInt64(int64_t value) { sizedAppend<8>(value); }
+
+ MOZ_MUST_USE bool append(const unsigned char* values, size_t size)
+ {
+ if (MOZ_UNLIKELY(!m_buffer.append(values, size))) {
+ oomDetected();
+ return false;
+ }
+ return true;
+ }
+
+ unsigned char* data()
+ {
+ return m_buffer.begin();
+ }
+
+ size_t size() const
+ {
+ return m_buffer.length();
+ }
+
+ bool oom() const
+ {
+ return m_oom;
+ }
+
+ const unsigned char* buffer() const {
+ MOZ_ASSERT(!m_oom);
+ return m_buffer.begin();
+ }
+
+ void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_buffer.unprotectRegion(firstByteOffset, lastByteOffset);
+ }
+ void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_buffer.reprotectRegion(firstByteOffset, lastByteOffset);
+ }
+
+ protected:
+ /*
+ * OOM handling: This class can OOM in the ensureSpace() method trying
+ * to allocate a new buffer. In response to an OOM, we need to avoid
+ * crashing and report the error. We also want to make it so that
+ * users of this class need to check for OOM only at certain points
+ * and not after every operation.
+ *
+ * Our strategy for handling an OOM is to set m_oom, and then clear (but
+ * not free) m_buffer, preserving the current buffer. This way, the user
+ * can continue assembling into the buffer, deferring OOM checking
+ * until the user wants to read code out of the buffer.
+ *
+ * See also the |buffer| method.
+ */
+ void oomDetected() {
+ m_oom = true;
+ m_buffer.clear();
+ }
+
+ PageProtectingVector<unsigned char, 256, SystemAllocPolicy> m_buffer;
+ bool m_oom;
+ };
+
+ class GenericAssembler
+ {
+ Sprinter* printer;
+
+ public:
+
+ GenericAssembler()
+ : printer(NULL)
+ {}
+
+ void setPrinter(Sprinter* sp) {
+ printer = sp;
+ }
+
+ void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3)
+ {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+
+ MOZ_COLD void spew(const char* fmt, va_list va);
+ };
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_AssemblerBuffer_x86_shared_h */
diff --git a/js/src/jit/x86-shared/AtomicOperations-x86-shared.h b/js/src/jit/x86-shared/AtomicOperations-x86-shared.h
new file mode 100644
index 000000000..b34aba7ef
--- /dev/null
+++ b/js/src/jit/x86-shared/AtomicOperations-x86-shared.h
@@ -0,0 +1,602 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For overall documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_shared_AtomicOperations_x86_shared_h
+#define jit_shared_AtomicOperations_x86_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+// Lock-freedom on x86 and x64:
+//
+// On x86 and x64 there are atomic instructions for 8-byte accesses:
+//
+// Loads and stores:
+// - Loads and stores are single-copy atomic for up to 8 bytes
+// starting with the Pentium; the store requires a post-fence for
+// sequential consistency
+//
+// CompareExchange:
+// - On x64 CMPXCHGQ can always be used
+// - On x86 CMPXCHG8B can be used starting with the first Pentium
+//
+// Exchange:
+// - On x64 XCHGQ can always be used
+// - On x86 one has to use a CompareExchange loop
+//
+// Observe also that the JIT will not be enabled unless we have SSE2,
+// which was introduced with the Pentium 4. Ergo the JIT will be able
+// to use atomic instructions for up to 8 bytes on all x86 platforms
+// for the primitives we care about.
+//
+// However, C++ compilers and libraries may not provide access to
+// those 8-byte instructions directly. Clang in 32-bit mode does not
+// provide 8-byte atomic primitives at all (even with eg -arch i686
+// specified). On Windows 32-bit, MSVC does not provide
+// _InterlockedExchange64 since it does not map directly to an
+// instruction.
+//
+// There are thus sundry workarounds below to handle known corner
+// cases.
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a
+// backward-compatible option for older compilers: enable this to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
+
+// #define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
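+
+// For example, with the default tactic the seq_cst fetch-add below compiles
+// to
+//
+//     __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+//
+// whereas with ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS defined the same
+// operation is expressed with the older full-barrier builtin
+//
+//     __sync_fetch_and_add(addr, val);
+//
+// (see fetchAddSeqCst below).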
+
+// Lock-free 8-byte atomics are assumed on x86 but must be disabled in
+// corner cases, see comments below and in isLockfree8().
+
+# define LOCKFREE8
+
+// This pertains to Clang compiling with -m32, in this case the 64-bit
+// __atomic builtins are not available (observed on various Mac OS X
+// versions with Apple Clang and on Linux with Clang 3.5).
+//
+// For now just punt: disable lock-free 8-byte data. The JIT will
+// call isLockfree8() to determine what to do and will stay in sync.
+// (Bug 1146817 tracks the work to improve on this.)
+
+# if defined(__clang__) && defined(__i386)
+# undef LOCKFREE8
+# endif
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+# endif
+# ifdef LOCKFREE8
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+# endif
+ return true;
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ // Inhibit compiler reordering with a volatile load. The x86 does
+ // not reorder loads with respect to subsequent loads or stores
+ // and no ordering barrier is required here. See more elaborate
+ // comments in storeSeqCst.
+ T v = *static_cast<T volatile*>(addr);
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+# ifndef LOCKFREE8
+template<>
+inline int64_t
+js::jit::AtomicOperations::loadSeqCst(int64_t* addr)
+{
+ MOZ_CRASH();
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::loadSeqCst(uint64_t* addr)
+{
+ MOZ_CRASH();
+}
+# endif // LOCKFREE8
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ // Inhibit compiler reordering with a volatile store. The x86 may
+ // reorder a store with respect to a subsequent load from a
+ // different location, hence there is an ordering barrier here to
+ // prevent that.
+ //
+ // By way of background, look to eg
+ // http://bartoszmilewski.com/2008/11/05/who-ordered-memory-fences-on-an-x86/
+ //
+ // Consider:
+ //
+ // uint8_t x = 0, y = 0; // to start
+ //
+ // thread1:
+ // sx: AtomicOperations::store(&x, 1);
+ // gy: uint8_t obs1 = AtomicOperations::loadSeqCst(&y);
+ //
+ // thread2:
+ // sy: AtomicOperations::store(&y, 1);
+ // gx: uint8_t obs2 = AtomicOperations::loadSeqCst(&x);
+ //
+ // Sequential consistency requires a total global ordering of
+ // operations: sx-gy-sy-gx, sx-sy-gx-gy, sx-sy-gy-gx, sy-gx-sx-gy,
+ // sy-sx-gy-gx, or sy-sx-gx-gy. In every ordering at least one of
+ // sx-before-gx or sy-before-gy happens, so *at least one* of
+ // obs1/obs2 is 1.
+ //
+ // If AtomicOperations::{load,store}SeqCst were just volatile
+ // {load,store}, x86 could reorder gx/gy before each thread's
+    // prior store. That would permit gx-gy-sx-sy: both loads would be
+ // 0! Thus after a volatile store we must synchronize to ensure
+ // the store happens before the load.
+ *static_cast<T volatile*>(addr) = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# ifndef LOCKFREE8
+template<>
+inline void
+js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val)
+{
+ MOZ_CRASH();
+}
+
+template<>
+inline void
+js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val)
+{
+ MOZ_CRASH();
+}
+# endif // LOCKFREE8
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ do {
+ // Here I assume the compiler will not hoist the load. It
+        // shouldn't, because the CAS could affect *addr.
+ v = *addr;
+ } while (!__sync_bool_compare_and_swap(addr, v, val));
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+# ifndef LOCKFREE8
+template<>
+inline int64_t
+js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val)
+{
+ MOZ_CRASH();
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val)
+{
+ MOZ_CRASH();
+}
+# endif // LOCKFREE8
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+# ifndef LOCKFREE8
+template<>
+inline int64_t
+js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
+{
+ MOZ_CRASH();
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
+{
+ MOZ_CRASH();
+}
+# endif // LOCKFREE8
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ continue;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1); // Should turn into LOCK XADD
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+# undef LOCKFREE8
+
+#elif defined(_MSC_VER)
+
+// On 32-bit CPUs there is no 64-bit XCHG instruction; one must
+// instead use a loop with CMPXCHG8B. Since MSVC provides
+// _InterlockedExchange64 only if it maps directly to XCHG, the
+// workaround must be manual.
+
+# define HAVE_EXCHANGE64
+
+# if !_WIN64
+# undef HAVE_EXCHANGE64
+# endif
+
+// Below, _ReadWriteBarrier is a compiler directive, preventing
+// reordering of instructions and reuse of memory values across it.
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+ // See general comments at the start of this file.
+ //
+ // The MSDN docs suggest very strongly that if code is compiled for
+    // Pentium or better, the 64-bit primitives will be lock-free; see
+    // eg the "Remarks" section of the page for _InterlockedCompareExchange64,
+ // currently here:
+ // https://msdn.microsoft.com/en-us/library/ttk2z1ws%28v=vs.85%29.aspx
+ //
+    // But I've found no way to assert that at compile time or run time;
+ // there appears to be no WinAPI is_lock_free() test.
+ return true;
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+ _ReadWriteBarrier();
+# if JS_BITS_PER_WORD == 32
+ // If configured for SSE2+ we can use the MFENCE instruction, available
+ // through the _mm_mfence intrinsic. But for non-SSE2 systems we have
+ // to do something else. Linux uses "lock add [esp], 0", so why not?
+ __asm lock add [esp], 0;
+# else
+ _mm_mfence();
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ _ReadWriteBarrier();
+ T v = *addr;
+ _ReadWriteBarrier();
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+ _ReadWriteBarrier();
+ *addr = val;
+ fenceSeqCst();
+}
+
+# define MSC_EXCHANGEOP(T, U, xchgop) \
+ template<> inline T \
+ js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
+ return (T)xchgop((U volatile*)addr, (U)val); \
+ }
+
+# define MSC_EXCHANGEOP_CAS(T, U, cmpxchg) \
+ template<> inline T \
+ js::jit::AtomicOperations::exchangeSeqCst(T* addr, T newval) { \
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
+ T oldval; \
+ do { \
+ _ReadWriteBarrier(); \
+ oldval = *addr; \
+ } while (!cmpxchg((U volatile*)addr, (U)newval, (U)oldval)); \
+ return oldval; \
+ }
+
+MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
+MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
+MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
+MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
+MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
+MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
+# ifdef HAVE_EXCHANGE64
+MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
+MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
+# else
+MSC_EXCHANGEOP_CAS(int64_t, __int64, _InterlockedCompareExchange64)
+MSC_EXCHANGEOP_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
+# endif
+
+# undef MSC_EXCHANGEOP
+# undef MSC_EXCHANGEOP_CAS
+
+# define MSC_CAS(T, U, cmpxchg) \
+ template<> inline T \
+ js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) { \
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
+ return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval); \
+ }
+
+MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(int32_t, long, _InterlockedCompareExchange)
+MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
+MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
+MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
+
+# undef MSC_CAS
+
+# define MSC_FETCHADDOP(T, U, xadd) \
+ template<> inline T \
+ js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
+ return (T)xadd((U volatile*)addr, (U)val); \
+ } \
+ template<> inline T \
+ js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
+ return (T)xadd((U volatile*)addr, -(U)val); \
+ }
+
+MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
+MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)
+
+# undef MSC_FETCHADDOP
+
+# define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
+ template<> inline T \
+ js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) { \
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
+ return (T)andop((U volatile*)addr, (U)val); \
+ } \
+ template<> inline T \
+ js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) { \
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
+ return (T)orop((U volatile*)addr, (U)val); \
+ } \
+ template<> inline T \
+ js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) { \
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
+ return (T)xorop((U volatile*)addr, (U)val); \
+ }
+
+MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
+MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
+MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
+MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
+MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+
+# undef MSC_FETCHBITOP
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+ while (_InterlockedCompareExchange((long*)&spinlock, /*newval=*/1, /*oldval=*/0) == 1)
+ continue;
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+ _InterlockedExchange((long*)&spinlock, 0);
+}
+
+# undef HAVE_EXCHANGE64
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory at compile time, use GCC, Clang, or MSVC, or add code here"
+
+#endif // platform
+
+#endif // jit_shared_AtomicOperations_x86_shared_h
diff --git a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
new file mode 100644
index 000000000..844fd5c0e
--- /dev/null
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -0,0 +1,5393 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jit_x86_shared_BaseAssembler_x86_shared_h
+#define jit_x86_shared_BaseAssembler_x86_shared_h
+
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include "jit/x86-shared/AssemblerBuffer-x86-shared.h"
+#include "jit/x86-shared/Encoding-x86-shared.h"
+#include "jit/x86-shared/Patching-x86-shared.h"
+
+extern volatile uintptr_t* blackbox;
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssembler;
+
+class AutoUnprotectAssemblerBufferRegion
+{
+ BaseAssembler* assembler;
+ size_t firstByteOffset;
+ size_t lastByteOffset;
+
+ public:
+ AutoUnprotectAssemblerBufferRegion(BaseAssembler& holder, int32_t offset, size_t size);
+ ~AutoUnprotectAssemblerBufferRegion();
+};
+
+class BaseAssembler : public GenericAssembler {
+public:
+ BaseAssembler()
+ : useVEX_(true)
+ { }
+
+ void disableVEX() { useVEX_ = false; }
+
+ size_t size() const { return m_formatter.size(); }
+ const unsigned char* buffer() const { return m_formatter.buffer(); }
+ unsigned char* data() { return m_formatter.data(); }
+ bool oom() const { return m_formatter.oom(); }
+
+ void nop()
+ {
+ spew("nop");
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ void comment(const char* msg)
+ {
+ spew("; %s", msg);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ twoByteNop()
+ {
+ spew("nop (2 byte)");
+ JmpSrc r(m_formatter.size());
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_NOP);
+ return r;
+ }
+
+ static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target)
+ {
+ // Note: the offset is relative to the address of the instruction after
+        // the jump, which is two bytes long.
+ ptrdiff_t rel8 = target - jump - 2;
+ MOZ_RELEASE_ASSERT(rel8 >= INT8_MIN && rel8 <= INT8_MAX);
+ MOZ_RELEASE_ASSERT(jump[0] == PRE_OPERAND_SIZE);
+ MOZ_RELEASE_ASSERT(jump[1] == OP_NOP);
+ jump[0] = OP_JMP_rel8;
+ jump[1] = rel8;
+ }
+
+ static void patchJumpToTwoByteNop(uint8_t* jump)
+ {
+ // See twoByteNop.
+ MOZ_RELEASE_ASSERT(jump[0] == OP_JMP_rel8);
+ jump[0] = PRE_OPERAND_SIZE;
+ jump[1] = OP_NOP;
+ }
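+
+    // Byte-level sketch of the patching above, assuming |code| points at a
+    // two-byte nop emitted by twoByteNop() and |target| is within rel8 range:
+    //
+    //     // before: code[0] == 0x66 (PRE_OPERAND_SIZE), code[1] == 0x90 (OP_NOP)
+    //     patchTwoByteNopToJump(code, target);
+    //     // after:  code[0] == 0xEB (OP_JMP_rel8), code[1] == int8_t(target - code - 2)
+    //     patchJumpToTwoByteNop(code);
+    //     // restored: 0x66 0x90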
+
+ /*
+     * The multi-byte nop sequences are taken directly from Intel's
+     * architecture software developer's manual.
+     * They are defined for sequences of sizes from 1 to 9 bytes inclusive.
+ */
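+    // Concretely, the sequences emitted below are (in hex, per the OP_NOP_*
+    // encodings):
+    //   1: 90
+    //   2: 66 90
+    //   3: 0f 1f 00
+    //   4: 0f 1f 40 00
+    //   5: 0f 1f 44 00 00
+    //   6: 66 0f 1f 44 00 00
+    //   7: 0f 1f 80 00 00 00 00
+    //   8: 0f 1f 84 00 00 00 00 00
+    //   9: 66 0f 1f 84 00 00 00 00 00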
+ void nop_one()
+ {
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ void nop_two()
+ {
+ m_formatter.oneByteOp(OP_NOP_66);
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ void nop_three()
+ {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_four()
+ {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_40);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_five()
+ {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_44);
+ m_formatter.oneByteOp(OP_NOP_00);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_six()
+ {
+ m_formatter.oneByteOp(OP_NOP_66);
+ nop_five();
+ }
+
+ void nop_seven()
+ {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_80);
+ for (int i = 0; i < 4; ++i)
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_eight()
+ {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_84);
+ for (int i = 0; i < 5; ++i)
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_nine()
+ {
+ m_formatter.oneByteOp(OP_NOP_66);
+ nop_eight();
+ }
+
+ void insert_nop(int size)
+ {
+ switch (size) {
+ case 1:
+ nop_one();
+ break;
+ case 2:
+ nop_two();
+ break;
+ case 3:
+ nop_three();
+ break;
+ case 4:
+ nop_four();
+ break;
+ case 5:
+ nop_five();
+ break;
+ case 6:
+ nop_six();
+ break;
+ case 7:
+ nop_seven();
+ break;
+ case 8:
+ nop_eight();
+ break;
+ case 9:
+ nop_nine();
+ break;
+ case 10:
+ nop_three();
+ nop_seven();
+ break;
+ case 11:
+ nop_four();
+ nop_seven();
+ break;
+ case 12:
+ nop_six();
+ nop_six();
+ break;
+ case 13:
+ nop_six();
+ nop_seven();
+ break;
+ case 14:
+ nop_seven();
+ nop_seven();
+ break;
+ case 15:
+ nop_one();
+ nop_seven();
+ nop_seven();
+ break;
+ default:
+ MOZ_CRASH("Unhandled alignment");
+ }
+ }
+
+ // Stack operations:
+
+ void push_r(RegisterID reg)
+ {
+ spew("push %s", GPRegName(reg));
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ spew("pop %s", GPRegName(reg));
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i(int32_t imm)
+ {
+ spew("push $%s0x%x", PRETTYHEX(imm));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_PUSH_Ib);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void push_i32(int32_t imm)
+ {
+ spew("push $%s0x%04x", PRETTYHEX(imm));
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int32_t offset, RegisterID base)
+ {
+ spew("push " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_PUSH);
+ }
+
+ void pop_m(int32_t offset, RegisterID base)
+ {
+ spew("pop " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, offset, base, GROUP1A_OP_POP);
+ }
+
+ void push_flags()
+ {
+ spew("pushf");
+ m_formatter.oneByteOp(OP_PUSHFLAGS);
+ }
+
+ void pop_flags()
+ {
+ spew("popf");
+ m_formatter.oneByteOp(OP_POPFLAGS);
+ }
+
+ // Arithmetic operations:
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("addl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADD_GvEv, src, dst);
+ }
+
+ void addw_rr(RegisterID src, RegisterID dst)
+ {
+ spew("addw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_GvEv, src, dst);
+ }
+
+ void addl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("addl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADD_GvEv, offset, base, dst);
+ }
+
+ void addl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("addl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, src);
+ }
+
+ void addl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, index, scale, src);
+ }
+
+ void addl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("addl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addw_ir(int32_t imm, RegisterID dst)
+ {
+ spew("addw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addl_i32r(int32_t imm, RegisterID dst)
+ {
+ // 32-bit immediate always, for patching.
+ spew("addl $0x%04x, %s", imm, GPReg32Name(dst));
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+
+ void addl_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("addl $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int32_t imm, const void* addr)
+ {
+ spew("addl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+ void addw_im(int32_t imm, const void* addr)
+ {
+ spew("addw $%d, %p", int16_t(imm), addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void addw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addw $%d, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addw $%d, " MEM_obs, int16_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, src);
+ }
+
+ void addw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, index, scale, src);
+ }
+
+ void addb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8(imm);
+ }
+
+ void addb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addb $%d, " MEM_obs, int8_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_ADD);
+ m_formatter.immediate8(imm);
+ }
+
+ void addb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_ADD_EbGb, offset, base, src);
+ }
+
+ void addb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("addb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_ADD_EbGb, offset, base, index, scale, src);
+ }
+
+ void subb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("subb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8(imm);
+ }
+
+ void subb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subb $%d, " MEM_obs, int8_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_SUB);
+ m_formatter.immediate8(imm);
+ }
+
+ void subb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("subb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_SUB_EbGb, offset, base, src);
+ }
+
+ void subb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_SUB_EbGb, offset, base, index, scale, src);
+ }
+
+ void andb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("andb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8(imm);
+ }
+
+ void andb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andb $%d, " MEM_obs, int8_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_AND);
+ m_formatter.immediate8(imm);
+ }
+
+ void andb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("andb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_AND_EbGb, offset, base, src);
+ }
+
+ void andb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_AND_EbGb, offset, base, index, scale, src);
+ }
+
+ void orb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("orb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8(imm);
+ }
+
+ void orb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orb $%d, " MEM_obs, int8_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_OR);
+ m_formatter.immediate8(imm);
+ }
+
+ void orb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("orb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_OR_EbGb, offset, base, src);
+ }
+
+ void orb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_OR_EbGb, offset, base, index, scale, src);
+ }
+
+ void xorb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("xorb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8(imm);
+ }
+
+ void xorb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorb $%d, " MEM_obs, int8_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_XOR);
+ m_formatter.immediate8(imm);
+ }
+
+ void xorb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xorb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_XOR_EbGb, offset, base, src);
+ }
+
+ void xorb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_XOR_EbGb, offset, base, index, scale, src);
+ }
+
+ void lock_xaddb_rm(RegisterID srcdest, int32_t offset, RegisterID base)
+ {
+ spew("lock xaddb %s, " MEM_ob, GPReg8Name(srcdest), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp8(OP2_XADD_EbGb, offset, base, srcdest);
+ }
+
+ void lock_xaddb_rm(RegisterID srcdest, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("lock xaddb %s, " MEM_obs, GPReg8Name(srcdest), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp8(OP2_XADD_EbGb, offset, base, index, scale, srcdest);
+ }
+
+ void lock_xaddl_rm(RegisterID srcdest, int32_t offset, RegisterID base)
+ {
+ spew("lock xaddl %s, " MEM_ob, GPReg32Name(srcdest), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp(OP2_XADD_EvGv, offset, base, srcdest);
+ }
+
+ void lock_xaddl_rm(RegisterID srcdest, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("lock xaddl %s, " MEM_obs, GPReg32Name(srcdest), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp(OP2_XADD_EvGv, offset, base, index, scale, srcdest);
+ }
+
+ void vpaddb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddsb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddusb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddusb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddusb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddsw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddusw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddusw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddusw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, src1, src0, dst);
+ }
+ void vpaddd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubsb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, offset, base, src0, dst);
+ }
+    void vpsubsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+    {
+        twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, address, src0, dst);
+    }
+
+ void vpsubusb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubusb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, offset, base, src0, dst);
+ }
+    void vpsubusb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+    {
+        twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, address, src0, dst);
+    }
+
+ void vpsubw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubsw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, offset, base, src0, dst);
+ }
+    void vpsubsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+    {
+        twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, address, src0, dst);
+    }
+
+ void vpsubusw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubusw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, offset, base, src0, dst);
+ }
+    void vpsubusw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+    {
+        twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, address, src0, dst);
+    }
+
+ void vpsubd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, src1, src0, dst);
+ }
+ void vpsubd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, address, src0, dst);
+ }
+
+ void vpmuludq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpmuludq_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, offset, base, src0, dst);
+ }
+
+ void vpmullw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, src1, src0, dst);
+ }
+ void vpmullw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, offset, base, src0, dst);
+ }
+
+ void vpmulld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, src1, src0, dst);
+ }
+ void vpmulld_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, offset, base, src0, dst);
+ }
+ void vpmulld_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, address, src0, dst);
+ }
+
+ void vaddps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, src1, src0, dst);
+ }
+ void vaddps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, offset, base, src0, dst);
+ }
+ void vaddps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, address, src0, dst);
+ }
+
+ void vsubps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+ void vsubps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, offset, base, src0, dst);
+ }
+ void vsubps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, address, src0, dst);
+ }
+
+ void vmulps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, src1, src0, dst);
+ }
+ void vmulps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, offset, base, src0, dst);
+ }
+ void vmulps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, address, src0, dst);
+ }
+
+ void vdivps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, src1, src0, dst);
+ }
+ void vdivps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, offset, base, src0, dst);
+ }
+ void vdivps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, address, src0, dst);
+ }
+
+ void vmaxps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, src1, src0, dst);
+ }
+ void vmaxps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, offset, base, src0, dst);
+ }
+ void vmaxps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, address, src0, dst);
+ }
+
+ void vminps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, src1, src0, dst);
+ }
+ void vminps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, offset, base, src0, dst);
+ }
+ void vminps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, address, src0, dst);
+ }
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("andl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_AND_GvEv, src, dst);
+ }
+
+ void andw_rr(RegisterID src, RegisterID dst)
+ {
+ spew("andw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_GvEv, src, dst);
+ }
+
+ void andl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("andl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_AND_GvEv, offset, base, dst);
+ }
+
+ void andl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("andl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, src);
+ }
+
+ void andw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("andw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, src);
+ }
+
+ void andl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, index, scale, src);
+ }
+
+ void andw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, index, scale, src);
+ }
+
+ void andl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("andl $0x%x, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_AND_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_ir(int32_t imm, RegisterID dst)
+ {
+ spew("andw $0x%x, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_AND_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void andl_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("andl $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("andw $0x%x, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void andl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("andw $%d, " MEM_obs, int16_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_AND);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void fld_m(int32_t offset, RegisterID base)
+ {
+ spew("fld " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FLD);
+ }
+ void fld32_m(int32_t offset, RegisterID base)
+ {
+ spew("fld " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FLD);
+ }
+ void faddp()
+ {
+ spew("addp ");
+ m_formatter.oneByteOp(OP_FPU6_ADDP);
+ m_formatter.oneByteOp(OP_ADDP_ST0_ST1);
+ }
+ void fisttp_m(int32_t offset, RegisterID base)
+ {
+ spew("fisttp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FISTTP);
+ }
+ void fistp_m(int32_t offset, RegisterID base)
+ {
+ spew("fistp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FILD, offset, base, FPU6_OP_FISTP);
+ }
+ void fstp_m(int32_t offset, RegisterID base)
+ {
+ spew("fstp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FSTP);
+ }
+ void fstp32_m(int32_t offset, RegisterID base)
+ {
+ spew("fstp32 " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FSTP);
+ }
+ void fnstcw_m(int32_t offset, RegisterID base)
+ {
+ spew("fnstcw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FISTP);
+ }
+ void fldcw_m(int32_t offset, RegisterID base)
+ {
+ spew("fldcw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FLDCW);
+ }
+ void fnstsw_m(int32_t offset, RegisterID base)
+ {
+ spew("fnstsw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FISTP);
+ }
+
+ void negl_r(RegisterID dst)
+ {
+ spew("negl %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
+ }
+
+ void negl_m(int32_t offset, RegisterID base)
+ {
+ spew("negl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, offset, base, GROUP3_OP_NEG);
+ }
+
+ void notl_r(RegisterID dst)
+ {
+ spew("notl %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
+ }
+
+ void notl_m(int32_t offset, RegisterID base)
+ {
+ spew("notl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, offset, base, GROUP3_OP_NOT);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("orl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_OR_GvEv, src, dst);
+ }
+
+ void orw_rr(RegisterID src, RegisterID dst)
+ {
+ spew("orw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_GvEv, src, dst);
+ }
+
+ void orl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("orl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_OR_GvEv, offset, base, dst);
+ }
+
+ void orl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("orl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, src);
+ }
+
+ void orw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("orw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, src);
+ }
+
+ void orl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, index, scale, src);
+ }
+
+ void orw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, index, scale, src);
+ }
+
+ void orl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("orl $0x%x, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_ir(int32_t imm, RegisterID dst)
+ {
+ spew("orw $0x%x, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void orl_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("orl $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("orw $0x%x, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void orl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("orw $%d, " MEM_obs, int16_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_OR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("subl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SUB_GvEv, src, dst);
+ }
+
+ void subw_rr(RegisterID src, RegisterID dst)
+ {
+ spew("subw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_GvEv, src, dst);
+ }
+
+ void subl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("subl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SUB_GvEv, offset, base, dst);
+ }
+
+ void subl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("subl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("subw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, index, scale, src);
+ }
+
+ void subw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, index, scale, src);
+ }
+
+ void subl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("subl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_ir(int32_t imm, RegisterID dst)
+ {
+ spew("subw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void subl_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("subl $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("subw $%d, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void subl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("subw $%d, " MEM_obs, int16_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_SUB);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xorl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorw_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xorw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("xorl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XOR_GvEv, offset, base, dst);
+ }
+
+ void xorl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xorl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, src);
+ }
+
+ void xorw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xorw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, src);
+ }
+
+ void xorl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, index, scale, src);
+ }
+
+ void xorw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, index, scale, src);
+ }
+
+ void xorl_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("xorl $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("xorw $0x%x, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xorw $%d, " MEM_obs, int16_t(imm), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_XOR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("xorl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_ir(int32_t imm, RegisterID dst)
+ {
+ spew("xorw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void sarl_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("sarl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst)
+ {
+ spew("sarl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
+ }
+
+ void shrl_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shrl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ spew("shrl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
+ }
+
+ void shrdl_CLr(RegisterID src, RegisterID dst)
+ {
+ spew("shrdl %%cl, %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_SHRD_GvEv, dst, src);
+ }
+
+ void shldl_CLr(RegisterID src, RegisterID dst)
+ {
+ spew("shldl %%cl, %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_SHLD_GvEv, dst, src);
+ }
+
+ void shll_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shll $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ spew("shll %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
+ }
+
+ void roll_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("roll $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void roll_CLr(RegisterID dst)
+ {
+ spew("roll %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
+ }
+
+ void rorl_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("rorl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rorl_CLr(RegisterID dst)
+ {
+ spew("rorl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
+ }
+
+ void bsrl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsrl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_BSR_GvEv, src, dst);
+ }
+
+ void bsfl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsfl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_BSF_GvEv, src, dst);
+ }
+
+ void popcntl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("popcntl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp(OP2_POPCNT_GvEv, src, dst);
+ }
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ spew("imull %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, src, dst);
+ }
+
+ void imull_r(RegisterID multiplier)
+ {
+ spew("imull %s", GPReg32Name(multiplier));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, multiplier, GROUP3_OP_IMUL);
+ }
+
+ void imull_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("imull " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, offset, base, dst);
+ }
+
+ void imull_ir(int32_t value, RegisterID src, RegisterID dst)
+ {
+ spew("imull $%d, %s, %s", value, GPReg32Name(src), GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(value)) {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIb, src, dst);
+ m_formatter.immediate8s(value);
+ } else {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, src, dst);
+ m_formatter.immediate32(value);
+ }
+ }
+
+ void mull_r(RegisterID multiplier)
+ {
+ spew("mull %s", GPReg32Name(multiplier));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, multiplier, GROUP3_OP_MUL);
+ }
+
+ void idivl_r(RegisterID divisor)
+ {
+ spew("idivl %s", GPReg32Name(divisor));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
+ }
+
+ void divl_r(RegisterID divisor)
+ {
+ spew("div %s", GPReg32Name(divisor));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
+ }
+
+ void prefix_lock()
+ {
+ spew("lock");
+ m_formatter.oneByteOp(PRE_LOCK);
+ }
+
+ void prefix_16_for_32()
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ }
+
+ void incl_m32(int32_t offset, RegisterID base)
+ {
+ spew("incl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_INC);
+ }
+
+ void decl_m32(int32_t offset, RegisterID base)
+ {
+ spew("decl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_DEC);
+ }
+
+ // Note that CMPXCHG performs comparison against REG = %al/%ax/%eax/%rax.
+ // If %REG == [%base+offset], then %src -> [%base+offset].
+ // Otherwise, [%base+offset] -> %REG.
+ // For the 8-bit operations src must also be an 8-bit register.
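+ //
+ // A typical lock-free update loop built from these primitives might look
+ // like the following (an illustrative sketch only, not code in this patch):
+ //
+ //   movl_mr(offset, base, rax)     ; load the expected value into %eax
+ //   loop: compute the desired new value into `src`
+ //   prefix_lock()                  ; LOCK makes the CMPXCHG atomic
+ //   cmpxchgl(src, offset, base)    ; ZF=1 on success; on failure %eax is reloaded
+ //   jCC(ConditionNE)               ; branch back to `loop` and retry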
+
+ void cmpxchgb(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("cmpxchgb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.twoByteOp8(OP2_CMPXCHG_GvEb, offset, base, src);
+ }
+ void cmpxchgb(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpxchgb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp8(OP2_CMPXCHG_GvEb, offset, base, index, scale, src);
+ }
+ void cmpxchgw(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("cmpxchgw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, src);
+ }
+ void cmpxchgw(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpxchgw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
+ }
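+ // The 16- and 32-bit forms share opcode 0F B1; with no operand-size
+ // prefix it assembles as the 32-bit CMPXCHG, so the GvEw constant is
+ // reused below for the l variants.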
+ void cmpxchgl(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("cmpxchgl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, src);
+ }
+ void cmpxchgl(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpxchgl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
+ }
+
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("cmpl %s, %s", GPReg32Name(rhs), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpl_rm(RegisterID rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpl %s, " MEM_ob, GPReg32Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_CMP_EvGv, offset, base, rhs);
+ }
+
+ void cmpl_mr(int32_t offset, RegisterID base, RegisterID lhs)
+ {
+ spew("cmpl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, offset, base, lhs);
+ }
+
+ void cmpl_mr(const void* address, RegisterID lhs)
+ {
+ spew("cmpl %p, %s", address, GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, address, lhs);
+ }
+
+ void cmpl_ir(int32_t rhs, RegisterID lhs)
+ {
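+ // Comparing against zero is emitted as a register self-test: it sets
+ // the condition flags identically (CF and OF are cleared in both cases)
+ // and needs no immediate byte.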
+ if (rhs == 0) {
+ testl_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpl $0x%x, %s", rhs, GPReg32Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ if (lhs == rax)
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpl_i32r(int32_t rhs, RegisterID lhs)
+ {
+ spew("cmpl $0x%04x, %s", rhs, GPReg32Name(lhs));
+ if (lhs == rax)
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_im(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpl $0x%x, " MEM_ob, rhs, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpb_im(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpb $0x%x, " MEM_ob, rhs, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpb_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpb $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpl_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpl $0x%x, " MEM_o32b, rhs, ADDR_o32b(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ MOZ_MUST_USE JmpSrc
+ cmpl_im_disp32(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpl $0x%x, " MEM_o32b, rhs, ADDR_o32b(offset, base));
+ JmpSrc r;
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate32(rhs);
+ }
+ return r;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ cmpl_im_disp32(int32_t rhs, const void* addr)
+ {
+ spew("cmpl $0x%x, %p", rhs, addr);
+ JmpSrc r;
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate32(rhs);
+ }
+ return r;
+ }
+
+ void cmpl_i32m(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpl $0x%04x, " MEM_ob, rhs, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_i32m(int32_t rhs, const void* addr)
+ {
+ spew("cmpl $0x%04x, %p", rhs, addr);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_rm(RegisterID rhs, const void* addr)
+ {
+ spew("cmpl %s, %p", GPReg32Name(rhs), addr);
+ m_formatter.oneByteOp(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void cmpl_rm_disp32(RegisterID rhs, const void* addr)
+ {
+ spew("cmpl %s, %p", GPReg32Name(rhs), addr);
+ m_formatter.oneByteOp_disp32(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void cmpl_im(int32_t rhs, const void* addr)
+ {
+ spew("cmpl $0x%x, %p", rhs, addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpw_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("cmpw %s, %s", GPReg16Name(rhs), GPReg16Name(lhs));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpw_rm(RegisterID rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpw %s, " MEM_obs, GPReg16Name(rhs), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, offset, base, index, scale, rhs);
+ }
+
+ void cmpw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpw $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void testl_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("testl %s, %s", GPReg32Name(rhs), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testb_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("testb %s, %s", GPReg8Name(rhs), GPReg8Name(lhs));
+ m_formatter.oneByteOp(OP_TEST_EbGb, lhs, rhs);
+ }
+
+ void testl_ir(int32_t rhs, RegisterID lhs)
+ {
+ // If the mask fits in an 8-bit immediate, we can use testb with an
+ // 8-bit subreg.
+ if (CAN_ZERO_EXTEND_8_32(rhs) && HasSubregL(lhs)) {
+ testb_ir(rhs, lhs);
+ return;
+ }
+ // If the mask is a subset of 0xff00, we can use testb with an h reg, if
+ // one happens to be available.
+ if (CAN_ZERO_EXTEND_8H_32(rhs) && HasSubregH(lhs)) {
+ testb_ir_norex(rhs >> 8, GetSubregH(lhs));
+ return;
+ }
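+ // For example, testl $0x20, %ebx narrows to testb $0x20, %bl, and
+ // testl $0x4000, %ebx narrows to testb $0x40, %bh; a mask such as
+ // 0x10000 fits neither subregister and uses the full form below.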
+ spew("testl $0x%x, %s", rhs, GPReg32Name(lhs));
+ if (lhs == rax)
+ m_formatter.oneByteOp(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testl_i32m(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("testl $0x%x, " MEM_ob, rhs, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testl_i32m(int32_t rhs, const void* addr)
+ {
+ spew("testl $0x%x, %p", rhs, addr);
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, addr, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testb_im(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("testb $0x%x, " MEM_ob, rhs, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ void testb_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("testb $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, offset, base, index, scale, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ void testl_i32m(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("testl $0x%4x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, offset, base, index, scale, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testw_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("testw %s, %s", GPReg16Name(rhs), GPReg16Name(lhs));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testb_ir(int32_t rhs, RegisterID lhs)
+ {
+ spew("testb $0x%x, %s", rhs, GPReg8Name(lhs));
+ if (lhs == rax)
+ m_formatter.oneByteOp8(OP_TEST_EAXIb);
+ else
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ // Like testb_ir, but never emits a REX prefix. This may be used to
+ // reference ah..bh.
+ void testb_ir_norex(int32_t rhs, HRegisterID lhs)
+ {
+ spew("testb $0x%x, %s", rhs, HRegName8(lhs));
+ m_formatter.oneByteOp8_norex(OP_GROUP3_EbIb, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ void setCC_r(Condition cond, RegisterID lhs)
+ {
+ spew("set%s %s", CCName(cond), GPReg8Name(lhs));
+ m_formatter.twoByteOp8(setccOpcode(cond), lhs, (GroupOpcodeID)0);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ setCC_r(ConditionE, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ setCC_r(ConditionNE, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
+ void cdq()
+ {
+ spew("cdq ");
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void xchgb_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xchgb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, src);
+ }
+ void xchgb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xchgb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, index, scale, src);
+ }
+
+ void xchgw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xchgw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xchgw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xchgl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, src, dst);
+ }
+ void xchgl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xchgl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xchgl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void cmovz_rr(RegisterID src, RegisterID dst)
+ {
+ spew("cmovz %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_CMOVZ_GvEv, src, dst);
+ }
+ void cmovz_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("cmovz " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_CMOVZ_GvEv, offset, base, dst);
+ }
+ void cmovz_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("cmovz " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_CMOVZ_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, src, dst);
+ }
+
+ void movw_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movw_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movw %s, " MEM_o32b, GPReg16Name(src), ADDR_o32b(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movw_rm(RegisterID src, const void* addr)
+ {
+ spew("movw %s, %p", GPReg16Name(src), addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, addr, src);
+ }
+
+ void movl_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movl_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movl %s, " MEM_o32b, GPReg32Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movl_mEAX(const void* addr)
+ {
+#ifdef JS_CODEGEN_X64
+ if (IsAddressImmediate(addr)) {
+ movl_mr(addr, rax);
+ return;
+ }
+#endif
+
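+ // This is the accumulator moffs form (opcode A1): the absolute address
+ // is embedded directly in the instruction, eight bytes wide on x64
+ // (hence the "movabs" spelling) and four bytes wide on x86.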
+#ifdef JS_CODEGEN_X64
+ spew("movabs %p, %%eax", addr);
+#else
+ spew("movl %p, %%eax", addr);
+#endif
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#ifdef JS_CODEGEN_X64
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int32_t>(addr));
+#endif
+ }
+
+ void movl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movl " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movl_mr(const void* base, RegisterID index, int scale, RegisterID dst)
+ {
+ int32_t disp = AddressImmediate(base);
+
+ spew("movl " MEM_os ", %s", ADDR_os(disp, index, scale), GPReg32Name(dst));
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, disp, index, scale, dst);
+ }
+
+ void movl_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movl_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == rax
+#ifdef JS_CODEGEN_X64
+ && !IsAddressImmediate(addr)
+#endif
+ )
+ {
+ movl_mEAX(addr);
+ return;
+ }
+
+ spew("movl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, addr, dst);
+ }
+
+ void movl_i32r(int32_t imm, RegisterID dst)
+ {
+ spew("movl $0x%x, %s", imm, GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movb_ir(int32_t imm, RegisterID reg)
+ {
+ spew("movb $0x%x, %s", imm, GPReg8Name(reg));
+ m_formatter.oneByteOp8(OP_MOV_EbIb, reg);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("movb $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, offset, base, GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movb $0x%x, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, offset, base, index, scale, GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, const void* addr)
+ {
+ spew("movb $%d, %p", imm, addr);
+ m_formatter.oneByteOp_disp32(OP_GROUP11_EvIb, addr, GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movw_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("movw $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movw_im(int32_t imm, const void* addr)
+ {
+ spew("movw $%d, %p", imm, addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movl_i32m(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("movl $0x%x, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movw $0x%x, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, index, scale, GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movl_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movl $0x%x, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, index, scale, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_EAXm(const void* addr)
+ {
+#ifdef JS_CODEGEN_X64
+ if (IsAddressImmediate(addr)) {
+ movl_rm(rax, addr);
+ return;
+ }
+#endif
+
+ spew("movl %%eax, %p", addr);
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#ifdef JS_CODEGEN_X64
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int32_t>(addr));
+#endif
+ }
+
+ void vmovq_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ // vmovq_rm can be encoded either as a true vmovq or as a vmovd with a
+ // REX prefix modifying it to be 64-bit. We choose the vmovq encoding
+ // because it's smaller (when it doesn't need a REX prefix for other
+ // reasons) and because it works on 32-bit x86 too.
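+ // For reference: the store form emitted below is 66 0F D6 /r
+ // (movq m64, xmm); the movd-with-REX.W alternative would be
+ // 66 REX.W 0F 7E /r, one byte longer and x64-only.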
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovq_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovq_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovq_rm(XMMRegisterID src, const void* addr)
+ {
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, addr, invalid_xmm, src);
+ }
+
+ void vmovq_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ // vmovq_mr can be encoded either as a true vmovq or as a vmovd with a
+ // REX prefix modifying it to be 64-bit. We choose the vmovq encoding
+ // because it's smaller (when it doesn't need a REX prefix for other
+ // reasons) and because it works on 32-bit x86 too.
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovq_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovq_mr(int32_t offset, RegisterID base, RegisterID index, int32_t scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovq_mr(const void* addr, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, addr, invalid_xmm, dst);
+ }
+
+ void movl_rm(RegisterID src, const void* addr)
+ {
+ if (src == rax
+#ifdef JS_CODEGEN_X64
+ && !IsAddressImmediate(addr)
+#endif
+ ) {
+ movl_EAXm(addr);
+ return;
+ }
+
+ spew("movl %s, %p", GPReg32Name(src), addr);
+ m_formatter.oneByteOp(OP_MOV_EvGv, addr, src);
+ }
+
+ void movl_i32m(int32_t imm, const void* addr)
+ {
+ spew("movl $%d, %p", imm, addr);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movb_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_MOV_EbGv, offset, base, src);
+ }
+
+ void movb_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movb %s, " MEM_o32b, GPReg8Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp8_disp32(OP_MOV_EbGv, offset, base, src);
+ }
+
+ void movb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_MOV_EbGv, offset, base, index, scale, src);
+ }
+
+ void movb_rm(RegisterID src, const void* addr)
+ {
+ spew("movb %s, %p", GPReg8Name(src), addr);
+ m_formatter.oneByteOp8(OP_MOV_EbGv, addr, src);
+ }
+
+ void movb_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movb " MEM_ob ", %s", ADDR_ob(offset, base), GPReg8Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEb, offset, base, dst);
+ }
+
+ void movb_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movb " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg8Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movzbl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movzbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, offset, base, dst);
+ }
+
+ void movzbl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movzbl " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEb, offset, base, dst);
+ }
+
+ void movzbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movzbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movzbl_mr(const void* addr, RegisterID dst)
+ {
+ spew("movzbl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, addr, dst);
+ }
+
+ void movsbl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movsbl %s, %s", GPReg8Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8_movx(OP2_MOVSX_GvEb, src, dst);
+ }
+
+ void movsbl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movsbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+
+ void movsbl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movsbl " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+
+ void movsbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movsbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movsbl_mr(const void* addr, RegisterID dst)
+ {
+ spew("movsbl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, addr, dst);
+ }
+
+ void movzwl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movzwl %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, src, dst);
+ }
+
+ void movzwl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movzwl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, offset, base, dst);
+ }
+
+ void movzwl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movzwl " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEw, offset, base, dst);
+ }
+
+ void movzwl_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movzwl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movzwl_mr(const void* addr, RegisterID dst)
+ {
+ spew("movzwl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, addr, dst);
+ }
+
+ void movswl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movswl %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, src, dst);
+ }
+
+ void movswl_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movswl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+
+ void movswl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movswl " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+
+ void movswl_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movswl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movswl_mr(const void* addr, RegisterID dst)
+ {
+ spew("movswl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, addr, dst);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movzbl %s, %s", GPReg8Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8_movx(OP2_MOVZX_GvEb, src, dst);
+ }
+
+ void leal_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("leal " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_LEA, offset, base, index, scale, dst);
+ }
+
+ void leal_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("leal " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_LEA, offset, base, dst);
+ }
+
+ // Flow control:
+
+ MOZ_MUST_USE JmpSrc
+ call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("call .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void call_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, dst, GROUP5_OP_CALLN);
+ spew("call *%s", GPRegName(dst));
+ }
+
+ void call_m(int32_t offset, RegisterID base)
+ {
+ spew("call *" MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_CALLN);
+ }
+
+ // Comparison of EAX against a 32-bit immediate. The immediate is patched
+ // in as if it were a jump target. The intention is to toggle the first
+ // byte of the instruction between a CMP and a JMP to produce a pseudo-NOP.
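+ // (Both forms are five bytes long: CMP EAX, imm32 is 3D id and JMP rel32
+ // is E9 cd, so rewriting the single opcode byte flips between them while
+ // the patched 32-bit field stays in place.)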
+ MOZ_MUST_USE JmpSrc
+ cmp_eax()
+ {
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("cmpl %%eax, .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void jmp_i(JmpDst dst)
+ {
+ int32_t diff = dst.offset() - m_formatter.size();
+ spew("jmp .Llabel%d", dst.offset());
+
+ // The jump immediate is an offset from the end of the jump instruction.
+ // A jump instruction is either a one-byte opcode with a one-byte offset,
+ // or a one-byte opcode with a four-byte offset.
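+ // For example, a backward jump with diff = -16 emits EB with rel8 -18
+ // (diff - 2), while a target 1000 bytes ahead emits E9 with rel32 995
+ // (diff - 5).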
+ if (CAN_SIGN_EXTEND_8_32(diff - 2)) {
+ m_formatter.oneByteOp(OP_JMP_rel8);
+ m_formatter.immediate8s(diff - 2);
+ } else {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ m_formatter.immediate32(diff - 5);
+ }
+ }
+ MOZ_MUST_USE JmpSrc
+ jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("jmp .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void jmp_r(RegisterID dst)
+ {
+ spew("jmp *%s", GPRegName(dst));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, dst, GROUP5_OP_JMPN);
+ }
+
+ void jmp_m(int32_t offset, RegisterID base)
+ {
+ spew("jmp *" MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_JMPN);
+ }
+
+ void jmp_m(int32_t offset, RegisterID base, RegisterID index, int scale) {
+ spew("jmp *" MEM_obs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, index, scale, GROUP5_OP_JMPN);
+ }
+
+ void jCC_i(Condition cond, JmpDst dst)
+ {
+ int32_t diff = dst.offset() - m_formatter.size();
+ spew("j%s .Llabel%d", CCName(cond), dst.offset());
+
+ // The jump immediate is an offset from the end of the jump instruction.
+ // A conditional jump instruction is either a one-byte opcode with a
+ // one-byte offset, or a two-byte opcode with a four-byte offset.
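+ // (Encodings for reference: the short form is 70+cc ib, two bytes in
+ // total, hence rel8 = diff - 2; the near form is 0F 80+cc id, six bytes
+ // in total, hence rel32 = diff - 6.)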
+ if (CAN_SIGN_EXTEND_8_32(diff - 2)) {
+ m_formatter.oneByteOp(jccRel8(cond));
+ m_formatter.immediate8s(diff - 2);
+ } else {
+ m_formatter.twoByteOp(jccRel32(cond));
+ m_formatter.immediate32(diff - 6);
+ }
+ }
+
+ MOZ_MUST_USE JmpSrc
+ jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("j%s .Lfrom%d", CCName(cond), r.offset());
+ return r;
+ }
+
+ // SSE operations:
+
+ void vpcmpeqb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpeqb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtb_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpgtb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpeqw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpeqw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtw_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpgtw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpeqd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpeqd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpcmpgtd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, address, src0, dst);
+ }
+
+ void vcmpps_rr(uint8_t order, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, src1, src0, dst);
+ }
+ void vcmpps_mr(uint8_t order, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, offset, base, src0, dst);
+ }
+ void vcmpps_mr(uint8_t order, const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, address, src0, dst);
+ }
+
+ void vrcpps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, src, invalid_xmm, dst);
+ }
+ void vrcpps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, offset, base, invalid_xmm, dst);
+ }
+ void vrcpps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, address, invalid_xmm, dst);
+ }
+
+ void vrsqrtps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, src, invalid_xmm, dst);
+ }
+ void vrsqrtps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, offset, base, invalid_xmm, dst);
+ }
+ void vrsqrtps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, address, invalid_xmm, dst);
+ }
+
+ void vsqrtps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, src, invalid_xmm, dst);
+ }
+ void vsqrtps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, offset, base, invalid_xmm, dst);
+ }
+ void vsqrtps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, address, invalid_xmm, dst);
+ }
+
+ void vaddsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vaddss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vaddsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vaddss_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vaddsd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, address, src0, dst);
+ }
+ void vaddss_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, address, src0, dst);
+ }
+
+ void vcvtss2sd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtss2sd", VEX_SS, OP2_CVTSS2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsd2ss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsd2ss", VEX_SD, OP2_CVTSD2SS_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsi2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt32Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsi2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt32Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvttps2dq_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvttps2dq", VEX_SS, OP2_CVTTPS2DQ_VdqWps, src, invalid_xmm, dst);
+ }
+
+ void vcvtdq2ps_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtdq2ps", VEX_PS, OP2_CVTDQ2PS_VpsWdq, src, invalid_xmm, dst);
+ }
+
+ void vcvtsi2sd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, offset, base, src0, dst);
+ }
+
+ void vcvtsi2sd_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, offset, base, index, scale, src0, dst);
+ }
+
+ void vcvtsi2ss_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, offset, base, src0, dst);
+ }
+
+ void vcvtsi2ss_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, offset, base, index, scale, src0, dst);
+ }
+
+ void vcvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt32("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vcvttss2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt32("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vunpcklps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, src1, src0, dst);
+ }
+ void vunpcklps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, offset, base, src0, dst);
+ }
+ void vunpcklps_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, addr, src0, dst);
+ }
+
+ void vunpckhps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, src1, src0, dst);
+ }
+ void vunpckhps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, offset, base, src0, dst);
+ }
+ void vunpckhps_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, addr, src0, dst);
+ }
+
+ void vpand_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpand_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpand_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, address, src0, dst);
+ }
+ void vpor_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpor_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpor_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, address, src0, dst);
+ }
+ void vpxor_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpxor_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpxor_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, address, src0, dst);
+ }
+ void vpandn_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpandn_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpandn_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, address, src0, dst);
+ }
+
+ void vpshufd_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, src, invalid_xmm, dst);
+ }
+ void vpshufd_imr(uint32_t mask, int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, offset, base, invalid_xmm, dst);
+ }
+ void vpshufd_imr(uint32_t mask, const void* address, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, address, invalid_xmm, dst);
+ }
+
+ void vpshuflw_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vpshuflw", VEX_SD, OP2_PSHUFLW_VdqWdqIb, mask, src, invalid_xmm, dst);
+ }
+
+ void vpshufhw_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vpshufhw", VEX_SS, OP2_PSHUFHW_VdqWdqIb, mask, src, invalid_xmm, dst);
+ }
+
+ void vpshufb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38, src1, src0, dst);
+ }
+
+ void vshufps_irr(uint32_t mask, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, src1, src0, dst);
+ }
+ void vshufps_imr(uint32_t mask, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, offset, base, src0, dst);
+ }
+ void vshufps_imr(uint32_t mask, const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, address, src0, dst);
+ }
+
+ void vmovddup_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovddup", VEX_SD, OP2_MOVDDUP_VqWq, src, invalid_xmm, dst);
+ }
+
+ void vmovhlps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovhlps", VEX_PS, OP2_MOVHLPS_VqUq, src1, src0, dst);
+ }
+
+ void vmovlhps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovlhps", VEX_PS, OP2_MOVLHPS_VqUq, src1, src0, dst);
+ }
+
+ void vpsrldq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsrldq", OP2_PSRLDQ_Vd, ShiftID::vpsrldq, count, src, dst);
+ }
+
+ void vpsllq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 64);
+ shiftOpImmSimd("vpsllq", OP2_PSRLDQ_Vd, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsrlq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 64);
+ shiftOpImmSimd("vpsrlq", OP2_PSRLDQ_Vd, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vpslld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpslld", VEX_PD, OP2_PSLLD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpslld_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpslld", OP2_PSLLD_UdqIb, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsrad_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsrad", VEX_PD, OP2_PSRAD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrad_ir(int32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpsrad", OP2_PSRAD_UdqIb, ShiftID::vpsrad, count, src, dst);
+ }
+
+ void vpsrld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsrld", VEX_PD, OP2_PSRLD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrld_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpsrld", OP2_PSRLD_UdqIb, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vpsllw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsllw", VEX_PD, OP2_PSLLW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsllw_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsllw", OP2_PSLLW_UdqIb, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsraw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsraw", VEX_PD, OP2_PSRAW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsraw_ir(int32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsraw", OP2_PSRAW_UdqIb, ShiftID::vpsrad, count, src, dst);
+ }
+
+ void vpsrlw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpsrlw", VEX_PD, OP2_PSRLW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrlw_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsrlw", OP2_PSRLW_UdqIb, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vmovmskpd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt32("vmovmskpd", VEX_PD, OP2_MOVMSKPD_EdVd, src, dst);
+ }
+
+ void vmovmskps_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt32("vmovmskps", VEX_PS, OP2_MOVMSKPD_EdVd, src, dst);
+ }
+
+ void vptest_rr(XMMRegisterID rhs, XMMRegisterID lhs) {
+ threeByteOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, rhs, invalid_xmm, lhs);
+ }
+
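+ // For the GPR-destination form of vmovd below, the arguments are passed to
+ // twoByteOpSimdInt32 deliberately swapped (hence the casts): with
+ // OP2_MOVD_EdVd the XMM register sits in the ModRM reg field and the GPR in
+ // the ModRM r/m field, and the spew code in twoByteOpSimdInt32 special-cases
+ // this opcode so the printed operands still come out in the right order.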
+ void vmovd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt32("vmovd", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vmovd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpInt32Simd("vmovd", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
+ }
+
+ void vmovd_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovd_mr(int32_t offset, RegisterID base, RegisterID index, int32_t scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovd_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovd_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, address, invalid_xmm, dst);
+ }
+
+ void vmovd_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovd_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovd_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovd_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, address, invalid_xmm, src);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovsd_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovss_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm, src);
+ }
+
+ void vmovss_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovss_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovss_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovsd_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovsd_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovsd_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ // Note that the register-to-register form of vmovsd does not write to the
+ // entire output register. For ordinary whole-register moves,
+ // use vmovapd instead.
+ void vmovsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, src1, src0, dst);
+ }
+
+ // The register-to-register form of vmovss has the same problem as vmovsd
+ // above. Prefer vmovaps for register-to-register moves.
+ void vmovss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, src1, src0, dst);
+ }
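+ // Roughly, the three-operand VEX forms above merge rather than copy:
+ // vmovsd writes dst[63:0] from src1 and dst[127:64] from src0, and vmovss
+ // writes dst[31:0] from src1 and dst[127:32] from src0, while the legacy
+ // SSE forms leave the untouched lanes of dst unchanged. That partial write
+ // is what makes vmovapd/vmovaps the better whole-register moves.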
+
+ void vmovsd_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, address, invalid_xmm, dst);
+ }
+
+ void vmovss_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, address, invalid_xmm, dst);
+ }
+
+ void vmovups_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, address, invalid_xmm, dst);
+ }
+
+ void vmovdqu_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, address, invalid_xmm, dst);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, address, invalid_xmm, src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, address, invalid_xmm, src);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, address, invalid_xmm, src);
+ }
+
+ void vmovaps_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, address, invalid_xmm, src);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, address, invalid_xmm, src);
+ }
+
+ void vmovups_rm(XMMRegisterID src, const void* address)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, address, invalid_xmm, src);
+ }
+
+ void vmovaps_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, dst, invalid_xmm, src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, src, invalid_xmm, dst);
+ }
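+ // (Background for the special case above: the two-byte VEX prefix can only
+ // extend the ModRM reg field, via VEX.R, while extending the r/m field needs
+ // VEX.B and therefore the three-byte prefix. Putting the high register in
+ // the reg position keeps the shorter encoding; see how twoByteOpVex derives
+ // r and b from its reg and rm arguments below.)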
+ void vmovaps_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, offset, base, invalid_xmm, src);
+ }
+ void vmovaps_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, offset, base, index, scale, invalid_xmm, src);
+ }
+ void vmovaps_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, offset, base, invalid_xmm, dst);
+ }
+ void vmovaps_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovups_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base, invalid_xmm, src);
+ }
+ void vmovups_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base, invalid_xmm, src);
+ }
+ void vmovups_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base, index, scale, invalid_xmm, src);
+ }
+ void vmovups_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base, invalid_xmm, dst);
+ }
+ void vmovups_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base, invalid_xmm, dst);
+ }
+ void vmovups_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovapd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovapd", VEX_PD, OP2_MOVAPS_WsdVsd, dst, invalid_xmm, src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovapd", VEX_PD, OP2_MOVAPD_VsdWsd, src, invalid_xmm, dst);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base, invalid_xmm, src);
+ }
+
+ void vmovdqu_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd_disp32("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base, invalid_xmm, src);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovdqu_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovdqu_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd_disp32("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovdqu_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmovdqa_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, dst, invalid_xmm, src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, src, invalid_xmm, dst);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, int32_t offset, RegisterID base)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, offset, base, invalid_xmm, src);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vmovdqa_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovdqa_mr(int32_t offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ void vmulsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulsd", VEX_SD, OP2_MULSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vmulss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulss", VEX_SS, OP2_MULSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vmulsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulsd", VEX_SD, OP2_MULSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vmulss_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmulss", VEX_SS, OP2_MULSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vpinsrw_irr(uint32_t whichWord, RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(whichWord < 8);
+ twoByteOpImmInt32Simd("vpinsrw", VEX_PD, OP2_PINSRW, whichWord, src1, src0, dst);
+ }
+
+ void vpextrw_irr(uint32_t whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(whichWord < 8);
+ twoByteOpImmSimdInt32("vpextrw", VEX_PD, OP2_PEXTRW_GdUdIb, whichWord, src, dst);
+ }
+
+ void vsubsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubsd", VEX_SD, OP2_SUBSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsubss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubss", VEX_SS, OP2_SUBSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsubsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubsd", VEX_SD, OP2_SUBSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vsubss_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubss", VEX_SS, OP2_SUBSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vucomiss_rr(XMMRegisterID rhs, XMMRegisterID lhs)
+ {
+ twoByteOpSimdFlags("vucomiss", VEX_PS, OP2_UCOMISD_VsdWsd, rhs, lhs);
+ }
+
+ void vucomisd_rr(XMMRegisterID rhs, XMMRegisterID lhs)
+ {
+ twoByteOpSimdFlags("vucomisd", VEX_PD, OP2_UCOMISD_VsdWsd, rhs, lhs);
+ }
+
+ void vucomisd_mr(int32_t offset, RegisterID base, XMMRegisterID lhs)
+ {
+ twoByteOpSimdFlags("vucomisd", VEX_PD, OP2_UCOMISD_VsdWsd, offset, base, lhs);
+ }
+
+ void vdivsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivsd", VEX_SD, OP2_DIVSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vdivss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivss", VEX_SS, OP2_DIVSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vdivsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivsd", VEX_SD, OP2_DIVSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vdivss_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vdivss", VEX_SS, OP2_DIVSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vxorpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vxorpd", VEX_PD, OP2_XORPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vorpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vorpd", VEX_PD, OP2_ORPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vandpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandpd", VEX_PD, OP2_ANDPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vandps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, src1, src0, dst);
+ }
+
+ void vandps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vandps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, address, src0, dst);
+ }
+
+ void vandnps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, src1, src0, dst);
+ }
+
+ void vandnps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vandnps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, address, src0, dst);
+ }
+
+ void vorps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, src1, src0, dst);
+ }
+
+ void vorps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vorps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, address, src0, dst);
+ }
+
+ void vxorps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, src1, src0, dst);
+ }
+
+ void vxorps_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vxorps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, address, src0, dst);
+ }
+
+ void vsqrtsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsqrtsd", VEX_SD, OP2_SQRTSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsqrtss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsqrtss", VEX_SS, OP2_SQRTSS_VssWss, src1, src0, dst);
+ }
+
+ void vroundsd_irr(RoundingMode mode, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpImmSimd("vroundsd", VEX_PD, OP3_ROUNDSD_VsdWsd, ESCAPE_3A, mode, src1, src0, dst);
+ }
+
+ void vroundss_irr(RoundingMode mode, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpImmSimd("vroundss", VEX_PD, OP3_ROUNDSS_VsdWsd, ESCAPE_3A, mode, src1, src0, dst);
+ }
+
+ void vinsertps_irr(uint32_t mask, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpImmSimd("vinsertps", VEX_PD, OP3_INSERTPS_VpsUps, ESCAPE_3A, mask, src1, src0, dst);
+ }
+ void vinsertps_imr(uint32_t mask, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ threeByteOpImmSimd("vinsertps", VEX_PD, OP3_INSERTPS_VpsUps, ESCAPE_3A, mask, offset, base, src0, dst);
+ }
+
+ void vpinsrb_irr(unsigned lane, RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmInt32Simd("vpinsrb", VEX_PD, OP3_PINSRB_VdqEdIb, ESCAPE_3A, lane, src1, src0, dst);
+ }
+
+ void vpinsrd_irr(unsigned lane, RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(lane < 4);
+ threeByteOpImmInt32Simd("vpinsrd", VEX_PD, OP3_PINSRD_VdqEdIb, ESCAPE_3A, lane, src1, src0, dst);
+ }
+
+ void vpextrb_irr(unsigned lane, XMMRegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmSimdInt32("vpextrb", VEX_PD, OP3_PEXTRB_EdVdqIb, ESCAPE_3A, lane, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vpextrd_irr(unsigned lane, XMMRegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(lane < 4);
+ threeByteOpImmSimdInt32("vpextrd", VEX_PD, OP3_PEXTRD_EdVdqIb, ESCAPE_3A, lane, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vblendps_irr(unsigned imm, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(imm < 16);
+ // Despite being a "ps" instruction, vblendps is encoded with the "pd" prefix.
+ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, src1, src0, dst);
+ }
+
+ void vblendps_imr(unsigned imm, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ MOZ_ASSERT(imm < 16);
+ // Despite being a "ps" instruction, vblendps is encoded with the "pd" prefix.
+ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, offset, base, src0, dst);
+ }
+
+ void vblendvps_rr(XMMRegisterID mask, XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ vblendvOpSimd(mask, src1, src0, dst);
+ }
+ void vblendvps_mr(XMMRegisterID mask, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst) {
+ vblendvOpSimd(mask, offset, base, src0, dst);
+ }
+
+ void vmovsldup_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsldup", VEX_SS, OP2_MOVSLDUP_VpsWps, src, invalid_xmm, dst);
+ }
+ void vmovsldup_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovsldup", VEX_SS, OP2_MOVSLDUP_VpsWps, offset, base, invalid_xmm, dst);
+ }
+
+ void vmovshdup_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovshdup", VEX_SS, OP2_MOVSHDUP_VpsWps, src, invalid_xmm, dst);
+ }
+ void vmovshdup_mr(int32_t offset, RegisterID base, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovshdup", VEX_SS, OP2_MOVSHDUP_VpsWps, offset, base, invalid_xmm, dst);
+ }
+
+ void vminsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminsd", VEX_SD, OP2_MINSD_VsdWsd, src1, src0, dst);
+ }
+ void vminsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminsd", VEX_SD, OP2_MINSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vminss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vminss", VEX_SS, OP2_MINSS_VssWss, src1, src0, dst);
+ }
+
+ void vmaxsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxsd", VEX_SD, OP2_MAXSD_VsdWsd, src1, src0, dst);
+ }
+ void vmaxsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxsd", VEX_SD, OP2_MAXSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vmaxss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmaxss", VEX_SS, OP2_MAXSS_VssWss, src1, src0, dst);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ spew("int3");
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ud2()
+ {
+ spew("ud2");
+ m_formatter.twoByteOp(OP2_UD2);
+ }
+
+ void ret()
+ {
+ spew("ret");
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void ret_i(int32_t imm)
+ {
+ spew("ret $%d", imm);
+ m_formatter.oneByteOp(OP_RET_Iz);
+ m_formatter.immediate16u(imm);
+ }
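+ // (ret $imm is the near-return form that additionally pops imm16 bytes of
+ // arguments off the stack, which is why the immediate is emitted as a
+ // 16-bit value.)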
+
+ void mfence() {
+ spew("mfence");
+ m_formatter.twoByteOp(OP_FENCE, (RegisterID)0, 6);
+ }
+
+ // Assembler admin methods:
+
+ JmpDst label()
+ {
+ JmpDst r = JmpDst(m_formatter.size());
+ spew(".set .Llabel%d, .", r.offset());
+ return r;
+ }
+
+ size_t currentOffset() const {
+ return m_formatter.size();
+ }
+
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ {
+ return JmpDst(jump.offset() + offset);
+ }
+
+ void haltingAlign(int alignment)
+ {
+ spew(".balign %d, 0x%x # hlt", alignment, OP_HLT);
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+ }
+
+ void nopAlign(int alignment)
+ {
+ spew(".balign %d", alignment);
+
+ int remainder = m_formatter.size() % alignment;
+ if (remainder > 0)
+ insert_nop(alignment - remainder);
+ }
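+ // (Roughly: haltingAlign pads with hlt bytes so that control falling into
+ // the padding faults immediately, whereas nopAlign emits padding that is
+ // safe to execute through.)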
+
+ void jumpTablePointer(uintptr_t ptr)
+ {
+#ifdef JS_CODEGEN_X64
+ spew(".quad 0x%" PRIxPTR, ptr);
+#else
+ spew(".int 0x%" PRIxPTR, ptr);
+#endif
+ m_formatter.jumpTablePointer(ptr);
+ }
+
+ void doubleConstant(double d)
+ {
+ spew(".double %.16g", d);
+ m_formatter.doubleConstant(d);
+ }
+ void floatConstant(float f)
+ {
+ spew(".float %.16g", f);
+ m_formatter.floatConstant(f);
+ }
+
+ void simd128Constant(const void* data)
+ {
+ const uint32_t* dw = reinterpret_cast<const uint32_t*>(data);
+ spew(".int 0x%08x,0x%08x,0x%08x,0x%08x", dw[0], dw[1], dw[2], dw[3]);
+ MOZ_ASSERT(m_formatter.isAligned(16));
+ m_formatter.simd128Constant(data);
+ }
+
+ void int32Constant(int32_t i)
+ {
+ spew(".int %d", i);
+ m_formatter.int32Constant(i);
+ }
+ void int64Constant(int64_t i)
+ {
+ spew(".quad %lld", (long long)i);
+ m_formatter.int64Constant(i);
+ }
+
+ // Linking & patching:
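+ //
+ // Until a jump is bound to a target, the four bytes that will eventually
+ // hold its rel32 displacement double as a link in a list of jumps to the
+ // same label: they hold the JmpSrc offset of the next such jump, or -1 to
+ // terminate the list. nextJump and setNextJump below read and write that
+ // link.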
+
+ void assertValidJmpSrc(JmpSrc src)
+ {
+ // The target offset is stored at offset - 4.
+ MOZ_RELEASE_ASSERT(src.offset() > int32_t(sizeof(int32_t)));
+ MOZ_RELEASE_ASSERT(size_t(src.offset()) <= size());
+ }
+
+ bool nextJump(const JmpSrc& from, JmpSrc* next)
+ {
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom())
+ return false;
+
+ assertValidJmpSrc(from);
+
+ const unsigned char* code = m_formatter.data();
+ int32_t offset = GetInt32(code + from.offset());
+ if (offset == -1)
+ return false;
+
+ if (MOZ_UNLIKELY(size_t(offset) >= size())) {
+#ifdef NIGHTLY_BUILD
+ // Stash some data on the stack so we can retrieve it from minidumps,
+ // see bug 1124397.
+ int32_t startOffset = from.offset() - 1;
+ while (startOffset >= 0 && code[startOffset] == 0xe5)
+ startOffset--;
+ int32_t endOffset = from.offset() - 1;
+ while (endOffset < int32_t(size()) && code[endOffset] == 0xe5)
+ endOffset++;
+ volatile uintptr_t dump[10];
+ blackbox = dump;
+ blackbox[0] = uintptr_t(0xABCD1234);
+ blackbox[1] = uintptr_t(offset);
+ blackbox[2] = uintptr_t(size());
+ blackbox[3] = uintptr_t(from.offset());
+ blackbox[4] = uintptr_t(code[from.offset() - 5]);
+ blackbox[5] = uintptr_t(code[from.offset() - 4]);
+ blackbox[6] = uintptr_t(code[from.offset() - 3]);
+ blackbox[7] = uintptr_t(startOffset);
+ blackbox[8] = uintptr_t(endOffset);
+ blackbox[9] = uintptr_t(0xFFFF7777);
+#endif
+ MOZ_CRASH("nextJump bogus offset");
+ }
+
+ *next = JmpSrc(offset);
+ return true;
+ }
+ void setNextJump(const JmpSrc& from, const JmpSrc& to)
+ {
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom())
+ return;
+
+ assertValidJmpSrc(from);
+ MOZ_RELEASE_ASSERT(to.offset() == -1 || size_t(to.offset()) <= size());
+
+ unsigned char* code = m_formatter.data();
+ AutoUnprotectAssemblerBufferRegion unprotect(*this, from.offset() - 4, 4);
+ SetInt32(code + from.offset(), to.offset());
+ }
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ MOZ_ASSERT(from.offset() != -1);
+ MOZ_ASSERT(to.offset() != -1);
+
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom())
+ return;
+
+ assertValidJmpSrc(from);
+ MOZ_RELEASE_ASSERT(size_t(to.offset()) <= size());
+
+ spew(".set .Lfrom%d, .Llabel%d", from.offset(), to.offset());
+ unsigned char* code = m_formatter.data();
+ AutoUnprotectAssemblerBufferRegion unprotect(*this, from.offset() - 4, 4);
+ SetRel32(code + from.offset(), code + to.offset());
+ }
+
+ void executableCopy(void* buffer)
+ {
+ memcpy(buffer, m_formatter.buffer(), size());
+ }
+ MOZ_MUST_USE bool appendBuffer(const BaseAssembler& other)
+ {
+ return m_formatter.append(other.m_formatter.buffer(), other.size());
+ }
+
+ void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_formatter.unprotectDataRegion(firstByteOffset, lastByteOffset);
+ }
+ void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_formatter.reprotectDataRegion(firstByteOffset, lastByteOffset);
+ }
+
+ protected:
+ static bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(int8_t)value; }
+ static bool CAN_SIGN_EXTEND_16_32(int32_t value) { return value == (int32_t)(int16_t)value; }
+ static bool CAN_ZERO_EXTEND_8_32(int32_t value) { return value == (int32_t)(uint8_t)value; }
+ static bool CAN_ZERO_EXTEND_8H_32(int32_t value) { return value == (value & 0xff00); }
+ static bool CAN_ZERO_EXTEND_16_32(int32_t value) { return value == (int32_t)(uint16_t)value; }
+ static bool CAN_ZERO_EXTEND_32_64(int32_t value) { return value >= 0; }
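+ // For example: CAN_SIGN_EXTEND_8_32(127) and CAN_SIGN_EXTEND_8_32(-128)
+ // hold, CAN_SIGN_EXTEND_8_32(128) does not, while CAN_ZERO_EXTEND_8_32(128)
+ // and CAN_ZERO_EXTEND_8H_32(0x1200) both hold.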
+
+ // Methods for encoding SIMD instructions via either legacy SSE encoding or
+ // VEX encoding.
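+ //
+ // In the helpers that follow, the legacy path emits the SSE prefix byte, the
+ // 0x0F escape and the opcode, with dst in the ModRM reg field, while the VEX
+ // path folds the prefix and escape into the VEX bytes and carries the extra
+ // src0 operand in the VEX.vvvv field.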
+
+ bool useLegacySSEEncoding(XMMRegisterID src0, XMMRegisterID dst)
+ {
+ // If we don't have AVX or it's disabled, use the legacy SSE encoding.
+ if (!useVEX_) {
+ MOZ_ASSERT(src0 == invalid_xmm || src0 == dst,
+ "Legacy SSE (pre-AVX) encoding requires the output register to be "
+ "the same as the src0 input register");
+ return true;
+ }
+
+ // If src0 is the same as the output register, we might as well use
+ // the legacy SSE encoding, since it is smaller. However, this is only
+ // beneficial as long as we're not using ymm registers anywhere.
+ return src0 == dst;
+ }
+
+ bool useLegacySSEEncodingForVblendv(XMMRegisterID mask, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ // Similar to useLegacySSEEncoding, but for vblendv the Legacy SSE
+ // encoding also requires the mask to be in xmm0.
+
+ if (!useVEX_) {
+ MOZ_ASSERT(src0 == dst,
+ "Legacy SSE (pre-AVX) encoding requires the output register to be "
+ "the same as the src0 input register");
+ MOZ_ASSERT(mask == xmm0,
+ "Legacy SSE (pre-AVX) encoding for blendv requires the mask to be "
+ "in xmm0");
+ return true;
+ }
+
+ return src0 == dst && mask == xmm0;
+ }
+
+ bool useLegacySSEEncodingForOtherOutput()
+ {
+ return !useVEX_;
+ }
+
+ const char* legacySSEOpName(const char* name)
+ {
+ MOZ_ASSERT(name[0] == 'v');
+ return name + 1;
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), XMMRegName(rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, XMMRegName(dst), XMMRegName(rm));
+ else
+ spew("%-11s%s, %s", name, XMMRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s%s, %s, %s", name, XMMRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, src0, dst);
+ }
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ uint32_t imm, XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (src0 == invalid_xmm)
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(rm), XMMRegName(dst));
+ else
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, XMMRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_ob, legacySSEOpName(name),
+ XMMRegName(dst), ADDR_ob(offset, base));
+ } else {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name),
+ ADDR_ob(offset, base), XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_ob, name, XMMRegName(dst), ADDR_ob(offset, base));
+ else
+ spew("%-11s" MEM_ob ", %s", name, ADDR_ob(offset, base), XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_ob ", %s, %s", name,
+ ADDR_ob(offset, base), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, src0, dst);
+ }
+
+ void twoByteOpSimd_disp32(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32b, legacySSEOpName(name), XMMRegName(dst), ADDR_o32b(offset, base));
+ else
+ spew("%-11s" MEM_o32b ", %s", legacySSEOpName(name), ADDR_o32b(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp_disp32(opcode, offset, base, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32b, name, XMMRegName(dst), ADDR_o32b(offset, base));
+ else
+ spew("%-11s" MEM_o32b ", %s", name, ADDR_o32b(offset, base), XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_o32b ", %s, %s", name,
+ ADDR_o32b(offset, base), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex_disp32(ty, opcode, offset, base, src0, dst);
+ }
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ uint32_t imm, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_obs, legacySSEOpName(name),
+ XMMRegName(dst), ADDR_obs(offset, base, index, scale));
+ } else {
+ spew("%-11s" MEM_obs ", %s", legacySSEOpName(name),
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, index, scale, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_obs, name, XMMRegName(dst),
+ ADDR_obs(offset, base, index, scale));
+ } else {
+ spew("%-11s" MEM_obs ", %s", name, ADDR_obs(offset, base, index, scale),
+ XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s" MEM_obs ", %s, %s", name, ADDR_obs(offset, base, index, scale),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, index, scale, src0, dst);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %p", legacySSEOpName(name), XMMRegName(dst), address);
+ else
+ spew("%-11s%p, %s", legacySSEOpName(name), address, XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, address, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %p", name, XMMRegName(dst), address);
+ else
+ spew("%-11s%p, %s", name, address, XMMRegName(dst));
+ } else {
+ spew("%-11s%p, %s, %s", name, address, XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, address, src0, dst);
+ }
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ uint32_t imm, const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %p, %s", legacySSEOpName(name), imm, address, XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, address, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %p, %s, %s", name, imm, address, XMMRegName(src0), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, address, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpInt32Simd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), GPReg32Name(rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), GPReg32Name(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, XMMRegName(dst), GPReg32Name(rm));
+ else
+ spew("%-11s%s, %s", name, GPReg32Name(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s%s, %s, %s", name, GPReg32Name(rm), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, rm, src0, dst);
+ }
+
+ void twoByteOpSimdInt32(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID rm, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), GPReg32Name(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName((XMMRegisterID)dst), GPReg32Name((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, GPReg32Name(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), GPReg32Name((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", name, XMMRegName(rm), GPReg32Name(dst));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm, dst);
+ }
+
+ void twoByteOpImmSimdInt32(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ uint32_t imm, XMMRegisterID rm, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(rm), GPReg32Name(dst));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpImmInt32Simd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ uint32_t imm, RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg32Name(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s", name, imm, GPReg32Name(rm), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimdFlags(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID rm, XMMRegisterID reg)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), XMMRegName(reg));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, reg);
+ return;
+ }
+
+ spew("%-11s%s, %s", name, XMMRegName(rm), XMMRegName(reg));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm, (XMMRegisterID)reg);
+ }
+
+ void twoByteOpSimdFlags(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, XMMRegisterID reg)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name),
+ ADDR_ob(offset, base), XMMRegName(reg));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, reg);
+ return;
+ }
+
+ spew("%-11s" MEM_ob ", %s", name,
+ ADDR_ob(offset, base), XMMRegName(reg));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, invalid_xmm, (XMMRegisterID)reg);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)rm, dst);
+ return;
+ }
+
+ spew("%-11s%s, %s, %s", name, XMMRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)rm, src0, dst);
+ }
+
+ void threeByteOpImmSimd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ uint32_t imm, XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, XMMRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name),
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ return;
+ }
+
+ spew("%-11s" MEM_ob ", %s, %s", name,
+ ADDR_ob(offset, base), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ }
+
+ void threeByteOpImmSimd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s%p, %s", legacySSEOpName(name), address, XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, address, dst);
+ return;
+ }
+
+ spew("%-11s%p, %s, %s", name, address, XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, address, src0, dst);
+ }
+
+ void threeByteOpImmInt32Simd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, uint32_t imm,
+ RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg32Name(src1), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, src1, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, GPReg32Name(src1), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, src1, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmInt32Simd(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, uint32_t imm,
+ int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm, ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base), XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimdInt32(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, uint32_t imm,
+ XMMRegisterID src, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(src), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)src, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (opcode == OP3_PEXTRD_EdVdqIb)
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName((XMMRegisterID)dst), GPReg32Name((RegisterID)src));
+ else
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(src), GPReg32Name(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)src, invalid_xmm, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimdInt32(const char* name, VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, uint32_t imm,
+ int32_t offset, RegisterID base, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm, ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s", name, imm, ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, invalid_xmm, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ // Blendv is a three-byte op, but the VEX encoding has a different opcode
+ // than the SSE encoding, so we handle it specially.
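+ // (With the legacy SSE encoding, blendvps reads its mask implicitly from
+ // xmm0, which is what useLegacySSEEncodingForVblendv asserts, whereas the
+ // VEX form names the mask register explicitly as a fourth operand.)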
+ void vblendvOpSimd(XMMRegisterID mask, XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncodingForVblendv(mask, src0, dst)) {
+ spew("blendvps %s, %s", XMMRegName(rm), XMMRegName(dst));
+ // Even though vblendv is a "ps" instruction, it is encoded with the "pd" prefix.
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.threeByteOp(OP3_BLENDVPS_VdqWdq, ESCAPE_3A, (RegisterID)rm, dst);
+ return;
+ }
+
+ spew("vblendvps %s, %s, %s, %s",
+ XMMRegName(mask), XMMRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ // Even though vblendv is a "ps" instruction, it is encoded with the "pd" prefix.
+ m_formatter.vblendvOpVex(VEX_PD, OP3_VBLENDVPS_VdqWdq, ESCAPE_3A,
+ mask, (RegisterID)rm, src0, dst);
+ }
+
+ void vblendvOpSimd(XMMRegisterID mask, int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncodingForVblendv(mask, src0, dst)) {
+ spew("blendvps " MEM_ob ", %s", ADDR_ob(offset, base), XMMRegName(dst));
+ // Even though vblendv is a "ps" instruction, it is encoded with the "pd" prefix.
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.threeByteOp(OP3_BLENDVPS_VdqWdq, ESCAPE_3A, offset, base, dst);
+ return;
+ }
+
+ spew("vblendvps %s, " MEM_ob ", %s, %s",
+ XMMRegName(mask), ADDR_ob(offset, base), XMMRegName(src0), XMMRegName(dst));
+ // Even though vblendv is a "ps" instruction, it is encoded with the "pd" prefix.
+ m_formatter.vblendvOpVex(VEX_PD, OP3_VBLENDVPS_VdqWdq, ESCAPE_3A,
+ mask, offset, base, src0, dst);
+ }
+
+ void shiftOpImmSimd(const char* name, TwoByteOpcodeID opcode, ShiftID shiftKind,
+ uint32_t imm, XMMRegisterID src, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src, dst)) {
+ spew("%-11s$%d, %s", legacySSEOpName(name), imm, XMMRegName(dst));
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.twoByteOp(opcode, (RegisterID)dst, (int)shiftKind);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$%d, %s, %s", name, imm, XMMRegName(src), XMMRegName(dst));
+ m_formatter.twoByteOpVex(VEX_PD, opcode, (RegisterID)src, dst, (int)shiftKind);
+ m_formatter.immediate8u(imm);
+ }
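+ // (In the immediate-count shift encodings the ModRM reg field carries the
+ // group sub-opcode, i.e. the ShiftID, while the shifted register goes in the
+ // ModRM r/m field; under VEX the destination is carried separately in
+ // VEX.vvvv, and with legacy SSE src and dst must be the same register.)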
+
+ class X86InstructionFormatter {
+
+ public:
+ // Legacy prefix bytes:
+ //
+ // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ void legacySSEPrefix(VexOperandType ty)
+ {
+ switch (ty) {
+ case VEX_PS: break;
+ case VEX_PD: prefix(PRE_SSE_66); break;
+ case VEX_SS: prefix(PRE_SSE_F3); break;
+ case VEX_SD: prefix(PRE_SSE_F2); break;
+ }
+ }
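+ // (Roughly, the same four choices reappear as the VEX "pp" field in the
+ // VEX-encoded paths: none, 0x66, 0xF3 and 0xF2 respectively.)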
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+ // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a
+ // void*. On 64-bit targets REX prefixes will be planted as necessary,
+ // where high numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte Intel instructions sequences
+ // (first opcode byte 0x0F).
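+ //
+ // Roughly, each emitted instruction is laid out as: optional legacy and REX
+ // prefixes, the opcode byte(s), a ModRM byte when there are register or
+ // memory operands, an optional SIB byte and displacement for memory
+ // operands, and finally any immediate, which callers append separately.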
+
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID rm, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int32_t offset, RegisterID index, int scale, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, index, scale, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+#ifdef JS_CODEGEN_X64
+ void oneByteRipOp(OneByteOpcodeID opcode, int ripOffset, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void oneByteRipOp64(OneByteOpcodeID opcode, int ripOffset, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void twoByteRipOp(TwoByteOpcodeID opcode, int ripOffset, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void twoByteRipOpVex(VexOperandType ty, TwoByteOpcodeID opcode, int ripOffset,
+ XMMRegisterID src0, XMMRegisterID reg)
+ {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, RegisterID rm, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp_disp32(TwoByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void twoByteOpVex_disp32(VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = (index >> 3), b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ const void* address, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(address, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape, RegisterID rm, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ RegisterID rm, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38: m = 2; break;
+ case ESCAPE_3A: m = 3; break;
+ default: MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int32_t offset, RegisterID base, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38: m = 2; break;
+ case ESCAPE_3A: m = 3; break;
+ default: MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ const void* address, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38: m = 2; break;
+ case ESCAPE_3A: m = 3; break;
+ default: MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(address, reg);
+ }
+
+ void vblendvOpVex(VexOperandType ty, ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ XMMRegisterID mask, RegisterID rm, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38: m = 2; break;
+ case ESCAPE_3A: m = 3; break;
+ default: MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ immediate8u(mask << 4);
+ }
+
+ void vblendvOpVex(VexOperandType ty, ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ XMMRegisterID mask, int32_t offset, RegisterID base, XMMRegisterID src0, int reg)
+ {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38: m = 2; break;
+ case ESCAPE_3A: m = 3; break;
+ default: MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ immediate8u(mask << 4);
+ }
+
+#ifdef JS_CODEGEN_X64
+ // Quad-word-sized operands:
+ //
+ // Used to format 64-bit operations, planting a REX.w prefix. When
+ // planting d64 or f64 instructions, which do not require a REX.w prefix,
+ // the normal (non-'64'-postfixed) formatters should be used.
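+ //
+ // For illustration, the 64-bit move "mov %rax, %rbx" encodes as 48 89 c3:
+ // the leading 0x48 is the REX.W prefix these formatters plant, followed by
+ // the opcode and ModRM bytes emitted by the shared helpers.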
+
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID rm, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int32_t offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, RegisterID rm, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int offset, RegisterID base, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int offset, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, const void* address, int reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOpVex64(VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, XMMRegisterID reg)
+ {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 1; // 0x0F
+ int w = 1, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from
+ // the normal formatters in the circumstances under which they will
+ // decide to emit REX prefixes. These should be used where any register
+ // operand signifies a byte register.
+ //
+ // The distinction is due to the handling of register numbers in the
+ // range 4..7 on x86-64. These register numbers may either represent
+ // the second byte of the first four registers (ah..bh) or the first
+ // byte of the second four registers (spl..dil).
+ //
+ // Address operands should still be checked using regRequiresRex(),
+ // while byteRegRequiresRex() is provided to check byte register
+ // operands.
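+ //
+ // For example, 88 e0 encodes "mov %ah, %al", while the same opcode and
+ // ModRM bytes preceded by any REX prefix (40 88 e0) instead encode
+ // "mov %spl, %al". This is why using spl..dil as a byte operand forces a
+ // REX prefix, and why the h registers cannot appear in an instruction
+ // that carries one.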
+
+ void oneByteOp8(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, RegisterID r)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(r), 0, 0, r);
+ m_buffer.putByteUnchecked(opcode + (r & 7));
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, RegisterID rm, GroupOpcodeID groupOp)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, groupOp);
+ }
+
+ // Like oneByteOp8, but never emits a REX prefix.
+ void oneByteOp8_norex(OneByteOpcodeID opcode, HRegisterID rm, GroupOpcodeID groupOp)
+ {
+ MOZ_ASSERT(!regRequiresRex(RegisterID(rm)));
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(RegisterID(rm), groupOp);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp8_disp32(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, const void* address, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID rm, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|regRequiresRex(base), reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, int32_t offset, RegisterID base, RegisterID index,
+ int scale, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|regRequiresRex(base)|regRequiresRex(index),
+ reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ // Like twoByteOp8 but doesn't add a REX prefix if the destination reg
+ // is in esp..edi. This may be used when the destination is not an 8-bit
+ // register (as in a movzbl instruction), so it doesn't need a REX
+ // prefix to disambiguate it from ah..bh.
+ void twoByteOp8_movx(TwoByteOpcodeID opcode, RegisterID rm, RegisterID reg)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(regRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID rm, GroupOpcodeID groupOp)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, groupOp);
+ }
+
+ // Immediates:
+ //
+ // An immediate should be appended where appropriate after an op has
+ // been emitted. The writes are unchecked since the opcode formatters
+ // above will have ensured space.
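+ //
+ // For example, the group-1 ALU instructions have an imm32 form (opcode
+ // 0x81) followed by immediate32() and a sign-extended imm8 form (opcode
+ // 0x83) followed by immediate8s(); MaxInstructionSize is large enough
+ // that the preceding ensureSpace() covers either immediate.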
+
+ // A signed 8-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate8s(int32_t imm)
+ {
+ MOZ_ASSERT(CAN_SIGN_EXTEND_8_32(imm));
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ // An unsigned 8-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate8u(uint32_t imm)
+ {
+ MOZ_ASSERT(CAN_ZERO_EXTEND_8_32(imm));
+ m_buffer.putByteUnchecked(int32_t(imm));
+ }
+
+ // An 8-bit immediate which is either signed or unsigned, for use in
+ // instructions which actually only operate on 8 bits.
+ MOZ_ALWAYS_INLINE void immediate8(int32_t imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ // A signed 16-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate16s(int32_t imm)
+ {
+ MOZ_ASSERT(CAN_SIGN_EXTEND_16_32(imm));
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ // An unsigned 16-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate16u(int32_t imm)
+ {
+ MOZ_ASSERT(CAN_ZERO_EXTEND_16_32(imm));
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ // A 16-bit immediate which is either signed or unsigned, for use in
+ // instructions which actually only operate on 16 bits.
+ MOZ_ALWAYS_INLINE void immediate16(int32_t imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ MOZ_ALWAYS_INLINE void immediate32(int32_t imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ MOZ_ALWAYS_INLINE void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ MOZ_ALWAYS_INLINE MOZ_MUST_USE JmpSrc
+ immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return JmpSrc(m_buffer.size());
+ }
+
+ // Data:
+
+ void jumpTablePointer(uintptr_t ptr)
+ {
+ m_buffer.ensureSpace(sizeof(uintptr_t));
+#ifdef JS_CODEGEN_X64
+ m_buffer.putInt64Unchecked(ptr);
+#else
+ m_buffer.putIntUnchecked(ptr);
+#endif
+ }
+
+ void doubleConstant(double d)
+ {
+ m_buffer.ensureSpace(sizeof(double));
+ m_buffer.putInt64Unchecked(mozilla::BitwiseCast<uint64_t>(d));
+ }
+
+ void floatConstant(float f)
+ {
+ m_buffer.ensureSpace(sizeof(float));
+ m_buffer.putIntUnchecked(mozilla::BitwiseCast<uint32_t>(f));
+ }
+
+ void simd128Constant(const void* data)
+ {
+ const uint8_t* bytes = reinterpret_cast<const uint8_t*>(data);
+ m_buffer.ensureSpace(16);
+ for (size_t i = 0; i < 16; ++i)
+ m_buffer.putByteUnchecked(bytes[i]);
+ }
+
+ void int64Constant(int64_t i)
+ {
+ m_buffer.ensureSpace(sizeof(int64_t));
+ m_buffer.putInt64Unchecked(i);
+ }
+
+ void int32Constant(int32_t i)
+ {
+ m_buffer.ensureSpace(sizeof(int32_t));
+ m_buffer.putIntUnchecked(i);
+ }
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ const unsigned char* buffer() const { return m_buffer.buffer(); }
+ bool oom() const { return m_buffer.oom(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ unsigned char* data() { return m_buffer.data(); }
+
+ MOZ_MUST_USE bool append(const unsigned char* values, size_t size)
+ {
+ return m_buffer.append(values, size);
+ }
+
+ void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_buffer.unprotectDataRegion(firstByteOffset, lastByteOffset);
+ }
+ void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+ m_buffer.reprotectDataRegion(firstByteOffset, lastByteOffset);
+ }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ // Byte-operand registers spl and above require a REX prefix, which
+ // precludes use of the h registers in the same instruction.
+ static bool byteRegRequiresRex(RegisterID reg)
+ {
+#ifdef JS_CODEGEN_X64
+ return reg >= rsp;
+#else
+ return false;
+#endif
+ }
+
+ // For non-byte sizes, registers r8 & above always require a REX prefix.
+ static bool regRequiresRex(RegisterID reg)
+ {
+#ifdef JS_CODEGEN_X64
+ return reg >= r8;
+#else
+ return false;
+#endif
+ }
+
+#ifdef JS_CODEGEN_X64
+ // Format a REX prefix byte.
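+ // The layout is 0100WRXB: W selects a 64-bit operand size, while R, X,
+ // and B carry bit 3 of the ModRM reg field, the SIB index, and the
+ // ModRM rm / SIB base register numbers respectively.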
+ void emitRex(bool w, int r, int x, int b)
+ {
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to
+ // check register operands, regRequiresRex() to check other registers
+ // (i.e. address base & index).
+ //
+ // NB: WebKit's use of emitRexIf() is limited such that the
+ // regRequiresRex() checks are not needed. SpiderMonkey extends
+ // oneByteOp8 and twoByteOp8 functionality such that r, x, and b
+ // can all be used.
+ void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition ||
+ regRequiresRex(RegisterID(r)) ||
+ regRequiresRex(RegisterID(x)) ||
+ regRequiresRex(RegisterID(b)))
+ {
+ emitRex(false, r, x, b);
+ }
+ }
+
+ // Used for word-sized operations; plants a REX prefix if necessary
+ // (i.e. if any register is r8 or above).
+ void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(false, r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ void emitRexIf(bool condition, int, int, int)
+ {
+ MOZ_ASSERT(!condition, "32-bit x86 should never use a REX prefix");
+ }
+ void emitRexIfNeeded(int, int, int) {}
+#endif
+
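+ // A ModRM byte is laid out as mod(2) | reg(3) | rm(3) and a SIB byte as
+ // scale(2) | index(3) | base(3); only the low three bits of each register
+ // number are encoded here, the fourth bit having been planted in a REX or
+ // VEX prefix by the callers above.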
+ void putModRm(ModRmMode mode, RegisterID rm, int reg)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ MOZ_ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, hasSib, reg);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(RegisterID rm, int reg)
+ {
+ putModRm(ModRmRegister, rm, reg);
+ }
+
+ void memoryModRM(int32_t offset, RegisterID base, int reg)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a
+ // sib with no index & put the base in there.
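+ // (Both registers have 4 as their low three bits, the escape value that
+ // tells the decoder a SIB byte follows.)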
+#ifdef JS_CODEGEN_X64
+ if ((base == hasSib) || (base == hasSib2))
+#else
+ if (base == hasSib)
+#endif
+ {
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, base, noIndex, 0, reg);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, base, noIndex, 0, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, base, noIndex, 0, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#ifdef JS_CODEGEN_X64
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, base, reg);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, base, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, base, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
+
+ void memoryModRM_disp32(int32_t offset, RegisterID base, int reg)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a
+ // sib with no index & put the base in there.
+#ifdef JS_CODEGEN_X64
+ if ((base == hasSib) || (base == hasSib2))
+#else
+ if (base == hasSib)
+#endif
+ {
+ putModRmSib(ModRmMemoryDisp32, base, noIndex, 0, reg);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, base, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int32_t offset, RegisterID base, RegisterID index, int scale, int reg)
+ {
+ MOZ_ASSERT(index != noIndex);
+
+#ifdef JS_CODEGEN_X64
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, base, index, scale, reg);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, base, index, scale, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, base, index, scale, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM_disp32(int32_t offset, RegisterID index, int scale, int reg)
+ {
+ MOZ_ASSERT(index != noIndex);
+
+ // NB: the base-less memoryModRM overloads generate different code
+ // than the base-full memoryModRM overloads in the base == noBase
+ // case. The base-less overloads assume that the desired effective
+ // address is:
+ //
+ // reg := [scaled index] + disp32
+ //
+ // which means the mod needs to be ModRmMemoryNoDisp. The base-full
+ // overloads pass ModRmMemoryDisp32 in all cases and thus, when
+ // base == noBase (== ebp), the effective address is:
+ //
+ // reg := [scaled index] + disp32 + [ebp]
+ //
+ // See Intel developer manual, Vol 2, 2.1.5, Table 2-3.
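+ //
+ // For example, an operand of the form [4*index + disp32] is planted here
+ // as mod=00, rm=100 (SIB follows), SIB.base=101 (no base), and a trailing
+ // 32-bit displacement.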
+ putModRmSib(ModRmMemoryNoDisp, noBase, index, scale, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+
+ void memoryModRM_disp32(const void* address, int reg)
+ {
+ int32_t disp = AddressImmediate(address);
+
+#ifdef JS_CODEGEN_X64
+ // On x86-64, non-RIP-relative absolute mode requires a SIB.
+ putModRmSib(ModRmMemoryNoDisp, noBase, noIndex, 0, reg);
+#else
+ // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+#endif
+ m_buffer.putIntUnchecked(disp);
+ }
+
+ void memoryModRM(const void* address, int reg)
+ {
+ memoryModRM_disp32(address, reg);
+ }
+
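+ // Format a VEX prefix. The two-byte form is 0xC5 followed by
+ // [~R | ~vvvv | L | pp]; the three-byte form is 0xC4 followed by
+ // [~R | ~X | ~B | m-mmmm] and then [W | ~vvvv | L | pp]. The xor masks
+ // below implement the inverted (one's-complement) R/X/B/vvvv fields.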
+ void threeOpVex(VexOperandType p, int r, int x, int b, int m, int w, int v, int l,
+ int opcode)
+ {
+ m_buffer.ensureSpace(MaxInstructionSize);
+
+ if (v == invalid_xmm)
+ v = XMMRegisterID(0);
+
+ if (x == 0 && b == 0 && m == 1 && w == 0) {
+ // Two byte VEX.
+ m_buffer.putByteUnchecked(PRE_VEX_C5);
+ m_buffer.putByteUnchecked(((r << 7) | (v << 3) | (l << 2) | p) ^ 0xf8);
+ } else {
+ // Three byte VEX.
+ m_buffer.putByteUnchecked(PRE_VEX_C4);
+ m_buffer.putByteUnchecked(((r << 7) | (x << 6) | (b << 5) | m) ^ 0xe0);
+ m_buffer.putByteUnchecked(((w << 7) | (v << 3) | (l << 2) | p) ^ 0x78);
+ }
+
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ bool useVEX_;
+};
+
+MOZ_ALWAYS_INLINE
+AutoUnprotectAssemblerBufferRegion::AutoUnprotectAssemblerBufferRegion(BaseAssembler& holder,
+ int32_t offset, size_t size)
+{
+ assembler = &holder;
+ MOZ_ASSERT(offset >= 0);
+ firstByteOffset = size_t(offset);
+ lastByteOffset = firstByteOffset + (size - 1);
+ assembler->unprotectDataRegion(firstByteOffset, lastByteOffset);
+}
+
+MOZ_ALWAYS_INLINE
+AutoUnprotectAssemblerBufferRegion::~AutoUnprotectAssemblerBufferRegion()
+{
+ assembler->reprotectDataRegion(firstByteOffset, lastByteOffset);
+}
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_BaseAssembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/BaselineCompiler-x86-shared.cpp b/js/src/jit/x86-shared/BaselineCompiler-x86-shared.cpp
new file mode 100644
index 000000000..327015df8
--- /dev/null
+++ b/js/src/jit/x86-shared/BaselineCompiler-x86-shared.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/BaselineCompiler-x86-shared.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerX86Shared::BaselineCompilerX86Shared(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/x86-shared/BaselineCompiler-x86-shared.h b/js/src/jit/x86-shared/BaselineCompiler-x86-shared.h
new file mode 100644
index 000000000..65b702d54
--- /dev/null
+++ b/js/src/jit/x86-shared/BaselineCompiler-x86-shared.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_BaselineCompiler_x86_shared_h
+#define jit_x86_shared_BaselineCompiler_x86_shared_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerX86Shared : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerX86Shared(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_BaselineCompiler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/BaselineIC-x86-shared.cpp b/js/src/jit/x86-shared/BaselineIC-x86-shared.cpp
new file mode 100644
index 000000000..4e25f87bf
--- /dev/null
+++ b/js/src/jit/x86-shared/BaselineIC-x86-shared.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure, notNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(op);
+ masm.mov(ImmWord(0), dest);
+ masm.compareDouble(cond, FloatReg0, FloatReg1);
+ masm.setCC(Assembler::ConditionFromDoubleCondition(cond), dest);
+
+ // Check for NaN, if needed.
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (nanCond != Assembler::NaN_HandledByCond) {
+ masm.j(Assembler::NoParity, &notNaN);
+ masm.mov(ImmWord(nanCond == Assembler::NaN_IsTrue), dest);
+ masm.bind(&notNaN);
+ }
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
new file mode 100644
index 000000000..9cf03aede
--- /dev/null
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -0,0 +1,4727 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsmath.h"
+
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/Linker.h"
+#include "jit/RangeAnalysis.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Abs;
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using mozilla::SpecificNaN;
+
+using JS::GenericNaN;
+
+namespace js {
+namespace jit {
+
+CodeGeneratorX86Shared::CodeGeneratorX86Shared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+#ifdef JS_PUNBOX64
+Operand
+CodeGeneratorX86Shared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToOperand(input.value());
+}
+#else
+Register64
+CodeGeneratorX86Shared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToRegister64(input);
+}
+#endif
+
+void
+OutOfLineBailout::accept(CodeGeneratorX86Shared* codegen)
+{
+ codegen->visitOutOfLineBailout(this);
+}
+
+void
+CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue,
+ MBasicBlock* mirFalse, Assembler::NaNCond ifNaN)
+{
+ if (ifNaN == Assembler::NaN_IsFalse)
+ jumpToBlock(mirFalse, Assembler::Parity);
+ else if (ifNaN == Assembler::NaN_IsTrue)
+ jumpToBlock(mirTrue, Assembler::Parity);
+
+ if (isNextBlock(mirFalse->lir())) {
+ jumpToBlock(mirTrue, cond);
+ } else {
+ jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitDouble(LDouble* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorX86Shared::visitFloat32(LFloat32* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorX86Shared::visitTestIAndBranch(LTestIAndBranch* test)
+{
+ Register input = ToRegister(test->input());
+ masm.test32(input, input);
+ emitBranch(Assembler::NonZero, test->ifTrue(), test->ifFalse());
+}
+
+void
+CodeGeneratorX86Shared::visitTestDAndBranch(LTestDAndBranch* test)
+{
+ const LAllocation* opd = test->input();
+
+ // vucomisd flags:
+ // Z P C
+ // ---------
+ // NaN 1 1 1
+ // > 0 0 0
+ // < 0 0 1
+ // = 1 0 0
+ //
+ // NaN is falsey, so comparing against 0 and then using the Z flag is
+ // enough to determine which branch to take.
+ ScratchDoubleScope scratch(masm);
+ masm.zeroDouble(scratch);
+ masm.vucomisd(scratch, ToFloatRegister(opd));
+ emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
+}
+
+void
+CodeGeneratorX86Shared::visitTestFAndBranch(LTestFAndBranch* test)
+{
+ const LAllocation* opd = test->input();
+ // vucomiss flags are the same as doubles; see comment above
+ {
+ ScratchFloat32Scope scratch(masm);
+ masm.zeroFloat32(scratch);
+ masm.vucomiss(scratch, ToFloatRegister(opd));
+ }
+ emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
+}
+
+void
+CodeGeneratorX86Shared::visitBitAndAndBranch(LBitAndAndBranch* baab)
+{
+ if (baab->right()->isConstant())
+ masm.test32(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
+ else
+ masm.test32(ToRegister(baab->left()), ToRegister(baab->right()));
+ emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
+}
+
+void
+CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type, const LAllocation* left, const LAllocation* right)
+{
+#ifdef JS_CODEGEN_X64
+ if (type == MCompare::Compare_Object) {
+ masm.cmpPtr(ToRegister(left), ToOperand(right));
+ return;
+ }
+#endif
+
+ if (right->isConstant())
+ masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
+ else
+ masm.cmp32(ToRegister(left), ToOperand(right));
+}
+
+void
+CodeGeneratorX86Shared::visitCompare(LCompare* comp)
+{
+ MCompare* mir = comp->mir();
+ emitCompare(mir->compareType(), comp->left(), comp->right());
+ masm.emitSet(JSOpToCondition(mir->compareType(), comp->jsop()), ToRegister(comp->output()));
+}
+
+void
+CodeGeneratorX86Shared::visitCompareAndBranch(LCompareAndBranch* comp)
+{
+ MCompare* mir = comp->cmpMir();
+ emitCompare(mir->compareType(), comp->left(), comp->right());
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void
+CodeGeneratorX86Shared::visitCompareD(LCompareD* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->mir()->operandsAreNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ masm.compareDouble(cond, lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitCompareF(LCompareF* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->mir()->operandsAreNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ masm.compareFloat(cond, lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitNotI(LNotI* ins)
+{
+ masm.cmp32(ToRegister(ins->input()), Imm32(0));
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86Shared::visitNotD(LNotD* ins)
+{
+ FloatRegister opd = ToFloatRegister(ins->input());
+
+ // Not returns true if the input is a NaN. We don't have to worry about
+ // it if we know the input is never NaN though.
+ Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
+ if (ins->mir()->operandIsNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ ScratchDoubleScope scratch(masm);
+ masm.zeroDouble(scratch);
+ masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, scratch);
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitNotF(LNotF* ins)
+{
+ FloatRegister opd = ToFloatRegister(ins->input());
+
+ // Not returns true if the input is a NaN. We don't have to worry about
+ // it if we know the input is never NaN though.
+ Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
+ if (ins->mir()->operandIsNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ ScratchFloat32Scope scratch(masm);
+ masm.zeroFloat32(scratch);
+ masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, scratch);
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitCompareDAndBranch(LCompareDAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->cmpMir()->operandsAreNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ masm.compareDouble(cond, lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitCompareFAndBranch(LCompareFAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->cmpMir()->operandsAreNeverNaN())
+ nanCond = Assembler::NaN_HandledByCond;
+
+ masm.compareFloat(cond, lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
+}
+
+void
+CodeGeneratorX86Shared::visitWasmStackArg(LWasmStackArg* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
+ } else if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()), dst);
+ } else {
+ switch (mir->input()->type()) {
+ case MIRType::Double:
+ masm.storeDouble(ToFloatRegister(ins->arg()), dst);
+ return;
+ case MIRType::Float32:
+ masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
+ return;
+ // StackPointer is SIMD-aligned and ABIArgGenerator guarantees
+ // stack offsets are SIMD-aligned.
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ masm.storeAlignedSimd128Int(ToFloatRegister(ins->arg()), dst);
+ return;
+ case MIRType::Float32x4:
+ masm.storeAlignedSimd128Float(ToFloatRegister(ins->arg()), dst);
+ return;
+ default: break;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected mir type in WasmStackArg");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitWasmStackArgI64(LWasmStackArgI64* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg()))
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ else
+ masm.store64(ToRegister64(ins->arg()), dst);
+}
+
+void
+CodeGeneratorX86Shared::visitWasmSelect(LWasmSelect* ins)
+{
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ Operand falseExpr = ToOperand(ins->falseExpr());
+
+ masm.test32(cond, cond);
+
+ if (mirType == MIRType::Int32) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+ masm.cmovz(falseExpr, out);
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+
+ Label done;
+ masm.j(Assembler::NonZero, &done);
+
+ if (mirType == MIRType::Float32) {
+ if (falseExpr.kind() == Operand::FPREG)
+ masm.moveFloat32(ToFloatRegister(ins->falseExpr()), out);
+ else
+ masm.loadFloat32(falseExpr, out);
+ } else if (mirType == MIRType::Double) {
+ if (falseExpr.kind() == Operand::FPREG)
+ masm.moveDouble(ToFloatRegister(ins->falseExpr()), out);
+ else
+ masm.loadDouble(falseExpr, out);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+ return;
+}
+
+void
+CodeGeneratorX86Shared::visitWasmReinterpret(LWasmReinterpret* lir)
+{
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+#ifdef DEBUG
+ MIRType from = ins->input()->type();
+#endif
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.vmovd(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.vmovd(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool)
+{
+ switch (ool->viewType()) {
+ case Scalar::Int64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ case Scalar::Float32:
+ masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
+ break;
+ case Scalar::Float64:
+ masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ Register destReg = ool->dest().gpr();
+ masm.mov(ImmWord(0), destReg);
+ break;
+ }
+ masm.jmp(ool->rejoin());
+}
+
+void
+CodeGeneratorX86Shared::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ if (base != out)
+ masm.move32(base, out);
+ masm.add32(Imm32(mir->offset()), out);
+
+ masm.j(Assembler::CarrySet, trap(mir, wasm::Trap::OutOfBounds));
+}
+
+void
+CodeGeneratorX86Shared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType inputType = mir->input()->type();
+
+ MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (inputType == MIRType::Double)
+ masm.wasmTruncateDoubleToUInt32(input, output, oolEntry);
+ else if (inputType == MIRType::Float32)
+ masm.wasmTruncateFloat32ToUInt32(input, output, oolEntry);
+ else
+ MOZ_CRASH("unexpected type");
+ return;
+ }
+
+ if (inputType == MIRType::Double)
+ masm.wasmTruncateDoubleToInt32(input, output, oolEntry);
+ else if (inputType == MIRType::Float32)
+ masm.wasmTruncateFloat32ToInt32(input, output, oolEntry);
+ else
+ MOZ_CRASH("unexpected type");
+
+ masm.bind(ool->rejoin());
+}
+
+bool
+CodeGeneratorX86Shared::generateOutOfLineCode()
+{
+ if (!CodeGeneratorShared::generateOutOfLineCode())
+ return false;
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ masm.push(Imm32(frameSize()));
+
+ JitCode* handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jmp(ImmPtr(handler->raw()), Relocation::JITCODE);
+ }
+
+ return !masm.oom();
+}
+
+class BailoutJump {
+ Assembler::Condition cond_;
+
+ public:
+ explicit BailoutJump(Assembler::Condition cond) : cond_(cond)
+ { }
+#ifdef JS_CODEGEN_X86
+ void operator()(MacroAssembler& masm, uint8_t* code) const {
+ masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
+ }
+#endif
+ void operator()(MacroAssembler& masm, Label* label) const {
+ masm.j(cond_, label);
+ }
+};
+
+class BailoutLabel {
+ Label* label_;
+
+ public:
+ explicit BailoutLabel(Label* label) : label_(label)
+ { }
+#ifdef JS_CODEGEN_X86
+ void operator()(MacroAssembler& masm, uint8_t* code) const {
+ masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
+ }
+#endif
+ void operator()(MacroAssembler& masm, Label* label) const {
+ masm.retarget(label_, label);
+ }
+};
+
+template <typename T> void
+CodeGeneratorX86Shared::bailout(const T& binder, LSnapshot* snapshot)
+{
+ encode(snapshot);
+
+ // Though the assembler doesn't track all frame pushes, at least make sure
+ // the known value makes sense. We can't use bailout tables if the stack
+ // isn't properly aligned to the static frame size.
+ MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
+ frameClass_.frameSize() == masm.framePushed());
+
+#ifdef JS_CODEGEN_X86
+ // On x64, bailout tables are pointless, because 16 extra bytes are
+ // reserved per external jump, whereas it takes only 10 bytes to encode
+ // a non-table-based bailout.
+ if (assignBailoutId(snapshot)) {
+ binder(masm, deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE);
+ return;
+ }
+#endif
+
+ // We could not use a jump table, either because all bailout IDs were
+ // reserved, or a jump table is not optimal for this frame size or
+ // platform. Whatever, we will generate a lazy bailout.
+ //
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ binder(masm, ool->entry());
+}
+
+void
+CodeGeneratorX86Shared::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot)
+{
+ bailout(BailoutJump(condition), snapshot);
+}
+
+void
+CodeGeneratorX86Shared::bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot)
+{
+ MOZ_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) == Assembler::NaN_HandledByCond);
+ bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
+}
+
+void
+CodeGeneratorX86Shared::bailoutFrom(Label* label, LSnapshot* snapshot)
+{
+ MOZ_ASSERT(label->used() && !label->bound());
+ bailout(BailoutLabel(label), snapshot);
+}
+
+void
+CodeGeneratorX86Shared::bailout(LSnapshot* snapshot)
+{
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void
+CodeGeneratorX86Shared::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ masm.push(Imm32(ool->snapshot()->snapshotOffset()));
+ masm.jmp(&deoptLabel_);
+}
+
+void
+CodeGeneratorX86Shared::visitMinMaxD(LMinMaxD* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+#ifdef DEBUG
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(first == output);
+#endif
+
+ bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
+
+ if (ins->mir()->isMax())
+ masm.maxDouble(second, first, handleNaN);
+ else
+ masm.minDouble(second, first, handleNaN);
+}
+
+void
+CodeGeneratorX86Shared::visitMinMaxF(LMinMaxF* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+#ifdef DEBUG
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(first == output);
+#endif
+
+ bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
+
+ if (ins->mir()->isMax())
+ masm.maxFloat32(second, first, handleNaN);
+ else
+ masm.minFloat32(second, first, handleNaN);
+}
+
+void
+CodeGeneratorX86Shared::visitAbsD(LAbsD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ // Load a value which is all ones except for the sign bit.
+ ScratchDoubleScope scratch(masm);
+ masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::kSignificandBits), scratch);
+ masm.vandpd(scratch, input, input);
+}
+
+void
+CodeGeneratorX86Shared::visitAbsF(LAbsF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ // Same trick as visitAbsD above.
+ ScratchFloat32Scope scratch(masm);
+ masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits), scratch);
+ masm.vandps(scratch, input, input);
+}
+
+void
+CodeGeneratorX86Shared::visitClzI(LClzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ bool knownNotZero = ins->mir()->operandIsNeverZero();
+
+ masm.clz32(input, output, knownNotZero);
+}
+
+void
+CodeGeneratorX86Shared::visitCtzI(LCtzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ bool knownNotZero = ins->mir()->operandIsNeverZero();
+
+ masm.ctz32(input, output, knownNotZero);
+}
+
+void
+CodeGeneratorX86Shared::visitPopcntI(LPopcntI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+
+ masm.popcnt32(input, output, temp);
+}
+
+void
+CodeGeneratorX86Shared::visitSqrtD(LSqrtD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.vsqrtsd(input, output, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSqrtF(LSqrtF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.vsqrtss(input, output, output);
+}
+
+void
+CodeGeneratorX86Shared::visitPowHalfD(LPowHalfD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ ScratchDoubleScope scratch(masm);
+
+ Label done, sqrt;
+
+ if (!ins->mir()->operandIsNeverNegativeInfinity()) {
+ // Branch if not -Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
+
+ Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
+ if (ins->mir()->operandIsNeverNaN())
+ cond = Assembler::DoubleNotEqual;
+ masm.branchDouble(cond, input, scratch, &sqrt);
+
+ // Math.pow(-Infinity, 0.5) == Infinity.
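+ // (0.0 - (-Infinity) is +Infinity, so negating via a subtraction from
+ // zero yields the result without loading another constant.)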
+ masm.zeroDouble(output);
+ masm.subDouble(scratch, output);
+ masm.jump(&done);
+
+ masm.bind(&sqrt);
+ }
+
+ if (!ins->mir()->operandIsNeverNegativeZero()) {
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
+ masm.zeroDouble(scratch);
+ masm.addDouble(input, scratch);
+ masm.vsqrtsd(scratch, output, output);
+ } else {
+ masm.vsqrtsd(input, output, output);
+ }
+
+ masm.bind(&done);
+}
+
+class OutOfLineUndoALUOperation : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineUndoALUOperation(LInstruction* ins)
+ : ins_(ins)
+ { }
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitOutOfLineUndoALUOperation(this);
+ }
+ LInstruction* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGeneratorX86Shared::visitAddI(LAddI* ins)
+{
+ if (ins->rhs()->isConstant())
+ masm.addl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
+ else
+ masm.addl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
+
+ if (ins->snapshot()) {
+ if (ins->recoversInput()) {
+ OutOfLineUndoALUOperation* ool = new(alloc()) OutOfLineUndoALUOperation(ins);
+ addOutOfLineCode(ool, ins->mir());
+ masm.j(Assembler::Overflow, ool->entry());
+ } else {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitAddI64(LAddI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorX86Shared::visitSubI(LSubI* ins)
+{
+ if (ins->rhs()->isConstant())
+ masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
+ else
+ masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
+
+ if (ins->snapshot()) {
+ if (ins->recoversInput()) {
+ OutOfLineUndoALUOperation* ool = new(alloc()) OutOfLineUndoALUOperation(ins);
+ addOutOfLineCode(ool, ins->mir());
+ masm.j(Assembler::Overflow, ool->entry());
+ } else {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitSubI64(LSubI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool)
+{
+ LInstruction* ins = ool->ins();
+ Register reg = ToRegister(ins->getDef(0));
+
+ DebugOnly<LAllocation*> lhs = ins->getOperand(0);
+ LAllocation* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(reg == ToRegister(lhs));
+ MOZ_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));
+
+ // Undo the effect of the ALU operation, which was performed on the output
+ // register and overflowed. Writing to the output register clobbered an
+ // input reg, and the original value of the input needs to be recovered
+ // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
+ // the bailout snapshot.
+
+ if (rhs->isConstant()) {
+ Imm32 constant(ToInt32(rhs));
+ if (ins->isAddI())
+ masm.subl(constant, reg);
+ else
+ masm.addl(constant, reg);
+ } else {
+ if (ins->isAddI())
+ masm.subl(ToOperand(rhs), reg);
+ else
+ masm.addl(ToOperand(rhs), reg);
+ }
+
+ bailout(ool->ins()->snapshot());
+}
+
+class MulNegativeZeroCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ LMulI* ins_;
+
+ public:
+ explicit MulNegativeZeroCheck(LMulI* ins)
+ : ins_(ins)
+ { }
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitMulNegativeZeroCheck(this);
+ }
+ LMulI* ins() const {
+ return ins_;
+ }
+};
+
+void
+CodeGeneratorX86Shared::visitMulI(LMulI* ins)
+{
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ MMul* mul = ins->mir();
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ // Bailout on -0.0
+ int32_t constant = ToInt32(rhs);
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition bailoutCond = (constant == 0) ? Assembler::Signed : Assembler::Equal;
+ masm.test32(ToRegister(lhs), ToRegister(lhs));
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ masm.negl(ToOperand(lhs));
+ break;
+ case 0:
+ masm.xorl(ToOperand(lhs), ToRegister(lhs));
+ return; // escape overflow check;
+ case 1:
+ // nop
+ return; // escape overflow check;
+ case 2:
+ masm.addl(ToOperand(lhs), ToRegister(lhs));
+ break;
+ default:
+ if (!mul->canOverflow() && constant > 0) {
+ // Use shift if cannot overflow and constant is power of 2
+ int32_t shift = FloorLog2(constant);
+ if ((1 << shift) == constant) {
+ masm.shll(Imm32(shift), ToRegister(lhs));
+ return;
+ }
+ }
+ masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
+ }
+
+ // Bailout on overflow
+ if (mul->canOverflow())
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ } else {
+ masm.imull(ToOperand(rhs), ToRegister(lhs));
+
+ // Bailout on overflow
+ if (mul->canOverflow())
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+
+ if (mul->canBeNegativeZero()) {
+ // Jump to an OOL path if the result is 0.
+ MulNegativeZeroCheck* ool = new(alloc()) MulNegativeZeroCheck(ins);
+ addOutOfLineCode(ool, mul);
+
+ masm.test32(ToRegister(lhs), ToRegister(lhs));
+ masm.j(Assembler::Zero, ool->entry());
+ masm.bind(ool->rejoin());
+ }
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitMulI64(LMulI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ Register reg_;
+
+ public:
+ explicit ReturnZero(Register reg)
+ : reg_(reg)
+ { }
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitReturnZero(this);
+ }
+ Register reg() const {
+ return reg_;
+ }
+};
+
+void
+CodeGeneratorX86Shared::visitReturnZero(ReturnZero* ool)
+{
+ masm.mov(ImmWord(0), ool->reg());
+ masm.jmp(ool->rejoin());
+}
+
+void
+CodeGeneratorX86Shared::visitUDivOrMod(LUDivOrMod* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT_IF(output == eax, ToRegister(ins->remainder()) == edx);
+
+ ReturnZero* ool = nullptr;
+
+ // Put the lhs in eax.
+ if (lhs != eax)
+ masm.mov(lhs, eax);
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ masm.j(Assembler::Zero, trap(ins, wasm::Trap::IntegerDivideByZero));
+ } else {
+ ool = new(alloc()) ReturnZero(output);
+ masm.j(Assembler::Zero, ool->entry());
+ }
+ } else {
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ // Zero extend the lhs into edx to make (edx:eax), since udiv is 64-bit.
+ masm.mov(ImmWord(0), edx);
+ masm.udiv(rhs);
+
+ // If the remainder is > 0, bailout since this must be a double.
+ if (ins->mir()->isDiv() && !ins->mir()->toDiv()->canTruncateRemainder()) {
+ Register remainder = ToRegister(ins->remainder());
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ // Unsigned div or mod can return a value that's not a signed int32.
+ // If our users aren't expecting that, bail.
+ if (!ins->mir()->isTruncated()) {
+ masm.test32(output, output);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+
+ if (ool) {
+ addOutOfLineCode(ool, ins->mir());
+ masm.bind(ool->rejoin());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitUDivOrModConstant(LUDivOrModConstant* ins)
+{
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ uint32_t d = ins->denominator();
+
+ // This emits the division answer into edx or the modulus answer into eax.
+ MOZ_ASSERT(output == eax || output == edx);
+ MOZ_ASSERT(lhs != eax && lhs != edx);
+ bool isDiv = (output == edx);
+
+ if (d == 0) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError())
+ masm.jump(trap(ins, wasm::Trap::IntegerDivideByZero));
+ else
+ masm.xorl(output, output);
+ } else {
+ bailout(ins->snapshot());
+ }
+ return;
+ }
+
+ // The denominator isn't a power of 2 (see LDivPowTwoI and LModPowTwoI).
+ MOZ_ASSERT((d & (d - 1)) != 0);
+
+ ReciprocalMulConstants rmc = computeDivisionConstants(d, /* maxLog = */ 32);
+
+ // We first compute (M * n) >> 32, where M = rmc.multiplier.
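+    // computeDivisionConstants picks M and shiftAmount so that, for every
+    // uint32_t n, floor(n / d) == (M * n) >> (32 + shiftAmount).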
+ masm.movl(Imm32(rmc.multiplier), eax);
+ masm.umull(lhs);
+ if (rmc.multiplier > UINT32_MAX) {
+ // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
+ // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d, contradicting
+ // the proof of correctness in computeDivisionConstants.
+ MOZ_ASSERT(rmc.shiftAmount > 0);
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
+
+ // We actually computed edx = ((uint32_t(M) * n) >> 32) instead. Since
+ // (M * n) >> (32 + shift) is the same as (edx + n) >> shift, we can
+ // correct for the overflow. This case is a bit trickier than the signed
+ // case, though, as the (edx + n) addition itself can overflow; however,
+ // note that (edx + n) >> shift == (((n - edx) >> 1) + edx) >> (shift - 1),
+ // which is overflow-free. See Hacker's Delight, section 10-8 for details.
+
+ // Compute (n - edx) >> 1 into eax.
+ masm.movl(lhs, eax);
+ masm.subl(edx, eax);
+ masm.shrl(Imm32(1), eax);
+
+ // Finish the computation.
+ masm.addl(eax, edx);
+ masm.shrl(Imm32(rmc.shiftAmount - 1), edx);
+ } else {
+ masm.shrl(Imm32(rmc.shiftAmount), edx);
+ }
+
+ // We now have the truncated division value in edx. If we're
+ // computing a modulus or checking whether the division resulted
+ // in an integer, we need to multiply the obtained value by d and
+ // finish the computation/check.
+ if (!isDiv) {
+ masm.imull(Imm32(d), edx, edx);
+ masm.movl(lhs, eax);
+ masm.subl(edx, eax);
+
+ // The final result of the modulus op, just computed above by the
+ // sub instruction, can be a number in the range [2^31, 2^32). If
+ // this is the case and the modulus is not truncated, we must bail
+ // out.
+ if (!ins->mir()->isTruncated())
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ } else if (!ins->mir()->isTruncated()) {
+ masm.imull(Imm32(d), edx, eax);
+ masm.cmpl(lhs, eax);
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitMulNegativeZeroCheck(MulNegativeZeroCheck* ool)
+{
+ LMulI* ins = ool->ins();
+ Register result = ToRegister(ins->output());
+ Operand lhsCopy = ToOperand(ins->lhsCopy());
+ Operand rhs = ToOperand(ins->rhs());
+ MOZ_ASSERT_IF(lhsCopy.kind() == Operand::REG, lhsCopy.reg() != result.code());
+
+ // Result is -0 if lhs or rhs is negative.
+ masm.movl(lhsCopy, result);
+ masm.orl(rhs, result);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+
+ masm.mov(ImmWord(0), result);
+ masm.jmp(ool->rejoin());
+}
+
+void
+CodeGeneratorX86Shared::visitDivPowTwoI(LDivPowTwoI* ins)
+{
+ Register lhs = ToRegister(ins->numerator());
+ DebugOnly<Register> output = ToRegister(ins->output());
+
+ int32_t shift = ins->shift();
+ bool negativeDivisor = ins->negativeDivisor();
+ MDiv* mir = ins->mir();
+
+ // We use defineReuseInput so these should always be the same, which is
+ // convenient since all of our instructions here are two-address.
+ MOZ_ASSERT(lhs == output);
+
+ if (!mir->isTruncated() && negativeDivisor) {
+ // 0 divided by a negative number must return a double.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+
+ if (shift) {
+ if (!mir->isTruncated()) {
+ // If the remainder is != 0, bailout since this must be a double.
+ masm.test32(lhs, Imm32(UINT32_MAX >> (32 - shift)));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ if (mir->isUnsigned()) {
+ masm.shrl(Imm32(shift), lhs);
+ } else {
+ // Adjust the value so that shifting produces a correctly
+ // rounded result when the numerator is negative. See 10-1
+ // "Signed Division by a Known Power of 2" in Henry
+ // S. Warren, Jr.'s Hacker's Delight.
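+            // e.g. lhs == -7, shift == 2: the adjustment below adds
+            // (1 << shift) - 1 == 3, giving -4, and -4 >> 2 == -1 ==
+            // trunc(-7/4), whereas shifting -7 directly would give -2.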
+ if (mir->canBeNegativeDividend()) {
+ Register lhsCopy = ToRegister(ins->numeratorCopy());
+ MOZ_ASSERT(lhsCopy != lhs);
+ if (shift > 1)
+ masm.sarl(Imm32(31), lhs);
+ masm.shrl(Imm32(32 - shift), lhs);
+ masm.addl(lhsCopy, lhs);
+ }
+ masm.sarl(Imm32(shift), lhs);
+
+ if (negativeDivisor)
+ masm.negl(lhs);
+ }
+ return;
+ }
+
+ if (negativeDivisor) {
+ // INT32_MIN / -1 overflows.
+ masm.negl(lhs);
+ if (!mir->isTruncated())
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ else if (mir->trapOnError())
+ masm.j(Assembler::Overflow, trap(mir, wasm::Trap::IntegerOverflow));
+ } else if (mir->isUnsigned() && !mir->isTruncated()) {
+ // Unsigned division by 1 can overflow if output is not
+ // truncated.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitDivOrModConstantI(LDivOrModConstantI* ins)
+{
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ int32_t d = ins->denominator();
+
+ // This emits the division answer into edx or the modulus answer into eax.
+ MOZ_ASSERT(output == eax || output == edx);
+ MOZ_ASSERT(lhs != eax && lhs != edx);
+ bool isDiv = (output == edx);
+
+ // The absolute value of the denominator isn't a power of 2 (see LDivPowTwoI
+ // and LModPowTwoI).
+ MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);
+
+ // We will first divide by Abs(d), and negate the answer if d is negative.
+ // If desired, this can be avoided by generalizing computeDivisionConstants.
+ ReciprocalMulConstants rmc = computeDivisionConstants(Abs(d), /* maxLog = */ 31);
+
+ // We first compute (M * n) >> 32, where M = rmc.multiplier.
+ masm.movl(Imm32(rmc.multiplier), eax);
+ masm.imull(lhs);
+ if (rmc.multiplier > INT32_MAX) {
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));
+
+ // We actually computed edx = ((int32_t(M) * n) >> 32) instead. Since
+ // (M * n) >> 32 is the same as (edx + n), we can correct for the overflow.
+ // (edx + n) can't overflow, as n and edx have opposite signs because int32_t(M)
+ // is negative.
+ masm.addl(lhs, edx);
+ }
+ // (M * n) >> (32 + shift) is the truncated division answer if n is non-negative,
+ // as proved in the comments of computeDivisionConstants. We must add 1 later if n is
+ // negative to get the right answer in all cases.
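+    // e.g. lhs == -7, d == 3: the value after the shift below is
+    // trunc(-7/3) - 1 == -3; the later fixup adds 1, giving -2.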
+ masm.sarl(Imm32(rmc.shiftAmount), edx);
+
+ // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
+ // computed with just a sign-extending shift of 31 bits.
+ if (ins->canBeNegativeDividend()) {
+ masm.movl(lhs, eax);
+ masm.sarl(Imm32(31), eax);
+ masm.subl(eax, edx);
+ }
+
+ // After this, edx contains the correct truncated division result.
+ if (d < 0)
+ masm.negl(edx);
+
+ if (!isDiv) {
+ masm.imull(Imm32(-d), edx, eax);
+ masm.addl(lhs, eax);
+ }
+
+ if (!ins->mir()->isTruncated()) {
+ if (isDiv) {
+ // This is a division op. Multiply the obtained value by d to check if
+ // the correct answer is an integer. This cannot overflow, since |d| > 1.
+ masm.imull(Imm32(d), edx, eax);
+ masm.cmp32(lhs, eax);
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+
+ // If lhs is zero and the divisor is negative, the answer should have
+ // been -0.
+ if (d < 0) {
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ } else if (ins->canBeNegativeDividend()) {
+ // This is a mod op. If the computed value is zero and lhs
+ // is negative, the answer should have been -0.
+ Label done;
+
+ masm.cmp32(lhs, Imm32(0));
+ masm.j(Assembler::GreaterThanOrEqual, &done);
+
+ masm.test32(eax, eax);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitDivI(LDivI* ins)
+{
+ Register remainder = ToRegister(ins->remainder());
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MDiv* mir = ins->mir();
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT(remainder == edx);
+ MOZ_ASSERT(output == eax);
+
+ Label done;
+ ReturnZero* ool = nullptr;
+
+ // Put the lhs in eax, for either the negative overflow case or the regular
+ // divide case.
+ if (lhs != eax)
+ masm.mov(lhs, eax);
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (mir->trapOnError()) {
+ masm.j(Assembler::Zero, trap(mir, wasm::Trap::IntegerDivideByZero));
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ if (!ool)
+ ool = new(alloc()) ReturnZero(output);
+ masm.j(Assembler::Zero, ool->entry());
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.cmp32(lhs, Imm32(INT32_MIN));
+ masm.j(Assembler::NotEqual, &notmin);
+ masm.cmp32(rhs, Imm32(-1));
+ if (mir->trapOnError()) {
+ masm.j(Assembler::Equal, trap(mir, wasm::Trap::IntegerOverflow));
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in the
+ // output register (lhs == eax).
+ masm.j(Assembler::Equal, &done);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ masm.bind(&notmin);
+ }
+
+ // Handle negative 0.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.test32(lhs, lhs);
+ masm.j(Assembler::NonZero, &nonzero);
+ masm.cmp32(rhs, Imm32(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ masm.bind(&nonzero);
+ }
+
+ // Sign extend the lhs into edx to make (edx:eax), since idiv is 64-bit.
+ if (lhs != eax)
+ masm.mov(lhs, eax);
+ masm.cdq();
+ masm.idiv(rhs);
+
+ if (!mir->canTruncateRemainder()) {
+ // If the remainder is > 0, bailout since this must be a double.
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+
+ if (ool) {
+ addOutOfLineCode(ool, mir);
+ masm.bind(ool->rejoin());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitModPowTwoI(LModPowTwoI* ins)
+{
+ Register lhs = ToRegister(ins->getOperand(0));
+ int32_t shift = ins->shift();
+
+ Label negative;
+
+ if (!ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend()) {
+ // Switch based on sign of the lhs.
+ // Positive numbers are just a bitmask
+ masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
+ }
+
+ masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
+
+ if (!ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend()) {
+ Label done;
+ masm.jump(&done);
+
+ // Negative numbers need a negate, bitmask, negate
+ masm.bind(&negative);
+
+ // Unlike in the visitModI case, we are not computing the mod by means of a
+ // division. Therefore, the divisor = -1 case isn't problematic (the andl
+ // always returns 0, which is what we expect).
+ //
+ // The negl instruction overflows if lhs == INT32_MIN, but this is also not
+ // a problem: shift is at most 31, and so the andl also always returns 0.
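+        // e.g. lhs == -5, shift == 2: negl gives 5, the andl gives 1, and the
+        // final negl gives -1, matching -5 % 4 == -1.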
+ masm.negl(lhs);
+ masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
+ masm.negl(lhs);
+
+ // Since a%b has the same sign as b, and a is negative in this branch,
+ // an answer of 0 means the correct result is actually -0. Bail out.
+ if (!ins->mir()->isTruncated())
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ masm.bind(&done);
+ }
+}
+
+class ModOverflowCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ Label done_;
+ LModI* ins_;
+ Register rhs_;
+
+ public:
+ explicit ModOverflowCheck(LModI* ins, Register rhs)
+ : ins_(ins), rhs_(rhs)
+ { }
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitModOverflowCheck(this);
+ }
+ Label* done() {
+ return &done_;
+ }
+ LModI* ins() const {
+ return ins_;
+ }
+ Register rhs() const {
+ return rhs_;
+ }
+};
+
+void
+CodeGeneratorX86Shared::visitModOverflowCheck(ModOverflowCheck* ool)
+{
+ masm.cmp32(ool->rhs(), Imm32(-1));
+ if (ool->ins()->mir()->isTruncated()) {
+ masm.j(Assembler::NotEqual, ool->rejoin());
+ masm.mov(ImmWord(0), edx);
+ masm.jmp(ool->done());
+ } else {
+ bailoutIf(Assembler::Equal, ool->ins()->snapshot());
+ masm.jmp(ool->rejoin());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitModI(LModI* ins)
+{
+ Register remainder = ToRegister(ins->remainder());
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+
+ // Required to use idiv.
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT(remainder == edx);
+ MOZ_ASSERT(ToRegister(ins->getTemp(0)) == eax);
+
+ Label done;
+ ReturnZero* ool = nullptr;
+ ModOverflowCheck* overflow = nullptr;
+
+ // Set up eax in preparation for doing a div.
+ if (lhs != eax)
+ masm.mov(lhs, eax);
+
+ MMod* mir = ins->mir();
+
+ // Prevent divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ masm.j(Assembler::Zero, trap(mir, wasm::Trap::IntegerDivideByZero));
+ } else {
+ if (!ool)
+ ool = new(alloc()) ReturnZero(edx);
+ masm.j(Assembler::Zero, ool->entry());
+ }
+ } else {
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ Label negative;
+
+ // Switch based on sign of the lhs.
+ if (mir->canBeNegativeDividend())
+ masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
+
+ // If lhs >= 0 then remainder = lhs % rhs. The remainder must be positive.
+ {
+ // Check if rhs is a power-of-two.
+ if (mir->canBePowerOfTwoDivisor()) {
+ MOZ_ASSERT(rhs != remainder);
+
+ // Rhs y is a power-of-two if (y & (y-1)) == 0. Note that if
+ // y is any negative number other than INT32_MIN, both y and
+ // y-1 will have the sign bit set so these are never optimized
+ // as powers-of-two. If y is INT32_MIN, y-1 will be INT32_MAX
+ // and because lhs >= 0 at this point, lhs & INT32_MAX returns
+ // the correct value.
+ Label notPowerOfTwo;
+ masm.mov(rhs, remainder);
+ masm.subl(Imm32(1), remainder);
+ masm.branchTest32(Assembler::NonZero, remainder, rhs, &notPowerOfTwo);
+ {
+ masm.andl(lhs, remainder);
+ masm.jmp(&done);
+ }
+ masm.bind(&notPowerOfTwo);
+ }
+
+ // Since lhs >= 0, the sign-extension will be 0
+ masm.mov(ImmWord(0), edx);
+ masm.idiv(rhs);
+ }
+
+ // Otherwise, we have to beware of two special cases:
+ if (mir->canBeNegativeDividend()) {
+ masm.jump(&done);
+
+ masm.bind(&negative);
+
+ // Prevent an integer overflow exception from -2147483648 % -1
+ Label notmin;
+ masm.cmp32(lhs, Imm32(INT32_MIN));
+ overflow = new(alloc()) ModOverflowCheck(ins, rhs);
+ masm.j(Assembler::Equal, overflow->entry());
+ masm.bind(overflow->rejoin());
+ masm.cdq();
+ masm.idiv(rhs);
+
+ if (!mir->isTruncated()) {
+ // A remainder of 0 means that the rval must be -0, which is a double.
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ masm.bind(&done);
+
+ if (overflow) {
+ addOutOfLineCode(overflow, mir);
+ masm.bind(overflow->done());
+ }
+
+ if (ool) {
+ addOutOfLineCode(ool, mir);
+ masm.bind(ool->rejoin());
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitBitNotI(LBitNotI* ins)
+{
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.notl(ToOperand(input));
+}
+
+void
+CodeGeneratorX86Shared::visitBitOpI(LBitOpI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+
+ switch (ins->bitop()) {
+ case JSOP_BITOR:
+ if (rhs->isConstant())
+ masm.orl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ else
+ masm.orl(ToOperand(rhs), ToRegister(lhs));
+ break;
+ case JSOP_BITXOR:
+ if (rhs->isConstant())
+ masm.xorl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ else
+ masm.xorl(ToOperand(rhs), ToRegister(lhs));
+ break;
+ case JSOP_BITAND:
+ if (rhs->isConstant())
+ masm.andl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ else
+ masm.andl(ToOperand(rhs), ToRegister(lhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitBitOpI64(LBitOpI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOP_BITOR:
+ if (IsConstant(rhs))
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITXOR:
+ if (IsConstant(rhs))
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITAND:
+ if (IsConstant(rhs))
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitShiftI(LShiftI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.shll(Imm32(shift), lhs);
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.sarl(Imm32(shift), lhs);
+ break;
+ case JSOP_URSH:
+ if (shift) {
+ masm.shrl(Imm32(shift), lhs);
+ } else if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ MOZ_ASSERT(ToRegister(rhs) == ecx);
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ masm.shll_cl(lhs);
+ break;
+ case JSOP_RSH:
+ masm.sarl_cl(lhs);
+ break;
+ case JSOP_URSH:
+ masm.shrl_cl(lhs);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitShiftI64(LShiftI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ if (shift)
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ MOZ_ASSERT(ToRegister(rhs) == ecx);
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ masm.lshift64(ecx, ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ masm.rshift64Arithmetic(ecx, ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ masm.rshift64(ecx, ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitUrshD(LUrshD* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ MOZ_ASSERT(ToRegister(ins->temp()) == lhs);
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ if (shift)
+ masm.shrl(Imm32(shift), lhs);
+ } else {
+ MOZ_ASSERT(ToRegister(rhs) == ecx);
+ masm.shrl_cl(lhs);
+ }
+
+ masm.convertUInt32ToDouble(lhs, out);
+}
+
+Operand
+CodeGeneratorX86Shared::ToOperand(const LAllocation& a)
+{
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ if (a.isFloatReg())
+ return Operand(a.toFloatReg()->reg());
+ return Operand(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Operand
+CodeGeneratorX86Shared::ToOperand(const LAllocation* a)
+{
+ return ToOperand(*a);
+}
+
+Operand
+CodeGeneratorX86Shared::ToOperand(const LDefinition* def)
+{
+ return ToOperand(def->output());
+}
+
+MoveOperand
+CodeGeneratorX86Shared::toMoveOperand(LAllocation a) const
+{
+ if (a.isGeneralReg())
+ return MoveOperand(ToRegister(a));
+ if (a.isFloatReg())
+ return MoveOperand(ToFloatRegister(a));
+ return MoveOperand(StackPointer, ToStackOffset(a));
+}
+
+class OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ explicit OutOfLineTableSwitch(MTableSwitch* mir)
+ : mir_(mir)
+ {}
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ CodeLabel* jumpLabel() {
+ return &jumpLabel_;
+ }
+};
+
+void
+CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.use(ool->jumpLabel()->target());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(cl.patchAt());
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void
+CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base)
+{
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+    // Make the index relative to the table by subtracting the low bound.
+ if (mir->low() != 0)
+ masm.subl(Imm32(mir->low()), index);
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.cmp32(index, Imm32(cases));
+ masm.j(AssemblerX86Shared::AboveOrEqual, defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+    // Compute the address of the jump table entry that holds the right case's code pointer.
+ masm.mov(ool->jumpLabel()->patchAt(), base);
+ Operand pointer = Operand(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.jmp(pointer);
+}
+
+void
+CodeGeneratorX86Shared::visitMathD(LMathD* math)
+{
+ FloatRegister lhs = ToFloatRegister(math->lhs());
+ Operand rhs = ToOperand(math->rhs());
+ FloatRegister output = ToFloatRegister(math->output());
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.vaddsd(rhs, lhs, output);
+ break;
+ case JSOP_SUB:
+ masm.vsubsd(rhs, lhs, output);
+ break;
+ case JSOP_MUL:
+ masm.vmulsd(rhs, lhs, output);
+ break;
+ case JSOP_DIV:
+ masm.vdivsd(rhs, lhs, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitMathF(LMathF* math)
+{
+ FloatRegister lhs = ToFloatRegister(math->lhs());
+ Operand rhs = ToOperand(math->rhs());
+ FloatRegister output = ToFloatRegister(math->output());
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.vaddss(rhs, lhs, output);
+ break;
+ case JSOP_SUB:
+ masm.vsubss(rhs, lhs, output);
+ break;
+ case JSOP_MUL:
+ masm.vmulss(rhs, lhs, output);
+ break;
+ case JSOP_DIV:
+ masm.vdivss(rhs, lhs, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitFloor(LFloor* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bailout;
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // Bail on negative-zero.
+ masm.branchNegativeZero(input, output, &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Round toward -Infinity.
+ {
+ ScratchDoubleScope scratch(masm);
+ masm.vroundsd(X86Encoding::RoundDown, input, scratch, scratch);
+ bailoutCvttsd2si(scratch, output, lir->snapshot());
+ }
+ } else {
+ Label negative, end;
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ {
+ ScratchDoubleScope scratch(masm);
+ masm.zeroDouble(scratch);
+ masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);
+ }
+
+ // Bail on negative-zero.
+ masm.branchNegativeZero(input, output, &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Input is non-negative, so truncation correctly rounds.
+ bailoutCvttsd2si(input, output, lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ // Negative values go on a comparatively expensive path, since no
+ // native rounding mode matches JS semantics. Still better than callVM.
+ masm.bind(&negative);
+ {
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ bailoutCvttsd2si(input, output, lir->snapshot());
+
+ // Test whether the input double was integer-valued.
+ {
+ ScratchDoubleScope scratch(masm);
+ masm.convertInt32ToDouble(output, scratch);
+ masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+ }
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
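+            // e.g. input == -2.5: the truncation gave -2; subtracting 1
+            // yields floor(-2.5) == -3.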
+ masm.subl(Imm32(1), output);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+
+ masm.bind(&end);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitFloorF(LFloorF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bailout;
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // Bail on negative-zero.
+ masm.branchNegativeZeroFloat32(input, output, &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Round toward -Infinity.
+ {
+ ScratchFloat32Scope scratch(masm);
+ masm.vroundss(X86Encoding::RoundDown, input, scratch, scratch);
+ bailoutCvttss2si(scratch, output, lir->snapshot());
+ }
+ } else {
+ Label negative, end;
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ {
+ ScratchFloat32Scope scratch(masm);
+ masm.zeroFloat32(scratch);
+ masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);
+ }
+
+ // Bail on negative-zero.
+ masm.branchNegativeZeroFloat32(input, output, &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Input is non-negative, so truncation correctly rounds.
+ bailoutCvttss2si(input, output, lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ // Negative values go on a comparatively expensive path, since no
+ // native rounding mode matches JS semantics. Still better than callVM.
+ masm.bind(&negative);
+ {
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ bailoutCvttss2si(input, output, lir->snapshot());
+
+ // Test whether the input double was integer-valued.
+ {
+ ScratchFloat32Scope scratch(masm);
+ masm.convertInt32ToFloat32(output, scratch);
+ masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+ }
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ masm.subl(Imm32(1), output);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+
+ masm.bind(&end);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitCeil(LCeil* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ ScratchDoubleScope scratch(masm);
+ Register output = ToRegister(lir->output());
+
+ Label bailout, lessThanMinusOne;
+
+ // Bail on ]-1; -0] range
+ masm.loadConstantDouble(-1, scratch);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqualOrUnordered, input,
+ scratch, &lessThanMinusOne);
+
+ // Test for remaining values with the sign bit set, i.e. ]-1; -0]
+ masm.vmovmskpd(input, output);
+ masm.branchTest32(Assembler::NonZero, output, Imm32(1), &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // x <= -1 or x > -0
+ masm.bind(&lessThanMinusOne);
+ // Round toward +Infinity.
+ masm.vroundsd(X86Encoding::RoundUp, input, scratch, scratch);
+ bailoutCvttsd2si(scratch, output, lir->snapshot());
+ return;
+ }
+
+ // No SSE4.1
+ Label end;
+
+ // x >= 0 and x is not -0.0, we can truncate (resp. truncate and add 1) for
+ // integer (resp. non-integer) values.
+ // Will also work for values >= INT_MAX + 1, as the truncate
+ // operation will return INT_MIN and there'll be a bailout.
+ bailoutCvttsd2si(input, output, lir->snapshot());
+ masm.convertInt32ToDouble(output, scratch);
+ masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+
+ // Input is not integer-valued, add 1 to obtain the ceiling value
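+    // e.g. input == 3.5: the truncation gave 3; adding 1 yields ceil(3.5) == 4.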
+ masm.addl(Imm32(1), output);
+ // if input > INT_MAX, output == INT_MAX so adding 1 will overflow.
+ bailoutIf(Assembler::Overflow, lir->snapshot());
+ masm.jump(&end);
+
+ // x <= -1, truncation is the way to go.
+ masm.bind(&lessThanMinusOne);
+ bailoutCvttsd2si(input, output, lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorX86Shared::visitCeilF(LCeilF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ ScratchFloat32Scope scratch(masm);
+ Register output = ToRegister(lir->output());
+
+ Label bailout, lessThanMinusOne;
+
+ // Bail on ]-1; -0] range
+ masm.loadConstantFloat32(-1.f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThanOrEqualOrUnordered, input,
+ scratch, &lessThanMinusOne);
+
+ // Test for remaining values with the sign bit set, i.e. ]-1; -0]
+ masm.vmovmskps(input, output);
+ masm.branchTest32(Assembler::NonZero, output, Imm32(1), &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // x <= -1 or x > -0
+ masm.bind(&lessThanMinusOne);
+ // Round toward +Infinity.
+ masm.vroundss(X86Encoding::RoundUp, input, scratch, scratch);
+ bailoutCvttss2si(scratch, output, lir->snapshot());
+ return;
+ }
+
+ // No SSE4.1
+ Label end;
+
+ // x >= 0 and x is not -0.0, we can truncate (resp. truncate and add 1) for
+ // integer (resp. non-integer) values.
+ // Will also work for values >= INT_MAX + 1, as the truncate
+ // operation will return INT_MIN and there'll be a bailout.
+ bailoutCvttss2si(input, output, lir->snapshot());
+ masm.convertInt32ToFloat32(output, scratch);
+ masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+
+ // Input is not integer-valued, add 1 to obtain the ceiling value
+ masm.addl(Imm32(1), output);
+ // if input > INT_MAX, output == INT_MAX so adding 1 will overflow.
+ bailoutIf(Assembler::Overflow, lir->snapshot());
+ masm.jump(&end);
+
+ // x <= -1, truncation is the way to go.
+ masm.bind(&lessThanMinusOne);
+ bailoutCvttss2si(input, output, lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorX86Shared::visitRound(LRound* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ ScratchDoubleScope scratch(masm);
+ Register output = ToRegister(lir->output());
+
+ Label negativeOrZero, negative, end, bailout;
+
+ // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
+ masm.zeroDouble(scratch);
+ masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
+
+ // Input is positive. Add the biggest double less than 0.5 and
+ // truncate, rounding down (because if the input is the biggest double less
+ // than 0.5, adding 0.5 would undesirably round up to 1). Note that we have
+ // to add the input to the temp register because we're not allowed to
+ // modify the input register.
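+    // e.g. input == 0.49999999999999994 (the biggest double below 0.5):
+    // adding 0.5 would give exactly 1.0 under round-to-nearest-even, even
+    // though Math.round of this input is 0; adding the biggest double below
+    // 0.5 instead gives 1 - 2^-53, which truncates to 0 as required.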
+ masm.addDouble(input, temp);
+ bailoutCvttsd2si(temp, output, lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, +0 or -0.
+ masm.bind(&negativeOrZero);
+ // Branch on negative input.
+ masm.j(Assembler::NotEqual, &negative);
+
+ // Bail on negative-zero.
+ masm.branchNegativeZero(input, output, &bailout, /* maybeNonZero = */ false);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Input is +0
+ masm.xor32(output, output);
+ masm.jump(&end);
+
+ // Input is negative.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
+    // the biggest double less than 0.5 added.
+ Label loadJoin;
+ masm.loadConstantDouble(-0.5, scratch);
+ masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantDouble(0.5, temp);
+ masm.bind(&loadJoin);
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // Add 0.5 and round toward -Infinity. The result is stored in the temp
+ // register (currently contains 0.5).
+ masm.addDouble(input, temp);
+ masm.vroundsd(X86Encoding::RoundDown, temp, scratch, scratch);
+
+ // Truncate.
+ bailoutCvttsd2si(scratch, output, lir->snapshot());
+
+ // If the result is positive zero, then the actual result is -0. Bail.
+ // Otherwise, the truncation will have produced the correct negative integer.
+ masm.test32(output, output);
+ bailoutIf(Assembler::Zero, lir->snapshot());
+ } else {
+ masm.addDouble(input, temp);
+
+ // Round toward -Infinity without the benefit of ROUNDSD.
+ {
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the result is -0.
+ masm.compareDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
+ bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ bailoutCvttsd2si(temp, output, lir->snapshot());
+
+ // Test whether the truncated double was integer-valued.
+ masm.convertInt32ToDouble(output, scratch);
+ masm.branchDouble(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ masm.subl(Imm32(1), output);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+ }
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorX86Shared::visitRoundF(LRoundF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ ScratchFloat32Scope scratch(masm);
+ Register output = ToRegister(lir->output());
+
+ Label negativeOrZero, negative, end, bailout;
+
+ // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
+ masm.zeroFloat32(scratch);
+ masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+ masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
+
+ // Input is non-negative. Add the biggest float less than 0.5 and truncate,
+ // rounding down (because if the input is the biggest float less than 0.5,
+ // adding 0.5 would undesirably round up to 1). Note that we have to add
+ // the input to the temp register because we're not allowed to modify the
+ // input register.
+ masm.addFloat32(input, temp);
+
+ bailoutCvttss2si(temp, output, lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, +0 or -0.
+ masm.bind(&negativeOrZero);
+ // Branch on negative input.
+ masm.j(Assembler::NotEqual, &negative);
+
+ // Bail on negative-zero.
+ masm.branchNegativeZeroFloat32(input, output, &bailout);
+ bailoutFrom(&bailout, lir->snapshot());
+
+ // Input is +0.
+ masm.xor32(output, output);
+ masm.jump(&end);
+
+ // Input is negative.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
+    // the biggest float less than 0.5 added.
+ Label loadJoin;
+ masm.loadConstantFloat32(-0.5f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantFloat32(0.5f, temp);
+ masm.bind(&loadJoin);
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // Add 0.5 and round toward -Infinity. The result is stored in the temp
+ // register (currently contains 0.5).
+ masm.addFloat32(input, temp);
+ masm.vroundss(X86Encoding::RoundDown, temp, scratch, scratch);
+
+ // Truncate.
+ bailoutCvttss2si(scratch, output, lir->snapshot());
+
+ // If the result is positive zero, then the actual result is -0. Bail.
+ // Otherwise, the truncation will have produced the correct negative integer.
+ masm.test32(output, output);
+ bailoutIf(Assembler::Zero, lir->snapshot());
+ } else {
+ masm.addFloat32(input, temp);
+ // Round toward -Infinity without the benefit of ROUNDSS.
+ {
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the result is -0.
+ masm.compareFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
+ bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ bailoutCvttss2si(temp, output, lir->snapshot());
+
+ // Test whether the truncated double was integer-valued.
+ masm.convertInt32ToFloat32(output, scratch);
+ masm.branchFloat(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ masm.subl(Imm32(1), output);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+ }
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorX86Shared::visitGuardShape(LGuardShape* guard)
+{
+ Register obj = ToRegister(guard->input());
+ masm.cmpPtr(Operand(obj, ShapedObject::offsetOfShape()), ImmGCPtr(guard->mir()->shape()));
+
+ bailoutIf(Assembler::NotEqual, guard->snapshot());
+}
+
+void
+CodeGeneratorX86Shared::visitGuardObjectGroup(LGuardObjectGroup* guard)
+{
+ Register obj = ToRegister(guard->input());
+
+ masm.cmpPtr(Operand(obj, JSObject::offsetOfGroup()), ImmGCPtr(guard->mir()->group()));
+
+ Assembler::Condition cond =
+ guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
+ bailoutIf(cond, guard->snapshot());
+}
+
+void
+CodeGeneratorX86Shared::visitGuardClass(LGuardClass* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), tmp);
+ masm.cmpPtr(Operand(tmp, ObjectGroup::offsetOfClasp()), ImmPtr(guard->mir()->getClass()));
+ bailoutIf(Assembler::NotEqual, guard->snapshot());
+}
+
+void
+CodeGeneratorX86Shared::visitEffectiveAddress(LEffectiveAddress* ins)
+{
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+ masm.leal(Operand(base, index, mir->scale(), mir->displacement()), output);
+}
+
+void
+CodeGeneratorX86Shared::generateInvalidateEpilogue()
+{
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+ // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+ masm.nop();
+
+ masm.bind(&invalidate_);
+
+ // Push the Ion script onto the stack (when we determine what that pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+ JitCode* thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.call(thunk);
+
+ // We should never reach this point in JIT code -- the invalidation thunk should
+ // pop the invalidated JS frame and return directly to its caller.
+ masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
+}
+
+void
+CodeGeneratorX86Shared::visitNegI(LNegI* ins)
+{
+ Register input = ToRegister(ins->input());
+ MOZ_ASSERT(input == ToRegister(ins->output()));
+
+ masm.neg32(input);
+}
+
+void
+CodeGeneratorX86Shared::visitNegD(LNegD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+
+ masm.negateDouble(input);
+}
+
+void
+CodeGeneratorX86Shared::visitNegF(LNegF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+
+ masm.negateFloat(input);
+}
+
+void
+CodeGeneratorX86Shared::visitSimd128Int(LSimd128Int* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantSimd128Int(ins->getValue(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorX86Shared::visitSimd128Float(LSimd128Float* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantSimd128Float(ins->getValue(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorX86Shared::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4* ins)
+{
+ FloatRegister in = ToFloatRegister(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+ masm.convertInt32x4ToFloat32x4(in, out);
+}
+
+void
+CodeGeneratorX86Shared::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins)
+{
+ FloatRegister in = ToFloatRegister(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+ Register temp = ToRegister(ins->temp());
+
+ masm.convertFloat32x4ToInt32x4(in, out);
+
+ auto* ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins, ins->mir()->trapOffset());
+ addOutOfLineCode(ool, ins->mir());
+
+ static const SimdConstant InvalidResult = SimdConstant::SplatX4(int32_t(-2147483648));
+
+ ScratchSimd128Scope scratch(masm);
+ masm.loadConstantSimd128Int(InvalidResult, scratch);
+ masm.packedEqualInt32x4(Operand(out), scratch);
+ // TODO (bug 1156228): If we have SSE4.1, we can use PTEST here instead of
+ // the two following instructions.
+ masm.vmovmskps(scratch, temp);
+ masm.cmp32(temp, Imm32(0));
+ masm.j(Assembler::NotEqual, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck *ool)
+{
+ static const SimdConstant Int32MaxX4 = SimdConstant::SplatX4(2147483647.f);
+ static const SimdConstant Int32MinX4 = SimdConstant::SplatX4(-2147483648.f);
+
+ Label onConversionError;
+
+ FloatRegister input = ool->input();
+ Register temp = ool->temp();
+
+ ScratchSimd128Scope scratch(masm);
+ masm.loadConstantSimd128Float(Int32MinX4, scratch);
+ masm.vcmpleps(Operand(input), scratch, scratch);
+ masm.vmovmskps(scratch, temp);
+ masm.cmp32(temp, Imm32(15));
+ masm.j(Assembler::NotEqual, &onConversionError);
+
+ masm.loadConstantSimd128Float(Int32MaxX4, scratch);
+ masm.vcmpleps(Operand(input), scratch, scratch);
+ masm.vmovmskps(scratch, temp);
+ masm.cmp32(temp, Imm32(0));
+ masm.j(Assembler::NotEqual, &onConversionError);
+
+ masm.jump(ool->rejoin());
+
+ if (gen->compilingWasm()) {
+ masm.bindLater(&onConversionError, trap(ool, wasm::Trap::ImpreciseSimdConversion));
+ } else {
+ masm.bind(&onConversionError);
+ bailout(ool->ins()->snapshot());
+ }
+}
+
+// Convert Float32x4 to Uint32x4.
+//
+// If any input lane value is out of range or NaN, bail out.
+void
+CodeGeneratorX86Shared::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
+{
+ const MSimdConvert* mir = ins->mir();
+ FloatRegister in = ToFloatRegister(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+ Register temp = ToRegister(ins->tempR());
+ FloatRegister tempF = ToFloatRegister(ins->tempF());
+
+ // Classify lane values into 4 disjoint classes:
+ //
+ // N-lanes: in <= -1.0
+ // A-lanes: -1.0 < in <= 0x0.ffffffp31
+ // B-lanes: 0x1.0p31 <= in <= 0x0.ffffffp32
+ // V-lanes: 0x1.0p32 <= in, or isnan(in)
+ //
+ // We need to bail out to throw a RangeError if we see any N-lanes or
+ // V-lanes.
+ //
+ // For A-lanes and B-lanes, we make two float -> int32 conversions:
+ //
+ // A = cvttps2dq(in)
+ // B = cvttps2dq(in - 0x1.0p31f)
+ //
+ // Note that the subtraction for the B computation is exact for B-lanes.
+ // There is no rounding, so B is the low 31 bits of the correctly converted
+ // result.
+ //
+ // The cvttps2dq instruction produces 0x80000000 when the input is NaN or
+ // out of range for a signed int32_t. This conveniently provides the missing
+ // high bit for B, so the desired result is A for A-lanes and A|B for
+ // B-lanes.
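+    // e.g. an input lane of 3221225472.0f (0x1.8p31, a B-lane): A is
+    // 0x80000000 (out of signed range) and B == cvttps2dq(0x1.0p30) ==
+    // 0x40000000, so A|B == 0xC0000000 == 3221225472 as a uint32.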
+
+ ScratchSimd128Scope scratch(masm);
+
+ // TODO: If the majority of lanes are A-lanes, it could be faster to compute
+ // A first, use vmovmskps to check for any non-A-lanes and handle them in
+    // ool code. OTOH, if we're wrong about the lane distribution, that would be
+ // slower.
+
+ // Compute B in |scratch|.
+ static const float Adjust = 0x80000000; // 0x1.0p31f for the benefit of MSVC.
+ static const SimdConstant Bias = SimdConstant::SplatX4(-Adjust);
+ masm.loadConstantSimd128Float(Bias, scratch);
+ masm.packedAddFloat32(Operand(in), scratch);
+ masm.convertFloat32x4ToInt32x4(scratch, scratch);
+
+ // Compute A in |out|. This is the last time we use |in| and the first time
+ // we use |out|, so we can tolerate if they are the same register.
+ masm.convertFloat32x4ToInt32x4(in, out);
+
+ // We can identify A-lanes by the sign bits in A: Any A-lanes will be
+ // positive in A, and N, B, and V-lanes will be 0x80000000 in A. Compute a
+ // mask of non-A-lanes into |tempF|.
+ masm.zeroSimd128Float(tempF);
+ masm.packedGreaterThanInt32x4(Operand(out), tempF);
+
+ // Clear the A-lanes in B.
+ masm.bitwiseAndSimd128(Operand(tempF), scratch);
+
+ // Compute the final result: A for A-lanes, A|B for B-lanes.
+ masm.bitwiseOrSimd128(Operand(scratch), out);
+
+ // We still need to filter out the V-lanes. They would show up as 0x80000000
+ // in both A and B. Since we cleared the valid A-lanes in B, the V-lanes are
+ // the remaining negative lanes in B.
+ masm.vmovmskps(scratch, temp);
+ masm.cmp32(temp, Imm32(0));
+
+ if (gen->compilingWasm())
+ masm.j(Assembler::NotEqual, trap(mir, wasm::Trap::ImpreciseSimdConversion));
+ else
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+}
+
+void
+CodeGeneratorX86Shared::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
+{
+ MOZ_ASSERT(ins->mir()->type() == MIRType::Int32x4 || ins->mir()->type() == MIRType::Bool32x4);
+
+ FloatRegister output = ToFloatRegister(ins->output());
+ if (AssemblerX86Shared::HasSSE41()) {
+ masm.vmovd(ToRegister(ins->getOperand(0)), output);
+ for (size_t i = 1; i < 4; ++i) {
+ Register r = ToRegister(ins->getOperand(i));
+ masm.vpinsrd(i, r, output, output);
+ }
+ return;
+ }
+
+ masm.reserveStack(Simd128DataSize);
+ for (size_t i = 0; i < 4; ++i) {
+ Register r = ToRegister(ins->getOperand(i));
+ masm.store32(r, Address(StackPointer, i * sizeof(int32_t)));
+ }
+ masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+ masm.freeStack(Simd128DataSize);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdValueFloat32x4(LSimdValueFloat32x4* ins)
+{
+ MOZ_ASSERT(ins->mir()->type() == MIRType::Float32x4);
+
+ FloatRegister r0 = ToFloatRegister(ins->getOperand(0));
+ FloatRegister r1 = ToFloatRegister(ins->getOperand(1));
+ FloatRegister r2 = ToFloatRegister(ins->getOperand(2));
+ FloatRegister r3 = ToFloatRegister(ins->getOperand(3));
+ FloatRegister tmp = ToFloatRegister(ins->getTemp(0));
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ FloatRegister r0Copy = masm.reusedInputFloat32x4(r0, output);
+ FloatRegister r1Copy = masm.reusedInputFloat32x4(r1, tmp);
+
+ masm.vunpcklps(r3, r1Copy, tmp);
+ masm.vunpcklps(r2, r0Copy, output);
+ masm.vunpcklps(tmp, output, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSplatX16(LSimdSplatX16* ins)
+{
+ MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 16);
+ Register input = ToRegister(ins->getOperand(0));
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.vmovd(input, output);
+ if (AssemblerX86Shared::HasSSSE3()) {
+ masm.zeroSimd128Int(ScratchSimd128Reg);
+ masm.vpshufb(ScratchSimd128Reg, output, output);
+ } else {
+ // Use two shifts to duplicate the low 8 bits into the low 16 bits.
+ masm.vpsllw(Imm32(8), output, output);
+ masm.vmovdqa(output, ScratchSimd128Reg);
+ masm.vpsrlw(Imm32(8), ScratchSimd128Reg, ScratchSimd128Reg);
+ masm.vpor(ScratchSimd128Reg, output, output);
+ // Then do an X8 splat.
+ masm.vpshuflw(0, output, output);
+ masm.vpshufd(0, output, output);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSplatX8(LSimdSplatX8* ins)
+{
+ MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 8);
+ Register input = ToRegister(ins->getOperand(0));
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.vmovd(input, output);
+ masm.vpshuflw(0, output, output);
+ masm.vpshufd(0, output, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSplatX4(LSimdSplatX4* ins)
+{
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ MSimdSplat* mir = ins->mir();
+ MOZ_ASSERT(IsSimdType(mir->type()));
+ JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
+
+ if (mir->type() == MIRType::Float32x4) {
+ FloatRegister r = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rCopy = masm.reusedInputFloat32x4(r, output);
+ masm.vshufps(0, rCopy, rCopy, output);
+ } else {
+ Register r = ToRegister(ins->getOperand(0));
+ masm.vmovd(r, output);
+ masm.vpshufd(0, output, output);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitSimdReinterpretCast(LSimdReinterpretCast* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ if (input.aliases(output))
+ return;
+
+ if (IsIntegerSimdType(ins->mir()->type()))
+ masm.vmovdqa(input, output);
+ else
+ masm.vmovaps(input, output);
+}
+
+// Extract an integer lane from the 32x4 vector register |input| and place it in
+// |output|.
+void
+CodeGeneratorX86Shared::emitSimdExtractLane32x4(FloatRegister input, Register output, unsigned lane)
+{
+ if (lane == 0) {
+ // The value we want to extract is in the low double-word
+ masm.moveLowInt32(input, output);
+ } else if (AssemblerX86Shared::HasSSE41()) {
+ masm.vpextrd(lane, input, output);
+ } else {
+ uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
+ masm.shuffleInt32(mask, input, ScratchSimd128Reg);
+ masm.moveLowInt32(ScratchSimd128Reg, output);
+ }
+}
+
+// Extract an integer lane from the 16x8 vector register |input|, sign- or
+// zero-extend to 32 bits and place the result in |output|.
+void
+CodeGeneratorX86Shared::emitSimdExtractLane16x8(FloatRegister input, Register output,
+ unsigned lane, SimdSign signedness)
+{
+ // Unlike pextrd and pextrb, this is available in SSE2.
+ masm.vpextrw(lane, input, output);
+
+ if (signedness == SimdSign::Signed)
+ masm.movswl(output, output);
+}
+
+// Extract an integer lane from the 8x16 vector register |input|, sign- or
+// zero-extend to 32 bits and place the result in |output|.
+void
+CodeGeneratorX86Shared::emitSimdExtractLane8x16(FloatRegister input, Register output,
+ unsigned lane, SimdSign signedness)
+{
+ if (AssemblerX86Shared::HasSSE41()) {
+ masm.vpextrb(lane, input, output);
+ // vpextrb clears the high bits, so no further extension required.
+ if (signedness == SimdSign::Unsigned)
+ signedness = SimdSign::NotApplicable;
+ } else {
+        // Extract the 16-bit lane containing our byte, then, if the byte is
+        // the high half of that lane, shift it down into the low 8 bits.
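+        // e.g. lane == 5: 16-bit lane 2 holds bytes 4 and 5; byte 5 is the
+        // high half, so the shift by 8 below brings it into the low 8 bits.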
+ emitSimdExtractLane16x8(input, output, lane / 2, SimdSign::Unsigned);
+ if (lane % 2) {
+ masm.shrl(Imm32(8), output);
+ // The shrl handles the zero-extension. Don't repeat it.
+ if (signedness == SimdSign::Unsigned)
+ signedness = SimdSign::NotApplicable;
+ }
+ }
+
+ // We have the right low 8 bits in |output|, but we may need to fix the high
+ // bits. Note that this requires |output| to be one of the %eax-%edx
+ // registers.
+ switch (signedness) {
+ case SimdSign::Signed:
+ masm.movsbl(output, output);
+ break;
+ case SimdSign::Unsigned:
+ masm.movzbl(output, output);
+ break;
+ case SimdSign::NotApplicable:
+ // No adjustment needed.
+ break;
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitSimdExtractElementB(LSimdExtractElementB* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ MSimdExtractElement* mir = ins->mir();
+ unsigned length = SimdTypeToLength(mir->specialization());
+
+ switch (length) {
+ case 4:
+ emitSimdExtractLane32x4(input, output, mir->lane());
+ break;
+ case 8:
+ // Get a lane, don't bother fixing the high bits since we'll mask below.
+ emitSimdExtractLane16x8(input, output, mir->lane(), SimdSign::NotApplicable);
+ break;
+ case 16:
+ emitSimdExtractLane8x16(input, output, mir->lane(), SimdSign::NotApplicable);
+ break;
+ default:
+ MOZ_CRASH("Unhandled SIMD length");
+ }
+
+ // We need to generate a 0/1 value. We have 0/-1 and possibly dirty high bits.
+ masm.and32(Imm32(1), output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdExtractElementI(LSimdExtractElementI* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ MSimdExtractElement* mir = ins->mir();
+ unsigned length = SimdTypeToLength(mir->specialization());
+
+ switch (length) {
+ case 4:
+ emitSimdExtractLane32x4(input, output, mir->lane());
+ break;
+ case 8:
+ emitSimdExtractLane16x8(input, output, mir->lane(), mir->signedness());
+ break;
+ case 16:
+ emitSimdExtractLane8x16(input, output, mir->lane(), mir->signedness());
+ break;
+ default:
+ MOZ_CRASH("Unhandled SIMD length");
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitSimdExtractElementU2D(LSimdExtractElementU2D* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ Register temp = ToRegister(ins->temp());
+ MSimdExtractElement* mir = ins->mir();
+ MOZ_ASSERT(mir->specialization() == MIRType::Int32x4);
+ emitSimdExtractLane32x4(input, temp, mir->lane());
+ masm.convertUInt32ToDouble(temp, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdExtractElementF(LSimdExtractElementF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ unsigned lane = ins->mir()->lane();
+ if (lane == 0) {
+ // The value we want to extract is in the low double-word
+ if (input != output)
+ masm.moveFloat32(input, output);
+ } else if (lane == 2) {
+ masm.moveHighPairToLowPairFloat32(input, output);
+ } else {
+ uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
+ masm.shuffleFloat32(mask, input, output);
+ }
+ // NaNs contained within SIMD values are not enforced to be canonical, so
+ // when we extract an element into a "regular" scalar JS value, we have to
+ // canonicalize. In wasm code, we can skip this, as wasm only has to
+ // canonicalize NaNs at FFI boundaries.
+ if (!gen->compilingWasm())
+ masm.canonicalizeFloat(output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdInsertElementI(LSimdInsertElementI* ins)
+{
+ FloatRegister vector = ToFloatRegister(ins->vector());
+ Register value = ToRegister(ins->value());
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(vector == output); // defineReuseInput(0)
+
+ unsigned lane = ins->lane();
+ unsigned length = ins->length();
+
+ if (length == 8) {
+ // Available in SSE2.
+ masm.vpinsrw(lane, value, vector, output);
+ return;
+ }
+
+ // Note that, unlike float32x4, we cannot use vmovd if the inserted value
+ // goes into the first component, as vmovd clears out the higher lanes of
+ // the output.
+ if (AssemblerX86Shared::HasSSE41()) {
+ // TODO: Teach Lowering that we don't need defineReuseInput if we have AVX.
+ switch (length) {
+ case 4:
+ masm.vpinsrd(lane, value, vector, output);
+ return;
+ case 16:
+ masm.vpinsrb(lane, value, vector, output);
+ return;
+ }
+ }
+
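+ // Fallback when SSE4.1 is unavailable: bounce through memory by spilling
+ // the vector, overwriting the lane in place, and reloading the result.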
+ masm.reserveStack(Simd128DataSize);
+ masm.storeAlignedSimd128Int(vector, Address(StackPointer, 0));
+ switch (length) {
+ case 4:
+ masm.store32(value, Address(StackPointer, lane * sizeof(int32_t)));
+ break;
+ case 16:
+ // Note that this requires `value` to be in one of the registers whose
+ // low 8 bits are addressable (%eax-%edx on x86, all of them on x86-64).
+ masm.store8(value, Address(StackPointer, lane * sizeof(int8_t)));
+ break;
+ default:
+ MOZ_CRASH("Unsupported SIMD length");
+ }
+ masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+ masm.freeStack(Simd128DataSize);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdInsertElementF(LSimdInsertElementF* ins)
+{
+ FloatRegister vector = ToFloatRegister(ins->vector());
+ FloatRegister value = ToFloatRegister(ins->value());
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(vector == output); // defineReuseInput(0)
+
+ if (ins->lane() == 0) {
+ // As both operands are registers, vmovss doesn't modify the upper bits
+ // of the destination operand.
+ if (value != output)
+ masm.vmovss(value, vector, output);
+ return;
+ }
+
+ if (AssemblerX86Shared::HasSSE41()) {
+ // The input value is in the low float32 of the 'value' FloatRegister.
+ masm.vinsertps(masm.vinsertpsMask(0, ins->lane()), value, output, output);
+ return;
+ }
+
+ unsigned component = unsigned(ins->lane());
+ masm.reserveStack(Simd128DataSize);
+ masm.storeAlignedSimd128Float(vector, Address(StackPointer, 0));
+ masm.storeFloat32(value, Address(StackPointer, component * sizeof(int32_t)));
+ masm.loadAlignedSimd128Float(Address(StackPointer, 0), output);
+ masm.freeStack(Simd128DataSize);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdAllTrue(LSimdAllTrue* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
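+ // vmovmskps packs the sign bit of each 32-bit lane into the low four bits
+ // of the output register; the result is all-true only when all four bits
+ // are set.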
+ masm.vmovmskps(input, output);
+ masm.cmp32(output, Imm32(0xf));
+ masm.emitSet(Assembler::Zero, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdAnyTrue(LSimdAnyTrue* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
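+ // Any nonzero bit in the vmovmskps result means at least one lane is true.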
+ masm.vmovmskps(input, output);
+ masm.cmp32(output, Imm32(0x0));
+ masm.emitSet(Assembler::NonZero, output);
+}
+
+template <class T, class Reg> void
+CodeGeneratorX86Shared::visitSimdGeneralShuffle(LSimdGeneralShuffleBase* ins, Reg tempRegister)
+{
+ MSimdGeneralShuffle* mir = ins->mir();
+ unsigned numVectors = mir->numVectors();
+
+ Register laneTemp = ToRegister(ins->temp());
+
+ // This won't generate fast code, but that's fine because we expect users
+ // to have used constant indices (and thus MSimdGeneralShuffle to be folded
+ // into MSimdSwizzle/MSimdShuffle, which are fast).
+
+ // We need stack space for the numVectors inputs and for the output vector.
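+ // The output is assembled at offset 0; input vector i is stored at offset
+ // Simd128DataSize * (1 + i).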
+ unsigned stackSpace = Simd128DataSize * (numVectors + 1);
+ masm.reserveStack(stackSpace);
+
+ for (unsigned i = 0; i < numVectors; i++) {
+ masm.storeAlignedVector<T>(ToFloatRegister(ins->vector(i)),
+ Address(StackPointer, Simd128DataSize * (1 + i)));
+ }
+
+ Label bail;
+ const Scale laneScale = ScaleFromElemWidth(sizeof(T));
+
+ for (size_t i = 0; i < mir->numLanes(); i++) {
+ Operand lane = ToOperand(ins->lane(i));
+
+ masm.cmp32(lane, Imm32(numVectors * mir->numLanes() - 1));
+ masm.j(Assembler::Above, &bail);
+
+ if (lane.kind() == Operand::REG) {
+ masm.loadScalar<T>(Operand(StackPointer, ToRegister(ins->lane(i)), laneScale, Simd128DataSize),
+ tempRegister);
+ } else {
+ masm.load32(lane, laneTemp);
+ masm.loadScalar<T>(Operand(StackPointer, laneTemp, laneScale, Simd128DataSize), tempRegister);
+ }
+
+ masm.storeScalar<T>(tempRegister, Address(StackPointer, i * sizeof(T)));
+ }
+
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.loadAlignedVector<T>(Address(StackPointer, 0), output);
+
+ Label join;
+ masm.jump(&join);
+
+ {
+ masm.bind(&bail);
+ masm.freeStack(stackSpace);
+ bailout(ins->snapshot());
+ }
+
+ masm.bind(&join);
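+ // The freeStack in the bailout block above already lowered the assembler's
+ // framePushed bookkeeping, so bump it back up before the freeStack below.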
+ masm.setFramePushed(masm.framePushed() + stackSpace);
+ masm.freeStack(stackSpace);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdGeneralShuffleI(LSimdGeneralShuffleI* ins)
+{
+ switch (ins->mir()->type()) {
+ case MIRType::Int8x16:
+ return visitSimdGeneralShuffle<int8_t, Register>(ins, ToRegister(ins->temp()));
+ case MIRType::Int16x8:
+ return visitSimdGeneralShuffle<int16_t, Register>(ins, ToRegister(ins->temp()));
+ case MIRType::Int32x4:
+ return visitSimdGeneralShuffle<int32_t, Register>(ins, ToRegister(ins->temp()));
+ default:
+ MOZ_CRASH("unsupported type for general shuffle");
+ }
+}
+void
+CodeGeneratorX86Shared::visitSimdGeneralShuffleF(LSimdGeneralShuffleF* ins)
+{
+ ScratchFloat32Scope scratch(masm);
+ visitSimdGeneralShuffle<float, FloatRegister>(ins, scratch);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSwizzleI(LSimdSwizzleI* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ const unsigned numLanes = ins->numLanes();
+
+ switch (numLanes) {
+ case 4: {
+ uint32_t x = ins->lane(0);
+ uint32_t y = ins->lane(1);
+ uint32_t z = ins->lane(2);
+ uint32_t w = ins->lane(3);
+
+ uint32_t mask = MacroAssembler::ComputeShuffleMask(x, y, z, w);
+ masm.shuffleInt32(mask, input, output);
+ return;
+ }
+ }
+
+ // In the general case, use pshufb if it is available. Convert to a
+ // byte-wise swizzle.
+ const unsigned bytesPerLane = 16 / numLanes;
+ int8_t bLane[16];
+ for (unsigned i = 0; i < numLanes; i++) {
+ for (unsigned b = 0; b < bytesPerLane; b++) {
+ bLane[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+ }
+ }
+
+ if (AssemblerX86Shared::HasSSSE3()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadConstantSimd128Int(SimdConstant::CreateX16(bLane), scratch);
+ FloatRegister inputCopy = masm.reusedInputInt32x4(input, output);
+ masm.vpshufb(scratch, inputCopy, output);
+ return;
+ }
+
+ // Worst-case fallback for pre-SSSE3 machines. Bounce through memory.
+ Register temp = ToRegister(ins->getTemp(0));
+ masm.reserveStack(2 * Simd128DataSize);
+ masm.storeAlignedSimd128Int(input, Address(StackPointer, Simd128DataSize));
+ for (unsigned i = 0; i < 16; i++) {
+ masm.load8ZeroExtend(Address(StackPointer, Simd128DataSize + bLane[i]), temp);
+ masm.store8(temp, Address(StackPointer, i));
+ }
+ masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+ masm.freeStack(2 * Simd128DataSize);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSwizzleF(LSimdSwizzleF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ins->numLanes() == 4);
+
+ uint32_t x = ins->lane(0);
+ uint32_t y = ins->lane(1);
+ uint32_t z = ins->lane(2);
+ uint32_t w = ins->lane(3);
+
+ if (AssemblerX86Shared::HasSSE3()) {
+ if (ins->lanesMatch(0, 0, 2, 2)) {
+ masm.vmovsldup(input, output);
+ return;
+ }
+ if (ins->lanesMatch(1, 1, 3, 3)) {
+ masm.vmovshdup(input, output);
+ return;
+ }
+ }
+
+ // TODO Here and below, arch specific lowering could identify this pattern
+ // and use defineReuseInput to avoid this move (bug 1084404)
+ if (ins->lanesMatch(2, 3, 2, 3)) {
+ FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+ masm.vmovhlps(input, inputCopy, output);
+ return;
+ }
+
+ if (ins->lanesMatch(0, 1, 0, 1)) {
+ if (AssemblerX86Shared::HasSSE3() && !AssemblerX86Shared::HasAVX()) {
+ masm.vmovddup(input, output);
+ return;
+ }
+ FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+ masm.vmovlhps(input, inputCopy, output);
+ return;
+ }
+
+ if (ins->lanesMatch(0, 0, 1, 1)) {
+ FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+ masm.vunpcklps(input, inputCopy, output);
+ return;
+ }
+
+ if (ins->lanesMatch(2, 2, 3, 3)) {
+ FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+ masm.vunpckhps(input, inputCopy, output);
+ return;
+ }
+
+ uint32_t mask = MacroAssembler::ComputeShuffleMask(x, y, z, w);
+ masm.shuffleFloat32(mask, input, output);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdShuffle(LSimdShuffle* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+ const unsigned numLanes = ins->numLanes();
+ const unsigned bytesPerLane = 16 / numLanes;
+
+ // Convert the shuffle to a byte-wise shuffle.
+ uint8_t bLane[16];
+ for (unsigned i = 0; i < numLanes; i++) {
+ for (unsigned b = 0; b < bytesPerLane; b++) {
+ bLane[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+ }
+ }
+
+ // Use pshufb if it is available.
+ if (AssemblerX86Shared::HasSSSE3()) {
+ FloatRegister scratch1 = ToFloatRegister(ins->temp());
+ ScratchSimd128Scope scratch2(masm);
+
+ // Use pshufb instructions to gather the lanes from each source vector.
+ // A negative index creates a zero lane, so the two vectors can be combined.
+
+ // Set scratch2 = lanes from lhs.
+ int8_t idx[16];
+ for (unsigned i = 0; i < 16; i++)
+ idx[i] = bLane[i] < 16 ? bLane[i] : -1;
+ masm.loadConstantSimd128Int(SimdConstant::CreateX16(idx), scratch1);
+ FloatRegister lhsCopy = masm.reusedInputInt32x4(lhs, scratch2);
+ masm.vpshufb(scratch1, lhsCopy, scratch2);
+
+ // Set output = lanes from rhs.
+ for (unsigned i = 0; i < 16; i++)
+ idx[i] = bLane[i] >= 16 ? bLane[i] - 16 : -1;
+ masm.loadConstantSimd128Int(SimdConstant::CreateX16(idx), scratch1);
+ FloatRegister rhsCopy = masm.reusedInputInt32x4(rhs, output);
+ masm.vpshufb(scratch1, rhsCopy, output);
+
+ // Combine.
+ masm.vpor(scratch2, output, output);
+ return;
+ }
+
+ // Worst-case fallback for pre-SSSE3 machines. Bounce through memory.
+ Register temp = ToRegister(ins->getTemp(0));
+ masm.reserveStack(3 * Simd128DataSize);
+ masm.storeAlignedSimd128Int(lhs, Address(StackPointer, Simd128DataSize));
+ masm.storeAlignedSimd128Int(rhs, Address(StackPointer, 2 * Simd128DataSize));
+ for (unsigned i = 0; i < 16; i++) {
+ masm.load8ZeroExtend(Address(StackPointer, Simd128DataSize + bLane[i]), temp);
+ masm.store8(temp, Address(StackPointer, i));
+ }
+ masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+ masm.freeStack(3 * Simd128DataSize);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdShuffleX4(LSimdShuffleX4* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ uint32_t x = ins->lane(0);
+ uint32_t y = ins->lane(1);
+ uint32_t z = ins->lane(2);
+ uint32_t w = ins->lane(3);
+
+ // Check that the majority of lanes come from LHS:
+ unsigned numLanesFromLHS = (x < 4) + (y < 4) + (z < 4) + (w < 4);
+ MOZ_ASSERT(numLanesFromLHS >= 2);
+
+ // When reading this method, remember that vshufps takes the first two
+ // inputs from the destination operand (right operand) and the last two
+ // inputs from the source operand (left operand).
+ //
+ // Legend for explanations:
+ // - L: LHS
+ // - R: RHS
+ // - T: temporary
+
+ uint32_t mask;
+
+ // If all lanes came from a single vector, we should have constructed a
+ // MSimdSwizzle instead.
+ MOZ_ASSERT(numLanesFromLHS < 4);
+
+ // If all values stay in their lane, this is a blend.
+ if (AssemblerX86Shared::HasSSE41()) {
+ if (x % 4 == 0 && y % 4 == 1 && z % 4 == 2 && w % 4 == 3) {
+ masm.vblendps(masm.blendpsMask(x >= 4, y >= 4, z >= 4, w >= 4), rhs, lhs, out);
+ return;
+ }
+ }
+
+ // One element of the second, all other elements of the first
+ if (numLanesFromLHS == 3) {
+ unsigned firstMask = -1, secondMask = -1;
+
+ // register-register vmovss preserves the high lanes.
+ if (ins->lanesMatch(4, 1, 2, 3) && rhs.kind() == Operand::FPREG) {
+ masm.vmovss(FloatRegister::FromCode(rhs.fpu()), lhs, out);
+ return;
+ }
+
+ // SSE4.1 vinsertps can handle any single element.
+ unsigned numLanesUnchanged = (x == 0) + (y == 1) + (z == 2) + (w == 3);
+ if (AssemblerX86Shared::HasSSE41() && numLanesUnchanged == 3) {
+ unsigned srcLane;
+ unsigned dstLane;
+ if (x >= 4) {
+ srcLane = x - 4;
+ dstLane = 0;
+ } else if (y >= 4) {
+ srcLane = y - 4;
+ dstLane = 1;
+ } else if (z >= 4) {
+ srcLane = z - 4;
+ dstLane = 2;
+ } else {
+ MOZ_ASSERT(w >= 4);
+ srcLane = w - 4;
+ dstLane = 3;
+ }
+ masm.vinsertps(masm.vinsertpsMask(srcLane, dstLane), rhs, lhs, out);
+ return;
+ }
+
+ FloatRegister rhsCopy = ToFloatRegister(ins->temp());
+
+ if (x < 4 && y < 4) {
+ if (w >= 4) {
+ w %= 4;
+ // T = (Rw Rw Lz Lz) = vshufps(firstMask, lhs, rhs, rhsCopy)
+ firstMask = MacroAssembler::ComputeShuffleMask(w, w, z, z);
+ // (Lx Ly Lz Rw) = (Lx Ly Tz Tx) = vshufps(secondMask, T, lhs, out)
+ secondMask = MacroAssembler::ComputeShuffleMask(x, y, 2, 0);
+ } else {
+ MOZ_ASSERT(z >= 4);
+ z %= 4;
+ // T = (Rz Rz Lw Lw) = vshufps(firstMask, lhs, rhs, rhsCopy)
+ firstMask = MacroAssembler::ComputeShuffleMask(z, z, w, w);
+ // (Lx Ly Rz Lw) = (Lx Ly Tx Tz) = vshufps(secondMask, T, lhs, out)
+ secondMask = MacroAssembler::ComputeShuffleMask(x, y, 0, 2);
+ }
+
+ masm.vshufps(firstMask, lhs, rhsCopy, rhsCopy);
+ masm.vshufps(secondMask, rhsCopy, lhs, out);
+ return;
+ }
+
+ MOZ_ASSERT(z < 4 && w < 4);
+
+ if (y >= 4) {
+ y %= 4;
+ // T = (Ry Ry Lx Lx) = vshufps(firstMask, lhs, rhs, rhsCopy)
+ firstMask = MacroAssembler::ComputeShuffleMask(y, y, x, x);
+ // (Lx Ry Lz Lw) = (Tz Tx Lz Lw) = vshufps(secondMask, lhs, T, out)
+ secondMask = MacroAssembler::ComputeShuffleMask(2, 0, z, w);
+ } else {
+ MOZ_ASSERT(x >= 4);
+ x %= 4;
+ // T = (Rx Rx Ly Ly) = vshufps(firstMask, lhs, rhs, rhsCopy)
+ firstMask = MacroAssembler::ComputeShuffleMask(x, x, y, y);
+ // (Rx Ly Lz Lw) = (Tx Tz Lz Lw) = vshufps(secondMask, lhs, T, out)
+ secondMask = MacroAssembler::ComputeShuffleMask(0, 2, z, w);
+ }
+
+ masm.vshufps(firstMask, lhs, rhsCopy, rhsCopy);
+ if (AssemblerX86Shared::HasAVX()) {
+ masm.vshufps(secondMask, lhs, rhsCopy, out);
+ } else {
+ masm.vshufps(secondMask, lhs, rhsCopy, rhsCopy);
+ masm.moveSimd128Float(rhsCopy, out);
+ }
+ return;
+ }
+
+ // Two elements from one vector, two other elements from the other
+ MOZ_ASSERT(numLanesFromLHS == 2);
+
+ // TODO Here and below, the symmetric case would be handier since it avoids
+ // a move, but it can't be reached because operands would get swapped (bug 1084404).
+ if (ins->lanesMatch(2, 3, 6, 7)) {
+ ScratchSimd128Scope scratch(masm);
+ if (AssemblerX86Shared::HasAVX()) {
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+ masm.vmovhlps(lhs, rhsCopy, out);
+ } else {
+ masm.loadAlignedSimd128Float(rhs, scratch);
+ masm.vmovhlps(lhs, scratch, scratch);
+ masm.moveSimd128Float(scratch, out);
+ }
+ return;
+ }
+
+ if (ins->lanesMatch(0, 1, 4, 5)) {
+ FloatRegister rhsCopy;
+ ScratchSimd128Scope scratch(masm);
+ if (rhs.kind() == Operand::FPREG) {
+ // No need to make an actual copy, since the operand is already
+ // in a register, and it won't be clobbered by the vmovlhps.
+ rhsCopy = FloatRegister::FromCode(rhs.fpu());
+ } else {
+ masm.loadAlignedSimd128Float(rhs, scratch);
+ rhsCopy = scratch;
+ }
+ masm.vmovlhps(rhsCopy, lhs, out);
+ return;
+ }
+
+ if (ins->lanesMatch(0, 4, 1, 5)) {
+ masm.vunpcklps(rhs, lhs, out);
+ return;
+ }
+
+ // TODO swapped case would be better (bug 1084404)
+ if (ins->lanesMatch(4, 0, 5, 1)) {
+ ScratchSimd128Scope scratch(masm);
+ if (AssemblerX86Shared::HasAVX()) {
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+ masm.vunpcklps(lhs, rhsCopy, out);
+ } else {
+ masm.loadAlignedSimd128Float(rhs, scratch);
+ masm.vunpcklps(lhs, scratch, scratch);
+ masm.moveSimd128Float(scratch, out);
+ }
+ return;
+ }
+
+ if (ins->lanesMatch(2, 6, 3, 7)) {
+ masm.vunpckhps(rhs, lhs, out);
+ return;
+ }
+
+ // TODO swapped case would be better (bug 1084404)
+ if (ins->lanesMatch(6, 2, 7, 3)) {
+ ScratchSimd128Scope scratch(masm);
+ if (AssemblerX86Shared::HasAVX()) {
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+ masm.vunpckhps(lhs, rhsCopy, out);
+ } else {
+ masm.loadAlignedSimd128Float(rhs, scratch);
+ masm.vunpckhps(lhs, scratch, scratch);
+ masm.moveSimd128Float(scratch, out);
+ }
+ return;
+ }
+
+ // In one vshufps
+ if (x < 4 && y < 4) {
+ mask = MacroAssembler::ComputeShuffleMask(x, y, z % 4, w % 4);
+ masm.vshufps(mask, rhs, lhs, out);
+ return;
+ }
+
+ // At creation, we should have explicitly swapped in this case.
+ MOZ_ASSERT(!(z >= 4 && w >= 4));
+
+ // In two vshufps, for the most generic case:
+ uint32_t firstMask[4], secondMask[4];
+ unsigned i = 0, j = 2, k = 0;
+
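+ // The first vshufps packs the selected LHS lanes into the low half of the
+ // result and the selected RHS lanes into the high half; secondMask records
+ // where each lane landed so the second vshufps can move it to its final slot.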
+#define COMPUTE_MASK(lane) \
+ if (lane >= 4) { \
+ firstMask[j] = lane % 4; \
+ secondMask[k++] = j++; \
+ } else { \
+ firstMask[i] = lane; \
+ secondMask[k++] = i++; \
+ }
+
+ COMPUTE_MASK(x)
+ COMPUTE_MASK(y)
+ COMPUTE_MASK(z)
+ COMPUTE_MASK(w)
+#undef COMPUTE_MASK
+
+ MOZ_ASSERT(i == 2 && j == 4 && k == 4);
+
+ mask = MacroAssembler::ComputeShuffleMask(firstMask[0], firstMask[1],
+ firstMask[2], firstMask[3]);
+ masm.vshufps(mask, rhs, lhs, lhs);
+
+ mask = MacroAssembler::ComputeShuffleMask(secondMask[0], secondMask[1],
+ secondMask[2], secondMask[3]);
+ masm.vshufps(mask, lhs, lhs, lhs);
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryCompIx16(LSimdBinaryCompIx16* ins)
+{
+ static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
+
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+
+ ScratchSimd128Scope scratch(masm);
+
+ MSimdBinaryComp::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryComp::greaterThan:
+ masm.vpcmpgtb(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::equal:
+ masm.vpcmpeqb(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::lessThan:
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+
+ // src := src > lhs (i.e. lhs < rhs)
+ // Improve by doing custom lowering (rhs is tied to the output register)
+ masm.vpcmpgtb(ToOperand(ins->lhs()), scratch, scratch);
+ masm.moveSimd128Int(scratch, output);
+ return;
+ case MSimdBinaryComp::notEqual:
+ // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+ // should invert the comparison by, e.g. swapping the arms of a select
+ // if that's what it's used in.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.vpcmpeqb(rhs, lhs, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ case MSimdBinaryComp::greaterThanOrEqual:
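+ // lhs >= rhs is computed as !(rhs > lhs): do the greater-than into the
+ // scratch register, then invert it with an all-ones XOR.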
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+ masm.vpcmpgtb(ToOperand(ins->lhs()), scratch, scratch);
+ masm.loadConstantSimd128Int(allOnes, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ case MSimdBinaryComp::lessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.vpcmpgtb(rhs, lhs, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryCompIx8(LSimdBinaryCompIx8* ins)
+{
+ static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
+
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+
+ ScratchSimd128Scope scratch(masm);
+
+ MSimdBinaryComp::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryComp::greaterThan:
+ masm.vpcmpgtw(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::equal:
+ masm.vpcmpeqw(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::lessThan:
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+
+ // src := src > lhs (i.e. lhs < rhs)
+ // Improve by doing custom lowering (rhs is tied to the output register)
+ masm.vpcmpgtw(ToOperand(ins->lhs()), scratch, scratch);
+ masm.moveSimd128Int(scratch, output);
+ return;
+ case MSimdBinaryComp::notEqual:
+ // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+ // should invert the comparison by, e.g. swapping the arms of a select
+ // if that's what it's used in.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.vpcmpeqw(rhs, lhs, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ case MSimdBinaryComp::greaterThanOrEqual:
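+ // lhs >= rhs is computed as !(rhs > lhs): do the greater-than into the
+ // scratch register, then invert it with an all-ones XOR.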
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+ masm.vpcmpgtw(ToOperand(ins->lhs()), scratch, scratch);
+ masm.loadConstantSimd128Int(allOnes, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ case MSimdBinaryComp::lessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.vpcmpgtw(rhs, lhs, output);
+ masm.bitwiseXorSimd128(Operand(scratch), output);
+ return;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
+{
+ static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
+
+ ScratchSimd128Scope scratch(masm);
+
+ MSimdBinaryComp::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryComp::greaterThan:
+ masm.packedGreaterThanInt32x4(rhs, lhs);
+ return;
+ case MSimdBinaryComp::equal:
+ masm.packedEqualInt32x4(rhs, lhs);
+ return;
+ case MSimdBinaryComp::lessThan:
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+
+ // src := src > lhs (i.e. lhs < rhs)
+ // Improve by doing custom lowering (rhs is tied to the output register)
+ masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
+ masm.moveSimd128Int(scratch, lhs);
+ return;
+ case MSimdBinaryComp::notEqual:
+ // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+ // should invert the comparison by, e.g. swapping the arms of a select
+ // if that's what it's used in.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.packedEqualInt32x4(rhs, lhs);
+ masm.bitwiseXorSimd128(Operand(scratch), lhs);
+ return;
+ case MSimdBinaryComp::greaterThanOrEqual:
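+ // lhs >= rhs is computed as !(rhs > lhs): do the greater-than into the
+ // scratch register, then invert it with an all-ones XOR.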
+ // src := rhs
+ if (rhs.kind() == Operand::FPREG)
+ masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+ else
+ masm.loadAlignedSimd128Int(rhs, scratch);
+ masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
+ masm.loadConstantSimd128Int(allOnes, lhs);
+ masm.bitwiseXorSimd128(Operand(scratch), lhs);
+ return;
+ case MSimdBinaryComp::lessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ masm.loadConstantSimd128Int(allOnes, scratch);
+ masm.packedGreaterThanInt32x4(rhs, lhs);
+ masm.bitwiseXorSimd128(Operand(scratch), lhs);
+ return;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ MSimdBinaryComp::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryComp::equal:
+ masm.vcmpeqps(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::lessThan:
+ masm.vcmpltps(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::lessThanOrEqual:
+ masm.vcmpleps(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::notEqual:
+ masm.vcmpneqps(rhs, lhs, output);
+ return;
+ case MSimdBinaryComp::greaterThanOrEqual:
+ case MSimdBinaryComp::greaterThan:
+ // We reverse these before register allocation so that we don't have to
+ // copy into and out of temporaries after codegen.
+ MOZ_CRASH("lowering should have reversed this");
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryArithIx16(LSimdBinaryArithIx16* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ MSimdBinaryArith::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryArith::Op_add:
+ masm.vpaddb(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_sub:
+ masm.vpsubb(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_mul:
+ // 8x16 mul is a valid operation, but not supported in SSE or AVX.
+ // The operation is synthesized from 16x8 multiplies by
+ // MSimdBinaryArith::AddLegalized().
+ break;
+ case MSimdBinaryArith::Op_div:
+ case MSimdBinaryArith::Op_max:
+ case MSimdBinaryArith::Op_min:
+ case MSimdBinaryArith::Op_minNum:
+ case MSimdBinaryArith::Op_maxNum:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryArithIx8(LSimdBinaryArithIx8* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ MSimdBinaryArith::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryArith::Op_add:
+ masm.vpaddw(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_sub:
+ masm.vpsubw(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_mul:
+ masm.vpmullw(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_div:
+ case MSimdBinaryArith::Op_max:
+ case MSimdBinaryArith::Op_min:
+ case MSimdBinaryArith::Op_minNum:
+ case MSimdBinaryArith::Op_maxNum:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ ScratchSimd128Scope scratch(masm);
+
+ MSimdBinaryArith::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryArith::Op_add:
+ masm.vpaddd(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_sub:
+ masm.vpsubd(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_mul: {
+ if (AssemblerX86Shared::HasSSE41()) {
+ masm.vpmulld(rhs, lhs, output);
+ return;
+ }
+
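+ // Without SSE4.1's pmulld, synthesize the multiply from two pmuludq, which
+ // multiply the even-numbered lanes, plus shuffles that bring the odd-numbered
+ // lanes into even positions and interleave the results.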
+ masm.loadAlignedSimd128Int(rhs, scratch);
+ masm.vpmuludq(lhs, scratch, scratch);
+ // scratch contains (Rx, _, Rz, _) where R is the resulting vector.
+
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ masm.vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), lhs, lhs);
+ masm.vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), rhs, temp);
+ masm.vpmuludq(temp, lhs, lhs);
+ // lhs contains (Ry, _, Rw, _) where R is the resulting vector.
+
+ masm.vshufps(MacroAssembler::ComputeShuffleMask(0, 2, 0, 2), scratch, lhs, lhs);
+ // lhs contains (Ry, Rw, Rx, Rz)
+ masm.vshufps(MacroAssembler::ComputeShuffleMask(2, 0, 3, 1), lhs, lhs, lhs);
+ return;
+ }
+ case MSimdBinaryArith::Op_div:
+ // x86 doesn't have SIMD i32 div.
+ break;
+ case MSimdBinaryArith::Op_max:
+ // We can do max in a single instruction (PMAXSD) only if we have SSE4.1.
+ break;
+ case MSimdBinaryArith::Op_min:
+ // We can do min in a single instruction (PMINSD) only if we have SSE4.1.
+ break;
+ case MSimdBinaryArith::Op_minNum:
+ case MSimdBinaryArith::Op_maxNum:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ ScratchSimd128Scope scratch(masm);
+
+ MSimdBinaryArith::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryArith::Op_add:
+ masm.vaddps(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_sub:
+ masm.vsubps(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_mul:
+ masm.vmulps(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_div:
+ masm.vdivps(rhs, lhs, output);
+ return;
+ case MSimdBinaryArith::Op_max: {
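+ // maxps is not symmetric when an operand is NaN or the inputs are +/-0, so
+ // take the max in both operand orders, AND the results (so max(-0, +0) is +0),
+ // then OR in a mask of lanes where either input is NaN to propagate NaNs.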
+ FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, scratch);
+ masm.vcmpunordps(rhs, lhsCopy, scratch);
+
+ FloatRegister tmp = ToFloatRegister(ins->temp());
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, tmp);
+ masm.vmaxps(Operand(lhs), rhsCopy, tmp);
+ masm.vmaxps(rhs, lhs, output);
+
+ masm.vandps(tmp, output, output);
+ masm.vorps(scratch, output, output); // or in the all-ones NaNs
+ return;
+ }
+ case MSimdBinaryArith::Op_min: {
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+ masm.vminps(Operand(lhs), rhsCopy, scratch);
+ masm.vminps(rhs, lhs, output);
+ masm.vorps(scratch, output, output); // NaN or'd with arbitrary bits is NaN
+ return;
+ }
+ case MSimdBinaryArith::Op_minNum: {
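+ // minNum returns the non-NaN operand when exactly one input is NaN. Select
+ // lhs in lanes where rhs is NaN; elsewhere take the regular min, forcing the
+ // sign bit where lhs is -0 so that min(-0, +0) is -0.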
+ FloatRegister tmp = ToFloatRegister(ins->temp());
+ masm.loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
+
+ FloatRegister mask = scratch;
+ FloatRegister tmpCopy = masm.reusedInputFloat32x4(tmp, scratch);
+ masm.vpcmpeqd(Operand(lhs), tmpCopy, mask);
+ masm.vandps(tmp, mask, mask);
+
+ FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
+ masm.vminps(rhs, lhsCopy, tmp);
+ masm.vorps(mask, tmp, tmp);
+
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
+ masm.vcmpneqps(rhs, rhsCopy, mask);
+
+ if (AssemblerX86Shared::HasAVX()) {
+ masm.vblendvps(mask, lhs, tmp, output);
+ } else {
+ // Emulate vblendvps.
+ // With SSE4.1 we could use blendvps, but it's awkward since it
+ // requires the mask to be in xmm0.
+ if (lhs != output)
+ masm.moveSimd128Float(lhs, output);
+ masm.vandps(Operand(mask), output, output);
+ masm.vandnps(Operand(tmp), mask, mask);
+ masm.vorps(Operand(mask), output, output);
+ }
+ return;
+ }
+ case MSimdBinaryArith::Op_maxNum: {
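+ // Mirror of the minNum case: select lhs in lanes where rhs is NaN; elsewhere
+ // take the regular max, clearing the sign bit where lhs is +0 so that
+ // max(+0, -0) is +0.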
+ FloatRegister mask = scratch;
+ masm.loadConstantSimd128Int(SimdConstant::SplatX4(0), mask);
+ masm.vpcmpeqd(Operand(lhs), mask, mask);
+
+ FloatRegister tmp = ToFloatRegister(ins->temp());
+ masm.loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
+ masm.vandps(tmp, mask, mask);
+
+ FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
+ masm.vmaxps(rhs, lhsCopy, tmp);
+ masm.vandnps(Operand(tmp), mask, mask);
+
+ // Ensure tmp always contains the temporary result
+ mask = tmp;
+ tmp = scratch;
+
+ FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
+ masm.vcmpneqps(rhs, rhsCopy, mask);
+
+ if (AssemblerX86Shared::HasAVX()) {
+ masm.vblendvps(mask, lhs, tmp, output);
+ } else {
+ // Emulate vblendvps.
+ // With SSE4.1 we could use blendvps, but it's awkward since it
+ // requires the mask to be in xmm0.
+ if (lhs != output)
+ masm.moveSimd128Float(lhs, output);
+ masm.vandps(Operand(mask), output, output);
+ masm.vandnps(Operand(tmp), mask, mask);
+ masm.vorps(Operand(mask), output, output);
+ }
+ return;
+ }
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinarySaturating(LSimdBinarySaturating* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ SimdSign sign = ins->signedness();
+ MOZ_ASSERT(sign != SimdSign::NotApplicable);
+
+ switch (ins->type()) {
+ case MIRType::Int8x16:
+ switch (ins->operation()) {
+ case MSimdBinarySaturating::add:
+ if (sign == SimdSign::Signed)
+ masm.vpaddsb(rhs, lhs, output);
+ else
+ masm.vpaddusb(rhs, lhs, output);
+ return;
+ case MSimdBinarySaturating::sub:
+ if (sign == SimdSign::Signed)
+ masm.vpsubsb(rhs, lhs, output);
+ else
+ masm.vpsubusb(rhs, lhs, output);
+ return;
+ }
+ break;
+
+ case MIRType::Int16x8:
+ switch (ins->operation()) {
+ case MSimdBinarySaturating::add:
+ if (sign == SimdSign::Signed)
+ masm.vpaddsw(rhs, lhs, output);
+ else
+ masm.vpaddusw(rhs, lhs, output);
+ return;
+ case MSimdBinarySaturating::sub:
+ if (sign == SimdSign::Signed)
+ masm.vpsubsw(rhs, lhs, output);
+ else
+ masm.vpsubusw(rhs, lhs, output);
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+ MOZ_CRASH("unsupported type for SIMD saturating arithmetic");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdUnaryArithIx16(LSimdUnaryArithIx16* ins)
+{
+ Operand in = ToOperand(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
+
+ switch (ins->operation()) {
+ case MSimdUnaryArith::neg:
+ masm.zeroSimd128Int(out);
+ masm.packedSubInt8(in, out);
+ return;
+ case MSimdUnaryArith::not_:
+ masm.loadConstantSimd128Int(allOnes, out);
+ masm.bitwiseXorSimd128(in, out);
+ return;
+ case MSimdUnaryArith::abs:
+ case MSimdUnaryArith::reciprocalApproximation:
+ case MSimdUnaryArith::reciprocalSqrtApproximation:
+ case MSimdUnaryArith::sqrt:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdUnaryArithIx8(LSimdUnaryArithIx8* ins)
+{
+ Operand in = ToOperand(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
+
+ switch (ins->operation()) {
+ case MSimdUnaryArith::neg:
+ masm.zeroSimd128Int(out);
+ masm.packedSubInt16(in, out);
+ return;
+ case MSimdUnaryArith::not_:
+ masm.loadConstantSimd128Int(allOnes, out);
+ masm.bitwiseXorSimd128(in, out);
+ return;
+ case MSimdUnaryArith::abs:
+ case MSimdUnaryArith::reciprocalApproximation:
+ case MSimdUnaryArith::reciprocalSqrtApproximation:
+ case MSimdUnaryArith::sqrt:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdUnaryArithIx4(LSimdUnaryArithIx4* ins)
+{
+ Operand in = ToOperand(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+
+ switch (ins->operation()) {
+ case MSimdUnaryArith::neg:
+ masm.zeroSimd128Int(out);
+ masm.packedSubInt32(in, out);
+ return;
+ case MSimdUnaryArith::not_:
+ masm.loadConstantSimd128Int(allOnes, out);
+ masm.bitwiseXorSimd128(in, out);
+ return;
+ case MSimdUnaryArith::abs:
+ case MSimdUnaryArith::reciprocalApproximation:
+ case MSimdUnaryArith::reciprocalSqrtApproximation:
+ case MSimdUnaryArith::sqrt:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdUnaryArithFx4(LSimdUnaryArithFx4* ins)
+{
+ Operand in = ToOperand(ins->input());
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ // All ones but the sign bit
+ float signMask = SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits);
+ static const SimdConstant signMasks = SimdConstant::SplatX4(signMask);
+
+ // All ones including the sign bit
+ float ones = SpecificNaN<float>(1, FloatingPoint<float>::kSignificandBits);
+ static const SimdConstant allOnes = SimdConstant::SplatX4(ones);
+
+ // All zeros but the sign bit
+ static const SimdConstant minusZero = SimdConstant::SplatX4(-0.f);
+
+ switch (ins->operation()) {
+ case MSimdUnaryArith::abs:
+ masm.loadConstantSimd128Float(signMasks, out);
+ masm.bitwiseAndSimd128(in, out);
+ return;
+ case MSimdUnaryArith::neg:
+ masm.loadConstantSimd128Float(minusZero, out);
+ masm.bitwiseXorSimd128(in, out);
+ return;
+ case MSimdUnaryArith::not_:
+ masm.loadConstantSimd128Float(allOnes, out);
+ masm.bitwiseXorSimd128(in, out);
+ return;
+ case MSimdUnaryArith::reciprocalApproximation:
+ masm.packedRcpApproximationFloat32x4(in, out);
+ return;
+ case MSimdUnaryArith::reciprocalSqrtApproximation:
+ masm.packedRcpSqrtApproximationFloat32x4(in, out);
+ return;
+ case MSimdUnaryArith::sqrt:
+ masm.packedSqrtFloat32x4(in, out);
+ return;
+ }
+ MOZ_CRASH("unexpected SIMD op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdBinaryBitwise(LSimdBinaryBitwise* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Operand rhs = ToOperand(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ MSimdBinaryBitwise::Operation op = ins->operation();
+ switch (op) {
+ case MSimdBinaryBitwise::and_:
+ if (ins->type() == MIRType::Float32x4)
+ masm.vandps(rhs, lhs, output);
+ else
+ masm.vpand(rhs, lhs, output);
+ return;
+ case MSimdBinaryBitwise::or_:
+ if (ins->type() == MIRType::Float32x4)
+ masm.vorps(rhs, lhs, output);
+ else
+ masm.vpor(rhs, lhs, output);
+ return;
+ case MSimdBinaryBitwise::xor_:
+ if (ins->type() == MIRType::Float32x4)
+ masm.vxorps(rhs, lhs, output);
+ else
+ masm.vpxor(rhs, lhs, output);
+ return;
+ }
+ MOZ_CRASH("unexpected SIMD bitwise op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdShift(LSimdShift* ins)
+{
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->vector()) == out); // defineReuseInput(0);
+
+ // The shift amount is masked so it is always smaller than the number of bits in a lane.
+ uint32_t shiftmask = (128u / SimdTypeToLength(ins->type())) - 1;
+
+ // Note that SSE doesn't have instructions for shifting 8x16 vectors.
+ // These shifts are synthesized by the MSimdShift::AddLegalized() function.
+ const LAllocation* val = ins->value();
+ if (val->isConstant()) {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ Imm32 count(uint32_t(ToInt32(val)) & shiftmask);
+ switch (ins->type()) {
+ case MIRType::Int16x8:
+ switch (ins->operation()) {
+ case MSimdShift::lsh:
+ masm.packedLeftShiftByScalarInt16x8(count, out);
+ return;
+ case MSimdShift::rsh:
+ masm.packedRightShiftByScalarInt16x8(count, out);
+ return;
+ case MSimdShift::ursh:
+ masm.packedUnsignedRightShiftByScalarInt16x8(count, out);
+ return;
+ }
+ break;
+ case MIRType::Int32x4:
+ switch (ins->operation()) {
+ case MSimdShift::lsh:
+ masm.packedLeftShiftByScalarInt32x4(count, out);
+ return;
+ case MSimdShift::rsh:
+ masm.packedRightShiftByScalarInt32x4(count, out);
+ return;
+ case MSimdShift::ursh:
+ masm.packedUnsignedRightShiftByScalarInt32x4(count, out);
+ return;
+ }
+ break;
+ default:
+ MOZ_CRASH("unsupported type for SIMD shifts");
+ }
+ MOZ_CRASH("unexpected SIMD bitwise op");
+ }
+
+ // Mask the shift amount to the lane width. We should have a temp register for that.
+ MOZ_ASSERT(val->isRegister());
+ Register count = ToRegister(ins->temp());
+ masm.mov(ToRegister(val), count);
+ masm.andl(Imm32(shiftmask), count);
+ ScratchFloat32Scope scratch(masm);
+ masm.vmovd(count, scratch);
+
+ switch (ins->type()) {
+ case MIRType::Int16x8:
+ switch (ins->operation()) {
+ case MSimdShift::lsh:
+ masm.packedLeftShiftByScalarInt16x8(scratch, out);
+ return;
+ case MSimdShift::rsh:
+ masm.packedRightShiftByScalarInt16x8(scratch, out);
+ return;
+ case MSimdShift::ursh:
+ masm.packedUnsignedRightShiftByScalarInt16x8(scratch, out);
+ return;
+ }
+ break;
+ case MIRType::Int32x4:
+ switch (ins->operation()) {
+ case MSimdShift::lsh:
+ masm.packedLeftShiftByScalarInt32x4(scratch, out);
+ return;
+ case MSimdShift::rsh:
+ masm.packedRightShiftByScalarInt32x4(scratch, out);
+ return;
+ case MSimdShift::ursh:
+ masm.packedUnsignedRightShiftByScalarInt32x4(scratch, out);
+ return;
+ }
+ break;
+ default:
+ MOZ_CRASH("unsupported type for SIMD shifts");
+ }
+ MOZ_CRASH("unexpected SIMD bitwise op");
+}
+
+void
+CodeGeneratorX86Shared::visitSimdSelect(LSimdSelect* ins)
+{
+ FloatRegister mask = ToFloatRegister(ins->mask());
+ FloatRegister onTrue = ToFloatRegister(ins->lhs());
+ FloatRegister onFalse = ToFloatRegister(ins->rhs());
+ FloatRegister output = ToFloatRegister(ins->output());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+
+ if (onTrue != output)
+ masm.vmovaps(onTrue, output);
+ if (mask != temp)
+ masm.vmovaps(mask, temp);
+
+ MSimdSelect* mir = ins->mir();
+ unsigned lanes = SimdTypeToLength(mir->type());
+
+ if (AssemblerX86Shared::HasAVX() && lanes == 4) {
+ // TBD: Use vpblendvb for lanes > 4 when AVX is available.
+ masm.vblendvps(mask, onTrue, onFalse, output);
+ return;
+ }
+
+ // SSE4.1 has plain blendvps which can do this, but it is awkward
+ // to use because it requires the mask to be in xmm0.
+
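+ // Bitwise select, computed in place:
+ // output = (onTrue & mask) | (onFalse & ~mask)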
+ masm.bitwiseAndSimd128(Operand(temp), output);
+ masm.bitwiseAndNotSimd128(Operand(onFalse), temp);
+ masm.bitwiseOrSimd128(Operand(temp), output);
+}
+
+void
+CodeGeneratorX86Shared::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+ }
+}
+
+template<typename S, typename T>
+void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register temp1, Register temp2, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, temp1, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, temp1, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, temp2, temp1);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, temp2, temp1);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, temp2, temp1);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ masm.convertUInt32ToDouble(temp1, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register temp1, Register temp2, AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd8(value, mem);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub8(value, mem);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd8(value, mem);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr8(value, mem);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor8(value, mem);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd16(value, mem);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub16(value, mem);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd16(value, mem);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr16(value, mem);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor16(value, mem);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd32(value, mem);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub32(value, mem);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd32(value, mem);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr32(value, mem);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor32(value, mem);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem);
+
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
+ Scalar::Type arrayType, const LAllocation* value, const T& mem,
+ Register temp1, Register temp2, AnyRegister output)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
+}
+
+void
+CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+ Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+ }
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
+ Scalar::Type arrayType, const LAllocation* value, const T& mem)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
+}
+
+void
+CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitMemoryBarrier(LMemoryBarrier* ins)
+{
+ if (ins->type() & MembarStoreLoad)
+ masm.storeLoadFence();
+}
+
+void
+CodeGeneratorX86Shared::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.encoding() == X86Encoding::xmm0);
+ MOZ_ASSERT(ReturnDoubleReg.encoding() == X86Encoding::xmm0);
+ MOZ_ASSERT(ReturnSimd128Reg.encoding() == X86Encoding::xmm0);
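+ // Float32, double, and SIMD128 values are all returned in xmm0, so these
+ // three additions alias the same physical register.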
+ regs->add(ReturnFloat32Reg);
+ regs->add(ReturnDoubleReg);
+ regs->add(ReturnSimd128Reg);
+}
+
+void
+CodeGeneratorX86Shared::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool)
+{
+ FloatRegister input = ool->input();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+ Label* oolRejoin = ool->rejoin();
+ bool isUnsigned = ool->isUnsigned();
+ wasm::TrapOffset off = ool->trapOffset();
+
+ if (fromType == MIRType::Float32) {
+ if (toType == MIRType::Int32)
+ masm.outOfLineWasmTruncateFloat32ToInt32(input, isUnsigned, off, oolRejoin);
+ else if (toType == MIRType::Int64)
+ masm.outOfLineWasmTruncateFloat32ToInt64(input, isUnsigned, off, oolRejoin);
+ else
+ MOZ_CRASH("unexpected type");
+ } else if (fromType == MIRType::Double) {
+ if (toType == MIRType::Int32)
+ masm.outOfLineWasmTruncateDoubleToInt32(input, isUnsigned, off, oolRejoin);
+ else if (toType == MIRType::Int64)
+ masm.outOfLineWasmTruncateDoubleToInt64(input, isUnsigned, off, oolRejoin);
+ else
+ MOZ_CRASH("unexpected type");
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void
+CodeGeneratorX86Shared::canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value)
+{
+#ifdef JS_MORE_DETERMINISTIC
+ switch (type) {
+ case Scalar::Float32: {
+ FloatRegister in = ToFloatRegister(value);
+ masm.canonicalizeFloatIfDeterministic(in);
+ break;
+ }
+ case Scalar::Float64: {
+ FloatRegister in = ToFloatRegister(value);
+ masm.canonicalizeDoubleIfDeterministic(in);
+ break;
+ }
+ case Scalar::Float32x4: {
+ FloatRegister in = ToFloatRegister(value);
+ MOZ_ASSERT(in.isSimd128());
+ FloatRegister scratch = in != xmm0.asSimd128() ? xmm0 : xmm1;
+ masm.push(scratch);
+ masm.canonicalizeFloat32x4(in, scratch);
+ masm.pop(scratch);
+ break;
+ }
+ default: {
+ // Other types don't need canonicalization.
+ break;
+ }
+ }
+#endif // JS_MORE_DETERMINISTIC
+}
+
+void
+CodeGeneratorX86Shared::visitCopySignF(LCopySignF* lir)
+{
+ FloatRegister lhs = ToFloatRegister(lir->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(lir->getOperand(1));
+
+ FloatRegister out = ToFloatRegister(lir->output());
+
+ if (lhs == rhs) {
+ if (lhs != out)
+ masm.moveFloat32(lhs, out);
+ return;
+ }
+
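+ // out = (lhs & ~signBit) | (rhs & signBit)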
+ ScratchFloat32Scope scratch(masm);
+
+ float clearSignMask = BitwiseCast<float>(INT32_MAX);
+ masm.loadConstantFloat32(clearSignMask, scratch);
+ masm.vandps(scratch, lhs, out);
+
+ float keepSignMask = BitwiseCast<float>(INT32_MIN);
+ masm.loadConstantFloat32(keepSignMask, scratch);
+ masm.vandps(rhs, scratch, scratch);
+
+ masm.vorps(scratch, out, out);
+}
+
+void
+CodeGeneratorX86Shared::visitCopySignD(LCopySignD* lir)
+{
+ FloatRegister lhs = ToFloatRegister(lir->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(lir->getOperand(1));
+
+ FloatRegister out = ToFloatRegister(lir->output());
+
+ if (lhs == rhs) {
+ if (lhs != out)
+ masm.moveDouble(lhs, out);
+ return;
+ }
+
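+ // out = (lhs & ~signBit) | (rhs & signBit)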
+ ScratchDoubleScope scratch(masm);
+
+ double clearSignMask = BitwiseCast<double>(INT64_MAX);
+ masm.loadConstantDouble(clearSignMask, scratch);
+ masm.vandpd(scratch, lhs, out);
+
+ double keepSignMask = BitwiseCast<double>(INT64_MIN);
+ masm.loadConstantDouble(keepSignMask, scratch);
+ masm.vandpd(rhs, scratch, scratch);
+
+ masm.vorpd(scratch, out, out);
+}
+
+void
+CodeGeneratorX86Shared::visitRotateI64(LRotateI64* lir)
+{
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c)
+ return;
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ else
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ } else {
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ else
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+}
+
+void
+CodeGeneratorX86Shared::visitPopcntI64(LPopcntI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ Register temp = InvalidReg;
+ if (!AssemblerX86Shared::HasPOPCNT())
+ temp = ToRegister(lir->getTemp(0));
+
+ masm.popcnt64(input, output, temp);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
new file mode 100644
index 000000000..d7abb1db7
--- /dev/null
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_CodeGenerator_x86_shared_h
+#define jit_x86_shared_CodeGenerator_x86_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineUndoALUOperation;
+class OutOfLineLoadTypedArrayOutOfBounds;
+class MulNegativeZeroCheck;
+class ModOverflowCheck;
+class ReturnZero;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorX86Shared : public CodeGeneratorShared
+{
+ friend class MoveResolverX86;
+
+ CodeGeneratorX86Shared* thisFromCtor() {
+ return this;
+ }
+
+ template <typename T>
+ void bailout(const T& t, LSnapshot* snapshot);
+
+ protected:
+ // Load a NaN or zero into a register for an out of bounds AsmJS or static
+ // typed array load.
+ class OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+ {
+ AnyRegister dest_;
+ Scalar::Type viewType_;
+ public:
+ OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, Scalar::Type viewType)
+ : dest_(dest), viewType_(viewType)
+ {}
+
+ AnyRegister dest() const { return dest_; }
+ Scalar::Type viewType() const { return viewType_; }
+ void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
+ }
+ };
+
+ // Additional bounds check for vector Float to Int conversion, when the
+ // undefined pattern is seen. Might imply a bailout.
+ class OutOfLineSimdFloatToIntCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+ {
+ Register temp_;
+ FloatRegister input_;
+ LInstruction* ins_;
+ wasm::TrapOffset trapOffset_;
+
+ public:
+        OutOfLineSimdFloatToIntCheck(Register temp, FloatRegister input, LInstruction* ins,
+ wasm::TrapOffset trapOffset)
+ : temp_(temp), input_(input), ins_(ins), trapOffset_(trapOffset)
+ {}
+
+ Register temp() const { return temp_; }
+ FloatRegister input() const { return input_; }
+ LInstruction* ins() const { return ins_; }
+ wasm::TrapOffset trapOffset() const { return trapOffset_; }
+
+ void accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitOutOfLineSimdFloatToIntCheck(this);
+ }
+ };
+
+ public:
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
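+        // Only the low byte of a boolean is significant; the upper bits of
+        // the register may hold garbage.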
+ masm.test32(reg, Imm32(0xFF));
+ bailoutIf(Assembler::Zero, snapshot);
+ }
+ void bailoutCvttsd2si(FloatRegister src, Register dest, LSnapshot* snapshot) {
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow. The other possibility is to test
+ // equality for INT_MIN after a comparison, but 1 costs fewer bytes to
+ // materialize.
+ masm.vcvttsd2si(src, dest);
+ masm.cmp32(dest, Imm32(1));
+ bailoutIf(Assembler::Overflow, snapshot);
+ }
+ void bailoutCvttss2si(FloatRegister src, Register dest, LSnapshot* snapshot) {
+ // Same trick as explained in the above comment.
+ masm.vcvttss2si(src, dest);
+ masm.cmp32(dest, Imm32(1));
+ bailoutIf(Assembler::Overflow, snapshot);
+ }
+
+ protected:
+ bool generateOutOfLineCode();
+
+ void emitCompare(MCompare::CompareType type, const LAllocation* left, const LAllocation* right);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond);
+ void emitBranch(Assembler::DoubleCondition cond, MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ void emitSimdExtractLane8x16(FloatRegister input, Register output, unsigned lane,
+ SimdSign signedness);
+ void emitSimdExtractLane16x8(FloatRegister input, Register output, unsigned lane,
+ SimdSign signedness);
+ void emitSimdExtractLane32x4(FloatRegister input, Register output, unsigned lane);
+
+ public:
+ CodeGeneratorX86Shared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ // Instruction visitors.
+ virtual void visitDouble(LDouble* ins);
+ virtual void visitFloat32(LFloat32* ins);
+ virtual void visitMinMaxD(LMinMaxD* ins);
+ virtual void visitMinMaxF(LMinMaxF* ins);
+ virtual void visitAbsD(LAbsD* ins);
+ virtual void visitAbsF(LAbsF* ins);
+ virtual void visitClzI(LClzI* ins);
+ virtual void visitCtzI(LCtzI* ins);
+ virtual void visitPopcntI(LPopcntI* ins);
+ virtual void visitPopcntI64(LPopcntI64* lir);
+ virtual void visitSqrtD(LSqrtD* ins);
+ virtual void visitSqrtF(LSqrtF* ins);
+ virtual void visitPowHalfD(LPowHalfD* ins);
+ virtual void visitAddI(LAddI* ins);
+ virtual void visitAddI64(LAddI64* ins);
+ virtual void visitSubI(LSubI* ins);
+ virtual void visitSubI64(LSubI64* ins);
+ virtual void visitMulI(LMulI* ins);
+ virtual void visitMulI64(LMulI64* ins);
+ virtual void visitDivI(LDivI* ins);
+ virtual void visitDivPowTwoI(LDivPowTwoI* ins);
+ virtual void visitDivOrModConstantI(LDivOrModConstantI* ins);
+ virtual void visitModI(LModI* ins);
+ virtual void visitModPowTwoI(LModPowTwoI* ins);
+ virtual void visitBitNotI(LBitNotI* ins);
+ virtual void visitBitOpI(LBitOpI* ins);
+ virtual void visitBitOpI64(LBitOpI64* ins);
+ virtual void visitShiftI(LShiftI* ins);
+ virtual void visitShiftI64(LShiftI64* ins);
+ virtual void visitUrshD(LUrshD* ins);
+ virtual void visitTestIAndBranch(LTestIAndBranch* test);
+ virtual void visitTestDAndBranch(LTestDAndBranch* test);
+ virtual void visitTestFAndBranch(LTestFAndBranch* test);
+ virtual void visitCompare(LCompare* comp);
+ virtual void visitCompareAndBranch(LCompareAndBranch* comp);
+ virtual void visitCompareD(LCompareD* comp);
+ virtual void visitCompareDAndBranch(LCompareDAndBranch* comp);
+ virtual void visitCompareF(LCompareF* comp);
+ virtual void visitCompareFAndBranch(LCompareFAndBranch* comp);
+ virtual void visitBitAndAndBranch(LBitAndAndBranch* baab);
+ virtual void visitNotI(LNotI* comp);
+ virtual void visitNotD(LNotD* comp);
+ virtual void visitNotF(LNotF* comp);
+ virtual void visitMathD(LMathD* math);
+ virtual void visitMathF(LMathF* math);
+ virtual void visitFloor(LFloor* lir);
+ virtual void visitFloorF(LFloorF* lir);
+ virtual void visitCeil(LCeil* lir);
+ virtual void visitCeilF(LCeilF* lir);
+ virtual void visitRound(LRound* lir);
+ virtual void visitRoundF(LRoundF* lir);
+ virtual void visitGuardShape(LGuardShape* guard);
+ virtual void visitGuardObjectGroup(LGuardObjectGroup* guard);
+ virtual void visitGuardClass(LGuardClass* guard);
+ virtual void visitEffectiveAddress(LEffectiveAddress* ins);
+ virtual void visitUDivOrMod(LUDivOrMod* ins);
+    virtual void visitUDivOrModConstant(LUDivOrModConstant* ins);
+ virtual void visitWasmStackArg(LWasmStackArg* ins);
+ virtual void visitWasmStackArgI64(LWasmStackArgI64* ins);
+ virtual void visitWasmSelect(LWasmSelect* ins);
+ virtual void visitWasmReinterpret(LWasmReinterpret* lir);
+ virtual void visitMemoryBarrier(LMemoryBarrier* ins);
+ virtual void visitWasmAddOffset(LWasmAddOffset* lir);
+ virtual void visitWasmTruncateToInt32(LWasmTruncateToInt32* lir);
+ virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
+ virtual void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
+ virtual void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
+ virtual void visitCopySignD(LCopySignD* lir);
+ virtual void visitCopySignF(LCopySignF* lir);
+ virtual void visitRotateI64(LRotateI64* lir);
+
+ void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool);
+
+ void visitNegI(LNegI* lir);
+ void visitNegD(LNegD* lir);
+ void visitNegF(LNegF* lir);
+
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+
+ // SIMD operators
+ void visitSimdValueInt32x4(LSimdValueInt32x4* lir);
+ void visitSimdValueFloat32x4(LSimdValueFloat32x4* lir);
+ void visitSimdSplatX16(LSimdSplatX16* lir);
+ void visitSimdSplatX8(LSimdSplatX8* lir);
+ void visitSimdSplatX4(LSimdSplatX4* lir);
+ void visitSimd128Int(LSimd128Int* ins);
+ void visitSimd128Float(LSimd128Float* ins);
+ void visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4* ins);
+ void visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins);
+ void visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins);
+ void visitSimdReinterpretCast(LSimdReinterpretCast* lir);
+ void visitSimdExtractElementB(LSimdExtractElementB* lir);
+ void visitSimdExtractElementI(LSimdExtractElementI* lir);
+ void visitSimdExtractElementU2D(LSimdExtractElementU2D* lir);
+ void visitSimdExtractElementF(LSimdExtractElementF* lir);
+ void visitSimdInsertElementI(LSimdInsertElementI* lir);
+ void visitSimdInsertElementF(LSimdInsertElementF* lir);
+ void visitSimdSwizzleI(LSimdSwizzleI* lir);
+ void visitSimdSwizzleF(LSimdSwizzleF* lir);
+ void visitSimdShuffleX4(LSimdShuffleX4* lir);
+ void visitSimdShuffle(LSimdShuffle* lir);
+ void visitSimdUnaryArithIx16(LSimdUnaryArithIx16* lir);
+ void visitSimdUnaryArithIx8(LSimdUnaryArithIx8* lir);
+ void visitSimdUnaryArithIx4(LSimdUnaryArithIx4* lir);
+ void visitSimdUnaryArithFx4(LSimdUnaryArithFx4* lir);
+ void visitSimdBinaryCompIx16(LSimdBinaryCompIx16* lir);
+ void visitSimdBinaryCompIx8(LSimdBinaryCompIx8* lir);
+ void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir);
+ void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir);
+ void visitSimdBinaryArithIx16(LSimdBinaryArithIx16* lir);
+ void visitSimdBinaryArithIx8(LSimdBinaryArithIx8* lir);
+ void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir);
+ void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir);
+ void visitSimdBinarySaturating(LSimdBinarySaturating* lir);
+ void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir);
+ void visitSimdShift(LSimdShift* lir);
+ void visitSimdSelect(LSimdSelect* ins);
+ void visitSimdAllTrue(LSimdAllTrue* ins);
+ void visitSimdAnyTrue(LSimdAnyTrue* ins);
+
+ template <class T, class Reg> void visitSimdGeneralShuffle(LSimdGeneralShuffleBase* lir, Reg temp);
+ void visitSimdGeneralShuffleI(LSimdGeneralShuffleI* lir);
+ void visitSimdGeneralShuffleF(LSimdGeneralShuffleF* lir);
+
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool);
+ void visitMulNegativeZeroCheck(MulNegativeZeroCheck* ool);
+ void visitModOverflowCheck(ModOverflowCheck* ool);
+ void visitReturnZero(ReturnZero* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register temp1, Register temp2, AnyRegister output);
+
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
+
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+
+ void canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value);
+};
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+{
+ LSnapshot* snapshot_;
+
+ public:
+ explicit OutOfLineBailout(LSnapshot* snapshot)
+ : snapshot_(snapshot)
+ { }
+
+ void accept(CodeGeneratorX86Shared* codegen);
+
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_CodeGenerator_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Constants-x86-shared.h b/js/src/jit/x86-shared/Constants-x86-shared.h
new file mode 100644
index 000000000..7f0ba0744
--- /dev/null
+++ b/js/src/jit/x86-shared/Constants-x86-shared.h
@@ -0,0 +1,228 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Constants_x86_shared_h
+#define jit_x86_shared_Constants_x86_shared_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+enum RegisterID : uint8_t {
+ rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi
+#ifdef JS_CODEGEN_X64
+ ,r8, r9, r10, r11, r12, r13, r14, r15
+#endif
+ ,invalid_reg
+};
+
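+// The legacy high-byte registers (%ah, %ch, %dh, %bh) reuse encodings 4-7,
+// which otherwise name %rsp, %rbp, %rsi and %rdi; which meaning applies
+// depends on the instruction and on whether a REX prefix is present.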
+enum HRegisterID {
+ ah = rsp,
+ ch = rbp,
+ dh = rsi,
+ bh = rdi
+};
+
+enum XMMRegisterID {
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+#ifdef JS_CODEGEN_X64
+ ,xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
+#endif
+ ,invalid_xmm
+};
+
+inline const char* XMMRegName(XMMRegisterID reg)
+{
+ static const char* const names[] = {
+ "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7"
+#ifdef JS_CODEGEN_X64
+ ,"%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
+ return names[reg];
+}
+
+#ifdef JS_CODEGEN_X64
+inline const char* GPReg64Name(RegisterID reg)
+{
+ static const char* const names[] = {
+ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi"
+#ifdef JS_CODEGEN_X64
+ ,"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
+ return names[reg];
+}
+#endif
+
+inline const char* GPReg32Name(RegisterID reg)
+{
+ static const char* const names[] = {
+ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi"
+#ifdef JS_CODEGEN_X64
+ ,"%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
+ return names[reg];
+}
+
+inline const char* GPReg16Name(RegisterID reg)
+{
+ static const char* const names[] = {
+ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di"
+#ifdef JS_CODEGEN_X64
+ ,"%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
+ return names[reg];
+}
+
+inline const char* GPReg8Name(RegisterID reg)
+{
+ static const char* const names[] = {
+ "%al", "%cl", "%dl", "%bl"
+#ifdef JS_CODEGEN_X64
+ ,"%spl", "%bpl", "%sil", "%dil",
+ "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
+ return names[reg];
+}
+
+inline const char* GPRegName(RegisterID reg)
+{
+#ifdef JS_CODEGEN_X64
+ return GPReg64Name(reg);
+#else
+ return GPReg32Name(reg);
+#endif
+}
+
+inline bool HasSubregL(RegisterID reg)
+{
+#ifdef JS_CODEGEN_X64
+ // In 64-bit mode, all registers have an 8-bit lo subreg.
+ return true;
+#else
+ // In 32-bit mode, only the first four registers do.
+ return reg <= rbx;
+#endif
+}
+
+inline bool HasSubregH(RegisterID reg)
+{
+ // The first four registers always have h registers. However, note that
+ // on x64, h registers may not be used in instructions using REX
+ // prefixes. Also note that this may depend on what other registers are
+ // used!
+ return reg <= rbx;
+}
+
+inline HRegisterID GetSubregH(RegisterID reg)
+{
+ MOZ_ASSERT(HasSubregH(reg));
+ return HRegisterID(reg + 4);
+}
+
+inline const char* HRegName8(HRegisterID reg)
+{
+ static const char* const names[] = {
+ "%ah", "%ch", "%dh", "%bh"
+ };
+ size_t index = reg - GetSubregH(rax);
+ MOZ_ASSERT(index < mozilla::ArrayLength(names));
+ return names[index];
+}
+
+enum Condition {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE
+};
+
+inline const char* CCName(Condition cc)
+{
+ static const char* const names[] = {
+ "o ", "no", "b ", "ae", "e ", "ne", "be", "a ",
+ "s ", "ns", "p ", "np", "l ", "ge", "le", "g "
+ };
+ MOZ_ASSERT(size_t(cc) < mozilla::ArrayLength(names));
+ return names[cc];
+}
+
+// Conditions for CMP instructions (CMPSS, CMPSD, CMPPS, CMPPD, etc).
+enum ConditionCmp {
+ ConditionCmp_EQ = 0x0,
+ ConditionCmp_LT = 0x1,
+ ConditionCmp_LE = 0x2,
+ ConditionCmp_UNORD = 0x3,
+ ConditionCmp_NEQ = 0x4,
+ ConditionCmp_NLT = 0x5,
+ ConditionCmp_NLE = 0x6,
+ ConditionCmp_ORD = 0x7,
+};
+
+// Rounding modes for ROUNDSD.
+enum RoundingMode {
+ RoundToNearest = 0x0,
+ RoundDown = 0x1,
+ RoundUp = 0x2,
+ RoundToZero = 0x3
+};
+
+// Test whether the given address will fit in an address immediate field.
+// This is always true on x86, but on x64 it's only true for addresses which
+// fit in the 32-bit immediate field.
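+// For example, on x64 the address 0x7ffff000 is a valid immediate, while
+// 0x00007fffffff0000 is not, since it does not survive truncation to a
+// sign-extended 32-bit value.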
+inline bool IsAddressImmediate(const void* address)
+{
+ intptr_t value = reinterpret_cast<intptr_t>(address);
+ int32_t immediate = static_cast<int32_t>(value);
+ return value == immediate;
+}
+
+// Convert the given address to a 32-bit immediate field value. This is a
+// no-op on x86, but on x64 it asserts that the address is actually a valid
+// address immediate.
+inline int32_t AddressImmediate(const void* address)
+{
+ MOZ_ASSERT(IsAddressImmediate(address));
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(address));
+}
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Constants_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Disassembler-x86-shared.cpp b/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
new file mode 100644
index 000000000..e033cfa5c
--- /dev/null
+++ b/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
@@ -0,0 +1,568 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Disassembler.h"
+
+#include "jit/x86-shared/Encoding-x86-shared.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::jit::X86Encoding;
+using namespace js::jit::Disassembler;
+
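+// A REX prefix has the form 0100WRXB: W selects a 64-bit operand size, while
+// R, X and B supply the fourth bit of the ModR/M reg field, the SIB index
+// field, and the ModR/M r/m (or SIB base) field, respectively.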
+MOZ_COLD static bool REX_W(uint8_t rex) { return (rex >> 3) & 0x1; }
+MOZ_COLD static bool REX_R(uint8_t rex) { return (rex >> 2) & 0x1; }
+MOZ_COLD static bool REX_X(uint8_t rex) { return (rex >> 1) & 0x1; }
+MOZ_COLD static bool REX_B(uint8_t rex) { return (rex >> 0) & 0x1; }
+
+MOZ_COLD static uint8_t
+MakeREXFlags(bool w, bool r, bool x, bool b)
+{
+ uint8_t rex = (w << 3) | (r << 2) | (x << 1) | (b << 0);
+ MOZ_RELEASE_ASSERT(REX_W(rex) == w);
+ MOZ_RELEASE_ASSERT(REX_R(rex) == r);
+ MOZ_RELEASE_ASSERT(REX_X(rex) == x);
+ MOZ_RELEASE_ASSERT(REX_B(rex) == b);
+ return rex;
+}
+
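+// A ModR/M byte encodes the addressing mode in bits 7:6, a register number
+// (or opcode extension) in bits 5:3, and the R/M operand in bits 2:0.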
+MOZ_COLD static ModRmMode
+ModRM_Mode(uint8_t modrm)
+{
+ return ModRmMode((modrm >> 6) & 0x3);
+}
+
+MOZ_COLD static uint8_t
+ModRM_Reg(uint8_t modrm)
+{
+ return (modrm >> 3) & 0x7;
+}
+
+MOZ_COLD static uint8_t
+ModRM_RM(uint8_t modrm)
+{
+ return (modrm >> 0) & 0x7;
+}
+
+MOZ_COLD static bool
+ModRM_hasSIB(uint8_t modrm)
+{
+ return ModRM_Mode(modrm) != ModRmRegister && ModRM_RM(modrm) == hasSib;
+}
+MOZ_COLD static bool
+ModRM_hasDisp8(uint8_t modrm)
+{
+ return ModRM_Mode(modrm) == ModRmMemoryDisp8;
+}
+MOZ_COLD static bool
+ModRM_hasRIP(uint8_t modrm)
+{
+#ifdef JS_CODEGEN_X64
+ return ModRM_Mode(modrm) == ModRmMemoryNoDisp && ModRM_RM(modrm) == noBase;
+#else
+ return false;
+#endif
+}
+MOZ_COLD static bool
+ModRM_hasDisp32(uint8_t modrm)
+{
+ return ModRM_Mode(modrm) == ModRmMemoryDisp32 ||
+ ModRM_hasRIP(modrm);
+}
+
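+// A SIB byte encodes the scale in bits 7:6, the index register in bits 5:3,
+// and the base register in bits 2:0.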
+MOZ_COLD static uint8_t
+SIB_SS(uint8_t sib)
+{
+ return (sib >> 6) & 0x3;
+}
+
+MOZ_COLD static uint8_t
+SIB_Index(uint8_t sib)
+{
+ return (sib >> 3) & 0x7;
+}
+
+MOZ_COLD static uint8_t
+SIB_Base(uint8_t sib)
+{
+ return (sib >> 0) & 0x7;
+}
+
+MOZ_COLD static bool
+SIB_hasRIP(uint8_t sib)
+{
+ return SIB_Base(sib) == noBase && SIB_Index(sib) == noIndex;
+}
+
+MOZ_COLD static bool
+HasRIP(uint8_t modrm, uint8_t sib, uint8_t rex)
+{
+ return ModRM_hasRIP(modrm) && SIB_hasRIP(sib);
+}
+
+MOZ_COLD static bool
+HasDisp8(uint8_t modrm)
+{
+ return ModRM_hasDisp8(modrm);
+}
+
+MOZ_COLD static bool
+HasDisp32(uint8_t modrm, uint8_t sib)
+{
+ return ModRM_hasDisp32(modrm) ||
+ (SIB_Base(sib) == noBase &&
+ SIB_Index(sib) == noIndex &&
+ ModRM_Mode(modrm) == ModRmMemoryNoDisp);
+}
+
+MOZ_COLD static uint32_t
+Reg(uint8_t modrm, uint8_t sib, uint8_t rex)
+{
+ return ModRM_Reg(modrm) | (REX_R(rex) << 3);
+}
+
+MOZ_COLD static bool
+HasBase(uint8_t modrm, uint8_t sib)
+{
+ return !ModRM_hasSIB(modrm) ||
+ SIB_Base(sib) != noBase ||
+ SIB_Index(sib) != noIndex ||
+ ModRM_Mode(modrm) != ModRmMemoryNoDisp;
+}
+
+MOZ_COLD static RegisterID
+DecodeBase(uint8_t modrm, uint8_t sib, uint8_t rex)
+{
+ return HasBase(modrm, sib)
+ ? RegisterID((ModRM_hasSIB(modrm) ? SIB_Base(sib) : ModRM_RM(modrm)) | (REX_B(rex) << 3))
+ : invalid_reg;
+}
+
+MOZ_COLD static RegisterID
+DecodeIndex(uint8_t modrm, uint8_t sib, uint8_t rex)
+{
+ RegisterID index = RegisterID(SIB_Index(sib) | (REX_X(rex) << 3));
+ return ModRM_hasSIB(modrm) && index != noIndex ? index : invalid_reg;
+}
+
+MOZ_COLD static uint32_t
+DecodeScale(uint8_t modrm, uint8_t sib, uint8_t rex)
+{
+ return ModRM_hasSIB(modrm) ? SIB_SS(sib) : 0;
+}
+
+#define PackOpcode(op0, op1, op2) ((op0) | ((op1) << 8) | ((op2) << 16))
+#define Pack2ByteOpcode(op1) PackOpcode(OP_2BYTE_ESCAPE, op1, 0)
+#define Pack3ByteOpcode(op1, op2) PackOpcode(OP_2BYTE_ESCAPE, op1, op2)
+
+uint8_t*
+js::jit::Disassembler::DisassembleHeapAccess(uint8_t* ptr, HeapAccess* access)
+{
+ VexOperandType type = VEX_PS;
+ uint32_t opcode = OP_HLT;
+ uint8_t modrm = 0;
+ uint8_t sib = 0;
+ uint8_t rex = 0;
+ int32_t disp = 0;
+ int32_t imm = 0;
+ bool haveImm = false;
+ int opsize = 4;
+
+ // Legacy prefixes
+ switch (*ptr) {
+ case PRE_LOCK:
+ case PRE_PREDICT_BRANCH_NOT_TAKEN: // (obsolete), aka %cs
+ case 0x3E: // aka predict-branch-taken (obsolete)
+ case 0x36: // %ss
+ case 0x26: // %es
+ case 0x64: // %fs
+ case 0x65: // %gs
+ case 0x67: // address-size override
+ MOZ_CRASH("Unable to disassemble instruction");
+ case PRE_SSE_F2: // aka REPNZ/REPNE
+ type = VEX_SD;
+ ptr++;
+ break;
+ case PRE_SSE_F3: // aka REP/REPE/REPZ
+ type = VEX_SS;
+ ptr++;
+ break;
+ case PRE_SSE_66: // aka PRE_OPERAND_SIZE
+ type = VEX_PD;
+ opsize = 2;
+ ptr++;
+ break;
+ default:
+ break;
+ }
+
+ // REX and VEX prefixes
+ {
+ int x = 0, b = 0, m = 1, w = 0;
+ int r, l, p;
+ switch (*ptr) {
+#ifdef JS_CODEGEN_X64
+ case PRE_REX | 0x0: case PRE_REX | 0x1: case PRE_REX | 0x2: case PRE_REX | 0x3:
+ case PRE_REX | 0x4: case PRE_REX | 0x5: case PRE_REX | 0x6: case PRE_REX | 0x7:
+ case PRE_REX | 0x8: case PRE_REX | 0x9: case PRE_REX | 0xa: case PRE_REX | 0xb:
+ case PRE_REX | 0xc: case PRE_REX | 0xd: case PRE_REX | 0xe: case PRE_REX | 0xf:
+ rex = *ptr++ & 0xf;
+ goto rex_done;
+#endif
+ case PRE_VEX_C4: {
+ if (type != VEX_PS)
+ MOZ_CRASH("Unable to disassemble instruction");
+ ++ptr;
+ uint8_t c4a = *ptr++ ^ 0xe0;
+ uint8_t c4b = *ptr++ ^ 0x78;
+ r = (c4a >> 7) & 0x1;
+ x = (c4a >> 6) & 0x1;
+ b = (c4a >> 5) & 0x1;
+ m = (c4a >> 0) & 0x1f;
+ w = (c4b >> 7) & 0x1;
+ l = (c4b >> 2) & 0x1;
+ p = (c4b >> 0) & 0x3;
+ break;
+ }
+ case PRE_VEX_C5: {
+ if (type != VEX_PS)
+ MOZ_CRASH("Unable to disassemble instruction");
+ ++ptr;
+ uint8_t c5 = *ptr++ ^ 0xf8;
+ r = (c5 >> 7) & 0x1;
+ l = (c5 >> 2) & 0x1;
+ p = (c5 >> 0) & 0x3;
+ break;
+ }
+ default:
+ goto rex_done;
+ }
+ type = VexOperandType(p);
+ rex = MakeREXFlags(w, r, x, b);
+    if (l != 0) // 256-bit SIMD
+        MOZ_CRASH("Unable to disassemble instruction");
+    switch (m) {
+      case 0x1:
+        opcode = Pack2ByteOpcode(*ptr++);
+        goto opcode_done;
+      case 0x2:
+        opcode = Pack3ByteOpcode(ESCAPE_38, *ptr++);
+        goto opcode_done;
+      case 0x3:
+        opcode = Pack3ByteOpcode(ESCAPE_3A, *ptr++);
+        goto opcode_done;
+      default:
+        MOZ_CRASH("Unable to disassemble instruction");
+    }
+ }
+ rex_done:;
+ if (REX_W(rex))
+ opsize = 8;
+
+ // Opcode.
+ opcode = *ptr++;
+ switch (opcode) {
+#ifdef JS_CODEGEN_X64
+ case OP_PUSH_EAX + 0: case OP_PUSH_EAX + 1: case OP_PUSH_EAX + 2: case OP_PUSH_EAX + 3:
+ case OP_PUSH_EAX + 4: case OP_PUSH_EAX + 5: case OP_PUSH_EAX + 6: case OP_PUSH_EAX + 7:
+ case OP_POP_EAX + 0: case OP_POP_EAX + 1: case OP_POP_EAX + 2: case OP_POP_EAX + 3:
+ case OP_POP_EAX + 4: case OP_POP_EAX + 5: case OP_POP_EAX + 6: case OP_POP_EAX + 7:
+ case OP_PUSH_Iz:
+ case OP_PUSH_Ib:
+ opsize = 8;
+ break;
+#endif
+ case OP_2BYTE_ESCAPE:
+ opcode |= *ptr << 8;
+ switch (*ptr++) {
+ case ESCAPE_38:
+ case ESCAPE_3A:
+ opcode |= *ptr++ << 16;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ opcode_done:;
+
+ // ModR/M
+ modrm = *ptr++;
+
+ // SIB
+ if (ModRM_hasSIB(modrm))
+ sib = *ptr++;
+
+ // Address Displacement
+ if (HasDisp8(modrm)) {
+ disp = int8_t(*ptr++);
+ } else if (HasDisp32(modrm, sib)) {
+ memcpy(&disp, ptr, sizeof(int32_t));
+ ptr += sizeof(int32_t);
+ }
+
+ // Immediate operand
+ switch (opcode) {
+ case OP_PUSH_Ib:
+ case OP_IMUL_GvEvIb:
+ case OP_GROUP1_EbIb:
+ case OP_GROUP1_EvIb:
+ case OP_TEST_EAXIb:
+ case OP_GROUP2_EvIb:
+ case OP_GROUP11_EvIb:
+ case OP_GROUP3_EbIb:
+ case Pack2ByteOpcode(OP2_PSHUFD_VdqWdqIb):
+ case Pack2ByteOpcode(OP2_PSLLD_UdqIb): // aka OP2_PSRAD_UdqIb, aka OP2_PSRLD_UdqIb
+ case Pack2ByteOpcode(OP2_PEXTRW_GdUdIb):
+ case Pack2ByteOpcode(OP2_SHUFPS_VpsWpsIb):
+ case Pack3ByteOpcode(ESCAPE_3A, OP3_PEXTRD_EdVdqIb):
+ case Pack3ByteOpcode(ESCAPE_3A, OP3_BLENDPS_VpsWpsIb):
+ case Pack3ByteOpcode(ESCAPE_3A, OP3_PINSRD_VdqEdIb):
+ // 8-bit signed immediate
+ imm = int8_t(*ptr++);
+ haveImm = true;
+ break;
+ case OP_RET_Iz:
+ // 16-bit unsigned immediate
+ memcpy(&imm, ptr, sizeof(int16_t));
+ ptr += sizeof(int16_t);
+ haveImm = true;
+ break;
+ case OP_ADD_EAXIv:
+ case OP_OR_EAXIv:
+ case OP_AND_EAXIv:
+ case OP_SUB_EAXIv:
+ case OP_XOR_EAXIv:
+ case OP_CMP_EAXIv:
+ case OP_PUSH_Iz:
+ case OP_IMUL_GvEvIz:
+ case OP_GROUP1_EvIz:
+ case OP_TEST_EAXIv:
+ case OP_MOV_EAXIv:
+ case OP_GROUP3_EvIz:
+ // 32-bit signed immediate
+ memcpy(&imm, ptr, sizeof(int32_t));
+ ptr += sizeof(int32_t);
+ haveImm = true;
+ break;
+ case OP_GROUP11_EvIz:
+ // opsize-sized signed immediate
+ memcpy(&imm, ptr, opsize);
+ imm = (imm << (32 - opsize * 8)) >> (32 - opsize * 8);
+ ptr += opsize;
+ haveImm = true;
+ break;
+ default:
+ break;
+ }
+
+ // Interpret the opcode.
+ if (HasRIP(modrm, sib, rex))
+ MOZ_CRASH("Unable to disassemble instruction");
+
+ size_t memSize = 0;
+ OtherOperand otherOperand(imm);
+ HeapAccess::Kind kind = HeapAccess::Unknown;
+ RegisterID gpr(RegisterID(Reg(modrm, sib, rex)));
+ XMMRegisterID xmm(XMMRegisterID(Reg(modrm, sib, rex)));
+ ComplexAddress addr(disp,
+ DecodeBase(modrm, sib, rex),
+ DecodeIndex(modrm, sib, rex),
+ DecodeScale(modrm, sib, rex));
+ switch (opcode) {
+ case OP_GROUP11_EvIb:
+ if (gpr != RegisterID(GROUP11_MOV))
+ MOZ_CRASH("Unable to disassemble instruction");
+ MOZ_RELEASE_ASSERT(haveImm);
+ memSize = 1;
+ kind = HeapAccess::Store;
+ break;
+ case OP_GROUP11_EvIz:
+ if (gpr != RegisterID(GROUP11_MOV))
+ MOZ_CRASH("Unable to disassemble instruction");
+ MOZ_RELEASE_ASSERT(haveImm);
+ memSize = opsize;
+ kind = HeapAccess::Store;
+ break;
+ case OP_MOV_GvEv:
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = opsize;
+ kind = HeapAccess::Load;
+ break;
+ case OP_MOV_GvEb:
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 1;
+ kind = HeapAccess::Load;
+ break;
+ case OP_MOV_EvGv:
+ if (!haveImm)
+ otherOperand = OtherOperand(gpr);
+ memSize = opsize;
+ kind = HeapAccess::Store;
+ break;
+ case OP_MOV_EbGv:
+ if (!haveImm)
+ otherOperand = OtherOperand(gpr);
+ memSize = 1;
+ kind = HeapAccess::Store;
+ break;
+ case Pack2ByteOpcode(OP2_MOVZX_GvEb):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 1;
+ kind = HeapAccess::Load;
+ break;
+ case Pack2ByteOpcode(OP2_MOVZX_GvEw):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 2;
+ kind = HeapAccess::Load;
+ break;
+ case Pack2ByteOpcode(OP2_MOVSX_GvEb):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 1;
+ kind = opsize == 8 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32;
+ break;
+ case Pack2ByteOpcode(OP2_MOVSX_GvEw):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 2;
+ kind = opsize == 8 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32;
+ break;
+#ifdef JS_CODEGEN_X64
+ case OP_MOVSXD_GvEv:
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(gpr);
+ memSize = 4;
+ kind = HeapAccess::LoadSext64;
+ break;
+#endif // JS_CODEGEN_X64
+ case Pack2ByteOpcode(OP2_MOVDQ_VdqWdq): // aka OP2_MOVDQ_VsdWsd
+ case Pack2ByteOpcode(OP2_MOVAPS_VsdWsd):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ memSize = 16;
+ kind = HeapAccess::Load;
+ break;
+ case Pack2ByteOpcode(OP2_MOVSD_VsdWsd): // aka OP2_MOVPS_VpsWps
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ switch (type) {
+ case VEX_SS: memSize = 4; break;
+ case VEX_SD: memSize = 8; break;
+ case VEX_PS:
+ case VEX_PD: memSize = 16; break;
+ default: MOZ_CRASH("Unexpected VEX type");
+ }
+ kind = HeapAccess::Load;
+ break;
+ case Pack2ByteOpcode(OP2_MOVDQ_WdqVdq):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ memSize = 16;
+ kind = HeapAccess::Store;
+ break;
+ case Pack2ByteOpcode(OP2_MOVSD_WsdVsd): // aka OP2_MOVPS_WpsVps
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ switch (type) {
+ case VEX_SS: memSize = 4; break;
+ case VEX_SD: memSize = 8; break;
+ case VEX_PS:
+ case VEX_PD: memSize = 16; break;
+ default: MOZ_CRASH("Unexpected VEX type");
+ }
+ kind = HeapAccess::Store;
+ break;
+ case Pack2ByteOpcode(OP2_MOVD_VdEd):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ switch (type) {
+ case VEX_PD: memSize = 4; break;
+ default: MOZ_CRASH("Unexpected VEX type");
+ }
+ kind = HeapAccess::Load;
+ break;
+ case Pack2ByteOpcode(OP2_MOVQ_WdVd):
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ switch (type) {
+ case VEX_PD: memSize = 8; break;
+ default: MOZ_CRASH("Unexpected VEX type");
+ }
+ kind = HeapAccess::Store;
+ break;
+ case Pack2ByteOpcode(OP2_MOVD_EdVd): // aka OP2_MOVQ_VdWd
+ MOZ_RELEASE_ASSERT(!haveImm);
+ otherOperand = OtherOperand(xmm);
+ switch (type) {
+ case VEX_SS: memSize = 8; kind = HeapAccess::Load; break;
+ case VEX_PD: memSize = 4; kind = HeapAccess::Store; break;
+ default: MOZ_CRASH("Unexpected VEX type");
+ }
+ break;
+ default:
+ MOZ_CRASH("Unable to disassemble instruction");
+ }
+
+ *access = HeapAccess(kind, memSize, addr, otherOperand);
+ return ptr;
+}
+
+#ifdef DEBUG
+void
+js::jit::Disassembler::DumpHeapAccess(const HeapAccess& access)
+{
+ switch (access.kind()) {
+ case HeapAccess::Store: fprintf(stderr, "store"); break;
+ case HeapAccess::Load: fprintf(stderr, "load"); break;
+ case HeapAccess::LoadSext32: fprintf(stderr, "loadSext32"); break;
+ case HeapAccess::LoadSext64: fprintf(stderr, "loadSext64"); break;
+ default: fprintf(stderr, "unknown"); break;
+ }
+ fprintf(stderr, "%u ", unsigned(access.size()));
+
+ switch (access.otherOperand().kind()) {
+ case OtherOperand::Imm:
+ fprintf(stderr, "imm %d", access.otherOperand().imm());
+ break;
+ case OtherOperand::GPR:
+ fprintf(stderr, "gpr %s", X86Encoding::GPRegName(access.otherOperand().gpr()));
+ break;
+ case OtherOperand::FPR:
+ fprintf(stderr, "fpr %s", X86Encoding::XMMRegName(access.otherOperand().fpr()));
+ break;
+ default: fprintf(stderr, "unknown");
+ }
+
+ fprintf(stderr, " @ ");
+
+ if (access.address().isPCRelative()) {
+ fprintf(stderr, MEM_o32r " ", ADDR_o32r(access.address().disp()));
+ } else if (access.address().hasIndex()) {
+ if (access.address().hasBase()) {
+ fprintf(stderr, MEM_obs " ",
+ ADDR_obs(access.address().disp(), access.address().base(),
+ access.address().index(), access.address().scale()));
+ } else {
+ fprintf(stderr, MEM_os " ",
+ ADDR_os(access.address().disp(),
+ access.address().index(), access.address().scale()));
+ }
+ } else if (access.address().hasBase()) {
+ fprintf(stderr, MEM_ob " ", ADDR_ob(access.address().disp(), access.address().base()));
+ } else {
+ fprintf(stderr, MEM_o " ", ADDR_o(access.address().disp()));
+ }
+
+ fprintf(stderr, "\n");
+}
+#endif
diff --git a/js/src/jit/x86-shared/Encoding-x86-shared.h b/js/src/jit/x86-shared/Encoding-x86-shared.h
new file mode 100644
index 000000000..5190164de
--- /dev/null
+++ b/js/src/jit/x86-shared/Encoding-x86-shared.h
@@ -0,0 +1,413 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Encoding_x86_shared_h
+#define jit_x86_shared_Encoding_x86_shared_h
+
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+static const size_t MaxInstructionSize = 16;
+
+// These enumerated values follow the Intel documentation, Volume 2C [1],
+// Appendix A.2 and Appendix A.3.
+//
+// Operand sizes and types are listed in Appendix A.2; tables of the
+// instructions and their operands can be found in Appendix A.3.
+//
+// E = reg/mem
+// G = reg (reg field of ModR/M)
+// U = xmm (R/M field of ModR/M)
+// V = xmm (reg field of ModR/M)
+// W = xmm/mem64
+// I = immediate
+// O = offset
+//
+// b = byte (8-bit)
+// w = word (16-bit)
+// v = register size
+// d = double (32-bit)
+// dq = double-quad (128-bit) (xmm)
+// ss = scalar float 32 (xmm)
+// ps = packed float 32 (xmm)
+// sd = scalar double (xmm)
+// pd = packed double (xmm)
+// z = 16/32/64-bit
+// vqp = (*)
+//
+// (*) The website in [2] provides a convenient list of all instructions, but
+// be aware that it does not follow the Intel documentation naming, which the
+// following enumeration does. Do not use its names as a reference when adding
+// new instructions.
+//
+// [1] http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-manual-325462.html
+// [2] http://ref.x86asm.net/geek.html
+//
+// OPn_NAME_DstSrc
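+//
+// For example, OP_MOV_GvEv (0x8B) is the load form of MOV: it moves a
+// reg/mem source (Ev) into the register named by the ModR/M reg field (Gv),
+// while OP_MOV_EvGv (0x89) is the corresponding store form.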
+enum OneByteOpcodeID {
+ OP_NOP_00 = 0x00,
+ OP_ADD_EbGb = 0x00,
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_ADD_EAXIv = 0x05,
+ OP_OR_EbGb = 0x08,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_OR_EAXIv = 0x0D,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_NOP_0F = 0x0F,
+ OP_ADC_GvEv = 0x13,
+ OP_SBB_GvEv = 0x1B,
+ OP_NOP_1F = 0x1F,
+ OP_AND_EbGb = 0x20,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_AND_EAXIv = 0x25,
+ OP_SUB_EbGb = 0x28,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ OP_SUB_EAXIv = 0x2D,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EbGb = 0x30,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_XOR_EAXIv = 0x35,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+ OP_CMP_EAXIv = 0x3D,
+#ifdef JS_CODEGEN_X64
+ PRE_REX = 0x40,
+#endif
+ OP_NOP_40 = 0x40,
+ OP_NOP_44 = 0x44,
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#ifdef JS_CODEGEN_X86
+ OP_PUSHA = 0x60,
+ OP_POPA = 0x61,
+#endif
+#ifdef JS_CODEGEN_X64
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_NOP_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_PUSH_Ib = 0x6a,
+ OP_IMUL_GvEvIb = 0x6b,
+ OP_JCC_rel8 = 0x70,
+ OP_GROUP1_EbIb = 0x80,
+ OP_NOP_80 = 0x80,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
+ OP_NOP_84 = 0x84,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_GbEb = 0x86,
+ OP_XCHG_GvEv = 0x87,
+ OP_MOV_EbGv = 0x88,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEb = 0x8A,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_PUSHFLAGS = 0x9C,
+ OP_POPFLAGS = 0x9D,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_TEST_EAXIb = 0xA8,
+ OP_TEST_EAXIv = 0xA9,
+ OP_MOV_EbIb = 0xB0,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_ADDP_ST0_ST1 = 0xC1,
+ OP_RET_Iz = 0xC2,
+ PRE_VEX_C4 = 0xC4,
+ PRE_VEX_C5 = 0xC5,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_FPU6 = 0xDD,
+ OP_FPU6_F32 = 0xD9,
+ OP_FPU6_ADDP = 0xDE,
+ OP_FILD = 0xDF,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ OP_JMP_rel8 = 0xEB,
+ PRE_LOCK = 0xF0,
+ PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+    OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
+ OP_GROUP5_Ev = 0xFF
+};
+
+enum class ShiftID {
+ vpsrlx = 2,
+ vpsrldq = 3,
+ vpsrad = 4,
+ vpsllx = 6
+};
+
+enum TwoByteOpcodeID {
+ OP2_UD2 = 0x0B,
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVPS_VpsWps = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVPS_WpsVps = 0x11,
+ OP2_MOVDDUP_VqWq = 0x12,
+ OP2_MOVHLPS_VqUq = 0x12,
+ OP2_MOVSLDUP_VpsWps = 0x12,
+ OP2_UNPCKLPS_VsdWsd = 0x14,
+ OP2_UNPCKHPS_VsdWsd = 0x15,
+ OP2_MOVLHPS_VqUq = 0x16,
+ OP2_MOVSHDUP_VpsWps = 0x16,
+ OP2_MOVAPD_VsdWsd = 0x28,
+ OP2_MOVAPS_VsdWsd = 0x28,
+ OP2_MOVAPS_WsdVsd = 0x29,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_CMOVZ_GvEv = 0x44,
+ OP2_MOVMSKPD_EdVd = 0x50,
+ OP2_ANDPS_VpsWps = 0x54,
+ OP2_ANDNPS_VpsWps = 0x55,
+ OP2_ORPS_VpsWps = 0x56,
+ OP2_XORPS_VpsWps = 0x57,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_ADDPS_VpsWps = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_MULPS_VpsWps = 0x59,
+ OP2_CVTSS2SD_VsdEd = 0x5A,
+ OP2_CVTSD2SS_VsdEd = 0x5A,
+ OP2_CVTTPS2DQ_VdqWps = 0x5B,
+ OP2_CVTDQ2PS_VpsWdq = 0x5B,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_SUBPS_VpsWps = 0x5C,
+ OP2_MINSD_VsdWsd = 0x5D,
+ OP2_MINSS_VssWss = 0x5D,
+ OP2_MINPS_VpsWps = 0x5D,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_DIVPS_VpsWps = 0x5E,
+ OP2_MAXSD_VsdWsd = 0x5F,
+ OP2_MAXSS_VssWss = 0x5F,
+ OP2_MAXPS_VpsWps = 0x5F,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_SQRTSS_VssWss = 0x51,
+ OP2_SQRTPS_VpsWps = 0x51,
+ OP2_RSQRTPS_VpsWps = 0x52,
+ OP2_RCPPS_VpsWps = 0x53,
+ OP2_ANDPD_VpdWpd = 0x54,
+ OP2_ORPD_VpdWpd = 0x56,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_PUNPCKLDQ = 0x62,
+ OP2_PCMPGTB_VdqWdq = 0x64,
+ OP2_PCMPGTW_VdqWdq = 0x65,
+ OP2_PCMPGTD_VdqWdq = 0x66,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVDQ_VsdWsd = 0x6F,
+ OP2_MOVDQ_VdqWdq = 0x6F,
+ OP2_PSHUFD_VdqWdqIb = 0x70,
+ OP2_PSHUFLW_VdqWdqIb = 0x70,
+ OP2_PSHUFHW_VdqWdqIb = 0x70,
+ OP2_PSLLW_UdqIb = 0x71,
+ OP2_PSRAW_UdqIb = 0x71,
+ OP2_PSRLW_UdqIb = 0x71,
+ OP2_PSLLD_UdqIb = 0x72,
+ OP2_PSRAD_UdqIb = 0x72,
+ OP2_PSRLD_UdqIb = 0x72,
+ OP2_PSRLDQ_Vd = 0x73,
+ OP2_PCMPEQB_VdqWdq = 0x74,
+ OP2_PCMPEQW_VdqWdq = 0x75,
+ OP2_PCMPEQD_VdqWdq = 0x76,
+ OP2_HADDPD = 0x7C,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_MOVQ_VdWd = 0x7E,
+ OP2_MOVDQ_WdqVdq = 0x7F,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_SHLD = 0xA4,
+ OP2_SHLD_GvEv = 0xA5,
+ OP2_SHRD = 0xAC,
+ OP2_SHRD_GvEv = 0xAD,
+ OP_FENCE = 0xAE,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_CMPXCHG_GvEb = 0xB0,
+ OP2_CMPXCHG_GvEw = 0xB1,
+ OP2_POPCNT_GvEv = 0xB8,
+ OP2_BSF_GvEv = 0xBC,
+ OP2_BSR_GvEv = 0xBD,
+ OP2_MOVSX_GvEb = 0xBE,
+ OP2_MOVSX_GvEw = 0xBF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_XADD_EbGb = 0xC0,
+ OP2_XADD_EvGv = 0xC1,
+ OP2_CMPPS_VpsWps = 0xC2,
+ OP2_PINSRW = 0xC4,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_SHUFPS_VpsWpsIb = 0xC6,
+ OP2_PSRLW_VdqWdq = 0xD1,
+ OP2_PSRLD_VdqWdq = 0xD2,
+ OP2_PMULLW_VdqWdq = 0xD5,
+ OP2_MOVQ_WdVd = 0xD6,
+ OP2_PSUBUSB_VdqWdq = 0xD8,
+ OP2_PSUBUSW_VdqWdq = 0xD9,
+ OP2_PANDDQ_VdqWdq = 0xDB,
+ OP2_PADDUSB_VdqWdq = 0xDC,
+ OP2_PADDUSW_VdqWdq = 0xDD,
+ OP2_PANDNDQ_VdqWdq = 0xDF,
+ OP2_PSRAW_VdqWdq = 0xE1,
+ OP2_PSRAD_VdqWdq = 0xE2,
+ OP2_PSUBSB_VdqWdq = 0xE8,
+ OP2_PSUBSW_VdqWdq = 0xE9,
+ OP2_PORDQ_VdqWdq = 0xEB,
+ OP2_PADDSB_VdqWdq = 0xEC,
+ OP2_PADDSW_VdqWdq = 0xED,
+ OP2_PXORDQ_VdqWdq = 0xEF,
+ OP2_PSLLW_VdqWdq = 0xF1,
+ OP2_PSLLD_VdqWdq = 0xF2,
+ OP2_PMULUDQ_VdqWdq = 0xF4,
+ OP2_PSUBB_VdqWdq = 0xF8,
+ OP2_PSUBW_VdqWdq = 0xF9,
+ OP2_PSUBD_VdqWdq = 0xFA,
+ OP2_PADDB_VdqWdq = 0xFC,
+ OP2_PADDW_VdqWdq = 0xFD,
+ OP2_PADDD_VdqWdq = 0xFE
+};
+
+enum ThreeByteOpcodeID {
+ OP3_PSHUFB_VdqWdq = 0x00,
+ OP3_ROUNDSS_VsdWsd = 0x0A,
+ OP3_ROUNDSD_VsdWsd = 0x0B,
+ OP3_BLENDVPS_VdqWdq = 0x14,
+ OP3_PEXTRB_EdVdqIb = 0x14,
+ OP3_PEXTRD_EdVdqIb = 0x16,
+ OP3_BLENDPS_VpsWpsIb = 0x0C,
+ OP3_PTEST_VdVd = 0x17,
+ OP3_PINSRB_VdqEdIb = 0x20,
+ OP3_INSERTPS_VpsUps = 0x21,
+ OP3_PINSRD_VdqEdIb = 0x22,
+ OP3_PMULLD_VdqWdq = 0x40,
+ OP3_VBLENDVPS_VdqWdq = 0x4A
+};
+
+// Test whether the given opcode should be printed with its operands reversed.
+inline bool IsXMMReversedOperands(TwoByteOpcodeID opcode)
+{
+ switch (opcode) {
+ case OP2_MOVSD_WsdVsd: // also OP2_MOVPS_WpsVps
+ case OP2_MOVAPS_WsdVsd:
+ case OP2_MOVDQ_WdqVdq:
+ case OP3_PEXTRD_EdVdqIb:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+enum ThreeByteEscape {
+ ESCAPE_38 = 0x38,
+ ESCAPE_3A = 0x3A
+};
+
+enum VexOperandType {
+ VEX_PS = 0,
+ VEX_PD = 1,
+ VEX_SS = 2,
+ VEX_SD = 3
+};
+
+inline OneByteOpcodeID jccRel8(Condition cond)
+{
+ return OneByteOpcodeID(OP_JCC_rel8 + cond);
+}
+inline TwoByteOpcodeID jccRel32(Condition cond)
+{
+ return TwoByteOpcodeID(OP2_JCC_rel32 + cond);
+}
+inline TwoByteOpcodeID setccOpcode(Condition cond)
+{
+ return TwoByteOpcodeID(OP_SETCC + cond);
+}
+
+enum GroupOpcodeID {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_SBB = 3,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_MUL = 4,
+ GROUP3_OP_IMUL = 5,
+ GROUP3_OP_DIV = 6,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_INC = 0,
+ GROUP5_OP_DEC = 1,
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ FILD_OP_64 = 5,
+
+ FPU6_OP_FLD = 0,
+ FPU6_OP_FISTTP = 1,
+ FPU6_OP_FSTP = 3,
+ FPU6_OP_FLDCW = 5,
+ FPU6_OP_FISTP = 7,
+
+ GROUP11_MOV = 0
+};
+
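+// Special-cased ModR/M and SIB encodings: an R/M field of rsp means a SIB
+// byte follows, an rsp index in the SIB byte means "no index", and an rbp
+// base with mod == 00 means "no base, disp32 only". On x64, r12 and r13 hit
+// the same special cases (hence hasSib2 and noBase2), since only the low
+// three bits of the register number participate in these checks.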
+static const RegisterID noBase = rbp;
+static const RegisterID hasSib = rsp;
+static const RegisterID noIndex = rsp;
+#ifdef JS_CODEGEN_X64
+static const RegisterID noBase2 = r13;
+static const RegisterID hasSib2 = r12;
+#endif
+
+enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister
+};
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Encoding_x86_shared_h */
diff --git a/js/src/jit/x86-shared/LIR-x86-shared.h b/js/src/jit/x86-shared/LIR-x86-shared.h
new file mode 100644
index 000000000..7408b8fc2
--- /dev/null
+++ b/js/src/jit/x86-shared/LIR-x86-shared.h
@@ -0,0 +1,421 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_LIR_x86_shared_h
+#define jit_x86_shared_LIR_x86_shared_h
+
+namespace js {
+namespace jit {
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI)
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ if (mir()->isTruncated()) {
+ if (mir()->canBeNegativeZero()) {
+ return mir()->canBeNegativeOverflow()
+ ? "Truncate_NegativeZero_NegativeOverflow"
+ : "Truncate_NegativeZero";
+ }
+ return mir()->canBeNegativeOverflow() ? "Truncate_NegativeOverflow" : "Truncate";
+ }
+ if (mir()->canBeNegativeZero())
+ return mir()->canBeNegativeOverflow() ? "NegativeZero_NegativeOverflow" : "NegativeZero";
+ return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : nullptr;
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+// Signed division by a power-of-two constant.
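+// For instance, dividing by 8 lowers to this instruction with shift() == 3;
+// negativeDivisor() is set when the original divisor was negative (e.g. -8).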
+class LDivPowTwoI : public LBinaryMath<0>
+{
+ const int32_t shift_;
+ const bool negativeDivisor_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, const LAllocation& lhsCopy, int32_t shift, bool negativeDivisor)
+ : shift_(shift), negativeDivisor_(negativeDivisor)
+ {
+ setOperand(0, lhs);
+ setOperand(1, lhsCopy);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+ const LAllocation* numeratorCopy() {
+ return getOperand(1);
+ }
+ int32_t shift() const {
+ return shift_;
+ }
+ bool negativeDivisor() const {
+ return negativeDivisor_;
+ }
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LDivOrModConstantI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t denominator_;
+
+ public:
+ LIR_HEADER(DivOrModConstantI)
+
+ LDivOrModConstantI(const LAllocation& lhs, int32_t denominator, const LDefinition& temp)
+ : denominator_(denominator)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+ int32_t denominator() const {
+ return denominator_;
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeNegativeDividend() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeDividend();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI)
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ const LDefinition* remainder() {
+ return getDef(0);
+ }
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// This class performs a simple x86 'div', yielding either a quotient or remainder depending on
+// whether this instruction is defined to output eax (quotient) or edx (remainder).
+class LUDivOrMod : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOnError();
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModConstant : public LInstructionHelper<1, 1, 1>
+{
+ const uint32_t denominator_;
+
+ public:
+ LIR_HEADER(UDivOrModConstant)
+
+    LUDivOrModConstant(const LAllocation& lhs, uint32_t denominator, const LDefinition& temp)
+ : denominator_(denominator)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+    const LAllocation* numerator() {
+        return getOperand(0);
+    }
+    uint32_t denominator() const {
+        return denominator_;
+    }
+    MBinaryArithInstruction* mir() const {
+        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+        return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeNegativeDividend() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeDividend();
+ }
+ bool trapOnError() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOnError();
+ return mir_->toDiv()->trapOnError();
+ }
+ wasm::TrapOffset trapOffset() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI)
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+ const LDefinition* remainder() {
+ return getDef(0);
+ }
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// A table switch whose dispatch index is already an integer.
+class LTableSwitch : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(TableSwitch)
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempPointer() {
+ return getTemp(1);
+ }
+};
+
+// A table switch whose dispatch index is a boxed Value that must be unboxed first.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TableSwitchV)
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, const LDefinition& jumpTablePointer,
+ MTableSwitch* ins)
+ {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition* tempPointer() {
+ return getTemp(2);
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(GuardShape)
+
+ explicit LGuardShape(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const MGuardShape* mir() const {
+ return mir_->toGuardShape();
+ }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(GuardObjectGroup)
+
+ explicit LGuardObjectGroup(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const MGuardObjectGroup* mir() const {
+ return mir_->toGuardObjectGroup();
+ }
+};
+
+class LMulI : public LBinaryMath<0, 1>
+{
+ public:
+ LIR_HEADER(MulI)
+
+ LMulI(const LAllocation& lhs, const LAllocation& rhs, const LAllocation& lhsCopy) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setOperand(2, lhsCopy);
+ }
+
+ const char* extraName() const {
+ return (mir()->mode() == MMul::Integer)
+ ? "Integer"
+ : (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : nullptr);
+ }
+
+ MMul* mir() const {
+ return mir_->toMul();
+ }
+ const LAllocation* lhsCopy() {
+ return this->getOperand(2);
+ }
+};
+
+// Constructs an int32x4 SIMD value.
+class LSimdValueInt32x4 : public LInstructionHelper<1, 4, 0>
+{
+ public:
+ LIR_HEADER(SimdValueInt32x4)
+ LSimdValueInt32x4(const LAllocation& x, const LAllocation& y,
+ const LAllocation& z, const LAllocation& w)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+ }
+
+ MSimdValueX4* mir() const {
+ return mir_->toSimdValueX4();
+ }
+};
+
+// Constructs a float32x4 SIMD value, optimized for the x86 family.
+class LSimdValueFloat32x4 : public LInstructionHelper<1, 4, 1>
+{
+ public:
+ LIR_HEADER(SimdValueFloat32x4)
+ LSimdValueFloat32x4(const LAllocation& x, const LAllocation& y,
+ const LAllocation& z, const LAllocation& w,
+ const LDefinition& copyY)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+
+ setTemp(0, copyY);
+ }
+
+ MSimdValueX4* mir() const {
+ return mir_->toSimdValueX4();
+ }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in, const LDefinition& temp) {
+ setInt64Operand(0, in);
+ setTemp(0, temp);
+ }
+
+ MInt64ToFloatingPoint* mir() const {
+ return mir_->toInt64ToFloatingPoint();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_LIR_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
new file mode 100644
index 000000000..8e820070a
--- /dev/null
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -0,0 +1,1019 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Abs;
+using mozilla::FloorLog2;
+using mozilla::Swap;
+
+LTableSwitch*
+LIRGeneratorX86Shared::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV*
+LIRGeneratorX86Shared::newLTableSwitchV(MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)),
+ temp(), tempDouble(), temp(), tableswitch);
+}
+
+void
+LIRGeneratorX86Shared::visitGuardShape(MGuardShape* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LGuardShape* guard = new(alloc()) LGuardShape(useRegisterAtStart(ins->object()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorX86Shared::visitGuardObjectGroup(MGuardObjectGroup* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegisterAtStart(ins->object()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorX86Shared::visitPowHalf(MPowHalf* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegisterAtStart(lhs));
+
+    // The shift count must be a constant or live in ecx:
+    // x86 variable-count shifts can only take the count in %cl.
+ if (rhs->isConstant())
+ ins->setOperand(1, useOrConstantAtStart(rhs));
+ else
+ ins->setOperand(1, lhs != rhs ? useFixed(rhs, ecx) : useFixedAtStart(rhs, ecx));
+
+ defineReuseInput(ins, mir, 0);
+}
+
+template<size_t Temps>
+void
+LIRGeneratorX86Shared::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#if defined(JS_NUNBOX32)
+ if (mir->isRotate())
+ ins->setTemp(0, temp());
+#endif
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES, "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES, "Assume Count is located at INT64_PIECES.");
+
+    // The shift count must be a constant or live in ecx:
+    // x86 variable-count shifts can only take the count in %cl.
+ if (rhs->isConstant()) {
+ ins->setOperand(INT64_PIECES, useOrConstantAtStart(rhs));
+ } else {
+ // The operands are int64, but we only care about the lower 32 bits of
+ // the RHS. On 32-bit, the code below will load that part in ecx and
+ // will discard the upper half.
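+        // (A 64-bit shift or rotate count is taken modulo 64, so only the
+        // low six bits ever matter, and the low 32-bit piece of the rhs
+        // carries all of them.)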
+ ensureDefined(rhs);
+ LUse use(ecx);
+ use.setVirtualRegister(rhs->virtualRegister());
+ ins->setOperand(INT64_PIECES, use);
+ }
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorX86Shared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorX86Shared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input)
+{
+ ins->setOperand(0, useRegisterAtStart(input));
+ defineReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, lhs != rhs ? useOrConstant(rhs) : useOrConstantAtStart(rhs));
+ defineReuseInput(ins, mir, 0);
+}
+
+template<size_t Temps>
+void
+LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ // Without AVX, we'll need to use the x86 encodings where one of the
+ // inputs must be the same location as the output.
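+    // A sketch of the encoding difference (AT&T syntax, mnemonics purely
+    // illustrative):
+    //   SSE:  addps %xmm1, %xmm0         ; xmm0 = xmm0 + xmm1, output aliases lhs
+    //   AVX:  vaddps %xmm2, %xmm1, %xmm0 ; xmm0 = xmm1 + xmm2, output is free
+    // which is why the non-AVX path below reuses the lhs register as the
+    // definition.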
+ if (!Assembler::HasAVX()) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, lhs != rhs ? use(rhs) : useAtStart(rhs));
+ defineReuseInput(ins, mir, 0);
+ } else {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, useAtStart(rhs));
+ define(ins, mir);
+ }
+}
+
+template void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorX86Shared::lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ lowerForALU(ins, mir, lhs, rhs);
+}
+
+void
+LIRGeneratorX86Shared::lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ // Swap the operands around to fit the instructions that x86 actually has.
+ // We do this here, before register allocation, so that we don't need
+ // temporaries and copying afterwards.
+ switch (mir->operation()) {
+ case MSimdBinaryComp::greaterThan:
+ case MSimdBinaryComp::greaterThanOrEqual:
+ mir->reverse();
+ Swap(lhs, rhs);
+ break;
+ default:
+ break;
+ }
+
+ lowerForFPU(ins, mir, lhs, rhs);
+}
+
+void
+LIRGeneratorX86Shared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void
+LIRGeneratorX86Shared::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
+{
+ // Note: If we need a negative zero check, lhs is used twice.
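+    // (A -0 result can only come from a zero product where lhs or rhs is
+    // negative, e.g. -1 * 0. The output reuses the lhs register, so the extra
+    // use of lhs (lhsCopy) keeps its original value available for that sign
+    // check in the codegen.)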
+ LAllocation lhsCopy = mul->canBeNegativeZero() ? use(lhs) : LAllocation();
+ LMulI* lir = new(alloc()) LMulI(useRegisterAtStart(lhs), useOrConstant(rhs), lhsCopy);
+ if (mul->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineReuseInput(lir, mul, 0);
+}
+
+void
+LIRGeneratorX86Shared::lowerDivI(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+
+ // Division by powers of two can be done by shifting, and division by
+ // other numbers can be done by a reciprocal multiplication technique.
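+        // A rough sketch of both rewrites (not the exact emitted code):
+        //   - power of two, d == 1 << k, signed truncating division:
+        //       q = (lhs + ((lhs >> 31) & (d - 1))) >> k;  // bias negative lhs
+        //   - other constants: multiply by a precomputed "magic" reciprocal M
+        //     and keep the high half, roughly q = (int64_t(lhs) * M) >> (32 + s),
+        //     with a small fixup that depends on the constant.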
+ int32_t shift = FloorLog2(Abs(rhs));
+ if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
+ LAllocation lhs = useRegisterAtStart(div->lhs());
+ LDivPowTwoI* lir;
+ if (!div->canBeNegativeDividend()) {
+                // The numerator can't be negative, so it does not need adjusting.
+ lir = new(alloc()) LDivPowTwoI(lhs, lhs, shift, rhs < 0);
+ } else {
+                // The numerator may be negative, so it needs adjusting, and an
+                // extra lhs copy register is needed.
+ lir = new(alloc()) LDivPowTwoI(lhs, useRegister(div->lhs()), shift, rhs < 0);
+ }
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineReuseInput(lir, div, 0);
+ return;
+ }
+ if (rhs != 0) {
+ LDivOrModConstantI* lir;
+ lir = new(alloc()) LDivOrModConstantI(useRegister(div->lhs()), rhs, tempFixed(eax));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(edx)));
+ return;
+ }
+ }
+
+ LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()),
+ tempFixed(edx));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX86Shared::lowerModI(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(Abs(rhs));
+ if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
+ LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineReuseInput(lir, mod, 0);
+ return;
+ }
+ if (rhs != 0) {
+ LDivOrModConstantI* lir;
+ lir = new(alloc()) LDivOrModConstantI(useRegister(mod->lhs()), rhs, tempFixed(edx));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(eax)));
+ return;
+ }
+ }
+
+ LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()),
+ useRegister(mod->rhs()),
+ tempFixed(eax));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
+}
+
+void
+LIRGeneratorX86Shared::visitWasmSelect(MWasmSelect* ins)
+{
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
+ useInt64(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmSelect(useRegisterAtStart(ins->trueExpr()),
+ use(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
+}
+
+void
+LIRGeneratorX86Shared::visitAsmJSNeg(MAsmJSNeg* ins)
+{
+ switch (ins->type()) {
+ case MIRType::Int32:
+ defineReuseInput(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins, 0);
+ break;
+ case MIRType::Float32:
+ defineReuseInput(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins, 0);
+ break;
+ case MIRType::Double:
+ defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins, 0);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void
+LIRGeneratorX86Shared::lowerWasmLoad(MWasmLoad* ins)
+{
+ MOZ_ASSERT(ins->type() != MIRType::Int64);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerUDiv(MDiv* div)
+{
+ if (div->rhs()->isConstant()) {
+ uint32_t rhs = div->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+
+ LAllocation lhs = useRegisterAtStart(div->lhs());
+ if (rhs != 0 && uint32_t(1) << shift == rhs) {
+ LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(lhs, lhs, shift, false);
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineReuseInput(lir, div, 0);
+ } else {
+ LUDivOrModConstant* lir = new(alloc()) LUDivOrModConstant(useRegister(div->lhs()),
+ rhs, tempFixed(eax));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(edx)));
+ }
+ return;
+ }
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod(useRegister(div->lhs()),
+ useRegister(div->rhs()),
+ tempFixed(edx));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, div, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX86Shared::lowerUMod(MMod* mod)
+{
+ if (mod->rhs()->isConstant()) {
+ uint32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+
+ if (rhs != 0 && uint32_t(1) << shift == rhs) {
+ LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineReuseInput(lir, mod, 0);
+ } else {
+ LUDivOrModConstant* lir = new(alloc()) LUDivOrModConstant(useRegister(mod->lhs()),
+ rhs, tempFixed(edx));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(eax)));
+ }
+ return;
+ }
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod(useRegister(mod->lhs()),
+ useRegister(mod->rhs()),
+ tempFixed(eax));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
+}
+
+void
+LIRGeneratorX86Shared::lowerUrshD(MUrsh* mir)
+{
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ MOZ_ASSERT(mir->type() == MIRType::Double);
+
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT(ecx == rcx);
+#endif
+
+ LUse lhsUse = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc = rhs->isConstant() ? useOrConstant(rhs) : useFixed(rhs, ecx);
+
+ LUrshD* lir = new(alloc()) LUrshD(lhsUse, rhsAlloc, tempCopy(lhs, 0));
+ define(lir, mir);
+}
+
+void
+LIRGeneratorX86Shared::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ LDefinition maybeTemp = Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
+ define(new(alloc()) LTruncateDToInt32(useRegister(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp = Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempFloat32();
+ define(new(alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins,
+ bool useI386ByteRegisters)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating register then we need a temp at the
+ // lower level; that temp must be eax.
+ //
+ // Otherwise the target (if used) is an integer register, which
+ // must be eax. If the target is not used the machine code will
+ // still clobber eax, so just pretend it's used.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: on x86
+ // this must be ebx, ecx, or edx (eax is taken for the output).
+ //
+ // Bug #1077036 describes some further optimization opportunities.
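+    //
+    // For reference, the CMPXCHG semantics that force the eax constraint:
+    //   lock cmpxchg newval, mem   ; if (eax == *mem) *mem = newval
+    //                              ; eax holds the old *mem value either way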
+
+ bool fixedOutput = false;
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation newval;
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ tempDef = tempFixed(eax);
+ newval = useRegister(ins->newval());
+ } else {
+ fixedOutput = true;
+ if (useI386ByteRegisters && ins->isByteArray())
+ newval = useFixed(ins->newval(), ebx);
+ else
+ newval = useRegister(ins->newval());
+ }
+
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ LCompareExchangeTypedArrayElement* lir =
+ new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
+
+ if (fixedOutput)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins,
+ bool useI386ByteRegisters)
+{
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+ const LAllocation value = useRegister(ins->value());
+
+ // The underlying instruction is XCHG, which can operate on any
+ // register.
+ //
+ // If the target is a floating register (for Uint32) then we need
+ // a temp into which to exchange.
+ //
+ // If the source is a byte array then we need a register that has
+ // a byte size; in this case -- on x86 only -- pin the output to
+ // an appropriate register and use that as a temp in the back-end.
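+    //
+    // (XCHG with a memory operand is implicitly locked, so a plain exchange
+    // needs neither a LOCK prefix nor a CMPXCHG loop.)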
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ // This restriction is bug 1077305.
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ tempDef = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
+
+ if (useI386ByteRegisters && ins->isByteArray())
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
+ bool useI386ByteRegisters)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. We can do this even for the Uint32 case.
+
+ if (!ins->hasUses()) {
+ LAllocation value;
+ if (useI386ByteRegisters && ins->isByteArray() && !ins->value()->isConstant())
+ value = useFixed(ins->value(), ebx);
+ else
+ value = useRegisterOrConstant(ins->value());
+
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value);
+
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl src, output
+ // lock xaddl output, mem
+ //
+ // For the 8-bit variants XADD needs a byte register for the output.
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl src, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+    // Note the placement of L: cmpxchg updates eax with *mem whenever *mem
+    // does not have the expected value, so reloading it at the top of the
+    // loop would be redundant.
+ //
+ // If the array is not a uint32 array then:
+ // - eax should be the output (one result of the cmpxchg)
+    //   - there is a temp, which must have a byte register if
+    //     the array has 1-byte elements
+ //
+ // If the array is a uint32 array then:
+ // - eax is the first temp
+ // - we also need a second temp
+ //
+ // There are optimization opportunities:
+ // - better register allocation in the x86 8-bit case, Bug #1077036.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+ bool fixedOutput = true;
+ bool reuseInput = false;
+ LDefinition tempDef1 = LDefinition::BogusTemp();
+ LDefinition tempDef2 = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ value = useRegisterOrConstant(ins->value());
+ fixedOutput = false;
+ if (bitOp) {
+ tempDef1 = tempFixed(eax);
+ tempDef2 = temp();
+ } else {
+ tempDef1 = temp();
+ }
+ } else if (useI386ByteRegisters && ins->isByteArray()) {
+ if (ins->value()->isConstant())
+ value = useRegisterOrConstant(ins->value());
+ else
+ value = useFixed(ins->value(), ebx);
+ if (bitOp)
+ tempDef1 = tempFixed(ecx);
+ } else if (bitOp) {
+ value = useRegisterOrConstant(ins->value());
+ tempDef1 = temp();
+ } else if (ins->value()->isConstant()) {
+ fixedOutput = false;
+ value = useRegisterOrConstant(ins->value());
+ } else {
+ fixedOutput = false;
+ reuseInput = true;
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
+
+ if (fixedOutput)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else if (reuseInput)
+ defineReuseInput(lir, ins, LAtomicTypedArrayElementBinop::valueOp);
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::visitSimdInsertElement(MSimdInsertElement* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ LUse vec = useRegisterAtStart(ins->vector());
+ LUse val = useRegister(ins->value());
+ switch (ins->type()) {
+ case MIRType::Int8x16:
+ case MIRType::Bool8x16:
+ // When SSE 4.1 is not available, we need to go via the stack.
+        // This requires the value being inserted to be in a byte-addressable
+        // register (%eax, %ebx, %ecx, or %edx).
+        // Pick %ebx since other instructions use %eax or %ecx hard-wired.
+#if defined(JS_CODEGEN_X86)
+ if (!AssemblerX86Shared::HasSSE41())
+ val = useFixed(ins->value(), ebx);
+#endif
+ defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
+ break;
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
+ break;
+ case MIRType::Float32x4:
+ defineReuseInput(new(alloc()) LSimdInsertElementF(vec, val), ins, 0);
+ break;
+ default:
+ MOZ_CRASH("Unknown SIMD kind when generating constant");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdExtractElement(MSimdExtractElement* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->input()->type()));
+ MOZ_ASSERT(!IsSimdType(ins->type()));
+
+ switch (ins->input()->type()) {
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4: {
+ MOZ_ASSERT(ins->signedness() != SimdSign::NotApplicable);
+ LUse use = useRegisterAtStart(ins->input());
+ if (ins->type() == MIRType::Double) {
+            // Extract a Uint32 lane into a double.
+ MOZ_ASSERT(ins->signedness() == SimdSign::Unsigned);
+ define(new (alloc()) LSimdExtractElementU2D(use, temp()), ins);
+ } else {
+ auto* lir = new (alloc()) LSimdExtractElementI(use);
+#if defined(JS_CODEGEN_X86)
+ // On x86 (32-bit), we may need to use movsbl or movzbl instructions
+ // to sign or zero extend the extracted lane to 32 bits. The 8-bit
+ // version of these instructions require a source register that is
+ // %al, %bl, %cl, or %dl.
+ // Fix it to %ebx since we can't express that constraint better.
+ if (ins->input()->type() == MIRType::Int8x16) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(ebx)));
+ return;
+ }
+#endif
+ define(lir, ins);
+ }
+ break;
+ }
+ case MIRType::Float32x4: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+ LUse use = useRegisterAtStart(ins->input());
+ define(new(alloc()) LSimdExtractElementF(use), ins);
+ break;
+ }
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4: {
+ MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+ LUse use = useRegisterAtStart(ins->input());
+ define(new(alloc()) LSimdExtractElementB(use), ins);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown SIMD kind when extracting element");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdBinaryArith(MSimdBinaryArith* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ if (ins->isCommutative())
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ switch (ins->type()) {
+ case MIRType::Int8x16: {
+ LSimdBinaryArithIx16* lir = new (alloc()) LSimdBinaryArithIx16();
+ lir->setTemp(0, LDefinition::BogusTemp());
+ lowerForFPU(lir, ins, lhs, rhs);
+ return;
+ }
+
+ case MIRType::Int16x8: {
+ LSimdBinaryArithIx8* lir = new (alloc()) LSimdBinaryArithIx8();
+ lir->setTemp(0, LDefinition::BogusTemp());
+ lowerForFPU(lir, ins, lhs, rhs);
+ return;
+ }
+
+ case MIRType::Int32x4: {
+ LSimdBinaryArithIx4* lir = new (alloc()) LSimdBinaryArithIx4();
+ bool needsTemp =
+ ins->operation() == MSimdBinaryArith::Op_mul && !MacroAssembler::HasSSE41();
+ lir->setTemp(0, needsTemp ? temp(LDefinition::SIMD128INT) : LDefinition::BogusTemp());
+ lowerForFPU(lir, ins, lhs, rhs);
+ return;
+ }
+
+ case MIRType::Float32x4: {
+ LSimdBinaryArithFx4* lir = new (alloc()) LSimdBinaryArithFx4();
+
+ bool needsTemp = ins->operation() == MSimdBinaryArith::Op_max ||
+ ins->operation() == MSimdBinaryArith::Op_minNum ||
+ ins->operation() == MSimdBinaryArith::Op_maxNum;
+ lir->setTemp(0,
+ needsTemp ? temp(LDefinition::SIMD128FLOAT) : LDefinition::BogusTemp());
+ lowerForFPU(lir, ins, lhs, rhs);
+ return;
+ }
+
+ default:
+ MOZ_CRASH("unknown simd type on binary arith operation");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdBinarySaturating(MSimdBinarySaturating* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ if (ins->isCommutative())
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ LSimdBinarySaturating* lir = new (alloc()) LSimdBinarySaturating();
+ lowerForFPU(lir, ins, lhs, rhs);
+}
+
+void
+LIRGeneratorX86Shared::visitSimdSelect(MSimdSelect* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ LSimdSelect* lins = new(alloc()) LSimdSelect;
+ MDefinition* r0 = ins->getOperand(0);
+ MDefinition* r1 = ins->getOperand(1);
+ MDefinition* r2 = ins->getOperand(2);
+
+ lins->setOperand(0, useRegister(r0));
+ lins->setOperand(1, useRegister(r1));
+ lins->setOperand(2, useRegister(r2));
+ lins->setTemp(0, temp(LDefinition::SIMD128FLOAT));
+
+ define(lins, ins);
+}
+
+void
+LIRGeneratorX86Shared::visitSimdSplat(MSimdSplat* ins)
+{
+ LAllocation x = useRegisterAtStart(ins->getOperand(0));
+
+ switch (ins->type()) {
+ case MIRType::Int8x16:
+ define(new (alloc()) LSimdSplatX16(x), ins);
+ break;
+ case MIRType::Int16x8:
+ define(new (alloc()) LSimdSplatX8(x), ins);
+ break;
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ // Use the SplatX4 instruction for all boolean splats. Since the input
+ // value is a 32-bit int that is either 0 or -1, the X4 splat gives
+ // the right result for all boolean geometries.
+ // For floats, (Non-AVX) codegen actually wants the input and the output
+ // to be in the same register, but we can't currently use
+ // defineReuseInput because they have different types (scalar vs
+ // vector), so a spill slot for one may not be suitable for the other.
+ define(new (alloc()) LSimdSplatX4(x), ins);
+ break;
+ default:
+ MOZ_CRASH("Unknown SIMD kind");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdValueX4(MSimdValueX4* ins)
+{
+ switch (ins->type()) {
+ case MIRType::Float32x4: {
+        // Ideally, x would be used at start and reused for the output;
+        // however, register allocation currently doesn't permit us to tie
+        // together two virtual registers with different types.
+ LAllocation x = useRegister(ins->getOperand(0));
+ LAllocation y = useRegister(ins->getOperand(1));
+ LAllocation z = useRegister(ins->getOperand(2));
+ LAllocation w = useRegister(ins->getOperand(3));
+ LDefinition t = temp(LDefinition::SIMD128FLOAT);
+ define(new (alloc()) LSimdValueFloat32x4(x, y, z, w, t), ins);
+ break;
+ }
+ case MIRType::Bool32x4:
+ case MIRType::Int32x4: {
+ // No defineReuseInput => useAtStart for everyone.
+ LAllocation x = useRegisterAtStart(ins->getOperand(0));
+ LAllocation y = useRegisterAtStart(ins->getOperand(1));
+ LAllocation z = useRegisterAtStart(ins->getOperand(2));
+ LAllocation w = useRegisterAtStart(ins->getOperand(3));
+ define(new(alloc()) LSimdValueInt32x4(x, y, z, w), ins);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown SIMD kind");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdSwizzle(MSimdSwizzle* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->input()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ if (IsIntegerSimdType(ins->input()->type())) {
+ LUse use = useRegisterAtStart(ins->input());
+ LSimdSwizzleI* lir = new (alloc()) LSimdSwizzleI(use);
+ define(lir, ins);
+ // We need a GPR temp register for pre-SSSE3 codegen (no vpshufb).
+ if (Assembler::HasSSSE3()) {
+ lir->setTemp(0, LDefinition::BogusTemp());
+ } else {
+ // The temp must be a GPR usable with 8-bit loads and stores.
+#if defined(JS_CODEGEN_X86)
+ lir->setTemp(0, tempFixed(ebx));
+#else
+ lir->setTemp(0, temp());
+#endif
+ }
+ } else if (ins->input()->type() == MIRType::Float32x4) {
+ LUse use = useRegisterAtStart(ins->input());
+ LSimdSwizzleF* lir = new (alloc()) LSimdSwizzleF(use);
+ define(lir, ins);
+ lir->setTemp(0, LDefinition::BogusTemp());
+ } else {
+ MOZ_CRASH("Unknown SIMD kind when getting lane");
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdShuffle(MSimdShuffle* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+ MOZ_ASSERT(IsSimdType(ins->type()));
+ if (ins->type() == MIRType::Int32x4 || ins->type() == MIRType::Float32x4) {
+ bool zFromLHS = ins->lane(2) < 4;
+ bool wFromLHS = ins->lane(3) < 4;
+ uint32_t lanesFromLHS = (ins->lane(0) < 4) + (ins->lane(1) < 4) + zFromLHS + wFromLHS;
+
+ LSimdShuffleX4* lir = new (alloc()) LSimdShuffleX4();
+ lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
+
+ // See codegen for requirements details.
+ LDefinition temp =
+ (lanesFromLHS == 3) ? tempCopy(ins->rhs(), 1) : LDefinition::BogusTemp();
+ lir->setTemp(0, temp);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Int8x16 || ins->type() == MIRType::Int16x8);
+ LSimdShuffle* lir = new (alloc()) LSimdShuffle();
+ lir->setOperand(0, useRegister(ins->lhs()));
+ lir->setOperand(1, useRegister(ins->rhs()));
+ define(lir, ins);
+ // We need a GPR temp register for pre-SSSE3 codegen, and an SSE temp
+ // when using pshufb.
+ if (Assembler::HasSSSE3()) {
+ lir->setTemp(0, temp(LDefinition::SIMD128INT));
+ } else {
+ // The temp must be a GPR usable with 8-bit loads and stores.
+#if defined(JS_CODEGEN_X86)
+ lir->setTemp(0, tempFixed(ebx));
+#else
+ lir->setTemp(0, temp());
+#endif
+ }
+ }
+}
+
+void
+LIRGeneratorX86Shared::visitSimdGeneralShuffle(MSimdGeneralShuffle* ins)
+{
+ MOZ_ASSERT(IsSimdType(ins->type()));
+
+ LSimdGeneralShuffleBase* lir;
+ if (IsIntegerSimdType(ins->type())) {
+#if defined(JS_CODEGEN_X86)
+ // The temp register must be usable with 8-bit load and store
+        // instructions, so one of %eax, %ebx, %ecx, or %edx.
+ LDefinition t;
+ if (ins->type() == MIRType::Int8x16)
+ t = tempFixed(ebx);
+ else
+ t = temp();
+#else
+ LDefinition t = temp();
+#endif
+ lir = new (alloc()) LSimdGeneralShuffleI(t);
+ } else if (ins->type() == MIRType::Float32x4) {
+ lir = new (alloc()) LSimdGeneralShuffleF(temp());
+ } else {
+ MOZ_CRASH("Unknown SIMD kind when doing a shuffle");
+ }
+
+ if (!lir->init(alloc(), ins->numVectors() + ins->numLanes()))
+ return;
+
+ for (unsigned i = 0; i < ins->numVectors(); i++) {
+ MOZ_ASSERT(IsSimdType(ins->vector(i)->type()));
+ lir->setOperand(i, useRegister(ins->vector(i)));
+ }
+
+ for (unsigned i = 0; i < ins->numLanes(); i++) {
+ MOZ_ASSERT(ins->lane(i)->type() == MIRType::Int32);
+ // Note that there can be up to 16 lane arguments, so we can't assume
+ // that they all get an allocated register.
+ lir->setOperand(i + ins->numVectors(), use(ins->lane(i)));
+ }
+
+ assignSnapshot(lir, Bailout_BoundsCheck);
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86Shared::visitCopySign(MCopySign* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double)
+ lir = new(alloc()) LCopySignD();
+ else
+ lir = new(alloc()) LCopySignF();
+
+ // As lowerForFPU, but we want rhs to be in a FP register too.
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, lhs != rhs ? useRegister(rhs) : useRegisterAtStart(rhs));
+ if (!Assembler::HasAVX())
+ defineReuseInput(lir, ins, 0);
+ else
+ define(lir, ins);
+}
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.h b/js/src/jit/x86-shared/Lowering-x86-shared.h
new file mode 100644
index 000000000..275cee301
--- /dev/null
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Lowering_x86_shared_h
+#define jit_x86_shared_Lowering_x86_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86Shared : public LIRGeneratorShared
+{
+ protected:
+ LIRGeneratorX86Shared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardObjectGroup(MGuardObjectGroup* ins);
+ void visitPowHalf(MPowHalf* ins);
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ template<size_t Temps>
+ void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ template<size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void visitAsmJSNeg(MAsmJSNeg* ins);
+ void lowerWasmLoad(MWasmLoad* ins);
+ void visitWasmSelect(MWasmSelect* ins);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void visitSimdInsertElement(MSimdInsertElement* ins);
+ void visitSimdExtractElement(MSimdExtractElement* ins);
+ void visitSimdBinaryArith(MSimdBinaryArith* ins);
+ void visitSimdBinarySaturating(MSimdBinarySaturating* ins);
+ void visitSimdSelect(MSimdSelect* ins);
+ void visitSimdSplat(MSimdSplat* ins);
+ void visitSimdSwizzle(MSimdSwizzle* ins);
+ void visitSimdShuffle(MSimdShuffle* ins);
+ void visitSimdGeneralShuffle(MSimdGeneralShuffle* ins);
+ void visitSimdValueX4(MSimdValueX4* ins);
+ void lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins,
+ bool useI386ByteRegisters);
+ void lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins,
+ bool useI386ByteRegisters);
+ void lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
+ bool useI386ByteRegisters);
+ void visitCopySign(MCopySign* ins);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Lowering_x86_shared_h */
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
new file mode 100644
index 000000000..33bfd46db
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -0,0 +1,1284 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_MacroAssembler_x86_shared_inl_h
+#define jit_x86_shared_MacroAssembler_x86_shared_inl_h
+
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Move instructions
+
+void
+MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
+{
+ vmovd(src, dest);
+}
+
+void
+MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
+{
+ vmovd(src, dest);
+}
+
+void
+MacroAssembler::move8SignExtend(Register src, Register dest)
+{
+ movsbl(src, dest);
+}
+
+void
+MacroAssembler::move16SignExtend(Register src, Register dest)
+{
+ movswl(src, dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::not32(Register reg)
+{
+ notl(reg);
+}
+
+void
+MacroAssembler::and32(Register src, Register dest)
+{
+ andl(src, dest);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register dest)
+{
+ andl(imm, dest);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, const Address& dest)
+{
+ andl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::and32(const Address& src, Register dest)
+{
+ andl(Operand(src), dest);
+}
+
+void
+MacroAssembler::or32(Register src, Register dest)
+{
+ orl(src, dest);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, Register dest)
+{
+ orl(imm, dest);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, const Address& dest)
+{
+ orl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::xor32(Register src, Register dest)
+{
+ xorl(src, dest);
+}
+
+void
+MacroAssembler::xor32(Imm32 imm, Register dest)
+{
+ xorl(imm, dest);
+}
+
+void
+MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
+{
+    // On newer chips (Haswell and later on Intel, and AMD chips with ABM)
+    // the LZCNT instruction does all of this directly.
+
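+    // bsrl yields the bit index of the highest set bit; for an index i in
+    // [0, 31], 31 - i == i ^ 31, so the xorl below turns that index into a
+    // leading-zero count. On the zero path, 0x3F ^ 0x1F == 32, the defined
+    // result for clz32(0).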
+ bsrl(src, dest);
+ if (!knownNotZero) {
+ // If the source is zero then bsrl leaves garbage in the destination.
+ Label nonzero;
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x3F), dest);
+ bind(&nonzero);
+ }
+ xorl(Imm32(0x1F), dest);
+}
+
+void
+MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
+{
+ bsfl(src, dest);
+ if (!knownNotZero) {
+ Label nonzero;
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(32), dest);
+ bind(&nonzero);
+ }
+}
+
+void
+MacroAssembler::popcnt32(Register input, Register output, Register tmp)
+{
+ if (AssemblerX86Shared::HasPOPCNT()) {
+ popcntl(input, output);
+ return;
+ }
+
+ MOZ_ASSERT(tmp != InvalidReg);
+
+ // Equivalent to mozilla::CountPopulation32()
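+    // The sequence below is the classic SWAR popcount; for a uint32_t x it
+    // reads roughly:
+    //   x -= (x >> 1) & 0x55555555;
+    //   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+    //   x = (x + (x >> 4)) & 0x0F0F0F0F;
+    //   return (x * 0x01010101) >> 24;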
+
+ movl(input, tmp);
+ if (input != output)
+ movl(input, output);
+ shrl(Imm32(1), output);
+ andl(Imm32(0x55555555), output);
+ subl(output, tmp);
+ movl(tmp, output);
+ andl(Imm32(0x33333333), output);
+ shrl(Imm32(2), tmp);
+ andl(Imm32(0x33333333), tmp);
+ addl(output, tmp);
+ movl(tmp, output);
+ shrl(Imm32(4), output);
+ addl(tmp, output);
+ andl(Imm32(0xF0F0F0F), output);
+ imull(Imm32(0x1010101), output, output);
+ shrl(Imm32(24), output);
+}
+
+// ===============================================================
+// Arithmetic instructions
+
+void
+MacroAssembler::add32(Register src, Register dest)
+{
+ addl(src, dest);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, Register dest)
+{
+ addl(imm, dest);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const Address& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const AbsoluteAddress& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest)
+{
+ vaddss(src, dest, dest);
+}
+
+void
+MacroAssembler::addDouble(FloatRegister src, FloatRegister dest)
+{
+ vaddsd(src, dest, dest);
+}
+
+void
+MacroAssembler::sub32(Register src, Register dest)
+{
+ subl(src, dest);
+}
+
+void
+MacroAssembler::sub32(Imm32 imm, Register dest)
+{
+ subl(imm, dest);
+}
+
+void
+MacroAssembler::sub32(const Address& src, Register dest)
+{
+ subl(Operand(src), dest);
+}
+
+void
+MacroAssembler::subDouble(FloatRegister src, FloatRegister dest)
+{
+ vsubsd(src, dest, dest);
+}
+
+void
+MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
+{
+ vsubss(src, dest, dest);
+}
+
+void
+MacroAssembler::mul32(Register rhs, Register srcDest)
+{
+ MOZ_ASSERT(srcDest == eax);
+ imull(rhs, srcDest); // Clobbers edx
+}
+
+void
+MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest)
+{
+ vmulss(src, dest, dest);
+}
+
+void
+MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ vmulsd(src, dest, dest);
+}
+
+void
+MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_ASSERT(srcDest == eax);
+
+    // Extend eax into (edx:eax): idiv/udiv take a 64-bit dividend.
+    // cdq sign-extends for the signed case; edx is zeroed for the unsigned one.
+ if (isUnsigned) {
+ mov(ImmWord(0), edx);
+ udiv(rhs);
+ } else {
+ cdq();
+ idiv(rhs);
+ }
+}
+
+void
+MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ MOZ_ASSERT(srcDest == eax);
+
+    // Extend eax into (edx:eax): idiv/udiv take a 64-bit dividend.
+    // cdq sign-extends for the signed case; edx is zeroed for the unsigned one.
+ if (isUnsigned) {
+ mov(ImmWord(0), edx);
+ udiv(rhs);
+ } else {
+ cdq();
+ idiv(rhs);
+ }
+ mov(edx, eax);
+}
+
+void
+MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
+{
+ vdivss(src, dest, dest);
+}
+
+void
+MacroAssembler::divDouble(FloatRegister src, FloatRegister dest)
+{
+ vdivsd(src, dest, dest);
+}
+
+void
+MacroAssembler::neg32(Register reg)
+{
+ negl(reg);
+}
+
+void
+MacroAssembler::negateFloat(FloatRegister reg)
+{
+ ScratchFloat32Scope scratch(*this);
+ vpcmpeqw(Operand(scratch), scratch, scratch);
+ vpsllq(Imm32(31), scratch, scratch);
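+    // The two instructions above build the sign-bit mask without a memory
+    // load: pcmpeqw of a register with itself yields all ones, and shifting
+    // each 64-bit lane left by 31 leaves 0x80000000 in its low 32 bits, i.e.
+    // the float sign bit.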
+
+ // XOR the float in a float register with -0.0.
+ vxorps(scratch, reg, reg); // s ^ 0x80000000
+}
+
+void
+MacroAssembler::negateDouble(FloatRegister reg)
+{
+ // From MacroAssemblerX86Shared::maybeInlineDouble
+ ScratchDoubleScope scratch(*this);
+ vpcmpeqw(Operand(scratch), scratch, scratch);
+ vpsllq(Imm32(63), scratch, scratch);
+
+    // XOR the double in a float register with -0.0.
+    vxorpd(scratch, reg, reg); // s ^ 0x8000000000000000
+}
+
+void
+MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
+{
+ ScratchFloat32Scope scratch(*this);
+ loadConstantFloat32(mozilla::SpecificNaN<float>(0, mozilla::FloatingPoint<float>::kSignificandBits), scratch);
+ vandps(scratch, src, dest);
+}
+
+void
+MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
+{
+ ScratchDoubleScope scratch(*this);
+ loadConstantDouble(mozilla::SpecificNaN<double>(0, mozilla::FloatingPoint<double>::kSignificandBits), scratch);
+ vandpd(scratch, src, dest);
+}
+
+void
+MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
+{
+ vsqrtss(src, src, dest);
+}
+
+void
+MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
+{
+ vsqrtsd(src, src, dest);
+}
+
+void
+MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void
+MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Rotation instructions
+void
+MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
+{
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ count.value &= 0x1f;
+ if (count.value)
+ roll(count, input);
+}
+
+void
+MacroAssembler::rotateLeft(Register count, Register input, Register dest)
+{
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+ roll_cl(input);
+}
+
+void
+MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
+{
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ count.value &= 0x1f;
+ if (count.value)
+ rorl(count, input);
+}
+
+void
+MacroAssembler::rotateRight(Register count, Register input, Register dest)
+{
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+ rorl_cl(input);
+}
+
+// ===============================================================
+// Shift instructions
+
+void
+MacroAssembler::lshift32(Register shift, Register srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ shll_cl(srcDest);
+}
+
+void
+MacroAssembler::rshift32(Register shift, Register srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ shrl_cl(srcDest);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Register shift, Register srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ sarl_cl(srcDest);
+}
+
+void
+MacroAssembler::lshift32(Imm32 shift, Register srcDest)
+{
+ shll(shift, srcDest);
+}
+
+void
+MacroAssembler::rshift32(Imm32 shift, Register srcDest)
+{
+ shrl(shift, srcDest);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Imm32 shift, Register srcDest)
+{
+ sarl(shift, srcDest);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch instructions
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label)
+{
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Register rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Operand& lhs, Register rhs, Label* label)
+{
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label)
+{
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ cmpPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+template <typename T, typename S, typename L>
+void
+MacroAssembler::branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
+{
+ cmpPtr(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
+{
+ cmpPtr(lhs, rhs);
+ return jumpWithPatch(label, cond);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
+{
+ cmpPtr(lhs, rhs);
+ return jumpWithPatch(label, cond);
+}
+
+void
+MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareFloat(cond, lhs, rhs);
+
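+    // The compare above sets the x86 parity flag when the operands are
+    // unordered (either one is a NaN). DoubleEqual must not be taken in that
+    // case, while DoubleNotEqualOrUnordered must be, hence the explicit
+    // Parity checks below.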
+ if (cond == DoubleEqual) {
+ Label unordered;
+ j(Parity, &unordered);
+ j(Equal, label);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleNotEqualOrUnordered) {
+ j(NotEqual, label);
+ j(Parity, label);
+ return;
+ }
+
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ j(ConditionFromDoubleCondition(cond), label);
+}
+
+void
+MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ compareDouble(cond, lhs, rhs);
+
+ if (cond == DoubleEqual) {
+ Label unordered;
+ j(Parity, &unordered);
+ j(Equal, label);
+ bind(&unordered);
+ return;
+ }
+ if (cond == DoubleNotEqualOrUnordered) {
+ j(NotEqual, label);
+ j(Parity, label);
+ return;
+ }
+
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ j(ConditionFromDoubleCondition(cond), label);
+}
+
+template <typename T, typename L>
+void
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
+{
+ addl(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
+{
+ subl(src, dest);
+ j(cond, label);
+}
+
+void
+MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ subPtr(rhs, lhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ test32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ test32(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ testPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ testPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ testPtr(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testUndefined(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestInt32Impl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestInt32Impl(Condition cond, const T& t, Label* label)
+{
+ cond = testInt32(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition cond = testInt32Truthy(truthy, value);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testDouble(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label)
+{
+ Condition cond = testDoubleTruthy(truthy, reg);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNumberImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testNumber(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestBooleanImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testBoolean(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, Register tag, Label* label)
+{
+ branchTestStringImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestStringImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestStringImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testString(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ Condition cond = testStringTruthy(truthy, value);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label)
+{
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testSymbol(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label)
+{
+ branchTestNullImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestNullImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestNullImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testNull(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label)
+{
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestObjectImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestObjectImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testObject(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testGCThing(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
+{
+ cond = testPrimitive(cond, t);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label)
+{
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void
+MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label)
+{
+ cond = testMagic(cond, t);
+ j(cond, label);
+}
+
+// ========================================================================
+// Canonicalization primitives.
+void
+MacroAssembler::canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
+{
+ ScratchSimd128Scope scratch2(*this);
+
+ MOZ_ASSERT(scratch.asSimd128() != scratch2.asSimd128());
+ MOZ_ASSERT(reg.asSimd128() != scratch2.asSimd128());
+ MOZ_ASSERT(reg.asSimd128() != scratch.asSimd128());
+
+ FloatRegister mask = scratch;
+ vcmpordps(Operand(reg), reg, mask);
+
+ FloatRegister ifFalse = scratch2;
+ float nanf = float(JS::GenericNaN());
+ loadConstantSimd128Float(SimdConstant::SplatX4(nanf), ifFalse);
+
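+ // At this point mask has all-ones lanes exactly where reg is ordered (not
+ // NaN). The three ops below keep those lanes of reg, select the canonical
+ // NaN for the unordered lanes, and merge the two:
+ //   reg &= mask;  mask = ~mask & ifFalse;  reg |= mask;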
+ bitwiseAndSimd128(Operand(mask), reg);
+ bitwiseAndNotSimd128(Operand(ifFalse), mask);
+ bitwiseOrSimd128(Operand(mask), reg);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+{
+ vmovsd(src, dest);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
+{
+ vmovsd(src, dest);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
+{
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ storeUncanonicalizedDouble(src, dest.toAddress());
+ break;
+ case Operand::MEM_SCALE:
+ storeUncanonicalizedDouble(src, dest.toBaseIndex());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+}
+
+template void MacroAssembler::storeDouble(FloatRegister src, const Operand& dest);
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& dest)
+{
+ vmovss(src, dest);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& dest)
+{
+ vmovss(src, dest);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Operand& dest)
+{
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ storeUncanonicalizedFloat32(src, dest.toAddress());
+ break;
+ case Operand::MEM_SCALE:
+ storeUncanonicalizedFloat32(src, dest.toBaseIndex());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+}
+
+template void MacroAssembler::storeFloat32(FloatRegister src, const Operand& dest);
+
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
+{
+ Address destZ(dest);
+ destZ.offset += 2 * sizeof(int32_t);
+ storeDouble(src, dest);
+ ScratchSimd128Scope scratch(*this);
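+ // vmovhlps copies the high 64 bits of src (which contain lane 2, the z
+ // component) into the low half of scratch, so z can be written below with a
+ // scalar float store.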
+ vmovhlps(src, scratch, scratch);
+ storeFloat32(scratch, destZ);
+}
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
+{
+ BaseIndex destZ(dest);
+ destZ.offset += 2 * sizeof(int32_t);
+ storeDouble(src, dest);
+ ScratchSimd128Scope scratch(*this);
+ vmovhlps(src, scratch, scratch);
+ storeFloat32(scratch, destZ);
+}
+
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
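+ // x86's memory model (TSO) only allows store-load reordering, so only a
+ // StoreLoad barrier needs an actual fence; the other barrier bits require
+ // nothing here.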
+ if (barrier & MembarStoreLoad)
+ storeLoadFence();
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void
+MacroAssembler::truncateFloat32ToInt64(Address src, Address dest, Register temp)
+{
+ if (Assembler::HasSSE3()) {
+ fld32(Operand(src));
+ fisttp(Operand(dest));
+ return;
+ }
+
+ if (src.base == esp)
+ src.offset += 2 * sizeof(int32_t);
+ if (dest.base == esp)
+ dest.offset += 2 * sizeof(int32_t);
+
+ reserveStack(2 * sizeof(int32_t));
+
+ // Set conversion to truncation.
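+ // 0xCFF sets the rounding-control field (bits 10-11) of the x87 control
+ // word to 0b11 (round toward zero) and masks all FPU exceptions in the low
+ // byte, so the fistp below truncates regardless of the current rounding
+ // mode.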
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ andl(Imm32(~0xFF00), temp);
+ orl(Imm32(0xCFF), temp);
+ store32(temp, Address(esp, sizeof(int32_t)));
+ fldcw(Operand(esp, sizeof(int32_t)));
+
+ // Load the float on the FP stack, convert it, and store the result to the regular stack.
+ fld32(Operand(src));
+ fistp(Operand(dest));
+
+ // Restore the original FPU control word.
+ fldcw(Operand(esp, 0));
+
+ freeStack(2 * sizeof(int32_t));
+}
+void
+MacroAssembler::truncateDoubleToInt64(Address src, Address dest, Register temp)
+{
+ if (Assembler::HasSSE3()) {
+ fld(Operand(src));
+ fisttp(Operand(dest));
+ return;
+ }
+
+ if (src.base == esp)
+ src.offset += 2*sizeof(int32_t);
+ if (dest.base == esp)
+ dest.offset += 2*sizeof(int32_t);
+
+ reserveStack(2*sizeof(int32_t));
+
+ // Set conversion to truncation.
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ andl(Imm32(~0xFF00), temp);
+ orl(Imm32(0xCFF), temp);
+ store32(temp, Address(esp, 1*sizeof(int32_t)));
+ fldcw(Operand(esp, 1*sizeof(int32_t)));
+
+ // Load the double on the FP stack, convert it, and store the result to the regular stack.
+ fld(Operand(src));
+ fistp(Operand(dest));
+
+ // Restore the original FPU control word.
+ fldcw(Operand(esp, 0));
+
+ freeStack(2*sizeof(int32_t));
+}
+
+// ===============================================================
+// Clamping functions.
+
+void
+MacroAssembler::clampIntToUint8(Register reg)
+{
+ Label inRange;
+ branchTest32(Assembler::Zero, reg, Imm32(0xffffff00), &inRange);
+ {
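+ // The value is out of [0, 255], so its sign selects the clamp: for a
+ // negative input, sarl gives 0xffffffff, notl gives 0, and the mask yields
+ // 0; for an input above 255, sarl gives 0, notl gives 0xffffffff, and the
+ // mask yields 255.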
+ sarl(Imm32(31), reg);
+ notl(reg);
+ andl(Imm32(255), reg);
+ }
+ bind(&inRange);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_MacroAssembler_x86_shared_inl_h */
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
new file mode 100644
index 000000000..7d86e8edf
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -0,0 +1,855 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+ ScratchDoubleScope scratch(*this);
+ MOZ_ASSERT(input != scratch);
+ Label positive, done;
+
+ // <= 0 or NaN --> 0
+ zeroDouble(scratch);
+ branchDouble(DoubleGreaterThan, input, scratch, &positive);
+ {
+ move32(Imm32(0), output);
+ jump(&done);
+ }
+
+ bind(&positive);
+
+ // Add 0.5 and truncate.
+ loadConstantDouble(0.5, scratch);
+ addDouble(scratch, input);
+
+ Label outOfRange;
+
+ // Truncate to int32 and ensure the result <= 255. This relies on the
+ // processor setting output to a value > 255 for doubles outside the int32
+ // range (for instance 0x80000000).
+ vcvttsd2si(input, output);
+ branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+ {
+ // Check if we had a tie.
+ convertInt32ToDouble(output, scratch);
+ branchDouble(DoubleNotEqual, input, scratch, &done);
+
+ // It was a tie. Mask out the ones bit to get an even value.
+ // See also js_TypedArray_uint8_clamp_double.
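+ // E.g. 2.5 + 0.5 = 3.0 converts back exactly, so the tie is detected and
+ // 3 & ~1 = 2 (round-half-to-even), whereas 2.6 + 0.5 = 3.1 does not, so
+ // the truncated value 3 is kept.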
+ and32(Imm32(~1), output);
+ jump(&done);
+ }
+
+ // > 255 --> 255
+ bind(&outOfRange);
+ {
+ move32(Imm32(255), output);
+ }
+
+ bind(&done);
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+ // Exists for MIPS compatibility.
+}
+
+bool
+MacroAssemblerX86Shared::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+ uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+ asMasm().Push(Imm32(descriptor));
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ return true;
+}
+
+void
+MacroAssemblerX86Shared::branchNegativeZero(FloatRegister reg,
+ Register scratch,
+ Label* label,
+ bool maybeNonZero)
+{
+ // Determines whether the low double contained in the XMM register reg
+ // is equal to -0.0.
+
+#if defined(JS_CODEGEN_X86)
+ Label nonZero;
+
+ // if not already compared to zero
+ if (maybeNonZero) {
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // Compare to zero. Lets through {0, -0}.
+ zeroDouble(scratchDouble);
+
+ // If reg is non-zero, jump to nonZero.
+ asMasm().branchDouble(DoubleNotEqual, reg, scratchDouble, &nonZero);
+ }
+ // Input register is either zero or negative zero. Retrieve sign of input.
+ vmovmskpd(reg, scratch);
+
+ // vmovmskpd packs the sign bits of both double lanes into scratch; bit 0 is
+ // the sign of the low double. If scratch is 1 or 3, the input is negative
+ // zero; if it is 0 or 2, the input is an ordinary zero.
+ asMasm().branchTest32(NonZero, scratch, Imm32(1), label);
+
+ bind(&nonZero);
+#elif defined(JS_CODEGEN_X64)
+ vmovq(reg, scratch);
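+ // Only the bit pattern of -0.0 (0x8000000000000000, i.e. INT64_MIN)
+ // overflows when 1 is subtracted from it, so OF is set exactly for -0.0.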
+ cmpq(Imm32(1), scratch);
+ j(Overflow, label);
+#endif
+}
+
+void
+MacroAssemblerX86Shared::branchNegativeZeroFloat32(FloatRegister reg,
+ Register scratch,
+ Label* label)
+{
+ vmovd(reg, scratch);
+ cmp32(scratch, Imm32(1));
+ j(Overflow, label);
+}
+
+MacroAssembler&
+MacroAssemblerX86Shared::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerX86Shared::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+template<typename T>
+void
+MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, temp);
+ asMasm().convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+template void
+MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, temp);
+ asMasm().convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, AnyRegister output);
+template void
+MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, AnyRegister output);
+
+template<class T, class Map>
+T*
+MacroAssemblerX86Shared::getConstant(const typename T::Pod& value, Map& map,
+ Vector<T, 0, SystemAllocPolicy>& vec)
+{
+ typedef typename Map::AddPtr AddPtr;
+ if (!map.initialized()) {
+ enoughMemory_ &= map.init();
+ if (!enoughMemory_)
+ return nullptr;
+ }
+ size_t index;
+ if (AddPtr p = map.lookupForAdd(value)) {
+ index = p->value();
+ } else {
+ index = vec.length();
+ enoughMemory_ &= vec.append(T(value));
+ if (!enoughMemory_)
+ return nullptr;
+ enoughMemory_ &= map.add(p, value, index);
+ if (!enoughMemory_)
+ return nullptr;
+ }
+ return &vec[index];
+}
+
+MacroAssemblerX86Shared::Float*
+MacroAssemblerX86Shared::getFloat(wasm::RawF32 f)
+{
+ return getConstant<Float, FloatMap>(f.bits(), floatMap_, floats_);
+}
+
+MacroAssemblerX86Shared::Double*
+MacroAssemblerX86Shared::getDouble(wasm::RawF64 d)
+{
+ return getConstant<Double, DoubleMap>(d.bits(), doubleMap_, doubles_);
+}
+
+MacroAssemblerX86Shared::SimdData*
+MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
+{
+ return getConstant<SimdData, SimdMap>(v, simdMap_, simds_);
+}
+
+template<class T, class Map>
+static bool
+MergeConstants(size_t delta, const Vector<T, 0, SystemAllocPolicy>& other,
+ Map& map, Vector<T, 0, SystemAllocPolicy>& vec)
+{
+ typedef typename Map::AddPtr AddPtr;
+ if (!map.initialized() && !map.init())
+ return false;
+
+ for (const T& c : other) {
+ size_t index;
+ if (AddPtr p = map.lookupForAdd(c.value)) {
+ index = p->value();
+ } else {
+ index = vec.length();
+ if (!vec.append(T(c.value)) || !map.add(p, c.value, index))
+ return false;
+ }
+ MacroAssemblerX86Shared::UsesVector& uses = vec[index].uses;
+ for (CodeOffset use : c.uses) {
+ use.offsetBy(delta);
+ if (!uses.append(use))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+MacroAssemblerX86Shared::asmMergeWith(const MacroAssemblerX86Shared& other)
+{
+ size_t sizeBefore = masm.size();
+ if (!Assembler::asmMergeWith(other))
+ return false;
+ if (!MergeConstants<Double, DoubleMap>(sizeBefore, other.doubles_, doubleMap_, doubles_))
+ return false;
+ if (!MergeConstants<Float, FloatMap>(sizeBefore, other.floats_, floatMap_, floats_))
+ return false;
+ if (!MergeConstants<SimdData, SimdMap>(sizeBefore, other.simds_, simdMap_, simds_))
+ return false;
+ return true;
+}
+
+void
+MacroAssemblerX86Shared::minMaxDouble(FloatRegister first, FloatRegister second, bool canBeNaN,
+ bool isMax)
+{
+ Label done, nan, minMaxInst;
+
+ // Do a vucomisd to catch equality and NaNs, which both require special
+ // handling. If the operands are ordered and inequal, we branch straight to
+ // the min/max instruction. If we wanted, we could also branch for less-than
+ // or greater-than here instead of using min/max; however, these conditions
+ // will sometimes be hard on the branch predictor.
+ vucomisd(second, first);
+ j(Assembler::NotEqual, &minMaxInst);
+ if (canBeNaN)
+ j(Assembler::Parity, &nan);
+
+ // Ordered and equal. The operands are bit-identical unless they are zero
+ // and negative zero. These instructions merge the sign bits in that
+ // case, and are no-ops otherwise.
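+ // E.g. for +0 and -0 only the sign bit differs: AND clears it, giving +0
+ // (the required max), while OR sets it, giving -0 (the required min).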
+ if (isMax)
+ vandpd(second, first, first);
+ else
+ vorpd(second, first, first);
+ jump(&done);
+
+ // x86's min/max are not symmetric; if either operand is a NaN, they return
+ // the read-only operand. We need to return a NaN if either operand is a
+ // NaN, so we explicitly check for a NaN in the read-write operand.
+ if (canBeNaN) {
+ bind(&nan);
+ vucomisd(first, first);
+ j(Assembler::Parity, &done);
+ }
+
+ // When the values are inequal, or second is NaN, x86's min and max will
+ // return the value we need.
+ bind(&minMaxInst);
+ if (isMax)
+ vmaxsd(second, first, first);
+ else
+ vminsd(second, first, first);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerX86Shared::minMaxFloat32(FloatRegister first, FloatRegister second, bool canBeNaN,
+ bool isMax)
+{
+ Label done, nan, minMaxInst;
+
+ // Do a vucomiss to catch equality and NaNs, which both require special
+ // handling. If the operands are ordered and inequal, we branch straight to
+ // the min/max instruction. If we wanted, we could also branch for less-than
+ // or greater-than here instead of using min/max; however, these conditions
+ // will sometimes be hard on the branch predictor.
+ vucomiss(second, first);
+ j(Assembler::NotEqual, &minMaxInst);
+ if (canBeNaN)
+ j(Assembler::Parity, &nan);
+
+ // Ordered and equal. The operands are bit-identical unless they are zero
+ // and negative zero. These instructions merge the sign bits in that
+ // case, and are no-ops otherwise.
+ if (isMax)
+ vandps(second, first, first);
+ else
+ vorps(second, first, first);
+ jump(&done);
+
+ // x86's min/max are not symmetric; if either operand is a NaN, they return
+ // the read-only operand. We need to return a NaN if either operand is a
+ // NaN, so we explicitly check for a NaN in the read-write operand.
+ if (canBeNaN) {
+ bind(&nan);
+ vucomiss(first, first);
+ j(Assembler::Parity, &done);
+ }
+
+ // When the values are inequal, or second is NaN, x86's min and max will
+ // return the value we need.
+ bind(&minMaxInst);
+ if (isMax)
+ vmaxss(second, first, first);
+ else
+ vminss(second, first, first);
+
+ bind(&done);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void
+MacroAssembler::flush()
+{
+}
+
+void
+MacroAssembler::comment(const char* msg)
+{
+ masm.comment(msg);
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ // On x86, always use push to push the integer registers, as it's fast
+ // on modern hardware and it's a small instruction.
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ Push(*iter);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ reserveStack(diffF);
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ Address spillAddress(StackPointer, diffF);
+ if (reg.isDouble())
+ storeDouble(reg, spillAddress);
+ else if (reg.isSingle())
+ storeFloat32(reg, spillAddress);
+ else if (reg.isSimd128())
+ storeUnalignedSimd128Float(reg, spillAddress);
+ else
+ MOZ_CRASH("Unknown register type.");
+ }
+ MOZ_ASSERT(numFpu == 0);
+ // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
+ // FloatRegisterSet::getPushSizeInBytes.
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ if (ignore.has(reg))
+ continue;
+
+ Address spillAddress(StackPointer, diffF);
+ if (reg.isDouble())
+ loadDouble(spillAddress, reg);
+ else if (reg.isSingle())
+ loadFloat32(spillAddress, reg);
+ else if (reg.isSimd128())
+ loadUnalignedSimd128Float(spillAddress, reg);
+ else
+ MOZ_CRASH("Unknown register type.");
+ }
+ freeStack(reservedF);
+ MOZ_ASSERT(numFpu == 0);
+ // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
+ // FloatRegisterSet::getPushSizeInBytes.
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+
+ // On x86, use pop to pop the integer registers, if we're not going to
+ // ignore any slots, as it's fast on modern hardware and it's a small
+ // instruction.
+ if (ignore.emptyGeneral()) {
+ for (GeneralRegisterForwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ Pop(*iter);
+ }
+ } else {
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ freeStack(reservedG);
+ }
+ MOZ_ASSERT(diffG == 0);
+}
+
+void
+MacroAssembler::Push(const Operand op)
+{
+ push(op);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(Register reg)
+{
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const Imm32 imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmWord imm)
+{
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmPtr imm)
+{
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssembler::Push(const ImmGCPtr ptr)
+{
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(FloatRegister t)
+{
+ push(t);
+ adjustFrame(sizeof(double));
+}
+
+void
+MacroAssembler::Pop(const Operand op)
+{
+ pop(op);
+ implicitPop(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Pop(Register reg)
+{
+ pop(reg);
+ implicitPop(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Pop(FloatRegister reg)
+{
+ pop(reg);
+ implicitPop(sizeof(double));
+}
+
+void
+MacroAssembler::Pop(const ValueOperand& val)
+{
+ popValue(val);
+ implicitPop(sizeof(Value));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset
+MacroAssembler::call(Register reg)
+{
+ return Assembler::call(reg);
+}
+
+CodeOffset
+MacroAssembler::call(Label* label)
+{
+ return Assembler::call(label);
+}
+
+void
+MacroAssembler::call(const Address& addr)
+{
+ Assembler::call(Operand(addr.base, addr.offset));
+}
+
+void
+MacroAssembler::call(wasm::SymbolicAddress target)
+{
+ mov(target, eax);
+ Assembler::call(eax);
+}
+
+void
+MacroAssembler::call(ImmWord target)
+{
+ Assembler::call(target);
+}
+
+void
+MacroAssembler::call(ImmPtr target)
+{
+ Assembler::call(target);
+}
+
+void
+MacroAssembler::call(JitCode* target)
+{
+ Assembler::call(target);
+}
+
+CodeOffset
+MacroAssembler::callWithPatch()
+{
+ return Assembler::callWithPatch();
+}
+void
+MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
+{
+ Assembler::patchCall(callerOffset, calleeOffset);
+}
+
+void
+MacroAssembler::callAndPushReturnAddress(Register reg)
+{
+ call(reg);
+}
+
+void
+MacroAssembler::callAndPushReturnAddress(Label* label)
+{
+ call(label);
+}
+
+// ===============================================================
+// Patchable near/far jumps.
+
+CodeOffset
+MacroAssembler::farJumpWithPatch()
+{
+ return Assembler::farJumpWithPatch();
+}
+
+void
+MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
+{
+ Assembler::patchFarJump(farJump, targetOffset);
+}
+
+void
+MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
+{
+ Assembler::repatchFarJump(code, farJumpOffset, targetOffset);
+}
+
+CodeOffset
+MacroAssembler::nopPatchableToNearJump()
+{
+ return Assembler::twoByteNop();
+}
+
+void
+MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
+{
+ Assembler::patchTwoByteNopToJump(jump, target);
+}
+
+void
+MacroAssembler::patchNearJumpToNop(uint8_t* jump)
+{
+ Assembler::patchJumpToTwoByteNop(jump);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::pushFakeReturnAddress(Register scratch)
+{
+ CodeLabel cl;
+
+ mov(cl.patchAt(), scratch);
+ Push(scratch);
+ use(cl.target());
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+// Wasm-specific methods, used in both the wasm baseline compiler and Ion.
+
+// RAII class that generates the jumps to traps when it's destructed, to
+// prevent some code duplication in the outOfLineWasmTruncateXtoY methods.
+struct MOZ_RAII AutoHandleWasmTruncateToIntErrors
+{
+ MacroAssembler& masm;
+ Label inputIsNaN;
+ Label fail;
+ wasm::TrapOffset off;
+
+ explicit AutoHandleWasmTruncateToIntErrors(MacroAssembler& masm, wasm::TrapOffset off)
+ : masm(masm), off(off)
+ { }
+
+ ~AutoHandleWasmTruncateToIntErrors() {
+ // Handle errors.
+ masm.bind(&fail);
+ masm.jump(wasm::TrapDesc(off, wasm::Trap::IntegerOverflow, masm.framePushed()));
+
+ masm.bind(&inputIsNaN);
+ masm.jump(wasm::TrapDesc(off, wasm::Trap::InvalidConversionToInteger, masm.framePushed()));
+ }
+};
+
+void
+MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry)
+{
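+ // vcvttsd2si produces the integer indefinite value 0x80000000 (INT32_MIN)
+ // when the input is NaN or out of range, and that is the only result for
+ // which subtracting 1 overflows; the out-of-line path then decides whether
+ // the input was a genuine INT32_MIN or an error.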
+ vcvttsd2si(input, output);
+ cmp32(output, Imm32(1));
+ j(Assembler::Overflow, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ vcvttss2si(input, output);
+ cmp32(output, Imm32(1));
+ j(Assembler::Overflow, oolEntry);
+}
+
+void
+MacroAssembler::outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned,
+ wasm::TrapOffset off, Label* rejoin)
+{
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values (not needed for unsigned values).
+ if (isUnsigned)
+ return;
+
+ // We've used vcvttsd2si. The only valid double values that can
+ // truncate to INT32_MIN are in the interval (INT32_MIN - 1, INT32_MIN].
+ loadConstantDouble(double(INT32_MIN) - 1.0, ScratchDoubleReg);
+ branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &traps.fail);
+
+ loadConstantDouble(double(INT32_MIN), ScratchDoubleReg);
+ branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &traps.fail);
+ jump(rejoin);
+}
+
+void
+MacroAssembler::outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned,
+ wasm::TrapOffset off, Label* rejoin)
+{
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values (not needed for unsigned values).
+ if (isUnsigned)
+ return;
+
+ // We've used vcvttss2si. Check that the input wasn't
+ // float(INT32_MIN), which is the only legitimate input that
+ // would truncate to INT32_MIN.
+ loadConstantFloat32(float(INT32_MIN), ScratchFloat32Reg);
+ branchFloat(Assembler::DoubleNotEqual, input, ScratchFloat32Reg, &traps.fail);
+ jump(rejoin);
+}
+
+void
+MacroAssembler::outOfLineWasmTruncateDoubleToInt64(FloatRegister input, bool isUnsigned,
+ wasm::TrapOffset off, Label* rejoin)
+{
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values.
+ if (isUnsigned) {
+ loadConstantDouble(-0.0, ScratchDoubleReg);
+ branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &traps.fail);
+ loadConstantDouble(-1.0, ScratchDoubleReg);
+ branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &traps.fail);
+ jump(rejoin);
+ return;
+ }
+
+ // We've used vcvttsd2sq. The only legitimate value whose int64 truncation
+ // is INT64_MIN is double(INT64_MIN): the exponent is so high that the
+ // spacing between representable doubles there is much greater than 1.
+ loadConstantDouble(double(int64_t(INT64_MIN)), ScratchDoubleReg);
+ branchDouble(Assembler::DoubleNotEqual, input, ScratchDoubleReg, &traps.fail);
+ jump(rejoin);
+}
+
+void
+MacroAssembler::outOfLineWasmTruncateFloat32ToInt64(FloatRegister input, bool isUnsigned,
+ wasm::TrapOffset off, Label* rejoin)
+{
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values.
+ if (isUnsigned) {
+ loadConstantFloat32(-0.0f, ScratchFloat32Reg);
+ branchFloat(Assembler::DoubleGreaterThan, input, ScratchFloat32Reg, &traps.fail);
+ loadConstantFloat32(-1.0f, ScratchFloat32Reg);
+ branchFloat(Assembler::DoubleLessThanOrEqual, input, ScratchFloat32Reg, &traps.fail);
+ jump(rejoin);
+ return;
+ }
+
+ // We've used vcvttss2sq. See the comment in outOfLineWasmTruncateDoubleToInt64.
+ loadConstantFloat32(float(int64_t(INT64_MIN)), ScratchFloat32Reg);
+ branchFloat(Assembler::DoubleNotEqual, input, ScratchFloat32Reg, &traps.fail);
+ jump(rejoin);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
new file mode 100644
index 000000000..8a0e154f1
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -0,0 +1,1411 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_MacroAssembler_x86_shared_h
+#define jit_x86_shared_MacroAssembler_x86_shared_h
+
+#include "mozilla/Casting.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/Assembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/Assembler-x64.h"
+#endif
+
+#ifdef DEBUG
+ #define CHECK_BYTEREG(reg) \
+ JS_BEGIN_MACRO \
+ AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs); \
+ MOZ_ASSERT(byteRegs.has(reg)); \
+ JS_END_MACRO
+ #define CHECK_BYTEREGS(r1, r2) \
+ JS_BEGIN_MACRO \
+ AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs); \
+ MOZ_ASSERT(byteRegs.has(r1)); \
+ MOZ_ASSERT(byteRegs.has(r2)); \
+ JS_END_MACRO
+#else
+ #define CHECK_BYTEREG(reg) (void)0
+ #define CHECK_BYTEREGS(r1, r2) (void)0
+#endif
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+
+class MacroAssemblerX86Shared : public Assembler
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ public:
+ typedef Vector<CodeOffset, 0, SystemAllocPolicy> UsesVector;
+
+ protected:
+
+ // For Double, Float and SimdData, make the move ctors explicit so that MSVC
+ // knows what to use instead of copying these data structures.
+ template<class T>
+ struct Constant {
+ typedef T Pod;
+
+ T value;
+ UsesVector uses;
+
+ explicit Constant(const T& value) : value(value) {}
+ Constant(Constant<T>&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
+ explicit Constant(const Constant<T>&) = delete;
+ };
+
+ // Containers use SystemAllocPolicy since wasm releases memory after each
+ // function is compiled, and these need to live until after all functions
+ // are compiled.
+ using Double = Constant<uint64_t>;
+ Vector<Double, 0, SystemAllocPolicy> doubles_;
+ typedef HashMap<uint64_t, size_t, DefaultHasher<uint64_t>, SystemAllocPolicy> DoubleMap;
+ DoubleMap doubleMap_;
+
+ using Float = Constant<uint32_t>;
+ Vector<Float, 0, SystemAllocPolicy> floats_;
+ typedef HashMap<uint32_t, size_t, DefaultHasher<uint32_t>, SystemAllocPolicy> FloatMap;
+ FloatMap floatMap_;
+
+ struct SimdData : public Constant<SimdConstant> {
+ explicit SimdData(SimdConstant d) : Constant<SimdConstant>(d) {}
+ SimdData(SimdData&& d) : Constant<SimdConstant>(mozilla::Move(d)) {}
+ explicit SimdData(const SimdData&) = delete;
+ SimdConstant::Type type() const { return value.type(); }
+ };
+
+ Vector<SimdData, 0, SystemAllocPolicy> simds_;
+ typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
+ SimdMap simdMap_;
+
+ template<class T, class Map>
+ T* getConstant(const typename T::Pod& value, Map& map, Vector<T, 0, SystemAllocPolicy>& vec);
+
+ Float* getFloat(wasm::RawF32 f);
+ Double* getDouble(wasm::RawF64 d);
+ SimdData* getSimdData(const SimdConstant& v);
+
+ public:
+ using Assembler::call;
+
+ MacroAssemblerX86Shared()
+ { }
+
+ bool asmMergeWith(const MacroAssemblerX86Shared& other);
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, second).
+ // Checks for NaN if canBeNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister second, bool canBeNaN, bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister second, bool canBeNaN, bool isMax);
+
+ void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
+ if (cond & DoubleConditionBitInvert)
+ vucomisd(lhs, rhs);
+ else
+ vucomisd(rhs, lhs);
+ }
+
+ void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
+ if (cond & DoubleConditionBitInvert)
+ vucomiss(lhs, rhs);
+ else
+ vucomiss(rhs, lhs);
+ }
+
+ void branchNegativeZero(FloatRegister reg, Register scratch, Label* label, bool maybeNonZero = true);
+ void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label);
+
+ void move32(Imm32 imm, Register dest) {
+ // Use the ImmWord version of mov to register, which has special
+ // optimizations. Casting to uint32_t here ensures that the value
+ // is zero-extended.
+ mov(ImmWord(uint32_t(imm.value)), dest);
+ }
+ void move32(Imm32 imm, const Operand& dest) {
+ movl(imm, dest);
+ }
+ void move32(Register src, Register dest) {
+ movl(src, dest);
+ }
+ void move32(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+ void test32(Register lhs, Register rhs) {
+ testl(rhs, lhs);
+ }
+ void test32(const Address& addr, Imm32 imm) {
+ testl(imm, Operand(addr));
+ }
+ void test32(const Operand lhs, Imm32 imm) {
+ testl(imm, lhs);
+ }
+ void test32(Register lhs, Imm32 rhs) {
+ testl(rhs, lhs);
+ }
+ void cmp32(Register lhs, Imm32 rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmp32(Register lhs, Register rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmp32(const Address& lhs, Register rhs) {
+ cmp32(Operand(lhs), rhs);
+ }
+ void cmp32(const Address& lhs, Imm32 rhs) {
+ cmp32(Operand(lhs), rhs);
+ }
+ void cmp32(const Operand& lhs, Imm32 rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmp32(const Operand& lhs, Register rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmp32(Register lhs, const Operand& rhs) {
+ cmpl(rhs, lhs);
+ }
+ CodeOffset cmp32WithPatch(Register lhs, Imm32 rhs) {
+ return cmplWithPatch(rhs, lhs);
+ }
+ void atomic_inc32(const Operand& addr) {
+ lock_incl(addr);
+ }
+ void atomic_dec32(const Operand& addr) {
+ lock_decl(addr);
+ }
+
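+ // In the fetch-add/sub operations below, lock xadd writes the sum to
+ // memory and leaves the previous memory value in the source register;
+ // the trailing movs*/movz* extends that old value into the 32-bit output.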
+ template <typename T>
+ void atomicFetchAdd8SignExtend(Register src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREGS(src, output);
+ if (src != output)
+ movl(src, output);
+ lock_xaddb(output, Operand(mem));
+ movsbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd8ZeroExtend(Register src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREGS(src, output);
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ lock_xaddb(output, Operand(mem));
+ movzbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd8SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREG(output);
+ MOZ_ASSERT(temp == InvalidReg);
+ movb(src, output);
+ lock_xaddb(output, Operand(mem));
+ movsbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd8ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREG(output);
+ MOZ_ASSERT(temp == InvalidReg);
+ movb(src, output);
+ lock_xaddb(output, Operand(mem));
+ movzbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd16SignExtend(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ lock_xaddw(output, Operand(mem));
+ movswl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd16ZeroExtend(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ lock_xaddw(output, Operand(mem));
+ movzwl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd16SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ movl(src, output);
+ lock_xaddw(output, Operand(mem));
+ movswl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd16ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ movl(src, output);
+ lock_xaddw(output, Operand(mem));
+ movzwl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchAdd32(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ lock_xaddl(output, Operand(mem));
+ }
+
+ template <typename T>
+ void atomicFetchAdd32(Imm32 src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ movl(src, output);
+ lock_xaddl(output, Operand(mem));
+ }
+
+ template <typename T>
+ void atomicFetchSub8SignExtend(Register src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREGS(src, output);
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ negl(output);
+ lock_xaddb(output, Operand(mem));
+ movsbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub8ZeroExtend(Register src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREGS(src, output);
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ negl(output);
+ lock_xaddb(output, Operand(mem));
+ movzbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub8SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREG(output);
+ MOZ_ASSERT(temp == InvalidReg);
+ movb(Imm32(-src.value), output);
+ lock_xaddb(output, Operand(mem));
+ movsbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub8ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ CHECK_BYTEREG(output);
+ MOZ_ASSERT(temp == InvalidReg);
+ movb(Imm32(-src.value), output);
+ lock_xaddb(output, Operand(mem));
+ movzbl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub16SignExtend(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ negl(output);
+ lock_xaddw(output, Operand(mem));
+ movswl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub16ZeroExtend(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ negl(output);
+ lock_xaddw(output, Operand(mem));
+ movzwl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub16SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ movl(Imm32(-src.value), output);
+ lock_xaddw(output, Operand(mem));
+ movswl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub16ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ movl(Imm32(-src.value), output);
+ lock_xaddw(output, Operand(mem));
+ movzwl(output, output);
+ }
+
+ template <typename T>
+ void atomicFetchSub32(Register src, const T& mem, Register temp, Register output) {
+ MOZ_ASSERT(temp == InvalidReg);
+ if (src != output)
+ movl(src, output);
+ negl(output);
+ lock_xaddl(output, Operand(mem));
+ }
+
+ template <typename T>
+ void atomicFetchSub32(Imm32 src, const T& mem, Register temp, Register output) {
+ movl(Imm32(-src.value), output);
+ lock_xaddl(output, Operand(mem));
+ }
+
+ // requires output == eax
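+ // The expansion is a compare-and-swap loop: the current value is loaded
+ // into eax, OP is applied into temp, and lock cmpxchg writes temp back
+ // only if memory still equals eax; on failure eax is reloaded with the
+ // observed value and the loop retries.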
+#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG) \
+ MOZ_ASSERT(output == eax); \
+ LOAD(Operand(mem), eax); \
+ Label again; \
+ bind(&again); \
+ movl(eax, temp); \
+ OP(src, temp); \
+ LOCK_CMPXCHG(temp, Operand(mem)); \
+ j(NonZero, &again);
+
+ template <typename S, typename T>
+ void atomicFetchAnd8SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movsbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchAnd8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movzbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchAnd16SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw)
+ movswl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchAnd16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw)
+ movzwl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchAnd32(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchgl)
+ }
+
+ template <typename S, typename T>
+ void atomicFetchOr8SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movsbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchOr8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movzbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchOr16SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw)
+ movswl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchOr16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw)
+ movzwl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchOr32(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchgl)
+ }
+
+ template <typename S, typename T>
+ void atomicFetchXor8SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movsbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchXor8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb)
+ CHECK_BYTEREG(temp);
+ movzbl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchXor16SignExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw)
+ movswl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchXor16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw)
+ movzwl(eax, eax);
+ }
+ template <typename S, typename T>
+ void atomicFetchXor32(const S& src, const T& mem, Register temp, Register output) {
+ ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchgl)
+ }
+
+#undef ATOMIC_BITOP_BODY
+
+ // S is Register or Imm32; T is Address or BaseIndex.
+
+ template <typename S, typename T>
+ void atomicAdd8(const S& src, const T& mem) {
+ lock_addb(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicAdd16(const S& src, const T& mem) {
+ lock_addw(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicAdd32(const S& src, const T& mem) {
+ lock_addl(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicSub8(const S& src, const T& mem) {
+ lock_subb(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicSub16(const S& src, const T& mem) {
+ lock_subw(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicSub32(const S& src, const T& mem) {
+ lock_subl(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicAnd8(const S& src, const T& mem) {
+ lock_andb(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicAnd16(const S& src, const T& mem) {
+ lock_andw(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicAnd32(const S& src, const T& mem) {
+ lock_andl(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicOr8(const S& src, const T& mem) {
+ lock_orb(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicOr16(const S& src, const T& mem) {
+ lock_orw(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicOr32(const S& src, const T& mem) {
+ lock_orl(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicXor8(const S& src, const T& mem) {
+ lock_xorb(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicXor16(const S& src, const T& mem) {
+ lock_xorw(src, Operand(mem));
+ }
+ template <typename S, typename T>
+ void atomicXor32(const S& src, const T& mem) {
+ lock_xorl(src, Operand(mem));
+ }
+
+ void storeLoadFence() {
+ // This implementation follows Linux.
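+ // mfence requires SSE2. On older CPUs any lock-prefixed
+ // read-modify-write acts as a full barrier, so a locked add of zero
+ // to the top of the stack is used instead.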
+ if (HasSSE2())
+ masm.mfence();
+ else
+ lock_addl(Imm32(0), Operand(Address(esp, 0)));
+ }
+
+ void branch16(Condition cond, Register lhs, Register rhs, Label* label) {
+ cmpw(rhs, lhs);
+ j(cond, label);
+ }
+ void branchTest16(Condition cond, Register lhs, Register rhs, Label* label) {
+ testw(rhs, lhs);
+ j(cond, label);
+ }
+
+ void jump(Label* label) {
+ jmp(label);
+ }
+ void jump(JitCode* code) {
+ jmp(code);
+ }
+ void jump(RepatchLabel* label) {
+ jmp(label);
+ }
+ void jump(Register reg) {
+ jmp(Operand(reg));
+ }
+ void jump(const Address& addr) {
+ jmp(Operand(addr));
+ }
+ void jump(wasm::TrapDesc target) {
+ jmp(target);
+ }
+
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ // vcvtsi2sd and friends write only part of their output register, which
+ // causes slowdowns on out-of-order processors. Explicitly break
+ // dependencies with vxorpd (and vxorps elsewhere), which modern CPUs
+ // handle specially for this purpose. See sections 8.14, 9.8,
+ // 10.8, 12.9, 13.16, 14.14, and 15.8 of Agner's Microarchitecture
+ // document.
+ zeroDouble(dest);
+ vcvtsi2sd(src, dest, dest);
+ }
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ convertInt32ToDouble(Operand(src), dest);
+ }
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ convertInt32ToDouble(Operand(src), dest);
+ }
+ void convertInt32ToDouble(const Operand& src, FloatRegister dest) {
+ // Clear the output register first to break dependencies; see above.
+ zeroDouble(dest);
+ vcvtsi2sd(Operand(src), dest, dest);
+ }
+ void convertInt32ToFloat32(Register src, FloatRegister dest) {
+ // Clear the output register first to break dependencies; see above.
+ zeroFloat32(dest);
+ vcvtsi2ss(src, dest, dest);
+ }
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
+ convertInt32ToFloat32(Operand(src), dest);
+ }
+ void convertInt32ToFloat32(const Operand& src, FloatRegister dest) {
+ // Clear the output register first to break dependencies; see above.
+ zeroFloat32(dest);
+ vcvtsi2ss(src, dest, dest);
+ }
+ Condition testDoubleTruthy(bool truthy, FloatRegister reg) {
+ ScratchDoubleScope scratch(asMasm());
+ zeroDouble(scratch);
+ vucomisd(reg, scratch);
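+ // vucomisd sets ZF both when reg equals zero and when the comparison
+ // is unordered, so NaN as well as +/-0 is reported as falsy.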
+ return truthy ? NonZero : Zero;
+ }
+
+ // Class which ensures that registers used in byte ops are compatible with
+ // such instructions, even if the original register passed in wasn't. This
+ // only applies to x86, as on x64 all registers are valid single byte regs.
+ // This doesn't lead to great code but helps to simplify code generation.
+ //
+ // Note that this can currently only be used in cases where the register is
+ // read from by the guarded instruction, not written to.
+ class AutoEnsureByteRegister {
+ MacroAssemblerX86Shared* masm;
+ Register original_;
+ Register substitute_;
+
+ public:
+ template <typename T>
+ AutoEnsureByteRegister(MacroAssemblerX86Shared* masm, T address, Register reg)
+ : masm(masm), original_(reg)
+ {
+ AllocatableGeneralRegisterSet singleByteRegs(Registers::SingleByteRegs);
+ if (singleByteRegs.has(reg)) {
+ substitute_ = reg;
+ } else {
+ MOZ_ASSERT(address.base != StackPointer);
+ do {
+ substitute_ = singleByteRegs.takeAny();
+ } while (Operand(address).containsReg(substitute_));
+
+ masm->push(substitute_);
+ masm->mov(reg, substitute_);
+ }
+ }
+
+ ~AutoEnsureByteRegister() {
+ if (original_ != substitute_)
+ masm->pop(substitute_);
+ }
+
+ Register reg() {
+ return substitute_;
+ }
+ };
+
+ void load8ZeroExtend(const Operand& src, Register dest) {
+ movzbl(src, dest);
+ }
+ void load8ZeroExtend(const Address& src, Register dest) {
+ movzbl(Operand(src), dest);
+ }
+ void load8ZeroExtend(const BaseIndex& src, Register dest) {
+ movzbl(Operand(src), dest);
+ }
+ void load8SignExtend(const Operand& src, Register dest) {
+ movsbl(src, dest);
+ }
+ void load8SignExtend(const Address& src, Register dest) {
+ movsbl(Operand(src), dest);
+ }
+ void load8SignExtend(const BaseIndex& src, Register dest) {
+ movsbl(Operand(src), dest);
+ }
+ template <typename T>
+ void store8(Imm32 src, const T& dest) {
+ movb(src, Operand(dest));
+ }
+ template <typename T>
+ void store8(Register src, const T& dest) {
+ AutoEnsureByteRegister ensure(this, dest, src);
+ movb(ensure.reg(), Operand(dest));
+ }
+ template <typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(output == eax);
+ CHECK_BYTEREG(newval);
+ if (oldval != output)
+ movl(oldval, output);
+ lock_cmpxchgb(newval, Operand(mem));
+ movzbl(output, output);
+ }
+ template <typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(output == eax);
+ CHECK_BYTEREG(newval);
+ if (oldval != output)
+ movl(oldval, output);
+ lock_cmpxchgb(newval, Operand(mem));
+ movsbl(output, output);
+ }
+ template <typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
+ if (value != output)
+ movl(value, output);
+ xchgb(output, Operand(mem));
+ movzbl(output, output);
+ }
+ template <typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
+ if (value != output)
+ movl(value, output);
+ xchgb(output, Operand(mem));
+ movsbl(output, output);
+ }
+ void load16ZeroExtend(const Operand& src, Register dest) {
+ movzwl(src, dest);
+ }
+ void load16ZeroExtend(const Address& src, Register dest) {
+ movzwl(Operand(src), dest);
+ }
+ void load16ZeroExtend(const BaseIndex& src, Register dest) {
+ movzwl(Operand(src), dest);
+ }
+ template <typename S, typename T>
+ void store16(const S& src, const T& dest) {
+ movw(src, Operand(dest));
+ }
+ template <typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(output == eax);
+ if (oldval != output)
+ movl(oldval, output);
+ lock_cmpxchgw(newval, Operand(mem));
+ movzwl(output, output);
+ }
+ template <typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(output == eax);
+ if (oldval != output)
+ movl(oldval, output);
+ lock_cmpxchgw(newval, Operand(mem));
+ movswl(output, output);
+ }
+ template <typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
+ if (value != output)
+ movl(value, output);
+ xchgw(output, Operand(mem));
+ movzwl(output, output);
+ }
+ template <typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
+ if (value != output)
+ movl(value, output);
+ xchgw(output, Operand(mem));
+ movswl(output, output);
+ }
+ void load16SignExtend(const Operand& src, Register dest) {
+ movswl(src, dest);
+ }
+ void load16SignExtend(const Address& src, Register dest) {
+ movswl(Operand(src), dest);
+ }
+ void load16SignExtend(const BaseIndex& src, Register dest) {
+ movswl(Operand(src), dest);
+ }
+ void load32(const Address& address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void load32(const BaseIndex& src, Register dest) {
+ movl(Operand(src), dest);
+ }
+ void load32(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ template <typename S, typename T>
+ void store32(const S& src, const T& dest) {
+ movl(src, Operand(dest));
+ }
+ template <typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(output == eax);
+ if (oldval != output)
+ movl(oldval, output);
+ lock_cmpxchgl(newval, Operand(mem));
+ }
+ template <typename T>
+ void atomicExchange32(const T& mem, Register value, Register output) {
+ if (value != output)
+ movl(value, output);
+ xchgl(output, Operand(mem));
+ }
+ template <typename S, typename T>
+ void store32_NoSecondScratch(const S& src, const T& dest) {
+ store32(src, dest);
+ }
+ void loadDouble(const Address& src, FloatRegister dest) {
+ vmovsd(src, dest);
+ }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ vmovsd(src, dest);
+ }
+ void loadDouble(const Operand& src, FloatRegister dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ loadDouble(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ loadDouble(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ // Use vmovapd instead of vmovsd to avoid dependencies.
+ vmovapd(src, dest);
+ }
+ void zeroDouble(FloatRegister reg) {
+ vxorpd(reg, reg, reg);
+ }
+ void zeroFloat32(FloatRegister reg) {
+ vxorps(reg, reg, reg);
+ }
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
+ vcvtss2sd(src, dest, dest);
+ }
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
+ vcvtsd2ss(src, dest, dest);
+ }
+
+ void convertFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest) {
+ // Note that if the conversion fails (because the input is NaN, or the
+ // result would be larger than the maximum signed int32 or smaller than
+ // the least signed int32), this returns the integer indefinite value
+ // (0x80000000).
+ vcvttps2dq(src, dest);
+ }
+ void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest) {
+ vcvtdq2ps(src, dest);
+ }
+
+ void bitwiseAndSimd128(const Operand& src, FloatRegister dest) {
+ // TODO Using the "ps" variant for all types incurs a domain crossing
+ // penalty for integer types and double.
+ vandps(src, dest, dest);
+ }
+ void bitwiseAndNotSimd128(const Operand& src, FloatRegister dest) {
+ vandnps(src, dest, dest);
+ }
+ void bitwiseOrSimd128(const Operand& src, FloatRegister dest) {
+ vorps(src, dest, dest);
+ }
+ void bitwiseXorSimd128(const Operand& src, FloatRegister dest) {
+ vxorps(src, dest, dest);
+ }
+ void zeroSimd128Float(FloatRegister dest) {
+ vxorps(dest, dest, dest);
+ }
+ void zeroSimd128Int(FloatRegister dest) {
+ vpxor(dest, dest, dest);
+ }
+
+ template <class T, class Reg> inline void loadScalar(const Operand& src, Reg dest);
+ template <class T, class Reg> inline void storeScalar(Reg src, const Address& dest);
+ template <class T> inline void loadAlignedVector(const Address& src, FloatRegister dest);
+ template <class T> inline void storeAlignedVector(FloatRegister src, const Address& dest);
+
+ void loadInt32x1(const Address& src, FloatRegister dest) {
+ vmovd(Operand(src), dest);
+ }
+ void loadInt32x1(const BaseIndex& src, FloatRegister dest) {
+ vmovd(Operand(src), dest);
+ }
+ void loadInt32x2(const Address& src, FloatRegister dest) {
+ vmovq(Operand(src), dest);
+ }
+ void loadInt32x2(const BaseIndex& src, FloatRegister dest) {
+ vmovq(Operand(src), dest);
+ }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) {
+ BaseIndex srcZ(src);
+ srcZ.offset += 2 * sizeof(int32_t);
+
+ ScratchSimd128Scope scratch(asMasm());
+ vmovq(Operand(src), dest);
+ vmovd(Operand(srcZ), scratch);
+ vmovlhps(scratch, dest, dest);
+ }
+ void loadInt32x3(const Address& src, FloatRegister dest) {
+ Address srcZ(src);
+ srcZ.offset += 2 * sizeof(int32_t);
+
+ ScratchSimd128Scope scratch(asMasm());
+ vmovq(Operand(src), dest);
+ vmovd(Operand(srcZ), scratch);
+ vmovlhps(scratch, dest, dest);
+ }
+
+ void loadAlignedSimd128Int(const Address& src, FloatRegister dest) {
+ vmovdqa(Operand(src), dest);
+ }
+ void loadAlignedSimd128Int(const Operand& src, FloatRegister dest) {
+ vmovdqa(src, dest);
+ }
+ void storeAlignedSimd128Int(FloatRegister src, const Address& dest) {
+ vmovdqa(src, Operand(dest));
+ }
+ void moveSimd128Int(FloatRegister src, FloatRegister dest) {
+ vmovdqa(src, dest);
+ }
+ FloatRegister reusedInputInt32x4(FloatRegister src, FloatRegister dest) {
+ if (HasAVX())
+ return src;
+ moveSimd128Int(src, dest);
+ return dest;
+ }
+ FloatRegister reusedInputAlignedInt32x4(const Operand& src, FloatRegister dest) {
+ if (HasAVX() && src.kind() == Operand::FPREG)
+ return FloatRegister::FromCode(src.fpu());
+ loadAlignedSimd128Int(src, dest);
+ return dest;
+ }
+ void loadUnalignedSimd128Int(const Address& src, FloatRegister dest) {
+ vmovdqu(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Int(const BaseIndex& src, FloatRegister dest) {
+ vmovdqu(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Int(const Operand& src, FloatRegister dest) {
+ vmovdqu(src, dest);
+ }
+
+ void storeInt32x1(FloatRegister src, const Address& dest) {
+ vmovd(src, Operand(dest));
+ }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) {
+ vmovd(src, Operand(dest));
+ }
+ void storeInt32x2(FloatRegister src, const Address& dest) {
+ vmovq(src, Operand(dest));
+ }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) {
+ vmovq(src, Operand(dest));
+ }
+ void storeInt32x3(FloatRegister src, const Address& dest) {
+ Address destZ(dest);
+ destZ.offset += 2 * sizeof(int32_t);
+ vmovq(src, Operand(dest));
+ ScratchSimd128Scope scratch(asMasm());
+ vmovhlps(src, scratch, scratch);
+ vmovd(scratch, Operand(destZ));
+ }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) {
+ BaseIndex destZ(dest);
+ destZ.offset += 2 * sizeof(int32_t);
+ vmovq(src, Operand(dest));
+ ScratchSimd128Scope scratch(asMasm());
+ vmovhlps(src, scratch, scratch);
+ vmovd(scratch, Operand(destZ));
+ }
+
+ void storeUnalignedSimd128Int(FloatRegister src, const Address& dest) {
+ vmovdqu(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Int(FloatRegister src, const BaseIndex& dest) {
+ vmovdqu(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Int(FloatRegister src, const Operand& dest) {
+ vmovdqu(src, dest);
+ }
+ void packedEqualInt32x4(const Operand& src, FloatRegister dest) {
+ vpcmpeqd(src, dest, dest);
+ }
+ void packedGreaterThanInt32x4(const Operand& src, FloatRegister dest) {
+ vpcmpgtd(src, dest, dest);
+ }
+ void packedAddInt8(const Operand& src, FloatRegister dest) {
+ vpaddb(src, dest, dest);
+ }
+ void packedSubInt8(const Operand& src, FloatRegister dest) {
+ vpsubb(src, dest, dest);
+ }
+ void packedAddInt16(const Operand& src, FloatRegister dest) {
+ vpaddw(src, dest, dest);
+ }
+ void packedSubInt16(const Operand& src, FloatRegister dest) {
+ vpsubw(src, dest, dest);
+ }
+ void packedAddInt32(const Operand& src, FloatRegister dest) {
+ vpaddd(src, dest, dest);
+ }
+ void packedSubInt32(const Operand& src, FloatRegister dest) {
+ vpsubd(src, dest, dest);
+ }
+ void packedRcpApproximationFloat32x4(const Operand& src, FloatRegister dest) {
+ // This function only computes an approximation of the result; it may
+ // need fixing up if the spec requires a given precision for this
+ // operation. TODO: See also bug 1068028.
+ vrcpps(src, dest);
+ }
+ void packedRcpSqrtApproximationFloat32x4(const Operand& src, FloatRegister dest) {
+ // TODO See comment above. See also bug 1068028.
+ vrsqrtps(src, dest);
+ }
+ void packedSqrtFloat32x4(const Operand& src, FloatRegister dest) {
+ vsqrtps(src, dest);
+ }
+
+ void packedLeftShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+ vpsllw(src, dest, dest);
+ }
+ void packedLeftShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
+ vpsllw(count, dest, dest);
+ }
+ void packedRightShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+ vpsraw(src, dest, dest);
+ }
+ void packedRightShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
+ vpsraw(count, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+ vpsrlw(src, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
+ vpsrlw(count, dest, dest);
+ }
+
+ void packedLeftShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+ vpslld(src, dest, dest);
+ }
+ void packedLeftShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+ vpslld(count, dest, dest);
+ }
+ void packedRightShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+ vpsrad(src, dest, dest);
+ }
+ void packedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+ vpsrad(count, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+ vpsrld(src, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+ vpsrld(count, dest, dest);
+ }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) {
+ Address srcZ(src);
+ srcZ.offset += 2 * sizeof(float);
+ vmovsd(src, dest);
+ ScratchSimd128Scope scratch(asMasm());
+ vmovss(srcZ, scratch);
+ vmovlhps(scratch, dest, dest);
+ }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) {
+ BaseIndex srcZ(src);
+ srcZ.offset += 2 * sizeof(float);
+ vmovsd(src, dest);
+ ScratchSimd128Scope scratch(asMasm());
+ vmovss(srcZ, scratch);
+ vmovlhps(scratch, dest, dest);
+ }
+
+ void loadAlignedSimd128Float(const Address& src, FloatRegister dest) {
+ vmovaps(Operand(src), dest);
+ }
+ void loadAlignedSimd128Float(const Operand& src, FloatRegister dest) {
+ vmovaps(src, dest);
+ }
+
+ void storeAlignedSimd128Float(FloatRegister src, const Address& dest) {
+ vmovaps(src, Operand(dest));
+ }
+ void moveSimd128Float(FloatRegister src, FloatRegister dest) {
+ vmovaps(src, dest);
+ }
+ FloatRegister reusedInputFloat32x4(FloatRegister src, FloatRegister dest) {
+ if (HasAVX())
+ return src;
+ moveSimd128Float(src, dest);
+ return dest;
+ }
+ FloatRegister reusedInputAlignedFloat32x4(const Operand& src, FloatRegister dest) {
+ if (HasAVX() && src.kind() == Operand::FPREG)
+ return FloatRegister::FromCode(src.fpu());
+ loadAlignedSimd128Float(src, dest);
+ return dest;
+ }
+ void loadUnalignedSimd128Float(const Address& src, FloatRegister dest) {
+ vmovups(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Float(const BaseIndex& src, FloatRegister dest) {
+ vmovups(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Float(const Operand& src, FloatRegister dest) {
+ vmovups(src, dest);
+ }
+ void storeUnalignedSimd128Float(FloatRegister src, const Address& dest) {
+ vmovups(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Float(FloatRegister src, const BaseIndex& dest) {
+ vmovups(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Float(FloatRegister src, const Operand& dest) {
+ vmovups(src, dest);
+ }
+ void packedAddFloat32(const Operand& src, FloatRegister dest) {
+ vaddps(src, dest, dest);
+ }
+ void packedSubFloat32(const Operand& src, FloatRegister dest) {
+ vsubps(src, dest, dest);
+ }
+ void packedMulFloat32(const Operand& src, FloatRegister dest) {
+ vmulps(src, dest, dest);
+ }
+ void packedDivFloat32(const Operand& src, FloatRegister dest) {
+ vdivps(src, dest, dest);
+ }
+
+ static uint32_t ComputeShuffleMask(uint32_t x = 0, uint32_t y = 1,
+ uint32_t z = 2, uint32_t w = 3)
+ {
+ MOZ_ASSERT(x < 4 && y < 4 && z < 4 && w < 4);
+ uint32_t r = (w << 6) | (z << 4) | (y << 2) | (x << 0);
+ MOZ_ASSERT(r < 256);
+ return r;
+ }
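+ // For example, the default arguments ComputeShuffleMask(0, 1, 2, 3) yield
+ // 0xE4, the identity shuffle, while ComputeShuffleMask(3, 2, 1, 0) yields
+ // 0x1B, which reverses the four lanes when the mask is passed to
+ // shuffleInt32 or shuffleFloat32.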
+
+ void shuffleInt32(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ vpshufd(mask, src, dest);
+ }
+ void moveLowInt32(FloatRegister src, Register dest) {
+ vmovd(src, dest);
+ }
+
+ void moveHighPairToLowPairFloat32(FloatRegister src, FloatRegister dest) {
+ vmovhlps(src, dest, dest);
+ }
+ void shuffleFloat32(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ // The x86 shuffle instruction selects two lanes from its destination
+ // operand and two from its source operand. To simplify things, just
+ // clobber the output with the input and apply the instruction
+ // afterwards.
+ // Note: this is useAtStart-safe because src isn't read afterwards.
+ FloatRegister srcCopy = reusedInputFloat32x4(src, dest);
+ vshufps(mask, srcCopy, srcCopy, dest);
+ }
+ void shuffleMix(uint32_t mask, const Operand& src, FloatRegister dest) {
+ // Note this uses vshufps, which incurs a cross-domain penalty on CPUs
+ // where that applies, but it's the way clang and gcc do it.
+ vshufps(mask, src, dest, dest);
+ }
+
+ void moveFloatAsDouble(Register src, FloatRegister dest) {
+ vmovd(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const Address& src, FloatRegister dest) {
+ vmovss(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
+ vmovss(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const Operand& src, FloatRegister dest) {
+ loadFloat32(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloat32(const Address& src, FloatRegister dest) {
+ vmovss(src, dest);
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ vmovss(src, dest);
+ }
+ void loadFloat32(const Operand& src, FloatRegister dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ loadFloat32(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ loadFloat32(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ // Use vmovaps instead of vmovss to avoid a false dependency on the
+ // destination register's previous contents.
+ vmovaps(src, dest);
+ }
+
+ // Checks whether a double is representable as a 32-bit integer. If so, the
+ // integer is written to the output register. Otherwise, control jumps to the
+ // given failure label. This function overwrites the scratch float register.
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true)
+ {
+ // Check for -0.0
+ if (negativeZeroCheck)
+ branchNegativeZero(src, dest, fail);
+
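+ // Truncate, then convert the result back and compare it against the
+ // input: any input that is out of int32 range or NaN fails the
+ // comparison (the Parity branch catches the unordered NaN case).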
+ ScratchDoubleScope scratch(asMasm());
+ vcvttsd2si(src, dest);
+ convertInt32ToDouble(dest, scratch);
+ vucomisd(scratch, src);
+ j(Assembler::Parity, fail);
+ j(Assembler::NotEqual, fail);
+ }
+
+ // Checks whether a float32 is representable as a 32-bit integer. If so, the
+ // integer is written to the output register. Otherwise, control jumps to the
+ // given failure label. This function overwrites the scratch float register.
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true)
+ {
+ // Check for -0.0
+ if (negativeZeroCheck)
+ branchNegativeZeroFloat32(src, dest, fail);
+
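+ // Same round-trip check as convertDoubleToInt32 above, in single
+ // precision.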
+ ScratchFloat32Scope scratch(asMasm());
+ vcvttss2si(src, dest);
+ convertInt32ToFloat32(dest, scratch);
+ vucomiss(scratch, src);
+ j(Assembler::Parity, fail);
+ j(Assembler::NotEqual, fail);
+ }
+
+ inline void clampIntToUint8(Register reg);
+
+ bool maybeInlineDouble(wasm::RawF64 d, FloatRegister dest) {
+ // Loading zero with xor is specially optimized in hardware.
+ if (d.bits() == 0) {
+ zeroDouble(dest);
+ return true;
+ }
+
+ // It is also possible to load several common constants using vpcmpeqw
+ // to get all ones and then vpsllq and vpsrlq to get zeros at the ends,
+ // as described in "13.4 Generating constants" of
+ // "2. Optimizing subroutines in assembly language" by Agner Fog, and as
+ // previously implemented here. However, with x86 and x64 both using
+ // constant pool loads for double constants, this is probably only
+ // worthwhile in cases where a load is likely to be delayed.
+
+ return false;
+ }
+
+ bool maybeInlineFloat(wasm::RawF32 f, FloatRegister dest) {
+ // See comment above
+ if (f.bits() == 0) {
+ zeroFloat32(dest);
+ return true;
+ }
+ return false;
+ }
+
+ bool maybeInlineSimd128Int(const SimdConstant& v, const FloatRegister& dest) {
+ static const SimdConstant zero = SimdConstant::SplatX4(0);
+ static const SimdConstant minusOne = SimdConstant::SplatX4(-1);
+ if (v == zero) {
+ zeroSimd128Int(dest);
+ return true;
+ }
+ if (v == minusOne) {
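+ // Comparing a register against itself sets every lane to all ones,
+ // giving the -1 splat without loading a constant.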
+ vpcmpeqw(Operand(dest), dest, dest);
+ return true;
+ }
+ return false;
+ }
+ bool maybeInlineSimd128Float(const SimdConstant& v, const FloatRegister& dest) {
+ static const SimdConstant zero = SimdConstant::SplatX4(0.f);
+ if (v == zero) {
+ // This won't get inlined if the SimdConstant v contains -0 in any
+ // lane, as operator== here does a memcmp.
+ zeroSimd128Float(dest);
+ return true;
+ }
+ return false;
+ }
+
+ void convertBoolToInt32(Register source, Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ movzbl(source, dest);
+ }
+
+ void emitSet(Assembler::Condition cond, Register dest,
+ Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond) {
+ if (AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
+ // If the register we're defining is a single-byte register, take
+ // advantage of the setCC instruction.
+ setCC(cond, dest);
+ movzbl(dest, dest);
+
+ if (ifNaN != Assembler::NaN_HandledByCond) {
+ Label noNaN;
+ j(Assembler::NoParity, &noNaN);
+ mov(ImmWord(ifNaN == Assembler::NaN_IsTrue), dest);
+ bind(&noNaN);
+ }
+ } else {
+ Label end;
+ Label ifFalse;
+
+ if (ifNaN == Assembler::NaN_IsFalse)
+ j(Assembler::Parity, &ifFalse);
+ // Note a subtlety here: FLAGS is live at this point, and the
+ // mov interface doesn't guarantee that FLAGS is preserved (mov may
+ // be lowered to xor for zero immediates). Use movl instead, which
+ // always preserves FLAGS.
+ movl(Imm32(1), dest);
+ j(cond, &end);
+ if (ifNaN == Assembler::NaN_IsTrue)
+ j(Assembler::Parity, &end);
+ bind(&ifFalse);
+ mov(ImmWord(0), dest);
+
+ bind(&end);
+ }
+ }
+
+ // Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label) {
+ CodeOffset offset(size());
+ jump(label);
+ return offset;
+ }
+
+ template <typename T>
+ void computeEffectiveAddress(const T& address, Register dest) {
+ lea(Operand(address), dest);
+ }
+
+ void checkStackAlignment() {
+ // Exists for ARM compatibility.
+ }
+
+ CodeOffset labelForPatch() {
+ return CodeOffset(size());
+ }
+
+ void abiret() {
+ ret();
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, AnyRegister output);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+};
+
+// Specialize for float to use movaps. Use movdqa for everything else.
+template <>
+inline void
+MacroAssemblerX86Shared::loadAlignedVector<float>(const Address& src, FloatRegister dest)
+{
+ loadAlignedSimd128Float(src, dest);
+}
+
+template <typename T>
+inline void
+MacroAssemblerX86Shared::loadAlignedVector(const Address& src, FloatRegister dest)
+{
+ loadAlignedSimd128Int(src, dest);
+}
+
+// Specialize for float to use movaps. Use movdqa for everything else.
+template <>
+inline void
+MacroAssemblerX86Shared::storeAlignedVector<float>(FloatRegister src, const Address& dest)
+{
+ storeAlignedSimd128Float(src, dest);
+}
+
+template <typename T>
+inline void
+MacroAssemblerX86Shared::storeAlignedVector(FloatRegister src, const Address& dest)
+{
+ storeAlignedSimd128Int(src, dest);
+}
+
+template <> inline void
+MacroAssemblerX86Shared::loadScalar<int8_t>(const Operand& src, Register dest) {
+ load8ZeroExtend(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::loadScalar<int16_t>(const Operand& src, Register dest) {
+ load16ZeroExtend(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::loadScalar<int32_t>(const Operand& src, Register dest) {
+ load32(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::loadScalar<float>(const Operand& src, FloatRegister dest) {
+ loadFloat32(src, dest);
+}
+
+template <> inline void
+MacroAssemblerX86Shared::storeScalar<int8_t>(Register src, const Address& dest) {
+ store8(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::storeScalar<int16_t>(Register src, const Address& dest) {
+ store16(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::storeScalar<int32_t>(Register src, const Address& dest) {
+ store32(src, dest);
+}
+template <> inline void
+MacroAssemblerX86Shared::storeScalar<float>(FloatRegister src, const Address& dest) {
+ vmovss(src, dest);
+}
+
+} // namespace jit
+} // namespace js
+
+#undef CHECK_BYTEREG
+#undef CHECK_BYTEREGS
+
+#endif /* jit_x86_shared_MacroAssembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
new file mode 100644
index 000000000..1ca4a1e1c
--- /dev/null
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
@@ -0,0 +1,581 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/MoveEmitter-x86-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+MoveEmitterX86::MoveEmitterX86(MacroAssembler& masm)
+ : inCycle_(false),
+ masm(masm),
+ pushedAtCycle_(-1)
+{
+ pushedAtStart_ = masm.framePushed();
+}
+
+// Examine the cycle in moves starting at position i. Determine if it's a
+// simple cycle consisting of all register-to-register moves in a single class,
+// and whether it can be implemented entirely by swaps.
+size_t
+MoveEmitterX86::characterizeCycle(const MoveResolver& moves, size_t i,
+ bool* allGeneralRegs, bool* allFloatRegs)
+{
+ size_t swapCount = 0;
+
+ for (size_t j = i; ; j++) {
+ const MoveOp& move = moves.getMove(j);
+
+ // If it isn't a cycle of registers of the same kind, we won't be able
+ // to optimize it.
+ if (!move.to().isGeneralReg())
+ *allGeneralRegs = false;
+ if (!move.to().isFloatReg())
+ *allFloatRegs = false;
+ if (!*allGeneralRegs && !*allFloatRegs)
+ return -1;
+
+ // Stop iterating when we see the last one.
+ if (j != i && move.isCycleEnd())
+ break;
+
+ // Check that this move is actually part of the cycle. This is
+ // over-conservative when there are multiple reads from the same source,
+ // but that's expected to be rare.
+ if (move.from() != moves.getMove(j + 1).to()) {
+ *allGeneralRegs = false;
+ *allFloatRegs = false;
+ return -1;
+ }
+
+ swapCount++;
+ }
+
+ // Check that the last move cycles back to the first move.
+ const MoveOp& move = moves.getMove(i + swapCount);
+ if (move.from() != moves.getMove(i).to()) {
+ *allGeneralRegs = false;
+ *allFloatRegs = false;
+ return -1;
+ }
+
+ return swapCount;
+}
+
+// If we can emit optimized code for the cycle in moves starting at position i,
+// do so, and return true.
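+// For example, the all-GPR cycle (ebx -> eax), (ecx -> ebx), (eax -> ecx)
+// has swapCount == 2 from characterizeCycle and is resolved here as
+// xchg(eax, ebx) followed by xchg(ebx, ecx), without touching the stack.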
+bool
+MoveEmitterX86::maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
+ bool allGeneralRegs, bool allFloatRegs, size_t swapCount)
+{
+ if (allGeneralRegs && swapCount <= 2) {
+ // Use x86's swap-integer-registers instruction if we only have a few
+ // swaps. (x86 also has a swap between registers and memory but it's
+ // slow.)
+ for (size_t k = 0; k < swapCount; k++)
+ masm.xchg(moves.getMove(i + k).to().reg(), moves.getMove(i + k + 1).to().reg());
+ return true;
+ }
+
+ if (allFloatRegs && swapCount == 1) {
+ // There's no xchg for xmm registers, but if we only need a single swap,
+ // it's cheap to do an XOR swap.
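+ // (Three XORs swap the two registers bit for bit, so this is safe for
+ // any 128-bit contents, including NaN payloads.)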
+ FloatRegister a = moves.getMove(i).to().floatReg();
+ FloatRegister b = moves.getMove(i + 1).to().floatReg();
+ masm.vxorpd(a, b, b);
+ masm.vxorpd(b, a, a);
+ masm.vxorpd(a, b, b);
+ return true;
+ }
+
+ return false;
+}
+
+void
+MoveEmitterX86::emit(const MoveResolver& moves)
+{
+#if defined(JS_CODEGEN_X86) && defined(DEBUG)
+ // Clobber any scratch register we have, to make regalloc bugs more visible.
+ if (scratchRegister_.isSome())
+ masm.mov(ImmWord(0xdeadbeef), scratchRegister_.value());
+#endif
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+#if defined(JS_CODEGEN_X86) && defined(DEBUG)
+ if (!scratchRegister_.isSome()) {
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome())
+ masm.mov(ImmWord(0xdeadbeef), reg.value());
+ }
+#endif
+
+ const MoveOp& move = moves.getMove(i);
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(to, move.type());
+ inCycle_ = false;
+ continue;
+ }
+
+ if (move.isCycleBegin()) {
+ MOZ_ASSERT(!inCycle_);
+
+ // Characterize the cycle.
+ bool allGeneralRegs = true, allFloatRegs = true;
+ size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);
+
+ // Attempt to optimize it to avoid using the stack.
+ if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
+ i += swapCount;
+ continue;
+ }
+
+ // Otherwise use the stack.
+ breakCycle(to, move.endCycleType());
+ inCycle_ = true;
+ }
+
+ // Emit the move itself. For a cycle-begin move, the destination's old
+ // value was saved above, so it can now be treated as an ordinary move.
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to, moves, i);
+ break;
+ case MoveOp::GENERAL:
+ emitGeneralMove(from, to, moves, i);
+ break;
+ case MoveOp::SIMD128INT:
+ emitSimd128IntMove(from, to);
+ break;
+ case MoveOp::SIMD128FLOAT:
+ emitSimd128FloatMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+ }
+}
+
+MoveEmitterX86::~MoveEmitterX86()
+{
+ assertDone();
+}
+
+Address
+MoveEmitterX86::cycleSlot()
+{
+ if (pushedAtCycle_ == -1) {
+ // Reserve stack for cycle resolution
+ masm.reserveStack(Simd128DataSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ return Address(StackPointer, masm.framePushed() - pushedAtCycle_);
+}
+
+Address
+MoveEmitterX86::toAddress(const MoveOperand& operand) const
+{
+ if (operand.base() != StackPointer)
+ return Address(operand.base(), operand.disp());
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted: the stack pointer
+ // may have moved since this move group started (e.g. by cycle-breaking
+ // pushes), so rebase the displacement on the current framePushed().
+ return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
+}
+
+// Warning, do not use the resulting operand with pop instructions, since they
+// compute the effective destination address after altering the stack pointer.
+// Use toPopOperand if an Operand is needed for a pop.
+Operand
+MoveEmitterX86::toOperand(const MoveOperand& operand) const
+{
+ if (operand.isMemoryOrEffectiveAddress())
+ return Operand(toAddress(operand));
+ if (operand.isGeneralReg())
+ return Operand(operand.reg());
+
+ MOZ_ASSERT(operand.isFloatReg());
+ return Operand(operand.floatReg());
+}
+
+// This is the same as toOperand except that it computes an Operand suitable for
+// use in a pop.
+Operand
+MoveEmitterX86::toPopOperand(const MoveOperand& operand) const
+{
+ if (operand.isMemory()) {
+ if (operand.base() != StackPointer)
+ return Operand(operand.base(), operand.disp());
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted.
+ // Note the adjustment by one stack slot here, to account for the fact that
+ // pop computes its effective address after incrementing the stack pointer.
+ return Operand(StackPointer,
+ operand.disp() + (masm.framePushed() - sizeof(void*) - pushedAtStart_));
+ }
+ if (operand.isGeneralReg())
+ return Operand(operand.reg());
+
+ MOZ_ASSERT(operand.isFloatReg());
+ return Operand(operand.floatReg());
+}
+
+void
+MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type)
+{
+ // We are breaking a cycle such as:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::SIMD128INT:
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Int(toAddress(to), scratch);
+ masm.storeAlignedSimd128Int(scratch, cycleSlot());
+ } else {
+ masm.storeAlignedSimd128Int(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::SIMD128FLOAT:
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Float(toAddress(to), scratch);
+ masm.storeAlignedSimd128Float(scratch, cycleSlot());
+ } else {
+ masm.storeAlignedSimd128Float(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(toAddress(to), scratch);
+ masm.storeFloat32(scratch, cycleSlot());
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(toAddress(to), scratch);
+ masm.storeDouble(scratch, cycleSlot());
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::INT32:
+#ifdef JS_CODEGEN_X64
+ // x64 can't pop to a 32-bit destination, so don't push.
+ if (to.isMemory()) {
+ masm.load32(toAddress(to), ScratchReg);
+ masm.store32(ScratchReg, cycleSlot());
+ } else {
+ masm.store32(to.reg(), cycleSlot());
+ }
+ break;
+#endif
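+ // Note: on x86, INT32 deliberately falls through to the GENERAL case;
+ // int32 and pointer-sized values are both 4 bytes, so Push suffices.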
+ case MoveOp::GENERAL:
+ masm.Push(toOperand(to));
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type)
+{
+ // We are completing a cycle such as:
+ //   (A -> B)
+ //   (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from
+ // the saved value of B to A.
+ switch (type) {
+ case MoveOp::SIMD128INT:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Int(cycleSlot(), scratch);
+ masm.storeAlignedSimd128Int(scratch, toAddress(to));
+ } else {
+ masm.loadAlignedSimd128Int(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::SIMD128FLOAT:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Float(cycleSlot(), scratch);
+ masm.storeAlignedSimd128Float(scratch, toAddress(to));
+ } else {
+ masm.loadAlignedSimd128Float(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::FLOAT32:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(cycleSlot(), scratch);
+ masm.storeFloat32(scratch, toAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
+ if (to.isMemory()) {
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(cycleSlot(), scratch);
+ masm.storeDouble(scratch, toAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
+ // x64 can't pop to a 32-bit destination.
+ if (to.isMemory()) {
+ masm.load32(cycleSlot(), ScratchReg);
+ masm.store32(ScratchReg, toAddress(to));
+ } else {
+ masm.load32(cycleSlot(), to.reg());
+ }
+ break;
+#endif
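+ // As in breakCycle, on x86 INT32 falls through to the GENERAL case.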
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(masm.framePushed() - pushedAtStart_ >= sizeof(intptr_t));
+ masm.Pop(toPopOperand(to));
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterX86::emitInt32Move(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i)
+{
+ if (from.isGeneralReg()) {
+ masm.move32(from.reg(), toOperand(to));
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.load32(toAddress(from), to.reg());
+ } else {
+ // Memory to memory gpr move.
+ MOZ_ASSERT(from.isMemory());
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.load32(toAddress(from), reg.value());
+ masm.move32(reg.value(), toOperand(to));
+ } else {
+ // No scratch register available; bounce it off the stack.
+ masm.Push(toOperand(from));
+ masm.Pop(toPopOperand(to));
+ }
+ }
+}
+
+void
+MoveEmitterX86::emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i)
+{
+ if (from.isGeneralReg()) {
+ masm.mov(from.reg(), toOperand(to));
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory())
+ masm.loadPtr(toAddress(from), to.reg());
+ else
+ masm.lea(toOperand(from), to.reg());
+ } else if (from.isMemory()) {
+ // Memory to memory gpr move.
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.loadPtr(toAddress(from), reg.value());
+ masm.mov(reg.value(), toOperand(to));
+ } else {
+ // No scratch register available; bounce it off the stack.
+ masm.Push(toOperand(from));
+ masm.Pop(toPopOperand(to));
+ }
+ } else {
+ // Effective address to memory move.
+ MOZ_ASSERT(from.isEffectiveAddress());
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.lea(toOperand(from), reg.value());
+ masm.mov(reg.value(), toOperand(to));
+ } else {
+ // This is tricky without a scratch reg. We can't do an lea. Bounce the
+ // base register off the stack, then add the offset in place. Note that
+ // this clobbers FLAGS!
+ masm.Push(from.base());
+ masm.Pop(toPopOperand(to));
+ MOZ_ASSERT(to.isMemoryOrEffectiveAddress());
+ masm.addPtr(Imm32(from.disp()), toAddress(to));
+ }
+ }
+}
+
+void
+MoveEmitterX86::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSingle());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSingle());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ else
+ masm.storeFloat32(from.floatReg(), toAddress(to));
+ } else if (to.isFloatReg()) {
+ masm.loadFloat32(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(toAddress(from), scratch);
+ masm.storeFloat32(scratch, toAddress(to));
+ }
+}
+
+void
+MoveEmitterX86::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isDouble());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isDouble());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ else
+ masm.storeDouble(from.floatReg(), toAddress(to));
+ } else if (to.isFloatReg()) {
+ masm.loadDouble(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(toAddress(from), scratch);
+ masm.storeDouble(scratch, toAddress(to));
+ }
+}
+
+void
+MoveEmitterX86::emitSimd128IntMove(const MoveOperand& from, const MoveOperand& to)
+{
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.moveSimd128Int(from.floatReg(), to.floatReg());
+ else
+ masm.storeAlignedSimd128Int(from.floatReg(), toAddress(to));
+ } else if (to.isFloatReg()) {
+ masm.loadAlignedSimd128Int(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Int(toAddress(from), scratch);
+ masm.storeAlignedSimd128Int(scratch, toAddress(to));
+ }
+}
+
+void
+MoveEmitterX86::emitSimd128FloatMove(const MoveOperand& from, const MoveOperand& to)
+{
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg())
+ masm.moveSimd128Float(from.floatReg(), to.floatReg());
+ else
+ masm.storeAlignedSimd128Float(from.floatReg(), toAddress(to));
+ } else if (to.isFloatReg()) {
+ masm.loadAlignedSimd128Float(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchSimd128Scope scratch(masm);
+ masm.loadAlignedSimd128Float(toAddress(from), scratch);
+ masm.storeAlignedSimd128Float(scratch, toAddress(to));
+ }
+}
+
+void
+MoveEmitterX86::assertDone()
+{
+ MOZ_ASSERT(!inCycle_);
+}
+
+void
+MoveEmitterX86::finish()
+{
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
+
+Maybe<Register>
+MoveEmitterX86::findScratchRegister(const MoveResolver& moves, size_t initial)
+{
+#ifdef JS_CODEGEN_X86
+ if (scratchRegister_.isSome())
+ return scratchRegister_;
+
+ // All registers are either in use by this move group or are live
+ // afterwards. Look through the remaining moves for a register which is
+ // clobbered before it is used, and is thus dead at this point.
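+ // A destination register that has not yet appeared as a source (or as a
+ // base register) qualifies: its current value is clobbered before any
+ // remaining move reads it. Cycle-begin destinations are excluded because
+ // their old value is still needed to complete the cycle.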
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ for (size_t i = initial; i < moves.numMoves(); i++) {
+ const MoveOp& move = moves.getMove(i);
+ if (move.from().isGeneralReg())
+ regs.takeUnchecked(move.from().reg());
+ else if (move.from().isMemoryOrEffectiveAddress())
+ regs.takeUnchecked(move.from().base());
+ if (move.to().isGeneralReg()) {
+ if (i != initial && !move.isCycleBegin() && regs.has(move.to().reg()))
+ return mozilla::Some(move.to().reg());
+ regs.takeUnchecked(move.to().reg());
+ } else if (move.to().isMemoryOrEffectiveAddress()) {
+ regs.takeUnchecked(move.to().base());
+ }
+ }
+
+ return mozilla::Nothing();
+#else
+ return mozilla::Some(ScratchReg);
+#endif
+}
diff --git a/js/src/jit/x86-shared/MoveEmitter-x86-shared.h b/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
new file mode 100644
index 000000000..6602206f2
--- /dev/null
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveEmitter_x86_shared_h
+#define jit_MoveEmitter_x86_shared_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterX86
+{
+ bool inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // This is a store stack offset for the cycle-break spill slot, snapshotting
+ // codegen->framePushed_ at the time it is allocated. -1 if not allocated.
+ int32_t pushedAtCycle_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional scratch register for performing moves.
+ mozilla::Maybe<Register> scratchRegister_;
+#endif
+
+ void assertDone();
+ Address cycleSlot();
+ Address toAddress(const MoveOperand& operand) const;
+ Operand toOperand(const MoveOperand& operand) const;
+ Operand toPopOperand(const MoveOperand& operand) const;
+
+ size_t characterizeCycle(const MoveResolver& moves, size_t i,
+ bool* allGeneralRegs, bool* allFloatRegs);
+ bool maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
+ bool allGeneralRegs, bool allFloatRegs, size_t swapCount);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i);
+ void emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void emitSimd128FloatMove(const MoveOperand& from, const MoveOperand& to);
+ void emitSimd128IntMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& to, MoveOp::Type type);
+ void completeCycle(const MoveOperand& to, MoveOp::Type type);
+
+ public:
+ explicit MoveEmitterX86(MacroAssembler& masm);
+ ~MoveEmitterX86();
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {
+#ifdef JS_CODEGEN_X86
+ scratchRegister_.emplace(reg);
+#endif
+ }
+
+ mozilla::Maybe<Register> findScratchRegister(const MoveResolver& moves, size_t i);
+};
+
+typedef MoveEmitterX86 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MoveEmitter_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Patching-x86-shared.h b/js/src/jit/x86-shared/Patching-x86-shared.h
new file mode 100644
index 000000000..b73492870
--- /dev/null
+++ b/js/src/jit/x86-shared/Patching-x86-shared.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Patching_x86_shared_h
+#define jit_x86_shared_Patching_x86_shared_h
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+inline void*
+GetPointer(const void* where)
+{
+ void* res;
+ memcpy(&res, (const char*)where - sizeof(void*), sizeof(void*));
+ return res;
+}
+
+inline void
+SetPointer(void* where, const void* value)
+{
+ memcpy((char*)where - sizeof(void*), &value, sizeof(void*));
+}
+
+inline int32_t
+GetInt32(const void* where)
+{
+ int32_t res;
+ memcpy(&res, (const char*)where - sizeof(int32_t), sizeof(int32_t));
+ return res;
+}
+
+inline void
+SetInt32(void* where, int32_t value)
+{
+ memcpy((char*)where - sizeof(int32_t), &value, sizeof(int32_t));
+}
+
+inline void
+SetRel32(void* from, void* to)
+{
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ MOZ_ASSERT(offset == static_cast<int32_t>(offset),
+ "offset is too great for a 32-bit relocation");
+ if (offset != static_cast<int32_t>(offset))
+ MOZ_CRASH("offset is too great for a 32-bit relocation");
+
+ SetInt32(from, offset);
+}
+
+inline void*
+GetRel32Target(void* where)
+{
+ int32_t rel = GetInt32(where);
+ return (char*)where + rel;
+}
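+// In the helpers above, "where"/"from" points just past the end of the patched
+// field, which is why they access (where - sizeof(...)). For the rel32 helpers
+// this matches x86's convention that a relative jump or call target is encoded
+// relative to the end of the instruction.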
+
+class JmpSrc {
+ public:
+ JmpSrc()
+ : offset_(-1)
+ {
+ }
+
+ explicit JmpSrc(int32_t offset)
+ : offset_(offset)
+ {
+ }
+
+ int32_t offset() const {
+ return offset_;
+ }
+
+ bool isSet() const {
+ return offset_ != -1;
+ }
+
+ private:
+ int32_t offset_;
+};
+
+class JmpDst {
+ public:
+ JmpDst()
+ : offset_(-1)
+ , used_(false)
+ {
+ }
+
+ bool isUsed() const { return used_; }
+ void used() { used_ = true; }
+ bool isValid() const { return offset_ != -1; }
+
+ explicit JmpDst(int32_t offset)
+ : offset_(offset)
+ , used_(false)
+ {
+ MOZ_ASSERT(offset_ == offset);
+ }
+ int32_t offset() const {
+ return offset_;
+ }
+ private:
+ int32_t offset_ : 31;
+ bool used_ : 1;
+};
+
+inline bool
+CanRelinkJump(void* from, void* to)
+{
+ intptr_t offset = static_cast<char*>(to) - static_cast<char*>(from);
+ return (offset == static_cast<int32_t>(offset));
+}
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Patching_x86_shared_h */
diff --git a/js/src/jit/x86/Assembler-x86.cpp b/js/src/jit/x86/Assembler-x86.cpp
new file mode 100644
index 000000000..7fca29434
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -0,0 +1,106 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Assembler-x86.h"
+
+#include "gc/Marking.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : stackOffset_(0),
+ current_()
+{}
+
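+// On x86 all arguments are passed on the stack. For example, a signature of
+// (Int32, Double, Int32) places its arguments at stack offsets 0, 4 and 12,
+// leaving stackBytesConsumedSoFar() == 16.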
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Pointer:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ case MIRType::Double:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Int64:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ // SIMD values aren't passed in or out of C++, so we can make up
+ // whatever internal ABI we like. visitWasmStackArg assumes
+ // SimdMemoryAlignment.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ AssemblerX86Shared::executableCopy(buffer);
+
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ X86Encoding::SetRel32(buffer + rp.offset, rp.target);
+ }
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ uint32_t offset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ { }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+};
+
+static inline JitCode*
+CodeFromJump(uint8_t* jump)
+{
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code->raw() + iter.offset()));
+ }
+}
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
new file mode 100644
index 000000000..3fb5efaff
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -0,0 +1,991 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Assembler_x86_h
+#define jit_x86_Assembler_x86_h
+
+#include "mozilla/ArrayUtils.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register eax = { X86Encoding::rax };
+static constexpr Register ecx = { X86Encoding::rcx };
+static constexpr Register edx = { X86Encoding::rdx };
+static constexpr Register ebx = { X86Encoding::rbx };
+static constexpr Register esp = { X86Encoding::rsp };
+static constexpr Register ebp = { X86Encoding::rbp };
+static constexpr Register esi = { X86Encoding::rsi };
+static constexpr Register edi = { X86Encoding::rdi };
+
+static constexpr FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+
+static constexpr Register InvalidReg = { X86Encoding::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register JSReturnReg_Type = ecx;
+static constexpr Register JSReturnReg_Data = edx;
+static constexpr Register StackPointer = esp;
+static constexpr Register FramePointer = ebp;
+static constexpr Register ReturnReg = eax;
+static constexpr Register64 ReturnReg64(edi, eax);
+static constexpr FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
+
+// Avoid ebp, which is the FramePointer and is unavailable in some modes.
+static constexpr Register ArgumentsRectifierReg = esi;
+static constexpr Register CallTempReg0 = edi;
+static constexpr Register CallTempReg1 = eax;
+static constexpr Register CallTempReg2 = ebx;
+static constexpr Register CallTempReg3 = ecx;
+static constexpr Register CallTempReg4 = esi;
+static constexpr Register CallTempReg5 = edx;
+
+// We have no arg regs, so our NonArgRegs are just our CallTempReg*.
+// Use "const" instead of "constexpr" here to work around a bug
+// in VS2015 Update 1. See bug 1229604.
+static const Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+
+};
+
+static constexpr Register ABINonArgReg0 = eax;
+static constexpr Register ABINonArgReg1 = ebx;
+static constexpr Register ABINonArgReg2 = ecx;
+
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = ecx;
+static constexpr Register ABINonArgReturnReg1 = edx;
+static constexpr Register ABINonVolatileReg = ebx;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = esi;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register OsrFrameReg = edx;
+static constexpr Register PreBarrierReg = edx;
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = ecx;
+static constexpr Register WasmIonExitRegE0 = edi;
+static constexpr Register WasmIonExitRegE1 = eax;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+static constexpr Register WasmIonExitRegReturnData = edx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = edi;
+static constexpr Register WasmIonExitRegD1 = eax;
+static constexpr Register WasmIonExitRegD2 = esi;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg2;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
+
+// The GCC stack is aligned on 16 bytes. Ion does not maintain this alignment
+// for internal calls; wasm code does.
+#if defined(__GNUC__)
+static constexpr uint32_t ABIStackAlignment = 16;
+#else
+static constexpr uint32_t ABIStackAlignment = 4;
+#endif
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture. Rather than being a method on the LIRGenerator, it lives
+// here so that it is accessible from the entire codebase. Once full SIMD
+// support is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = true;
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which are used for "
+ "the constant sections of the code buffer. Thus it should be larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments which are used for "
+ "spilled values. Thus it should be larger than the alignment for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+struct ImmTag : public Imm32
+{
+ explicit ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+struct ImmType : public ImmTag
+{
+ explicit ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+static const Scale ScalePointer = TimesFour;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static inline void
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+{
+#ifdef DEBUG
+ // Assert that we're overwriting a jump instruction, either:
+ // 0F 80+cc <imm32>, or
+ // E9 <imm32>
+ unsigned char* x = (unsigned char*)jump.raw() - 5;
+ MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
+ (*x == 0xE9));
+#endif
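+ // jump.raw() points just past the end of the jump instruction, so the
+ // rel32 field occupies the preceding four bytes; make a window covering
+ // the whole instruction writable before patching.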
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), label.raw());
+}
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+// Return operand from a JS -> JS call.
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+
+class Assembler : public AssemblerX86Shared
+{
+ void writeRelocation(JmpSrc src) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ }
+ void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ public:
+ using AssemblerX86Shared::movl;
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::vmovsd;
+ using AssemblerX86Shared::vmovss;
+ using AssemblerX86Shared::retarget;
+ using AssemblerX86Shared::cmpl;
+ using AssemblerX86Shared::call;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::pop;
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ void push(ImmGCPtr ptr) {
+ masm.push_i32(int32_t(ptr.value));
+ writeDataRelocation(ptr);
+ }
+ void push(const ImmWord imm) {
+ push(Imm32(imm.value));
+ }
+ void push(const ImmPtr imm) {
+ push(ImmWord(uintptr_t(imm.value)));
+ }
+ void push(FloatRegister src) {
+ subl(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+
+ CodeOffset pushWithPatch(ImmWord word) {
+ masm.push_i32(int32_t(word.value));
+ return CodeOffset(masm.currentOffset());
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addl(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ movl(Imm32(word.value), dest);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void movl(ImmGCPtr ptr, Register dest) {
+ masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movl(ImmGCPtr ptr, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(), dest.index(), dest.scale());
+ writeDataRelocation(ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(ImmWord imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ }
+ void movl(ImmPtr imm, Register dest) {
+ movl(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(ImmWord imm, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though.
+ if (imm.value == 0)
+ xorl(dest, dest);
+ else
+ movl(imm, dest);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movl_i32r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void mov(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+ void mov(Imm32 imm, const Operand& dest) {
+ movl(imm, dest);
+ }
+ void mov(CodeOffset* label, Register dest) {
+ // Put a placeholder value in the instruction stream.
+ masm.movl_i32r(0, dest.encoding());
+ label->bind(masm.size());
+ }
+ void mov(Register src, Register dest) {
+ movl(src, dest);
+ }
+ void xchg(Register src, Register dest) {
+ xchgl(src, dest);
+ }
+ void lea(const Operand& src, Register dest) {
+ return leal(src, dest);
+ }
+
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void faddp() {
+ masm.faddp();
+ }
+
+ void cmpl(ImmWord rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(ImmPtr rhs, Register lhs) {
+ cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
+ }
+ void cmpl(ImmGCPtr rhs, Register lhs) {
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
+ writeDataRelocation(rhs);
+ }
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(ImmGCPtr rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
+ writeDataRelocation(rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
+ masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
+ }
+ void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
+ JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
+ }
+
+ void adcl(Imm32 imm, Register dest) {
+ masm.adcl_ir(imm.value, dest.encoding());
+ }
+ void adcl(Register src, Register dest) {
+ masm.adcl_rr(src.encoding(), dest.encoding());
+ }
+
+ void sbbl(Imm32 imm, Register dest) {
+ masm.sbbl_ir(imm.value, dest.encoding());
+ }
+ void sbbl(Register src, Register dest) {
+ masm.sbbl_rr(src.encoding(), dest.encoding());
+ }
+
+ void mull(Register multiplier) {
+ masm.mull_r(multiplier.encoding());
+ }
+
+ void shldl(const Imm32 imm, Register src, Register dest) {
+ masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+ void shrdl(const Imm32 imm, Register src, Register dest) {
+ masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+
+ void vhaddpd(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ MOZ_ASSERT(src.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vhaddpd_rr(src.encoding(), dest.encoding());
+ }
+ void vsubpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vsubpd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsubpd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpunpckldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckldq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vpunpckldq_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpunpckldq_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void fild(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fild_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(ImmWord target) {
+ call(ImmPtr((void*)target.value));
+ }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, Relocation::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
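+    // A CMP with a 32-bit immediate (cmp eax, imm32) has the same five-byte
+    // length as CALL rel32 and no effect other than on the flags, so the two
+    // encodings can be swapped in place.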
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
+
+ // Re-routes pending jumps to an external target, flushing the label in the
+ // process.
+ void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
+ if (label->used()) {
+ bool more;
+ X86Encoding::JmpSrc jmp(label->offset());
+ do {
+ X86Encoding::JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ addPendingJump(jmp, target, reloc);
+ jmp = next;
+ } while (more);
+ }
+ label->reset();
+ }
+
+ // Move a 32-bit immediate into a register where the immediate can be
+ // patched.
+ CodeOffset movlWithPatch(Imm32 imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(base + disp32) where disp32 can be patched.
+ CodeOffset movsblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movsbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movswl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzwl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *(base + disp32) where disp32 can be patched.
+ CodeOffset movbWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movb_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movw_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ Address addr = dest.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ return movlWithPatch(regLow, low);
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand low(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
+ return movlWithPatch(regLow, low);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ Address addr = dest.toAddress();
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+ return movlWithPatch(regHigh, high);
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64HIGH_OFFSET));
+ return movlWithPatch(regHigh, high);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(addr + index*scale) where addr can be patched.
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
+ Register dest)
+ {
+ masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *src where src can be patched.
+ CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movsbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movswl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzwl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *dest where dest can be patched.
+ CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movb_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movw_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movl_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+
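+    // On 32-bit x86 only eax, ebx, ecx and edx have byte-addressable low
+    // subregisters (al, bl, cl, dl), so only those registers can appear in
+    // instructions that take a single-byte operand.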
+ static bool canUseInSingleByteInstruction(Register reg) {
+ return X86Encoding::HasSubregL(reg.encoding());
+ }
+};
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use, we will fall back on the CallTempReg*
+// registers that do not overlap the argument registers, and only fail once
+// those run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Assembler_x86_h */
diff --git a/js/src/jit/x86/Bailouts-x86.cpp b/js/src/jit/x86/Bailouts-x86.cpp
new file mode 100644
index 000000000..42dc1468c
--- /dev/null
+++ b/js/src/jit/x86/Bailouts-x86.cpp
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+#if defined(_WIN32)
+# pragma pack(push, 1)
+#endif
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+ uintptr_t snapshotOffset_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t*)this + sizeof(BailoutStack);
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#if defined(_WIN32)
+# pragma pack(pop)
+#endif
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
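+    // tableOffset is the return address recorded for the bailout table entry
+    // that fired, so it points at the end of that entry; dividing by the entry
+    // size therefore yields the entry index plus one, hence the - 1 below.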
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/x86/BaseAssembler-x86.h b/js/src/jit/x86/BaseAssembler-x86.h
new file mode 100644
index 000000000..5b16311d0
--- /dev/null
+++ b/js/src/jit/x86/BaseAssembler-x86.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaseAssembler_x86_h
+#define jit_x86_BaseAssembler_x86_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX86 : public BaseAssembler
+{
+ public:
+
+ // Arithmetic operations:
+
+ void adcl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("adcl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_im(int32_t imm, const void* addr)
+ {
+ spew("adcl %d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("adcl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, src, dst);
+ }
+
+ void sbbl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("sbbl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SBB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SBB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sbbl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("sbbl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, src, dst);
+ }
+
+ using BaseAssembler::andl_im;
+ void andl_im(int32_t imm, const void* addr)
+ {
+ spew("andl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::orl_im;
+ void orl_im(int32_t imm, const void* addr)
+ {
+ spew("orl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::subl_im;
+ void subl_im(int32_t imm, const void* addr)
+ {
+ spew("subl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void shldl_irr(int32_t imm, RegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shldl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHLD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ void shrdl_irr(int32_t imm, RegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shrdl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHRD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ // SSE operations:
+
+ using BaseAssembler::vcvtsi2sd_mr;
+ void vcvtsi2sd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, address, src0, dst);
+ }
+
+ using BaseAssembler::vmovaps_mr;
+ void vmovaps_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, address, invalid_xmm, dst);
+ }
+
+ using BaseAssembler::vmovdqa_mr;
+ void vmovdqa_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, address, invalid_xmm, dst);
+ }
+
+ void vhaddpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimdFlags("vhaddpd", VEX_PD, OP2_HADDPD, src, dst);
+ }
+
+ void vsubpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+ void vsubpd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, offset, base, src0, dst);
+ }
+ void vsubpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, address, src0, dst);
+ }
+
+ void vpunpckldq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, src1, src0, dst);
+ }
+ void vpunpckldq_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, offset, base, src0, dst);
+ }
+ void vpunpckldq_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, addr, src0, dst);
+ }
+
+ void fild_m(int32_t offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_FILD, offset, base, FILD_OP_64);
+ }
+
+ // Misc instructions:
+
+ void pusha()
+ {
+ spew("pusha");
+ m_formatter.oneByteOp(OP_PUSHA);
+ }
+
+ void popa()
+ {
+ spew("popa");
+ m_formatter.oneByteOp(OP_POPA);
+ }
+};
+
+typedef BaseAssemblerX86 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaseAssembler_x86_h */
diff --git a/js/src/jit/x86/BaselineCompiler-x86.cpp b/js/src/jit/x86/BaselineCompiler-x86.cpp
new file mode 100644
index 000000000..8520fd8c7
--- /dev/null
+++ b/js/src/jit/x86/BaselineCompiler-x86.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/BaselineCompiler-x86.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerX86::BaselineCompilerX86(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerX86Shared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/x86/BaselineCompiler-x86.h b/js/src/jit/x86/BaselineCompiler-x86.h
new file mode 100644
index 000000000..a0311bc55
--- /dev/null
+++ b/js/src/jit/x86/BaselineCompiler-x86.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaselineCompiler_x86_h
+#define jit_x86_BaselineCompiler_x86_h
+
+#include "jit/x86-shared/BaselineCompiler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerX86 : public BaselineCompilerX86Shared
+{
+ protected:
+ BaselineCompilerX86(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerX86 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaselineCompiler_x86_h */
diff --git a/js/src/jit/x86/BaselineIC-x86.cpp b/js/src/jit/x86/BaselineIC-x86.cpp
new file mode 100644
index 000000000..a2227ab0a
--- /dev/null
+++ b/js/src/jit/x86/BaselineIC-x86.cpp
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.cmp32(R0.payloadReg(), R1.payloadReg());
+ masm.setCC(cond, R0.payloadReg());
+ masm.movzbl(R0.payloadReg(), R0.payloadReg());
+
+ // Box the result and return
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
new file mode 100644
index 000000000..1fb431894
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -0,0 +1,1298 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/CodeGenerator-x86.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+
+#include "jsnum.h"
+
+#include "jit/IonCaches.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using JS::GenericNaN;
+
+CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm)
+{
+}
+
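+// Frame size classes bucket small Ion frames so that bailouts from them can
+// share a per-class bailout table (see the table lookup in Bailouts-x86.cpp
+// earlier in this patch).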
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+ if (frameDepth < FrameSizes[i])
+ return FrameSizeClass(i);
+ }
+
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+ return FrameSizes[class_];
+}
+
+ValueOperand
+CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorX86::ToOutValue(LInstruction* ins)
+{
+ Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void
+CodeGeneratorX86::visitValue(LValue* value)
+{
+ const ValueOperand out = ToOutValue(value);
+ masm.moveValue(value->value(), out);
+}
+
+void
+CodeGeneratorX86::visitBox(LBox* box)
+{
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ DebugOnly<const LAllocation*> a = box->getOperand(0);
+ MOZ_ASSERT(!a->isConstant());
+
+ // On x86, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void
+CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint* box)
+{
+ const LAllocation* in = box->getOperand(0);
+ const ValueOperand out = ToOutValue(box);
+
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, ScratchFloat32Reg);
+ reg = ScratchFloat32Reg;
+ }
+ masm.boxDouble(reg, out);
+}
+
+void
+CodeGeneratorX86::visitUnbox(LUnbox* unbox)
+{
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+
+ if (mir->fallible()) {
+ masm.cmp32(ToOperand(unbox->type()), Imm32(MIRTypeToTag(mir->type())));
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ }
+}
+
+void
+CodeGeneratorX86::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Label notBoolean, done;
+ masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+ {
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+ masm.jump(&done);
+ }
+ masm.bind(&notBoolean);
+ {
+ masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
+ jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);
+
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX86::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ Label notEqual, done;
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ masm.j(Assembler::NotEqual, &notEqual);
+ {
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ masm.emitSet(cond, output);
+ masm.jump(&done);
+ }
+ masm.bind(&notEqual);
+ {
+ masm.move32(Imm32(cond == Assembler::NotEqual), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ jumpToBlock(notEqual, Assembler::NotEqual);
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ emitBranch(cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX86::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+
+ if (input != temp)
+ masm.mov(input, temp);
+
+ // Beware: convertUInt32ToDouble clobbers input.
+ masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX86::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (input != temp)
+ masm.mov(input, temp);
+
+ // Beware: convertUInt32ToFloat32 clobbers input.
+ masm.convertUInt32ToFloat32(temp, output);
+}
+
+void
+CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ const MLoadTypedArrayElementStatic* mir = ins->mir();
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);
+
+ Register ptr = ToRegister(ins->ptr());
+ AnyRegister out = ToAnyRegister(ins->output());
+ OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+ uint32_t offset = mir->offset();
+
+ if (mir->needsBoundsCheck()) {
+ MOZ_ASSERT(offset == 0);
+ if (!mir->fallible()) {
+ ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+ addOutOfLineCode(ool, ins->mir());
+ }
+
+ masm.cmpPtr(ptr, ImmWord(mir->length()));
+ if (ool)
+ masm.j(Assembler::AboveOrEqual, ool->entry());
+ else
+ bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
+ }
+
+ Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
+ switch (accessType) {
+ case Scalar::Int8: masm.movsblWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8: masm.movzblWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Int16: masm.movswlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Int32:
+ case Scalar::Uint32: masm.movlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Float32: masm.vmovssWithPatch(srcAddr, out.fpu()); break;
+ case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("Unexpected type");
+ }
+
+ if (accessType == Scalar::Float64)
+ masm.canonicalizeDouble(out.fpu());
+ if (accessType == Scalar::Float32)
+ masm.canonicalizeFloat(out.fpu());
+
+ if (ool)
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::emitWasmCall(LWasmCallBase* ins)
+{
+ MWasmCall* mir = ins->mir();
+
+ emitWasmCallBase(ins);
+
+ if (IsFloatingPointType(mir->type()) && mir->callee().which() == wasm::CalleeDesc::Builtin) {
+ if (mir->type() == MIRType::Float32) {
+ masm.reserveStack(sizeof(float));
+ Operand op(esp, 0);
+ masm.fstp32(op);
+ masm.loadFloat32(op, ReturnFloat32Reg);
+ masm.freeStack(sizeof(float));
+ } else {
+ MOZ_ASSERT(mir->type() == MIRType::Double);
+ masm.reserveStack(sizeof(double));
+ Operand op(esp, 0);
+ masm.fstp(op);
+ masm.loadDouble(op, ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ }
+ }
+}
+
+void
+CodeGeneratorX86::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCall(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCall(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX86::emitWasmLoad(T* ins)
+{
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress(offset))
+ : Operand(ToRegister(ptr), offset);
+
+ if (mir->type() == MIRType::Int64)
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ else
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins)
+{
+ emitWasmLoad(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmLoadI64(LWasmLoadI64* ins)
+{
+ emitWasmLoad(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX86::emitWasmStore(T* ins)
+{
+ const MWasmStore* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress(offset))
+ : Operand(ToRegister(ptr), offset);
+
+ if (mir->access().type() == Scalar::Int64) {
+ Register64 value = ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
+ masm.wasmStoreI64(mir->access(), value, dstAddr);
+ } else {
+ AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
+ masm.wasmStore(mir->access(), value, dstAddr);
+ }
+}
+
+void
+CodeGeneratorX86::visitWasmStore(LWasmStore* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmStoreI64(LWasmStoreI64* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+ AnyRegister out = ToAnyRegister(ins->output());
+
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+ if (mir->needsBoundsCheck()) {
+ ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+ addOutOfLineCode(ool, mir);
+
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ool->entry());
+ }
+
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress())
+ : Operand(ToRegister(ptr), 0);
+
+ masm.wasmLoad(mir->access(), srcAddr, out);
+
+ if (ool)
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MStoreTypedArrayElementStatic* mir = ins->mir();
+ Scalar::Type accessType = mir->accessType();
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+
+ canonicalizeIfDeterministic(accessType, value);
+
+ uint32_t offset = mir->offset();
+ MOZ_ASSERT_IF(mir->needsBoundsCheck(), offset == 0);
+
+ Label rejoin;
+ if (mir->needsBoundsCheck()) {
+ MOZ_ASSERT(offset == 0);
+ masm.cmpPtr(ptr, ImmWord(mir->length()));
+ masm.j(Assembler::AboveOrEqual, &rejoin);
+ }
+
+ Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
+ switch (accessType) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ masm.movbWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.movwWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.movlWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Float32:
+ masm.vmovssWithPatch(ToFloatRegister(value), dstAddr);
+ break;
+ case Scalar::Float64:
+ masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr);
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+
+ if (rejoin.used())
+ masm.bind(&rejoin);
+}
+
+void
+CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* value = ins->value();
+
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ canonicalizeIfDeterministic(accessType, value);
+
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress())
+ : Operand(ToRegister(ptr), 0);
+
+ Label rejoin;
+ if (mir->needsBoundsCheck())
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), &rejoin);
+
+ masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
+
+ if (rejoin.used())
+ masm.bind(&rejoin);
+}
+
+// Compute the heap address for an atomic access in addrTemp: copy the index
+// from ptrReg and add in the actual heap base pointer, which is patched in
+// via the wasm::MemoryPatch recorded below.
+
+void
+CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg)
+{
+ // Add in the actual heap pointer explicitly, to avoid opening up
+ // the abstraction that is atomicBinopToTypedIntArray at this time.
+ masm.movl(ptrReg, addrTemp);
+ masm.addlWithPatch(Imm32(0), addrTemp);
+ masm.append(wasm::MemoryPatch(masm.size()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register addrTemp = ToRegister(ins->addrTemp());
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ memAddr,
+ oldval,
+ newval,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register addrTemp = ToRegister(ins->addrTemp());
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ memAddr,
+ value,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ Imm32(ToInt32(value)),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+ } else {
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ ToRegister(value),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+ }
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(!mir->hasUses());
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
+ else
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
+}
+
+void
+CodeGeneratorX86::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+ MIRType type = mir->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
+ break;
+ case MIRType::Float32:
+ label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Double:
+ label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+      // Aligned access: code is aligned on PageSize and there is padding
+      // before the global data section, so these accesses can use aligned moves.
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ label = masm.vmovdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.vmovapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
+ }
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX86::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ Register64 output = ToOutRegister64(ins);
+
+ CodeOffset labelLow = masm.movlWithPatch(PatchedAbsoluteAddress(), output.low);
+ masm.append(wasm::GlobalAccess(labelLow, mir->globalDataOffset() + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(PatchedAbsoluteAddress(), output.high);
+ masm.append(wasm::GlobalAccess(labelHigh, mir->globalDataOffset() + INT64HIGH_OFFSET));
+}
+
+void
+CodeGeneratorX86::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->value()->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Float32:
+ label = masm.vmovssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Double:
+ label = masm.vmovsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+      // Aligned access: code is aligned on PageSize and there is padding
+      // before the global data section, so these accesses can use aligned moves.
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ label = masm.vmovdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Float32x4:
+ label = masm.vmovapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
+ }
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX86::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(ins->value());
+
+ CodeOffset labelLow = masm.movlWithPatch(input.low, PatchedAbsoluteAddress());
+ masm.append(wasm::GlobalAccess(labelLow, mir->globalDataOffset() + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(input.high, PatchedAbsoluteAddress());
+ masm.append(wasm::GlobalAccess(labelHigh, mir->globalDataOffset() + INT64HIGH_OFFSET));
+}
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86>
+{
+ LTruncateDToInt32* ins_;
+
+ public:
+ explicit OutOfLineTruncate(LTruncateDToInt32* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGeneratorX86* codegen) {
+ codegen->visitOutOfLineTruncate(this);
+ }
+ LTruncateDToInt32* ins() const {
+ return ins_;
+ }
+};
+
+class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86>
+{
+ LTruncateFToInt32* ins_;
+
+ public:
+ explicit OutOfLineTruncateFloat32(LTruncateFToInt32* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGeneratorX86* codegen) {
+ codegen->visitOutOfLineTruncateFloat32(this);
+ }
+ LTruncateFToInt32* ins() const {
+ return ins_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+void
+CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncate* ool = new(alloc()) OutOfLineTruncate(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncateFloat32* ool = new(alloc()) OutOfLineTruncateFloat32(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool)
+{
+ LTruncateDToInt32* ins = ool->ins();
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopDouble;
+ // Push double.
+ masm.subl(Imm32(sizeof(double)), esp);
+ masm.storeDouble(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);
+
+ // Load double, perform 64-bit truncation.
+ masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load low word, pop double and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopDouble);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ins->tempFloat());
+
+ // Try to convert doubles representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
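+        // For example, 3000000000.0 becomes -1294967296.0 after subtracting
+        // 2^32; vcvttsd2si can convert that, and reinterpreted as a uint32 the
+        // result is again 3000000000, i.e. the truncation modulo 2^32.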
+ masm.zeroDouble(ScratchDoubleReg);
+ masm.vucomisd(ScratchDoubleReg, input);
+ masm.j(Assembler::Parity, &fail);
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantDouble(4294967296.0, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantDouble(-4294967296.0, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addDouble(input, temp);
+ masm.vcvttsd2si(temp, output);
+ masm.vcvtsi2sd(output, ScratchDoubleReg, ScratchDoubleReg);
+
+ masm.vucomisd(ScratchDoubleReg, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ saveVolatile(output);
+
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(input, MoveOp::DOUBLE);
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::ToInt32);
+ else
+ masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+ masm.storeCallInt32Result(output);
+
+ restoreVolatile(output);
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool)
+{
+ LTruncateFToInt32* ins = ool->ins();
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopFloat;
+
+ // Push the float32, but reserve 64 bits of stack so that the int64 stored by fisttp fits.
+ masm.subl(Imm32(sizeof(uint64_t)), esp);
+ masm.storeFloat32(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopFloat);
+
+ // Load the float, perform a 64-bit truncation.
+ masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load the low word, pop the 64 bits and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopFloat);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ins->tempFloat());
+
+ // Try to convert float32 representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
+ masm.zeroFloat32(ScratchFloat32Reg);
+ masm.vucomiss(ScratchFloat32Reg, input);
+ masm.j(Assembler::Parity, &fail);
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantFloat32(4294967296.f, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantFloat32(-4294967296.f, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addFloat32(input, temp);
+ masm.vcvttss2si(temp, output);
+ masm.vcvtsi2ss(output, ScratchFloat32Reg, ScratchFloat32Reg);
+
+ masm.vucomiss(ScratchFloat32Reg, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ saveVolatile(output);
+
+ masm.push(input);
+ masm.setupUnalignedABICall(output);
+ masm.vcvtss2sd(input, input, input);
+ masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);
+
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::ToInt32);
+ else
+ masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+
+ masm.storeCallInt32Result(output);
+ masm.pop(input);
+
+ restoreVolatile(output);
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
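+ // Optimistically set the output to 1; it is cleared below if the comparison fails.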
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.xorl(output, output);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void
+CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // We are free to clobber all registers, since this is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
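+ // INT64_MIN % -1 is defined to be 0, while INT64_MIN / -1 overflows and must trap.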
+ if (lir->mir()->isMod())
+ masm.xor64(output, output);
+ else
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::ModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::DivI64);
+
+ // The result is returned in edx:eax; move it into the output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // We are free to clobber all registers, since this is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::UModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::UDivI64);
+
+ // The result is returned in edx:eax; move it into the output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+}
+
+void
+CodeGeneratorX86::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ Register64 falseExpr = ToRegister64(lir->falseExpr());
+ Register64 out = ToOutRegister64(lir);
+
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
+ Label done;
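+ // The true value already sits in the output; overwrite it with the false value
+ // only when the condition is zero.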
+ masm.branchTest32(Assembler::NonZero, cond, cond, &done);
+ masm.movl(falseExpr.low, out.low);
+ masm.movl(falseExpr.high, out.high);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
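+ // Spill the two 32-bit halves to the stack and reload them as one 64-bit value with vmovq.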
+ masm.Push(input.high);
+ masm.Push(input.low);
+ masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
+ masm.freeStack(sizeof(uint64_t));
+}
+
+void
+CodeGeneratorX86::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ Register64 output = ToOutRegister64(lir);
+
+ masm.reserveStack(sizeof(uint64_t));
+ masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
+ masm.Pop(output.low);
+ masm.Pop(output.high);
+}
+
+void
+CodeGeneratorX86::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ Register64 output = ToOutRegister64(lir);
+ Register input = ToRegister(lir->input());
+
+ if (lir->mir()->isUnsigned()) {
+ if (output.low != input)
+ masm.movl(input, output.low);
+ masm.xorl(output.high, output.high);
+ } else {
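+ // cdq sign-extends eax into edx, so the signed case is pinned to that register pair.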
+ MOZ_ASSERT(output.low == input);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
+ masm.cdq();
+ }
+}
+
+void
+CodeGeneratorX86::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.movl(ToRegister(input.low()), output);
+ else
+ masm.movl(ToRegister(input.high()), output);
+}
+
+void
+CodeGeneratorX86::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.clz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void
+CodeGeneratorX86::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void
+CodeGeneratorX86::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
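+ // A 64-bit value is zero iff the OR of its two halves is zero; fold the halves
+ // and set the output on Equal.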
+ if (input.high == output) {
+ masm.orl(input.low, output);
+ } else if (input.low == output) {
+ masm.orl(input.high, output);
+ } else {
+ masm.movl(input.high, output);
+ masm.orl(input.low, output);
+ }
+
+ masm.cmpl(Imm32(0), output);
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void
+CodeGeneratorX86::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ FloatRegister floatTemp = ToFloatRegister(lir->temp());
+
+ Label fail, convert;
+
+ MOZ_ASSERT(mir->input()->type() == MIRType::Double || mir->input()->type() == MIRType::Float32);
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (mir->input()->type() == MIRType::Float32) {
+ if (mir->isUnsigned())
+ masm.wasmTruncateFloat32ToUInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ else
+ masm.wasmTruncateFloat32ToInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ } else {
+ if (mir->isUnsigned())
+ masm.wasmTruncateDoubleToUInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ else
+ masm.wasmTruncateDoubleToInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ }
+}
+
+void
+CodeGeneratorX86::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToDouble(input, output, temp);
+ else
+ masm.convertInt64ToDouble(input, output);
+ } else {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToFloat32(input, output, temp);
+ else
+ masm.convertInt64ToFloat32(input, output);
+ }
+}
+
+void
+CodeGeneratorX86::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
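+ // Branch to ifTrue as soon as either half is non-zero; otherwise fall through to ifFalse.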
+ masm.testl(input.high, input.high);
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.testl(input.low, input.low);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h
new file mode 100644
index 000000000..1cc8e183a
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_CodeGenerator_x86_h
+#define jit_x86_CodeGenerator_x86_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate;
+class OutOfLineTruncateFloat32;
+
+class CodeGeneratorX86 : public CodeGeneratorX86Shared
+{
+ private:
+ CodeGeneratorX86* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ template <typename T> void emitWasmLoad(T* ins);
+ template <typename T> void emitWasmStore(T* ins);
+
+ public:
+ CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitBox(LBox* box);
+ void visitBoxFloatingPoint(LBoxFloatingPoint* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitValue(LValue* value);
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ void visitTruncateFToInt32(LTruncateFToInt32* ins);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void emitWasmCall(LWasmCallBase* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+
+ void visitOutOfLineTruncate(OutOfLineTruncate* ool);
+ void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);
+
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitWasmSelectI64(LWasmSelectI64* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitClzI64(LClzI64* lir);
+ void visitCtzI64(LCtzI64* lir);
+ void visitNotI64(LNotI64* lir);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ private:
+ void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg);
+};
+
+typedef CodeGeneratorX86 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_CodeGenerator_x86_h */
diff --git a/js/src/jit/x86/LIR-x86.h b/js/src/jit/x86/LIR-x86.h
new file mode 100644
index 000000000..f49ec7b87
--- /dev/null
+++ b/js/src/jit/x86/LIR-x86.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LIR_x86_h
+#define jit_x86_LIR_x86_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp, MIRType type)
+ : type_(type)
+ {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* payload() {
+ return getOperand(0);
+ }
+ const LAllocation* type() {
+ return getOperand(1);
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : type_(type)
+ {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32: public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<INT64_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_LIR_x86_h */
diff --git a/js/src/jit/x86/LOpcodes-x86.h b/js/src/jit/x86/LOpcodes-x86.h
new file mode 100644
index 000000000..70c2ff384
--- /dev/null
+++ b/js/src/jit/x86/LOpcodes-x86.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LOpcodes_x86_h
+#define jit_x86_LOpcodes_x86_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(BoxFloatingPoint) \
+ _(DivOrModConstantI) \
+ _(SimdValueInt32x4) \
+ _(SimdValueFloat32x4) \
+ _(UDivOrMod) \
+ _(UDivOrModConstant) \
+ _(UDivOrModI64) \
+ _(DivOrModI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint)
+
+#endif /* jit_x86_LOpcodes_x86_h */
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
new file mode 100644
index 000000000..5dbaefe5b
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -0,0 +1,658 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Lowering-x86.h"
+
+#include "jit/MIR.h"
+#include "jit/x86/Assembler-x86.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+LAllocation
+LIRGeneratorX86::useByteOpRegister(MDefinition* mir)
+{
+ return useFixed(mir, eax);
+}
+
+LAllocation
+LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useFixedAtStart(mir, eax);
+}
+
+LAllocation
+LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useFixed(mir, eax);
+}
+
+LDefinition
+LIRGeneratorX86::tempByteOpRegister()
+{
+ return tempFixed(eax);
+}
+
+void
+LIRGeneratorX86::visitBox(MBox* box)
+{
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wraps a floating-point value, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner), tempCopy(inner, 0),
+ inner->type()), box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new(alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorX86::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* inner = unbox->getOperand(0);
+
+ if (inner->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ // An unbox on x86 reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void
+LIRGeneratorX86::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void
+LIRGeneratorX86::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+
+void
+LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void
+LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void
+LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void
+LIRGeneratorX86::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorX86Shared::visitMulI64.
+ if (constant >= -1 && constant <= 2)
+ needsTemp = false;
+ if (int64_t(1) << shift == constant)
+ needsTemp = false;
+ }
+
+ // MulI64 on x86 needs the output to be in edx:eax.
+ ins->setInt64Operand(0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp)
+ ins->setTemp(0, temp());
+
+ defineInt64Fixed(ins, mir, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+void
+LIRGeneratorX86::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
+LIRGeneratorX86::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/ true);
+}
+
+void
+LIRGeneratorX86::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
+LIRGeneratorX86::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins)
+{
+ if (ins->type() != MIRType::Int64) {
+ lowerWasmLoad(ins);
+ return;
+ }
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+
+ Scalar::Type accessType = ins->access().type();
+ if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) {
+ // We use cdq to sign-extend the result and cdq demands these registers.
+ defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ defineInt64(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ valueAlloc = useFixed(ins->value(), eax);
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+ case Scalar::Int64: {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+ auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ add(lir, ins);
+ return;
+ }
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ LAsmJSStoreHeap* lir = nullptr;
+ switch (ins->access().type()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("NYI");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ // The code generated for StoreTypedArrayElementStatic is identical to that
+ // for AsmJSStoreHeap, and the same concerns apply.
+ LStoreTypedArrayElementStatic* lir;
+ switch (ins->accessType()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ lir = new(alloc()) LStoreTypedArrayElementStatic(useRegister(ins->ptr()),
+ useFixed(ins->value(), eax));
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ lir = new(alloc()) LStoreTypedArrayElementStatic(useRegisterAtStart(ins->ptr()),
+ useRegisterAtStart(ins->value()));
+ break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Register allocation:
+ //
+ // The output may not be used, but eax will be clobbered regardless
+ // so pin the output to eax.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: this must
+ // be ebx, ecx, or edx (eax is taken).
+ //
+ // Bug #1077036 describes some optimization opportunities.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
+
+ lir->setAddrTemp(temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX86::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
+
+ lir->setAddrTemp(temp());
+ if (byteSize(ins->access().type()) == 1)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. These can all take an immediate.
+
+ if (!ins->hasUses()) {
+ LAllocation value;
+ if (byteArray && !ins->value()->isConstant())
+ value = useFixed(ins->value(), ebx);
+ else
+ value = useRegisterOrConstant(ins->value());
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base), value);
+ lir->setAddrTemp(temp());
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl value, output
+ // lock xaddl output, mem
+ //
+ // For the 8-bit variants XADD needs a byte register for the output
+ // only; we can still set up with movl, just pin the output to eax
+ // (or ebx / ecx / edx).
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+ // Note the placement of L, cmpxchg will update eax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
+ //
+ // We want to fix eax as the output. We also need a temp for
+ // the intermediate value.
+ //
+ // For the 8-bit variants the temp must have a byte register.
+ //
+ // There are optimization opportunities:
+ // - better 8-bit register allocation and instruction selection, Bug #1077036.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (byteArray) {
+ value = useFixed(ins->value(), ebx);
+ if (bitOp)
+ tempDef = tempFixed(ecx);
+ } else if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ if (bitOp)
+ tempDef = temp();
+ } else {
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base), value, tempDef);
+
+ lir->setAddrTemp(temp());
+ if (byteArray || bitOp)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else if (ins->value()->isConstant())
+ define(lir, ins);
+ else
+ defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
+}
+
+void
+LIRGeneratorX86::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorX86::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorX86::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorX86::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorX86::visitSubstr(MSubstr* ins)
+{
+ // Due to the lack of registers on x86, we reuse the string register as a
+ // temporary. As a result we only need two temporary registers and take a
+ // bogus temporary as the fifth argument.
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ LDefinition::BogusTemp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
+void
+LIRGeneratorX86::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition temp = tempDouble();
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
+}
+
+void
+LIRGeneratorX86::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ LDefinition maybeTemp =
+ (ins->isUnsigned() && AssemblerX86Shared::HasSSE3()) ? temp() : LDefinition::BogusTemp();
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX86::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ if (ins->isUnsigned()) {
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+ } else {
+ LExtendInt32ToInt64* lir =
+ new(alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
+ defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ }
+}
diff --git a/js/src/jit/x86/Lowering-x86.h b/js/src/jit/x86/Lowering-x86.h
new file mode 100644
index 000000000..af823bad2
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Lowering_x86_h
+#define jit_x86_Lowering_x86_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86 : public LIRGeneratorX86Shared
+{
+ public:
+ LIRGeneratorX86(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // It's a trap! On x86, the 1-byte store can only use one of
+ // {al,bl,cl,dl,ah,bh,ch,dh}. That means if the register allocator
+ // gives us one of {edi,esi,ebp,esp}, we're out of luck. (The formatter
+ // will assert on us.) Ideally, we'd just ask the register allocator to
+ // give us one of {al,bl,cl,dl}. For now, just useFixed(al).
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+ void lowerPhi(MPhi* phi);
+
+ static bool allowTypedElementHoleCheck() {
+ return true;
+ }
+
+ static bool allowStaticTypedArrayAccesses() {
+ return true;
+ }
+};
+
+typedef LIRGeneratorX86 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Lowering_x86_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86-inl.h b/js/src/jit/x86/MacroAssembler-x86-inl.h
new file mode 100644
index 000000000..11520c78f
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -0,0 +1,1075 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_inl_h
+#define jit_x86_MacroAssembler_x86_inl_h
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movl(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ movl(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movl(src.low, dest.low);
+ movl(src.high, dest.high);
+}
+
+// ===============================================================
+// Logical functions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ andl(src, dest);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ andl(imm, dest);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
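+ // AND with an all-ones word leaves that half unchanged, so skip it.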
+ if (imm.low().value != int32_t(0xFFFFFFFF))
+ andl(imm.low(), dest.low);
+ if (imm.hi().value != int32_t(0xFFFFFFFF))
+ andl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
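+ // OR with a zero word is a no-op, so that half can be skipped.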
+ if (imm.low().value != 0)
+ orl(imm.low(), dest.low);
+ if (imm.hi().value != 0)
+ orl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != 0)
+ xorl(imm.low(), dest.low);
+ if (imm.hi().value != 0)
+ xorl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ orl(src, dest);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ orl(imm, dest);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ andl(src.low, dest.low);
+ andl(src.high, dest.high);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ orl(src.low, dest.low);
+ orl(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ xorl(src.low, dest.low);
+ xorl(src.high, dest.high);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ xorl(src, dest);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ xorl(imm, dest);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ addl(src, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ addl(imm, dest);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ addl(Imm32(imm.value), dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ addl(Operand(src), dest);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addl(src.low, dest.low);
+ adcl(src.high, dest.high);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ addl(imm, dest.low);
+ adcl(Imm32(0), dest.high);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
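+ // A zero low word can never generate a carry, so only the high word needs the add.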
+ if (imm.low().value == 0) {
+ addl(imm.hi(), dest.high);
+ return;
+ }
+ addl(imm.low(), dest.low);
+ adcl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::addConstantDouble(double d, FloatRegister dest)
+{
+ Double* dbl = getDouble(wasm::RawF64(d));
+ if (!dbl)
+ return;
+ masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ subl(src, dest);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ subl(src, Operand(dest));
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ subl(imm, dest);
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ subl(Operand(addr), dest);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ subl(src.low, dest.low);
+ sbbl(src.high, dest.high);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value == 0) {
+ subl(imm.hi(), dest.high);
+ return;
+ }
+ subl(imm.low(), dest.low);
+ sbbl(imm.hi(), dest.high);
+}
+
+// Note: this function clobbers eax and edx.
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ MOZ_ASSERT(dest.low != eax && dest.low != edx);
+ MOZ_ASSERT(dest.high != eax && dest.high != edx);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ imull(edx, dest.high);
+
+ // edx:eax = LOW(dest) * LOW(imm);
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ movl(dest.low, eax);
+ mull(edx);
+
+ // HIGH(dest) += edx;
+ addl(edx, dest.high);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
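+ // Only HIGH(imm) == 5 is supported here; low * 5 is formed with lea as low + low*4.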
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5)
+ leal(Operand(dest.low, dest.low, TimesFour), edx);
+ else
+ MOZ_CRASH("Unsupported imm");
+ addl(edx, dest.high);
+
+ // LOW(dest) = eax;
+ movl(eax, dest.low);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(temp != edx && temp != eax);
+
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(imm.low(), dest.high); // (2)
+ imull(imm.hi(), temp); // (3)
+ addl(dest.high, temp);
+ movl(imm.low(), dest.high);
+ mull(dest.high/*, dest.low*/); // (4) + (1) output in edx:eax (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(src != Register64(edx, eax) && src != Register64(eax, edx));
+
+ // Make sure the rhs.high isn't the dest.high register anymore.
+ // This saves us from doing other register moves.
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(src.low, dest.high); // (2)
+ imull(src.high, temp); // (3)
+ addl(dest.high, temp);
+ movl(src.low, dest.high);
+ mull(dest.high/*, dest.low*/); // (4) + (1) output in edx:eax (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movl(imm, temp);
+ vmulsd(Operand(temp, 0), dest, dest);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
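+ // Bump the low word; only when it wraps to zero (ZF set) does the carry reach the high word.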
+ addl(Imm32(1), Operand(dest));
+ Label noOverflow;
+ j(NonZero, &noOverflow);
+ addl(Imm32(1), Operand(dest.offset(4)));
+ bind(&noOverflow);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
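+ // 64-bit negation: negl sets CF when the low word was non-zero, and adc folds
+ // that borrow into the high word before it too is negated.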
+ negl(reg.low);
+ adcl(Imm32(0), reg.high);
+ negl(reg.high);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shll(imm, dest);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shldl(imm, dest.low, dest.high);
+ shll(imm, dest.low);
+ return;
+ }
+
+ mov(dest.low, dest.high);
+ shll(Imm32(imm.value & 0x1f), dest.high);
+ xorl(dest.low, dest.low);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
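+ // shld/shl only use the low five bits of cl; shifts of 32-63 are finished below
+ // by moving the low word into the high word and clearing the low word.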
+ shldl_cl(srcDest.low, srcDest.high);
+ shll_cl(srcDest.low);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.low, srcDest.high);
+ xorl(srcDest.low, srcDest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shrl(imm, dest);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ shrl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ shrl(Imm32(imm.value & 0x1f), dest.low);
+ xorl(dest.high, dest.high);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ shrl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ xorl(srcDest.high, srcDest.high);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ sarl(imm, dest);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ sarl(imm, dest.high);
+ return;
+ }
+
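+    // For shifts of 32 or more, the high word (shifted by the remaining amount) becomes the
+    // low word, and the high word is filled with copies of the sign bit.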
+ movl(dest.high, dest.low);
+ sarl(Imm32(imm.value & 0x1f), dest.low);
+ sarl(Imm32(0x1f), dest.high);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ sarl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ sarl(Imm32(0x1f), srcDest.high);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
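+    // The two double-precision shifts rotate the pair by (count mod 32); bit 5 of the count
+    // then decides whether the 32-bit halves must additionally be swapped.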
+ movl(dest.high, temp);
+ shldl_cl(dest.low, dest.high);
+ shldl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shrdl_cl(dest.low, dest.high);
+ shrdl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
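+    // Rotate by (amount mod 32) with double-precision shifts when needed, then swap the
+    // halves if bit 5 of the amount is set.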
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shldl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shldl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20))
+ xchgl(dest.high, dest.low);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shrdl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shrdl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20))
+ xchgl(dest.high, dest.low);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ Label nonzero, zero;
+
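+    // bsr finds the index of the highest set bit and sets ZF when its source is zero. The
+    // final XOR with 0x3F turns that index (offset by 32 for the high word) into a
+    // leading-zero count; 0x7F is loaded when both words are zero so the result becomes 64.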
+ bsrl(src.high, dest);
+ j(Assembler::Zero, &zero);
+ orl(Imm32(32), dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ bsrl(src.low, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x7F), dest);
+
+ bind(&nonzero);
+ xorl(Imm32(0x3F), dest);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ Label done, nonzero;
+
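+    // bsf finds the index of the lowest set bit and sets ZF when its source is zero. The low
+    // word's index is used directly, the high word's index is offset by 32, and 64 is
+    // returned when both words are zero.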
+ bsfl(src.low, dest);
+ j(Assembler::NonZero, &done);
+ bsfl(src.high, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(64), dest);
+ jump(&done);
+
+ bind(&nonzero);
+ orl(Imm32(32), dest);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp)
+{
+ // The tmp register is only needed if there is no native POPCNT.
+
+ MOZ_ASSERT(src.low != tmp && src.high != tmp);
+ MOZ_ASSERT(dest.low != tmp && dest.high != tmp);
+
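+    // popcnt64(x) is popcnt32 of the low word plus popcnt32 of the high word; the sum fits
+    // in 32 bits, so the result's high word is simply zeroed.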
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
+ addl(dest.high, dest.low);
+ xorl(dest.high, dest.high);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+    switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
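+        // Compare the high words first: a strict inequality there decides the result
+        // immediately; only when they are equal does the unsigned comparison of the low
+        // words decide.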
+ cmp32(lhs.high, val.hi());
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, val.low());
+ j(cond3, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+    switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, rhs.low);
+ j(cond3, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal)
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ else
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal)
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ else
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttss2si(src, dest);
+
+ // vcvttss2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+    // TODO: x64 supports integers up to 64 bits; here we only support 32 bits
+    // before failing. Implementing this for x86 might give an x86 Kraken win.
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttsd2si(src, dest);
+
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ if (cond == Assembler::Zero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
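+        // The 64-bit value is zero iff the OR of its two halves is zero, so OR them into
+        // temp and test that.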
+ movl(lhs.low, temp);
+ orl(lhs.high, temp);
+ branchTestPtr(cond, temp, temp, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ test32(value.payloadReg(), value.payloadReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ branchTestMagic(cond, valaddr, label);
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void
+MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+    // For unsigned conversions, the [INT64_MAX + 1, UINT64_MAX] range needs to be handled separately.
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
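+    // A set sign bit in the truncated result means the input did not fit in int64 (it was
+    // >= 2^63): bias the input down by 2^63, truncate again, and OR the top bit back into
+    // the high word below.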
+ // Move the value inside INT64 range.
+ storeFloat32(floatTemp, dest);
+    loadConstantFloat32(float(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, Address(dest.base, dest.offset + INT64HIGH_OFFSET));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+    // For unsigned conversions, the [INT64_MAX + 1, UINT64_MAX] range needs to be handled separately.
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
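+    // Same scheme as truncateFloat32ToUInt64 above: a set sign bit means the input was
+    // >= 2^63, so bias it into int64 range and restore the top bit afterwards.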
+ // Move the value inside INT64 range.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, Address(dest.base, dest.offset + INT64HIGH_OFFSET));
+
+ bind(&done);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ CodeOffset off = cmp32WithPatch(index, Imm32(0));
+ append(wasm::BoundsCheck(off.offset()));
+
+ j(cond, label);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
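+    // patchAt points just past the 32-bit immediate of the patched cmp, so [-1] overwrites
+    // that immediate with the new limit.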
+ reinterpret_cast<uint32_t*>(patchAt)[-1] = limit;
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// Note: this function clobbers the source register.
+void
+MacroAssemblerX86::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+ // src is [0, 2^32-1]
+ subl(Imm32(0x80000000), src);
+
+ // Now src is [-2^31, 2^31-1] - int range, but not the same value.
+ convertInt32ToDouble(src, dest);
+
+ // dest is now a double with the int range.
+ // correct the double value by adding 0x80000000.
+ asMasm().addConstantDouble(2147483648.0, dest);
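+    // For example, src = 0xFFFFFFFF becomes 0x7FFFFFFF after the subtraction, converts to
+    // 2147483647.0, and adding 2^31 yields 4294967295.0.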
+}
+
+// Note: this function clobbers the source register.
+void
+MacroAssemblerX86::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+}
+
+void
+MacroAssemblerX86::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ if (src.payloadReg() != dest.gpr())
+ movl(src.payloadReg(), dest.gpr());
+ }
+}
+
+template <typename T>
+void
+MacroAssemblerX86::loadInt32OrDouble(const T& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(ToPayload(src), dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+template <typename T>
+void
+MacroAssemblerX86::loadUnboxedValue(const T& src, MIRType type, AnyRegister dest)
+{
+ if (dest.isFloat())
+ loadInt32OrDouble(src, dest.fpu());
+ else
+ movl(Operand(src), dest.gpr());
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void
+MacroAssemblerX86::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_inl_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
new file mode 100644
index 000000000..754b29c2d
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -0,0 +1,1028 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Casting.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+
+#include "jsscriptinlines.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// vpunpckldq requires 16-byte boundary for memory operand.
+// See convertUInt64ToDouble for the details.
+MOZ_ALIGNED_DECL(static const uint64_t, 16) TO_DOUBLE[4] = {
+ 0x4530000043300000LL,
+ 0x0LL,
+ 0x4330000000000000LL,
+ 0x4530000000000000LL
+};
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool
+MacroAssemblerX86::convertUInt64ToDoubleNeedsTemp()
+{
+ return HasSSE3();
+}
+
+void
+MacroAssemblerX86::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ // SUBPD needs SSE2, HADDPD needs SSE3.
+ if (!HasSSE3()) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the dest register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ asMasm().Push(src.high);
+ asMasm().Push(src.low);
+ fild(Operand(esp, 0));
+
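+        // fild treated the value as a signed int64; if the high word was negative,
+        // compensate by adding 2^64 on the x87 stack.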
+ Label notNegative;
+ asMasm().branch32(Assembler::NotSigned, src.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ store64(Imm64(mozilla::BitwiseCast<uint64_t>(add_constant)), Address(esp, 0));
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), dest);
+ asMasm().freeStack(2*sizeof(intptr_t));
+ return;
+ }
+
+    // The following operation uses the entire 128 bits of the dest XMM register.
+    // Currently the upper 64 bits are free whenever we have access to the lower 64 bits.
+ MOZ_ASSERT(dest.size() == 8);
+ FloatRegister dest128 = FloatRegister(dest.encoding(), FloatRegisters::Simd128);
+
+    // Assume that src is represented as follows:
+ // src = 0x HHHHHHHH LLLLLLLL
+
+ // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
+ // dest = 0x 00000000 00000000 00000000 LLLLLLLL
+ // scratch = 0x 00000000 00000000 00000000 HHHHHHHH
+ vmovd(src.low, dest128);
+ vmovd(src.high, ScratchSimd128Reg);
+
+ // Unpack and interleave dest and scratch to dest:
+ // dest = 0x 00000000 00000000 HHHHHHHH LLLLLLLL
+ vpunpckldq(ScratchSimd128Reg, dest128, dest128);
+
+ // Unpack and interleave dest and a constant C1 to dest:
+ // C1 = 0x 00000000 00000000 45300000 43300000
+ // dest = 0x 45300000 HHHHHHHH 43300000 LLLLLLLL
+    // here, each 64-bit part of dest represents the following double:
+ // HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
+ // LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
+ movePtr(ImmWord((uintptr_t)TO_DOUBLE), temp);
+ vpunpckldq(Operand(temp, 0), dest128, dest128);
+
+ // Subtract a constant C2 from dest, for each 64-bit part:
+ // C2 = 0x 45300000 00000000 43300000 00000000
+    // here, each 64-bit part of C2 represents the following double:
+ // HI(C2) = 0x 1.0000000000000 * 2**84 == 2**84
+ // LO(C2) = 0x 1.0000000000000 * 2**52 == 2**52
+    // after the operation each 64-bit part of dest represents the following:
+ // HI(dest) = double(0x HHHHHHHH 00000000)
+ // LO(dest) = double(0x 00000000 LLLLLLLL)
+ vsubpd(Operand(temp, sizeof(uint64_t) * 2), dest128, dest128);
+
+ // Add HI(dest) and LO(dest) in double and store it into LO(dest),
+ // LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
+ // = double(0x HHHHHHHH LLLLLLLL)
+ // = double(src)
+ vhaddpd(dest128, dest128);
+}
+
+void
+MacroAssemblerX86::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ if (maybeInlineDouble(d, dest))
+ return;
+ Double* dbl = getDouble(d);
+ if (!dbl)
+ return;
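+    // Emit the load with a placeholder address; finish() emits the constant pool and links
+    // this use to the constant's final offset.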
+ masm.vmovsd_mr(nullptr, dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
+{
+ loadConstantDouble(wasm::RawF64(d), dest);
+}
+
+void
+MacroAssemblerX86::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ if (maybeInlineFloat(f, dest))
+ return;
+ Float* flt = getFloat(f);
+ if (!flt)
+ return;
+ masm.vmovss_mr(nullptr, dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
+{
+ loadConstantFloat32(wasm::RawF32(f), dest);
+}
+
+void
+MacroAssemblerX86::loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Int(v, dest))
+ return;
+ SimdData* i4 = getSimdData(v);
+ if (!i4)
+ return;
+ masm.vmovdqa_mr(nullptr, dest.encoding());
+ propagateOOM(i4->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Float(v, dest))
+ return;
+ SimdData* f4 = getSimdData(v);
+ if (!f4)
+ return;
+ masm.vmovaps_mr(nullptr, dest.encoding());
+ propagateOOM(f4->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::finish()
+{
+ if (!doubles_.empty())
+ masm.haltingAlign(sizeof(double));
+ for (const Double& d : doubles_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : d.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.int64Constant(d.value);
+ if (!enoughMemory_)
+ return;
+ }
+
+ if (!floats_.empty())
+ masm.haltingAlign(sizeof(float));
+ for (const Float& f : floats_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : f.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.int32Constant(f.value);
+ if (!enoughMemory_)
+ return;
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty())
+ masm.haltingAlign(SimdMemoryAlignment);
+ for (const SimdData& v : simds_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : v.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.simd128Constant(v.value.bytes());
+ if (!enoughMemory_)
+ return;
+ }
+}
+
+void
+MacroAssemblerX86::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ subl(Imm32(sizeof(ResumeFromException)), esp);
+ movl(esp, eax);
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(ecx);
+ asMasm().passABIArg(eax);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ loadPtr(Address(esp, offsetof(ResumeFromException, kind)), eax);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ jmp(Operand(eax));
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(ecx, edx);
+ loadValue(Address(esp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jmp(Operand(eax));
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+ bind(&return_);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
+ movl(ebp, esp);
+ pop(ebp);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(esp, offsetof(ResumeFromException, bailoutInfo)), ecx);
+ movl(Imm32(BAILOUT_RETURN_OK), eax);
+ jmp(Operand(esp, offsetof(ResumeFromException, target)));
+}
+
+void
+MacroAssemblerX86::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerX86::profilerExitFrame()
+{
+ jmp(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler&
+MacroAssemblerX86::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerX86::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value) {
+        // On Windows, we cannot skip very far down the stack without touching the
+        // memory pages in between. This is corner-case code for situations where the
+        // Ion frame data for a piece of code is very large. To handle this special case,
+        // for frames over 4k in size we allocate memory on the stack incrementally, touching
+        // it as we go.
+ uint32_t amountLeft = imm32.value;
+ while (amountLeft > 4096) {
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subl(Imm32(amountLeft), StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ movl(esp, scratch);
+ andl(Imm32(~(ABIStackAlignment - 1)), esp);
+ push(scratch);
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ freeStack(stackAdjust);
+ if (result == MoveOp::DOUBLE) {
+ reserveStack(sizeof(double));
+ fstp(Operand(esp, 0));
+ loadDouble(Operand(esp, 0), ReturnDoubleReg);
+ freeStack(sizeof(double));
+ } else if (result == MoveOp::FLOAT32) {
+ reserveStack(sizeof(float));
+ fstp32(Operand(esp, 0));
+ loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
+ freeStack(sizeof(float));
+ }
+ if (dynamicAlignment_)
+ pop(esp);
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ MOZ_ASSERT(ptr != temp);
+ movePtr(ptr, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ loadPtr(address, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ orPtr(Imm32(gc::ChunkMask), ptr);
+ branch32(cond, Address(ptr, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, address, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, address, temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ if (rhs.isMarkable())
+ cmpPtr(lhs.payloadReg(), ImmGCPtr(rhs.toMarkablePointer()));
+ else
+ cmpPtr(lhs.payloadReg(), ImmWord(rhs.toNunboxPayload()));
+
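+    // For Equal both the payload and the type tag must match; for NotEqual a mismatch in
+    // either is enough to branch.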
+ if (cond == Equal) {
+ Label done;
+ j(NotEqual, &done);
+ {
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(Equal, label);
+ }
+ bind(&done);
+ } else {
+ j(NotEqual, label);
+
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(NotEqual, label);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag if needed.
+ if (valueType != slotType)
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(dest));
+
+ // Store the payload.
+ if (value.constant())
+ storePayload(value.value(), Operand(dest));
+ else
+ storePayload(value.reg().typedReg().gpr(), Operand(dest));
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+// wasm specific methods, used in both the wasm baseline compiler and ion.
+
+void
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsblWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ movzblWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Int16:
+ movswlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ movzwlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ vmovssWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Float64:
+ vmovsdWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movss zeroes out the high lanes.
+ case 1: vmovssWithPatch(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movsd.
+ case 2: vmovsdWithPatch(srcAddr, out.fpu()); break;
+ case 4: vmovupsWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movd zeroes out the high lanes.
+ case 1: vmovdWithPatch(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovqWithPatch(srcAddr, out.fpu()); break;
+ case 4: vmovdquWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+ vmovdquWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+ vmovdquWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movsblWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint8:
+ movzblWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int16:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movswlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint16:
+ movzwlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int32:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint32:
+ movlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int64:
+ if (srcAddr.kind() == Operand::MEM_ADDRESS32) {
+ Operand low(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64LOW_OFFSET));
+ Operand high(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64HIGH_OFFSET));
+
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ } else {
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP);
+ Address addr = srcAddr.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+
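+            // Order the two loads so that the half whose destination aliases the base
+            // register is loaded last, keeping the address valid for both loads.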
+ if (addr.base != out.low) {
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ } else {
+ MOZ_ASSERT(addr.base != out.high);
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ }
+ }
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+}
+
+void
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t storeOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ movbWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movwWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movlWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ vmovssWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ vmovsdWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+          // In register-to-memory mode, movss writes only the low lane.
+          case 1: vmovssWithPatch(value.fpu(), dstAddr); break;
+          // See comment above, which also applies to movsd.
+          case 2: vmovsdWithPatch(value.fpu(), dstAddr); break;
+          case 4: vmovupsWithPatch(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+          // In register-to-memory mode, movd writes only the low lane.
+          case 1: vmovdWithPatch(value.fpu(), dstAddr); break;
+          // See comment above, which also applies to movq.
+          case 2: vmovqWithPatch(value.fpu(), dstAddr); break;
+          case 4: vmovdquWithPatch(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+ vmovdquWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+ vmovdquWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("Should be handled in storeI64.");
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t storeOffset = size();
+ if (dstAddr.kind() == Operand::MEM_ADDRESS32) {
+ Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET));
+ Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET));
+
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ } else {
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP);
+ Address addr = dstAddr.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+
+ if (addr.base != value.low) {
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ } else {
+ MOZ_ASSERT(addr.base != value.high);
+
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ }
+ }
+}
+
+void
+MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ Label done;
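+    // Try a signed conversion first; a non-negative result is already correct. Otherwise
+    // bias the input down by 2^31, convert again, and OR the top bit back in, taking the
+    // ool path if that also fails.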
+ vcvttsd2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
+ loadConstantDouble(double(int32_t(0x80000000)), ScratchDoubleReg);
+ addDouble(input, ScratchDoubleReg);
+ vcvttsd2si(ScratchDoubleReg, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ Label done;
+ vcvttss2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
+ loadConstantFloat32(float(int32_t(0x80000000)), ScratchFloat32Reg);
+ addFloat32(input, ScratchFloat32Reg);
+ vcvttss2si(ScratchFloat32Reg, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+//}}} check_macroassembler_style
+
+void
+MacroAssemblerX86::convertInt64ToDouble(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ asMasm().Push(input.high);
+ asMasm().Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), output);
+ asMasm().freeStack(2*sizeof(intptr_t));
+}
+
+void
+MacroAssemblerX86::convertInt64ToFloat32(Register64 input, FloatRegister output)
+{
+ convertInt64ToDouble(input, output);
+ convertDoubleToFloat32(output, output);
+}
+
+void
+MacroAssemblerX86::convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp)
+{
+ convertUInt64ToDouble(input, output.asDouble(), temp);
+ convertDoubleToFloat32(output, output);
+}
+
+void
+MacroAssemblerX86::wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+ asMasm().branchDoubleNotInInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), temp);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+ asMasm().branchFloat32NotInInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), temp);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+ asMasm().branchDoubleNotInUInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateDoubleToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+ asMasm().branchFloat32NotInUInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateFloat32ToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
diff --git a/js/src/jit/x86/MacroAssembler-x86.h b/js/src/jit/x86/MacroAssembler-x86.h
new file mode 100644
index 000000000..21cd63a0c
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -0,0 +1,870 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_h
+#define jit_x86_MacroAssembler_x86_h
+
+#include "jscompartment.h"
+
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Shared
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ MoveResolver moveResolver_;
+
+ private:
+ Operand payloadOfAfterStackPush(const Address& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer)
+ return Operand(address.base, address.offset + 4);
+ return payloadOf(address);
+ }
+ Operand payloadOf(const Address& address) {
+ return Operand(address.base, address.offset);
+ }
+ Operand payloadOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset);
+ }
+ Operand tagOf(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ Operand tagOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset + 4);
+ }
+
+ void setupABICall(uint32_t args);
+
+ public:
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store32;
+ using MacroAssemblerX86Shared::store16;
+ using MacroAssemblerX86Shared::call;
+
+ MacroAssemblerX86()
+ {
+ }
+
+    // The buffer is about to be linked; make sure any constant pools or excess
+    // bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X86-specific interface.
+ /////////////////////////////////////////////////////////////////
+
+ Operand ToPayload(Operand base) {
+ return base;
+ }
+ Address ToPayload(Address base) {
+ return base;
+ }
+ BaseIndex ToPayload(BaseIndex base) {
+ return base;
+ }
+ Operand ToType(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()), Register::FromCode(base.index()),
+ base.scale(), base.disp() + sizeof(void*));
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ Address ToType(Address base) {
+ return ToType(Operand(base)).toAddress();
+ }
+ void moveValue(const Value& val, Register type, Register data) {
+ movl(Imm32(val.toNunboxTag()), type);
+ if (val.isMarkable())
+ movl(ImmGCPtr(val.toMarkablePointer()), data);
+ else
+ movl(Imm32(val.toNunboxPayload()), data);
+ }
+ void moveValue(const Value& val, const ValueOperand& dest) {
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+ }
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(),
+ s1 = src.payloadReg(), d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ xchgl(d0, d1);
+ return;
+ }
+ // If only one is, copy that source first.
+ mozilla::Swap(s0, s1);
+ mozilla::Swap(d0, d1);
+ }
+
+ if (s0 != d0)
+ movl(s0, d0);
+ if (s1 != d1)
+ movl(s1, d1);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ void storeValue(ValueOperand val, Operand dest) {
+ movl(val.payloadReg(), ToPayload(dest));
+ movl(val.typeReg(), ToType(dest));
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ storeTypeTag(ImmTag(JSVAL_TYPE_TO_TAG(type)), Operand(dest));
+ storePayload(reg, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ storeTypeTag(ImmTag(val.toNunboxTag()), Operand(dest));
+ storePayload(val, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ MOZ_ASSERT(src.base != temp);
+ MOZ_ASSERT(dest.base != temp);
+
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ Operand payload = ToPayload(src);
+ Operand type = ToType(src);
+
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory or the index.
+ Register baseReg = Register::FromCode(src.base());
+ Register indexReg = (src.kind() == Operand::MEM_SCALE) ? Register::FromCode(src.index()) : InvalidReg;
+
+ // If we have a BaseIndex that uses both result registers, first compute
+ // the address and then load the Value from there.
+ if ((baseReg == val.payloadReg() && indexReg == val.typeReg()) ||
+ (baseReg == val.typeReg() && indexReg == val.payloadReg()))
+ {
+ computeEffectiveAddress(src, val.scratchReg());
+ loadValue(Address(val.scratchReg(), 0), val);
+ return;
+ }
+
+ if (baseReg == val.payloadReg() || indexReg == val.payloadReg()) {
+ MOZ_ASSERT(baseReg != val.typeReg());
+ MOZ_ASSERT(indexReg != val.typeReg());
+
+ movl(type, val.typeReg());
+ movl(payload, val.payloadReg());
+ } else {
+ MOZ_ASSERT(baseReg != val.payloadReg());
+ MOZ_ASSERT(indexReg != val.payloadReg());
+
+ movl(payload, val.payloadReg());
+ movl(type, val.typeReg());
+ }
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg())
+ movl(payload, dest.payloadReg());
+ movl(ImmType(type), dest.typeReg());
+ }
+ void pushValue(ValueOperand val) {
+ push(val.typeReg());
+ push(val.payloadReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.payloadReg());
+ pop(val.typeReg());
+ }
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isMarkable())
+ push(ImmGCPtr(val.toMarkablePointer()));
+ else
+ push(Imm32(val.toNunboxPayload()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ push(reg);
+ }
+ void pushValue(const Address& addr) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
+ void push64(Register64 src) {
+ push(src.high);
+ push(src.low);
+ }
+ void pop64(Register64 dest) {
+ pop(dest.low);
+ pop(dest.high);
+ }
+ void storePayload(const Value& val, Operand dest) {
+ if (val.isMarkable())
+ movl(ImmGCPtr(val.toMarkablePointer()), ToPayload(dest));
+ else
+ movl(Imm32(val.toNunboxPayload()), ToPayload(dest));
+ }
+ void storePayload(Register src, Operand dest) {
+ movl(src, ToPayload(dest));
+ }
+ void storeTypeTag(ImmTag tag, Operand dest) {
+ movl(tag, ToType(dest));
+ }
+
+ void movePtr(Register src, Register dest) {
+ movl(src, dest);
+ }
+ void movePtr(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ return value.typeReg();
+ }
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
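+        // Doubles are the only values whose tag is below JSVAL_TAG_CLEAR, so an unsigned
+        // Below/AboveOrEqual comparison distinguishes them from every other tag.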
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testGCThing(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(Operand(ToType(address)), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testInt32(cond, Operand(address));
+ }
+ Condition testObject(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testObject(cond, Operand(address));
+ }
+ Condition testDouble(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testDouble(cond, Operand(address));
+ }
+
+
+ Condition testUndefined(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testUndefined(Condition cond, const Address& addr) {
+ return testUndefined(cond, Operand(addr));
+ }
+ Condition testNull(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testNull(Condition cond, const Address& addr) {
+ return testNull(cond, Operand(addr));
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ return testUndefined(cond, value.typeReg());
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ return testBoolean(cond, value.typeReg());
+ }
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ return testInt32(cond, value.typeReg());
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ return testDouble(cond, value.typeReg());
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ return testNull(cond, value.typeReg());
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ return testString(cond, value.typeReg());
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ return testSymbol(cond, value.typeReg());
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ return testObject(cond, value.typeReg());
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value.typeReg());
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ return testNumber(cond, value.typeReg());
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& value) {
+ return testGCThing(cond, value.typeReg());
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ return testPrimitive(cond, value.typeReg());
+ }
+
+
+ Condition testUndefined(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testNull(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testString(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testObject(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testMagic(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) {
+ cmpl(Imm32(rhs.value), lhs);
+ }
+ void cmpPtr(Register lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, Imm32 rhs) {
+ cmp32(lhs, rhs);
+ }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ cmp32(lhs, Imm32(rhs.value));
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmpPtr(const Address& lhs, Register rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Operand& lhs, Register rhs) {
+ cmp32(lhs, rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ cmp32(lhs, rhs);
+ }
+ void testPtr(Register lhs, Register rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(Register lhs, Imm32 rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(Register lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+ void testPtr(const Operand& lhs, Imm32 rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(const Operand& lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ template <typename T, typename S>
+ void branchPtr(Condition cond, T lhs, S ptr, RepatchLabel* label) {
+ cmpPtr(Operand(lhs), ptr);
+ j(cond, label);
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+ jump(label);
+ return CodeOffsetJump(size());
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Assembler::Condition cond,
+ Label* documentation = nullptr)
+ {
+ j(cond, label);
+ return CodeOffsetJump(size());
+ }
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
+ return jumpWithPatch(label);
+ }
+
+ void branchPtr(Condition cond, Register lhs, Register rhs, RepatchLabel* label) {
+ cmpPtr(lhs, rhs);
+ j(cond, label);
+ }
+
+ void movePtr(ImmWord imm, Register dest) {
+ movl(Imm32(imm.value), dest);
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ movl(imm, dest);
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ movl(imm, dest);
+ }
+ void loadPtr(const Address& address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movl(Operand(src), dest);
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void load64(const Address& address, Register64 dest) {
+ movl(Operand(Address(address.base, address.offset + INT64LOW_OFFSET)), dest.low);
+ int32_t highOffset = (address.offset < 0) ? -int32_t(INT64HIGH_OFFSET) : INT64HIGH_OFFSET;
+ movl(Operand(Address(address.base, address.offset + highOffset)), dest.high);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ movl(Imm32(imm.value), Operand(address));
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ movl(imm, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+ void storePtr(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ movw(src, Operand(address));
+ }
+ void store64(Register64 src, Address address) {
+ movl(src.low, Operand(Address(address.base, address.offset + INT64LOW_OFFSET)));
+ movl(src.high, Operand(Address(address.base, address.offset + INT64HIGH_OFFSET)));
+ }
+ void store64(Imm64 imm, Address address) {
+ movl(imm.low(), Operand(Address(address.base, address.offset + INT64LOW_OFFSET)));
+ movl(imm.hi(), Operand(Address(address.base, address.offset + INT64HIGH_OFFSET)));
+ }
+
+ void setStackArg(Register reg, uint32_t arg) {
+ movl(reg, Operand(esp, arg * sizeof(intptr_t)));
+ }
+
+ // Note: this function clobbers the source register.
+ void boxDouble(FloatRegister src, const ValueOperand& dest) {
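+ // With SSE4.1, vpextrd extracts the high 32 bits of the double directly;
+ // the fallback shifts the register down by four bytes to reach them, which
+ // is what clobbers the source register.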
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.payloadReg());
+ vpextrd(1, src, dest.typeReg());
+ } else {
+ vmovd(src, dest.payloadReg());
+ vpsrldq(Imm32(4), src, src);
+ vmovd(src, dest.typeReg());
+ }
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ if (src != dest.payloadReg())
+ movl(src, dest.payloadReg());
+ movl(ImmType(type), dest.typeReg());
+ }
+
+ void unboxNonDouble(const ValueOperand& src, Register dest) {
+ if (src.payloadReg() != dest)
+ movl(src.payloadReg(), dest);
+ }
+ void unboxNonDouble(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void unboxNonDouble(const BaseIndex& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void unboxInt32(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxInt32(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxDouble(const Address& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ MOZ_ASSERT(dest != ScratchDoubleReg);
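+ // With SSE4.1, vpinsrd writes the type word into the upper 32 bits of dest;
+ // the fallback interleaves payload and type with vunpcklps via
+ // ScratchDoubleReg, which is why dest must not alias the scratch register.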
+ if (Assembler::HasSSE41()) {
+ vmovd(src.payloadReg(), dest);
+ vpinsrd(1, src.typeReg(), dest, dest);
+ } else {
+ vmovd(src.payloadReg(), dest);
+ vmovd(src.typeReg(), ScratchDoubleReg);
+ vunpcklps(ScratchDoubleReg, dest, dest);
+ }
+ }
+ void unboxDouble(const Operand& payload, const Operand& type,
+ Register scratch, FloatRegister dest) {
+ MOZ_ASSERT(dest != ScratchDoubleReg);
+ if (Assembler::HasSSE41()) {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vpinsrd(1, scratch, dest, dest);
+ } else {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vmovd(scratch, ScratchDoubleReg);
+ vunpcklps(ScratchDoubleReg, dest, dest);
+ }
+ }
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest) {
+ if (src.payloadReg() != dest)
+ movl(src.payloadReg(), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ xorl(Imm32(1), val.payloadReg());
+ }
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch) {
+ movl(payloadOf(address), scratch);
+ return scratch;
+ }
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address& address, Register scratch) {
+ movl(tagOf(address), scratch);
+ return scratch;
+ }
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+
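+ // Truthiness tests inspect the payload only: an int32 is truthy iff it is
+ // nonzero, and a string is truthy iff its length is nonzero.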
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value) {
+ Register string = value.payloadReg();
+ cmp32(Operand(string, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+ }
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ inline void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
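+ // The call instruction leaves its return address on top of the stack.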
+ void loadInstructionPointerAfterCall(Register dest) {
+ movl(Operand(StackPointer, 0x0), dest);
+ }
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToDouble(Register src, FloatRegister dest);
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToFloat32(Register src, FloatRegister dest);
+
+ void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp);
+ void convertInt64ToFloat32(Register64 src, FloatRegister dest);
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+ void convertInt64ToDouble(Register64 src, FloatRegister dest);
+
+ void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void incrementInt32Value(const Address& addr) {
+ addl(Imm32(1), payloadOf(addr));
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ CodeOffset label = movlWithPatch(PatchedAbsoluteAddress(), dest);
+ append(wasm::GlobalAccess(label, globalDataOffset));
+ }
+ void loadWasmPinnedRegsFromTls() {
+ // x86 doesn't have any pinned registers.
+ }
+
+ public:
+ // Used from within an Exit frame to handle a pending exception.
+ void handleFailureWithHandlerTail(void* handler);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX86 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_h */
diff --git a/js/src/jit/x86/SharedIC-x86.cpp b/js/src/jit/x86/SharedIC-x86.cpp
new file mode 100644
index 000000000..355b73096
--- /dev/null
+++ b/js/src/jit/x86/SharedIC-x86.cpp
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Add R0 and R1. Don't need to explicitly unbox, just use the TailCallReg which
+ // should be available.
+ Register scratchReg = ICTailCallReg;
+
+ Label revertRegister, maybeNegZero;
+ switch(op_) {
+ case JSOP_ADD:
+ // Add R0 and R1. Don't need to explicitly unbox.
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.addl(R1.payloadReg(), scratchReg);
+
+ // Jump to failure on overflow. R0 and R1 are preserved, so we can simply
+ // jump to the next stub.
+ masm.j(Assembler::Overflow, &failure);
+
+ // Just overwrite the payload, the tag is still fine.
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_SUB:
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.subl(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Overflow, &failure);
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_MUL:
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.imull(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Overflow, &failure);
+
+ masm.test32(scratchReg, scratchReg);
+ masm.j(Assembler::Zero, &maybeNegZero);
+
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_DIV:
+ {
+ // Prevent division by 0.
+ masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);
+
+ // Prevent negative 0 and -2147483648 / -1.
+ masm.branch32(Assembler::Equal, R0.payloadReg(), Imm32(INT32_MIN), &failure);
+
+ Label notZero;
+ masm.branch32(Assembler::NotEqual, R0.payloadReg(), Imm32(0), &notZero);
+ masm.branchTest32(Assembler::Signed, R1.payloadReg(), R1.payloadReg(), &failure);
+ masm.bind(&notZero);
+
+ // For idiv we need eax.
+ MOZ_ASSERT(R1.typeReg() == eax);
+ masm.movl(R0.payloadReg(), eax);
+ // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
+ masm.movl(R0.payloadReg(), scratchReg);
+ // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
+ masm.cdq();
+ masm.idiv(R1.payloadReg());
+
+ // A remainder implies a double result.
+ masm.branchTest32(Assembler::NonZero, edx, edx, &revertRegister);
+
+ masm.movl(eax, R0.payloadReg());
+ break;
+ }
+ case JSOP_MOD:
+ {
+ // x % 0 always results in NaN.
+ masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);
+
+ // Prevent negative 0 and -2147483648 % -1.
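+ // (Testing the payload against 0x7fffffff jumps to failure when the lhs is
+ // 0 or INT32_MIN.)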
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
+
+ // For idiv we need eax.
+ MOZ_ASSERT(R1.typeReg() == eax);
+ masm.movl(R0.payloadReg(), eax);
+ // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
+ masm.movl(R0.payloadReg(), scratchReg);
+ // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
+ masm.cdq();
+ masm.idiv(R1.payloadReg());
+
+ // Fail when we would need a negative remainder.
+ Label done;
+ masm.branchTest32(Assembler::NonZero, edx, edx, &done);
+ masm.branchTest32(Assembler::Signed, scratchReg, scratchReg, &revertRegister);
+ masm.branchTest32(Assembler::Signed, R1.payloadReg(), R1.payloadReg(), &revertRegister);
+
+ masm.bind(&done);
+ // Result is in edx, tag in ecx remains untouched.
+ MOZ_ASSERT(R0.payloadReg() == edx);
+ MOZ_ASSERT(R0.typeReg() == ecx);
+ break;
+ }
+ case JSOP_BITOR:
+ // We can override R0, because the instruction is infallible.
+ // R0.typeReg() also remains intact.
+ masm.orl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITXOR:
+ masm.xorl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITAND:
+ masm.andl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_LSH:
+ // RHS needs to be in ecx for shift operations.
+ MOZ_ASSERT(R0.typeReg() == ecx);
+ masm.movl(R1.payloadReg(), ecx);
+ masm.shll_cl(R0.payloadReg());
+ // We need to tag again, because we overwrote it.
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ break;
+ case JSOP_RSH:
+ masm.movl(R1.payloadReg(), ecx);
+ masm.sarl_cl(R0.payloadReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ break;
+ case JSOP_URSH:
+ if (!allowDouble_)
+ masm.movl(R0.payloadReg(), scratchReg);
+
+ masm.movl(R1.payloadReg(), ecx);
+ masm.shrl_cl(R0.payloadReg());
+ masm.test32(R0.payloadReg(), R0.payloadReg());
+ if (allowDouble_) {
+ Label toUint;
+ masm.j(Assembler::Signed, &toUint);
+
+ // Box and return.
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(R0.payloadReg(), ScratchDoubleReg);
+ masm.boxDouble(ScratchDoubleReg, R0);
+ } else {
+ masm.j(Assembler::Signed, &revertRegister);
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ // Return.
+ EmitReturnFromIC(masm);
+
+ switch(op_) {
+ case JSOP_MUL:
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.orl(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Signed, &failure);
+
+ // Result is +0.
+ masm.mov(ImmWord(0), R0.payloadReg());
+ EmitReturnFromIC(masm);
+ break;
+ case JSOP_DIV:
+ case JSOP_MOD:
+ masm.bind(&revertRegister);
+ masm.movl(scratchReg, R0.payloadReg());
+ masm.movl(ImmType(JSVAL_TYPE_INT32), R1.typeReg());
+ break;
+ case JSOP_URSH:
+ // Revert the content of R0 in the fallible >>> case.
+ if (!allowDouble_) {
+ masm.bind(&revertRegister);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ default:
+ // No special failure handling required.
+ // Fall through to failure.
+ break;
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.notl(R0.payloadReg());
+ break;
+ case JSOP_NEG:
+ // Guard against 0 and MIN_INT, both result in a double.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
+ masm.negl(R0.payloadReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86/SharedICHelpers-x86.h b/js/src/jit/x86/SharedICHelpers-x86.h
new file mode 100644
index 000000000..e7f75cc95
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -0,0 +1,353 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_h
+#define jit_x86_SharedICHelpers_x86_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from the stack top to the topmost Value inside an IC stub (the slot in between holds the return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ masm.Pop(ICTailCallReg);
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ masm.Push(ICTailCallReg);
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
+ ICStubReg);
+
+ // Call the stubcode pointer stored in the ICStub. ICTailCallReg will always
+ // be unused in the contexts where ICs are called.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) monitorStubOffset), ICStubReg);
+
+ // Jump to the stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.ret();
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.storePtr(reg, Address(StackPointer, 0));
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ // We assume during this that R0 and R1 have been pushed.
+
+ // Compute frame size.
+ masm.movl(BaselineFrameReg, eax);
+ masm.addl(Imm32(BaselineFrame::FramePointerOffset), eax);
+ masm.subl(BaselineStackReg, eax);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.movl(eax, ebx);
+ masm.subl(Imm32(argSize), ebx);
+ masm.store32(ebx, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(eax, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ // For tail calls, find the already pushed JitFrame_IonJS signifying the
+ // end of the Ion frame. Retrieve the length of the frame and repush
+ // JitFrame_IonJS with the extra stacksize, rendering the original
+ // JitFrame_IonJS obsolete.
+
+ masm.loadPtr(Address(esp, stackSize), eax);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), eax);
+ masm.addl(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), eax);
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(eax, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and previous
+ // frame pointer pushed by EmitEnterStubFrame.
+ masm.movl(BaselineFrameReg, reg);
+ masm.addl(Imm32(sizeof(void*) * 2), reg);
+ masm.subl(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ EmitBaselineCreateStubFrameDescriptor(masm, eax, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ // Stubs often use the return address, which is actually accounted for by
+ // the caller of the stub. In the stub code, however, we pretend it is part
+ // of the stub frame so that it can be popped; compensate here by
+ // subtracting it, otherwise it would be counted twice.
+ uint32_t framePushed = masm.framePushed() - sizeof(void*);
+
+ uint32_t descriptor = MakeFrameDescriptor(framePushed, JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.call(target);
+
+ // Remove the rest of the frame left on the stack. We remove the return
+ // address, which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
+// Size of values pushed by EmitEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Compute frame size.
+ masm.movl(BaselineFrameReg, scratch);
+ masm.addl(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subl(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE
+ // if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(ICStubReg);
+ masm.Push(BaselineFrameReg);
+ masm.mov(BaselineStackReg, BaselineFrameReg);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ masm.loadPtr(Address(masm.getStackPointer(), 0), ICTailCallReg);
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ Register scratch = ICTailCallReg;
+ masm.Pop(scratch);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addl(scratch, BaselineStackReg);
+ } else {
+ masm.mov(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.Pop(BaselineFrameReg);
+ masm.Pop(ICStubReg);
+
+ // Pop return address.
+ masm.Pop(ICTailCallReg);
+
+ // Overwrite frame descriptor with return address, so that the stack matches
+ // the state before entering the stub frame.
+ masm.storePtr(ICTailCallReg, Address(BaselineStackReg, 0));
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+}
+
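+// The IC return address sits on top of the stack, so stowing and unstowing
+// Values temporarily pop it into ICTailCallReg and push it back afterwards.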
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Stow R0
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.push(ICTailCallReg);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Unstow R0
+ masm.pop(ICTailCallReg);
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Unstow R0 and R1
+ masm.pop(ICTailCallReg);
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ masm.push(ICTailCallReg);
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from stack top, excluding the return address.
+
+ // Save the current ICStubReg to stack
+ masm.push(ICStubReg);
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+
+ // Restore the old stub reg.
+ masm.pop(ICStubReg);
+
+ // The update IC will store 0 or 1 in R1.scratchReg(), indicating whether
+ // the value in R0 type-checked properly or not.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ masm.patchableCallPreBarrier(addr, type);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_h */
diff --git a/js/src/jit/x86/SharedICRegisters-x86.h b/js/src/jit/x86/SharedICRegisters-x86.h
new file mode 100644
index 000000000..d34999b74
--- /dev/null
+++ b/js/src/jit/x86/SharedICRegisters-x86.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICRegisters_x86_h
+#define jit_x86_SharedICRegisters_x86_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = ebp;
+static constexpr Register BaselineStackReg = esp;
+
+// ValueOperands R0, R1, and R2
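+// (constructed as (type, payload), so R0 keeps its tag in ecx and its payload
+// in edx, matching the register assertions in SharedIC-x86.cpp).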
+static constexpr ValueOperand R0(ecx, edx);
+static constexpr ValueOperand R1(eax, ebx);
+static constexpr ValueOperand R2(esi, edi);
+
+// ICTailCallReg and ICStubReg reuse
+// registers from R2.
+static constexpr Register ICTailCallReg = esi;
+static constexpr Register ICStubReg = edi;
+
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICRegisters_x86_h */
diff --git a/js/src/jit/x86/Trampoline-x86.cpp b/js/src/jit/x86/Trampoline-x86.cpp
new file mode 100644
index 000000000..d91379cd3
--- /dev/null
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -0,0 +1,1336 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "jit/x86/SharedICHelpers-x86.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::IsPowerOfTwo;
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
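+// Offsets of the EnterJitCode arguments relative to %ebp once the prologue
+// has pushed the previous frame pointer: under cdecl, [ebp + 4] holds the
+// return address, so the first argument lives at [ebp + 2 * sizeof(void*)].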
+enum EnterJitEbpArgumentOffset {
+ ARG_JITCODE = 2 * sizeof(void*),
+ ARG_ARGC = 3 * sizeof(void*),
+ ARG_ARGV = 4 * sizeof(void*),
+ ARG_STACKFRAME = 5 * sizeof(void*),
+ ARG_CALLEETOKEN = 6 * sizeof(void*),
+ ARG_SCOPECHAIN = 7 * sizeof(void*),
+ ARG_STACKVALUES = 8 * sizeof(void*),
+ ARG_RESULT = 9 * sizeof(void*)
+};
+
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard cdecl
+// calling convention.
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ MacroAssembler masm(cx);
+ masm.assertStackAlignment(ABIStackAlignment, -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(ebp);
+ masm.movl(esp, ebp);
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.push(ebx);
+ masm.push(esi);
+ masm.push(edi);
+
+ // Keep track of the stack which has to be unwound after returning from the
+ // compiled function.
+ masm.movl(esp, esi);
+
+ // Load the number of values to be copied (argc) into eax
+ masm.loadPtr(Address(ebp, ARG_ARGC), eax);
+
+ // If we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.loadPtr(Address(ebp, ARG_CALLEETOKEN), edx);
+ masm.branchTest32(Assembler::Zero, edx, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.addl(Imm32(1), eax);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // eax <- 8*numValues, eax is now the offset between argv and the last value.
+ masm.shll(Imm32(3), eax);
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.movl(esp, ecx);
+ masm.subl(eax, ecx);
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // ecx = ecx & 15, holds alignment.
+ masm.andl(Imm32(JitStackAlignment - 1), ecx);
+ masm.subl(ecx, esp);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // ebx = argv --argv pointer is in ebp + 16
+ masm.loadPtr(Address(ebp, ARG_ARGV), ebx);
+
+ // eax = argv[8(argc)] --eax now points one value past the last argument
+ masm.addl(ebx, eax);
+
+ // while (eax > ebx) --while still looping through arguments
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmp32(eax, ebx);
+ masm.j(Assembler::BelowOrEqual, &footer);
+
+ // eax -= 8 --move to previous argument
+ masm.subl(Imm32(8), eax);
+
+ // Push what eax points to on stack, a Value is 2 words
+ masm.push(Operand(eax, 4));
+ masm.push(Operand(eax, 0));
+
+ masm.jmp(&header);
+ masm.bind(&footer);
+ }
+
+
+ // Push the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.mov(Operand(ebp, ARG_RESULT), eax);
+ masm.unboxInt32(Address(eax, 0x0), eax);
+ masm.push(eax);
+
+ // Push the callee token.
+ masm.push(Operand(ebp, ARG_CALLEETOKEN));
+
+ // Load the InterpreterFrame address into the OsrFrameReg.
+ // This address is also used for setting the constructing bit on all paths.
+ masm.loadPtr(Address(ebp, ARG_STACKFRAME), OsrFrameReg);
+
+ /*****************************************************************
+ Push the number of bytes we've pushed so far on the stack and call
+ *****************************************************************/
+ // Create a frame descriptor.
+ masm.subl(esp, esi);
+ masm.makeFrameDescriptor(esi, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(esi);
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(JSReturnOperand);
+ regs.takeUnchecked(OsrFrameReg);
+ regs.take(ebp);
+ regs.take(ReturnReg);
+
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_STACKVALUES), numStackValues);
+
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
+
+ // Push return address.
+ masm.mov(returnLabel.patchAt(), scratch);
+ masm.push(scratch);
+
+ // Push previous frame pointer.
+ masm.push(ebp);
+
+ // Reserve frame.
+ Register framePtr = ebp;
+ masm.subPtr(Imm32(BaselineFrame::Size()), esp);
+ masm.mov(esp, framePtr);
+
+#ifdef XP_WIN
+ // Can't push large frames blindly on Windows. Touch frame memory incrementally.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, framePtr);
+ {
+ masm.movePtr(esp, scratch);
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ masm.bind(&touchFrameLoop);
+ masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
+ masm.store32(Imm32(0), Address(scratch, 0));
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+ masm.jump(&touchFrameLoop);
+ masm.bind(&touchFrameLoopEnd);
+ }
+ masm.mov(esp, framePtr);
+#endif
+
+ // Reserve space for locals and stack values.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, esp);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(scratch); // Fake return address.
+ masm.push(Imm32(0));
+ // No GC things to mark on the stack, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.push(framePtr);
+ masm.push(jitcode);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtr); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ masm.pop(jitcode);
+ masm.pop(framePtr);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), esp);
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.lea(Operand(framePtr, sizeof(void*)), realFramePtr);
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.mov(framePtr, esp);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), esp);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.mov(oomReturnLabel.patchAt(), scratch);
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ masm.loadPtr(Address(ebp, ARG_SCOPECHAIN), R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ /***************************************************************
+ Call passed-in code, get return value and fill in the
+ passed in return value pointer
+ ***************************************************************/
+ masm.call(Address(ebp, ARG_JITCODE));
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.use(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.use(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments off the stack.
+ // eax <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(eax);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), eax); // Unmark EntryFrame.
+ masm.addl(eax, esp);
+
+ // |ebp| could have been clobbered by the inner function.
+ // Grab the address for the Value result from the argument stack.
+ // +20 ... arguments ...
+ // +16 <return>
+ // +12 ebp <- original %ebp pointing here.
+ // +8 ebx
+ // +4 esi
+ // +0 edi
+ masm.loadPtr(Address(esp, ARG_RESULT + 3 * sizeof(void*)), eax);
+ masm.storeValue(JSReturnOperand, Operand(eax, 0));
+
+ /**************************************************************
+ Return stack and registers to correct state
+ **************************************************************/
+
+ // Restore non-volatile registers
+ masm.pop(edi);
+ masm.pop(esi);
+ masm.pop(ebx);
+
+ // Restore old stack frame pointer
+ masm.pop(ebp);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ AutoJitContextAlloc ajca(cx);
+ MacroAssembler masm(cx);
+
+ // We do the minimum amount of work in assembly and shunt the rest
+ // off to InvalidationBailout. Assembly does:
+ //
+ // - Pop the return address from the invalidation epilogue call.
+ // - Push the machine state onto the stack.
+ // - Call the InvalidationBailout routine with the stack pointer.
+ // - Now that the frame has been bailed out, convert the invalidated
+ // frame into an exit frame.
+ // - Do the normal check-return-code-and-thunk-to-the-interpreter dance.
+
+ masm.addl(Imm32(sizeof(uintptr_t)), esp);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ masm.movl(esp, eax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's frameSize outparam.
+ masm.reserveStack(sizeof(size_t));
+ masm.movl(esp, ebx);
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ecx);
+
+ masm.setupUnalignedABICall(edx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.passABIArg(ecx);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+ masm.pop(ebx); // Get the frameSize outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.lea(Operand(esp, ebx, TimesOne, sizeof(InvalidationBailoutStack)), esp);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm(cx);
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- esp
+ // '-- #esi ---'
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
+ // Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == esi);
+
+ // Load the number of |undefined|s to push into %ecx.
+ masm.loadPtr(Address(esp, RectifierFrameLayout::offsetOfCalleeToken()), eax);
+ masm.mov(eax, ecx);
+ masm.andl(Imm32(CalleeTokenMask), ecx);
+ masm.movzwl(Operand(ecx, JSFunction::offsetOfNargs()), ecx);
+
+ // The frame pointer and its padding are pushed on the stack.
+ // Including |this|, there are (|nformals| + 1) arguments to push to the
+ // stack. Then we push a JitFrameLayout. We compute the padding expressed
+ // in the number of extra |undefined| values to push on the stack.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert((sizeof(Value) + 2 * sizeof(void*)) % JitStackAlignment == 0,
+ "No need to consider |this| and the frame pointer and its padding for aligning the stack");
+ static_assert(JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(Imm32(JitStackValueAlignment - 1 /* for padding */), ecx);
+
+ // Account for newTarget, if necessary.
+ static_assert(CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count an extra push");
+ masm.mov(eax, edx);
+ masm.andl(Imm32(CalleeToken_FunctionConstructing), edx);
+ masm.addl(edx, ecx);
+
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), ecx);
+ masm.subl(esi, ecx);
+
+ // Copy the number of actual arguments.
+ masm.loadPtr(Address(esp, RectifierFrameLayout::offsetOfNumActualArgs()), edx);
+
+ masm.moveValue(UndefinedValue(), ebx, edi);
+
+ // NOTE: The fact that x86 ArgumentsRectifier saves the FramePointer is relied upon
+ // by the baseline bailout code. If this changes, fix that code! See
+ // BaselineJIT.cpp/BaselineStackBuilder::calculatePrevFramePtr, and
+ // BaselineJIT.cpp/InitFromBailout. Check for the |#if defined(JS_CODEGEN_X86)| portions.
+ masm.push(FramePointer);
+ masm.movl(esp, FramePointer); // Save %esp.
+ masm.push(FramePointer /* padding */);
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // '-- #esi ---'
+ //
+ // Rectifier frame:
+ // [ebp'] <- ebp [padding] <- esp [undef] [undef] [arg2] [arg1] [this]
+ // '--- #ecx ----' '-- #esi ---'
+ //
+ // [[argc] [callee] [descr] [raddr]]
+
+ // Push undefined.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(ebx); // type(undefined);
+ masm.push(edi); // payload(undefined);
+ masm.subl(Imm32(1), ecx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument. We did a push of %ebp earlier, so be sure to
+ // account for this in the offset
+ BaseIndex b = BaseIndex(FramePointer, esi, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(void*));
+ masm.lea(Operand(b), ecx);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addl(Imm32(1), esi);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(ecx, sizeof(Value)/2));
+ masm.push(Operand(ecx, 0x0));
+ masm.subl(Imm32(sizeof(Value)), ecx);
+ masm.subl(Imm32(1), esi);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.mov(eax, ebx);
+ masm.branchTest32(Assembler::Zero, ebx, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ BaseValueIndex src(FramePointer, edx,
+ sizeof(RectifierFrameLayout) +
+ sizeof(Value) +
+ sizeof(void*));
+
+ masm.andl(Imm32(CalleeTokenMask), ebx);
+ masm.movzwl(Operand(ebx, JSFunction::offsetOfNargs()), ebx);
+
+ BaseValueIndex dst(esp, ebx, sizeof(Value));
+
+ ValueOperand newTarget(ecx, edi);
+
+ masm.loadValue(src, newTarget);
+ masm.storeValue(newTarget, dst);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Construct descriptor, accounting for pushed frame pointer above
+ masm.lea(Operand(FramePointer, sizeof(void*)), ebx);
+ masm.subl(esp, ebx);
+ masm.makeFrameDescriptor(ebx, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.push(edx); // number of actual arguments
+ masm.push(eax); // callee token
+ masm.push(ebx); // descriptor
+
+ // Call the target function.
+ // Note that this assumes the function is JITted.
+ masm.andl(Imm32(CalleeTokenMask), eax);
+ masm.loadPtr(Address(eax, JSFunction::offsetOfNativeOrScript()), eax);
+ masm.loadBaselineOrIonRaw(eax, eax, nullptr);
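+ // Record the offset of this call's return point; it is exposed through
+ // |returnAddrOut| below.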
+ uint32_t returnOffset = masm.callJitNoProfiler(eax);
+
+ // Remove the rectifier frame.
+ masm.pop(ebx); // ebx <- descriptor with FrameType.
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), ebx); // ebx <- frame size.
+ masm.pop(edi); // Discard calleeToken.
+ masm.pop(edi); // Discard number of actual arguments.
+
+ // Discard pushed arguments, but not the pushed frame pointer.
+ BaseIndex unwind = BaseIndex(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
+ masm.lea(Operand(unwind), esp);
+
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // Push registers such that we can access them from [base + code].
+ if (JitSupportsSimd()) {
+ masm.PushRegsInMask(AllRegs);
+ } else {
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter)
+ masm.Push(*iter);
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+ }
+
+ // Push the frame size class (NO_FRAME_SIZE_CLASS_ID for the generic bailout handler).
+ masm.push(Imm32(frameClass));
+
+ // The current stack pointer is the first argument to jit::Bailout.
+ masm.movl(esp, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, eax);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
+
+ // Call the bailout function. This will correct the size of the bailout.
+ masm.setupUnalignedABICall(ecx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+
+ // Size of the data pushed by PushBailoutFrame above, common to both cases.
+ static const uint32_t BailoutDataSize = 0
+ + sizeof(void*) // frameClass
+ + sizeof(RegisterDump);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // We want the frameSize. Stack is:
+ // ... frame ...
+ // snapshotOffset
+ // frameSize
+ // ... bailoutFrame ...
+ masm.addl(Imm32(BailoutDataSize), esp);
+ masm.pop(ebx);
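+ // %ebx now holds the frame size; skip the snapshotOffset word, then pop
+ // the topmost Ion frame itself.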
+ masm.addl(Imm32(sizeof(uint32_t)), esp);
+ masm.addl(ebx, esp);
+ } else {
+ // Stack is:
+ // ... frame ...
+ // bailoutId
+ // ... bailoutFrame ...
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+ masm.addl(Imm32(BailoutDataSize + sizeof(void*) + frameSize), esp);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MacroAssembler masm;
+
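+ // The table is a sequence of call instructions that all target the shared
+ // bailout thunk below; the return address pushed by whichever call was
+ // taken identifies the table entry that was hit.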
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++)
+ masm.call(&bailout);
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ // Generate separate code for the wrapper.
+ MacroAssembler masm;
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // Wrapper register set is a superset of Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // The context is the first argument.
+ Register cxreg = regs.takeAny();
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args]
+ // +4 descriptor
+ // +0 returnAddress
+ //
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Compute the base address of the stack-passed arguments, just past the
+ // exit frame and its footer.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = regs.takeAny();
+ masm.lea(Operand(esp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.Push(UndefinedValue());
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int32_t));
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movl(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ // We don't pass doubles in float registers on x86, so no need
+ // to check for argPassedInFloatReg.
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, eax, eax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(eax, eax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.Pop(JSReturnOperand);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ masm.Pop(ReturnReg);
+ break;
+
+ case Type_Bool:
+ masm.Pop(ReturnReg);
+ masm.movzbl(ReturnReg, ReturnReg);
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint)
+ masm.Pop(ReturnDoubleReg);
+ else
+ masm.assumeUnreachable("Unable to pop to float reg, with no FP support.");
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
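+ // Return, popping the exit frame together with the stack-passed arguments
+ // and any extra Values the VM function asks us to remove.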
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm;
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == edx);
+ masm.movl(ImmPtr(cx->runtime()), ecx);
+
+ masm.setupUnalignedABICall(eax);
+ masm.passABIArg(ecx);
+ masm.passABIArg(edx);
+ masm.callWithABI(IonMarkFunction(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.setFramePushed(sizeof(intptr_t));
+#endif
+
+ Register scratch1 = eax;
+ Register scratch2 = ecx;
+ Register scratch3 = edx;
+
+ // Load the return address in scratch1.
+ masm.loadPtr(Address(esp, 0), scratch1);
+
+ // Load BaselineFrame pointer in scratch2.
+ masm.mov(ebp, scratch2);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch3);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
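+ // Push the arguments for the HandleDebugTrap call: the return address
+ // (used to identify the trapping pc) and the BaselineFrame pointer.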
+ masm.push(scratch1);
+ masm.push(scratch2);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.ret();
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.mov(ebp, esp);
+ masm.pop(ebp);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(edx, ecx);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = eax;
+ Register scratch2 = ebx;
+ Register scratch3 = esi;
+ Register scratch4 = edi;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do
+ // depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.movePtr(scratch1, scratch2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch2);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
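+ // Note: JitFrame_BaselineJS frames take the same path as JitFrame_IonJS
+ // frames; the caller frame is located the same way for both.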
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+ // We are returning directly to a JS (Ion or Baseline) frame. Store the
+ // return address into lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ BaseIndex stubFrameReturnAddr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch1, TimesOne, RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch3);
+ masm.addPtr(Imm32(sizeof(void*)), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch3, TimesOne, IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}